Compare commits

...

52 Commits

Author SHA1 Message Date
Potuz
ff2ed9428b use block number instead 2023-11-24 10:59:16 -03:00
Potuz
23c2fd4443 Check builder's payload parent hash 2023-11-24 10:37:30 -03:00
terence
6c85587d14 Update broadcast method to use BlobSidecar instead of SingedBlobSidecar (#13221)
* Update broadcast method to use BlobSidecar instead of SingedBlobSidecar

* Fix test
2023-11-24 07:18:00 +00:00
terence
6daa72634d Fix forkchoice pkg's comments grammar (#13217) 2023-11-22 17:27:42 -08:00
Potuz
07ee42660a lock RecentBlockSlot (#13212)
* lock RecentBlockSlot

* Kasey's fix
2023-11-22 16:58:00 -03:00
hzysvilla
4b5db8003b Comment typo (#13209)
* Update config.go

* Update flags.go

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2023-11-22 18:54:19 +00:00
Radosław Kapka
4b3c511a26 POST version of GetValidators and GetValidatorBalances (#13199)
* POST versions of GetValidators and GetValidatorBalances

* post statuses

* balances test

* group params

* test error cases
2023-11-22 17:30:52 +00:00
terence
8902ad3a20 Implement Slot-Dependent Caching for Blobs Bundle (#13205) 2023-11-22 07:23:50 -08:00
kasey
1123df7432 Verified roblobs (#13190)
Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-11-21 18:44:38 +00:00
Radosław Kapka
a7edec9b98 Better error handling in REST VC (#13203) 2023-11-21 17:42:55 +01:00
Nicolás Pernas Maradei
10ccf1840f [2/5] light client http api (#12984)
Co-authored-by: Lizhang <lizhang@polymerlabs.org>
2023-11-21 13:26:39 +01:00
terence
d035be29cd Optimize ReplayBlocks for Zero Diff (#13198)
* Stategen: replay block return early when zero diff

* Fix test setup
2023-11-17 18:19:05 +00:00
Preston Van Loon
bba8dd6f1e bazel: Run buildifier, general cleanup (#13193)
* Run buildifier

* Other BUILD.bazel cleanup, rm swagger stuff.

---------

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-11-16 18:41:37 +00:00
terence
8f5ae760ee Add concurrency test for getting attestation state (#13196)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-11-16 17:52:59 +00:00
terence
5ba91a5216 Add construct_generic_block_test to build file (#13195)
* Add construct_generic_block_test test to build file

* Use the right require library

---------

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-11-15 20:33:56 +00:00
james-prysm
4c381938e1 HTTP validator API: wallet endpoints (#13171)
* converting wallet calls to pure http

* fixing proto and gaz

* adding routes and fixing test

* fixing error handling

* fixing protos after conflict with develop

* adding deprecation notice

* fixing route test

* review feedback

* addressing more comments

* updating comment to be more clear

* fixing web_api proto
2023-11-15 19:40:14 +00:00
james-prysm
d4726f2866 HTTP Validator API: slashing protection import and export (#13165)
* adding migration for import and export slashing protection

* Update validator/rpc/handle_slashing.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/rpc/handle_slashing.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/rpc/handle_slashing.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/rpc/handle_slashing.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* addressing comments

* fixing unit test errors after view comments

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2023-11-15 17:35:22 +00:00
terence
4e3419e870 Enhance Validation for Block by Root RPC Requests (#13184)
* blk-by-root-check-root

* Account for gap

---------

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-11-14 17:13:01 +00:00
terence
ac06362baf Add a helper for max request block (#13173)
* Add a helper for max request block

* Add test

* Use deneb fork epoch from config

* Fix comment
2023-11-14 05:50:51 +00:00
Radosław Kapka
28aa11c976 Config HTTP endpoints (#13168)
* Config HTTP endpoints

* error on unsupported type

* type assertion
2023-11-13 23:38:23 +00:00
Radosław Kapka
798d5ec585 Remove default value of circuit breaker flags (#13186)
* Update default value of `max-builder-epoch-missed-slots`

* remove the default value

---------

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-11-13 18:55:55 +00:00
Radosław Kapka
9b97f3fd92 Return 404 from eth/v1/beacon/headers when there are no blocks (#13185)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-11-13 18:06:26 +00:00
Radosław Kapka
0946b5853f Pool slashings HTTP endpoints (#13148)
* Pool slashings HTTP endpoints

* e2e fix

* commit

* remove pb files

---------

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-11-13 17:22:39 +00:00
Nishant Das
1530d17977 Fix Withdrawals (#13181)
* fix withdrawals

* disable it
2023-11-09 13:50:57 +00:00
Potuz
e46f9c5631 KZG Commitment inclusion proof verifier (#13174)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-11-08 19:04:21 +00:00
Preston Van Loon
3097601530 pgo: Enable pgo behind release flag (#13158)
* Revert a54e61ecb0

* Configure the use of pgo profiles behind the release config flag (--config=release)
2023-11-08 13:33:26 +00:00
Nishant Das
4a515c36e6 Deneb E2E (#13040)
* save changes

* add dep

* add changes

* add latest changes

* push changes

* hack it for mainnet

* fix deps

* update it

* add changes

* fix e2e

* revert it

* gaz

* remove log

* preston's review

* clear up

* add more logs

* fix nonce gaps

* make it better

* fix blobs

* set value

* add support for deneb scenario paths

* update to fix scenario

* go mod

* clean up

* fix up

* reduce cog complexity

* lint

* remove

* go sec

* Update testing/endtoend/evaluators/fork.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update proto/ssz_proto_library.bzl

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* fix

* radek's review

* make it atomic

* gaz

* add deneb case

* remove deneb activation

* change e2e yaml

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2023-11-08 12:24:23 +00:00
Potuz
afaeff9d4c Merkle Proofs of KZG commitments (#13159)
* Merkle Proofs of KZG commitments

* fix mock

* Implement Merkle proof spectests

* Check Proof construction in spectests

* fix Merkle proof generator

* Add unit test

* add ssz package unit tests

* add benchmark

* fix typo in comment

* ProposerSlashing was repeated

* Terence's review

* move to consensus_blocks

* use existing error
2023-11-06 08:49:35 -03:00
terence
f663f605d2 Add blob getters (#13170)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-11-04 14:35:56 +00:00
Terence
12f7143c4f Validator client: remove blob signing (#13169) 2023-11-03 12:10:15 -07:00
Radosław Kapka
1f250f7e89 Validator HTTP endpoints (#13167)
* HTTP validator endpoints

* Sammy's review

* capitalize errors

* test fix

---------

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-11-03 16:59:04 +00:00
Sammy Rosso
0f65e51d1e Blob filesystem: Save Blobs (#13129)
* Add Save blob and tests

* Remove locks

* Remove test cleanup

* Fix go mod

* Cleanup

* Add checksum

* Add file hashing to fileutil

* Move test

* Check data when exists

* Add one more test

* Rename

* Gaz

* Add packaged level comment

* Save full sidecar + reviews

* Use path builder in test

* Use other BlobSidecar

* Cleanup

* Fix gosec

---------

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-11-03 16:24:30 +00:00
Radosław Kapka
d1dd8471a3 Debug HTTP endpoints (#13164)
* Debug HTTP endpoints

* register endpoints

* tests

* small fixes

* config test fix
2023-11-03 15:33:46 +00:00
Terence
7a6487b746 Remove pending blobs queue (#13166) 2023-11-03 07:07:43 -07:00
Potuz
daa6d2e741 Implement Merkle proof spectests (#13146)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-11-02 23:08:16 +00:00
Raul Jordan
8a743a6430 Update Terms of Service (#13163)
* tos updates

* fixes
2023-11-02 17:11:11 +00:00
james-prysm
c0fb16a96f HTTP validator API: health endpoints (#13149)
* updating health endpoints

* updating tests

* updating tests

* moving where the header is written and adding allow origin header

* removing header

* Update validator/rpc/handlers_health.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/rpc/handlers_health.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update validator/rpc/handlers_health.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* radek's comments

* Update handlers_health.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* adding the correct errors to handle error

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2023-11-02 15:51:21 +00:00
Terence
57eda1de63 Add RO blob sidecar (#13144) 2023-11-01 10:03:49 -07:00
Preston Van Loon
a54e61ecb0 pgo: remove default pprof profile (#13150) 2023-10-31 21:43:41 +00:00
james-prysm
27b4e32e1c HTTP Validator API: /eth/v1/keystores (#13113)
* WIP

* fixing tests

* fixing bazel

* fixing api client

* fixing tests

* fixing more tests and bazel

* fixing trace and more bazel issues

* fixing router path function definitions

* fixing more tests and deep source issues

* adding delete test

* if a route is provided, reregister before the catch all on the middleware.

* fixing linting

* fixing deepsource complaint

* gaz

* more deepsource issues

* fixing missed err check

* changing how routes are registered

* radek reviews

* Update validator/rpc/handlers_keymanager.go

Co-authored-by: Sammy Rosso <15244892+saolyn@users.noreply.github.com>

* Update validator/rpc/handlers_keymanager.go

Co-authored-by: Sammy Rosso <15244892+saolyn@users.noreply.github.com>

* fixing unit test after sammy's review

* adding radek's comments

---------

Co-authored-by: Sammy Rosso <15244892+saolyn@users.noreply.github.com>
2023-10-31 16:33:54 +00:00
Nishant Das
b56bf00682 Fix Pending Queue Deadline Bug (#13145)
* rearrange deadline

* naming
2023-10-31 06:40:41 +00:00
Preston Van Loon
b24b60dbd8 zig: Update zig to recent main branch commit (#13142)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-10-30 23:48:45 +00:00
Potuz
dc9d34b41b fix head slot in log (#13139) 2023-10-30 16:56:20 -03:00
Radosław Kapka
2ef0b3526d Fix block proposals in the REST validator client (#13116)
* Fix block proposals in the REST validator client

* fix graffiti test

* return empty graffiti

* fallback to old endpoints

* logs

* handle 404

* everything passes

* review from James

* log undecoded value

* test fixes and additions

---------

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-10-30 18:15:45 +00:00
Sammy Rosso
047613069e Rename Blob retention epoch flag (#13124)
* Rename flag and add alias

* Update cmd/beacon-chain/flags/base.go

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>

* Fix sentence

* Fix TestConfigureBlobRetentionEpoch

* Fix silly mistake

* Reviews

---------

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
2023-10-30 17:35:31 +00:00
Justin Traglia
159a5dd69d Check that blobs count is correct when unblinding (#13118)
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2023-10-30 16:49:32 +01:00
hyunchel
470ea6d717 Remove no-op cancel func (#13069)
This cancel function is currently a no-op due to the blank identifier.
One might argue that the cancel func should be restored from no-op by
replacing the blank identifier with the proper variable. When the parent
context is cancelled, however, all the functions down the call tree with
the context will be notified of the cancellation anyway. Removing the
cancel function would not change any outcome under the current
implementation.

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2023-10-30 14:43:25 +00:00
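The commit message above describes a common Go pattern: a context cancel function assigned to the blank identifier can never be called, so it contributes nothing, while cancellation of the parent context still propagates to every derived context down the call tree. A minimal, self-contained sketch of that behavior (the function names here are illustrative, not the actual Prysm call site):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// doWork stands in for a call tree that respects context cancellation.
func doWork(ctx context.Context) error {
	select {
	case <-time.After(50 * time.Millisecond):
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	parent, parentCancel := context.WithCancel(context.Background())

	// The pattern the commit removed: the derived cancel func is discarded via
	// the blank identifier, so it can never be invoked. go vet's lostcancel
	// check flags exactly this.
	child, _ := context.WithCancel(parent)

	// Cancelling the parent still reaches every descendant context, which is
	// why dropping the no-op cancel changes no observable behavior.
	parentCancel()
	fmt.Println(doWork(child)) // context canceled
}
```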
Radosław Kapka
b441f20e6a Remove /node/peers/{peer_id} from Beacon API evaluator (#13138) 2023-10-30 14:21:07 +00:00
Potuz
2ea5bff9c0 Log when sending FCU with payload attributes (#13137)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-10-30 12:30:20 +00:00
terencechain
c2433ff854 Update spectest and changed minimal preset for field elements (#13090)
* update trusted setup

* update dependencies

* Update workspace

---------

Co-authored-by: Potuz <potuz@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-10-30 11:41:58 +00:00
Preston Van Loon
82640b3d88 Enable profile guided optimization for beacon-chain (#13035)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-10-30 05:52:36 +00:00
Radosław Kapka
f925aded66 Allow unknown fields in Beacon API responses (#13131) 2023-10-27 16:47:50 +00:00
428 changed files with 19435 additions and 16715 deletions

View File

@@ -27,6 +27,7 @@ build:minimal --@io_bazel_rules_go//go/config:tags=minimal
# Release flags
build:release --compilation_mode=opt
build:release --stamp
build:release --define pgo_enabled=1
# Build binary with cgo symbolizer for debugging / profiling.
build:cgo_symbolizer --copt=-g

View File

@@ -144,6 +144,11 @@ config_setting(
values = {"define": "coverage_enabled=1"},
)
config_setting(
name = "pgo_enabled",
values = {"define": "pgo_enabled=1"},
)
common_files = {
"//:LICENSE.md": "LICENSE.md",
"//:README.md": "README.md",

View File

@@ -1,45 +1,53 @@
## Terms of Use
# Terms of Use
Effective as of November 2, 2023
Effective as of Oct 14, 2020
By downloading, accessing or using the Prysm implementation (“Prysm”), you (referenced herein as “you” or the “user”) certify that you have read and agreed to the terms and conditions below (the “Terms”) which form a binding contract between you and Offchain Labs, Inc. (as successor in interest to Prysmatic Labs LLC) (referenced herein as “Offchain Labs”, “we” or “us”). If you do not agree to the Terms, do not download or use Prysm. Additionally, the Terms of Use available at https://arbitrum.io/tos (or any successor site, the “OCL Terms of Use”) are hereby incorporated by reference into these Terms. In the event of any conflict between provisions set forth herein and those set forth in the OCL Terms of Use, the provisions set forth herein shall control.
By downloading, accessing or using the Prysm implementation (“Prysm”), you (referenced herein as “you” or the “user”) certify that you have read and agreed to the terms and conditions below (the “Terms”) which form a binding contract between you and Prysmatic Labs (referenced herein as “we” or “us”). If you do not agree to the Terms, do not download or use Prysm.
## About Prysm
### About Prysm
Prysm is a client implementation for Ethereum consensus protocol for a proof-of-stake blockchain. To participate in the network, a user must send ETH from the Eth1.0 chain into a validator deposit contract, which will queue in the user as a validator in the system. Validators participate in proposing and voting on blocks in the protocol, and the network applies rewards/penalties based on their behavior. A detailed set of installation and usage instructions as well as breakdowns of each individual component are available in the official documentation portal, however, we do not warrant the accuracy, completeness or usefulness of this documentation. Any reliance you place on such information is strictly at your own risk.
Prysm is a client implementation for the Ethereum blockchains consensus protocol. To participate in the network, a user must send ETH from the Ethereum mainnet blockchain to a validator deposit smart contract on Ethereum mainnet. Validators participate in proposing and voting on blocks in the protocol, and the network applies rewards/penalties based on their behavior. A detailed set of installation and usage instructions as well as breakdowns of each individual component are available in the official documentation portal, however, we do not warrant the accuracy, completeness or usefulness of this documentation. Any reliance you place on such information is strictly at your own risk.
### Licensing Terms
Prysm is a fully open-source software program licensed pursuant to the GNU General Public License v3.0.
## Licensing Terms
Prysm is an open-source software program licensed pursuant to the GNU General Public License v3.0.
The Offchain Labs name, the term “Prysm” and all related names, logos, product and service names, designs and slogans are trademarks of Offchain Labs or its affiliates and/or licensors. You must not use such marks without our prior written permission.
PLEASE READ THESE TERMS CAREFULLY, AS THE OCL TERMS OF USE INCORPORATED BY REFERENCE HEREIN CONTAIN AN AGREEMENT TO ARBITRATE AND OTHER IMPORTANT INFORMATION REGARDING YOUR LEGAL RIGHTS, REMEDIES, AND OBLIGATIONS. THE AGREEMENT TO ARBITRATE REQUIRES (WITH LIMITED EXCEPTION) THAT YOU SUBMIT CLAIMS YOU HAVE AGAINST US TO BINDING AND FINAL ARBITRATION, AND FURTHER (1) YOU WILL ONLY BE PERMITTED TO PURSUE CLAIMS AGAINST OFFCHAIN LABS ON AN INDIVIDUAL BASIS, NOT AS A PLAINTIFF OR CLASS MEMBER IN ANY CLASS OR REPRESENTATIVE ACTION OR PROCEEDING, (2) YOU WILL ONLY BE PERMITTED TO SEEK RELIEF (INCLUDING MONETARY, INJUNCTIVE, AND DECLARATORY RELIEF) ON AN INDIVIDUAL BASIS, AND (3) YOU MAY NOT BE ABLE TO HAVE ANY CLAIMS YOU HAVE AGAINST US RESOLVED BY A JURY OR IN A COURT OF LAW.
The Prysmatic Labs name, the term “Prysm” and all related names, logos, product and service names, designs and slogans are trademarks of Prysmatic Labs or its affiliates and/or licensors. You must not use such marks without our prior written permission.
## Risks of Operating Prysm
### Risks of Operating Prysm
The use of Prysm and acting as a validator on the Ethereum network can lead to loss of money. Ethereum is still an experimental system and ETH remains a risky investment. You alone are responsible for your actions on Prysm including the security of your ETH and meeting any applicable minimum system requirements.
The use of Prysm and acting as a validator on the Ethereum network can lead to loss of money, tokens and value. Ethereum is still an experimental system and ETH remains a risky investment. You alone are responsible for your actions on Prysm, including the security of your ETH and meeting any applicable minimum system requirements.
Use of Prysm and the ability to receive rewards or penalties may be affected at any time by mistakes made by the user or other users, software problems such as bugs, errors, incorrectly constructed transactions, unsafe cryptographic libraries or malware affecting the network, technical failures in the hardware of a user, security problems experienced by a user and/or actions or inactions of third parties and/or events experienced by third parties, among other risks. We cannot and do not guarantee that any user of Prysm will make money, that the Prysm network will operate in accordance with the documentation or that transactions will be effective or secure.
We make no claims that Prysm is appropriate or permitted for use in any specific jurisdiction. Access to Prysm may not be legal by certain persons or in certain jurisdictions or countries. If you access Prysm, you do so on your own initiative and are responsible for compliance with local laws.
YOU ACKNOWLEDGE THAT WE ARE NOT RESPONSIBLE FOR ANY RISKS ASSOCIATED WITH YOUR USE OF PRYSM, AND CANNOT BE HELD LIABLE FOR ANY RESULTING LOSSES THAT YOU EXPERIENCE WHILE ACCESSING OR USING PRYSM.
Some Internet plans will charge an additional amount for any excess upload bandwidth used that isn't included in the plan and may terminate your connection without warning because of overuse. We advise that you check whether your Internet connection is subjected to such limitations and monitor your bandwidth use so that you can stop Prysm before you reach your upload limit.
BY ACCESSING AND USING PRYSM, YOU REPRESENT AND WARRANT THAT YOU UNDERSTAND THE INHERENT RISKS ASSOCIATED WITH USING CRYPTOGRAPHIC AND BLOCKCHAIN-BASED SYSTEMS, AND THAT YOU HAVE A WORKING KNOWLEDGE OF THE USAGE AND INTRICACIES OF DIGITAL ASSETS, SUCH AS THOSE FOLLOWING THE ETHEREUM TOKEN STANDARD (ERC-20). YOU FURTHER UNDERSTAND THAT THE MARKETS FOR DIGITAL ASSETS ARE HIGHLY VOLATILE DUE TO VARIOUS FACTORS, INCLUDING ADOPTION, SPECULATION, TECHNOLOGY, SECURITY, AND REGULATION. YOU ACKNOWLEDGE AND ACCEPT THAT THE COST AND SPEED OF TRANSACTING WITH CRYPTOGRAPHIC AND BLOCKCHAIN-BASED SYSTEMS SUCH AS ETHEREUM ARE VARIABLE AND MAY INCREASE DRAMATICALLY AT ANY TIME. YOU UNDERSTAND THAT ANYONE CAN CREATE A TOKEN, INCLUDING FAKE VERSIONS OF EXISTING TOKENS AND TOKENS THAT FALSELY CLAIM TO REPRESENT PROJECTS, AND ACKNOWLEDGE AND ACCEPT THE RISK THAT YOU MAY MISTAKENLY INTERACT WITH THOSE OR OTHER TOKENS. YOU FURTHER ACKNOWLEDGE THAT WE ARE NOT RESPONSIBLE FOR ANY OF THE VARIABLES OR RISKS DESCRIBED IN THESE TERMS. YOU UNDERSTAND AND AGREE TO ASSUME FULL RESPONSIBILITY FOR ALL OF THE RISKS OF ACCESSING AND USING PRYSM. YOU ARE SOLELY RESPONSIBLE FOR YOUR WALLETS, FOR SAFEGUARDING THE ASSOCIATED PRIVATE KEY AND FOR ANY ACTIVITY THAT OCCURS USING YOUR WALLET. WITHOUT LIMITING THE FOREGOING, YOU ALSO UNDERSTAND THAT THERE MAY BE TAX AND REGULATORY RISKS RELATED TO USING PRYSM. IT IS YOUR SOLE RESPONSIBILITY TO DETERMINE WHETHER, AND TO WHAT EXTENT, ANY TAXES APPLY TO ANY TRANSACTIONS YOU CONDUCT IN CONNECTION WITH YOUR USE OF PRYSM, AND TO WITHHOLD, COLLECT, REPORT AND REMIT THE CORRECT AMOUNTS OF TAXES TO THE APPROPRIATE TAX AUTHORITIES. DIGITAL ASSETS, BLOCKCHAIN TECHNOLOGY, AND ANY RELATED SOFTWARE AND SERVICES ARE ALSO SUBJECT TO LEGAL AND REGULATORY UNCERTAINTY IN THE UNITED STATES AND OTHER JURISDICTIONS. YOU UNDERSTAND THAT LEGISLATIVE AND REGULATORY CHANGES OR ACTIONS MAY ADVERSELY AFFECT THE USAGE, TRANSFERABILITY, TRANSACTABILITY AND ACCESSIBILITY RELATED TO PRYSM.
### Warranty Disclaimer
PRYSM IS PROVIDED ON AN “AS-IS” BASIS AND MAY INCLUDE ERRORS, OMISSIONS, OR OTHER INACCURACIES. PRYSMATIC LABS AND ITS CONTRIBUTORS MAKE NO REPRESENTATIONS OR WARRANTIES ABOUT PRYSM FOR ANY PURPOSE, AND HEREBY EXPRESSLY DISCLAIM ALL WARRANTIES, EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT OR ANY OTHER IMPLIED WARRANTY UNDER THE UNIFORM COMPUTER INFORMATION TRANSACTIONS ACT AS ENACTED BY ANY STATE. WE ALSO MAKE NO REPRESENTATIONS OR WARRANTIES THAT PRYSM WILL OPERATE ERROR-FREE, UNINTERRUPTED, OR IN A MANNER THAT WILL MEET YOUR REQUIREMENTS AND/OR NEEDS. THEREFORE, YOU ASSUME THE ENTIRE RISK REGARDING THE QUALITY AND/OR PERFORMANCE OF PRYSM AND ANY TRANSACTIONS ENTERED INTO THEREON.
We make no claims that Prysm is appropriate or permitted for use in any specific jurisdiction. Access to Prysm may not be legal by certain persons or in certain jurisdictions or countries. If you access Prysm, you do so on your own initiative and are responsible for compliance with all Applicable Law (as defined below), including, without limitation, for the avoidance of doubt, local laws.
### Limitation of Liability
In no event will Prysmatic Labs or any of its contributors be liable, whether in contract, warranty, tort (including negligence, whether active, passive or imputed), product liability, strict liability or other theory, breach of statutory duty or otherwise arising out of, or in connection with, your use of Prysm, for any direct, indirect, incidental, special or consequential damages (including any loss of profits or data, business interruption or other pecuniary loss, or damage, loss or other compromise of data, in each case whether direct, indirect, incidental, special or consequential) arising out of use Prysm, even if we or other users have been advised of the possibility of such damages. The foregoing limitations and disclaimers shall apply to the maximum extent permitted by applicable law, even if any remedy fails of its essential purpose. You acknowledge and agree that the limitations of liability afforded us hereunder constitute a material and actual inducement and condition to entering into these Terms, and are reasonable, fair and equitable in scope to protect our legitimate interests in light of the fact that we are not receiving consideration from you for providing Prysm.
Some Internet plans will charge additional amounts for bandwidth or any excess upload bandwidth used that isn't included in the plan and may terminate your connection without warning because of overuse. We advise that you check whether your Internet connection is subjected to any such limitations and monitor your bandwidth use and upload volumes.
### Indemnification
To the maximum extent permitted by law, you will defend, indemnify and hold Prysmatic Labs and its contributors harmless from and against any and all claims, actions, suits, investigations, or proceedings by any third party (including any party or purported party to or beneficiary or purported beneficiary of any transaction on Prysm), as well as any and all losses, liabilities,
damages, costs, and expenses (including reasonable attorneys' fees) arising out of, accruing from, or in any way related to (i) your breach of the terms of this Agreement, (ii) any transaction, or the failure to occur of any transaction on Prysm, and (iii) your negligence, fraud, or willful misconduct.
## Warranty Disclaimer
### Compliance with Laws and Tax Obligations
Your use of Prysm is subject to all applicable laws of any governmental authority, including, without limitation, federal, state and foreign securities laws, tax laws, tariff and trade laws, ordinances, judgments, decrees, injunctions, writs and orders or like actions of any governmental authority and rules, regulations, orders, interpretations, licenses, and permits of any federal,
regional, state, county, municipal or other governmental authority and you agree to comply with all such laws in your use of Prysm. The users of Prysm are solely responsible to determinate what, if any, taxes apply to their ETH transactions. The owners of, or contributors to, Prysm are not responsible for determining the taxes that apply to ETH transactions.
PRYSM IS PROVIDED ON AN “AS-IS” BASIS AND MAY INCLUDE ERRORS, OMISSIONS, OR OTHER INACCURACIES. WITHOUT LIMITING ANYTHING SET FORTH ELSEWHERE IN THESE TERMS, OFFCHAIN LABS AND ITS CONTRIBUTORS MAKE NO REPRESENTATIONS OR WARRANTIES ABOUT PRYSM FOR ANY PURPOSE, AND HEREBY EXPRESSLY DISCLAIM ALL WARRANTIES, EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT OR ANY OTHER IMPLIED WARRANTY UNDER THE UNIFORM COMPUTER INFORMATION TRANSACTIONS ACT AS ENACTED BY ANY STATE OR OTHER GOVERNMENTAL AUTHORITY. WE ALSO MAKE NO REPRESENTATIONS OR WARRANTIES THAT PRYSM WILL OPERATE ERROR-FREE, UNINTERRUPTED, OR IN A MANNER THAT WILL MEET YOUR REQUIREMENTS AND/OR NEEDS. THEREFORE, YOU ASSUME THE ENTIRE RISK REGARDING THE QUALITY AND/OR PERFORMANCE OF PRYSM AND ANY TRANSACTIONS ENTERED INTO THEREON.
### Miscellaneous
These Terms will be construed and enforced in accordance with the laws of the state of Illinois as applied to agreements entered into and completely performed in Illinois. You agree to the personal jurisdiction by and venue in Illinois and waive any objection to such jurisdiction or venue.
## Limitation of Liability
We reserve the right to revise these Terms, and your rights and obligations are at all times subject to the then-current Terms provided on Prysm. Your continued use of Prysm constitutes acceptance of such revised Terms.
IN NO EVENT WILL OFFCHAIN LABS OR ANY OF ITS AFFILIATES OR ITS OR ANY SUCH AFFILIATES DIRECTORS, OFFICERS, EMPLOYEES, AGENTS, OR REPRESENTATIVES OR ANY CONTRIBUTORS (COLLECTIVELY, THE “OCL PARTIES”) BE LIABLE, WHETHER IN CONTRACT, WARRANTY, TORT (INCLUDING NEGLIGENCE, WHETHER ACTIVE, PASSIVE OR IMPUTED), PRODUCT LIABILITY, STRICT LIABILITY OR OTHER THEORY, BREACH OF STATUTORY DUTY OR OTHERWISE ARISING OUT OF, OR IN CONNECTION WITH, YOUR USE OF PRYSM, FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL DAMAGES (INCLUDING ANY LOSS OF PROFITS OR DATA, BUSINESS INTERRUPTION OR OTHER PECUNIARY LOSS, OR DAMAGE, LOSS OR OTHER COMPROMISE OF DATA, IN EACH CASE WHETHER DIRECT, INDIRECT, INCIDENTAL, SPECIAL OR CONSEQUENTIAL) ARISING OUT OF USE PRYSM, EVEN IF WE OR OTHER USERS HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. The foregoing limitations and disclaimers shall apply to the maximum extent permitted by Applicable Law, even if any remedy fails of its essential purpose. You acknowledge and agree that the limitations of liability afforded us hereunder constitute a material and actual inducement and condition to entering into these Terms, and are reasonable, fair and equitable in scope to protect our legitimate interests in light of the fact that we are not receiving consideration from you for providing Prysm.
These Terms constitute the entire agreement between you and Prysmatic Labs regarding use of Prysm and will supersede all prior agreements whether, written or oral. No usage of trade or other regular practice or method of dealing between the parties will be used to modify, interpret, supplement, or alter the terms of these Terms.
## Indemnification
To the maximum extent permitted by Applicable Law, you will defend, indemnify and hold each OCL Party harmless from and against any and all claims, actions, suits, investigations, or proceedings by any third party (including any party or purported party to or beneficiary or purported beneficiary of any transaction or other activity on Prysm), as well as any and all losses, liabilities, damages, costs, and expenses (including reasonable attorneys' fees and costs) arising out of, accruing from, or in any way related to (i) your breach of the terms of this Agreement, (ii) any transaction, or the failure to occur of any transaction on Prysm, and (iii) your negligence, fraud, or willful misconduct.
## Compliance with Laws
Your use of Prysm is subject to all applicable laws of any governmental authority, including, without limitation, federal, state and foreign securities laws, tax laws, tariff and trade laws, ordinances, judgments, decrees, injunctions, writs and orders or like actions of any governmental authority and rules, regulations, orders, interpretations, licenses, and permits of any federal, regional, state, county, municipal or other governmental authority (collectively, “Applicable Law”) and you agree to comply with all such Applicable Law in your use of Prysm. The users of Prysm are solely responsible to determinate what, if any, taxes apply to their ETH transactions. The owners of, or contributors to, Prysm are not responsible for determining the taxes that apply to ETH transactions.
## Miscellaneous
These Terms will be governed by the laws of the State of Delaware without regard to its conflict of law provisions. With respect to any disputes or claims not subject to arbitration, as set forth in the OCL Terms of Use, you and Offchain Labs submit to the personal and exclusive jurisdiction of the state and federal courts located within New York, New York and waive any objection to such jurisdiction and venue. The failure of Offchain Labs to exercise or enforce any right or provision of these Terms will not constitute a waiver of such right or provision.
We reserve the right to revise these Terms, and your rights and obligations are at all times subject to the then-current Terms provided on Prysm. Your use of Prysm following any such revision to these Terms constitutes acceptance of such revised Terms.
These Terms constitute the entire agreement between you and Offchain Labs regarding use of Prysm and will supersede all prior agreements whether, written or oral. No usage of trade or other regular practice or method of dealing between the parties will be used to modify, interpret, supplement, or alter the terms of these Terms.
If any portion of these Terms is held invalid or unenforceable, such invalidity or enforceability will not affect the other provisions of these Terms, which will remain in full force and effect, and the invalid or unenforceable portion will be given effect to the greatest extent possible. The failure of a party to require performance of any provision will not affect that party's right to require performance at any time thereafter, nor will a waiver of any breach or default of these Terms or any provision of these Terms constitute a waiver of any subsequent breach or default or a waiver of the provision itself.
If any portion of these Terms is held invalid or unenforceable, such invalidity or enforceability will not affect the other provisions of these Terms, which will remain in full force and effect, and the invalid or unenforceable portion will be given effect to the greatest extent possible. The failure of a party to require performance of any provision will not affect that party's right to require performance at any time thereafter, nor will a waiver of any breach or default of these Terms or any provision of these Terms constitute a waiver of any subsequent breach or default or a waiver of the provision itself.

View File

@@ -27,7 +27,23 @@ http_archive(
load("@hermetic_cc_toolchain//toolchain:defs.bzl", zig_toolchains = "toolchains")
zig_toolchains()
# Temporarily use a nightly build until 0.12.0 is released.
# See: https://github.com/prysmaticlabs/prysm/issues/13130
zig_toolchains(
host_platform_sha256 = {
"linux-aarch64": "45afb8e32adde825165f4f293fcea9ecea503f7f9ec0e9bf4435afe70e67fb70",
"linux-x86_64": "f136c6a8a0f6adcb057d73615fbcd6f88281b3593f7008d5f7ed514ff925c02e",
"macos-aarch64": "05d995853c05243151deff47b60bdc2674f1e794a939eaeca0f42312da031cee",
"macos-x86_64": "721754ba5a50f31e8a1f0e1a74cace26f8246576878ac4a8591b0ee7b6db1fc1",
"windows-x86_64": "93f5248b2ea8c5ee8175e15b1384e133edc1cd49870b3ea259062a2e04164343",
},
url_formats = [
"https://ziglang.org/builds/zig-{host_platform}-{version}.{_ext}",
"https://mirror.bazel.build/ziglang.org/builds/zig-{host_platform}-{version}.{_ext}",
"https://prysmaticlabs.com/mirror/ziglang.org/builds/zig-{host_platform}-{version}.{_ext}",
],
version = "0.12.0-dev.1349+fa022d1ec",
)
# Register zig sdk toolchains with support for Ubuntu 20.04 (Focal Fossa) which has an EOL date of April, 2025.
# For ubuntu glibc support, see https://launchpad.net/ubuntu/+source/glibc
@@ -247,9 +263,7 @@ filegroup(
url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)
consensus_spec_test_version = "v1.4.0-beta.2-hotfix"
consensus_spec_version = "v1.4.0-beta.2"
consensus_spec_version = "v1.4.0-beta.3"
bls_test_version = "v0.1.1"
@@ -265,8 +279,8 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "99770a001189f66204a4ef79161c8002bcbbcbd8236f1c6479bd5b83a3c68d42",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_test_version,
sha256 = "67ae5b8fc368853da23d4297e480a4b7f4722fb970d1c7e2b6a5b7faef9cb907",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
)
http_archive(
@@ -281,8 +295,8 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "56763f6492ee137108271007d62feef60d8e3f1698e53dee4bc4b07e55f7326b",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_test_version,
sha256 = "82474f29fff4abd09fb1e71bafa98827e2573cf0ad02cf119610961831dc3bb5",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
)
http_archive(
@@ -297,8 +311,8 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "bc1cac1a991cdc7426efea14385dcf215df85ed3f0572b824ad6a1d7ca0c89ad",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_test_version,
sha256 = "60e4b6eb6c341daab7ee5614a8e3f28567247c504c593b951bfe919622c8ef8f",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
)
http_archive(
@@ -312,7 +326,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "c5898001aaab2a5bb38a39ff9d17a52f1f9befcc26e63752cbf556040f0c884e",
sha256 = "fdab9756c93a250219ff6a10d5a9faee1e2e6878a14508410409e307362c6991",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)

View File

@@ -14,6 +14,7 @@ go_library(
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/rpc/apimiddleware:go_default_library",
"//beacon-chain/rpc/eth/beacon:go_default_library",
"//beacon-chain/rpc/eth/config:go_default_library",
"//beacon-chain/rpc/eth/shared:go_default_library",
"//beacon-chain/state:go_default_library",
"//consensus-types/interfaces:go_default_library",
@@ -22,7 +23,6 @@ go_library(
"//encoding/ssz/detect:go_default_library",
"//io/file:go_default_library",
"//network/forks:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//time/slots:go_default_library",

View File

@@ -13,17 +13,16 @@ import (
"strconv"
"text/template"
"github.com/prysmaticlabs/prysm/v4/api/client"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/beacon"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
"github.com/prysmaticlabs/prysm/v4/network/forks"
v1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/api/client"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/apimiddleware"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/beacon"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/config"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/network/forks"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
log "github.com/sirupsen/logrus"
)
@@ -179,12 +178,12 @@ func (c *Client) GetForkSchedule(ctx context.Context) (forks.OrderedSchedule, er
}
// GetConfigSpec retrieve the current configs of the network used by the beacon node.
func (c *Client) GetConfigSpec(ctx context.Context) (*v1.SpecResponse, error) {
func (c *Client) GetConfigSpec(ctx context.Context) (*config.GetSpecResponse, error) {
body, err := c.Get(ctx, getConfigSpecPath)
if err != nil {
return nil, errors.Wrap(err, "error requesting configSpecPath")
}
fsr := &v1.SpecResponse{}
fsr := &config.GetSpecResponse{}
err = json.Unmarshal(body, fsr)
if err != nil {
return nil, err

View File

@@ -8,7 +8,6 @@ go_library(
deps = [
"//api/client:go_default_library",
"//validator/rpc:go_default_library",
"//validator/rpc/apimiddleware:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)

View File

@@ -9,7 +9,6 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/api/client"
"github.com/prysmaticlabs/prysm/v4/validator/rpc"
"github.com/prysmaticlabs/prysm/v4/validator/rpc/apimiddleware"
)
const (
@@ -42,14 +41,14 @@ func (c *Client) GetValidatorPubKeys(ctx context.Context) ([]string, error) {
if err != nil {
return nil, err
}
if len(jsonlocal.Keystores) == 0 && len(jsonremote.Data) == 0 {
if len(jsonlocal.Data) == 0 && len(jsonremote.Data) == 0 {
return nil, errors.New("there are no local keys or remote keys on the validator")
}
hexKeys := make(map[string]bool)
for index := range jsonlocal.Keystores {
hexKeys[jsonlocal.Keystores[index].ValidatingPubkey] = true
for index := range jsonlocal.Data {
hexKeys[jsonlocal.Data[index].ValidatingPubkey] = true
}
for index := range jsonremote.Data {
hexKeys[jsonremote.Data[index].Pubkey] = true
@@ -62,12 +61,12 @@ func (c *Client) GetValidatorPubKeys(ctx context.Context) ([]string, error) {
}
// GetLocalValidatorKeys calls the keymanager APIs for local validator keys
func (c *Client) GetLocalValidatorKeys(ctx context.Context) (*apimiddleware.ListKeystoresResponseJson, error) {
func (c *Client) GetLocalValidatorKeys(ctx context.Context) (*rpc.ListKeystoresResponse, error) {
localBytes, err := c.Get(ctx, localKeysPath, client.WithAuthorizationToken(c.Token()))
if err != nil {
return nil, err
}
jsonlocal := &apimiddleware.ListKeystoresResponseJson{}
jsonlocal := &rpc.ListKeystoresResponse{}
if err := json.Unmarshal(localBytes, jsonlocal); err != nil {
return nil, errors.Wrap(err, "failed to parse local keystore list")
}

View File

@@ -24,6 +24,7 @@ go_library(
"@org_golang_google_grpc//:go_default_library",
"@org_golang_google_grpc//connectivity:go_default_library",
"@org_golang_google_grpc//credentials:go_default_library",
"@org_golang_google_grpc//credentials/insecure:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],
)

View File

@@ -6,8 +6,6 @@ import (
"fmt"
"net"
"net/http"
"path"
"strings"
"time"
"github.com/gorilla/mux"
@@ -19,6 +17,7 @@ import (
"google.golang.org/grpc"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/credentials/insecure"
)
var _ runtime.Service = (*Gateway)(nil)
@@ -178,24 +177,6 @@ func (g *Gateway) corsMiddleware(h http.Handler) http.Handler {
return c.Handler(h)
}
const swaggerDir = "proto/prysm/v1alpha1/"
// SwaggerServer returns swagger specification files located under "/swagger/"
func SwaggerServer() http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
if !strings.HasSuffix(r.URL.Path, ".swagger.json") {
log.Debugf("Not found: %s", r.URL.Path)
http.NotFound(w, r)
return
}
log.Debugf("Serving %s\n", r.URL.Path)
p := strings.TrimPrefix(r.URL.Path, "/swagger/")
p = path.Join(swaggerDir, p)
http.ServeFile(w, r, p)
}
}
// dial the gRPC server.
func (g *Gateway) dial(ctx context.Context, network, addr string) (*grpc.ClientConn, error) {
switch network {
@@ -211,19 +192,21 @@ func (g *Gateway) dial(ctx context.Context, network, addr string) (*grpc.ClientC
// dialTCP creates a client connection via TCP.
// "addr" must be a valid TCP address with a port number.
func (g *Gateway) dialTCP(ctx context.Context, addr string) (*grpc.ClientConn, error) {
security := grpc.WithInsecure()
var security grpc.DialOption
if len(g.cfg.remoteCert) > 0 {
creds, err := credentials.NewClientTLSFromFile(g.cfg.remoteCert, "")
if err != nil {
return nil, err
}
security = grpc.WithTransportCredentials(creds)
} else {
// Use insecure credentials when there's no remote cert provided.
security = grpc.WithTransportCredentials(insecure.NewCredentials())
}
opts := []grpc.DialOption{
security,
grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(int(g.cfg.maxCallRecvMsgSize))),
}
return grpc.DialContext(ctx, addr, opts...)
}
@@ -240,7 +223,7 @@ func (g *Gateway) dialUnix(ctx context.Context, addr string) (*grpc.ClientConn,
return d(addr, 0)
}
opts := []grpc.DialOption{
grpc.WithInsecure(),
grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithContextDialer(f),
grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(int(g.cfg.maxCallRecvMsgSize))),
}

View File

@@ -1,19 +0,0 @@
load("//tools:target_migration.bzl", "moved_targets")
moved_targets(
[
":push_images_debug",
":push_images_alpine",
":push_images",
":image_bundle_debug",
":image_debug",
":image_bundle_alpine",
":image_bundle",
":image_with_creation_time",
":image_alpine",
":image",
":go_default_test",
":beacon-chain",
],
"//cmd/beacon-chain",
)

View File

@@ -50,6 +50,7 @@ go_library(
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/filesystem:go_default_library",
"//beacon-chain/db/filters:go_default_library",
"//beacon-chain/db/kv:go_default_library",
"//beacon-chain/execution:go_default_library",
@@ -69,6 +70,7 @@ go_library(
"//config/params:go_default_library",
"//consensus-types:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/forkchoice:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/payload-attribute:go_default_library",
"//consensus-types/primitives:go_default_library",

View File

@@ -12,10 +12,10 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/forkchoice"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"go.opencensus.io/trace"
@@ -45,7 +45,7 @@ type ForkchoiceFetcher interface {
HighestReceivedBlockSlot() primitives.Slot
ReceivedBlocksLastEpoch() (uint64, error)
InsertNode(context.Context, state.BeaconState, [32]byte) error
ForkChoiceDump(context.Context) (*ethpbv1.ForkChoiceDump, error)
ForkChoiceDump(context.Context) (*forkchoice.Dump, error)
NewSlot(context.Context, primitives.Slot) error
ProposerBoost() [32]byte
}
@@ -530,3 +530,10 @@ func (s *Service) recoverStateSummary(ctx context.Context, blockRoot [32]byte) (
func (s *Service) BlockBeingSynced(root [32]byte) bool {
return s.blockBeingSynced.isSyncing(root)
}
// RecentBlockSlot returns block slot form fork choice store
func (s *Service) RecentBlockSlot(root [32]byte) (primitives.Slot, error) {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.Slot(root)
}

View File

@@ -4,8 +4,8 @@ import (
"context"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/consensus-types/forkchoice"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
)
// CachedHeadRoot returns the corresponding value from Forkchoice
@@ -51,7 +51,7 @@ func (s *Service) InsertNode(ctx context.Context, st state.BeaconState, root [32
}
// ForkChoiceDump returns the corresponding value from forkchoice
func (s *Service) ForkChoiceDump(ctx context.Context) (*ethpbv1.ForkChoiceDump, error) {
func (s *Service) ForkChoiceDump(ctx context.Context) (*forkchoice.Dump, error) {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.ForkChoiceDump(ctx)

View File

@@ -30,6 +30,8 @@ var (
ErrNotCheckpoint = errors.New("not a checkpoint in forkchoice")
)
var errMaxBlobsExceeded = errors.New("Expected commitments in block exceeds MAX_BLOBS_PER_BLOCK")
// An invalid block is the block that fails state transition based on the core protocol rules.
// The beacon node shall not be accepting nor building blocks that branch off from an invalid block.
// Some examples of invalid blocks are:

View File

@@ -157,6 +157,11 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
if hasAttr && payloadID != nil {
var pId [8]byte
copy(pId[:], payloadID[:])
log.WithFields(logrus.Fields{
"blockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(arg.headRoot[:])),
"headSlot": headBlk.Slot(),
"payloadID": fmt.Sprintf("%#x", bytesutil.Trunc(payloadID[:])),
}).Info("Forkchoice updated with payload attributes for proposal")
s.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(nextSlot, proposerId, pId, arg.headRoot)
} else if hasAttr && payloadID == nil && !features.Get().PrepareAllPayloads {
log.WithFields(logrus.Fields{

File diff suppressed because one or more lines are too long

View File

@@ -12,7 +12,7 @@ import (
// - Expected KZG commitments match the number of blobs in the block
// - That the number of proofs match the number of blobs
// - That the proofs are verified against the KZG commitments
func IsDataAvailable(commitments [][]byte, sidecars []*ethpb.BlobSidecar) error {
func IsDataAvailable(commitments [][]byte, sidecars []*ethpb.DeprecatedBlobSidecar) error {
if len(commitments) != len(sidecars) {
return fmt.Errorf("could not check data availability, expected %d commitments, obtained %d",
len(commitments), len(sidecars))

View File

@@ -9,7 +9,7 @@ import (
)
func TestIsDataAvailable(t *testing.T) {
sidecars := make([]*ethpb.BlobSidecar, 0)
sidecars := make([]*ethpb.DeprecatedBlobSidecar, 0)
commitments := make([][]byte, 0)
require.NoError(t, IsDataAvailable(commitments, sidecars))
}

View File

@@ -147,15 +147,3 @@ func logPayload(block interfaces.ReadOnlyBeaconBlock) error {
log.WithFields(fields).Debug("Synced new payload")
return nil
}
func logBlobSidecar(scs []*ethpb.BlobSidecar, startTime time.Time) {
if len(scs) == 0 {
return
}
log.WithFields(logrus.Fields{
"count": len(scs),
"slot": scs[0].Slot,
"block": hex.EncodeToString(scs[0].BlockRoot),
"validationTime": time.Since(startTime),
}).Debug("Synced new blob sidecars")
}

View File

@@ -5,6 +5,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filesystem"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/execution"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations"
@@ -164,6 +165,8 @@ func WithFinalizedStateAtStartUp(st state.BeaconState) Option {
}
}
// WithClockSychronizer sets the ClockSetter/ClockWaiter values to be used by services that need to block until
// the genesis timestamp is known (ClockWaiter) or which determine the genesis timestamp (ClockSetter).
func WithClockSynchronizer(gs *startup.ClockSynchronizer) Option {
return func(s *Service) error {
s.clockSetter = gs
@@ -172,9 +175,18 @@ func WithClockSynchronizer(gs *startup.ClockSynchronizer) Option {
}
}
// WithSyncComplete sets a channel that is used to notify blockchain service that the node has synced to head.
func WithSyncComplete(c chan struct{}) Option {
return func(s *Service) error {
s.syncComplete = c
return nil
}
}
// WithBlobStorage sets the blob storage backend for the blockchain service.
func WithBlobStorage(b *filesystem.BlobStorage) Option {
return func(s *Service) error {
s.blobStorage = b
return nil
}
}
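The WithSyncComplete and WithBlobStorage additions above follow Go's functional-options pattern. A minimal, generic sketch of how such options are typically consumed; the Service, New, and option names below are illustrative stand-ins, not Prysm's actual constructor or fields:

```go
package main

import "fmt"

// Service is a stand-in for a configurable service; Option mirrors the
// functional-options shape used in the diff above.
type Service struct {
	syncComplete chan struct{}
	blobDir      string
}

// Option configures a Service and may return an error.
type Option func(*Service) error

// WithSyncComplete and WithBlobDir are illustrative options.
func WithSyncComplete(c chan struct{}) Option {
	return func(s *Service) error {
		s.syncComplete = c
		return nil
	}
}

func WithBlobDir(dir string) Option {
	return func(s *Service) error {
		s.blobDir = dir
		return nil
	}
}

// New applies each option in order, failing fast on the first error.
func New(opts ...Option) (*Service, error) {
	s := &Service{}
	for _, opt := range opts {
		if err := opt(s); err != nil {
			return nil, err
		}
	}
	return s, nil
}

func main() {
	s, err := New(WithSyncComplete(make(chan struct{})), WithBlobDir("/tmp/blobs"))
	fmt.Println(s.blobDir, err) // /tmp/blobs <nil>
}
```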

View File

@@ -2,6 +2,7 @@ package blockchain
import (
"context"
"sync"
"testing"
"time"
@@ -145,6 +146,61 @@ func TestStore_OnAttestation_Ok_DoublyLinkedTree(t *testing.T) {
require.NoError(t, service.OnAttestation(ctx, att[0], 0))
}
func TestService_GetAttPreState_Concurrency(t *testing.T) {
service, _ := minimalTestService(t)
ctx := context.Background()
s, err := util.NewBeaconState()
require.NoError(t, err)
ckRoot := bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)
err = s.SetFinalizedCheckpoint(&ethpb.Checkpoint{Root: ckRoot})
require.NoError(t, err)
val := &ethpb.Validator{PublicKey: bytesutil.PadTo([]byte("foo"), 48), WithdrawalCredentials: bytesutil.PadTo([]byte("bar"), fieldparams.RootLength)}
err = s.SetValidators([]*ethpb.Validator{val})
require.NoError(t, err)
err = s.SetBalances([]uint64{0})
require.NoError(t, err)
r := [32]byte{'g'}
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, r))
cp1 := &ethpb.Checkpoint{Epoch: 1, Root: ckRoot}
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'A'})))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: ckRoot}))
st, root, err := prepareForkchoiceState(ctx, 100, [32]byte(cp1.Root), [32]byte{}, [32]byte{'R'}, cp1, cp1)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
var wg sync.WaitGroup
errChan := make(chan error, 1000)
for i := 0; i < 1000; i++ {
wg.Add(1)
go func() {
defer wg.Done()
cp1 := &ethpb.Checkpoint{Epoch: 1, Root: ckRoot}
_, err := service.getAttPreState(ctx, cp1)
if err != nil {
errChan <- err
}
}()
}
go func() {
wg.Wait()
close(errChan)
}()
select {
case <-time.After(10 * time.Second):
t.Fatal("Test timed out")
case err, ok := <-errChan:
if ok && err != nil {
require.ErrorContains(t, "not a checkpoint in forkchoice", err)
}
}
}
func TestStore_SaveCheckpointState(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx

View File

@@ -6,17 +6,17 @@ import (
"time"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/kzg"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
coreTime "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filesystem"
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/config/features"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
consensusblocks "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
@@ -353,11 +353,15 @@ func (s *Service) databaseDACheck(ctx context.Context, b consensusblocks.ROBlock
if len(commitments) == 0 {
return nil
}
sidecars, err := s.cfg.BeaconDB.BlobSidecarsByRoot(ctx, b.Root())
missing, err := missingIndices(s.blobStorage, b.Root(), commitments)
if err != nil {
return errors.Wrap(err, "could not get blob sidecars")
return err
}
return kzg.IsDataAvailable(commitments, sidecars)
if len(missing) == 0 {
return nil
}
// TODO: don't worry that this error isn't informative, it will be superceded by a detailed sidecar cache error.
return errors.New("not all kzg commitments are available")
}
func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.BeaconState) error {
@@ -529,11 +533,43 @@ func (s *Service) runLateBlockTasks() {
}
}
// missingIndices uses the expected commitments from the block to determine
// which BlobSidecar indices would need to be in the database for DA success.
// It returns a map where each key represents a missing BlobSidecar index.
// An empty map means we have all indices; a non-empty map can be used to compare incoming
// BlobSidecars against the set of known missing sidecars.
func missingIndices(bs *filesystem.BlobStorage, root [32]byte, expected [][]byte) (map[uint64]struct{}, error) {
if len(expected) == 0 {
return nil, nil
}
if len(expected) > fieldparams.MaxBlobsPerBlock {
return nil, errMaxBlobsExceeded
}
indices, err := bs.Indices(root)
if err != nil {
return nil, err
}
missing := make(map[uint64]struct{}, len(expected))
for i := range expected {
ui := uint64(i)
if len(expected[i]) > 0 {
if !indices[i] {
missing[ui] = struct{}{}
}
}
}
return missing, nil
}
// isDataAvailable blocks until all BlobSidecars committed to in the block are available,
// or an error or context cancellation occurs. A nil result means that the data availability check is successful.
// The function will first check the database to see if all sidecars have been persisted. If any
// sidecars are missing, it will then read from the blobNotifier channel for the given root until the channel is
// closed, the context hits cancellation/timeout, or notifications have been received for all the missing sidecars.
func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed interfaces.ReadOnlySignedBeaconBlock) error {
if signed.Version() < version.Deneb {
return nil
}
t := time.Now()
block := signed.Block()
if block == nil {
@@ -552,59 +588,35 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
if err != nil {
return errors.Wrap(err, "could not get KZG commitments")
}
// expected is the number of kzg commitments observed in the block.
expected := len(kzgCommitments)
if expected == 0 {
return nil
}
// Read first from db in case we have the blobs
sidecars, err := s.cfg.BeaconDB.BlobSidecarsByRoot(ctx, root)
switch {
case err == nil:
if len(sidecars) >= expected {
s.blobNotifiers.delete(root)
if err := kzg.IsDataAvailable(kzgCommitments, sidecars); err != nil {
log.WithField("root", fmt.Sprintf("%#x", root)).Warn("removing blob sidecars with invalid proofs")
if err2 := s.cfg.BeaconDB.DeleteBlobSidecars(ctx, root); err2 != nil {
log.WithError(err2).Error("could not delete sidecars")
}
return err
}
logBlobSidecar(sidecars, t)
return nil
}
case errors.Is(err, db.ErrNotFound):
// If the blob sidecars haven't arrived yet, the subsequent code will wait for them.
// Note: The system will not exit with an error in this scenario.
default:
log.WithError(err).Error("could not get blob sidecars from DB")
// get a map of BlobSidecar indices that are not currently available.
missing, err := missingIndices(s.blobStorage, root, kzgCommitments)
if err != nil {
return err
}
// If there are no missing indices, all BlobSidecars are available.
if len(missing) == 0 {
return nil
}
found := map[uint64]struct{}{}
for _, sc := range sidecars {
found[sc.Index] = struct{}{}
}
// The gossip handler for blobs writes the index of each verified blob referencing the given
// root to the channel returned by blobNotifiers.forRoot.
nc := s.blobNotifiers.forRoot(root)
for {
select {
case idx := <-nc:
found[idx] = struct{}{}
if len(found) != expected {
// Delete each index seen in the notification channel.
delete(missing, idx)
// Read from the channel until there are no more missing sidecars.
if len(missing) > 0 {
continue
}
// Once all sidecars have been observed, clean up the notification channel.
s.blobNotifiers.delete(root)
sidecars, err := s.cfg.BeaconDB.BlobSidecarsByRoot(ctx, root)
if err != nil {
return errors.Wrap(err, "could not get blob sidecars")
}
if err := kzg.IsDataAvailable(kzgCommitments, sidecars); err != nil {
log.WithField("root", fmt.Sprintf("%#x", root)).Warn("removing blob sidecars with invalid proofs")
if err2 := s.cfg.BeaconDB.DeleteBlobSidecars(ctx, root); err2 != nil {
log.WithError(err2).Error("could not delete sidecars")
}
return err
}
logBlobSidecar(sidecars, t)
return nil
case <-ctx.Done():
return errors.Wrap(ctx.Err(), "context deadline waiting for blob sidecars")

View File

@@ -18,6 +18,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filesystem"
testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/execution"
mockExecution "github.com/prysmaticlabs/prysm/v4/beacon-chain/execution/testing"
@@ -2115,3 +2116,99 @@ func Test_commitmentsToCheck(t *testing.T) {
})
}
}
func TestMissingIndices(t *testing.T) {
cases := []struct {
name string
expected [][]byte
present []uint64
result map[uint64]struct{}
root [32]byte
err error
}{
{
name: "zero len",
},
{
name: "expected exceeds max",
expected: fakeCommitments(fieldparams.MaxBlobsPerBlock + 1),
err: errMaxBlobsExceeded,
},
{
name: "first missing",
expected: fakeCommitments(fieldparams.MaxBlobsPerBlock),
present: []uint64{1, 2, 3, 4, 5},
result: fakeResult([]uint64{0}),
},
{
name: "all missing",
expected: fakeCommitments(fieldparams.MaxBlobsPerBlock),
result: fakeResult([]uint64{0, 1, 2, 3, 4, 5}),
},
{
name: "none missing",
expected: fakeCommitments(fieldparams.MaxBlobsPerBlock),
present: []uint64{0, 1, 2, 3, 4, 5},
result: fakeResult([]uint64{}),
},
{
name: "one commitment, missing",
expected: fakeCommitments(1),
present: []uint64{},
result: fakeResult([]uint64{0}),
},
{
name: "3 commitments, 1 missing",
expected: fakeCommitments(3),
present: []uint64{1},
result: fakeResult([]uint64{0, 2}),
},
{
name: "3 commitments, none missing",
expected: fakeCommitments(3),
present: []uint64{0, 1, 2},
result: fakeResult([]uint64{}),
},
{
name: "3 commitments, all missing",
expected: fakeCommitments(3),
present: []uint64{},
result: fakeResult([]uint64{0, 1, 2}),
},
}
for _, c := range cases {
bm, bs := filesystem.NewEphemeralBlobStorageWithMocker(t)
t.Run(c.name, func(t *testing.T) {
require.NoError(t, bm.CreateFakeIndices(c.root, c.present))
missing, err := missingIndices(bs, c.root, c.expected)
if c.err != nil {
require.ErrorIs(t, err, c.err)
return
}
require.NoError(t, err)
require.Equal(t, len(c.result), len(missing))
for key := range c.result {
m, ok := missing[key]
require.Equal(t, true, ok)
require.Equal(t, c.result[key], m)
}
})
}
}
func fakeCommitments(n int) [][]byte {
f := make([][]byte, n)
for i := range f {
f[i] = make([]byte, 48)
}
return f
}
func fakeResult(missing []uint64) map[uint64]struct{} {
r := make(map[uint64]struct{}, len(missing))
for i := range missing {
r[missing[i]] = struct{}{}
}
return r
}

View File

@@ -3,7 +3,7 @@ package blockchain
import (
"context"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
)
// SendNewBlobEvent sends a message to the BlobNotifier channel that the blob
@@ -13,11 +13,11 @@ func (s *Service) sendNewBlobEvent(root [32]byte, index uint64) {
}
// ReceiveBlob saves the blob to storage and sends the new blob event
func (s *Service) ReceiveBlob(ctx context.Context, b *ethpb.BlobSidecar) error {
if err := s.cfg.BeaconDB.SaveBlobSidecar(ctx, []*ethpb.BlobSidecar{b}); err != nil {
func (s *Service) ReceiveBlob(ctx context.Context, b blocks.VerifiedROBlob) error {
if err := s.blobStorage.Save(b); err != nil {
return err
}
s.sendNewBlobEvent([32]byte(b.BlockRoot), b.Index)
s.sendNewBlobEvent(b.BlockRoot(), b.Index)
return nil
}

View File

@@ -43,7 +43,7 @@ type BlockReceiver interface {
// BlobReceiver interface defines the methods of chain service for receiving new
// blobs
type BlobReceiver interface {
ReceiveBlob(context.Context, *ethpb.BlobSidecar) error
ReceiveBlob(context.Context, blocks.VerifiedROBlob) error
}
// SlashingReceiver interface defines the methods of chain service for receiving validated slashing over the wire.
@@ -258,11 +258,6 @@ func (s *Service) HasBlock(ctx context.Context, root [32]byte) bool {
return s.hasBlockInInitSyncOrDB(ctx, root)
}
// RecentBlockSlot returns the block slot from the fork choice store
func (s *Service) RecentBlockSlot(root [32]byte) (primitives.Slot, error) {
return s.cfg.ForkChoiceStore.Slot(root)
}
// ReceiveAttesterSlashing receives an attester slashing and inserts it to forkchoice
func (s *Service) ReceiveAttesterSlashing(ctx context.Context, slashing *ethpb.AttesterSlashing) {
s.cfg.ForkChoiceStore.Lock()

View File

@@ -20,6 +20,7 @@ import (
coreTime "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filesystem"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/execution"
f "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice"
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
@@ -63,6 +64,7 @@ type Service struct {
syncComplete chan struct{}
blobNotifiers *blobNotifierMap
blockBeingSynced *currentlySyncingBlock
blobStorage *filesystem.BlobStorage
}
// config options for the service.

View File

@@ -446,11 +446,10 @@ func BenchmarkHasBlockForkChoiceStore_DoublyLinkedTree(b *testing.B) {
s := &Service{
cfg: &config{ForkChoiceStore: doublylinkedtree.New(), BeaconDB: beaconDB},
}
blk := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}}
blk := util.NewBeaconBlock()
r, err := blk.Block.HashTreeRoot()
require.NoError(b, err)
bs := &ethpb.BeaconState{FinalizedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}, CurrentJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}}
beaconState, err := state_native.InitializeFromProtoPhase0(bs)
beaconState, err := util.NewBeaconState()
require.NoError(b, err)
require.NoError(b, s.cfg.ForkChoiceStore.InsertNode(ctx, beaconState, r))

View File

@@ -56,7 +56,7 @@ func (mb *mockBroadcaster) BroadcastSyncCommitteeMessage(_ context.Context, _ ui
return nil
}
func (mb *mockBroadcaster) BroadcastBlob(_ context.Context, _ uint64, _ *ethpb.SignedBlobSidecar) error {
func (mb *mockBroadcaster) BroadcastBlob(_ context.Context, _ uint64, _ *ethpb.BlobSidecar) error {
mb.broadcastCalled = true
return nil
}

View File

@@ -24,11 +24,11 @@ go_library(
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/forkchoice:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",

View File

@@ -23,11 +23,11 @@ import (
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
forkchoice2 "github.com/prysmaticlabs/prysm/v4/consensus-types/forkchoice"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/sirupsen/logrus"
)
@@ -72,7 +72,7 @@ type ChainService struct {
OptimisticRoots map[[32]byte]bool
BlockSlot primitives.Slot
SyncingRoot [32]byte
Blobs []*ethpb.BlobSidecar
Blobs []blocks.VerifiedROBlob
}
func (s *ChainService) Ancestor(ctx context.Context, root []byte, slot primitives.Slot) ([]byte, error) {
@@ -574,7 +574,7 @@ func (s *ChainService) InsertNode(ctx context.Context, st state.BeaconState, roo
}
// ForkChoiceDump mocks the same method in the chain service
func (s *ChainService) ForkChoiceDump(ctx context.Context) (*ethpbv1.ForkChoiceDump, error) {
func (s *ChainService) ForkChoiceDump(ctx context.Context) (*forkchoice2.Dump, error) {
if s.ForkChoiceStore != nil {
return s.ForkChoiceStore.ForkChoiceDump(ctx)
}
@@ -613,7 +613,7 @@ func (c *ChainService) BlockBeingSynced(root [32]byte) bool {
}
// ReceiveBlob implements the same method in the chain service
func (c *ChainService) ReceiveBlob(_ context.Context, b *ethpb.BlobSidecar) error {
func (c *ChainService) ReceiveBlob(_ context.Context, b blocks.VerifiedROBlob) error {
c.Blobs = append(c.Blobs, b)
return nil
}

View File

@@ -1,6 +1,11 @@
package db
import "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/kv"
import (
"errors"
"os"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/kv"
)
// ErrNotFound can be used to determine if an error from a method in the database package
// represents a "not found" error. These often require different handling than a low-level
@@ -19,3 +24,9 @@ var ErrNotFoundBackfillBlockRoot = kv.ErrNotFoundBackfillBlockRoot
// ErrNotFoundGenesisBlockRoot means no genesis block root was found, indicating the db was not initialized with genesis
var ErrNotFoundGenesisBlockRoot = kv.ErrNotFoundGenesisBlockRoot
// IsNotFound allows callers to treat errors from a flat-file database, where the file record is missing,
// as equivalent to db.ErrNotFound.
func IsNotFound(err error) bool {
return errors.Is(err, ErrNotFound) || os.IsNotExist(err)
}
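A minimal editorial sketch of the intended call-site pattern (getIfPresent, blobStorage, and the arguments are illustrative; the db, filesystem, and blocks packages assumed here are the ones shown elsewhere in this change set):
func getIfPresent(blobStorage *filesystem.BlobStorage, root [32]byte, idx uint64) (*blocks.VerifiedROBlob, error) {
	sc, err := blobStorage.Get(root, idx)
	if err != nil {
		// A missing .ssz file and a missing bolt record are handled the same way.
		if db.IsNotFound(err) {
			return nil, nil
		}
		return nil, err
	}
	return &sc, nil
}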

View File

@@ -0,0 +1,37 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"blob.go",
"ephemeral.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filesystem",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/verification:go_default_library",
"//config/fieldparams:go_default_library",
"//consensus-types/blocks:go_default_library",
"//io/file:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/logging:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_spf13_afero//:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["blob_test.go"],
embed = [":go_default_library"],
deps = [
"//beacon-chain/verification:go_default_library",
"//config/fieldparams:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"@com_github_prysmaticlabs_fastssz//:go_default_library",
"@com_github_spf13_afero//:go_default_library",
],
)

View File

@@ -0,0 +1,179 @@
package filesystem
import (
"fmt"
"os"
"path"
"strconv"
"strings"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/verification"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/io/file"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/runtime/logging"
log "github.com/sirupsen/logrus"
"github.com/spf13/afero"
)
var (
errIndexOutOfBounds = errors.New("blob index in file name > MaxBlobsPerBlock")
)
const (
sszExt = "ssz"
partExt = "part"
directoryPermissions = 0700
)
// NewBlobStorage creates a new instance of the BlobStorage object. Note that the implementation of BlobStorage may
// attempt to hold a file lock to guarantee exclusive control of the blob storage directory, so this should only be
// initialized once per beacon node.
func NewBlobStorage(base string) (*BlobStorage, error) {
base = path.Clean(base)
if err := file.MkdirAll(base); err != nil {
return nil, err
}
fs := afero.NewBasePathFs(afero.NewOsFs(), base)
return &BlobStorage{fs: fs}, nil
}
// BlobStorage is the concrete implementation of the filesystem backend for saving and retrieving BlobSidecars.
type BlobStorage struct {
fs afero.Fs
}
// Save saves a single verified blob sidecar to storage.
func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error {
fname := namerForSidecar(sidecar)
sszPath := fname.path()
exists, err := afero.Exists(bs.fs, sszPath)
if err != nil {
return err
}
if exists {
log.WithFields(logging.BlobFields(sidecar.ROBlob)).Debug("ignoring a duplicate blob sidecar Save attempt")
return nil
}
// Serialize the ethpb.BlobSidecar to binary data using SSZ.
sidecarData, err := sidecar.MarshalSSZ()
if err != nil {
return errors.Wrap(err, "failed to serialize sidecar data")
}
if err := bs.fs.MkdirAll(fname.dir(), directoryPermissions); err != nil {
return err
}
partPath := fname.partPath()
// Create a partial file and write the serialized data to it.
partialFile, err := bs.fs.Create(partPath)
if err != nil {
return errors.Wrap(err, "failed to create partial file")
}
_, err = partialFile.Write(sidecarData)
if err != nil {
closeErr := partialFile.Close()
if closeErr != nil {
return closeErr
}
return errors.Wrap(err, "failed to write to partial file")
}
err = partialFile.Close()
if err != nil {
return err
}
// Atomically rename the partial file to its final name.
err = bs.fs.Rename(partPath, sszPath)
if err != nil {
return errors.Wrap(err, "failed to rename partial file to final name")
}
return nil
}
// Get retrieves a single BlobSidecar by its root and index.
// Since BlobStorage only writes blobs that have undergone full verification, the return
// value is always a VerifiedROBlob.
func (bs *BlobStorage) Get(root [32]byte, idx uint64) (blocks.VerifiedROBlob, error) {
expected := blobNamer{root: root, index: idx}
encoded, err := afero.ReadFile(bs.fs, expected.path())
var v blocks.VerifiedROBlob
if err != nil {
return v, err
}
s := &ethpb.BlobSidecar{}
if err := s.UnmarshalSSZ(encoded); err != nil {
return v, err
}
ro, err := blocks.NewROBlobWithRoot(s, root)
if err != nil {
return blocks.VerifiedROBlob{}, err
}
return verification.BlobSidecarNoop(ro)
}
// Indices generates a bitmap representing which BlobSidecar.Index values are present on disk for a given root.
// This value can be compared to the commitments observed in a block to determine which indices need to be found
// on the network to confirm data availability.
func (bs *BlobStorage) Indices(root [32]byte) ([fieldparams.MaxBlobsPerBlock]bool, error) {
var mask [fieldparams.MaxBlobsPerBlock]bool
rootDir := blobNamer{root: root}.dir()
entries, err := afero.ReadDir(bs.fs, rootDir)
if err != nil {
if os.IsNotExist(err) {
return mask, nil
}
return mask, err
}
for i := range entries {
if entries[i].IsDir() {
continue
}
name := entries[i].Name()
if !strings.HasSuffix(name, sszExt) {
continue
}
parts := strings.Split(name, ".")
if len(parts) != 2 {
continue
}
u, err := strconv.ParseUint(parts[0], 10, 64)
if err != nil {
return mask, errors.Wrapf(err, "unexpected directory entry breaks listing, %s", parts[0])
}
if u >= fieldparams.MaxBlobsPerBlock {
return mask, errIndexOutOfBounds
}
mask[u] = true
}
return mask, nil
}
type blobNamer struct {
root [32]byte
index uint64
}
func namerForSidecar(sc blocks.VerifiedROBlob) blobNamer {
return blobNamer{root: sc.BlockRoot(), index: sc.Index}
}
func (p blobNamer) dir() string {
return fmt.Sprintf("%#x", p.root)
}
func (p blobNamer) fname(ext string) string {
return path.Join(p.dir(), fmt.Sprintf("%d.%s", p.index, ext))
}
func (p blobNamer) partPath() string {
return p.fname(partExt)
}
func (p blobNamer) path() string {
return p.fname(sszExt)
}
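A short editorial sketch tying the pieces above together: create the store once, save a verified sidecar (which goes through the .part write plus rename), then confirm it is reflected by both Indices and Get. The dataDir argument and the way the VerifiedROBlob is produced are placeholders, not part of the diff:
func storeAndCheck(dataDir string, sc blocks.VerifiedROBlob) error {
	store, err := NewBlobStorage(path.Join(dataDir, "blobs"))
	if err != nil {
		return err
	}
	if err := store.Save(sc); err != nil { // writes <root>/<index>.part, then renames it to .ssz
		return err
	}
	idx, err := store.Indices(sc.BlockRoot())
	if err != nil {
		return err
	}
	if !idx[sc.Index] {
		return errors.New("saved sidecar not visible in the index bitmap")
	}
	_, err = store.Get(sc.BlockRoot(), sc.Index) // round-trips through SSZ and the verification noop
	return err
}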

View File

@@ -0,0 +1,100 @@
package filesystem
import (
"bytes"
"testing"
ssz "github.com/prysmaticlabs/fastssz"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/verification"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/spf13/afero"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
)
func TestBlobStorage_SaveBlobData(t *testing.T) {
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, fieldparams.MaxBlobsPerBlock)
testSidecars, err := verification.BlobSidecarSliceNoop(sidecars)
require.NoError(t, err)
t.Run("no error for duplicate", func(t *testing.T) {
fs, bs := NewEphemeralBlobStorageWithFs(t)
existingSidecar := testSidecars[0]
blobPath := namerForSidecar(existingSidecar).path()
// Serialize the existing BlobSidecar to binary data.
existingSidecarData, err := ssz.MarshalSSZ(existingSidecar)
require.NoError(t, err)
require.NoError(t, bs.Save(existingSidecar))
// No error when attempting to write twice.
require.NoError(t, bs.Save(existingSidecar))
content, err := afero.ReadFile(fs, blobPath)
require.NoError(t, err)
require.Equal(t, true, bytes.Equal(existingSidecarData, content))
// Deserialize the BlobSidecar from the saved file data.
savedSidecar := &ethpb.BlobSidecar{}
err = savedSidecar.UnmarshalSSZ(content)
require.NoError(t, err)
// Compare the original Sidecar and the saved Sidecar.
require.DeepSSZEqual(t, existingSidecar.BlobSidecar, savedSidecar)
})
t.Run("indices", func(t *testing.T) {
bs := NewEphemeralBlobStorage(t)
sc := testSidecars[2]
require.NoError(t, bs.Save(sc))
actualSc, err := bs.Get(sc.BlockRoot(), sc.Index)
require.NoError(t, err)
expectedIdx := [fieldparams.MaxBlobsPerBlock]bool{false, false, true}
actualIdx, err := bs.Indices(actualSc.BlockRoot())
require.NoError(t, err)
require.Equal(t, expectedIdx, actualIdx)
})
t.Run("round trip write then read", func(t *testing.T) {
bs := NewEphemeralBlobStorage(t)
err := bs.Save(testSidecars[0])
require.NoError(t, err)
expected := testSidecars[0]
actual, err := bs.Get(expected.BlockRoot(), expected.Index)
require.NoError(t, err)
require.DeepSSZEqual(t, expected, actual)
})
}
func TestBlobIndicesBounds(t *testing.T) {
fs, bs := NewEphemeralBlobStorageWithFs(t)
root := [32]byte{}
okIdx := uint64(fieldparams.MaxBlobsPerBlock - 1)
writeFakeSSZ(t, fs, root, okIdx)
indices, err := bs.Indices(root)
require.NoError(t, err)
var expected [fieldparams.MaxBlobsPerBlock]bool
expected[okIdx] = true
for i := range expected {
require.Equal(t, expected[i], indices[i])
}
oobIdx := uint64(fieldparams.MaxBlobsPerBlock)
writeFakeSSZ(t, fs, root, oobIdx)
_, err = bs.Indices(root)
require.ErrorIs(t, err, errIndexOutOfBounds)
}
func writeFakeSSZ(t *testing.T, fs afero.Fs, root [32]byte, idx uint64) {
namer := blobNamer{root: root, index: idx}
require.NoError(t, fs.MkdirAll(namer.dir(), 0700))
fh, err := fs.Create(namer.path())
require.NoError(t, err)
_, err = fh.Write([]byte("derp"))
require.NoError(t, err)
require.NoError(t, fh.Close())
}

View File

@@ -0,0 +1,53 @@
package filesystem
import (
"testing"
"github.com/spf13/afero"
)
// NewEphemeralBlobStorage should only be used for tests.
// The instance of BlobStorage returned is backed by an in-memory virtual filesystem,
// improving test performance and simplifying cleanup.
func NewEphemeralBlobStorage(_ testing.TB) *BlobStorage {
return &BlobStorage{fs: afero.NewMemMapFs()}
}
// NewEphemeralBlobStorageWithFs can be used by tests that need direct access to the virtual filesystem
// in order to interact with it outside the BlobStorage API.
func NewEphemeralBlobStorageWithFs(_ testing.TB) (afero.Fs, *BlobStorage) {
fs := afero.NewMemMapFs()
return fs, &BlobStorage{fs: fs}
}
type BlobMocker struct {
fs afero.Fs
bs *BlobStorage
}
// CreateFakeIndices creates empty blob sidecar files at the expected path for the given
// root and indices to influence the result of Indices().
func (bm *BlobMocker) CreateFakeIndices(root [32]byte, indices []uint64) error {
for i := range indices {
n := blobNamer{root: root, index: indices[i]}
if err := bm.fs.MkdirAll(n.dir(), directoryPermissions); err != nil {
return err
}
f, err := bm.fs.Create(n.path())
if err != nil {
return err
}
if err := f.Close(); err != nil {
return err
}
}
return nil
}
// NewEphemeralBlobStorageWithMocker returns a *BlobMocker value in addition to the BlobStorage value.
// BlobMocker encapsulates blob path construction so that tests do not depend on implementation details.
func NewEphemeralBlobStorageWithMocker(_ testing.TB) (*BlobMocker, *BlobStorage) {
fs := afero.NewMemMapFs()
bs := &BlobStorage{fs: fs}
return &BlobMocker{fs: fs, bs: bs}, bs
}
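An editorial sketch of how a test might combine the mocker with Indices; it assumes the testing/require package used in the other tests in this change set, and the root value is arbitrary:
func TestCreateFakeIndicesSketch(t *testing.T) {
	bm, bs := NewEphemeralBlobStorageWithMocker(t)
	root := [32]byte{0xaa}
	// Pretend indices 0 and 2 have already been persisted for this root.
	require.NoError(t, bm.CreateFakeIndices(root, []uint64{0, 2}))
	idx, err := bs.Indices(root)
	require.NoError(t, err)
	require.Equal(t, true, idx[0])
	require.Equal(t, false, idx[1])
	require.Equal(t, true, idx[2])
}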

View File

@@ -56,8 +56,8 @@ type ReadOnlyDatabase interface {
RegistrationByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (*ethpb.ValidatorRegistrationV1, error)
// Blob operations.
BlobSidecarsByRoot(ctx context.Context, beaconBlockRoot [32]byte, indices ...uint64) ([]*ethpb.BlobSidecar, error)
BlobSidecarsBySlot(ctx context.Context, slot primitives.Slot, indices ...uint64) ([]*ethpb.BlobSidecar, error)
BlobSidecarsByRoot(ctx context.Context, beaconBlockRoot [32]byte, indices ...uint64) ([]*ethpb.DeprecatedBlobSidecar, error)
BlobSidecarsBySlot(ctx context.Context, slot primitives.Slot, indices ...uint64) ([]*ethpb.DeprecatedBlobSidecar, error)
// origin checkpoint sync support
OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error)
@@ -95,7 +95,7 @@ type NoHeadAccessDatabase interface {
SaveRegistrationsByValidatorIDs(ctx context.Context, ids []primitives.ValidatorIndex, regs []*ethpb.ValidatorRegistrationV1) error
// Blob operations.
SaveBlobSidecar(ctx context.Context, sidecars []*ethpb.BlobSidecar) error
SaveBlobSidecar(ctx context.Context, sidecars []*ethpb.DeprecatedBlobSidecar) error
DeleteBlobSidecars(ctx context.Context, beaconBlockRoot [32]byte) error
CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint primitives.Slot) error

View File

@@ -124,6 +124,7 @@ go_test(
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_golang_snappy//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_urfave_cli_v2//:go_default_library",
"@io_bazel_rules_go//go/tools/bazel:go_default_library",
"@io_etcd_go_bbolt//:go_default_library",

View File

@@ -55,7 +55,7 @@ func (rk blobRotatingKey) BlockRoot() []byte {
// 3. Begin the save algorithm: If the incoming blob has a slot bigger than the saved slot at the spot
// in the rotating keys buffer, we overwrite all elements for that slot. Otherwise, we merge the blob with an existing one.
// Trying to replace a newer blob with an older one is an error.
func (s *Store) SaveBlobSidecar(ctx context.Context, scs []*ethpb.BlobSidecar) error {
func (s *Store) SaveBlobSidecar(ctx context.Context, scs []*ethpb.DeprecatedBlobSidecar) error {
if len(scs) == 0 {
return errEmptySidecar
}
@@ -123,7 +123,7 @@ func (s *Store) SaveBlobSidecar(ctx context.Context, scs []*ethpb.BlobSidecar) e
// validUniqueSidecars ensures that all sidecars have the same slot, parent root, block root, and proposer index, and
// there are no more than MAX_BLOBS_PER_BLOCK sidecars.
func validUniqueSidecars(scs []*ethpb.BlobSidecar) ([]*ethpb.BlobSidecar, error) {
func validUniqueSidecars(scs []*ethpb.DeprecatedBlobSidecar) ([]*ethpb.DeprecatedBlobSidecar, error) {
if len(scs) == 0 {
return nil, errEmptySidecar
}
@@ -167,7 +167,7 @@ func validUniqueSidecars(scs []*ethpb.BlobSidecar) ([]*ethpb.BlobSidecar, error)
}
// sortSidecars sorts the sidecars by their index.
func sortSidecars(scs []*ethpb.BlobSidecar) {
func sortSidecars(scs []*ethpb.DeprecatedBlobSidecar) {
sort.Slice(scs, func(i, j int) bool {
return scs[i].Index < scs[j].Index
})
@@ -178,7 +178,7 @@ func sortSidecars(scs []*ethpb.BlobSidecar) {
// Otherwise, the result will be filtered to only include the specified indices.
// An error will result if an invalid index is specified.
// The bucket size is bounded by 131072 entries. That's the most blobs a node will keep before rotating it out.
func (s *Store) BlobSidecarsByRoot(ctx context.Context, root [32]byte, indices ...uint64) ([]*ethpb.BlobSidecar, error) {
func (s *Store) BlobSidecarsByRoot(ctx context.Context, root [32]byte, indices ...uint64) ([]*ethpb.DeprecatedBlobSidecar, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.BlobSidecarsByRoot")
defer span.End()
@@ -207,7 +207,7 @@ func (s *Store) BlobSidecarsByRoot(ctx context.Context, root [32]byte, indices .
return filterForIndices(sc, indices...)
}
func filterForIndices(sc *ethpb.BlobSidecars, indices ...uint64) ([]*ethpb.BlobSidecar, error) {
func filterForIndices(sc *ethpb.BlobSidecars, indices ...uint64) ([]*ethpb.DeprecatedBlobSidecar, error) {
if len(indices) == 0 {
return sc.Sidecars, nil
}
@@ -215,7 +215,7 @@ func filterForIndices(sc *ethpb.BlobSidecars, indices ...uint64) ([]*ethpb.BlobS
// in ascending order from eg 0..3, without gaps. This allows us to assume the indices argument
// maps 1:1 with indices in the BlobSidecars storage object.
maxIdx := uint64(len(sc.Sidecars)) - 1
sidecars := make([]*ethpb.BlobSidecar, len(indices))
sidecars := make([]*ethpb.DeprecatedBlobSidecar, len(indices))
for i, idx := range indices {
if idx > maxIdx {
return nil, errors.Wrapf(ErrNotFound, "BlobSidecars missing index: index %d", idx)
@@ -230,7 +230,7 @@ func filterForIndices(sc *ethpb.BlobSidecars, indices ...uint64) ([]*ethpb.BlobS
// Otherwise, the result will be filtered to only include the specified indices.
// An error will result if an invalid index is specified.
// The bucket size is bounded by 131072 entries. That's the most blobs a node will keep before rotating it out.
func (s *Store) BlobSidecarsBySlot(ctx context.Context, slot types.Slot, indices ...uint64) ([]*ethpb.BlobSidecar, error) {
func (s *Store) BlobSidecarsBySlot(ctx context.Context, slot types.Slot, indices ...uint64) ([]*ethpb.DeprecatedBlobSidecar, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.BlobSidecarsBySlot")
defer span.End()
@@ -281,7 +281,7 @@ func (s *Store) DeleteBlobSidecars(ctx context.Context, beaconBlockRoot [32]byte
// We define a blob sidecar key as: bytes(slot_to_rotating_buffer(blob.slot)) ++ bytes(blob.slot) ++ blob.block_root
// where slot_to_rotating_buffer(slot) = slot % MAX_SLOTS_TO_PERSIST_BLOBS.
func blobSidecarKey(blob *ethpb.BlobSidecar) blobRotatingKey {
func blobSidecarKey(blob *ethpb.DeprecatedBlobSidecar) blobRotatingKey {
key := slotKey(blob.Slot)
key = append(key, bytesutil.SlotToBytesBigEndian(blob.Slot)...)
key = append(key, blob.BlockRoot...)
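The save algorithm described above hinges on this key layout: the leading slot % MAX_SLOTS_TO_PERSIST_BLOBS bucket is what lets a blob from a newer slot overwrite an older occupant of the same rotating-buffer position, while sidecars for the same slot are merged. An editorial sketch of the layout using plain encoding/binary (bufferLen stands in for MAX_SLOTS_TO_PERSIST_BLOBS; the real code uses slotKey and bytesutil.SlotToBytesBigEndian):
func exampleRotatingKey(slot uint64, root []byte, bufferLen uint64) []byte {
	key := make([]byte, 16, 16+len(root))
	binary.BigEndian.PutUint64(key[:8], slot%bufferLen) // rotating-buffer bucket
	binary.BigEndian.PutUint64(key[8:16], slot)         // disambiguates slots that collide in the buffer
	return append(key, root...)                         // block root makes the key unique per block
}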

View File

@@ -22,7 +22,7 @@ import (
bolt "go.etcd.io/bbolt"
)
func equalBlobSlices(expect []*ethpb.BlobSidecar, got []*ethpb.BlobSidecar) error {
func equalBlobSlices(expect []*ethpb.DeprecatedBlobSidecar, got []*ethpb.DeprecatedBlobSidecar) error {
if len(expect) != len(got) {
return fmt.Errorf("mismatched lengths, expect=%d, got=%d", len(expect), len(got))
}
@@ -80,7 +80,7 @@ func TestStore_BlobSidecars(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
for _, sc := range scs {
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.BlobSidecar{sc}))
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.DeprecatedBlobSidecar{sc}))
}
require.Equal(t, fieldparams.MaxBlobsPerBlock, len(scs))
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
@@ -94,7 +94,7 @@ func TestStore_BlobSidecars(t *testing.T) {
require.Equal(t, fieldparams.MaxBlobsPerBlock, len(scs))
// we'll request indices 0 and 3, so make a slice with those indices for comparison
expect := make([]*ethpb.BlobSidecar, 2)
expect := make([]*ethpb.DeprecatedBlobSidecar, 2)
expect[0] = scs[0]
expect[1] = scs[3]
@@ -136,7 +136,7 @@ func TestStore_BlobSidecars(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
for _, sc := range scs {
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.BlobSidecar{sc}))
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.DeprecatedBlobSidecar{sc}))
}
require.Equal(t, fieldparams.MaxBlobsPerBlock, len(scs))
got, err := db.BlobSidecarsBySlot(ctx, scs[0].Slot)
@@ -150,7 +150,7 @@ func TestStore_BlobSidecars(t *testing.T) {
require.Equal(t, fieldparams.MaxBlobsPerBlock, len(scs))
// we'll request indices 0 and 3, so make a slice with those indices for comparison
expect := make([]*ethpb.BlobSidecar, 2)
expect := make([]*ethpb.DeprecatedBlobSidecar, 2)
expect[0] = scs[0]
expect[1] = scs[3]
@@ -191,11 +191,11 @@ func TestStore_BlobSidecars(t *testing.T) {
for i := 0; i < fieldparams.MaxBlobsPerBlock; i++ {
scs[i].Slot = primitives.Slot(i)
scs[i].BlockRoot = bytesutil.PadTo([]byte{byte(i)}, 32)
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.BlobSidecar{scs[i]}))
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.DeprecatedBlobSidecar{scs[i]}))
br := bytesutil.ToBytes32(scs[i].BlockRoot)
saved, err := db.BlobSidecarsByRoot(ctx, br)
require.NoError(t, err)
require.NoError(t, equalBlobSlices([]*ethpb.BlobSidecar{scs[i]}, saved))
require.NoError(t, equalBlobSlices([]*ethpb.DeprecatedBlobSidecar{scs[i]}, saved))
}
})
t.Run("saving a new blob for rotation (batch)", func(t *testing.T) {
@@ -226,7 +226,7 @@ func TestStore_BlobSidecars(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
for _, sc := range scs {
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.BlobSidecar{sc}))
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.DeprecatedBlobSidecar{sc}))
}
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
require.NoError(t, err)
@@ -238,7 +238,7 @@ func TestStore_BlobSidecars(t *testing.T) {
sc.Slot = sc.Slot + newRetentionSlot
}
for _, sc := range scs {
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.BlobSidecar{sc}))
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.DeprecatedBlobSidecar{sc}))
}
_, err = db.BlobSidecarsBySlot(ctx, 100)
@@ -264,7 +264,7 @@ func TestStore_BlobSidecars(t *testing.T) {
sc.Slot = sc.Slot + newRetentionSlot
}
for _, sc := range scs {
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.BlobSidecar{sc}))
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.DeprecatedBlobSidecar{sc}))
}
_, err = db.BlobSidecarsBySlot(ctx, 100)
@@ -278,7 +278,7 @@ func TestStore_BlobSidecars(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
for _, sc := range scs {
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.BlobSidecar{sc}))
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.DeprecatedBlobSidecar{sc}))
}
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
require.NoError(t, err)
@@ -304,8 +304,8 @@ func TestStore_BlobSidecars(t *testing.T) {
eScs := generateEquivocatingBlobSidecars(t, fieldparams.MaxBlobsPerBlock/2)
for i, sc := range scs {
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.BlobSidecar{sc}))
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.BlobSidecar{eScs[i]}))
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.DeprecatedBlobSidecar{sc}))
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.DeprecatedBlobSidecar{eScs[i]}))
}
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
@@ -318,15 +318,15 @@ func TestStore_BlobSidecars(t *testing.T) {
})
}
func generateBlobSidecars(t *testing.T, n uint64) []*ethpb.BlobSidecar {
blobSidecars := make([]*ethpb.BlobSidecar, n)
func generateBlobSidecars(t *testing.T, n uint64) []*ethpb.DeprecatedBlobSidecar {
blobSidecars := make([]*ethpb.DeprecatedBlobSidecar, n)
for i := uint64(0); i < n; i++ {
blobSidecars[i] = generateBlobSidecar(t, i)
}
return blobSidecars
}
func generateBlobSidecar(t *testing.T, index uint64) *ethpb.BlobSidecar {
func generateBlobSidecar(t *testing.T, index uint64) *ethpb.DeprecatedBlobSidecar {
blob := make([]byte, 131072)
_, err := rand.Read(blob)
require.NoError(t, err)
@@ -336,7 +336,7 @@ func generateBlobSidecar(t *testing.T, index uint64) *ethpb.BlobSidecar {
kzgProof := make([]byte, 48)
_, err = rand.Read(kzgProof)
require.NoError(t, err)
return &ethpb.BlobSidecar{
return &ethpb.DeprecatedBlobSidecar{
BlockRoot: bytesutil.PadTo([]byte{'a'}, 32),
Index: index,
Slot: 100,
@@ -348,15 +348,15 @@ func generateBlobSidecar(t *testing.T, index uint64) *ethpb.BlobSidecar {
}
}
func generateEquivocatingBlobSidecars(t *testing.T, n uint64) []*ethpb.BlobSidecar {
blobSidecars := make([]*ethpb.BlobSidecar, n)
func generateEquivocatingBlobSidecars(t *testing.T, n uint64) []*ethpb.DeprecatedBlobSidecar {
blobSidecars := make([]*ethpb.DeprecatedBlobSidecar, n)
for i := uint64(0); i < n; i++ {
blobSidecars[i] = generateEquivocatingBlobSidecar(t, i)
}
return blobSidecars
}
func generateEquivocatingBlobSidecar(t *testing.T, index uint64) *ethpb.BlobSidecar {
func generateEquivocatingBlobSidecar(t *testing.T, index uint64) *ethpb.DeprecatedBlobSidecar {
blob := make([]byte, 131072)
_, err := rand.Read(blob)
require.NoError(t, err)
@@ -367,7 +367,7 @@ func generateEquivocatingBlobSidecar(t *testing.T, index uint64) *ethpb.BlobSide
_, err = rand.Read(kzgProof)
require.NoError(t, err)
return &ethpb.BlobSidecar{
return &ethpb.DeprecatedBlobSidecar{
BlockRoot: bytesutil.PadTo([]byte{'c'}, 32),
Index: index,
Slot: 100,
@@ -382,16 +382,16 @@ func generateEquivocatingBlobSidecar(t *testing.T, index uint64) *ethpb.BlobSide
func Test_validUniqueSidecars_validation(t *testing.T) {
tests := []struct {
name string
scs []*ethpb.BlobSidecar
scs []*ethpb.DeprecatedBlobSidecar
err error
}{
{name: "empty", scs: []*ethpb.BlobSidecar{}, err: errEmptySidecar},
{name: "empty", scs: []*ethpb.DeprecatedBlobSidecar{}, err: errEmptySidecar},
{name: "too many sidecars", scs: generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock+1), err: errBlobSidecarLimit},
{name: "invalid slot", scs: []*ethpb.BlobSidecar{{Slot: 1}, {Slot: 2}}, err: errBlobSlotMismatch},
{name: "invalid proposer index", scs: []*ethpb.BlobSidecar{{ProposerIndex: 1}, {ProposerIndex: 2}}, err: errBlobProposerMismatch},
{name: "invalid root", scs: []*ethpb.BlobSidecar{{BlockRoot: []byte{1}}, {BlockRoot: []byte{2}}}, err: errBlobRootMismatch},
{name: "invalid parent root", scs: []*ethpb.BlobSidecar{{BlockParentRoot: []byte{1}}, {BlockParentRoot: []byte{2}}}, err: errBlobParentMismatch},
{name: "happy path", scs: []*ethpb.BlobSidecar{{Index: 0}, {Index: 1}}},
{name: "invalid slot", scs: []*ethpb.DeprecatedBlobSidecar{{Slot: 1}, {Slot: 2}}, err: errBlobSlotMismatch},
{name: "invalid proposer index", scs: []*ethpb.DeprecatedBlobSidecar{{ProposerIndex: 1}, {ProposerIndex: 2}}, err: errBlobProposerMismatch},
{name: "invalid root", scs: []*ethpb.DeprecatedBlobSidecar{{BlockRoot: []byte{1}}, {BlockRoot: []byte{2}}}, err: errBlobRootMismatch},
{name: "invalid parent root", scs: []*ethpb.DeprecatedBlobSidecar{{BlockParentRoot: []byte{1}}, {BlockParentRoot: []byte{2}}}, err: errBlobParentMismatch},
{name: "happy path", scs: []*ethpb.DeprecatedBlobSidecar{{Index: 0}, {Index: 1}}},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
@@ -408,43 +408,43 @@ func Test_validUniqueSidecars_validation(t *testing.T) {
func Test_validUniqueSidecars_dedup(t *testing.T) {
cases := []struct {
name string
scs []*ethpb.BlobSidecar
expected []*ethpb.BlobSidecar
scs []*ethpb.DeprecatedBlobSidecar
expected []*ethpb.DeprecatedBlobSidecar
err error
}{
{
name: "duplicate sidecar",
scs: []*ethpb.BlobSidecar{{Index: 1}, {Index: 1}},
expected: []*ethpb.BlobSidecar{{Index: 1}},
scs: []*ethpb.DeprecatedBlobSidecar{{Index: 1}, {Index: 1}},
expected: []*ethpb.DeprecatedBlobSidecar{{Index: 1}},
},
{
name: "single sidecar",
scs: []*ethpb.BlobSidecar{{Index: 1}},
expected: []*ethpb.BlobSidecar{{Index: 1}},
scs: []*ethpb.DeprecatedBlobSidecar{{Index: 1}},
expected: []*ethpb.DeprecatedBlobSidecar{{Index: 1}},
},
{
name: "multiple duplicates",
scs: []*ethpb.BlobSidecar{{Index: 1}, {Index: 2}, {Index: 2}, {Index: 3}, {Index: 3}},
expected: []*ethpb.BlobSidecar{{Index: 1}, {Index: 2}, {Index: 3}},
scs: []*ethpb.DeprecatedBlobSidecar{{Index: 1}, {Index: 2}, {Index: 2}, {Index: 3}, {Index: 3}},
expected: []*ethpb.DeprecatedBlobSidecar{{Index: 1}, {Index: 2}, {Index: 3}},
},
{
name: "ok number after de-dupe, > 6 before",
scs: []*ethpb.BlobSidecar{{Index: 1}, {Index: 2}, {Index: 2}, {Index: 2}, {Index: 2}, {Index: 3}, {Index: 3}},
expected: []*ethpb.BlobSidecar{{Index: 1}, {Index: 2}, {Index: 3}},
scs: []*ethpb.DeprecatedBlobSidecar{{Index: 1}, {Index: 2}, {Index: 2}, {Index: 2}, {Index: 2}, {Index: 3}, {Index: 3}},
expected: []*ethpb.DeprecatedBlobSidecar{{Index: 1}, {Index: 2}, {Index: 3}},
},
{
name: "max unique, no dupes",
scs: []*ethpb.BlobSidecar{{Index: 1}, {Index: 2}, {Index: 3}, {Index: 4}, {Index: 5}, {Index: 6}},
expected: []*ethpb.BlobSidecar{{Index: 1}, {Index: 2}, {Index: 3}, {Index: 4}, {Index: 5}, {Index: 6}},
scs: []*ethpb.DeprecatedBlobSidecar{{Index: 1}, {Index: 2}, {Index: 3}, {Index: 4}, {Index: 5}, {Index: 6}},
expected: []*ethpb.DeprecatedBlobSidecar{{Index: 1}, {Index: 2}, {Index: 3}, {Index: 4}, {Index: 5}, {Index: 6}},
},
{
name: "too many unique",
scs: []*ethpb.BlobSidecar{{Index: 1}, {Index: 2}, {Index: 3}, {Index: 4}, {Index: 5}, {Index: 6}, {Index: 7}},
scs: []*ethpb.DeprecatedBlobSidecar{{Index: 1}, {Index: 2}, {Index: 3}, {Index: 4}, {Index: 5}, {Index: 6}, {Index: 7}},
err: errBlobSidecarLimit,
},
{
name: "too many unique with dupes",
scs: []*ethpb.BlobSidecar{{Index: 1}, {Index: 1}, {Index: 1}, {Index: 2}, {Index: 3}, {Index: 4}, {Index: 5}, {Index: 6}, {Index: 7}},
scs: []*ethpb.DeprecatedBlobSidecar{{Index: 1}, {Index: 1}, {Index: 1}, {Index: 2}, {Index: 3}, {Index: 4}, {Index: 5}, {Index: 6}, {Index: 7}},
err: errBlobSidecarLimit,
},
}
@@ -462,7 +462,7 @@ func Test_validUniqueSidecars_dedup(t *testing.T) {
}
func TestStore_sortSidecars(t *testing.T) {
scs := []*ethpb.BlobSidecar{
scs := []*ethpb.DeprecatedBlobSidecar{
{Index: 6},
{Index: 4},
{Index: 2},
@@ -480,7 +480,7 @@ func TestStore_sortSidecars(t *testing.T) {
func BenchmarkStore_BlobSidecarsByRoot(b *testing.B) {
s := setupDB(b)
ctx := context.Background()
require.NoError(b, s.SaveBlobSidecar(ctx, []*ethpb.BlobSidecar{
require.NoError(b, s.SaveBlobSidecar(ctx, []*ethpb.DeprecatedBlobSidecar{
{BlockRoot: bytesutil.PadTo([]byte{'a'}, 32), Slot: 0},
}))
@@ -490,7 +490,7 @@ func BenchmarkStore_BlobSidecarsByRoot(b *testing.B) {
r := make([]byte, 32)
_, err := rand.Read(r)
require.NoError(b, err)
scs := []*ethpb.BlobSidecar{
scs := []*ethpb.DeprecatedBlobSidecar{
{BlockRoot: r, Slot: primitives.Slot(i)},
}
k := blobSidecarKey(scs[0])
@@ -502,7 +502,7 @@ func BenchmarkStore_BlobSidecarsByRoot(b *testing.B) {
})
require.NoError(b, err)
require.NoError(b, s.SaveBlobSidecar(ctx, []*ethpb.BlobSidecar{
require.NoError(b, s.SaveBlobSidecar(ctx, []*ethpb.DeprecatedBlobSidecar{
{BlockRoot: bytesutil.PadTo([]byte{'b'}, 32), Slot: 131071},
}))
@@ -529,7 +529,7 @@ func Test_checkEpochsForBlobSidecarsRequestBucket(t *testing.T) {
}
func TestBlobRotatingKey(t *testing.T) {
k := blobSidecarKey(&ethpb.BlobSidecar{
k := blobSidecarKey(&ethpb.DeprecatedBlobSidecar{
Slot: 1,
BlockRoot: []byte{2},
})

View File

@@ -11,7 +11,7 @@ import (
var maxEpochsToPersistBlobs = params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest
// ConfigureBlobRetentionEpoch sets the for blob retention based on command-line context. It sets the local config `maxEpochsToPersistBlobs`.
// ConfigureBlobRetentionEpoch sets the epoch for blob retention based on command-line context. It sets the local config `maxEpochsToPersistBlobs`.
// If the flag is not set, the spec default `MinEpochsForBlobsSidecarsRequest` is used.
// An error is returned if the input epoch is smaller than the spec default value.
func ConfigureBlobRetentionEpoch(cliCtx *cli.Context) error {

View File

@@ -35,5 +35,5 @@ func TestConfigureBlobRetentionEpoch(t *testing.T) {
require.NoError(t, set.Set(flags.BlobRetentionEpoch.Name, strconv.FormatUint(minEpochsForSidecarRequest-1, 10)))
cliCtx = cli.NewContext(&app, set, nil)
err := ConfigureBlobRetentionEpoch(cliCtx)
require.ErrorContains(t, "extend-blob-retention-epoch smaller than spec default", err)
require.ErrorContains(t, "blob-retention-epochs smaller than spec default", err)
}

View File

@@ -1,7 +1,12 @@
package kv
import (
"io"
"os"
"testing"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/sirupsen/logrus"
)
func init() {
@@ -10,3 +15,9 @@ func init() {
panic(err)
}
}
func TestMain(m *testing.M) {
logrus.SetLevel(logrus.DebugLevel)
logrus.SetOutput(io.Discard)
os.Exit(m.Run())
}

View File

@@ -17,8 +17,8 @@ go_library(
"//beacon-chain/forkchoice/types:go_default_library",
"//beacon-chain/state:go_default_library",
"//config/fieldparams:go_default_library",
"//consensus-types/forkchoice:go_default_library",
"//consensus-types/primitives:go_default_library",
"//proto/eth/v1:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)

View File

@@ -31,9 +31,9 @@ go_library(
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/forkchoice:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
@@ -69,11 +69,11 @@ go_test(
"//beacon-chain/state/state-native:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/forkchoice:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",

View File

@@ -12,9 +12,9 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
forkchoice2 "github.com/prysmaticlabs/prysm/v4/consensus-types/forkchoice"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
v1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"github.com/prysmaticlabs/prysm/v4/time/slots"
@@ -475,7 +475,7 @@ func (f *ForkChoice) CommonAncestor(ctx context.Context, r1 [32]byte, r2 [32]byt
// `blocks`. This slice must be ordered from child to parent. It includes all
// blocks **except** the first one (that is the one with the highest slot
// number). All blocks are assumed to be a strict chain
// where blocks[i].Parent = blocks[i+1]. Also we assume that the parent of the
// where blocks[i].Parent = blocks[i+1]. Also, we assume that the parent of the
// last block in this list is already included in forkchoice store.
func (f *ForkChoice) InsertChain(ctx context.Context, chain []*forkchoicetypes.BlockAndCheckpoints) error {
if len(chain) == 0 {
@@ -554,24 +554,24 @@ func (f *ForkChoice) UnrealizedJustifiedPayloadBlockHash() [32]byte {
}
// ForkChoiceDump returns a full dump of forkchoice.
func (f *ForkChoice) ForkChoiceDump(ctx context.Context) (*v1.ForkChoiceDump, error) {
jc := &v1.Checkpoint{
func (f *ForkChoice) ForkChoiceDump(ctx context.Context) (*forkchoice2.Dump, error) {
jc := &ethpb.Checkpoint{
Epoch: f.store.justifiedCheckpoint.Epoch,
Root: f.store.justifiedCheckpoint.Root[:],
}
ujc := &v1.Checkpoint{
ujc := &ethpb.Checkpoint{
Epoch: f.store.unrealizedJustifiedCheckpoint.Epoch,
Root: f.store.unrealizedJustifiedCheckpoint.Root[:],
}
fc := &v1.Checkpoint{
fc := &ethpb.Checkpoint{
Epoch: f.store.finalizedCheckpoint.Epoch,
Root: f.store.finalizedCheckpoint.Root[:],
}
ufc := &v1.Checkpoint{
ufc := &ethpb.Checkpoint{
Epoch: f.store.unrealizedFinalizedCheckpoint.Epoch,
Root: f.store.unrealizedFinalizedCheckpoint.Root[:],
}
nodes := make([]*v1.ForkChoiceNode, 0, f.NodeCount())
nodes := make([]*forkchoice2.Node, 0, f.NodeCount())
var err error
if f.store.treeRootNode != nil {
nodes, err = f.store.treeRootNode.nodeTreeDump(ctx, nodes)
@@ -583,7 +583,7 @@ func (f *ForkChoice) ForkChoiceDump(ctx context.Context) (*v1.ForkChoiceDump, er
if f.store.headNode != nil {
headRoot = f.store.headNode.root
}
resp := &v1.ForkChoiceDump{
resp := &forkchoice2.Dump{
JustifiedCheckpoint: jc,
UnrealizedJustifiedCheckpoint: ujc,
FinalizedCheckpoint: fc,

View File

@@ -780,9 +780,9 @@ func TestForkChoiceIsViableForCheckpoint(t *testing.T) {
require.NoError(t, err)
require.Equal(t, true, viable)
st, broot, err := prepareForkchoiceState(ctx, 1, [32]byte{'b'}, root, [32]byte{'B'}, 0, 0)
st, bRoot, err := prepareForkchoiceState(ctx, 1, [32]byte{'b'}, root, [32]byte{'B'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, broot))
require.NoError(t, f.InsertNode(ctx, st, bRoot))
// Epoch start
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: root})
@@ -794,55 +794,55 @@ func TestForkChoiceIsViableForCheckpoint(t *testing.T) {
require.Equal(t, false, viable)
// No Children but impossible checkpoint
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: broot})
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: bRoot})
require.NoError(t, err)
require.Equal(t, false, viable)
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: broot, Epoch: 1})
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: bRoot, Epoch: 1})
require.NoError(t, err)
require.Equal(t, true, viable)
st, croot, err := prepareForkchoiceState(ctx, 2, [32]byte{'c'}, broot, [32]byte{'C'}, 0, 0)
st, cRoot, err := prepareForkchoiceState(ctx, 2, [32]byte{'c'}, bRoot, [32]byte{'C'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, croot))
require.NoError(t, f.InsertNode(ctx, st, cRoot))
// Children in same epoch
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: broot})
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: bRoot})
require.NoError(t, err)
require.Equal(t, false, viable)
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: broot, Epoch: 1})
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: bRoot, Epoch: 1})
require.NoError(t, err)
require.Equal(t, false, viable)
st, droot, err := prepareForkchoiceState(ctx, params.BeaconConfig().SlotsPerEpoch, [32]byte{'d'}, broot, [32]byte{'D'}, 0, 0)
st, dRoot, err := prepareForkchoiceState(ctx, params.BeaconConfig().SlotsPerEpoch, [32]byte{'d'}, bRoot, [32]byte{'D'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, droot))
require.NoError(t, f.InsertNode(ctx, st, dRoot))
// Children in next epoch but boundary
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: broot})
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: bRoot})
require.NoError(t, err)
require.Equal(t, false, viable)
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: broot, Epoch: 1})
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: bRoot, Epoch: 1})
require.NoError(t, err)
require.Equal(t, false, viable)
// Boundary block
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: droot, Epoch: 1})
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: dRoot, Epoch: 1})
require.NoError(t, err)
require.Equal(t, true, viable)
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: droot, Epoch: 0})
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: dRoot, Epoch: 0})
require.NoError(t, err)
require.Equal(t, false, viable)
// Children in next epoch
st, eroot, err := prepareForkchoiceState(ctx, params.BeaconConfig().SlotsPerEpoch+1, [32]byte{'e'}, broot, [32]byte{'E'}, 0, 0)
st, eRoot, err := prepareForkchoiceState(ctx, params.BeaconConfig().SlotsPerEpoch+1, [32]byte{'e'}, bRoot, [32]byte{'E'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, eroot))
require.NoError(t, f.InsertNode(ctx, st, eRoot))
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: broot, Epoch: 1})
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: bRoot, Epoch: 1})
require.NoError(t, err)
require.Equal(t, true, viable)
}

View File

@@ -6,8 +6,8 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/config/params"
forkchoice2 "github.com/prysmaticlabs/prysm/v4/consensus-types/forkchoice"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
v1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
"github.com/prysmaticlabs/prysm/v4/time/slots"
)
@@ -151,7 +151,7 @@ func (n *Node) arrivedAfterOrphanCheck(genesisTime uint64) (bool, error) {
}
// nodeTreeDump appends to the given list all the nodes descending from this one
func (n *Node) nodeTreeDump(ctx context.Context, nodes []*v1.ForkChoiceNode) ([]*v1.ForkChoiceNode, error) {
func (n *Node) nodeTreeDump(ctx context.Context, nodes []*forkchoice2.Node) ([]*forkchoice2.Node, error) {
if ctx.Err() != nil {
return nil, ctx.Err()
}
@@ -159,7 +159,7 @@ func (n *Node) nodeTreeDump(ctx context.Context, nodes []*v1.ForkChoiceNode) ([]
if n.parent != nil {
parentRoot = n.parent.root
}
thisNode := &v1.ForkChoiceNode{
thisNode := &forkchoice2.Node{
Slot: n.slot,
BlockRoot: n.root[:],
ParentRoot: parentRoot[:],
@@ -174,9 +174,9 @@ func (n *Node) nodeTreeDump(ctx context.Context, nodes []*v1.ForkChoiceNode) ([]
Timestamp: n.timestamp,
}
if n.optimistic {
thisNode.Validity = v1.ForkChoiceNodeValidity_OPTIMISTIC
thisNode.Validity = forkchoice2.Optimistic
} else {
thisNode.Validity = v1.ForkChoiceNodeValidity_VALID
thisNode.Validity = forkchoice2.Valid
}
nodes = append(nodes, thisNode)

View File

@@ -5,8 +5,8 @@ import (
"testing"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/forkchoice"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
v1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
)
@@ -86,7 +86,7 @@ func TestNode_UpdateBestDescendant_NonViableChild(t *testing.T) {
func TestNode_UpdateBestDescendant_ViableChild(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
// Input child is best descendant
// Input child is the best descendant
state, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
@@ -99,7 +99,7 @@ func TestNode_UpdateBestDescendant_ViableChild(t *testing.T) {
func TestNode_UpdateBestDescendant_HigherWeightChild(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
// Input child is best descendant
// Input child is the best descendant
state, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
@@ -119,7 +119,7 @@ func TestNode_UpdateBestDescendant_HigherWeightChild(t *testing.T) {
func TestNode_UpdateBestDescendant_LowerWeightChild(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
// Input child is best descendant
// Input child is the best descendant
state, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
@@ -237,7 +237,7 @@ func TestNode_SetFullyValidated(t *testing.T) {
require.NoError(t, err)
require.Equal(t, false, opt)
respNodes := make([]*v1.ForkChoiceNode, 0)
respNodes := make([]*forkchoice.Node, 0)
respNodes, err = f.store.treeRootNode.nodeTreeDump(ctx, respNodes)
require.NoError(t, err)
require.Equal(t, len(respNodes), f.NodeCount())

View File

@@ -156,9 +156,9 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
// Ancestors have the added weights of their children. Genesis is a special exception at 0 weight,
require.Equal(t, f.store.treeRootNode.weight, uint64(0))
// Proposer boost score with this tests parameters is 8
// Proposer boost score with these test parameters is 8
// Each of the nodes received one attestation accounting for 10.
// Node D is the only one with a proposer boost still applied:
// Node D is the only one with proposer boost still applied:
//
// (1: 48) -> (2: 38) -> (3: 10)
// \--------------->(4: 18)

View File

@@ -28,7 +28,7 @@ const orphanLateBlockProposingEarly = 2
// 3- The beacon node is serving a validator that will propose during the next
// slot.
//
// This function only applies an heuristic to decide if the beacon will update
// This function only applies a heuristic to decide if the beacon will update
// the engine's view of head with the parent block or the incoming block. It
// does not guarantee an attempted reorg. This will only be decided later at
// proposal time by calling GetProposerHead.

View File

@@ -61,7 +61,7 @@ func (s *Store) head(ctx context.Context) ([32]byte, error) {
}
// insert registers a new block node to the fork choice store's node list.
// It then updates the new node's parent with best child and descendant node.
// It then updates the new node's parent with the best child and descendant node.
func (s *Store) insert(ctx context.Context,
slot primitives.Slot,
root, parentRoot, payloadHash [fieldparams.RootLength]byte,

View File

@@ -82,7 +82,7 @@ func TestStore_Head_Itself(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.InsertNode(context.Background(), state, blkRoot))
// Since the justified node does not have a best descendant so the best node
// Since the justified node does not have a best descendant, the best node
// is itself.
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: 0, Root: indexToHash(1)}
h, err := f.store.head(context.Background())
@@ -213,7 +213,7 @@ func TestStore_Prune_ReturnEarly(t *testing.T) {
require.Equal(t, nodeCount, f.NodeCount())
}
// This unit tests starts with a simple branch like this
// This unit test starts with a simple branch like this
//
// - 1
// /

View File

@@ -298,7 +298,7 @@ func TestStore_PullTips_Heuristics(t *testing.T) {
require.NoError(tt, err)
require.NoError(tt, f.InsertNode(ctx, st, root))
// Check that the justification point is not the parent's.
// This tests that the heuristics in pullTips did not apply and
// This test checks that the heuristics in pullTips did not apply and
// the test continues to compute a bogus unrealized
// justification
require.Equal(tt, primitives.Epoch(1), f.store.nodeByRoot[[32]byte{'h'}].unrealizedJustifiedEpoch)
@@ -315,7 +315,7 @@ func TestStore_PullTips_Heuristics(t *testing.T) {
require.NoError(tt, err)
require.NoError(tt, f.InsertNode(ctx, st, root))
// Check that the justification point is not the parent's.
// This tests that the heuristics in pullTips did not apply and
// This test checks that the heuristics in pullTips did not apply and
// the test continues to compute a bogus unrealized
// justification
require.Equal(tt, primitives.Epoch(1), f.store.nodeByRoot[[32]byte{'h'}].unrealizedJustifiedEpoch)
@@ -331,7 +331,7 @@ func TestStore_PullTips_Heuristics(t *testing.T) {
require.NoError(tt, err)
require.NoError(tt, f.InsertNode(ctx, st, root))
// Check that the justification point is not the parent's.
// This tests that the heuristics in pullTips did not apply and
// This test checks that the heuristics in pullTips did not apply and
// the test continues to compute a bogus unrealized
// justification
require.Equal(tt, primitives.Epoch(2), f.store.nodeByRoot[[32]byte{'h'}].unrealizedJustifiedEpoch)


@@ -6,8 +6,8 @@ import (
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
forkchoice2 "github.com/prysmaticlabs/prysm/v4/consensus-types/forkchoice"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
v1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
)
// BalancesByRooter is a handler to obtain the effective balances of the state
@@ -62,7 +62,7 @@ type Getter interface {
NodeCount() int
HighestReceivedBlockSlot() primitives.Slot
ReceivedBlocksLastEpoch() (uint64, error)
ForkChoiceDump(context.Context) (*v1.ForkChoiceDump, error)
ForkChoiceDump(context.Context) (*forkchoice2.Dump, error)
Weight(root [32]byte) (uint64, error)
Tips() ([][32]byte, []primitives.Slot)
IsOptimistic(root [32]byte) (bool, error)


@@ -53,12 +53,8 @@ func DefaultConfig(enableDebugRPCEndpoints bool, httpModules string) MuxConfig {
if flags.EnableHTTPEthAPI(httpModules) {
ethRegistrations := []gateway.PbHandlerRegistration{
ethpbservice.RegisterBeaconChainHandler,
ethpbservice.RegisterBeaconValidatorHandler,
ethpbservice.RegisterEventsHandler,
}
if enableDebugRPCEndpoints {
ethRegistrations = append(ethRegistrations, ethpbservice.RegisterBeaconDebugHandler)
}
ethMux := gwruntime.NewServeMux(
gwruntime.WithMarshalerOption(gwruntime.MIMEWildcard, &gwruntime.HTTPBodyMarshaler{
Marshaler: &gwruntime.JSONPb{


@@ -9,39 +9,12 @@ import (
)
func TestDefaultConfig(t *testing.T) {
t.Run("Without debug endpoints", func(t *testing.T) {
cfg := DefaultConfig(false, "eth,prysm")
assert.NotNil(t, cfg.EthPbMux.Mux)
require.Equal(t, 2, len(cfg.EthPbMux.Patterns))
assert.Equal(t, "/internal/eth/v1/", cfg.EthPbMux.Patterns[0])
assert.Equal(t, "/internal/eth/v2/", cfg.EthPbMux.Patterns[1])
assert.Equal(t, 3, len(cfg.EthPbMux.Registrations))
assert.NotNil(t, cfg.V1AlphaPbMux.Mux)
require.Equal(t, 2, len(cfg.V1AlphaPbMux.Patterns))
assert.Equal(t, "/eth/v1alpha1/", cfg.V1AlphaPbMux.Patterns[0])
assert.Equal(t, "/eth/v1alpha2/", cfg.V1AlphaPbMux.Patterns[1])
assert.Equal(t, 4, len(cfg.V1AlphaPbMux.Registrations))
})
t.Run("With debug endpoints", func(t *testing.T) {
cfg := DefaultConfig(true, "eth,prysm")
assert.NotNil(t, cfg.EthPbMux.Mux)
require.Equal(t, 2, len(cfg.EthPbMux.Patterns))
assert.Equal(t, "/internal/eth/v1/", cfg.EthPbMux.Patterns[0])
assert.Equal(t, "/internal/eth/v2/", cfg.EthPbMux.Patterns[1])
assert.Equal(t, 4, len(cfg.EthPbMux.Registrations))
assert.NotNil(t, cfg.V1AlphaPbMux.Mux)
require.Equal(t, 2, len(cfg.V1AlphaPbMux.Patterns))
assert.Equal(t, "/eth/v1alpha1/", cfg.V1AlphaPbMux.Patterns[0])
assert.Equal(t, "/eth/v1alpha2/", cfg.V1AlphaPbMux.Patterns[1])
assert.Equal(t, 5, len(cfg.V1AlphaPbMux.Registrations))
})
t.Run("Without Prysm API", func(t *testing.T) {
cfg := DefaultConfig(true, "eth")
assert.NotNil(t, cfg.EthPbMux.Mux)
require.Equal(t, 2, len(cfg.EthPbMux.Patterns))
assert.Equal(t, "/internal/eth/v1/", cfg.EthPbMux.Patterns[0])
assert.Equal(t, 4, len(cfg.EthPbMux.Registrations))
assert.Equal(t, 2, len(cfg.EthPbMux.Registrations))
assert.Equal(t, (*gateway.PbMux)(nil), cfg.V1AlphaPbMux)
})
t.Run("Without Eth API", func(t *testing.T) {


@@ -24,6 +24,7 @@ go_library(
"//beacon-chain/cache/depositcache:go_default_library",
"//beacon-chain/cache/depositsnapshot:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/filesystem:go_default_library",
"//beacon-chain/db/kv:go_default_library",
"//beacon-chain/db/slasherkv:go_default_library",
"//beacon-chain/deterministic-genesis:go_default_library",
@@ -85,6 +86,7 @@ go_test(
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/builder:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/db/filesystem:go_default_library",
"//beacon-chain/execution:go_default_library",
"//beacon-chain/execution/testing:go_default_library",
"//beacon-chain/monitor:go_default_library",


@@ -40,7 +40,7 @@ func configureHistoricalSlasher(cliCtx *cli.Context) error {
return err
}
cmdConfig := cmd.Get()
// Allow up to 4096 attestations at a time to be requested from the beacon nde.
// Allow up to 4096 attestations at a time to be requested from the beacon node.
cmdConfig.MaxRPCPageSize = int(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().MaxAttestations)) // lint:ignore uintcast -- Page size should not exceed int64 with these constants.
cmd.Init(cmdConfig)
log.Warnf(

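As a sanity check on the fixed comment: with mainnet constants the page size works out to 4096 attestations per request. The constant values below are the mainnet spec values, stated here as assumptions rather than read from this diff:

package main

import "fmt"

func main() {
	const slotsPerEpoch = 32    // mainnet SLOTS_PER_EPOCH (assumed)
	const maxAttestations = 128 // mainnet MAX_ATTESTATIONS (assumed)
	// 4096, the MaxRPCPageSize set above.
	fmt.Println(slotsPerEpoch * maxAttestations)
}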

@@ -26,6 +26,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache/depositcache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache/depositsnapshot"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filesystem"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/kv"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/slasherkv"
interopcoldstart "github.com/prysmaticlabs/prysm/v4/beacon-chain/deterministic-genesis"
@@ -112,6 +113,7 @@ type BeaconNode struct {
forkChoicer forkchoice.ForkChoicer
clockWaiter startup.ClockWaiter
initialSyncComplete chan struct{}
BlobStorage *filesystem.BlobStorage
}
// New creates a new node instance, sets up configuration options, and registers
@@ -200,6 +202,7 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
if err != nil {
return nil, err
}
log.Debugln("Starting DB")
if err := beacon.startDB(cliCtx, depositAddress); err != nil {
return nil, err
@@ -639,6 +642,7 @@ func (b *BeaconNode) registerBlockchainService(fc forkchoice.ForkChoicer, gs *st
blockchain.WithProposerIdsCache(b.proposerIdsCache),
blockchain.WithClockSynchronizer(gs),
blockchain.WithSyncComplete(syncComplete),
blockchain.WithBlobStorage(b.BlobStorage),
)
blockchainService, err := blockchain.NewService(b.ctx, opts...)
@@ -717,6 +721,7 @@ func (b *BeaconNode) registerSyncService(initialSyncComplete chan struct{}) erro
regularsync.WithClockWaiter(b.clockWaiter),
regularsync.WithInitialSyncComplete(initialSyncComplete),
regularsync.WithStateNotifier(b),
regularsync.WithBlobStorage(b.BlobStorage),
)
return b.services.RegisterService(rs)
}


@@ -13,6 +13,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/builder"
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filesystem"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/execution"
mockExecution "github.com/prysmaticlabs/prysm/v4/beacon-chain/execution/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/monitor"
@@ -70,7 +71,8 @@ func TestNodeStart_Ok(t *testing.T) {
ctx := cli.NewContext(&app, set, nil)
node, err := New(ctx, WithBlockchainFlagOptions([]blockchain.Option{}),
WithBuilderFlagOptions([]builder.Option{}),
WithExecutionChainOptions([]execution.Option{}))
WithExecutionChainOptions([]execution.Option{}),
WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)))
require.NoError(t, err)
node.services = &runtime.ServiceRegistry{}
go func() {
@@ -148,11 +150,12 @@ func TestClearDB(t *testing.T) {
set.String("suggested-fee-recipient", "0x6e35733c5af9B61374A128e6F85f553aF09ff89A", "fee recipient")
require.NoError(t, set.Set("suggested-fee-recipient", "0x6e35733c5af9B61374A128e6F85f553aF09ff89A"))
context := cli.NewContext(&app, set, nil)
_, err = New(context, WithExecutionChainOptions([]execution.Option{
execution.WithHttpEndpoint(endpoint),
}))
options := []Option{
WithExecutionChainOptions([]execution.Option{execution.WithHttpEndpoint(endpoint)}),
WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)),
}
_, err = New(context, options...)
require.NoError(t, err)
require.LogsContain(t, hook, "Removing database")
}


@@ -3,6 +3,7 @@ package node
import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/builder"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filesystem"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/execution"
)
@@ -32,3 +33,11 @@ func WithBuilderFlagOptions(opts []builder.Option) Option {
return nil
}
}
// WithBlobStorage sets the BlobStorage backend for the BeaconNode
func WithBlobStorage(bs *filesystem.BlobStorage) Option {
return func(bn *BeaconNode) error {
bn.BlobStorage = bs
return nil
}
}

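WithBlobStorage follows the same functional-options pattern as the other node options in this file: each Option mutates the BeaconNode being built, and the constructor applies them in order. A minimal sketch of that pattern under local, illustrative types; the real node.New does considerably more:

package optionsketch

// blobStorage stands in for *filesystem.BlobStorage in this sketch.
type blobStorage struct{}

type beaconNode struct {
	blobStorage *blobStorage
}

type option func(*beaconNode) error

// withBlobStorage mirrors the shape of WithBlobStorage above: it simply
// records the storage backend on the node being constructed.
func withBlobStorage(bs *blobStorage) option {
	return func(bn *beaconNode) error {
		bn.blobStorage = bs
		return nil
	}
}

// newNode applies each option in order, mirroring how node.New consumes opts.
func newNode(opts ...option) (*beaconNode, error) {
	bn := &beaconNode{}
	for _, opt := range opts {
		if err := opt(bn); err != nil {
			return nil, err
		}
	}
	return bn, nil
}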

@@ -32,7 +32,6 @@ go_test(
name = "go_default_test",
srcs = [
"aggregated_test.go",
"benchmark_test.go",
"block_test.go",
"forkchoice_test.go",
"seen_bits_test.go",


@@ -1,20 +0,0 @@
package kv_test
import (
"testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations/kv"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
)
func BenchmarkAttCaches(b *testing.B) {
ac := kv.NewAttCaches()
att := &ethpb.Attestation{}
for i := 0; i < b.N; i++ {
assert.NoError(b, ac.SaveUnaggregatedAttestation(att))
assert.NoError(b, ac.DeleteAggregatedAttestation(att))
}
}


@@ -204,7 +204,7 @@ func (s *Service) broadcastSyncCommittee(ctx context.Context, subnet uint64, sMs
// BroadcastBlob broadcasts a blob to the p2p network, the message is assumed to be
// broadcasted to the current fork and to the input subnet.
func (s *Service) BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.SignedBlobSidecar) error {
func (s *Service) BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.BlobSidecar) error {
ctx, span := trace.StartSpan(ctx, "p2p.BroadcastBlob")
defer span.End()
if blob == nil {
@@ -223,7 +223,7 @@ func (s *Service) BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.
return nil
}
func (s *Service) broadcastBlob(ctx context.Context, subnet uint64, blobSidecar *ethpb.SignedBlobSidecar, forkDigest [4]byte) {
func (s *Service) broadcastBlob(ctx context.Context, subnet uint64, blobSidecar *ethpb.BlobSidecar, forkDigest [4]byte) {
_, span := trace.StartSpan(ctx, "p2p.broadcastBlob")
defer span.End()
ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.


@@ -469,18 +469,18 @@ func TestService_BroadcastBlob(t *testing.T) {
}),
}
blobSidecar := &ethpb.SignedBlobSidecar{
Message: &ethpb.BlobSidecar{
BlockRoot: bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength),
Index: 1,
Slot: 2,
BlockParentRoot: bytesutil.PadTo([]byte{'B'}, fieldparams.RootLength),
ProposerIndex: 3,
Blob: bytesutil.PadTo([]byte{'C'}, fieldparams.BlobLength),
KzgCommitment: bytesutil.PadTo([]byte{'D'}, fieldparams.BLSPubkeyLength),
KzgProof: bytesutil.PadTo([]byte{'E'}, fieldparams.BLSPubkeyLength),
},
Signature: bytesutil.PadTo([]byte{'F'}, fieldparams.BLSSignatureLength),
header := util.HydrateSignedBeaconHeader(&ethpb.SignedBeaconBlockHeader{})
commitmentInclusionProof := make([][]byte, 17)
for i := range commitmentInclusionProof {
commitmentInclusionProof[i] = bytesutil.PadTo([]byte{}, 32)
}
blobSidecar := &ethpb.BlobSidecar{
Index: 1,
Blob: bytesutil.PadTo([]byte{'C'}, fieldparams.BlobLength),
KzgCommitment: bytesutil.PadTo([]byte{'D'}, fieldparams.BLSPubkeyLength),
KzgProof: bytesutil.PadTo([]byte{'E'}, fieldparams.BLSPubkeyLength),
SignedBlockHeader: header,
CommitmentInclusionProof: commitmentInclusionProof,
}
subnet := uint64(0)
@@ -508,7 +508,7 @@ func TestService_BroadcastBlob(t *testing.T) {
incomingMessage, err := sub.Next(ctx)
require.NoError(t, err)
result := &ethpb.SignedBlobSidecar{}
result := &ethpb.BlobSidecar{}
require.NoError(t, p.Encoding().DecodeGossip(incomingMessage.Data, result))
require.DeepEqual(t, result, blobSidecar)
}(t)

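Taken together with the interface change below, the call shape is now: build an unsigned BlobSidecar that carries its signed block header and a 17-element KZG commitment inclusion proof, then hand it to BroadcastBlob. A hedged usage sketch grounded in the fields the updated test constructs; the helper function and the narrowed interface are illustrative, while the import paths are the ones used elsewhere in this diff:

package broadcastsketch

import (
	"context"

	fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
	ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)

// blobBroadcaster is the slice of the p2p Broadcaster interface this sketch needs.
type blobBroadcaster interface {
	BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.BlobSidecar) error
}

// broadcastExampleBlob builds a structurally complete (but cryptographically
// meaningless) sidecar with the post-Deneb fields and broadcasts it on subnet 0.
func broadcastExampleBlob(ctx context.Context, b blobBroadcaster, header *ethpb.SignedBeaconBlockHeader) error {
	proof := make([][]byte, 17) // inclusion proof depth used by the test above
	for i := range proof {
		proof[i] = make([]byte, 32)
	}
	sidecar := &ethpb.BlobSidecar{
		Index:                    1,
		Blob:                     make([]byte, fieldparams.BlobLength),
		KzgCommitment:            make([]byte, fieldparams.BLSPubkeyLength),
		KzgProof:                 make([]byte, fieldparams.BLSPubkeyLength),
		SignedBlockHeader:        header,
		CommitmentInclusionProof: proof,
	}
	return b.BroadcastBlob(ctx, 0, sidecar)
}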

@@ -21,7 +21,7 @@ var gossipTopicMappings = map[string]proto.Message{
SyncContributionAndProofSubnetTopicFormat: &ethpb.SignedContributionAndProof{},
SyncCommitteeSubnetTopicFormat: &ethpb.SyncCommitteeMessage{},
BlsToExecutionChangeSubnetTopicFormat: &ethpb.SignedBLSToExecutionChange{},
BlobSubnetTopicFormat: &ethpb.SignedBlobSidecar{},
BlobSubnetTopicFormat: &ethpb.BlobSidecar{},
}
// GossipTopicMappings is a function to return the assigned data type


@@ -35,7 +35,7 @@ type Broadcaster interface {
Broadcast(context.Context, proto.Message) error
BroadcastAttestation(ctx context.Context, subnet uint64, att *ethpb.Attestation) error
BroadcastSyncCommitteeMessage(ctx context.Context, subnet uint64, sMsg *ethpb.SyncCommitteeMessage) error
BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.SignedBlobSidecar) error
BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.BlobSidecar) error
}
// SetStreamHandler configures p2p to handle streams of a certain topic ID.


@@ -144,7 +144,7 @@ func (_ *FakeP2P) BroadcastSyncCommitteeMessage(_ context.Context, _ uint64, _ *
}
// BroadcastBlob -- fake.
func (_ *FakeP2P) BroadcastBlob(_ context.Context, _ uint64, _ *ethpb.SignedBlobSidecar) error {
func (_ *FakeP2P) BroadcastBlob(_ context.Context, _ uint64, _ *ethpb.BlobSidecar) error {
return nil
}


@@ -43,7 +43,7 @@ func (m *MockBroadcaster) BroadcastSyncCommitteeMessage(_ context.Context, _ uin
}
// BroadcastBlob broadcasts a blob for mock.
func (m *MockBroadcaster) BroadcastBlob(context.Context, uint64, *ethpb.SignedBlobSidecar) error {
func (m *MockBroadcaster) BroadcastBlob(context.Context, uint64, *ethpb.BlobSidecar) error {
m.BroadcastCalled.Store(true)
return nil
}


@@ -178,7 +178,7 @@ func (p *TestP2P) BroadcastSyncCommitteeMessage(_ context.Context, _ uint64, _ *
}
// BroadcastBlob broadcasts a blob for mock.
func (p *TestP2P) BroadcastBlob(context.Context, uint64, *ethpb.SignedBlobSidecar) error {
func (p *TestP2P) BroadcastBlob(context.Context, uint64, *ethpb.BlobSidecar) error {
p.BroadcastCalled.Store(true)
return nil
}


@@ -28,8 +28,10 @@ go_library(
"//beacon-chain/rpc/eth/beacon:go_default_library",
"//beacon-chain/rpc/eth/blob:go_default_library",
"//beacon-chain/rpc/eth/builder:go_default_library",
"//beacon-chain/rpc/eth/config:go_default_library",
"//beacon-chain/rpc/eth/debug:go_default_library",
"//beacon-chain/rpc/eth/events:go_default_library",
"//beacon-chain/rpc/eth/light-client:go_default_library",
"//beacon-chain/rpc/eth/node:go_default_library",
"//beacon-chain/rpc/eth/rewards:go_default_library",
"//beacon-chain/rpc/eth/validator:go_default_library",
@@ -73,6 +75,7 @@ go_test(
deps = [
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/execution/testing:go_default_library",
"//beacon-chain/startup:go_default_library",
"//beacon-chain/sync/initial-sync/testing:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",


@@ -12,14 +12,10 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/apimiddleware",
visibility = ["//visibility:public"],
deps = [
"//api:go_default_library",
"//api/gateway/apimiddleware:go_default_library",
"//api/grpc:go_default_library",
"//beacon-chain/rpc/eth/events:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//network/http:go_default_library",
"//proto/eth/v2:go_default_library",
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
@@ -37,16 +33,12 @@ go_test(
],
embed = [":go_default_library"],
deps = [
"//api:go_default_library",
"//api/gateway/apimiddleware:go_default_library",
"//api/grpc:go_default_library",
"//beacon-chain/rpc/eth/events:go_default_library",
"//config/params:go_default_library",
"//proto/eth/v2:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//time/slots:go_default_library",
"@com_github_gogo_protobuf//types:go_default_library",
"@com_github_r3labs_sse_v2//:go_default_library",
],
)


@@ -2,178 +2,17 @@ package apimiddleware
import (
"bytes"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"strconv"
"strings"
"github.com/prysmaticlabs/prysm/v4/api"
"github.com/prysmaticlabs/prysm/v4/api/gateway/apimiddleware"
"github.com/prysmaticlabs/prysm/v4/api/grpc"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/events"
http2 "github.com/prysmaticlabs/prysm/v4/network/http"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"github.com/r3labs/sse/v2"
)
type sszConfig struct {
fileName string
responseJson SszResponse
}
func handleProduceBlockSSZ(m *apimiddleware.ApiProxyMiddleware, endpoint apimiddleware.Endpoint, w http.ResponseWriter, req *http.Request) (handled bool) {
config := sszConfig{
fileName: "produce_beacon_block.ssz",
responseJson: &VersionedSSZResponseJson{},
}
return handleGetSSZ(m, endpoint, w, req, config)
}
func handleProduceBlindedBlockSSZ(
m *apimiddleware.ApiProxyMiddleware,
endpoint apimiddleware.Endpoint,
w http.ResponseWriter,
req *http.Request,
) (handled bool) {
config := sszConfig{
fileName: "produce_blinded_beacon_block.ssz",
responseJson: &VersionedSSZResponseJson{},
}
return handleGetSSZ(m, endpoint, w, req, config)
}
func handleGetSSZ(
m *apimiddleware.ApiProxyMiddleware,
endpoint apimiddleware.Endpoint,
w http.ResponseWriter,
req *http.Request,
config sszConfig,
) (handled bool) {
ssz := http2.SszRequested(req)
if !ssz {
return false
}
if errJson := prepareSSZRequestForProxying(m, endpoint, req); errJson != nil {
apimiddleware.WriteError(w, errJson, nil)
return true
}
grpcResponse, errJson := m.ProxyRequest(req)
if errJson != nil {
apimiddleware.WriteError(w, errJson, nil)
return true
}
grpcResponseBody, errJson := apimiddleware.ReadGrpcResponseBody(grpcResponse.Body)
if errJson != nil {
apimiddleware.WriteError(w, errJson, nil)
return true
}
respHasError, errJson := apimiddleware.HandleGrpcResponseError(endpoint.Err, grpcResponse, grpcResponseBody, w)
if errJson != nil {
apimiddleware.WriteError(w, errJson, nil)
return true
}
if respHasError {
return true
}
if errJson := apimiddleware.DeserializeGrpcResponseBodyIntoContainer(grpcResponseBody, config.responseJson); errJson != nil {
apimiddleware.WriteError(w, errJson, nil)
return true
}
respVersion, responseSsz, errJson := serializeMiddlewareResponseIntoSSZ(config.responseJson)
if errJson != nil {
apimiddleware.WriteError(w, errJson, nil)
return true
}
if errJson := writeSSZResponseHeaderAndBody(grpcResponse, w, responseSsz, respVersion, config.fileName); errJson != nil {
apimiddleware.WriteError(w, errJson, nil)
return true
}
if errJson := apimiddleware.Cleanup(grpcResponse.Body); errJson != nil {
apimiddleware.WriteError(w, errJson, nil)
return true
}
return true
}
func prepareSSZRequestForProxying(m *apimiddleware.ApiProxyMiddleware, endpoint apimiddleware.Endpoint, req *http.Request) apimiddleware.ErrorJson {
req.URL.Scheme = "http"
req.URL.Host = m.GatewayAddress
req.RequestURI = ""
if errJson := apimiddleware.HandleURLParameters(endpoint.Path, req, endpoint.RequestURLLiterals); errJson != nil {
return errJson
}
if errJson := apimiddleware.HandleQueryParameters(req, endpoint.RequestQueryParams); errJson != nil {
return errJson
}
// We have to add new segments after handling parameters because it changes URL segment indexing.
req.URL.Path = "/internal" + req.URL.Path + "/ssz"
return nil
}
func preparePostedSSZData(req *http.Request) apimiddleware.ErrorJson {
buf, err := io.ReadAll(req.Body)
if err != nil {
return apimiddleware.InternalServerErrorWithMessage(err, "could not read body")
}
j := SszRequestJson{Data: base64.StdEncoding.EncodeToString(buf)}
data, err := json.Marshal(j)
if err != nil {
return apimiddleware.InternalServerErrorWithMessage(err, "could not prepare POST data")
}
req.Body = io.NopCloser(bytes.NewBuffer(data))
req.ContentLength = int64(len(data))
req.Header.Set("Content-Type", api.JsonMediaType)
return nil
}
func serializeMiddlewareResponseIntoSSZ(respJson SszResponse) (version string, ssz []byte, errJson apimiddleware.ErrorJson) {
// Serialize the SSZ part of the deserialized value.
data, err := base64.StdEncoding.DecodeString(respJson.SSZData())
if err != nil {
return "", nil, apimiddleware.InternalServerErrorWithMessage(err, "could not decode response body into base64")
}
return strings.ToLower(respJson.SSZVersion()), data, nil
}
func writeSSZResponseHeaderAndBody(grpcResp *http.Response, w http.ResponseWriter, respSsz []byte, respVersion, fileName string) apimiddleware.ErrorJson {
var statusCodeHeader string
for h, vs := range grpcResp.Header {
// We don't want to expose any gRPC metadata in the HTTP response, so we skip forwarding metadata headers.
if strings.HasPrefix(h, "Grpc-Metadata") {
if h == "Grpc-Metadata-"+grpc.HttpCodeMetadataKey {
statusCodeHeader = vs[0]
}
} else {
for _, v := range vs {
w.Header().Set(h, v)
}
}
}
w.Header().Set("Content-Length", strconv.Itoa(len(respSsz)))
w.Header().Set("Content-Type", api.OctetStreamMediaType)
w.Header().Set("Content-Disposition", "attachment; filename="+fileName)
w.Header().Set(api.VersionHeader, respVersion)
if statusCodeHeader != "" {
code, err := strconv.Atoi(statusCodeHeader)
if err != nil {
return apimiddleware.InternalServerErrorWithMessage(err, "could not parse status code")
}
w.WriteHeader(code)
} else {
w.WriteHeader(grpcResp.StatusCode)
}
if _, err := io.Copy(w, io.NopCloser(bytes.NewReader(respSsz))); err != nil {
return apimiddleware.InternalServerErrorWithMessage(err, "could not write response message")
}
return nil
}
func handleEvents(m *apimiddleware.ApiProxyMiddleware, _ apimiddleware.Endpoint, w http.ResponseWriter, req *http.Request) (handled bool) {
sseClient := sse.NewClient("http://" + m.GatewayAddress + "/internal" + req.URL.RequestURI())
sseClient.Headers["Grpc-Timeout"] = "0S"


@@ -4,165 +4,16 @@ import (
"bytes"
"context"
"encoding/json"
"net/http"
"net/http/httptest"
"strings"
"testing"
"time"
"github.com/prysmaticlabs/prysm/v4/api"
"github.com/prysmaticlabs/prysm/v4/api/gateway/apimiddleware"
"github.com/prysmaticlabs/prysm/v4/api/grpc"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/events"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/r3labs/sse/v2"
)
type testSSZResponseJson struct {
Version string `json:"version"`
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
Data string `json:"data"`
}
func (t testSSZResponseJson) SSZVersion() string {
return t.Version
}
func (t testSSZResponseJson) SSZOptimistic() bool {
return t.ExecutionOptimistic
}
func (t testSSZResponseJson) SSZData() string {
return t.Data
}
func (t testSSZResponseJson) SSZFinalized() bool {
return t.Finalized
}
func TestPrepareSSZRequestForProxying(t *testing.T) {
middleware := &apimiddleware.ApiProxyMiddleware{
GatewayAddress: "http://apimiddleware.example",
}
endpoint := apimiddleware.Endpoint{
Path: "http://foo.example",
}
var body bytes.Buffer
request := httptest.NewRequest("GET", "http://foo.example", &body)
errJson := prepareSSZRequestForProxying(middleware, endpoint, request)
require.Equal(t, true, errJson == nil)
assert.Equal(t, "/internal/ssz", request.URL.Path)
}
func TestPreparePostedSszData(t *testing.T) {
var body bytes.Buffer
body.Write([]byte("body"))
request := httptest.NewRequest("POST", "http://foo.example", &body)
preparePostedSSZData(request)
assert.Equal(t, int64(19), request.ContentLength)
assert.Equal(t, api.JsonMediaType, request.Header.Get("Content-Type"))
}
func TestSerializeMiddlewareResponseIntoSSZ(t *testing.T) {
t.Run("ok", func(t *testing.T) {
j := testSSZResponseJson{
Version: "Version",
Data: "Zm9v",
}
v, ssz, errJson := serializeMiddlewareResponseIntoSSZ(j)
require.Equal(t, true, errJson == nil)
assert.DeepEqual(t, []byte("foo"), ssz)
assert.Equal(t, "version", v)
})
t.Run("invalid_data", func(t *testing.T) {
j := testSSZResponseJson{
Version: "Version",
Data: "invalid",
}
_, _, errJson := serializeMiddlewareResponseIntoSSZ(j)
require.Equal(t, false, errJson == nil)
assert.Equal(t, true, strings.Contains(errJson.Msg(), "could not decode response body into base64"))
assert.Equal(t, http.StatusInternalServerError, errJson.StatusCode())
})
}
func TestWriteSSZResponseHeaderAndBody(t *testing.T) {
responseSsz := []byte("ssz")
version := "version"
fileName := "test.ssz"
t.Run("ok", func(t *testing.T) {
response := &http.Response{
Header: http.Header{
"Foo": []string{"foo"},
"Grpc-Metadata-" + grpc.HttpCodeMetadataKey: []string{"204"},
},
}
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
errJson := writeSSZResponseHeaderAndBody(response, writer, responseSsz, version, fileName)
require.Equal(t, true, errJson == nil)
v, ok := writer.Header()["Foo"]
require.Equal(t, true, ok, "header not found")
require.Equal(t, 1, len(v), "wrong number of header values")
assert.Equal(t, "foo", v[0])
v, ok = writer.Header()["Content-Length"]
require.Equal(t, true, ok, "header not found")
require.Equal(t, 1, len(v), "wrong number of header values")
assert.Equal(t, "3", v[0])
v, ok = writer.Header()["Content-Type"]
require.Equal(t, true, ok, "header not found")
require.Equal(t, 1, len(v), "wrong number of header values")
assert.Equal(t, api.OctetStreamMediaType, v[0])
v, ok = writer.Header()["Content-Disposition"]
require.Equal(t, true, ok, "header not found")
require.Equal(t, 1, len(v), "wrong number of header values")
assert.Equal(t, "attachment; filename=test.ssz", v[0])
v, ok = writer.Header()[api.VersionHeader]
require.Equal(t, true, ok, "header not found")
require.Equal(t, 1, len(v), "wrong number of header values")
assert.Equal(t, "version", v[0])
assert.Equal(t, 204, writer.Code)
})
t.Run("no_grpc_status_code_header", func(t *testing.T) {
response := &http.Response{
Header: http.Header{},
StatusCode: 204,
}
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
errJson := writeSSZResponseHeaderAndBody(response, writer, responseSsz, version, fileName)
require.Equal(t, true, errJson == nil)
assert.Equal(t, 204, writer.Code)
})
t.Run("invalid_status_code", func(t *testing.T) {
response := &http.Response{
Header: http.Header{
"Foo": []string{"foo"},
"Grpc-Metadata-" + grpc.HttpCodeMetadataKey: []string{"invalid"},
},
}
responseSsz := []byte("ssz")
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
errJson := writeSSZResponseHeaderAndBody(response, writer, responseSsz, version, fileName)
require.Equal(t, false, errJson == nil)
assert.Equal(t, true, strings.Contains(errJson.Msg(), "could not parse status code"))
assert.Equal(t, http.StatusInternalServerError, errJson.StatusCode())
})
}
func TestReceiveEvents(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
ch := make(chan *sse.Event)


@@ -3,17 +3,14 @@ package apimiddleware
import (
"bytes"
"encoding/json"
"fmt"
"io"
"net/http"
"strconv"
"strings"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/api/gateway/apimiddleware"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
ethpbv2 "github.com/prysmaticlabs/prysm/v4/proto/eth/v2"
"github.com/prysmaticlabs/prysm/v4/time/slots"
)
@@ -278,178 +275,3 @@ func preparePublishedBlindedBlock(endpoint *apimiddleware.Endpoint, _ http.Respo
}
return apimiddleware.InternalServerError(errors.New("unsupported block type"))
}
type phase0ProduceBlockResponseJson struct {
Version string `json:"version" enum:"true"`
Data *BeaconBlockJson `json:"data"`
}
type altairProduceBlockResponseJson struct {
Version string `json:"version" enum:"true"`
Data *BeaconBlockAltairJson `json:"data"`
}
type bellatrixProduceBlockResponseJson struct {
Version string `json:"version" enum:"true"`
Data *BeaconBlockBellatrixJson `json:"data"`
}
type capellaProduceBlockResponseJson struct {
Version string `json:"version" enum:"true"`
Data *BeaconBlockCapellaJson `json:"data"`
}
type denebProduceBlockResponseJson struct {
Version string `json:"version" enum:"true"`
Data *BeaconBlockContentsDenebJson `json:"data"`
}
type bellatrixProduceBlindedBlockResponseJson struct {
Version string `json:"version" enum:"true"`
Data *BlindedBeaconBlockBellatrixJson `json:"data"`
}
type capellaProduceBlindedBlockResponseJson struct {
Version string `json:"version" enum:"true"`
Data *BlindedBeaconBlockCapellaJson `json:"data"`
}
type denebProduceBlindedBlockResponseJson struct {
Version string `json:"version" enum:"true"`
Data *BlindedBeaconBlockContentsDenebJson `json:"data"`
}
func serializeProducedV2Block(response interface{}) (apimiddleware.RunDefault, []byte, apimiddleware.ErrorJson) {
respContainer, ok := response.(*ProduceBlockResponseV2Json)
if !ok {
return false, nil, apimiddleware.InternalServerError(errors.New("container is not of the correct type"))
}
var actualRespContainer interface{}
switch {
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_PHASE0.String())):
actualRespContainer = &phase0ProduceBlockResponseJson{
Version: respContainer.Version,
Data: respContainer.Data.Phase0Block,
}
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_ALTAIR.String())):
actualRespContainer = &altairProduceBlockResponseJson{
Version: respContainer.Version,
Data: respContainer.Data.AltairBlock,
}
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_BELLATRIX.String())):
actualRespContainer = &bellatrixProduceBlockResponseJson{
Version: respContainer.Version,
Data: respContainer.Data.BellatrixBlock,
}
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_CAPELLA.String())):
actualRespContainer = &capellaProduceBlockResponseJson{
Version: respContainer.Version,
Data: respContainer.Data.CapellaBlock,
}
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_DENEB.String())):
actualRespContainer = &denebProduceBlockResponseJson{
Version: respContainer.Version,
Data: respContainer.Data.DenebContents,
}
default:
return false, nil, apimiddleware.InternalServerError(fmt.Errorf("unsupported block version '%s'", respContainer.Version))
}
j, err := json.Marshal(actualRespContainer)
if err != nil {
return false, nil, apimiddleware.InternalServerErrorWithMessage(err, "could not marshal response")
}
return false, j, nil
}
func serializeProducedBlindedBlock(response interface{}) (apimiddleware.RunDefault, []byte, apimiddleware.ErrorJson) {
respContainer, ok := response.(*ProduceBlindedBlockResponseJson)
if !ok {
return false, nil, apimiddleware.InternalServerError(errors.New("container is not of the correct type"))
}
var actualRespContainer interface{}
switch {
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_PHASE0.String())):
actualRespContainer = &phase0ProduceBlockResponseJson{
Version: respContainer.Version,
Data: respContainer.Data.Phase0Block,
}
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_ALTAIR.String())):
actualRespContainer = &altairProduceBlockResponseJson{
Version: respContainer.Version,
Data: respContainer.Data.AltairBlock,
}
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_BELLATRIX.String())):
actualRespContainer = &bellatrixProduceBlindedBlockResponseJson{
Version: respContainer.Version,
Data: respContainer.Data.BellatrixBlock,
}
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_CAPELLA.String())):
actualRespContainer = &capellaProduceBlindedBlockResponseJson{
Version: respContainer.Version,
Data: respContainer.Data.CapellaBlock,
}
case strings.EqualFold(respContainer.Version, strings.ToLower(ethpbv2.Version_DENEB.String())):
actualRespContainer = &denebProduceBlindedBlockResponseJson{
Version: respContainer.Version,
Data: respContainer.Data.DenebContents,
}
default:
return false, nil, apimiddleware.InternalServerError(fmt.Errorf("unsupported block version '%s'", respContainer.Version))
}
j, err := json.Marshal(actualRespContainer)
if err != nil {
return false, nil, apimiddleware.InternalServerErrorWithMessage(err, "could not marshal response")
}
return false, j, nil
}
func prepareForkChoiceResponse(response interface{}) (apimiddleware.RunDefault, []byte, apimiddleware.ErrorJson) {
dump, ok := response.(*ForkChoiceDumpJson)
if !ok {
return false, nil, apimiddleware.InternalServerError(errors.New("response is not of the correct type"))
}
nodes := make([]*ForkChoiceNodeResponseJson, len(dump.ForkChoiceNodes))
for i, n := range dump.ForkChoiceNodes {
nodes[i] = &ForkChoiceNodeResponseJson{
Slot: n.Slot,
BlockRoot: n.BlockRoot,
ParentRoot: n.ParentRoot,
JustifiedEpoch: n.JustifiedEpoch,
FinalizedEpoch: n.FinalizedEpoch,
Weight: n.Weight,
Validity: n.Validity,
ExecutionBlockHash: n.ExecutionBlockHash,
ExtraData: &ForkChoiceNodeExtraDataJson{
UnrealizedJustifiedEpoch: n.UnrealizedJustifiedEpoch,
UnrealizedFinalizedEpoch: n.UnrealizedFinalizedEpoch,
Balance: n.Balance,
ExecutionOptimistic: n.ExecutionOptimistic,
TimeStamp: n.TimeStamp,
},
}
}
forkChoice := &ForkChoiceResponseJson{
JustifiedCheckpoint: dump.JustifiedCheckpoint,
FinalizedCheckpoint: dump.FinalizedCheckpoint,
ForkChoiceNodes: nodes,
ExtraData: &ForkChoiceResponseExtraDataJson{
BestJustifiedCheckpoint: dump.BestJustifiedCheckpoint,
UnrealizedJustifiedCheckpoint: dump.UnrealizedJustifiedCheckpoint,
UnrealizedFinalizedCheckpoint: dump.UnrealizedFinalizedCheckpoint,
ProposerBoostRoot: dump.ProposerBoostRoot,
PreviousProposerBoostRoot: dump.PreviousProposerBoostRoot,
HeadRoot: dump.HeadRoot,
},
}
result, err := json.Marshal(forkChoice)
if err != nil {
return false, nil, apimiddleware.InternalServerError(errors.New("could not marshal fork choice to JSON"))
}
return false, result, nil
}


@@ -9,10 +9,8 @@ import (
"strings"
"testing"
"github.com/gogo/protobuf/types"
"github.com/prysmaticlabs/prysm/v4/api/gateway/apimiddleware"
"github.com/prysmaticlabs/prysm/v4/config/params"
ethpbv2 "github.com/prysmaticlabs/prysm/v4/proto/eth/v2"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/time/slots"
@@ -359,437 +357,3 @@ func TestPreparePublishedBlindedBlock(t *testing.T) {
assert.Equal(t, true, strings.Contains(errJson.Msg(), "unsupported block type"))
})
}
func TestSerializeProducedV2Block(t *testing.T) {
t.Run("Phase 0", func(t *testing.T) {
response := &ProduceBlockResponseV2Json{
Version: ethpbv2.Version_PHASE0.String(),
Data: &BeaconBlockContainerV2Json{
Phase0Block: &BeaconBlockJson{
Slot: "1",
ProposerIndex: "1",
ParentRoot: "root",
StateRoot: "root",
Body: &BeaconBlockBodyJson{},
},
},
}
runDefault, j, errJson := serializeProducedV2Block(response)
require.Equal(t, nil, errJson)
require.Equal(t, apimiddleware.RunDefault(false), runDefault)
require.NotNil(t, j)
resp := &phase0ProduceBlockResponseJson{}
require.NoError(t, json.Unmarshal(j, resp))
require.NotNil(t, resp.Data)
require.NotNil(t, resp.Data)
beaconBlock := resp.Data
assert.Equal(t, "1", beaconBlock.Slot)
assert.Equal(t, "1", beaconBlock.ProposerIndex)
assert.Equal(t, "root", beaconBlock.ParentRoot)
assert.Equal(t, "root", beaconBlock.StateRoot)
require.NotNil(t, beaconBlock.Body)
})
t.Run("Altair", func(t *testing.T) {
response := &ProduceBlockResponseV2Json{
Version: ethpbv2.Version_ALTAIR.String(),
Data: &BeaconBlockContainerV2Json{
AltairBlock: &BeaconBlockAltairJson{
Slot: "1",
ProposerIndex: "1",
ParentRoot: "root",
StateRoot: "root",
Body: &BeaconBlockBodyAltairJson{},
},
},
}
runDefault, j, errJson := serializeProducedV2Block(response)
require.Equal(t, nil, errJson)
require.Equal(t, apimiddleware.RunDefault(false), runDefault)
require.NotNil(t, j)
resp := &altairProduceBlockResponseJson{}
require.NoError(t, json.Unmarshal(j, resp))
require.NotNil(t, resp.Data)
require.NotNil(t, resp.Data)
beaconBlock := resp.Data
assert.Equal(t, "1", beaconBlock.Slot)
assert.Equal(t, "1", beaconBlock.ProposerIndex)
assert.Equal(t, "root", beaconBlock.ParentRoot)
assert.Equal(t, "root", beaconBlock.StateRoot)
require.NotNil(t, beaconBlock.Body)
})
t.Run("Bellatrix", func(t *testing.T) {
response := &ProduceBlockResponseV2Json{
Version: ethpbv2.Version_BELLATRIX.String(),
Data: &BeaconBlockContainerV2Json{
BellatrixBlock: &BeaconBlockBellatrixJson{
Slot: "1",
ProposerIndex: "1",
ParentRoot: "root",
StateRoot: "root",
Body: &BeaconBlockBodyBellatrixJson{},
},
},
}
runDefault, j, errJson := serializeProducedV2Block(response)
require.Equal(t, nil, errJson)
require.Equal(t, apimiddleware.RunDefault(false), runDefault)
require.NotNil(t, j)
resp := &capellaProduceBlockResponseJson{}
require.NoError(t, json.Unmarshal(j, resp))
require.NotNil(t, resp.Data)
require.NotNil(t, resp.Data)
beaconBlock := resp.Data
assert.Equal(t, "1", beaconBlock.Slot)
assert.Equal(t, "1", beaconBlock.ProposerIndex)
assert.Equal(t, "root", beaconBlock.ParentRoot)
assert.Equal(t, "root", beaconBlock.StateRoot)
require.NotNil(t, beaconBlock.Body)
})
t.Run("Capella", func(t *testing.T) {
response := &ProduceBlockResponseV2Json{
Version: ethpbv2.Version_CAPELLA.String(),
Data: &BeaconBlockContainerV2Json{
CapellaBlock: &BeaconBlockCapellaJson{
Slot: "1",
ProposerIndex: "1",
ParentRoot: "root",
StateRoot: "root",
Body: &BeaconBlockBodyCapellaJson{},
},
},
}
runDefault, j, errJson := serializeProducedV2Block(response)
require.Equal(t, nil, errJson)
require.Equal(t, apimiddleware.RunDefault(false), runDefault)
require.NotNil(t, j)
resp := &capellaProduceBlockResponseJson{}
require.NoError(t, json.Unmarshal(j, resp))
require.NotNil(t, resp.Data)
require.NotNil(t, resp.Data)
beaconBlock := resp.Data
assert.Equal(t, "1", beaconBlock.Slot)
assert.Equal(t, "1", beaconBlock.ProposerIndex)
assert.Equal(t, "root", beaconBlock.ParentRoot)
assert.Equal(t, "root", beaconBlock.StateRoot)
require.NotNil(t, beaconBlock.Body)
})
t.Run("Deneb", func(t *testing.T) {
response := &ProduceBlockResponseV2Json{
Version: ethpbv2.Version_DENEB.String(),
Data: &BeaconBlockContainerV2Json{
DenebContents: &BeaconBlockContentsDenebJson{
Block: &BeaconBlockDenebJson{
Slot: "1",
ProposerIndex: "1",
ParentRoot: "root",
StateRoot: "root",
Body: &BeaconBlockBodyDenebJson{},
},
BlobSidecars: []*BlobSidecarJson{{
BlockRoot: "root",
Index: "1",
Slot: "1",
BlockParentRoot: "root",
ProposerIndex: "1",
Blob: "blob",
KzgCommitment: "kzgcommitment",
KzgProof: "kzgproof",
}},
},
},
}
runDefault, j, errJson := serializeProducedV2Block(response)
require.Equal(t, nil, errJson)
require.Equal(t, apimiddleware.RunDefault(false), runDefault)
require.NotNil(t, j)
resp := &denebProduceBlockResponseJson{}
require.NoError(t, json.Unmarshal(j, resp))
require.NotNil(t, resp.Data)
require.NotNil(t, resp.Data)
beaconBlock := resp.Data.Block
assert.Equal(t, "1", beaconBlock.Slot)
assert.Equal(t, "1", beaconBlock.ProposerIndex)
assert.Equal(t, "root", beaconBlock.ParentRoot)
assert.Equal(t, "root", beaconBlock.StateRoot)
assert.NotNil(t, beaconBlock.Body)
require.Equal(t, 1, len(resp.Data.BlobSidecars))
sidecar := resp.Data.BlobSidecars[0]
assert.Equal(t, "root", sidecar.BlockRoot)
assert.Equal(t, "1", sidecar.Index)
assert.Equal(t, "1", sidecar.Slot)
assert.Equal(t, "root", sidecar.BlockParentRoot)
assert.Equal(t, "1", sidecar.ProposerIndex)
assert.Equal(t, "blob", sidecar.Blob)
assert.Equal(t, "kzgcommitment", sidecar.KzgCommitment)
assert.Equal(t, "kzgproof", sidecar.KzgProof)
})
t.Run("incorrect response type", func(t *testing.T) {
response := &types.Empty{}
runDefault, j, errJson := serializeProducedV2Block(response)
require.Equal(t, apimiddleware.RunDefault(false), runDefault)
require.Equal(t, 0, len(j))
require.NotNil(t, errJson)
assert.Equal(t, true, strings.Contains(errJson.Msg(), "container is not of the correct type"))
})
t.Run("unsupported block version", func(t *testing.T) {
response := &ProduceBlockResponseV2Json{
Version: "unsupported",
}
runDefault, j, errJson := serializeProducedV2Block(response)
require.Equal(t, apimiddleware.RunDefault(false), runDefault)
require.Equal(t, 0, len(j))
require.NotNil(t, errJson)
assert.Equal(t, true, strings.Contains(errJson.Msg(), "unsupported block version"))
})
}
func TestSerializeProduceBlindedBlock(t *testing.T) {
t.Run("Phase 0", func(t *testing.T) {
response := &ProduceBlindedBlockResponseJson{
Version: ethpbv2.Version_PHASE0.String(),
Data: &BlindedBeaconBlockContainerJson{
Phase0Block: &BeaconBlockJson{
Slot: "1",
ProposerIndex: "1",
ParentRoot: "root",
StateRoot: "root",
Body: &BeaconBlockBodyJson{},
},
AltairBlock: nil,
},
}
runDefault, j, errJson := serializeProducedBlindedBlock(response)
require.Equal(t, nil, errJson)
require.Equal(t, apimiddleware.RunDefault(false), runDefault)
require.NotNil(t, j)
resp := &phase0ProduceBlockResponseJson{}
require.NoError(t, json.Unmarshal(j, resp))
require.NotNil(t, resp.Data)
require.NotNil(t, resp.Data)
beaconBlock := resp.Data
assert.Equal(t, "1", beaconBlock.Slot)
assert.Equal(t, "1", beaconBlock.ProposerIndex)
assert.Equal(t, "root", beaconBlock.ParentRoot)
assert.Equal(t, "root", beaconBlock.StateRoot)
require.NotNil(t, beaconBlock.Body)
})
t.Run("Altair", func(t *testing.T) {
response := &ProduceBlindedBlockResponseJson{
Version: ethpbv2.Version_ALTAIR.String(),
Data: &BlindedBeaconBlockContainerJson{
AltairBlock: &BeaconBlockAltairJson{
Slot: "1",
ProposerIndex: "1",
ParentRoot: "root",
StateRoot: "root",
Body: &BeaconBlockBodyAltairJson{},
},
},
}
runDefault, j, errJson := serializeProducedBlindedBlock(response)
require.Equal(t, nil, errJson)
require.Equal(t, apimiddleware.RunDefault(false), runDefault)
require.NotNil(t, j)
resp := &altairProduceBlockResponseJson{}
require.NoError(t, json.Unmarshal(j, resp))
require.NotNil(t, resp.Data)
require.NotNil(t, resp.Data)
beaconBlock := resp.Data
assert.Equal(t, "1", beaconBlock.Slot)
assert.Equal(t, "1", beaconBlock.ProposerIndex)
assert.Equal(t, "root", beaconBlock.ParentRoot)
assert.Equal(t, "root", beaconBlock.StateRoot)
require.NotNil(t, beaconBlock.Body)
})
t.Run("Bellatrix", func(t *testing.T) {
response := &ProduceBlindedBlockResponseJson{
Version: ethpbv2.Version_BELLATRIX.String(),
Data: &BlindedBeaconBlockContainerJson{
BellatrixBlock: &BlindedBeaconBlockBellatrixJson{
Slot: "1",
ProposerIndex: "1",
ParentRoot: "root",
StateRoot: "root",
Body: &BlindedBeaconBlockBodyBellatrixJson{},
},
},
}
runDefault, j, errJson := serializeProducedBlindedBlock(response)
require.Equal(t, nil, errJson)
require.Equal(t, apimiddleware.RunDefault(false), runDefault)
require.NotNil(t, j)
resp := &bellatrixProduceBlindedBlockResponseJson{}
require.NoError(t, json.Unmarshal(j, resp))
require.NotNil(t, resp.Data)
require.NotNil(t, resp.Data)
beaconBlock := resp.Data
assert.Equal(t, "1", beaconBlock.Slot)
assert.Equal(t, "1", beaconBlock.ProposerIndex)
assert.Equal(t, "root", beaconBlock.ParentRoot)
assert.Equal(t, "root", beaconBlock.StateRoot)
require.NotNil(t, beaconBlock.Body)
})
t.Run("Capella", func(t *testing.T) {
response := &ProduceBlindedBlockResponseJson{
Version: ethpbv2.Version_CAPELLA.String(),
Data: &BlindedBeaconBlockContainerJson{
CapellaBlock: &BlindedBeaconBlockCapellaJson{
Slot: "1",
ProposerIndex: "1",
ParentRoot: "root",
StateRoot: "root",
Body: &BlindedBeaconBlockBodyCapellaJson{},
},
},
}
runDefault, j, errJson := serializeProducedBlindedBlock(response)
require.Equal(t, nil, errJson)
require.Equal(t, apimiddleware.RunDefault(false), runDefault)
require.NotNil(t, j)
resp := &capellaProduceBlindedBlockResponseJson{}
require.NoError(t, json.Unmarshal(j, resp))
require.NotNil(t, resp.Data)
beaconBlock := resp.Data
assert.Equal(t, "1", beaconBlock.Slot)
assert.Equal(t, "1", beaconBlock.ProposerIndex)
assert.Equal(t, "root", beaconBlock.ParentRoot)
assert.Equal(t, "root", beaconBlock.StateRoot)
require.NotNil(t, beaconBlock.Body)
})
t.Run("incorrect response type", func(t *testing.T) {
response := &types.Empty{}
runDefault, j, errJson := serializeProducedV2Block(response)
require.Equal(t, apimiddleware.RunDefault(false), runDefault)
require.Equal(t, 0, len(j))
require.NotNil(t, errJson)
assert.Equal(t, true, strings.Contains(errJson.Msg(), "container is not of the correct type"))
})
t.Run("unsupported block version", func(t *testing.T) {
response := &ProduceBlockResponseV2Json{
Version: "unsupported",
}
runDefault, j, errJson := serializeProducedV2Block(response)
require.Equal(t, apimiddleware.RunDefault(false), runDefault)
require.Equal(t, 0, len(j))
require.NotNil(t, errJson)
assert.Equal(t, true, strings.Contains(errJson.Msg(), "unsupported block version"))
})
}
func TestPrepareForkChoiceResponse(t *testing.T) {
dump := &ForkChoiceDumpJson{
JustifiedCheckpoint: &CheckpointJson{
Epoch: "justified",
Root: "justified",
},
FinalizedCheckpoint: &CheckpointJson{
Epoch: "finalized",
Root: "finalized",
},
BestJustifiedCheckpoint: &CheckpointJson{
Epoch: "best_justified",
Root: "best_justified",
},
UnrealizedJustifiedCheckpoint: &CheckpointJson{
Epoch: "unrealized_justified",
Root: "unrealized_justified",
},
UnrealizedFinalizedCheckpoint: &CheckpointJson{
Epoch: "unrealized_finalized",
Root: "unrealized_finalized",
},
ProposerBoostRoot: "proposer_boost_root",
PreviousProposerBoostRoot: "previous_proposer_boost_root",
HeadRoot: "head_root",
ForkChoiceNodes: []*ForkChoiceNodeJson{
{
Slot: "node1_slot",
BlockRoot: "node1_block_root",
ParentRoot: "node1_parent_root",
JustifiedEpoch: "node1_justified_epoch",
FinalizedEpoch: "node1_finalized_epoch",
UnrealizedJustifiedEpoch: "node1_unrealized_justified_epoch",
UnrealizedFinalizedEpoch: "node1_unrealized_finalized_epoch",
Balance: "node1_balance",
Weight: "node1_weight",
ExecutionOptimistic: false,
ExecutionBlockHash: "node1_execution_block_hash",
TimeStamp: "node1_time_stamp",
Validity: "node1_validity",
},
{
Slot: "node2_slot",
BlockRoot: "node2_block_root",
ParentRoot: "node2_parent_root",
JustifiedEpoch: "node2_justified_epoch",
FinalizedEpoch: "node2_finalized_epoch",
UnrealizedJustifiedEpoch: "node2_unrealized_justified_epoch",
UnrealizedFinalizedEpoch: "node2_unrealized_finalized_epoch",
Balance: "node2_balance",
Weight: "node2_weight",
ExecutionOptimistic: true,
ExecutionBlockHash: "node2_execution_block_hash",
TimeStamp: "node2_time_stamp",
Validity: "node2_validity",
},
},
}
runDefault, j, errorJson := prepareForkChoiceResponse(dump)
assert.Equal(t, nil, errorJson)
assert.Equal(t, apimiddleware.RunDefault(false), runDefault)
result := &ForkChoiceResponseJson{}
require.NoError(t, json.Unmarshal(j, result))
require.NotNil(t, result)
assert.Equal(t, "justified", result.JustifiedCheckpoint.Epoch)
assert.Equal(t, "justified", result.JustifiedCheckpoint.Root)
assert.Equal(t, "finalized", result.FinalizedCheckpoint.Epoch)
assert.Equal(t, "finalized", result.FinalizedCheckpoint.Root)
assert.Equal(t, "best_justified", result.ExtraData.BestJustifiedCheckpoint.Epoch)
assert.Equal(t, "best_justified", result.ExtraData.BestJustifiedCheckpoint.Root)
assert.Equal(t, "unrealized_justified", result.ExtraData.UnrealizedJustifiedCheckpoint.Epoch)
assert.Equal(t, "unrealized_justified", result.ExtraData.UnrealizedJustifiedCheckpoint.Root)
assert.Equal(t, "unrealized_finalized", result.ExtraData.UnrealizedFinalizedCheckpoint.Epoch)
assert.Equal(t, "unrealized_finalized", result.ExtraData.UnrealizedFinalizedCheckpoint.Root)
assert.Equal(t, "proposer_boost_root", result.ExtraData.ProposerBoostRoot)
assert.Equal(t, "previous_proposer_boost_root", result.ExtraData.PreviousProposerBoostRoot)
assert.Equal(t, "head_root", result.ExtraData.HeadRoot)
require.Equal(t, 2, len(result.ForkChoiceNodes))
node1 := result.ForkChoiceNodes[0]
require.NotNil(t, node1)
assert.Equal(t, "node1_slot", node1.Slot)
assert.Equal(t, "node1_block_root", node1.BlockRoot)
assert.Equal(t, "node1_parent_root", node1.ParentRoot)
assert.Equal(t, "node1_justified_epoch", node1.JustifiedEpoch)
assert.Equal(t, "node1_finalized_epoch", node1.FinalizedEpoch)
assert.Equal(t, "node1_unrealized_justified_epoch", node1.ExtraData.UnrealizedJustifiedEpoch)
assert.Equal(t, "node1_unrealized_finalized_epoch", node1.ExtraData.UnrealizedFinalizedEpoch)
assert.Equal(t, "node1_balance", node1.ExtraData.Balance)
assert.Equal(t, "node1_weight", node1.Weight)
assert.Equal(t, false, node1.ExtraData.ExecutionOptimistic)
assert.Equal(t, "node1_execution_block_hash", node1.ExecutionBlockHash)
assert.Equal(t, "node1_time_stamp", node1.ExtraData.TimeStamp)
assert.Equal(t, "node1_validity", node1.Validity)
node2 := result.ForkChoiceNodes[1]
require.NotNil(t, node2)
assert.Equal(t, "node2_slot", node2.Slot)
assert.Equal(t, "node2_block_root", node2.BlockRoot)
assert.Equal(t, "node2_parent_root", node2.ParentRoot)
assert.Equal(t, "node2_justified_epoch", node2.JustifiedEpoch)
assert.Equal(t, "node2_finalized_epoch", node2.FinalizedEpoch)
assert.Equal(t, "node2_unrealized_justified_epoch", node2.ExtraData.UnrealizedJustifiedEpoch)
assert.Equal(t, "node2_unrealized_finalized_epoch", node2.ExtraData.UnrealizedFinalizedEpoch)
assert.Equal(t, "node2_balance", node2.ExtraData.Balance)
assert.Equal(t, "node2_weight", node2.Weight)
assert.Equal(t, true, node2.ExtraData.ExecutionOptimistic)
assert.Equal(t, "node2_execution_block_hash", node2.ExecutionBlockHash)
assert.Equal(t, "node2_time_stamp", node2.ExtraData.TimeStamp)
assert.Equal(t, "node2_validity", node2.Validity)
}


@@ -16,18 +16,8 @@ func (f *BeaconEndpointFactory) IsNil() bool {
// Paths is a collection of all valid beacon chain API paths.
func (_ *BeaconEndpointFactory) Paths() []string {
return []string{
"/eth/v1/beacon/pool/attester_slashings",
"/eth/v1/beacon/pool/proposer_slashings",
"/eth/v1/beacon/weak_subjectivity",
"/eth/v1/debug/beacon/heads",
"/eth/v2/debug/beacon/heads",
"/eth/v1/debug/fork_choice",
"/eth/v1/config/fork_schedule",
"/eth/v1/config/spec",
"/eth/v1/events",
"/eth/v1/validator/blocks/{slot}",
"/eth/v2/validator/blocks/{slot}",
"/eth/v1/validator/blinded_blocks/{slot}",
}
}
@@ -35,49 +25,10 @@ func (_ *BeaconEndpointFactory) Paths() []string {
func (_ *BeaconEndpointFactory) Create(path string) (*apimiddleware.Endpoint, error) {
endpoint := apimiddleware.DefaultEndpoint()
switch path {
case "/eth/v1/beacon/pool/attester_slashings":
endpoint.PostRequest = &AttesterSlashingJson{}
endpoint.GetResponse = &AttesterSlashingsPoolResponseJson{}
case "/eth/v1/beacon/pool/proposer_slashings":
endpoint.PostRequest = &ProposerSlashingJson{}
endpoint.GetResponse = &ProposerSlashingsPoolResponseJson{}
case "/eth/v1/beacon/weak_subjectivity":
endpoint.GetResponse = &WeakSubjectivityResponse{}
case "/eth/v1/debug/beacon/heads":
endpoint.GetResponse = &ForkChoiceHeadsResponseJson{}
case "/eth/v2/debug/beacon/heads":
endpoint.GetResponse = &V2ForkChoiceHeadsResponseJson{}
case "/eth/v1/debug/fork_choice":
endpoint.GetResponse = &ForkChoiceDumpJson{}
endpoint.Hooks = apimiddleware.HookCollection{
OnPreSerializeMiddlewareResponseIntoJson: prepareForkChoiceResponse,
}
case "/eth/v1/config/fork_schedule":
endpoint.GetResponse = &ForkScheduleResponseJson{}
case "/eth/v1/config/spec":
endpoint.GetResponse = &SpecResponseJson{}
case "/eth/v1/events":
endpoint.CustomHandlers = []apimiddleware.CustomHandler{handleEvents}
case "/eth/v1/validator/blocks/{slot}":
endpoint.GetResponse = &ProduceBlockResponseJson{}
endpoint.RequestURLLiterals = []string{"slot"}
endpoint.RequestQueryParams = []apimiddleware.QueryParam{{Name: "randao_reveal", Hex: true}, {Name: "graffiti", Hex: true}}
case "/eth/v2/validator/blocks/{slot}":
endpoint.GetResponse = &ProduceBlockResponseV2Json{}
endpoint.RequestURLLiterals = []string{"slot"}
endpoint.RequestQueryParams = []apimiddleware.QueryParam{{Name: "randao_reveal", Hex: true}, {Name: "graffiti", Hex: true}}
endpoint.Hooks = apimiddleware.HookCollection{
OnPreSerializeMiddlewareResponseIntoJson: serializeProducedV2Block,
}
endpoint.CustomHandlers = []apimiddleware.CustomHandler{handleProduceBlockSSZ}
case "/eth/v1/validator/blinded_blocks/{slot}":
endpoint.GetResponse = &ProduceBlindedBlockResponseJson{}
endpoint.RequestURLLiterals = []string{"slot"}
endpoint.RequestQueryParams = []apimiddleware.QueryParam{{Name: "randao_reveal", Hex: true}, {Name: "graffiti", Hex: true}}
endpoint.Hooks = apimiddleware.HookCollection{
OnPreSerializeMiddlewareResponseIntoJson: serializeProducedBlindedBlock,
}
endpoint.CustomHandlers = []apimiddleware.CustomHandler{handleProduceBlindedBlockSSZ}
default:
return nil, errors.New("invalid path")
}


@@ -1,10 +1,7 @@
package apimiddleware
import (
"strings"
"github.com/prysmaticlabs/prysm/v4/api/gateway/apimiddleware"
ethpbv2 "github.com/prysmaticlabs/prysm/v4/proto/eth/v2"
)
//----------------
@@ -26,48 +23,10 @@ type BlockRootResponseJson struct {
Finalized bool `json:"finalized"`
}
type AttesterSlashingsPoolResponseJson struct {
Data []*AttesterSlashingJson `json:"data"`
}
type ProposerSlashingsPoolResponseJson struct {
Data []*ProposerSlashingJson `json:"data"`
}
type ForkChoiceHeadsResponseJson struct {
Data []*ForkChoiceHeadJson `json:"data"`
}
type V2ForkChoiceHeadsResponseJson struct {
Data []*V2ForkChoiceHeadJson `json:"data"`
}
type ForkScheduleResponseJson struct {
Data []*ForkJson `json:"data"`
}
type DepositContractResponseJson struct {
Data *DepositContractJson `json:"data"`
}
type SpecResponseJson struct {
Data interface{} `json:"data"`
}
type ProduceBlockResponseJson struct {
Data *BeaconBlockJson `json:"data"`
}
type ProduceBlockResponseV2Json struct {
Version string `json:"version" enum:"true"`
Data *BeaconBlockContainerV2Json `json:"data"`
}
type ProduceBlindedBlockResponseJson struct {
Version string `json:"version" enum:"true"`
Data *BlindedBeaconBlockContainerJson `json:"data"`
}
type AggregateAttestationResponseJson struct {
Data *AttestationJson `json:"data"`
}
@@ -645,17 +604,6 @@ type PendingAttestationJson struct {
ProposerIndex string `json:"proposer_index"`
}
type ForkChoiceHeadJson struct {
Root string `json:"root" hex:"true"`
Slot string `json:"slot"`
}
type V2ForkChoiceHeadJson struct {
Root string `json:"root" hex:"true"`
Slot string `json:"slot"`
ExecutionOptimistic bool `json:"execution_optimistic"`
}
type DepositContractJson struct {
ChainId string `json:"chain_id"`
Address string `json:"address"`
@@ -691,34 +639,6 @@ type SyncCommitteeContributionJson struct {
Signature string `json:"signature" hex:"true"`
}
type ForkChoiceNodeJson struct {
Slot string `json:"slot"`
BlockRoot string `json:"block_root" hex:"true"`
ParentRoot string `json:"parent_root" hex:"true"`
JustifiedEpoch string `json:"justified_epoch"`
FinalizedEpoch string `json:"finalized_epoch"`
UnrealizedJustifiedEpoch string `json:"unrealized_justified_epoch"`
UnrealizedFinalizedEpoch string `json:"unrealized_finalized_epoch"`
Balance string `json:"balance"`
Weight string `json:"weight"`
ExecutionOptimistic bool `json:"execution_optimistic"`
ExecutionBlockHash string `json:"execution_block_hash" hex:"true"`
TimeStamp string `json:"timestamp"`
Validity string `json:"validity" enum:"true"`
}
type ForkChoiceDumpJson struct {
JustifiedCheckpoint *CheckpointJson `json:"justified_checkpoint"`
FinalizedCheckpoint *CheckpointJson `json:"finalized_checkpoint"`
BestJustifiedCheckpoint *CheckpointJson `json:"best_justified_checkpoint"`
UnrealizedJustifiedCheckpoint *CheckpointJson `json:"unrealized_justified_checkpoint"`
UnrealizedFinalizedCheckpoint *CheckpointJson `json:"unrealized_finalized_checkpoint"`
ProposerBoostRoot string `json:"proposer_boost_root" hex:"true"`
PreviousProposerBoostRoot string `json:"previous_proposer_boost_root" hex:"true"`
HeadRoot string `json:"head_root" hex:"true"`
ForkChoiceNodes []*ForkChoiceNodeJson `json:"fork_choice_nodes"`
}
type HistoricalSummaryJson struct {
BlockSummaryRoot string `json:"block_summary_root" hex:"true"`
StateSummaryRoot string `json:"state_summary_root" hex:"true"`
@@ -740,49 +660,6 @@ type SszResponse interface {
SSZFinalized() bool
}
type SszResponseJson struct {
Data string `json:"data"`
}
func (ssz *SszResponseJson) SSZData() string {
return ssz.Data
}
func (*SszResponseJson) SSZVersion() string {
return strings.ToLower(ethpbv2.Version_PHASE0.String())
}
func (*SszResponseJson) SSZOptimistic() bool {
return false
}
func (*SszResponseJson) SSZFinalized() bool {
return true
}
type VersionedSSZResponseJson struct {
Version string `json:"version" enum:"true"`
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
Data string `json:"data"`
}
func (ssz *VersionedSSZResponseJson) SSZData() string {
return ssz.Data
}
func (ssz *VersionedSSZResponseJson) SSZVersion() string {
return ssz.Version
}
func (ssz *VersionedSSZResponseJson) SSZOptimistic() bool {
return ssz.ExecutionOptimistic
}
func (ssz *VersionedSSZResponseJson) SSZFinalized() bool {
return ssz.Finalized
}
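
The SszResponse interface above abstracts over plain and versioned SSZ payloads. A minimal sketch of how a caller could consume it, assuming only the interface as declared here; the helper and the response header names are illustrative assumptions, not Prysm's actual API:

package sketch

import (
	"net/http"
	"strconv"
)

// SszResponse mirrors the interface declared above.
type SszResponse interface {
	SSZData() string
	SSZVersion() string
	SSZOptimistic() bool
	SSZFinalized() bool
}

// writeSSZMetadata is a hypothetical helper: it surfaces the interface's
// metadata as response headers and then writes the raw payload string.
func writeSSZMetadata(w http.ResponseWriter, resp SszResponse) {
	w.Header().Set("Eth-Consensus-Version", resp.SSZVersion())
	w.Header().Set("Eth-Execution-Optimistic", strconv.FormatBool(resp.SSZOptimistic()))
	w.Header().Set("Eth-Finalized", strconv.FormatBool(resp.SSZFinalized()))
	// Error intentionally ignored in this sketch.
	_, _ = w.Write([]byte(resp.SSZData()))
}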
// ---------------
// Events.
// ---------------

View File

@@ -4,13 +4,11 @@ go_library(
name = "go_default_library",
srcs = [
"blocks.go",
"config.go",
"handlers.go",
"handlers_pool.go",
"handlers_state.go",
"handlers_validator.go",
"log.go",
"pool.go",
"server.go",
"structs.go",
],
@@ -52,10 +50,8 @@ go_library(
"//consensus-types/validator:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//network/forks:go_default_library",
"//network/http:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/migration:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
@@ -67,7 +63,6 @@ go_library(
"@io_opencensus_go//trace:go_default_library",
"@org_golang_google_grpc//codes:go_default_library",
"@org_golang_google_grpc//status:go_default_library",
"@org_golang_google_protobuf//types/known/emptypb:go_default_library",
],
)
@@ -76,13 +71,11 @@ go_test(
srcs = [
"blinded_blocks_test.go",
"blocks_test.go",
"config_test.go",
"handlers_pool_test.go",
"handlers_state_test.go",
"handlers_test.go",
"handlers_validators_test.go",
"init_test.go",
"pool_test.go",
"server_test.go",
],
embed = [":go_default_library"],
@@ -121,10 +114,8 @@ go_test(
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//encoding/ssz:go_default_library",
"//network/forks:go_default_library",
"//network/http:go_default_library",
"//proto/eth/service:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/migration:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
@@ -133,13 +124,11 @@ go_test(
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_golang_mock//gomock:go_default_library",
"@com_github_gorilla_mux//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_stretchr_testify//mock:go_default_library",
"@org_golang_google_protobuf//types/known/emptypb:go_default_library",
],
)

View File

@@ -1688,22 +1688,6 @@ func (s *Server) GetCommittees(w http.ResponseWriter, r *http.Request) {
http2.WriteJson(w, &GetCommitteesResponse{Data: committees, ExecutionOptimistic: isOptimistic, Finalized: isFinalized})
}
// GetDepositContract retrieves deposit contract address and genesis fork version.
func (*Server) GetDepositContract(w http.ResponseWriter, r *http.Request) {
_, span := trace.StartSpan(r.Context(), "beacon.GetDepositContract")
defer span.End()
http2.WriteJson(w, &DepositContractResponse{
Data: &struct {
ChainId string `json:"chain_id"`
Address string `json:"address"`
}{
ChainId: strconv.FormatUint(params.BeaconConfig().DepositChainID, 10),
Address: params.BeaconConfig().DepositContractAddress,
},
})
}
// GetBlockHeaders retrieves block headers matching the given query. By default it fetches blocks for the current head slot.
func (s *Server) GetBlockHeaders(w http.ResponseWriter, r *http.Request) {
ctx, span := trace.StartSpan(r.Context(), "beacon.GetBlockHeaders")
@@ -1747,6 +1731,11 @@ func (s *Server) GetBlockHeaders(w http.ResponseWriter, r *http.Request) {
}
}
if len(blks) == 0 {
http2.HandleError(w, "No blocks found", http.StatusNotFound)
return
}
isOptimistic := false
isFinalized := true
blkHdrs := make([]*shared.SignedBeaconBlockHeaderContainer, len(blks))

View File

@@ -17,6 +17,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/core"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
"github.com/prysmaticlabs/prysm/v4/config/features"
consensus_types "github.com/prysmaticlabs/prysm/v4/consensus-types"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
@@ -27,6 +28,8 @@ import (
"go.opencensus.io/trace"
)
const broadcastBLSChangesRateLimit = 128
// ListAttestations retrieves attestations known by the node but
// not necessarily incorporated into any block. Allows filtering by committee index or slot.
func (s *Server) ListAttestations(w http.ResponseWriter, r *http.Request) {
@@ -443,3 +446,138 @@ func (s *Server) ListBLSToExecutionChanges(w http.ResponseWriter, r *http.Reques
Data: changes,
})
}
// GetAttesterSlashings retrieves attester slashings known by the node but
// not necessarily incorporated into any block.
func (s *Server) GetAttesterSlashings(w http.ResponseWriter, r *http.Request) {
ctx, span := trace.StartSpan(r.Context(), "beacon.GetAttesterSlashings")
defer span.End()
headState, err := s.ChainInfoFetcher.HeadStateReadOnly(ctx)
if err != nil {
http2.HandleError(w, "Could not get head state: "+err.Error(), http.StatusInternalServerError)
return
}
sourceSlashings := s.SlashingsPool.PendingAttesterSlashings(ctx, headState, true /* return unlimited slashings */)
slashings := shared.AttesterSlashingsFromConsensus(sourceSlashings)
http2.WriteJson(w, &GetAttesterSlashingsResponse{Data: slashings})
}
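
GetAttesterSlashings and GetProposerSlashings (below) back the Beacon API pool read routes. A minimal client-side sketch of fetching the attester slashing pool, assuming a node reachable at baseURL and the standard /eth/v1/beacon/pool/attester_slashings path; the helper is an illustration, not part of this change:

package sketch

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// attesterSlashingsPool is a hypothetical client helper that fetches the
// pool contents from a running beacon node; the base URL is an assumption.
func attesterSlashingsPool(baseURL string) (json.RawMessage, error) {
	resp, err := http.Get(baseURL + "/eth/v1/beacon/pool/attester_slashings")
	if err != nil {
		return nil, err
	}
	defer func() { _ = resp.Body.Close() }()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("unexpected status: %s", resp.Status)
	}
	var body struct {
		Data json.RawMessage `json:"data"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&body); err != nil {
		return nil, err
	}
	return body.Data, nil
}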
// SubmitAttesterSlashing submits an attester slashing object to the node's pool and,
// if it passes validation, the node MUST broadcast it to the network.
func (s *Server) SubmitAttesterSlashing(w http.ResponseWriter, r *http.Request) {
ctx, span := trace.StartSpan(r.Context(), "beacon.SubmitAttesterSlashing")
defer span.End()
var req shared.AttesterSlashing
err := json.NewDecoder(r.Body).Decode(&req)
switch {
case err == io.EOF:
http2.HandleError(w, "No data submitted", http.StatusBadRequest)
return
case err != nil:
http2.HandleError(w, "Could not decode request body: "+err.Error(), http.StatusBadRequest)
return
}
slashing, err := req.ToConsensus()
if err != nil {
http2.HandleError(w, "Could not convert request slashing to consensus slashing: "+err.Error(), http.StatusBadRequest)
return
}
headState, err := s.ChainInfoFetcher.HeadState(ctx)
if err != nil {
http2.HandleError(w, "Could not get head state: "+err.Error(), http.StatusInternalServerError)
return
}
headState, err = transition.ProcessSlotsIfPossible(ctx, headState, slashing.Attestation_1.Data.Slot)
if err != nil {
http2.HandleError(w, "Could not process slots: "+err.Error(), http.StatusInternalServerError)
return
}
err = blocks.VerifyAttesterSlashing(ctx, headState, slashing)
if err != nil {
http2.HandleError(w, "Invalid attester slashing: "+err.Error(), http.StatusBadRequest)
return
}
err = s.SlashingsPool.InsertAttesterSlashing(ctx, headState, slashing)
if err != nil {
http2.HandleError(w, "Could not insert attester slashing into pool: "+err.Error(), http.StatusInternalServerError)
return
}
if !features.Get().DisableBroadcastSlashings {
if err = s.Broadcaster.Broadcast(ctx, slashing); err != nil {
http2.HandleError(w, "Could not broadcast slashing object: "+err.Error(), http.StatusInternalServerError)
return
}
}
}
// GetProposerSlashings retrieves proposer slashings known by the node
// but not necessarily incorporated into any block.
func (s *Server) GetProposerSlashings(w http.ResponseWriter, r *http.Request) {
ctx, span := trace.StartSpan(r.Context(), "beacon.GetProposerSlashings")
defer span.End()
headState, err := s.ChainInfoFetcher.HeadStateReadOnly(ctx)
if err != nil {
http2.HandleError(w, "Could not get head state: "+err.Error(), http.StatusInternalServerError)
return
}
sourceSlashings := s.SlashingsPool.PendingProposerSlashings(ctx, headState, true /* return unlimited slashings */)
slashings := shared.ProposerSlashingsFromConsensus(sourceSlashings)
http2.WriteJson(w, &GetProposerSlashingsResponse{Data: slashings})
}
// SubmitProposerSlashing submits a proposer slashing object to the node's pool and,
// if it passes validation, the node MUST broadcast it to the network.
func (s *Server) SubmitProposerSlashing(w http.ResponseWriter, r *http.Request) {
ctx, span := trace.StartSpan(r.Context(), "beacon.SubmitProposerSlashing")
defer span.End()
var req shared.ProposerSlashing
err := json.NewDecoder(r.Body).Decode(&req)
switch {
case err == io.EOF:
http2.HandleError(w, "No data submitted", http.StatusBadRequest)
return
case err != nil:
http2.HandleError(w, "Could not decode request body: "+err.Error(), http.StatusBadRequest)
return
}
slashing, err := req.ToConsensus()
if err != nil {
http2.HandleError(w, "Could not convert request slashing to consensus slashing: "+err.Error(), http.StatusBadRequest)
return
}
headState, err := s.ChainInfoFetcher.HeadState(ctx)
if err != nil {
http2.HandleError(w, "Could not get head state: "+err.Error(), http.StatusInternalServerError)
return
}
headState, err = transition.ProcessSlotsIfPossible(ctx, headState, slashing.Header_1.Header.Slot)
if err != nil {
http2.HandleError(w, "Could not process slots: "+err.Error(), http.StatusInternalServerError)
return
}
err = blocks.VerifyProposerSlashing(headState, slashing)
if err != nil {
http2.HandleError(w, "Invalid proposer slashing: "+err.Error(), http.StatusBadRequest)
return
}
err = s.SlashingsPool.InsertProposerSlashing(ctx, headState, slashing)
if err != nil {
http2.HandleError(w, "Could not insert proposer slashing into pool: "+err.Error(), http.StatusInternalServerError)
return
}
if !features.Get().DisableBroadcastSlashings {
if err = s.Broadcaster.Broadcast(ctx, slashing); err != nil {
http2.HandleError(w, "Could not broadcast slashing object: "+err.Error(), http.StatusInternalServerError)
return
}
}
}
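
Both submit handlers follow the same sequence: decode the JSON body, convert it to the consensus type, advance the head state to the slashing's slot, verify it, insert it into the pool, and broadcast it unless DisableBroadcastSlashings is set. A client-side sketch of submitting a proposer slashing, assuming the standard Beacon API route; the helper is illustrative only:

package sketch

import (
	"bytes"
	"fmt"
	"net/http"
)

// submitProposerSlashing is a hypothetical client helper: it POSTs an
// already-encoded proposer slashing JSON document to the pool route.
// The base URL and path are assumptions, not verified against this change.
func submitProposerSlashing(baseURL string, slashingJSON []byte) error {
	resp, err := http.Post(
		baseURL+"/eth/v1/beacon/pool/proposer_slashings",
		"application/json",
		bytes.NewReader(slashingJSON),
	)
	if err != nil {
		return err
	}
	defer func() { _ = resp.Body.Close() }()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("node rejected slashing: %s", resp.Status)
	}
	return nil
}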

View File

@@ -2,6 +2,7 @@ package beacon
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net/http"
@@ -19,6 +20,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/blstoexec"
blstoexecmock "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/blstoexec/mock"
slashingsmock "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/slashings/mock"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/synccommittee"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/voluntaryexits/mock"
p2pMock "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
@@ -997,6 +999,563 @@ func TestSubmitSignedBLSToExecutionChanges_Failures(t *testing.T) {
}
}
func TestGetAttesterSlashings(t *testing.T) {
bs, err := util.NewBeaconState()
require.NoError(t, err)
slashing1 := &ethpbv1alpha1.AttesterSlashing{
Attestation_1: &ethpbv1alpha1.IndexedAttestation{
AttestingIndices: []uint64{1, 10},
Data: &ethpbv1alpha1.AttestationData{
Slot: 1,
CommitteeIndex: 1,
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot1"), 32),
Source: &ethpbv1alpha1.Checkpoint{
Epoch: 1,
Root: bytesutil.PadTo([]byte("sourceroot1"), 32),
},
Target: &ethpbv1alpha1.Checkpoint{
Epoch: 10,
Root: bytesutil.PadTo([]byte("targetroot1"), 32),
},
},
Signature: bytesutil.PadTo([]byte("signature1"), 96),
},
Attestation_2: &ethpbv1alpha1.IndexedAttestation{
AttestingIndices: []uint64{2, 20},
Data: &ethpbv1alpha1.AttestationData{
Slot: 2,
CommitteeIndex: 2,
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot2"), 32),
Source: &ethpbv1alpha1.Checkpoint{
Epoch: 2,
Root: bytesutil.PadTo([]byte("sourceroot2"), 32),
},
Target: &ethpbv1alpha1.Checkpoint{
Epoch: 20,
Root: bytesutil.PadTo([]byte("targetroot2"), 32),
},
},
Signature: bytesutil.PadTo([]byte("signature2"), 96),
},
}
slashing2 := &ethpbv1alpha1.AttesterSlashing{
Attestation_1: &ethpbv1alpha1.IndexedAttestation{
AttestingIndices: []uint64{3, 30},
Data: &ethpbv1alpha1.AttestationData{
Slot: 3,
CommitteeIndex: 3,
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot3"), 32),
Source: &ethpbv1alpha1.Checkpoint{
Epoch: 3,
Root: bytesutil.PadTo([]byte("sourceroot3"), 32),
},
Target: &ethpbv1alpha1.Checkpoint{
Epoch: 30,
Root: bytesutil.PadTo([]byte("targetroot3"), 32),
},
},
Signature: bytesutil.PadTo([]byte("signature3"), 96),
},
Attestation_2: &ethpbv1alpha1.IndexedAttestation{
AttestingIndices: []uint64{4, 40},
Data: &ethpbv1alpha1.AttestationData{
Slot: 4,
CommitteeIndex: 4,
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot4"), 32),
Source: &ethpbv1alpha1.Checkpoint{
Epoch: 4,
Root: bytesutil.PadTo([]byte("sourceroot4"), 32),
},
Target: &ethpbv1alpha1.Checkpoint{
Epoch: 40,
Root: bytesutil.PadTo([]byte("targetroot4"), 32),
},
},
Signature: bytesutil.PadTo([]byte("signature4"), 96),
},
}
s := &Server{
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
SlashingsPool: &slashingsmock.PoolMock{PendingAttSlashings: []*ethpbv1alpha1.AttesterSlashing{slashing1, slashing2}},
}
request := httptest.NewRequest(http.MethodGet, "http://example.com/beacon/pool/attester_slashings", nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.GetAttesterSlashings(writer, request)
require.Equal(t, http.StatusOK, writer.Code)
resp := &GetAttesterSlashingsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.NotNil(t, resp)
require.NotNil(t, resp.Data)
assert.Equal(t, 2, len(resp.Data))
}
func TestGetProposerSlashings(t *testing.T) {
bs, err := util.NewBeaconState()
require.NoError(t, err)
slashing1 := &ethpbv1alpha1.ProposerSlashing{
Header_1: &ethpbv1alpha1.SignedBeaconBlockHeader{
Header: &ethpbv1alpha1.BeaconBlockHeader{
Slot: 1,
ProposerIndex: 1,
ParentRoot: bytesutil.PadTo([]byte("parentroot1"), 32),
StateRoot: bytesutil.PadTo([]byte("stateroot1"), 32),
BodyRoot: bytesutil.PadTo([]byte("bodyroot1"), 32),
},
Signature: bytesutil.PadTo([]byte("signature1"), 96),
},
Header_2: &ethpbv1alpha1.SignedBeaconBlockHeader{
Header: &ethpbv1alpha1.BeaconBlockHeader{
Slot: 2,
ProposerIndex: 2,
ParentRoot: bytesutil.PadTo([]byte("parentroot2"), 32),
StateRoot: bytesutil.PadTo([]byte("stateroot2"), 32),
BodyRoot: bytesutil.PadTo([]byte("bodyroot2"), 32),
},
Signature: bytesutil.PadTo([]byte("signature2"), 96),
},
}
slashing2 := &ethpbv1alpha1.ProposerSlashing{
Header_1: &ethpbv1alpha1.SignedBeaconBlockHeader{
Header: &ethpbv1alpha1.BeaconBlockHeader{
Slot: 3,
ProposerIndex: 3,
ParentRoot: bytesutil.PadTo([]byte("parentroot3"), 32),
StateRoot: bytesutil.PadTo([]byte("stateroot3"), 32),
BodyRoot: bytesutil.PadTo([]byte("bodyroot3"), 32),
},
Signature: bytesutil.PadTo([]byte("signature3"), 96),
},
Header_2: &ethpbv1alpha1.SignedBeaconBlockHeader{
Header: &ethpbv1alpha1.BeaconBlockHeader{
Slot: 4,
ProposerIndex: 4,
ParentRoot: bytesutil.PadTo([]byte("parentroot4"), 32),
StateRoot: bytesutil.PadTo([]byte("stateroot4"), 32),
BodyRoot: bytesutil.PadTo([]byte("bodyroot4"), 32),
},
Signature: bytesutil.PadTo([]byte("signature4"), 96),
},
}
s := &Server{
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
SlashingsPool: &slashingsmock.PoolMock{PendingPropSlashings: []*ethpbv1alpha1.ProposerSlashing{slashing1, slashing2}},
}
request := httptest.NewRequest(http.MethodGet, "http://example.com/beacon/pool/attester_slashings", nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.GetProposerSlashings(writer, request)
require.Equal(t, http.StatusOK, writer.Code)
resp := &GetProposerSlashingsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.NotNil(t, resp)
require.NotNil(t, resp.Data)
assert.Equal(t, 2, len(resp.Data))
}
func TestSubmitAttesterSlashing_Ok(t *testing.T) {
ctx := context.Background()
transition.SkipSlotCache.Disable()
defer transition.SkipSlotCache.Enable()
_, keys, err := util.DeterministicDepositsAndKeys(1)
require.NoError(t, err)
validator := &ethpbv1alpha1.Validator{
PublicKey: keys[0].PublicKey().Marshal(),
}
bs, err := util.NewBeaconState(func(state *ethpbv1alpha1.BeaconState) error {
state.Validators = []*ethpbv1alpha1.Validator{validator}
return nil
})
require.NoError(t, err)
slashing := &ethpbv1alpha1.AttesterSlashing{
Attestation_1: &ethpbv1alpha1.IndexedAttestation{
AttestingIndices: []uint64{0},
Data: &ethpbv1alpha1.AttestationData{
Slot: 1,
CommitteeIndex: 1,
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot1"), 32),
Source: &ethpbv1alpha1.Checkpoint{
Epoch: 1,
Root: bytesutil.PadTo([]byte("sourceroot1"), 32),
},
Target: &ethpbv1alpha1.Checkpoint{
Epoch: 10,
Root: bytesutil.PadTo([]byte("targetroot1"), 32),
},
},
Signature: make([]byte, 96),
},
Attestation_2: &ethpbv1alpha1.IndexedAttestation{
AttestingIndices: []uint64{0},
Data: &ethpbv1alpha1.AttestationData{
Slot: 1,
CommitteeIndex: 1,
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot2"), 32),
Source: &ethpbv1alpha1.Checkpoint{
Epoch: 1,
Root: bytesutil.PadTo([]byte("sourceroot2"), 32),
},
Target: &ethpbv1alpha1.Checkpoint{
Epoch: 10,
Root: bytesutil.PadTo([]byte("targetroot2"), 32),
},
},
Signature: make([]byte, 96),
},
}
for _, att := range []*ethpbv1alpha1.IndexedAttestation{slashing.Attestation_1, slashing.Attestation_2} {
sb, err := signing.ComputeDomainAndSign(bs, att.Data.Target.Epoch, att.Data, params.BeaconConfig().DomainBeaconAttester, keys[0])
require.NoError(t, err)
sig, err := bls.SignatureFromBytes(sb)
require.NoError(t, err)
att.Signature = sig.Marshal()
}
broadcaster := &p2pMock.MockBroadcaster{}
s := &Server{
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
SlashingsPool: &slashingsmock.PoolMock{},
Broadcaster: broadcaster,
}
toSubmit := shared.AttesterSlashingsFromConsensus([]*ethpbv1alpha1.AttesterSlashing{slashing})
b, err := json.Marshal(toSubmit[0])
require.NoError(t, err)
var body bytes.Buffer
_, err = body.Write(b)
require.NoError(t, err)
request := httptest.NewRequest(http.MethodPost, "http://example.com/beacon/pool/attester_slashings", &body)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.SubmitAttesterSlashing(writer, request)
require.Equal(t, http.StatusOK, writer.Code)
pendingSlashings := s.SlashingsPool.PendingAttesterSlashings(ctx, bs, true)
require.Equal(t, 1, len(pendingSlashings))
assert.DeepEqual(t, slashing, pendingSlashings[0])
assert.Equal(t, true, broadcaster.BroadcastCalled.Load())
require.Equal(t, 1, broadcaster.NumMessages())
_, ok := broadcaster.BroadcastMessages[0].(*ethpbv1alpha1.AttesterSlashing)
assert.Equal(t, true, ok)
}
func TestSubmitAttesterSlashing_AcrossFork(t *testing.T) {
ctx := context.Background()
transition.SkipSlotCache.Disable()
defer transition.SkipSlotCache.Enable()
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
config.AltairForkEpoch = 1
params.OverrideBeaconConfig(config)
bs, keys := util.DeterministicGenesisState(t, 1)
slashing := &ethpbv1alpha1.AttesterSlashing{
Attestation_1: &ethpbv1alpha1.IndexedAttestation{
AttestingIndices: []uint64{0},
Data: &ethpbv1alpha1.AttestationData{
Slot: params.BeaconConfig().SlotsPerEpoch,
CommitteeIndex: 1,
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot1"), 32),
Source: &ethpbv1alpha1.Checkpoint{
Epoch: 1,
Root: bytesutil.PadTo([]byte("sourceroot1"), 32),
},
Target: &ethpbv1alpha1.Checkpoint{
Epoch: 10,
Root: bytesutil.PadTo([]byte("targetroot1"), 32),
},
},
Signature: make([]byte, 96),
},
Attestation_2: &ethpbv1alpha1.IndexedAttestation{
AttestingIndices: []uint64{0},
Data: &ethpbv1alpha1.AttestationData{
Slot: params.BeaconConfig().SlotsPerEpoch,
CommitteeIndex: 1,
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot2"), 32),
Source: &ethpbv1alpha1.Checkpoint{
Epoch: 1,
Root: bytesutil.PadTo([]byte("sourceroot2"), 32),
},
Target: &ethpbv1alpha1.Checkpoint{
Epoch: 10,
Root: bytesutil.PadTo([]byte("targetroot2"), 32),
},
},
Signature: make([]byte, 96),
},
}
newBs := bs.Copy()
newBs, err := transition.ProcessSlots(ctx, newBs, params.BeaconConfig().SlotsPerEpoch)
require.NoError(t, err)
for _, att := range []*ethpbv1alpha1.IndexedAttestation{slashing.Attestation_1, slashing.Attestation_2} {
sb, err := signing.ComputeDomainAndSign(newBs, att.Data.Target.Epoch, att.Data, params.BeaconConfig().DomainBeaconAttester, keys[0])
require.NoError(t, err)
sig, err := bls.SignatureFromBytes(sb)
require.NoError(t, err)
att.Signature = sig.Marshal()
}
broadcaster := &p2pMock.MockBroadcaster{}
s := &Server{
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
SlashingsPool: &slashingsmock.PoolMock{},
Broadcaster: broadcaster,
}
toSubmit := shared.AttesterSlashingsFromConsensus([]*ethpbv1alpha1.AttesterSlashing{slashing})
b, err := json.Marshal(toSubmit[0])
require.NoError(t, err)
var body bytes.Buffer
_, err = body.Write(b)
require.NoError(t, err)
request := httptest.NewRequest(http.MethodPost, "http://example.com/beacon/pool/attester_slashings", &body)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.SubmitAttesterSlashing(writer, request)
require.Equal(t, http.StatusOK, writer.Code)
pendingSlashings := s.SlashingsPool.PendingAttesterSlashings(ctx, bs, true)
require.Equal(t, 1, len(pendingSlashings))
assert.DeepEqual(t, slashing, pendingSlashings[0])
assert.Equal(t, true, broadcaster.BroadcastCalled.Load())
require.Equal(t, 1, broadcaster.NumMessages())
_, ok := broadcaster.BroadcastMessages[0].(*ethpbv1alpha1.AttesterSlashing)
assert.Equal(t, true, ok)
}
func TestSubmitAttesterSlashing_InvalidSlashing(t *testing.T) {
bs, err := util.NewBeaconState()
require.NoError(t, err)
broadcaster := &p2pMock.MockBroadcaster{}
s := &Server{
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
SlashingsPool: &slashingsmock.PoolMock{},
Broadcaster: broadcaster,
}
var body bytes.Buffer
_, err = body.WriteString(invalidAttesterSlashing)
require.NoError(t, err)
request := httptest.NewRequest(http.MethodPost, "http://example.com/beacon/pool/attester_slashings", &body)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.SubmitAttesterSlashing(writer, request)
require.Equal(t, http.StatusBadRequest, writer.Code)
e := &http2.DefaultErrorJson{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusBadRequest, e.Code)
assert.StringContains(t, "Invalid attester slashing", e.Message)
}
func TestSubmitProposerSlashing_Ok(t *testing.T) {
ctx := context.Background()
transition.SkipSlotCache.Disable()
defer transition.SkipSlotCache.Enable()
_, keys, err := util.DeterministicDepositsAndKeys(1)
require.NoError(t, err)
validator := &ethpbv1alpha1.Validator{
PublicKey: keys[0].PublicKey().Marshal(),
WithdrawableEpoch: primitives.Epoch(1),
}
bs, err := util.NewBeaconState(func(state *ethpbv1alpha1.BeaconState) error {
state.Validators = []*ethpbv1alpha1.Validator{validator}
return nil
})
require.NoError(t, err)
slashing := &ethpbv1alpha1.ProposerSlashing{
Header_1: &ethpbv1alpha1.SignedBeaconBlockHeader{
Header: &ethpbv1alpha1.BeaconBlockHeader{
Slot: 1,
ProposerIndex: 0,
ParentRoot: bytesutil.PadTo([]byte("parentroot1"), 32),
StateRoot: bytesutil.PadTo([]byte("stateroot1"), 32),
BodyRoot: bytesutil.PadTo([]byte("bodyroot1"), 32),
},
Signature: make([]byte, 96),
},
Header_2: &ethpbv1alpha1.SignedBeaconBlockHeader{
Header: &ethpbv1alpha1.BeaconBlockHeader{
Slot: 1,
ProposerIndex: 0,
ParentRoot: bytesutil.PadTo([]byte("parentroot2"), 32),
StateRoot: bytesutil.PadTo([]byte("stateroot2"), 32),
BodyRoot: bytesutil.PadTo([]byte("bodyroot2"), 32),
},
Signature: make([]byte, 96),
},
}
for _, h := range []*ethpbv1alpha1.SignedBeaconBlockHeader{slashing.Header_1, slashing.Header_2} {
sb, err := signing.ComputeDomainAndSign(
bs,
slots.ToEpoch(h.Header.Slot),
h.Header,
params.BeaconConfig().DomainBeaconProposer,
keys[0],
)
require.NoError(t, err)
sig, err := bls.SignatureFromBytes(sb)
require.NoError(t, err)
h.Signature = sig.Marshal()
}
broadcaster := &p2pMock.MockBroadcaster{}
s := &Server{
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
SlashingsPool: &slashingsmock.PoolMock{},
Broadcaster: broadcaster,
}
toSubmit := shared.ProposerSlashingsFromConsensus([]*ethpbv1alpha1.ProposerSlashing{slashing})
b, err := json.Marshal(toSubmit[0])
require.NoError(t, err)
var body bytes.Buffer
_, err = body.Write(b)
require.NoError(t, err)
request := httptest.NewRequest(http.MethodPost, "http://example.com/beacon/pool/proposer_slashings", &body)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.SubmitProposerSlashing(writer, request)
require.Equal(t, http.StatusOK, writer.Code)
pendingSlashings := s.SlashingsPool.PendingProposerSlashings(ctx, bs, true)
require.Equal(t, 1, len(pendingSlashings))
assert.DeepEqual(t, slashing, pendingSlashings[0])
assert.Equal(t, true, broadcaster.BroadcastCalled.Load())
require.Equal(t, 1, broadcaster.NumMessages())
_, ok := broadcaster.BroadcastMessages[0].(*ethpbv1alpha1.ProposerSlashing)
assert.Equal(t, true, ok)
}
func TestSubmitProposerSlashing_AcrossFork(t *testing.T) {
ctx := context.Background()
transition.SkipSlotCache.Disable()
defer transition.SkipSlotCache.Enable()
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
config.AltairForkEpoch = 1
params.OverrideBeaconConfig(config)
bs, keys := util.DeterministicGenesisState(t, 1)
slashing := &ethpbv1alpha1.ProposerSlashing{
Header_1: &ethpbv1alpha1.SignedBeaconBlockHeader{
Header: &ethpbv1alpha1.BeaconBlockHeader{
Slot: params.BeaconConfig().SlotsPerEpoch,
ProposerIndex: 0,
ParentRoot: bytesutil.PadTo([]byte("parentroot1"), 32),
StateRoot: bytesutil.PadTo([]byte("stateroot1"), 32),
BodyRoot: bytesutil.PadTo([]byte("bodyroot1"), 32),
},
Signature: make([]byte, 96),
},
Header_2: &ethpbv1alpha1.SignedBeaconBlockHeader{
Header: &ethpbv1alpha1.BeaconBlockHeader{
Slot: params.BeaconConfig().SlotsPerEpoch,
ProposerIndex: 0,
ParentRoot: bytesutil.PadTo([]byte("parentroot2"), 32),
StateRoot: bytesutil.PadTo([]byte("stateroot2"), 32),
BodyRoot: bytesutil.PadTo([]byte("bodyroot2"), 32),
},
Signature: make([]byte, 96),
},
}
newBs := bs.Copy()
newBs, err := transition.ProcessSlots(ctx, newBs, params.BeaconConfig().SlotsPerEpoch)
require.NoError(t, err)
for _, h := range []*ethpbv1alpha1.SignedBeaconBlockHeader{slashing.Header_1, slashing.Header_2} {
sb, err := signing.ComputeDomainAndSign(
newBs,
slots.ToEpoch(h.Header.Slot),
h.Header,
params.BeaconConfig().DomainBeaconProposer,
keys[0],
)
require.NoError(t, err)
sig, err := bls.SignatureFromBytes(sb)
require.NoError(t, err)
h.Signature = sig.Marshal()
}
broadcaster := &p2pMock.MockBroadcaster{}
s := &Server{
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
SlashingsPool: &slashingsmock.PoolMock{},
Broadcaster: broadcaster,
}
toSubmit := shared.ProposerSlashingsFromConsensus([]*ethpbv1alpha1.ProposerSlashing{slashing})
b, err := json.Marshal(toSubmit[0])
require.NoError(t, err)
var body bytes.Buffer
_, err = body.Write(b)
require.NoError(t, err)
request := httptest.NewRequest(http.MethodPost, "http://example.com/beacon/pool/proposer_slashings", &body)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.SubmitProposerSlashing(writer, request)
require.Equal(t, http.StatusOK, writer.Code)
pendingSlashings := s.SlashingsPool.PendingProposerSlashings(ctx, bs, true)
require.Equal(t, 1, len(pendingSlashings))
assert.DeepEqual(t, slashing, pendingSlashings[0])
assert.Equal(t, true, broadcaster.BroadcastCalled.Load())
require.Equal(t, 1, broadcaster.NumMessages())
_, ok := broadcaster.BroadcastMessages[0].(*ethpbv1alpha1.ProposerSlashing)
assert.Equal(t, true, ok)
}
func TestSubmitProposerSlashing_InvalidSlashing(t *testing.T) {
bs, err := util.NewBeaconState()
require.NoError(t, err)
broadcaster := &p2pMock.MockBroadcaster{}
s := &Server{
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
SlashingsPool: &slashingsmock.PoolMock{},
Broadcaster: broadcaster,
}
var body bytes.Buffer
_, err = body.WriteString(invalidProposerSlashing)
require.NoError(t, err)
request := httptest.NewRequest(http.MethodPost, "http://example.com/beacon/pool/proposer_slashings", &body)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.SubmitProposerSlashing(writer, request)
require.Equal(t, http.StatusBadRequest, writer.Code)
e := &http2.DefaultErrorJson{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusBadRequest, e.Code)
assert.StringContains(t, "Invalid proposer slashing", e.Message)
}
var (
singleAtt = `[
{
@@ -1142,4 +1701,68 @@ var (
"signature": "foo"
}
]`
// signatures are invalid
invalidAttesterSlashing = `{
"attestation_1": {
"attesting_indices": [
"1"
],
"signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505",
"data": {
"slot": "1",
"index": "1",
"beacon_block_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"source": {
"epoch": "1",
"root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
},
"target": {
"epoch": "1",
"root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
}
}
},
"attestation_2": {
"attesting_indices": [
"1"
],
"signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505",
"data": {
"slot": "1",
"index": "1",
"beacon_block_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"source": {
"epoch": "1",
"root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
},
"target": {
"epoch": "1",
"root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
}
}
}
}`
// signatures are invalid
invalidProposerSlashing = `{
"signed_header_1": {
"message": {
"slot": "1",
"proposer_index": "1",
"parent_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"state_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"body_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
},
"signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"
},
"signed_header_2": {
"message": {
"slot": "1",
"proposer_index": "1",
"parent_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"state_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"body_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
},
"signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"
}
}`
)

View File

@@ -2670,6 +2670,7 @@ func TestGetBlockHeaders(t *testing.T) {
writer.Body = &bytes.Buffer{}
bs.GetBlockHeaders(writer, request)
require.Equal(t, http.StatusOK, writer.Code)
resp := &GetBlockHeadersResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
@@ -2721,6 +2722,7 @@ func TestGetBlockHeaders(t *testing.T) {
writer.Body = &bytes.Buffer{}
bs.GetBlockHeaders(writer, request)
require.Equal(t, http.StatusOK, writer.Code)
resp := &GetBlockHeadersResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
assert.Equal(t, true, resp.ExecutionOptimistic)
@@ -2764,6 +2766,7 @@ func TestGetBlockHeaders(t *testing.T) {
writer.Body = &bytes.Buffer{}
bs.GetBlockHeaders(writer, request)
require.Equal(t, http.StatusOK, writer.Code)
resp := &GetBlockHeadersResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
assert.Equal(t, true, resp.Finalized)
@@ -2777,6 +2780,7 @@ func TestGetBlockHeaders(t *testing.T) {
writer.Body = &bytes.Buffer{}
bs.GetBlockHeaders(writer, request)
require.Equal(t, http.StatusOK, writer.Code)
resp := &GetBlockHeadersResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
assert.Equal(t, false, resp.Finalized)
@@ -2789,10 +2793,25 @@ func TestGetBlockHeaders(t *testing.T) {
writer.Body = &bytes.Buffer{}
bs.GetBlockHeaders(writer, request)
require.Equal(t, http.StatusOK, writer.Code)
resp := &GetBlockHeadersResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
assert.Equal(t, false, resp.Finalized)
})
t.Run("no blocks found", func(t *testing.T) {
urlWithParams := fmt.Sprintf("%s?parent_root=%s", url, hexutil.Encode(bytes.Repeat([]byte{1}, 32)))
request := httptest.NewRequest(http.MethodGet, urlWithParams, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
bs.GetBlockHeaders(writer, request)
require.Equal(t, http.StatusNotFound, writer.Code)
e := &http2.DefaultErrorJson{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusNotFound, e.Code)
assert.StringContains(t, "No blocks found", e.Message)
})
})
}
@@ -3128,23 +3147,3 @@ func TestGetGenesis(t *testing.T) {
assert.StringContains(t, "Chain genesis info is not yet known", e.Message)
})
}
func TestGetDepositContract(t *testing.T) {
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig().Copy()
config.DepositChainID = uint64(10)
config.DepositContractAddress = "0x4242424242424242424242424242424242424242"
params.OverrideBeaconConfig(config)
request := httptest.NewRequest(http.MethodGet, "/eth/v1/beacon/states/{state_id}/finality_checkpoints", nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s := &Server{}
s.GetDepositContract(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
response := DepositContractResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &response))
assert.Equal(t, "10", response.Data.ChainId)
assert.Equal(t, "0x4242424242424242424242424242424242424242", response.Data.Address)
}

View File

@@ -1,7 +1,9 @@
package beacon
import (
"encoding/json"
"fmt"
"io"
"net/http"
"strconv"
"strings"
@@ -49,7 +51,32 @@ func (s *Server) GetValidators(w http.ResponseWriter, r *http.Request) {
}
isFinalized := s.FinalizationFetcher.IsFinalized(ctx, blockRoot)
rawIds := r.URL.Query()["id"]
var req GetValidatorsRequest
if r.Method == http.MethodPost {
err = json.NewDecoder(r.Body).Decode(&req)
switch {
case err == io.EOF:
http2.HandleError(w, "No data submitted", http.StatusBadRequest)
return
case err != nil:
http2.HandleError(w, "Could not decode request body: "+err.Error(), http.StatusBadRequest)
return
}
}
var statuses []string
var rawIds []string
if r.Method == http.MethodGet {
rawIds = r.URL.Query()["id"]
statuses = r.URL.Query()["status"]
} else {
rawIds = req.Ids
statuses = req.Statuses
}
for i, ss := range statuses {
statuses[i] = strings.ToLower(ss)
}
ids, ok := decodeIds(w, st, rawIds, true /* ignore unknown */)
if !ok {
return
@@ -72,11 +99,6 @@ func (s *Server) GetValidators(w http.ResponseWriter, r *http.Request) {
epoch := slots.ToEpoch(st.Slot())
allBalances := st.Balances()
statuses := r.URL.Query()["status"]
for i, ss := range statuses {
statuses[i] = strings.ToLower(ss)
}
// Exit early if no matching validators were found or we don't want to further filter validators by status.
if len(readOnlyVals) == 0 || len(statuses) == 0 {
containers := make([]*ValidatorContainer, len(readOnlyVals))
@@ -234,7 +256,21 @@ func (bs *Server) GetValidatorBalances(w http.ResponseWriter, r *http.Request) {
}
isFinalized := bs.FinalizationFetcher.IsFinalized(ctx, blockRoot)
rawIds := r.URL.Query()["id"]
var rawIds []string
if r.Method == http.MethodGet {
rawIds = r.URL.Query()["id"]
} else {
err = json.NewDecoder(r.Body).Decode(&rawIds)
switch {
case err == io.EOF:
http2.HandleError(w, "No data submitted", http.StatusBadRequest)
return
case err != nil:
http2.HandleError(w, "Could not decode request body: "+err.Error(), http.StatusBadRequest)
return
}
}
ids, ok := decodeIds(w, st, rawIds, true /* ignore unknown */)
if !ok {
return
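
The new POST variants accept their filters in the request body: GetValidators reads a JSON object with ids and statuses, while GetValidatorBalances reads a bare JSON array of ids. A minimal client sketch under those assumptions; the JSON field names and route layout mirror the tests and are assumptions, not a verified API:

package sketch

import (
	"bytes"
	"encoding/json"
	"net/http"
)

// validatorsPOSTBody mirrors the request shape read by the POST variant of
// GetValidators; the JSON tags are assumptions based on the tests.
type validatorsPOSTBody struct {
	Ids      []string `json:"ids"`
	Statuses []string `json:"statuses"`
}

// queryValidators is a hypothetical helper; the caller is responsible for
// closing the returned response body.
func queryValidators(baseURL string) (*http.Response, error) {
	body, err := json.Marshal(validatorsPOSTBody{
		Ids:      []string{"0", "1"},
		Statuses: []string{"active_ongoing"},
	})
	if err != nil {
		return nil, err
	}
	// GetValidatorBalances' POST variant instead takes a bare JSON array of
	// ids, e.g. ["0","1"], on the .../validator_balances route.
	return http.Post(
		baseURL+"/eth/v1/beacon/states/head/validators",
		"application/json",
		bytes.NewReader(body),
	)
}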

View File

@@ -6,6 +6,7 @@ import (
"fmt"
"net/http"
"net/http/httptest"
"strconv"
"strings"
"testing"
@@ -26,8 +27,12 @@ import (
)
func TestGetValidators(t *testing.T) {
const exitedValIndex = 3
var st state.BeaconState
st, _ = util.DeterministicGenesisState(t, 8192)
st, _ = util.DeterministicGenesisState(t, 4)
vals := st.Validators()
vals[exitedValIndex].ExitEpoch = 0
require.NoError(t, st.SetValidators(vals))
t.Run("get all", func(t *testing.T) {
chainService := &chainMock.ChainService{}
@@ -49,7 +54,7 @@ func TestGetValidators(t *testing.T) {
assert.Equal(t, http.StatusOK, writer.Code)
resp := &GetValidatorsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.Equal(t, 8192, len(resp.Data))
require.Equal(t, 4, len(resp.Data))
val := resp.Data[0]
assert.Equal(t, "0", val.Index)
assert.Equal(t, "32000000000", val.Balance)
@@ -77,7 +82,7 @@ func TestGetValidators(t *testing.T) {
request := httptest.NewRequest(
http.MethodGet,
"http://example.com/eth/v1/beacon/states/{state_id}/validators?id=15&id=26",
"http://example.com/eth/v1/beacon/states/{state_id}/validators?id=0&id=1",
nil,
)
request = mux.SetURLVars(request, map[string]string{"state_id": "head"})
@@ -89,8 +94,8 @@ func TestGetValidators(t *testing.T) {
resp := &GetValidatorsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.Equal(t, 2, len(resp.Data))
assert.Equal(t, "15", resp.Data[0].Index)
assert.Equal(t, "26", resp.Data[1].Index)
assert.Equal(t, "0", resp.Data[0].Index)
assert.Equal(t, "1", resp.Data[1].Index)
})
t.Run("get by pubkey", func(t *testing.T) {
chainService := &chainMock.ChainService{}
@@ -103,8 +108,8 @@ func TestGetValidators(t *testing.T) {
FinalizationFetcher: chainService,
}
pubkey1 := st.PubkeyAtIndex(primitives.ValidatorIndex(20))
pubkey2 := st.PubkeyAtIndex(primitives.ValidatorIndex(66))
pubkey1 := st.PubkeyAtIndex(primitives.ValidatorIndex(0))
pubkey2 := st.PubkeyAtIndex(primitives.ValidatorIndex(1))
hexPubkey1 := hexutil.Encode(pubkey1[:])
hexPubkey2 := hexutil.Encode(pubkey2[:])
request := httptest.NewRequest(
@@ -121,8 +126,8 @@ func TestGetValidators(t *testing.T) {
resp := &GetValidatorsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.Equal(t, 2, len(resp.Data))
assert.Equal(t, "20", resp.Data[0].Index)
assert.Equal(t, "66", resp.Data[1].Index)
assert.Equal(t, "0", resp.Data[0].Index)
assert.Equal(t, "1", resp.Data[1].Index)
})
t.Run("get by both index and pubkey", func(t *testing.T) {
chainService := &chainMock.ChainService{}
@@ -135,11 +140,11 @@ func TestGetValidators(t *testing.T) {
FinalizationFetcher: chainService,
}
pubkey := st.PubkeyAtIndex(primitives.ValidatorIndex(20))
pubkey := st.PubkeyAtIndex(primitives.ValidatorIndex(0))
hexPubkey := hexutil.Encode(pubkey[:])
request := httptest.NewRequest(
http.MethodGet,
fmt.Sprintf("http://example.com/eth/v1/beacon/states/{state_id}/validators?id=%s&id=60", hexPubkey),
fmt.Sprintf("http://example.com/eth/v1/beacon/states/{state_id}/validators?id=%s&id=1", hexPubkey),
nil,
)
request = mux.SetURLVars(request, map[string]string{"state_id": "head"})
@@ -151,8 +156,8 @@ func TestGetValidators(t *testing.T) {
resp := &GetValidatorsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.Equal(t, 2, len(resp.Data))
assert.Equal(t, "20", resp.Data[0].Index)
assert.Equal(t, "60", resp.Data[1].Index)
assert.Equal(t, "0", resp.Data[0].Index)
assert.Equal(t, "1", resp.Data[1].Index)
})
t.Run("state ID required", func(t *testing.T) {
s := Server{
@@ -275,11 +280,139 @@ func TestGetValidators(t *testing.T) {
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
assert.Equal(t, true, resp.Finalized)
})
t.Run("POST", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
Stater: &testutil.MockStater{
BeaconState: st,
},
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
FinalizationFetcher: chainService,
}
var body bytes.Buffer
req := &GetValidatorsRequest{
Ids: []string{"0", strconv.Itoa(exitedValIndex)},
Statuses: []string{"exited"},
}
b, err := json.Marshal(req)
require.NoError(t, err)
_, err = body.Write(b)
require.NoError(t, err)
request := httptest.NewRequest(
http.MethodPost,
"http://example.com/eth/v1/beacon/states/{state_id}/validators",
&body,
)
request = mux.SetURLVars(request, map[string]string{"state_id": "head"})
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.GetValidators(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &GetValidatorsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.Equal(t, 1, len(resp.Data))
assert.Equal(t, "3", resp.Data[0].Index)
})
t.Run("POST nil values", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
Stater: &testutil.MockStater{
BeaconState: st,
},
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
FinalizationFetcher: chainService,
}
var body bytes.Buffer
req := &GetValidatorsRequest{
Ids: nil,
Statuses: nil,
}
b, err := json.Marshal(req)
require.NoError(t, err)
_, err = body.Write(b)
require.NoError(t, err)
request := httptest.NewRequest(
http.MethodPost,
"http://example.com/eth/v1/beacon/states/{state_id}/validators",
&body,
)
request = mux.SetURLVars(request, map[string]string{"state_id": "head"})
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.GetValidators(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &GetValidatorsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.Equal(t, 4, len(resp.Data))
})
t.Run("POST empty", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
Stater: &testutil.MockStater{
BeaconState: st,
},
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
FinalizationFetcher: chainService,
}
request := httptest.NewRequest(
http.MethodPost,
"http://example.com/eth/v1/beacon/states/{state_id}/validators",
nil,
)
request = mux.SetURLVars(request, map[string]string{"state_id": "head"})
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.GetValidators(writer, request)
assert.Equal(t, http.StatusBadRequest, writer.Code)
e := &http2.DefaultErrorJson{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusBadRequest, e.Code)
assert.StringContains(t, "No data submitted", e.Message)
})
t.Run("POST invalid", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
Stater: &testutil.MockStater{
BeaconState: st,
},
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
FinalizationFetcher: chainService,
}
body := bytes.Buffer{}
_, err := body.WriteString("foo")
require.NoError(t, err)
request := httptest.NewRequest(
http.MethodPost,
"http://example.com/eth/v1/beacon/states/{state_id}/validators",
&body,
)
request = mux.SetURLVars(request, map[string]string{"state_id": "head"})
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.GetValidators(writer, request)
assert.Equal(t, http.StatusBadRequest, writer.Code)
e := &http2.DefaultErrorJson{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusBadRequest, e.Code)
assert.StringContains(t, "Could not decode request body", e.Message)
})
}
func TestListValidators_FilterByStatus(t *testing.T) {
func TestGetValidators_FilterByStatus(t *testing.T) {
var st state.BeaconState
st, _ = util.DeterministicGenesisState(t, 8192)
st, _ = util.DeterministicGenesisState(t, 1)
farFutureEpoch := params.BeaconConfig().FarFutureEpoch
validators := []*eth.Validator{
@@ -366,7 +499,7 @@ func TestListValidators_FilterByStatus(t *testing.T) {
assert.Equal(t, http.StatusOK, writer.Code)
resp := &GetValidatorsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
assert.Equal(t, 8192+2, len(resp.Data))
assert.Equal(t, 3, len(resp.Data))
for _, vc := range resp.Data {
assert.Equal(
t,
@@ -397,7 +530,7 @@ func TestListValidators_FilterByStatus(t *testing.T) {
assert.Equal(t, http.StatusOK, writer.Code)
resp := &GetValidatorsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
assert.Equal(t, 8192+1, len(resp.Data))
assert.Equal(t, 2, len(resp.Data))
for _, vc := range resp.Data {
require.Equal(
t,
@@ -506,7 +639,7 @@ func TestListValidators_FilterByStatus(t *testing.T) {
func TestGetValidator(t *testing.T) {
var st state.BeaconState
st, _ = util.DeterministicGenesisState(t, 8192)
st, _ = util.DeterministicGenesisState(t, 2)
t.Run("get by index", func(t *testing.T) {
chainService := &chainMock.ChainService{}
@@ -520,7 +653,7 @@ func TestGetValidator(t *testing.T) {
}
request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/beacon/states/{state_id}/validators/{validator_id}", nil)
request = mux.SetURLVars(request, map[string]string{"state_id": "head", "validator_id": "15"})
request = mux.SetURLVars(request, map[string]string{"state_id": "head", "validator_id": "0"})
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
@@ -528,12 +661,12 @@ func TestGetValidator(t *testing.T) {
assert.Equal(t, http.StatusOK, writer.Code)
resp := &GetValidatorResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
assert.Equal(t, "15", resp.Data.Index)
assert.Equal(t, "0", resp.Data.Index)
assert.Equal(t, "32000000000", resp.Data.Balance)
assert.Equal(t, "active_ongoing", resp.Data.Status)
require.NotNil(t, resp.Data.Validator)
assert.Equal(t, "0x872c61b4a7f8510ec809e5b023f5fdda2105d024c470ddbbeca4bc74e8280af0d178d749853e8f6a841083ac1b4db98f", resp.Data.Validator.Pubkey)
assert.Equal(t, "0x00b24fc624e56a5ed42a9639691e27e34b783c7237030367bd17cbef65fa6ccf", resp.Data.Validator.WithdrawalCredentials)
assert.Equal(t, "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", resp.Data.Validator.Pubkey)
assert.Equal(t, "0x00ec7ef7780c9d151597924036262dd28dc60e1228f4da6fecf9d402cb3f3594", resp.Data.Validator.WithdrawalCredentials)
assert.Equal(t, "32000000000", resp.Data.Validator.EffectiveBalance)
assert.Equal(t, false, resp.Data.Validator.Slashed)
assert.Equal(t, "0", resp.Data.Validator.ActivationEligibilityEpoch)
@@ -552,7 +685,7 @@ func TestGetValidator(t *testing.T) {
FinalizationFetcher: chainService,
}
pubKey := st.PubkeyAtIndex(primitives.ValidatorIndex(20))
pubKey := st.PubkeyAtIndex(primitives.ValidatorIndex(0))
hexPubkey := hexutil.Encode(pubKey[:])
request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/beacon/states/{state_id}/validators/{validator_id}", nil)
request = mux.SetURLVars(request, map[string]string{"state_id": "head", "validator_id": hexPubkey})
@@ -563,7 +696,7 @@ func TestGetValidator(t *testing.T) {
assert.Equal(t, http.StatusOK, writer.Code)
resp := &GetValidatorResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
assert.Equal(t, "20", resp.Data.Index)
assert.Equal(t, "0", resp.Data.Index)
})
t.Run("state ID required", func(t *testing.T) {
s := Server{
@@ -657,7 +790,7 @@ func TestGetValidator(t *testing.T) {
}
request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/beacon/states/{state_id}/validators/{validator_id}", nil)
request = mux.SetURLVars(request, map[string]string{"state_id": "head", "validator_id": "15"})
request = mux.SetURLVars(request, map[string]string{"state_id": "head", "validator_id": "0"})
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
@@ -685,7 +818,7 @@ func TestGetValidator(t *testing.T) {
}
request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/beacon/states/{state_id}/validators/{validator_id}", nil)
request = mux.SetURLVars(request, map[string]string{"state_id": "head", "validator_id": "15"})
request = mux.SetURLVars(request, map[string]string{"state_id": "head", "validator_id": "0"})
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
@@ -699,7 +832,7 @@ func TestGetValidator(t *testing.T) {
func TestGetValidatorBalances(t *testing.T) {
var st state.BeaconState
count := uint64(8192)
count := uint64(4)
st, _ = util.DeterministicGenesisState(t, count)
balances := make([]uint64, count)
for i := uint64(0); i < count; i++ {
@@ -727,10 +860,10 @@ func TestGetValidatorBalances(t *testing.T) {
assert.Equal(t, http.StatusOK, writer.Code)
resp := &GetValidatorBalancesResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.Equal(t, 8192, len(resp.Data))
val := resp.Data[123]
assert.Equal(t, "123", val.Index)
assert.Equal(t, "123", val.Balance)
require.Equal(t, 4, len(resp.Data))
val := resp.Data[3]
assert.Equal(t, "3", val.Index)
assert.Equal(t, "3", val.Balance)
})
t.Run("get by index", func(t *testing.T) {
chainService := &chainMock.ChainService{}
@@ -745,7 +878,7 @@ func TestGetValidatorBalances(t *testing.T) {
request := httptest.NewRequest(
http.MethodGet,
"http://example.com/eth/v1/beacon/states/{state_id}/validator_balances?id=15&id=26",
"http://example.com/eth/v1/beacon/states/{state_id}/validator_balances?id=0&id=1",
nil,
)
request = mux.SetURLVars(request, map[string]string{"state_id": "head"})
@@ -757,8 +890,8 @@ func TestGetValidatorBalances(t *testing.T) {
resp := &GetValidatorBalancesResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.Equal(t, 2, len(resp.Data))
assert.Equal(t, "15", resp.Data[0].Index)
assert.Equal(t, "26", resp.Data[1].Index)
assert.Equal(t, "0", resp.Data[0].Index)
assert.Equal(t, "1", resp.Data[1].Index)
})
t.Run("get by pubkey", func(t *testing.T) {
chainService := &chainMock.ChainService{}
@@ -770,8 +903,8 @@ func TestGetValidatorBalances(t *testing.T) {
OptimisticModeFetcher: chainService,
FinalizationFetcher: chainService,
}
pubkey1 := st.PubkeyAtIndex(primitives.ValidatorIndex(20))
pubkey2 := st.PubkeyAtIndex(primitives.ValidatorIndex(66))
pubkey1 := st.PubkeyAtIndex(primitives.ValidatorIndex(0))
pubkey2 := st.PubkeyAtIndex(primitives.ValidatorIndex(1))
hexPubkey1 := hexutil.Encode(pubkey1[:])
hexPubkey2 := hexutil.Encode(pubkey2[:])
@@ -789,8 +922,8 @@ func TestGetValidatorBalances(t *testing.T) {
resp := &GetValidatorBalancesResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.Equal(t, 2, len(resp.Data))
assert.Equal(t, "20", resp.Data[0].Index)
assert.Equal(t, "66", resp.Data[1].Index)
assert.Equal(t, "0", resp.Data[0].Index)
assert.Equal(t, "1", resp.Data[1].Index)
})
t.Run("get by both index and pubkey", func(t *testing.T) {
chainService := &chainMock.ChainService{}
@@ -803,11 +936,11 @@ func TestGetValidatorBalances(t *testing.T) {
FinalizationFetcher: chainService,
}
pubkey := st.PubkeyAtIndex(primitives.ValidatorIndex(20))
pubkey := st.PubkeyAtIndex(primitives.ValidatorIndex(0))
hexPubkey := hexutil.Encode(pubkey[:])
request := httptest.NewRequest(
http.MethodGet,
fmt.Sprintf("http://example.com/eth/v1/beacon/states/{state_id}/validators?id=%s&id=60", hexPubkey),
fmt.Sprintf("http://example.com/eth/v1/beacon/states/{state_id}/validators?id=%s&id=1", hexPubkey),
nil,
)
request = mux.SetURLVars(request, map[string]string{"state_id": "head"})
@@ -819,8 +952,8 @@ func TestGetValidatorBalances(t *testing.T) {
resp := &GetValidatorsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.Equal(t, 2, len(resp.Data))
assert.Equal(t, "20", resp.Data[0].Index)
assert.Equal(t, "60", resp.Data[1].Index)
assert.Equal(t, "0", resp.Data[0].Index)
assert.Equal(t, "1", resp.Data[1].Index)
})
t.Run("unknown pubkey is ignored", func(t *testing.T) {
chainService := &chainMock.ChainService{}
@@ -906,7 +1039,7 @@ func TestGetValidatorBalances(t *testing.T) {
request := httptest.NewRequest(
http.MethodGet,
"http://example.com/eth/v1/beacon/states/{state_id}/validator_balances?id=15",
"http://example.com/eth/v1/beacon/states/{state_id}/validator_balances?id=0",
nil,
)
request = mux.SetURLVars(request, map[string]string{"state_id": "head"})
@@ -938,7 +1071,7 @@ func TestGetValidatorBalances(t *testing.T) {
request := httptest.NewRequest(
http.MethodGet,
"http://example.com/eth/v1/beacon/states/{state_id}/validator_balances?id=15",
"http://example.com/eth/v1/beacon/states/{state_id}/validator_balances?id=0",
nil,
)
request = mux.SetURLVars(request, map[string]string{"state_id": "head"})
@@ -951,4 +1084,96 @@ func TestGetValidatorBalances(t *testing.T) {
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
assert.Equal(t, true, resp.Finalized)
})
t.Run("POST", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
Stater: &testutil.MockStater{
BeaconState: st,
},
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
FinalizationFetcher: chainService,
}
pubkey1 := st.PubkeyAtIndex(primitives.ValidatorIndex(0))
pubkey2 := st.PubkeyAtIndex(primitives.ValidatorIndex(1))
hexPubkey1 := hexutil.Encode(pubkey1[:])
hexPubkey2 := hexutil.Encode(pubkey2[:])
var body bytes.Buffer
_, err := body.WriteString(fmt.Sprintf("[\"%s\",\"%s\"]", hexPubkey1, hexPubkey2))
require.NoError(t, err)
request := httptest.NewRequest(
http.MethodPost,
"http://example.com/eth/v1/beacon/states/{state_id}/validator_balances",
&body,
)
request = mux.SetURLVars(request, map[string]string{"state_id": "head"})
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.GetValidatorBalances(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &GetValidatorBalancesResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.Equal(t, 2, len(resp.Data))
assert.Equal(t, "0", resp.Data[0].Index)
assert.Equal(t, "1", resp.Data[1].Index)
})
t.Run("POST empty", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
Stater: &testutil.MockStater{
BeaconState: st,
},
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
FinalizationFetcher: chainService,
}
request := httptest.NewRequest(
http.MethodPost,
"http://example.com/eth/v1/beacon/states/{state_id}/validator_balances",
nil,
)
request = mux.SetURLVars(request, map[string]string{"state_id": "head"})
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.GetValidatorBalances(writer, request)
assert.Equal(t, http.StatusBadRequest, writer.Code)
e := &http2.DefaultErrorJson{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusBadRequest, e.Code)
assert.StringContains(t, "No data submitted", e.Message)
})
t.Run("POST invalid", func(t *testing.T) {
chainService := &chainMock.ChainService{}
s := Server{
Stater: &testutil.MockStater{
BeaconState: st,
},
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
FinalizationFetcher: chainService,
}
body := bytes.Buffer{}
_, err := body.WriteString("foo")
require.NoError(t, err)
request := httptest.NewRequest(
http.MethodPost,
"http://example.com/eth/v1/beacon/states/{state_id}/validator_balances",
&body,
)
request = mux.SetURLVars(request, map[string]string{"state_id": "head"})
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.GetValidatorBalances(writer, request)
assert.Equal(t, http.StatusBadRequest, writer.Code)
e := &http2.DefaultErrorJson{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusBadRequest, e.Code)
assert.StringContains(t, "Could not decode request body", e.Message)
})
}

View File

@@ -1,129 +0,0 @@
package beacon
import (
"context"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v4/config/features"
ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
"github.com/prysmaticlabs/prysm/v4/proto/migration"
"go.opencensus.io/trace"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/types/known/emptypb"
)
const broadcastBLSChangesRateLimit = 128
// ListPoolAttesterSlashings retrieves attester slashings known by the node but
// not necessarily incorporated into any block.
func (bs *Server) ListPoolAttesterSlashings(ctx context.Context, _ *emptypb.Empty) (*ethpbv1.AttesterSlashingsPoolResponse, error) {
ctx, span := trace.StartSpan(ctx, "beacon.ListPoolAttesterSlashings")
defer span.End()
headState, err := bs.ChainInfoFetcher.HeadStateReadOnly(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get head state: %v", err)
}
sourceSlashings := bs.SlashingsPool.PendingAttesterSlashings(ctx, headState, true /* return unlimited slashings */)
slashings := make([]*ethpbv1.AttesterSlashing, len(sourceSlashings))
for i, s := range sourceSlashings {
slashings[i] = migration.V1Alpha1AttSlashingToV1(s)
}
return &ethpbv1.AttesterSlashingsPoolResponse{
Data: slashings,
}, nil
}
// SubmitAttesterSlashing submits AttesterSlashing object to node's pool and
// if passes validation node MUST broadcast it to network.
func (bs *Server) SubmitAttesterSlashing(ctx context.Context, req *ethpbv1.AttesterSlashing) (*emptypb.Empty, error) {
ctx, span := trace.StartSpan(ctx, "beacon.SubmitAttesterSlashing")
defer span.End()
headState, err := bs.ChainInfoFetcher.HeadState(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get head state: %v", err)
}
headState, err = transition.ProcessSlotsIfPossible(ctx, headState, req.Attestation_1.Data.Slot)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not process slots: %v", err)
}
alphaSlashing := migration.V1AttSlashingToV1Alpha1(req)
err = blocks.VerifyAttesterSlashing(ctx, headState, alphaSlashing)
if err != nil {
return nil, status.Errorf(codes.InvalidArgument, "Invalid attester slashing: %v", err)
}
err = bs.SlashingsPool.InsertAttesterSlashing(ctx, headState, alphaSlashing)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not insert attester slashing into pool: %v", err)
}
if !features.Get().DisableBroadcastSlashings {
if err := bs.Broadcaster.Broadcast(ctx, alphaSlashing); err != nil {
return nil, status.Errorf(codes.Internal, "Could not broadcast slashing object: %v", err)
}
}
return &emptypb.Empty{}, nil
}
// ListPoolProposerSlashings retrieves proposer slashings known by the node
// but not necessarily incorporated into any block.
func (bs *Server) ListPoolProposerSlashings(ctx context.Context, _ *emptypb.Empty) (*ethpbv1.ProposerSlashingPoolResponse, error) {
ctx, span := trace.StartSpan(ctx, "beacon.ListPoolProposerSlashings")
defer span.End()
headState, err := bs.ChainInfoFetcher.HeadStateReadOnly(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get head state: %v", err)
}
sourceSlashings := bs.SlashingsPool.PendingProposerSlashings(ctx, headState, true /* return unlimited slashings */)
slashings := make([]*ethpbv1.ProposerSlashing, len(sourceSlashings))
for i, s := range sourceSlashings {
slashings[i] = migration.V1Alpha1ProposerSlashingToV1(s)
}
return &ethpbv1.ProposerSlashingPoolResponse{
Data: slashings,
}, nil
}
// SubmitProposerSlashing submits ProposerSlashing object to node's pool and if
// passes validation node MUST broadcast it to network.
func (bs *Server) SubmitProposerSlashing(ctx context.Context, req *ethpbv1.ProposerSlashing) (*emptypb.Empty, error) {
ctx, span := trace.StartSpan(ctx, "beacon.SubmitProposerSlashing")
defer span.End()
headState, err := bs.ChainInfoFetcher.HeadState(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get head state: %v", err)
}
headState, err = transition.ProcessSlotsIfPossible(ctx, headState, req.SignedHeader_1.Message.Slot)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not process slots: %v", err)
}
alphaSlashing := migration.V1ProposerSlashingToV1Alpha1(req)
err = blocks.VerifyProposerSlashing(headState, alphaSlashing)
if err != nil {
return nil, status.Errorf(codes.InvalidArgument, "Invalid proposer slashing: %v", err)
}
err = bs.SlashingsPool.InsertProposerSlashing(ctx, headState, alphaSlashing)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not insert proposer slashing into pool: %v", err)
}
if !features.Get().DisableBroadcastSlashings {
if err := bs.Broadcaster.Broadcast(ctx, alphaSlashing); err != nil {
return nil, status.Errorf(codes.Internal, "Could not broadcast slashing object: %v", err)
}
}
return &emptypb.Empty{}, nil
}

View File

@@ -1,558 +0,0 @@
package beacon
import (
"context"
"testing"
blockchainmock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
slashingsmock "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/slashings/mock"
p2pMock "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
"github.com/prysmaticlabs/prysm/v4/proto/migration"
ethpbv1alpha1 "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"google.golang.org/protobuf/types/known/emptypb"
)
func TestListPoolAttesterSlashings(t *testing.T) {
bs, err := util.NewBeaconState()
require.NoError(t, err)
slashing1 := &ethpbv1alpha1.AttesterSlashing{
Attestation_1: &ethpbv1alpha1.IndexedAttestation{
AttestingIndices: []uint64{1, 10},
Data: &ethpbv1alpha1.AttestationData{
Slot: 1,
CommitteeIndex: 1,
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot1"), 32),
Source: &ethpbv1alpha1.Checkpoint{
Epoch: 1,
Root: bytesutil.PadTo([]byte("sourceroot1"), 32),
},
Target: &ethpbv1alpha1.Checkpoint{
Epoch: 10,
Root: bytesutil.PadTo([]byte("targetroot1"), 32),
},
},
Signature: bytesutil.PadTo([]byte("signature1"), 96),
},
Attestation_2: &ethpbv1alpha1.IndexedAttestation{
AttestingIndices: []uint64{2, 20},
Data: &ethpbv1alpha1.AttestationData{
Slot: 2,
CommitteeIndex: 2,
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot2"), 32),
Source: &ethpbv1alpha1.Checkpoint{
Epoch: 2,
Root: bytesutil.PadTo([]byte("sourceroot2"), 32),
},
Target: &ethpbv1alpha1.Checkpoint{
Epoch: 20,
Root: bytesutil.PadTo([]byte("targetroot2"), 32),
},
},
Signature: bytesutil.PadTo([]byte("signature2"), 96),
},
}
slashing2 := &ethpbv1alpha1.AttesterSlashing{
Attestation_1: &ethpbv1alpha1.IndexedAttestation{
AttestingIndices: []uint64{3, 30},
Data: &ethpbv1alpha1.AttestationData{
Slot: 3,
CommitteeIndex: 3,
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot3"), 32),
Source: &ethpbv1alpha1.Checkpoint{
Epoch: 3,
Root: bytesutil.PadTo([]byte("sourceroot3"), 32),
},
Target: &ethpbv1alpha1.Checkpoint{
Epoch: 30,
Root: bytesutil.PadTo([]byte("targetroot3"), 32),
},
},
Signature: bytesutil.PadTo([]byte("signature3"), 96),
},
Attestation_2: &ethpbv1alpha1.IndexedAttestation{
AttestingIndices: []uint64{4, 40},
Data: &ethpbv1alpha1.AttestationData{
Slot: 4,
CommitteeIndex: 4,
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot4"), 32),
Source: &ethpbv1alpha1.Checkpoint{
Epoch: 4,
Root: bytesutil.PadTo([]byte("sourceroot4"), 32),
},
Target: &ethpbv1alpha1.Checkpoint{
Epoch: 40,
Root: bytesutil.PadTo([]byte("targetroot4"), 32),
},
},
Signature: bytesutil.PadTo([]byte("signature4"), 96),
},
}
s := &Server{
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
SlashingsPool: &slashingsmock.PoolMock{PendingAttSlashings: []*ethpbv1alpha1.AttesterSlashing{slashing1, slashing2}},
}
resp, err := s.ListPoolAttesterSlashings(context.Background(), &emptypb.Empty{})
require.NoError(t, err)
require.Equal(t, 2, len(resp.Data))
assert.DeepEqual(t, migration.V1Alpha1AttSlashingToV1(slashing1), resp.Data[0])
assert.DeepEqual(t, migration.V1Alpha1AttSlashingToV1(slashing2), resp.Data[1])
}
func TestListPoolProposerSlashings(t *testing.T) {
bs, err := util.NewBeaconState()
require.NoError(t, err)
slashing1 := &ethpbv1alpha1.ProposerSlashing{
Header_1: &ethpbv1alpha1.SignedBeaconBlockHeader{
Header: &ethpbv1alpha1.BeaconBlockHeader{
Slot: 1,
ProposerIndex: 1,
ParentRoot: bytesutil.PadTo([]byte("parentroot1"), 32),
StateRoot: bytesutil.PadTo([]byte("stateroot1"), 32),
BodyRoot: bytesutil.PadTo([]byte("bodyroot1"), 32),
},
Signature: bytesutil.PadTo([]byte("signature1"), 96),
},
Header_2: &ethpbv1alpha1.SignedBeaconBlockHeader{
Header: &ethpbv1alpha1.BeaconBlockHeader{
Slot: 2,
ProposerIndex: 2,
ParentRoot: bytesutil.PadTo([]byte("parentroot2"), 32),
StateRoot: bytesutil.PadTo([]byte("stateroot2"), 32),
BodyRoot: bytesutil.PadTo([]byte("bodyroot2"), 32),
},
Signature: bytesutil.PadTo([]byte("signature2"), 96),
},
}
slashing2 := &ethpbv1alpha1.ProposerSlashing{
Header_1: &ethpbv1alpha1.SignedBeaconBlockHeader{
Header: &ethpbv1alpha1.BeaconBlockHeader{
Slot: 3,
ProposerIndex: 3,
ParentRoot: bytesutil.PadTo([]byte("parentroot3"), 32),
StateRoot: bytesutil.PadTo([]byte("stateroot3"), 32),
BodyRoot: bytesutil.PadTo([]byte("bodyroot3"), 32),
},
Signature: bytesutil.PadTo([]byte("signature3"), 96),
},
Header_2: &ethpbv1alpha1.SignedBeaconBlockHeader{
Header: &ethpbv1alpha1.BeaconBlockHeader{
Slot: 4,
ProposerIndex: 4,
ParentRoot: bytesutil.PadTo([]byte("parentroot4"), 32),
StateRoot: bytesutil.PadTo([]byte("stateroot4"), 32),
BodyRoot: bytesutil.PadTo([]byte("bodyroot4"), 32),
},
Signature: bytesutil.PadTo([]byte("signature4"), 96),
},
}
s := &Server{
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
SlashingsPool: &slashingsmock.PoolMock{PendingPropSlashings: []*ethpbv1alpha1.ProposerSlashing{slashing1, slashing2}},
}
resp, err := s.ListPoolProposerSlashings(context.Background(), &emptypb.Empty{})
require.NoError(t, err)
require.Equal(t, 2, len(resp.Data))
assert.DeepEqual(t, migration.V1Alpha1ProposerSlashingToV1(slashing1), resp.Data[0])
assert.DeepEqual(t, migration.V1Alpha1ProposerSlashingToV1(slashing2), resp.Data[1])
}
func TestSubmitAttesterSlashing_Ok(t *testing.T) {
ctx := context.Background()
transition.SkipSlotCache.Disable()
defer transition.SkipSlotCache.Enable()
_, keys, err := util.DeterministicDepositsAndKeys(1)
require.NoError(t, err)
validator := &ethpbv1alpha1.Validator{
PublicKey: keys[0].PublicKey().Marshal(),
}
bs, err := util.NewBeaconState(func(state *ethpbv1alpha1.BeaconState) error {
state.Validators = []*ethpbv1alpha1.Validator{validator}
return nil
})
require.NoError(t, err)
slashing := &ethpbv1.AttesterSlashing{
Attestation_1: &ethpbv1.IndexedAttestation{
AttestingIndices: []uint64{0},
Data: &ethpbv1.AttestationData{
Slot: 1,
Index: 1,
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot1"), 32),
Source: &ethpbv1.Checkpoint{
Epoch: 1,
Root: bytesutil.PadTo([]byte("sourceroot1"), 32),
},
Target: &ethpbv1.Checkpoint{
Epoch: 10,
Root: bytesutil.PadTo([]byte("targetroot1"), 32),
},
},
Signature: make([]byte, 96),
},
Attestation_2: &ethpbv1.IndexedAttestation{
AttestingIndices: []uint64{0},
Data: &ethpbv1.AttestationData{
Slot: 1,
Index: 1,
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot2"), 32),
Source: &ethpbv1.Checkpoint{
Epoch: 1,
Root: bytesutil.PadTo([]byte("sourceroot2"), 32),
},
Target: &ethpbv1.Checkpoint{
Epoch: 10,
Root: bytesutil.PadTo([]byte("targetroot2"), 32),
},
},
Signature: make([]byte, 96),
},
}
for _, att := range []*ethpbv1.IndexedAttestation{slashing.Attestation_1, slashing.Attestation_2} {
sb, err := signing.ComputeDomainAndSign(bs, att.Data.Target.Epoch, att.Data, params.BeaconConfig().DomainBeaconAttester, keys[0])
require.NoError(t, err)
sig, err := bls.SignatureFromBytes(sb)
require.NoError(t, err)
att.Signature = sig.Marshal()
}
broadcaster := &p2pMock.MockBroadcaster{}
s := &Server{
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
SlashingsPool: &slashingsmock.PoolMock{},
Broadcaster: broadcaster,
}
_, err = s.SubmitAttesterSlashing(ctx, slashing)
require.NoError(t, err)
pendingSlashings := s.SlashingsPool.PendingAttesterSlashings(ctx, bs, true)
require.Equal(t, 1, len(pendingSlashings))
assert.DeepEqual(t, migration.V1AttSlashingToV1Alpha1(slashing), pendingSlashings[0])
assert.Equal(t, true, broadcaster.BroadcastCalled.Load())
require.Equal(t, 1, broadcaster.NumMessages())
_, ok := broadcaster.BroadcastMessages[0].(*ethpbv1alpha1.AttesterSlashing)
assert.Equal(t, true, ok)
}
func TestSubmitAttesterSlashing_AcrossFork(t *testing.T) {
ctx := context.Background()
transition.SkipSlotCache.Disable()
defer transition.SkipSlotCache.Enable()
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
config.AltairForkEpoch = 1
params.OverrideBeaconConfig(config)
bs, keys := util.DeterministicGenesisState(t, 1)
slashing := &ethpbv1.AttesterSlashing{
Attestation_1: &ethpbv1.IndexedAttestation{
AttestingIndices: []uint64{0},
Data: &ethpbv1.AttestationData{
Slot: params.BeaconConfig().SlotsPerEpoch,
Index: 1,
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot1"), 32),
Source: &ethpbv1.Checkpoint{
Epoch: 1,
Root: bytesutil.PadTo([]byte("sourceroot1"), 32),
},
Target: &ethpbv1.Checkpoint{
Epoch: 10,
Root: bytesutil.PadTo([]byte("targetroot1"), 32),
},
},
Signature: make([]byte, 96),
},
Attestation_2: &ethpbv1.IndexedAttestation{
AttestingIndices: []uint64{0},
Data: &ethpbv1.AttestationData{
Slot: params.BeaconConfig().SlotsPerEpoch,
Index: 1,
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot2"), 32),
Source: &ethpbv1.Checkpoint{
Epoch: 1,
Root: bytesutil.PadTo([]byte("sourceroot2"), 32),
},
Target: &ethpbv1.Checkpoint{
Epoch: 10,
Root: bytesutil.PadTo([]byte("targetroot2"), 32),
},
},
Signature: make([]byte, 96),
},
}
newBs := bs.Copy()
newBs, err := transition.ProcessSlots(ctx, newBs, params.BeaconConfig().SlotsPerEpoch)
require.NoError(t, err)
for _, att := range []*ethpbv1.IndexedAttestation{slashing.Attestation_1, slashing.Attestation_2} {
sb, err := signing.ComputeDomainAndSign(newBs, att.Data.Target.Epoch, att.Data, params.BeaconConfig().DomainBeaconAttester, keys[0])
require.NoError(t, err)
sig, err := bls.SignatureFromBytes(sb)
require.NoError(t, err)
att.Signature = sig.Marshal()
}
broadcaster := &p2pMock.MockBroadcaster{}
s := &Server{
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
SlashingsPool: &slashingsmock.PoolMock{},
Broadcaster: broadcaster,
}
_, err = s.SubmitAttesterSlashing(ctx, slashing)
require.NoError(t, err)
pendingSlashings := s.SlashingsPool.PendingAttesterSlashings(ctx, bs, true)
require.Equal(t, 1, len(pendingSlashings))
assert.DeepEqual(t, migration.V1AttSlashingToV1Alpha1(slashing), pendingSlashings[0])
assert.Equal(t, true, broadcaster.BroadcastCalled.Load())
require.Equal(t, 1, broadcaster.NumMessages())
_, ok := broadcaster.BroadcastMessages[0].(*ethpbv1alpha1.AttesterSlashing)
assert.Equal(t, true, ok)
}
func TestSubmitAttesterSlashing_InvalidSlashing(t *testing.T) {
ctx := context.Background()
transition.SkipSlotCache.Disable()
defer transition.SkipSlotCache.Enable()
bs, err := util.NewBeaconState()
require.NoError(t, err)
attestation := &ethpbv1.IndexedAttestation{
AttestingIndices: []uint64{0},
Data: &ethpbv1.AttestationData{
Slot: 1,
Index: 1,
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot1"), 32),
Source: &ethpbv1.Checkpoint{
Epoch: 1,
Root: bytesutil.PadTo([]byte("sourceroot1"), 32),
},
Target: &ethpbv1.Checkpoint{
Epoch: 10,
Root: bytesutil.PadTo([]byte("targetroot1"), 32),
},
},
Signature: make([]byte, 96),
}
slashing := &ethpbv1.AttesterSlashing{
Attestation_1: attestation,
Attestation_2: attestation,
}
broadcaster := &p2pMock.MockBroadcaster{}
s := &Server{
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
SlashingsPool: &slashingsmock.PoolMock{},
Broadcaster: broadcaster,
}
_, err = s.SubmitAttesterSlashing(ctx, slashing)
require.ErrorContains(t, "Invalid attester slashing", err)
assert.Equal(t, false, broadcaster.BroadcastCalled.Load())
}
func TestSubmitProposerSlashing_Ok(t *testing.T) {
ctx := context.Background()
transition.SkipSlotCache.Disable()
defer transition.SkipSlotCache.Enable()
_, keys, err := util.DeterministicDepositsAndKeys(1)
require.NoError(t, err)
validator := &ethpbv1alpha1.Validator{
PublicKey: keys[0].PublicKey().Marshal(),
WithdrawableEpoch: primitives.Epoch(1),
}
bs, err := util.NewBeaconState(func(state *ethpbv1alpha1.BeaconState) error {
state.Validators = []*ethpbv1alpha1.Validator{validator}
return nil
})
require.NoError(t, err)
slashing := &ethpbv1.ProposerSlashing{
SignedHeader_1: &ethpbv1.SignedBeaconBlockHeader{
Message: &ethpbv1.BeaconBlockHeader{
Slot: 1,
ProposerIndex: 0,
ParentRoot: bytesutil.PadTo([]byte("parentroot1"), 32),
StateRoot: bytesutil.PadTo([]byte("stateroot1"), 32),
BodyRoot: bytesutil.PadTo([]byte("bodyroot1"), 32),
},
Signature: make([]byte, 96),
},
SignedHeader_2: &ethpbv1.SignedBeaconBlockHeader{
Message: &ethpbv1.BeaconBlockHeader{
Slot: 1,
ProposerIndex: 0,
ParentRoot: bytesutil.PadTo([]byte("parentroot2"), 32),
StateRoot: bytesutil.PadTo([]byte("stateroot2"), 32),
BodyRoot: bytesutil.PadTo([]byte("bodyroot2"), 32),
},
Signature: make([]byte, 96),
},
}
for _, h := range []*ethpbv1.SignedBeaconBlockHeader{slashing.SignedHeader_1, slashing.SignedHeader_2} {
sb, err := signing.ComputeDomainAndSign(
bs,
slots.ToEpoch(h.Message.Slot),
h.Message,
params.BeaconConfig().DomainBeaconProposer,
keys[0],
)
require.NoError(t, err)
sig, err := bls.SignatureFromBytes(sb)
require.NoError(t, err)
h.Signature = sig.Marshal()
}
broadcaster := &p2pMock.MockBroadcaster{}
s := &Server{
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
SlashingsPool: &slashingsmock.PoolMock{},
Broadcaster: broadcaster,
}
_, err = s.SubmitProposerSlashing(ctx, slashing)
require.NoError(t, err)
pendingSlashings := s.SlashingsPool.PendingProposerSlashings(ctx, bs, true)
require.Equal(t, 1, len(pendingSlashings))
assert.DeepEqual(t, migration.V1ProposerSlashingToV1Alpha1(slashing), pendingSlashings[0])
assert.Equal(t, true, broadcaster.BroadcastCalled.Load())
require.Equal(t, 1, broadcaster.NumMessages())
_, ok := broadcaster.BroadcastMessages[0].(*ethpbv1alpha1.ProposerSlashing)
assert.Equal(t, true, ok)
}
func TestSubmitProposerSlashing_AcrossFork(t *testing.T) {
ctx := context.Background()
transition.SkipSlotCache.Disable()
defer transition.SkipSlotCache.Enable()
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
config.AltairForkEpoch = 1
params.OverrideBeaconConfig(config)
bs, keys := util.DeterministicGenesisState(t, 1)
slashing := &ethpbv1.ProposerSlashing{
SignedHeader_1: &ethpbv1.SignedBeaconBlockHeader{
Message: &ethpbv1.BeaconBlockHeader{
Slot: params.BeaconConfig().SlotsPerEpoch,
ProposerIndex: 0,
ParentRoot: bytesutil.PadTo([]byte("parentroot1"), 32),
StateRoot: bytesutil.PadTo([]byte("stateroot1"), 32),
BodyRoot: bytesutil.PadTo([]byte("bodyroot1"), 32),
},
Signature: make([]byte, 96),
},
SignedHeader_2: &ethpbv1.SignedBeaconBlockHeader{
Message: &ethpbv1.BeaconBlockHeader{
Slot: params.BeaconConfig().SlotsPerEpoch,
ProposerIndex: 0,
ParentRoot: bytesutil.PadTo([]byte("parentroot2"), 32),
StateRoot: bytesutil.PadTo([]byte("stateroot2"), 32),
BodyRoot: bytesutil.PadTo([]byte("bodyroot2"), 32),
},
Signature: make([]byte, 96),
},
}
newBs := bs.Copy()
newBs, err := transition.ProcessSlots(ctx, newBs, params.BeaconConfig().SlotsPerEpoch)
require.NoError(t, err)
for _, h := range []*ethpbv1.SignedBeaconBlockHeader{slashing.SignedHeader_1, slashing.SignedHeader_2} {
sb, err := signing.ComputeDomainAndSign(
newBs,
slots.ToEpoch(h.Message.Slot),
h.Message,
params.BeaconConfig().DomainBeaconProposer,
keys[0],
)
require.NoError(t, err)
sig, err := bls.SignatureFromBytes(sb)
require.NoError(t, err)
h.Signature = sig.Marshal()
}
broadcaster := &p2pMock.MockBroadcaster{}
s := &Server{
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
SlashingsPool: &slashingsmock.PoolMock{},
Broadcaster: broadcaster,
}
_, err = s.SubmitProposerSlashing(ctx, slashing)
require.NoError(t, err)
pendingSlashings := s.SlashingsPool.PendingProposerSlashings(ctx, bs, true)
require.Equal(t, 1, len(pendingSlashings))
assert.DeepEqual(t, migration.V1ProposerSlashingToV1Alpha1(slashing), pendingSlashings[0])
assert.Equal(t, true, broadcaster.BroadcastCalled.Load())
require.Equal(t, 1, broadcaster.NumMessages())
_, ok := broadcaster.BroadcastMessages[0].(*ethpbv1alpha1.ProposerSlashing)
assert.Equal(t, true, ok)
}
func TestSubmitProposerSlashing_InvalidSlashing(t *testing.T) {
ctx := context.Background()
transition.SkipSlotCache.Disable()
defer transition.SkipSlotCache.Enable()
bs, err := util.NewBeaconState()
require.NoError(t, err)
header := &ethpbv1.SignedBeaconBlockHeader{
Message: &ethpbv1.BeaconBlockHeader{
Slot: 1,
ProposerIndex: 0,
ParentRoot: bytesutil.PadTo([]byte("parentroot1"), 32),
StateRoot: bytesutil.PadTo([]byte("stateroot1"), 32),
BodyRoot: bytesutil.PadTo([]byte("bodyroot1"), 32),
},
Signature: make([]byte, 96),
}
slashing := &ethpbv1.ProposerSlashing{
SignedHeader_1: header,
SignedHeader_2: header,
}
broadcaster := &p2pMock.MockBroadcaster{}
s := &Server{
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
SlashingsPool: &slashingsmock.PoolMock{},
Broadcaster: broadcaster,
}
_, err = s.SubmitProposerSlashing(ctx, slashing)
require.ErrorContains(t, "Invalid proposer slashing", err)
assert.Equal(t, false, broadcaster.BroadcastCalled.Load())
}

View File

@@ -20,13 +20,6 @@ type GetCommitteesResponse struct {
Finalized bool `json:"finalized"`
}
type DepositContractResponse struct {
Data *struct {
ChainId string `json:"chain_id"`
Address string `json:"address"`
} `json:"data"`
}
type ListAttestationsResponse struct {
Data []*shared.Attestation `json:"data"`
}
@@ -83,6 +76,11 @@ type GetBlockHeaderResponse struct {
Data *shared.SignedBeaconBlockHeaderContainer `json:"data"`
}
type GetValidatorsRequest struct {
Ids []string `json:"ids"`
Statuses []string `json:"statuses"`
}
type GetValidatorsResponse struct {
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
@@ -180,3 +178,11 @@ type SyncCommitteeValidators struct {
type BLSToExecutionChangesPoolResponse struct {
Data []*shared.SignedBLSToExecutionChange `json:"data"`
}
type GetAttesterSlashingsResponse struct {
Data []*shared.AttesterSlashing `json:"data"`
}
type GetProposerSlashingsResponse struct {
Data []*shared.ProposerSlashing `json:"data"`
}
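As a rough sketch of the request body implied by the new GetValidatorsRequest type above; the struct is mirrored locally so the snippet compiles on its own, and the status value is an example, not taken from this diff.
package main

import (
    "encoding/json"
    "fmt"
)

// Mirrors the GetValidatorsRequest struct introduced above.
type GetValidatorsRequest struct {
    Ids      []string `json:"ids"`
    Statuses []string `json:"statuses"`
}

func main() {
    req := GetValidatorsRequest{
        Ids:      []string{"0", "1"},
        Statuses: []string{"active_ongoing"}, // example status, not part of this diff
    }
    b, err := json.Marshal(req)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(b)) // {"ids":["0","1"],"statuses":["active_ongoing"]}
}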

View File

@@ -26,7 +26,7 @@ func (s *Server) Blobs(w http.ResponseWriter, r *http.Request) {
_, span := trace.StartSpan(r.Context(), "beacon.Blobs")
defer span.End()
var sidecars []*eth.BlobSidecar
var sidecars []*eth.DeprecatedBlobSidecar
var root []byte
indices := parseIndices(r.URL)
@@ -142,7 +142,7 @@ loop:
return indices
}
func buildSidecarsResponse(sidecars []*eth.BlobSidecar) *SidecarsResponse {
func buildSidecarsResponse(sidecars []*eth.DeprecatedBlobSidecar) *SidecarsResponse {
resp := &SidecarsResponse{Data: make([]*Sidecar, len(sidecars))}
for i, sc := range sidecars {
resp.Data[i] = &Sidecar{

View File

@@ -34,7 +34,7 @@ func TestBlobs(t *testing.T) {
db := testDB.SetupDB(t)
blockroot := bytesutil.PadTo([]byte("blockroot"), 32)
require.NoError(t, db.SaveBlobSidecar(context.Background(), []*eth.BlobSidecar{
require.NoError(t, db.SaveBlobSidecar(context.Background(), []*eth.DeprecatedBlobSidecar{
{
BlockRoot: blockroot,
Index: 0,
@@ -272,7 +272,7 @@ func TestBlobs(t *testing.T) {
assert.Equal(t, true, strings.Contains(e.Message, "could not parse block ID"))
})
t.Run("ssz", func(t *testing.T) {
require.NoError(t, db.SaveBlobSidecar(context.Background(), []*eth.BlobSidecar{
require.NoError(t, db.SaveBlobSidecar(context.Background(), []*eth.DeprecatedBlobSidecar{
{
BlockRoot: blockroot,
Index: 0,

View File

@@ -0,0 +1,35 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"handlers.go",
"structs.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/config",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/rpc/eth/shared:go_default_library",
"//config/params:go_default_library",
"//network/forks:go_default_library",
"//network/http:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@io_opencensus_go//trace:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["handlers_test.go"],
embed = [":go_default_library"],
deps = [
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//network/forks:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
],
)

View File

@@ -1,36 +1,48 @@
package beacon
package config
import (
"context"
"fmt"
"net/http"
"reflect"
"strconv"
"strings"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/network/forks"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
http2 "github.com/prysmaticlabs/prysm/v4/network/http"
"go.opencensus.io/trace"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/types/known/emptypb"
)
// GetDepositContract retrieves the deposit contract address and chain ID.
func GetDepositContract(w http.ResponseWriter, r *http.Request) {
_, span := trace.StartSpan(r.Context(), "config.GetDepositContract")
defer span.End()
http2.WriteJson(w, &GetDepositContractResponse{
Data: &DepositContractData{
ChainId: strconv.FormatUint(params.BeaconConfig().DepositChainID, 10),
Address: params.BeaconConfig().DepositContractAddress,
},
})
}
// GetForkSchedule retrieves all scheduled upcoming forks this node is aware of.
func (_ *Server) GetForkSchedule(ctx context.Context, _ *emptypb.Empty) (*ethpb.ForkScheduleResponse, error) {
ctx, span := trace.StartSpan(ctx, "beacon.GetForkSchedule")
func GetForkSchedule(w http.ResponseWriter, r *http.Request) {
_, span := trace.StartSpan(r.Context(), "config.GetForkSchedule")
defer span.End()
schedule := params.BeaconConfig().ForkVersionSchedule
if len(schedule) == 0 {
return &ethpb.ForkScheduleResponse{
Data: make([]*ethpb.Fork, 0),
}, nil
http2.WriteJson(w, &GetForkScheduleResponse{
Data: make([]*shared.Fork, 0),
})
return
}
versions := forks.SortedForkVersions(schedule)
chainForks := make([]*ethpb.Fork, len(schedule))
chainForks := make([]*shared.Fork, len(schedule))
var previous, current []byte
for i, v := range versions {
if i == 0 {
@@ -40,31 +52,32 @@ func (_ *Server) GetForkSchedule(ctx context.Context, _ *emptypb.Empty) (*ethpb.
}
copyV := v
current = copyV[:]
chainForks[i] = &ethpb.Fork{
PreviousVersion: previous,
CurrentVersion: current,
Epoch: schedule[v],
chainForks[i] = &shared.Fork{
PreviousVersion: hexutil.Encode(previous),
CurrentVersion: hexutil.Encode(current),
Epoch: fmt.Sprintf("%d", schedule[v]),
}
}
return &ethpb.ForkScheduleResponse{
http2.WriteJson(w, &GetForkScheduleResponse{
Data: chainForks,
}, nil
})
}
// GetSpec retrieves the specification configuration (without Phase 1 params) used on this node.
// Values are returned in the following format:
// - any value starting with 0x in the spec is returned as a hex string.
// - all other values are returned as a number.
func (_ *Server) GetSpec(ctx context.Context, _ *emptypb.Empty) (*ethpb.SpecResponse, error) {
ctx, span := trace.StartSpan(ctx, "beacon.GetSpec")
func GetSpec(w http.ResponseWriter, r *http.Request) {
_, span := trace.StartSpan(r.Context(), "config.GetSpec")
defer span.End()
data, err := prepareConfigSpec()
if err != nil {
return nil, status.Errorf(codes.Internal, "Failed to prepare spec data: %v", err)
http2.HandleError(w, "Could not prepare config spec: "+err.Error(), http.StatusInternalServerError)
return
}
return &ethpb.SpecResponse{Data: data}, nil
http2.WriteJson(w, &GetSpecResponse{Data: data})
}
func prepareConfigSpec() (map[string]string, error) {

View File

@@ -1,20 +1,44 @@
package beacon
package config
import (
"context"
"bytes"
"encoding/hex"
"encoding/json"
"fmt"
"net/http"
"net/http/httptest"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/network/forks"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"google.golang.org/protobuf/types/known/emptypb"
)
func TestGetDepositContract(t *testing.T) {
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig().Copy()
config.DepositChainID = uint64(10)
config.DepositContractAddress = "0x4242424242424242424242424242424242424242"
params.OverrideBeaconConfig(config)
request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/config/deposit_contract", nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
GetDepositContract(writer, request)
require.Equal(t, http.StatusOK, writer.Code)
response := GetDepositContractResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &response))
assert.Equal(t, "10", response.Data.ChainId)
assert.Equal(t, "0x4242424242424242424242424242424242424242", response.Data.Address)
}
func TestGetSpec(t *testing.T) {
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig().Copy()
@@ -137,12 +161,19 @@ func TestGetSpec(t *testing.T) {
params.OverrideBeaconConfig(config)
server := &Server{}
resp, err := server.GetSpec(context.Background(), &emptypb.Empty{})
require.NoError(t, err)
request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/config/spec", nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
assert.Equal(t, 112, len(resp.Data))
for k, v := range resp.Data {
GetSpec(writer, request)
require.Equal(t, http.StatusOK, writer.Code)
resp := GetSpecResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp))
data, ok := resp.Data.(map[string]interface{})
require.Equal(t, true, ok)
assert.Equal(t, 113, len(data))
for k, v := range data {
switch k {
case "CONFIG_NAME":
assert.Equal(t, "ConfigName", v)
@@ -353,7 +384,9 @@ func TestGetSpec(t *testing.T) {
case "TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH":
assert.Equal(t, "72", v)
case "TERMINAL_BLOCK_HASH":
assert.Equal(t, common.HexToHash("TerminalBlockHash"), common.HexToHash(v))
s, ok := v.(string)
require.Equal(t, true, ok)
assert.Equal(t, common.HexToHash("TerminalBlockHash"), common.HexToHash(s))
case "TERMINAL_TOTAL_DIFFICULTY":
assert.Equal(t, "73", v)
case "DefaultFeeRecipient":
@@ -382,6 +415,8 @@ func TestGetSpec(t *testing.T) {
assert.Equal(t, "160", v)
case "MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT":
assert.Equal(t, "8", v)
case "MAX_REQUEST_LIGHT_CLIENT_UPDATES":
assert.Equal(t, "128", v)
case "SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY":
default:
t.Errorf("Incorrect key: %s", k)
@@ -390,44 +425,55 @@ func TestGetSpec(t *testing.T) {
}
func TestForkSchedule_Ok(t *testing.T) {
genesisForkVersion := []byte("Genesis")
firstForkVersion, firstForkEpoch := []byte("Firs"), primitives.Epoch(100)
secondForkVersion, secondForkEpoch := []byte("Seco"), primitives.Epoch(200)
thirdForkVersion, thirdForkEpoch := []byte("Thir"), primitives.Epoch(300)
t.Run("ok", func(t *testing.T) {
genesisForkVersion := []byte("Genesis")
firstForkVersion, firstForkEpoch := []byte("Firs"), primitives.Epoch(100)
secondForkVersion, secondForkEpoch := []byte("Seco"), primitives.Epoch(200)
thirdForkVersion, thirdForkEpoch := []byte("Thir"), primitives.Epoch(300)
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig().Copy()
config.GenesisForkVersion = genesisForkVersion
// Create fork schedule adding keys in non-sorted order.
schedule := make(map[[4]byte]primitives.Epoch, 3)
schedule[bytesutil.ToBytes4(secondForkVersion)] = secondForkEpoch
schedule[bytesutil.ToBytes4(firstForkVersion)] = firstForkEpoch
schedule[bytesutil.ToBytes4(thirdForkVersion)] = thirdForkEpoch
config.ForkVersionSchedule = schedule
params.OverrideBeaconConfig(config)
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig().Copy()
config.GenesisForkVersion = genesisForkVersion
// Create fork schedule adding keys in non-sorted order.
schedule := make(map[[4]byte]primitives.Epoch, 3)
schedule[bytesutil.ToBytes4(secondForkVersion)] = secondForkEpoch
schedule[bytesutil.ToBytes4(firstForkVersion)] = firstForkEpoch
schedule[bytesutil.ToBytes4(thirdForkVersion)] = thirdForkEpoch
config.ForkVersionSchedule = schedule
params.OverrideBeaconConfig(config)
s := &Server{}
resp, err := s.GetForkSchedule(context.Background(), &emptypb.Empty{})
require.NoError(t, err)
require.Equal(t, 3, len(resp.Data))
fork := resp.Data[0]
assert.DeepEqual(t, genesisForkVersion, fork.PreviousVersion)
assert.DeepEqual(t, string(firstForkVersion), string(fork.CurrentVersion))
assert.Equal(t, firstForkEpoch, fork.Epoch)
fork = resp.Data[1]
assert.DeepEqual(t, firstForkVersion, fork.PreviousVersion)
assert.DeepEqual(t, secondForkVersion, fork.CurrentVersion)
assert.Equal(t, secondForkEpoch, fork.Epoch)
fork = resp.Data[2]
assert.DeepEqual(t, secondForkVersion, fork.PreviousVersion)
assert.DeepEqual(t, thirdForkVersion, fork.CurrentVersion)
assert.Equal(t, thirdForkEpoch, fork.Epoch)
}
func TestForkSchedule_CorrectNumberOfForks(t *testing.T) {
s := &Server{}
resp, err := s.GetForkSchedule(context.Background(), &emptypb.Empty{})
require.NoError(t, err)
os := forks.NewOrderedSchedule(params.BeaconConfig())
assert.Equal(t, os.Len(), len(resp.Data))
request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/config/fork_schedule", nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
GetForkSchedule(writer, request)
require.Equal(t, http.StatusOK, writer.Code)
resp := &GetForkScheduleResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.Equal(t, 3, len(resp.Data))
fork := resp.Data[0]
assert.DeepEqual(t, hexutil.Encode(genesisForkVersion), fork.PreviousVersion)
assert.DeepEqual(t, hexutil.Encode(firstForkVersion), fork.CurrentVersion)
assert.Equal(t, fmt.Sprintf("%d", firstForkEpoch), fork.Epoch)
fork = resp.Data[1]
assert.DeepEqual(t, hexutil.Encode(firstForkVersion), fork.PreviousVersion)
assert.DeepEqual(t, hexutil.Encode(secondForkVersion), fork.CurrentVersion)
assert.Equal(t, fmt.Sprintf("%d", secondForkEpoch), fork.Epoch)
fork = resp.Data[2]
assert.DeepEqual(t, hexutil.Encode(secondForkVersion), fork.PreviousVersion)
assert.DeepEqual(t, hexutil.Encode(thirdForkVersion), fork.CurrentVersion)
assert.Equal(t, fmt.Sprintf("%d", thirdForkEpoch), fork.Epoch)
})
t.Run("correct number of forks", func(t *testing.T) {
request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/config/fork_schedule", nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
GetForkSchedule(writer, request)
require.Equal(t, http.StatusOK, writer.Code)
resp := &GetForkScheduleResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
os := forks.NewOrderedSchedule(params.BeaconConfig())
assert.Equal(t, os.Len(), len(resp.Data))
})
}
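Because these config endpoints are now plain http.HandlerFunc values instead of gRPC methods, they have to be registered on a router. A minimal sketch using gorilla/mux follows; the paths match the URLs used in the tests above, but the port and the actual wiring inside the node are assumptions, not part of this diff.
package main

import (
    "log"
    "net/http"

    "github.com/gorilla/mux"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/config"
)

func main() {
    r := mux.NewRouter()
    // Paths taken from the handler tests; the registration itself is illustrative only.
    r.HandleFunc("/eth/v1/config/deposit_contract", config.GetDepositContract).Methods(http.MethodGet)
    r.HandleFunc("/eth/v1/config/fork_schedule", config.GetForkSchedule).Methods(http.MethodGet)
    r.HandleFunc("/eth/v1/config/spec", config.GetSpec).Methods(http.MethodGet)
    log.Fatal(http.ListenAndServe(":3500", r)) // assumed port
}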

View File

@@ -0,0 +1,20 @@
package config
import "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
type GetDepositContractResponse struct {
Data *DepositContractData `json:"data"`
}
type DepositContractData struct {
ChainId string `json:"chain_id"`
Address string `json:"address"`
}
type GetForkScheduleResponse struct {
Data []*shared.Fork `json:"data"`
}
type GetSpecResponse struct {
Data interface{} `json:"data"`
}
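For reference, a self-contained sketch of the JSON these REST structs produce, using the values from the deposit contract test above; the struct definitions are copied locally so the snippet compiles on its own.
package main

import (
    "encoding/json"
    "fmt"
)

// Local copies of the types defined above, so the example is self-contained.
type DepositContractData struct {
    ChainId string `json:"chain_id"`
    Address string `json:"address"`
}

type GetDepositContractResponse struct {
    Data *DepositContractData `json:"data"`
}

func main() {
    resp := GetDepositContractResponse{
        Data: &DepositContractData{
            ChainId: "10", // values from TestGetDepositContract above
            Address: "0x4242424242424242424242424242424242424242",
        },
    }
    b, err := json.Marshal(resp)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(b)) // {"data":{"chain_id":"10","address":"0x4242..."}}
}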

View File

@@ -3,7 +3,6 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"debug.go",
"handlers.go",
"server.go",
"structs.go",
@@ -20,26 +19,18 @@ go_library(
"//beacon-chain/state:go_default_library",
"//network/http:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/eth/v2:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_gorilla_mux//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
"@org_golang_google_grpc//codes:go_default_library",
"@org_golang_google_grpc//status:go_default_library",
"@org_golang_google_protobuf//types/known/emptypb:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"debug_test.go",
"handlers_test.go",
],
srcs = ["handlers_test.go"],
embed = [":go_default_library"],
deps = [
"//api:go_default_library",
@@ -48,14 +39,12 @@ go_test(
"//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
"//beacon-chain/forkchoice/types:go_default_library",
"//beacon-chain/rpc/testutil:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//runtime/version:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"@com_github_golang_protobuf//ptypes/empty",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_gorilla_mux//:go_default_library",
"@org_golang_google_protobuf//types/known/emptypb:go_default_library",
],
)

View File

@@ -1,41 +0,0 @@
package debug
import (
"context"
ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
ethpbv2 "github.com/prysmaticlabs/prysm/v4/proto/eth/v2"
"go.opencensus.io/trace"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/types/known/emptypb"
)
// ListForkChoiceHeadsV2 retrieves the leaves of the current fork choice tree.
func (ds *Server) ListForkChoiceHeadsV2(ctx context.Context, _ *emptypb.Empty) (*ethpbv2.ForkChoiceHeadsResponse, error) {
ctx, span := trace.StartSpan(ctx, "debug.ListForkChoiceHeadsV2")
defer span.End()
headRoots, headSlots := ds.HeadFetcher.ChainHeads()
resp := &ethpbv2.ForkChoiceHeadsResponse{
Data: make([]*ethpbv2.ForkChoiceHead, len(headRoots)),
}
for i := range headRoots {
isOptimistic, err := ds.OptimisticModeFetcher.IsOptimisticForRoot(ctx, headRoots[i])
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if head is optimistic: %v", err)
}
resp.Data[i] = &ethpbv2.ForkChoiceHead{
Root: headRoots[i][:],
Slot: headSlots[i],
ExecutionOptimistic: isOptimistic,
}
}
return resp, nil
}
// GetForkChoice returns a dump of the fork choice store.
func (ds *Server) GetForkChoice(ctx context.Context, _ *emptypb.Empty) (*ethpbv1.ForkChoiceDump, error) {
return ds.ForkchoiceFetcher.ForkChoiceDump(ctx)
}

View File

@@ -1,90 +0,0 @@
package debug
import (
"context"
"testing"
"github.com/golang/protobuf/ptypes/empty"
blockchainmock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"google.golang.org/protobuf/types/known/emptypb"
)
func TestListForkChoiceHeadsV2(t *testing.T) {
ctx := context.Background()
expectedSlotsAndRoots := []struct {
Slot primitives.Slot
Root [32]byte
}{{
Slot: 0,
Root: bytesutil.ToBytes32(bytesutil.PadTo([]byte("foo"), 32)),
}, {
Slot: 1,
Root: bytesutil.ToBytes32(bytesutil.PadTo([]byte("bar"), 32)),
}}
chainService := &blockchainmock.ChainService{}
server := &Server{
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
}
resp, err := server.ListForkChoiceHeadsV2(ctx, &emptypb.Empty{})
require.NoError(t, err)
assert.Equal(t, 2, len(resp.Data))
for _, sr := range expectedSlotsAndRoots {
found := false
for _, h := range resp.Data {
if h.Slot == sr.Slot {
found = true
assert.DeepEqual(t, sr.Root[:], h.Root)
}
assert.Equal(t, false, h.ExecutionOptimistic)
}
assert.Equal(t, true, found, "Expected head not found")
}
t.Run("optimistic head", func(t *testing.T) {
chainService := &blockchainmock.ChainService{
Optimistic: true,
OptimisticRoots: make(map[[32]byte]bool),
}
for _, sr := range expectedSlotsAndRoots {
chainService.OptimisticRoots[sr.Root] = true
}
server := &Server{
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
}
resp, err := server.ListForkChoiceHeadsV2(ctx, &emptypb.Empty{})
require.NoError(t, err)
assert.Equal(t, 2, len(resp.Data))
for _, sr := range expectedSlotsAndRoots {
found := false
for _, h := range resp.Data {
if h.Slot == sr.Slot {
found = true
assert.DeepEqual(t, sr.Root[:], h.Root)
}
assert.Equal(t, true, h.ExecutionOptimistic)
}
assert.Equal(t, true, found, "Expected head not found")
}
})
}
func TestServer_GetForkChoice(t *testing.T) {
store := doublylinkedtree.New()
fRoot := [32]byte{'a'}
fc := &forkchoicetypes.Checkpoint{Epoch: 2, Root: fRoot}
require.NoError(t, store.UpdateFinalizedCheckpoint(fc))
bs := &Server{ForkchoiceFetcher: &blockchainmock.ChainService{ForkChoiceStore: store}}
res, err := bs.GetForkChoice(context.Background(), &empty.Empty{})
require.NoError(t, err)
require.Equal(t, primitives.Epoch(2), res.FinalizedCheckpoint.Epoch, "Did not get wanted finalized epoch")
}

View File

@@ -3,8 +3,10 @@ package debug
import (
"context"
"encoding/json"
"fmt"
"net/http"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/gorilla/mux"
"github.com/prysmaticlabs/prysm/v4/api"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/helpers"
@@ -147,3 +149,74 @@ func (s *Server) getBeaconStateSSZV2(ctx context.Context, w http.ResponseWriter,
w.Header().Set(api.VersionHeader, version.String(st.Version()))
http2.WriteSsz(w, sszState, "beacon_state.ssz")
}
// GetForkChoiceHeadsV2 retrieves the leaves of the current fork choice tree.
func (s *Server) GetForkChoiceHeadsV2(w http.ResponseWriter, r *http.Request) {
ctx, span := trace.StartSpan(r.Context(), "debug.GetForkChoiceHeadsV2")
defer span.End()
headRoots, headSlots := s.HeadFetcher.ChainHeads()
resp := &GetForkChoiceHeadsV2Response{
Data: make([]*ForkChoiceHead, len(headRoots)),
}
for i := range headRoots {
isOptimistic, err := s.OptimisticModeFetcher.IsOptimisticForRoot(ctx, headRoots[i])
if err != nil {
http2.HandleError(w, "Could not check if head is optimistic: "+err.Error(), http.StatusInternalServerError)
return
}
resp.Data[i] = &ForkChoiceHead{
Root: hexutil.Encode(headRoots[i][:]),
Slot: fmt.Sprintf("%d", headSlots[i]),
ExecutionOptimistic: isOptimistic,
}
}
http2.WriteJson(w, resp)
}
// GetForkChoice returns a dump of the fork choice store.
func (s *Server) GetForkChoice(w http.ResponseWriter, r *http.Request) {
ctx, span := trace.StartSpan(r.Context(), "debug.GetForkChoice")
defer span.End()
dump, err := s.ForkchoiceFetcher.ForkChoiceDump(ctx)
if err != nil {
http2.HandleError(w, "Could not get forkchoice dump: "+err.Error(), http.StatusInternalServerError)
return
}
nodes := make([]*ForkChoiceNode, len(dump.ForkChoiceNodes))
for i, n := range dump.ForkChoiceNodes {
nodes[i] = &ForkChoiceNode{
Slot: fmt.Sprintf("%d", n.Slot),
BlockRoot: hexutil.Encode(n.BlockRoot),
ParentRoot: hexutil.Encode(n.ParentRoot),
JustifiedEpoch: fmt.Sprintf("%d", n.JustifiedEpoch),
FinalizedEpoch: fmt.Sprintf("%d", n.FinalizedEpoch),
Weight: fmt.Sprintf("%d", n.Weight),
ExecutionBlockHash: hexutil.Encode(n.ExecutionBlockHash),
Validity: n.Validity.String(),
ExtraData: &ForkChoiceNodeExtraData{
UnrealizedJustifiedEpoch: fmt.Sprintf("%d", n.UnrealizedJustifiedEpoch),
UnrealizedFinalizedEpoch: fmt.Sprintf("%d", n.UnrealizedFinalizedEpoch),
Balance: fmt.Sprintf("%d", n.Balance),
ExecutionOptimistic: n.ExecutionOptimistic,
TimeStamp: fmt.Sprintf("%d", n.Timestamp),
},
}
}
resp := &GetForkChoiceDumpResponse{
JustifiedCheckpoint: shared.CheckpointFromConsensus(dump.JustifiedCheckpoint),
FinalizedCheckpoint: shared.CheckpointFromConsensus(dump.FinalizedCheckpoint),
ForkChoiceNodes: nodes,
ExtraData: &ForkChoiceDumpExtraData{
UnrealizedJustifiedCheckpoint: shared.CheckpointFromConsensus(dump.UnrealizedJustifiedCheckpoint),
UnrealizedFinalizedCheckpoint: shared.CheckpointFromConsensus(dump.UnrealizedFinalizedCheckpoint),
ProposerBoostRoot: hexutil.Encode(dump.ProposerBoostRoot),
PreviousProposerBoostRoot: hexutil.Encode(dump.PreviousProposerBoostRoot),
HeadRoot: hexutil.Encode(dump.HeadRoot),
},
}
http2.WriteJson(w, resp)
}
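And a small client sketch for the migrated heads endpoint. The node address is assumed, and the JSON field names follow the standard Beacon API heads response, since the debug package's structs file is not shown in this diff.
package main

import (
    "encoding/json"
    "fmt"
    "net/http"
)

// Field names assume the standard Beacon API shape; the actual
// GetForkChoiceHeadsV2Response definition is not part of this hunk.
type forkChoiceHead struct {
    Root                string `json:"root"`
    Slot                string `json:"slot"`
    ExecutionOptimistic bool   `json:"execution_optimistic"`
}

type forkChoiceHeadsResponse struct {
    Data []forkChoiceHead `json:"data"`
}

func main() {
    resp, err := http.Get("http://localhost:3500/eth/v2/debug/beacon/heads") // assumed node address
    if err != nil {
        panic(err)
    }
    defer func() { _ = resp.Body.Close() }()
    var heads forkChoiceHeadsResponse
    if err := json.NewDecoder(resp.Body).Decode(&heads); err != nil {
        panic(err)
    }
    for _, h := range heads.Data {
        fmt.Printf("slot=%s root=%s optimistic=%t\n", h.Slot, h.Root, h.ExecutionOptimistic)
    }
}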

View File

@@ -8,11 +8,15 @@ import (
"net/http/httptest"
"testing"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/gorilla/mux"
"github.com/prysmaticlabs/prysm/v4/api"
blockchainmock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
dbtest "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/testutil"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
@@ -381,3 +385,98 @@ func TestGetBeaconStateSSZV2(t *testing.T) {
assert.DeepEqual(t, sszExpected, writer.Body.Bytes())
})
}
func TestGetForkChoiceHeadsV2(t *testing.T) {
expectedSlotsAndRoots := []struct {
Slot string
Root string
}{{
Slot: "0",
Root: hexutil.Encode(bytesutil.PadTo([]byte("foo"), 32)),
}, {
Slot: "1",
Root: hexutil.Encode(bytesutil.PadTo([]byte("bar"), 32)),
}}
chainService := &blockchainmock.ChainService{}
s := &Server{
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
}
request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v2/debug/beacon/heads", nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.GetForkChoiceHeadsV2(writer, request)
require.Equal(t, http.StatusOK, writer.Code)
resp := &GetForkChoiceHeadsV2Response{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
assert.Equal(t, 2, len(resp.Data))
for _, sr := range expectedSlotsAndRoots {
found := false
for _, h := range resp.Data {
if h.Slot == sr.Slot {
found = true
assert.Equal(t, sr.Root, h.Root)
}
assert.Equal(t, false, h.ExecutionOptimistic)
}
assert.Equal(t, true, found, "Expected head not found")
}
t.Run("optimistic head", func(t *testing.T) {
chainService := &blockchainmock.ChainService{
Optimistic: true,
OptimisticRoots: make(map[[32]byte]bool),
}
for _, sr := range expectedSlotsAndRoots {
b, err := hexutil.Decode(sr.Root)
require.NoError(t, err)
chainService.OptimisticRoots[bytesutil.ToBytes32(b)] = true
}
s := &Server{
HeadFetcher: chainService,
OptimisticModeFetcher: chainService,
}
request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v2/debug/beacon/heads", nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.GetForkChoiceHeadsV2(writer, request)
require.Equal(t, http.StatusOK, writer.Code)
resp := &GetForkChoiceHeadsV2Response{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
assert.Equal(t, 2, len(resp.Data))
for _, sr := range expectedSlotsAndRoots {
found := false
for _, h := range resp.Data {
if h.Slot == sr.Slot {
found = true
assert.Equal(t, sr.Root, h.Root)
}
assert.Equal(t, true, h.ExecutionOptimistic)
}
assert.Equal(t, true, found, "Expected head not found")
}
})
}
func TestGetForkChoice(t *testing.T) {
store := doublylinkedtree.New()
fRoot := [32]byte{'a'}
fc := &forkchoicetypes.Checkpoint{Epoch: 2, Root: fRoot}
require.NoError(t, store.UpdateFinalizedCheckpoint(fc))
s := &Server{ForkchoiceFetcher: &blockchainmock.ChainService{ForkChoiceStore: store}}
request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v2/debug/fork_choice", nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.GetForkChoice(writer, request)
require.Equal(t, http.StatusOK, writer.Code)
resp := &GetForkChoiceDumpResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.Equal(t, "2", resp.FinalizedCheckpoint.Epoch)
}

Some files were not shown because too many files have changed in this diff.