Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-10 22:07:59 -05:00)

Compare commits: hardcode_g ... testRangeF (63 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | cffe93ddeb |  |
|  | 2b58b462e2 |  |
|  | 292d42e454 |  |
|  | 057af30542 |  |
|  | 534459e269 |  |
|  | b0fa3017b7 |  |
|  | 15574a80a5 |  |
|  | b91b9f6fa5 |  |
|  | d48f89c166 |  |
|  | 289c44a1f2 |  |
|  | c9fa9389ab |  |
|  | 940782f323 |  |
|  | 8c684c1919 |  |
|  | 8f431c1a79 |  |
|  | c3d7069d0d |  |
|  | 006e8db8bb |  |
|  | 5e4f25f25b |  |
|  | 52cdd9a538 |  |
|  | 6064e2b5c7 |  |
|  | 1fa7903c17 |  |
|  | 467ad45e64 |  |
|  | 70a4875c68 |  |
|  | 952d2b3b27 |  |
|  | 0857060867 |  |
|  | 4b011087d2 |  |
|  | c14ee7268d |  |
|  | 5c7c3a6c40 |  |
|  | b30a9d520f |  |
|  | 9cc6a7951d |  |
|  | 73a1130c98 |  |
|  | c81aaa70c5 |  |
|  | 0c80ddb1b6 |  |
|  | 2863dcb9ea |  |
|  | ce4b6b5464 |  |
|  | c93db748ba |  |
|  | ccafc1019c |  |
|  | 6bf6671725 |  |
|  | a5b128b983 |  |
|  | 747de1ac95 |  |
|  | 687e164470 |  |
|  | 68b9d9668b |  |
|  | c80c6ef5f4 |  |
|  | a279ce6330 |  |
|  | e142ece77c |  |
|  | 7f6a5f44f3 |  |
|  | a280c6b671 |  |
|  | 5371299b32 |  |
|  | a23bb9f0bd |  |
|  | bca3806794 |  |
|  | a14b1f11a4 |  |
|  | 6850904568 |  |
|  | a3887807bc |  |
|  | 0914df3bbd |  |
|  | e501279798 |  |
|  | 6d61e1ea2a |  |
|  | 4d45fc0e87 |  |
|  | 23d7f87cef |  |
|  | a4db00d7ef |  |
|  | b6a4e1213c |  |
|  | 86088d9c85 |  |
|  | e10e610589 |  |
|  | e16a25b6cc |  |
|  | f129dfdee6 |  |
1 .bazelrc

@@ -22,6 +22,7 @@ coverage --define=coverage_enabled=1
 build --workspace_status_command=./hack/workspace_status.sh
 
 build --define blst_disabled=false
+build --compilation_mode=opt
 run --define blst_disabled=false
 
 build:blst_disabled --define blst_disabled=true
 
7 .github/PULL_REQUEST_TEMPLATE.md (vendored)

@@ -10,7 +10,6 @@
    in review.
 4. Note that PRs updating dependencies and new Go versions are not accepted.
    Please file an issue instead.
-5. A changelog entry is required for user facing issues.
 -->
 
 **What type of PR is this?**
@@ -29,9 +28,3 @@
 Fixes #
 
 **Other notes for review**
-
-**Acknowledgements**
-
-- [ ] I have read [CONTRIBUTING.md](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md).
-- [ ] I have made an appropriate entry to [CHANGELOG.md](https://github.com/prysmaticlabs/prysm/blob/develop/CHANGELOG.md).
-- [ ] I have added a description to this PR with sufficient context for reviewers to understand this PR.
33 .github/workflows/changelog.yml (vendored)

@@ -1,33 +0,0 @@
-name: CI
-
-on:
-  pull_request:
-    branches:
-      - develop
-
-jobs:
-  changed_files:
-    runs-on: ubuntu-latest
-    name: Check CHANGELOG.md
-    steps:
-      - uses: actions/checkout@v4
-      - name: changelog modified
-        id: changelog-modified
-        uses: tj-actions/changed-files@v45
-        with:
-          files: CHANGELOG.md
-
-      - name: List all changed files
-        env:
-          ALL_CHANGED_FILES: ${{ steps.changelog-modified.outputs.all_changed_files }}
-        run: |
-          if [[ ${ALL_CHANGED_FILES[*]} =~ (^|[[:space:]])"CHANGELOG.md"($|[[:space:]]) ]];
-          then
-            echo "CHANGELOG.md was modified.";
-            exit 0;
-          else
-            echo "CHANGELOG.md was not modified.";
-            echo "Please see CHANGELOG.md and follow the instructions to add your changes to that file."
-            echo "In some rare scenarios, a changelog entry is not required and this CI check can be ignored."
-            exit 1;
-          fi
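The deleted workflow's core logic is a whitespace-delimited membership test over the changed-file list. A minimal Go sketch of the same check (a hypothetical standalone program, not part of the repository):

```go
package main

import (
	"fmt"
	"os"
	"slices"
	"strings"
)

// changelogModified reports whether CHANGELOG.md appears in the
// whitespace-separated file list, mirroring the bash regex above.
func changelogModified(allChangedFiles string) bool {
	return slices.Contains(strings.Fields(allChangedFiles), "CHANGELOG.md")
}

func main() {
	if changelogModified(os.Getenv("ALL_CHANGED_FILES")) {
		fmt.Println("CHANGELOG.md was modified.")
		return
	}
	fmt.Println("CHANGELOG.md was not modified.")
	os.Exit(1)
}
```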
6 .github/workflows/go.yml (vendored)

@@ -48,13 +48,13 @@ jobs:
       - name: Set up Go 1.22
         uses: actions/setup-go@v3
         with:
-          go-version: '1.22.3'
+          go-version: '1.22.6'
         id: go
 
       - name: Golangci-lint
         uses: golangci/golangci-lint-action@v3
         with:
-          version: v1.55.2
+          version: v1.60.2
           args: --config=.golangci.yml --out-${NO_FUTURE}format colored-line-number
 
   build:
@@ -64,7 +64,7 @@
       - name: Set up Go 1.x
         uses: actions/setup-go@v2
         with:
-          go-version: '1.22.3'
+          go-version: '1.22.6'
         id: go
 
       - name: Check out code into the Go module directory
.golangci.yml

@@ -6,7 +6,7 @@ run:
     - proto
     - tools/analyzers
   timeout: 10m
-  go: '1.22.3'
+  go: '1.22.6'
 
 linters:
   enable-all: true
2582 CHANGELOG.md

File diff suppressed because it is too large
CONTRIBUTING.md

@@ -6,8 +6,6 @@ Excited by our work and want to get involved in building out our sharding releases?
 
 You can explore our [Open Issues](https://github.com/prysmaticlabs/prysm/issues) in-the works for our different releases. Feel free to fork our repo and start creating PR’s after assigning yourself to an issue of interest. We are always chatting on [Discord](https://discord.gg/CTYGPUJ) drop us a line there if you want to get more involved or have any questions on our implementation!
 
-Please, do not send pull requests for trivial changes, such as typos, these will be rejected. These types of pull requests incur a cost to reviewers and do not provide much value to the project. If you are unsure, please open an issue first to discuss the change.
-
 ## Contribution Steps
 
 **1. Set up Prysm following the instructions in README.md.**
@@ -122,19 +120,15 @@ $ git push myrepo feature-in-progress-branch
 
 Navigate to your fork of the repo on GitHub. On the upper left where the current branch is listed, change the branch to your feature-in-progress-branch. Open the files that you have worked on and check to make sure they include your changes.
 
-**16. Add an entry to CHANGELOG.md.**
+**16. Create a pull request.**
 
-If your change is user facing, you must include a CHANGELOG.md entry. See the [Maintaining CHANGELOG.md](#maintaining-changelogmd) section for more information.
+Navigate your browser to https://github.com/prysmaticlabs/prysm and click on the new pull request button. In the “base” box on the left, leave the default selection “base master”, the branch that you want your changes to be applied to. In the “compare” box on the right, select feature-in-progress-branch, the branch containing the changes you want to apply. You will then be asked to answer a few questions about your pull request. After you complete the questionnaire, the pull request will appear in the list of pull requests at https://github.com/prysmaticlabs/prysm/pulls.
 
-**17. Create a pull request.**
-
-Navigate your browser to https://github.com/prysmaticlabs/prysm and click on the new pull request button. In the “base” box on the left, leave the default selection “base master”, the branch that you want your changes to be applied to. In the “compare” box on the right, select feature-in-progress-branch, the branch containing the changes you want to apply. You will then be asked to answer a few questions about your pull request. After you complete the questionnaire, the pull request will appear in the list of pull requests at https://github.com/prysmaticlabs/prysm/pulls. Ensure that you have added an entry to CHANGELOG.md if your PR is a user-facing change. See the [Maintaining CHANGELOG.md](#maintaining-changelogmd) section for more information.
-
-**18. Respond to comments by Core Contributors.**
+**17. Respond to comments by Core Contributors.**
 
 Core Contributors may ask questions and request that you make edits. If you set notifications at the top of the page to “not watching,” you will still be notified by email whenever someone comments on the page of a pull request you have created. If you are asked to modify your pull request, repeat steps 8 through 15, then leave a comment to notify the Core Contributors that the pull request is ready for further review.
 
-**19. If the number of commits becomes excessive, you may be asked to squash your commits.**
+**18. If the number of commits becomes excessive, you may be asked to squash your commits.**
 
 You can do this with an interactive rebase. Start by running the following command to determine the commit that is the base of your branch...
 
@@ -142,7 +136,7 @@ Core Contributors may ask questions and request that you make edits. If you set
 $ git merge-base feature-in-progress-branch prysm/master
 ```
 
-**20. The previous command will return a commit-hash that you should use in the following command.**
+**19. The previous command will return a commit-hash that you should use in the following command.**
 
 ```
 $ git rebase -i commit-hash
@@ -166,30 +160,13 @@ squash hash add a feature
 
 Save and close the file, then a commit command will appear in the terminal that squashes the smaller commits into one. Check to be sure the commit message accurately reflects your changes and then hit enter to execute it.
 
-**21. Update your pull request with the following command.**
+**20. Update your pull request with the following command.**
 
 ```
 $ git push myrepo feature-in-progress-branch -f
 ```
 
-**22. Finally, again leave a comment to the Core Contributors on the pull request to let them know that the pull request has been updated.**
-
-## Maintaining CHANGELOG.md
-
-This project follows the changelog guidelines from [keepachangelog.com](https://keepachangelog.com/en/1.1.0/).
-
-All PRs with user facing changes should have an entry in the CHANGELOG.md file and the change should be categorized in the appropriate category within the "Unreleased" section. The categories are:
-
-- `Added` for new features.
-- `Changed` for changes in existing functionality.
-- `Deprecated` for soon-to-be removed features.
-- `Removed` for now removed features.
-- `Fixed` for any bug fixes.
-- `Security` in case of vulnerabilities. Please see the [Security Policy](SECURITY.md) for responsible disclosure before adding a change with this category.
-
-### Releasing
-
-When a new release is made, the "Unreleased" section should be moved to a new section with the release version and the current date. Then a new "Unreleased" section is made at the top of the file with the categories listed above.
+**21. Finally, again leave a comment to the Core Contributors on the pull request to let them know that the pull request has been updated.**
 
 ## Contributor Responsibilities
10 WORKSPACE

@@ -227,7 +227,7 @@ filegroup(
     url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
 )
 
-consensus_spec_version = "v1.5.0-alpha.5"
+consensus_spec_version = "v1.5.0-alpha.3"
 
 bls_test_version = "v0.1.1"
 
@@ -243,7 +243,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    integrity = "sha256-R9vG5HEL5eGMOAmbkKfJ2jfelNqL5V0xBUPiXOiGM6U=",
+    integrity = "sha256-+byv+GUOQytex5GgtjBGVoNDseJZbiBdAjEtlgCbjEo=",
     url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
 )
 
@@ -259,7 +259,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    integrity = "sha256-AEIiEOlf1XuxoRMCsN+kgJMo4LrS05+biTA1p/7Ro00=",
+    integrity = "sha256-JJUy/jT1h3kGQkinTuzL7gMOA1+qgmPgJXVrYuH63Cg=",
     url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
 )
 
@@ -275,7 +275,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    integrity = "sha256-LH/Xr20yrJRYnbpjRGupMWTIOWt3cpxZJWXgThwVDsk=",
+    integrity = "sha256-T2VM4Qd0SwgGnTjWxjOX297DqEsovO9Ueij1UEJy48Y=",
     url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
 )
 
@@ -290,7 +290,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    integrity = "sha256-mlytz4MPjKh0DwV7FMiAtnRbJw9B6o78/x66/vmnYc8=",
+    integrity = "sha256-OP9BCBcQ7i+93bwj7ktY8pZ5uWsGjgTe4XTp7BDhX+I=",
     strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
     url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
 )
@@ -317,94 +317,6 @@ type BlindedBeaconBlockBodyDeneb struct {
 	BlobKzgCommitments []string `json:"blob_kzg_commitments"`
 }
 
-type SignedBeaconBlockContentsElectra struct {
-	SignedBlock *SignedBeaconBlockElectra `json:"signed_block"`
-	KzgProofs   []string                  `json:"kzg_proofs"`
-	Blobs       []string                  `json:"blobs"`
-}
-
-type BeaconBlockContentsElectra struct {
-	Block     *BeaconBlockElectra `json:"block"`
-	KzgProofs []string            `json:"kzg_proofs"`
-	Blobs     []string            `json:"blobs"`
-}
-
-type SignedBeaconBlockElectra struct {
-	Message   *BeaconBlockElectra `json:"message"`
-	Signature string              `json:"signature"`
-}
-
-var _ SignedMessageJsoner = &SignedBeaconBlockElectra{}
-
-func (s *SignedBeaconBlockElectra) MessageRawJson() ([]byte, error) {
-	return json.Marshal(s.Message)
-}
-
-func (s *SignedBeaconBlockElectra) SigString() string {
-	return s.Signature
-}
-
-type BeaconBlockElectra struct {
-	Slot          string                  `json:"slot"`
-	ProposerIndex string                  `json:"proposer_index"`
-	ParentRoot    string                  `json:"parent_root"`
-	StateRoot     string                  `json:"state_root"`
-	Body          *BeaconBlockBodyElectra `json:"body"`
-}
-
-type BeaconBlockBodyElectra struct {
-	RandaoReveal          string                        `json:"randao_reveal"`
-	Eth1Data              *Eth1Data                     `json:"eth1_data"`
-	Graffiti              string                        `json:"graffiti"`
-	ProposerSlashings     []*ProposerSlashing           `json:"proposer_slashings"`
-	AttesterSlashings     []*AttesterSlashingElectra    `json:"attester_slashings"`
-	Attestations          []*AttestationElectra         `json:"attestations"`
-	Deposits              []*Deposit                    `json:"deposits"`
-	VoluntaryExits        []*SignedVoluntaryExit        `json:"voluntary_exits"`
-	SyncAggregate         *SyncAggregate                `json:"sync_aggregate"`
-	ExecutionPayload      *ExecutionPayloadElectra      `json:"execution_payload"`
-	BLSToExecutionChanges []*SignedBLSToExecutionChange `json:"bls_to_execution_changes"`
-	BlobKzgCommitments    []string                      `json:"blob_kzg_commitments"`
-}
-
-type BlindedBeaconBlockElectra struct {
-	Slot          string                         `json:"slot"`
-	ProposerIndex string                         `json:"proposer_index"`
-	ParentRoot    string                         `json:"parent_root"`
-	StateRoot     string                         `json:"state_root"`
-	Body          *BlindedBeaconBlockBodyElectra `json:"body"`
-}
-
-type SignedBlindedBeaconBlockElectra struct {
-	Message   *BlindedBeaconBlockElectra `json:"message"`
-	Signature string                     `json:"signature"`
-}
-
-var _ SignedMessageJsoner = &SignedBlindedBeaconBlockElectra{}
-
-func (s *SignedBlindedBeaconBlockElectra) MessageRawJson() ([]byte, error) {
-	return json.Marshal(s.Message)
-}
-
-func (s *SignedBlindedBeaconBlockElectra) SigString() string {
-	return s.Signature
-}
-
-type BlindedBeaconBlockBodyElectra struct {
-	RandaoReveal           string                         `json:"randao_reveal"`
-	Eth1Data               *Eth1Data                      `json:"eth1_data"`
-	Graffiti               string                         `json:"graffiti"`
-	ProposerSlashings      []*ProposerSlashing            `json:"proposer_slashings"`
-	AttesterSlashings      []*AttesterSlashingElectra     `json:"attester_slashings"`
-	Attestations           []*AttestationElectra          `json:"attestations"`
-	Deposits               []*Deposit                     `json:"deposits"`
-	VoluntaryExits         []*SignedVoluntaryExit         `json:"voluntary_exits"`
-	SyncAggregate          *SyncAggregate                 `json:"sync_aggregate"`
-	ExecutionPayloadHeader *ExecutionPayloadHeaderElectra `json:"execution_payload_header"`
-	BLSToExecutionChanges  []*SignedBLSToExecutionChange  `json:"bls_to_execution_changes"`
-	BlobKzgCommitments     []string                       `json:"blob_kzg_commitments"`
-}
-
 type SignedBeaconBlockHeaderContainer struct {
 	Header *SignedBeaconBlockHeader `json:"header"`
 	Root   string                   `json:"root"`
@@ -533,49 +445,3 @@ type ExecutionPayloadHeaderDeneb struct {
 	BlobGasUsed   string `json:"blob_gas_used"`
 	ExcessBlobGas string `json:"excess_blob_gas"`
 }
 
-type ExecutionPayloadElectra struct {
-	ParentHash            string                  `json:"parent_hash"`
-	FeeRecipient          string                  `json:"fee_recipient"`
-	StateRoot             string                  `json:"state_root"`
-	ReceiptsRoot          string                  `json:"receipts_root"`
-	LogsBloom             string                  `json:"logs_bloom"`
-	PrevRandao            string                  `json:"prev_randao"`
-	BlockNumber           string                  `json:"block_number"`
-	GasLimit              string                  `json:"gas_limit"`
-	GasUsed               string                  `json:"gas_used"`
-	Timestamp             string                  `json:"timestamp"`
-	ExtraData             string                  `json:"extra_data"`
-	BaseFeePerGas         string                  `json:"base_fee_per_gas"`
-	BlockHash             string                  `json:"block_hash"`
-	Transactions          []string                `json:"transactions"`
-	Withdrawals           []*Withdrawal           `json:"withdrawals"`
-	BlobGasUsed           string                  `json:"blob_gas_used"`
-	ExcessBlobGas         string                  `json:"excess_blob_gas"`
-	DepositRequests       []*DepositRequest       `json:"deposit_requests"`
-	WithdrawalRequests    []*WithdrawalRequest    `json:"withdrawal_requests"`
-	ConsolidationRequests []*ConsolidationRequest `json:"consolidation_requests"`
-}
-
-type ExecutionPayloadHeaderElectra struct {
-	ParentHash                string `json:"parent_hash"`
-	FeeRecipient              string `json:"fee_recipient"`
-	StateRoot                 string `json:"state_root"`
-	ReceiptsRoot              string `json:"receipts_root"`
-	LogsBloom                 string `json:"logs_bloom"`
-	PrevRandao                string `json:"prev_randao"`
-	BlockNumber               string `json:"block_number"`
-	GasLimit                  string `json:"gas_limit"`
-	GasUsed                   string `json:"gas_used"`
-	Timestamp                 string `json:"timestamp"`
-	ExtraData                 string `json:"extra_data"`
-	BaseFeePerGas             string `json:"base_fee_per_gas"`
-	BlockHash                 string `json:"block_hash"`
-	TransactionsRoot          string `json:"transactions_root"`
-	WithdrawalsRoot           string `json:"withdrawals_root"`
-	BlobGasUsed               string `json:"blob_gas_used"`
-	ExcessBlobGas             string `json:"excess_blob_gas"`
-	DepositRequestsRoot       string `json:"deposit_requests_root"`
-	WithdrawalRequestsRoot    string `json:"withdrawal_requests_root"`
-	ConsolidationRequestsRoot string `json:"consolidation_requests_root"`
-}
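The removed Electra block types follow the repository's SignedMessageJsoner pattern: a compile-time assertion plus MessageRawJson and SigString accessors. A self-contained sketch of the pattern, assuming the interface consists of exactly those two methods (its definition is not shown in this diff):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Assumed shape of SignedMessageJsoner; the interface definition is not
// part of this diff, so this is an illustration only.
type SignedMessageJsoner interface {
	MessageRawJson() ([]byte, error)
	SigString() string
}

type message struct {
	Slot string `json:"slot"`
}

type signedMessage struct {
	Message   *message `json:"message"`
	Signature string   `json:"signature"`
}

// Compile-time check that signedMessage satisfies the interface,
// mirroring the `var _ SignedMessageJsoner = ...` lines in the diff.
var _ SignedMessageJsoner = &signedMessage{}

func (s *signedMessage) MessageRawJson() ([]byte, error) { return json.Marshal(s.Message) }
func (s *signedMessage) SigString() string               { return s.Signature }

func main() {
	s := &signedMessage{Message: &message{Slot: "42"}, Signature: "0xsig"}
	raw, _ := s.MessageRawJson()
	fmt.Println(string(raw), s.SigString()) // {"slot":"42"} 0xsig
}
```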
@@ -340,42 +340,6 @@ func (a *AggregateAttestationAndProof) ToConsensus() (*eth.AggregateAttestationA
 	}, nil
 }
 
-func (s *SignedAggregateAttestationAndProofElectra) ToConsensus() (*eth.SignedAggregateAttestationAndProofElectra, error) {
-	msg, err := s.Message.ToConsensus()
-	if err != nil {
-		return nil, server.NewDecodeError(err, "Message")
-	}
-	sig, err := bytesutil.DecodeHexWithLength(s.Signature, fieldparams.BLSSignatureLength)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "Signature")
-	}
-
-	return &eth.SignedAggregateAttestationAndProofElectra{
-		Message:   msg,
-		Signature: sig,
-	}, nil
-}
-
-func (a *AggregateAttestationAndProofElectra) ToConsensus() (*eth.AggregateAttestationAndProofElectra, error) {
-	aggIndex, err := strconv.ParseUint(a.AggregatorIndex, 10, 64)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "AggregatorIndex")
-	}
-	agg, err := a.Aggregate.ToConsensus()
-	if err != nil {
-		return nil, server.NewDecodeError(err, "Aggregate")
-	}
-	proof, err := bytesutil.DecodeHexWithLength(a.SelectionProof, 96)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "SelectionProof")
-	}
-	return &eth.AggregateAttestationAndProofElectra{
-		AggregatorIndex: primitives.ValidatorIndex(aggIndex),
-		Aggregate:       agg,
-		SelectionProof:  proof,
-	}, nil
-}
-
 func (a *Attestation) ToConsensus() (*eth.Attestation, error) {
 	aggBits, err := hexutil.Decode(a.AggregationBits)
 	if err != nil {
@@ -405,41 +369,6 @@ func AttFromConsensus(a *eth.Attestation) *Attestation {
 	}
 }
 
-func (a *AttestationElectra) ToConsensus() (*eth.AttestationElectra, error) {
-	aggBits, err := hexutil.Decode(a.AggregationBits)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "AggregationBits")
-	}
-	data, err := a.Data.ToConsensus()
-	if err != nil {
-		return nil, server.NewDecodeError(err, "Data")
-	}
-	sig, err := bytesutil.DecodeHexWithLength(a.Signature, fieldparams.BLSSignatureLength)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "Signature")
-	}
-	committeeBits, err := hexutil.Decode(a.CommitteeBits)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "CommitteeBits")
-	}
-
-	return &eth.AttestationElectra{
-		AggregationBits: aggBits,
-		Data:            data,
-		Signature:       sig,
-		CommitteeBits:   committeeBits,
-	}, nil
-}
-
-func AttElectraFromConsensus(a *eth.AttestationElectra) *AttestationElectra {
-	return &AttestationElectra{
-		AggregationBits: hexutil.Encode(a.AggregationBits),
-		Data:            AttDataFromConsensus(a.Data),
-		Signature:       hexutil.Encode(a.Signature),
-		CommitteeBits:   hexutil.Encode(a.CommitteeBits),
-	}
-}
-
 func (a *AttestationData) ToConsensus() (*eth.AttestationData, error) {
 	slot, err := strconv.ParseUint(a.Slot, 10, 64)
 	if err != nil {
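Every ToConsensus converter above follows the same shape: decode each JSON field in turn, and wrap the first failure with the field's name. A self-contained sketch with simplified stand-ins for bytesutil.DecodeHexWithLength and server.NewDecodeError (the real helpers live elsewhere in the repository):

```go
package main

import (
	"encoding/hex"
	"fmt"
	"strings"
)

// decodeHexWithLength mimics bytesutil.DecodeHexWithLength: strip the 0x
// prefix, decode, and enforce an exact byte length.
func decodeHexWithLength(s string, length int) ([]byte, error) {
	b, err := hex.DecodeString(strings.TrimPrefix(s, "0x"))
	if err != nil {
		return nil, err
	}
	if len(b) != length {
		return nil, fmt.Errorf("length %d, want %d", len(b), length)
	}
	return b, nil
}

// decodeError mimics server.NewDecodeError: it scopes the failure to the
// JSON field that could not be parsed.
func decodeError(err error, field string) error {
	return fmt.Errorf("could not decode %s: %w", field, err)
}

func main() {
	if _, err := decodeHexWithLength("0x1234", 96); err != nil {
		fmt.Println(decodeError(err, "Signature")) // could not decode Signature: ...
	}
}
```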
@@ -694,18 +623,6 @@ func (s *AttesterSlashing) ToConsensus() (*eth.AttesterSlashing, error) {
 	return &eth.AttesterSlashing{Attestation_1: att1, Attestation_2: att2}, nil
 }
 
-func (s *AttesterSlashingElectra) ToConsensus() (*eth.AttesterSlashingElectra, error) {
-	att1, err := s.Attestation1.ToConsensus()
-	if err != nil {
-		return nil, server.NewDecodeError(err, "Attestation1")
-	}
-	att2, err := s.Attestation2.ToConsensus()
-	if err != nil {
-		return nil, server.NewDecodeError(err, "Attestation2")
-	}
-	return &eth.AttesterSlashingElectra{Attestation_1: att1, Attestation_2: att2}, nil
-}
-
 func (a *IndexedAttestation) ToConsensus() (*eth.IndexedAttestation, error) {
 	indices := make([]uint64, len(a.AttestingIndices))
 	var err error
@@ -731,31 +648,6 @@ func (a *IndexedAttestation) ToConsensus() (*eth.IndexedAttestation, error) {
 	}, nil
 }
 
-func (a *IndexedAttestationElectra) ToConsensus() (*eth.IndexedAttestationElectra, error) {
-	indices := make([]uint64, len(a.AttestingIndices))
-	var err error
-	for i, ix := range a.AttestingIndices {
-		indices[i], err = strconv.ParseUint(ix, 10, 64)
-		if err != nil {
-			return nil, server.NewDecodeError(err, fmt.Sprintf("AttestingIndices[%d]", i))
-		}
-	}
-	data, err := a.Data.ToConsensus()
-	if err != nil {
-		return nil, server.NewDecodeError(err, "Data")
-	}
-	sig, err := bytesutil.DecodeHexWithLength(a.Signature, fieldparams.BLSSignatureLength)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "Signature")
-	}
-
-	return &eth.IndexedAttestationElectra{
-		AttestingIndices: indices,
-		Data:             data,
-		Signature:        sig,
-	}, nil
-}
-
 func WithdrawalsFromConsensus(ws []*enginev1.Withdrawal) []*Withdrawal {
 	result := make([]*Withdrawal, len(ws))
 	for i, w := range ws {
@@ -773,126 +665,6 @@ func WithdrawalFromConsensus(w *enginev1.Withdrawal) *Withdrawal {
 	}
 }
 
-func WithdrawalRequestsFromConsensus(ws []*enginev1.WithdrawalRequest) []*WithdrawalRequest {
-	result := make([]*WithdrawalRequest, len(ws))
-	for i, w := range ws {
-		result[i] = WithdrawalRequestFromConsensus(w)
-	}
-	return result
-}
-
-func WithdrawalRequestFromConsensus(w *enginev1.WithdrawalRequest) *WithdrawalRequest {
-	return &WithdrawalRequest{
-		SourceAddress:   hexutil.Encode(w.SourceAddress),
-		ValidatorPubkey: hexutil.Encode(w.ValidatorPubkey),
-		Amount:          fmt.Sprintf("%d", w.Amount),
-	}
-}
-
-func (w *WithdrawalRequest) ToConsensus() (*enginev1.WithdrawalRequest, error) {
-	src, err := bytesutil.DecodeHexWithLength(w.SourceAddress, common.AddressLength)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "SourceAddress")
-	}
-	pubkey, err := bytesutil.DecodeHexWithLength(w.ValidatorPubkey, fieldparams.BLSPubkeyLength)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "ValidatorPubkey")
-	}
-	amount, err := strconv.ParseUint(w.Amount, 10, 64)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "Amount")
-	}
-	return &enginev1.WithdrawalRequest{
-		SourceAddress:   src,
-		ValidatorPubkey: pubkey,
-		Amount:          amount,
-	}, nil
-}
-
-func ConsolidationRequestsFromConsensus(cs []*enginev1.ConsolidationRequest) []*ConsolidationRequest {
-	result := make([]*ConsolidationRequest, len(cs))
-	for i, c := range cs {
-		result[i] = ConsolidationRequestFromConsensus(c)
-	}
-	return result
-}
-
-func ConsolidationRequestFromConsensus(c *enginev1.ConsolidationRequest) *ConsolidationRequest {
-	return &ConsolidationRequest{
-		SourceAddress: hexutil.Encode(c.SourceAddress),
-		SourcePubkey:  hexutil.Encode(c.SourcePubkey),
-		TargetPubkey:  hexutil.Encode(c.TargetPubkey),
-	}
-}
-
-func (c *ConsolidationRequest) ToConsensus() (*enginev1.ConsolidationRequest, error) {
-	srcAddress, err := bytesutil.DecodeHexWithLength(c.SourceAddress, common.AddressLength)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "SourceAddress")
-	}
-	srcPubkey, err := bytesutil.DecodeHexWithLength(c.SourcePubkey, fieldparams.BLSPubkeyLength)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "SourcePubkey")
-	}
-	targetPubkey, err := bytesutil.DecodeHexWithLength(c.TargetPubkey, fieldparams.BLSPubkeyLength)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "TargetPubkey")
-	}
-	return &enginev1.ConsolidationRequest{
-		SourceAddress: srcAddress,
-		SourcePubkey:  srcPubkey,
-		TargetPubkey:  targetPubkey,
-	}, nil
-}
-
-func DepositRequestsFromConsensus(ds []*enginev1.DepositRequest) []*DepositRequest {
-	result := make([]*DepositRequest, len(ds))
-	for i, d := range ds {
-		result[i] = DepositRequestFromConsensus(d)
-	}
-	return result
-}
-
-func DepositRequestFromConsensus(d *enginev1.DepositRequest) *DepositRequest {
-	return &DepositRequest{
-		Pubkey:                hexutil.Encode(d.Pubkey),
-		WithdrawalCredentials: hexutil.Encode(d.WithdrawalCredentials),
-		Amount:                fmt.Sprintf("%d", d.Amount),
-		Signature:             hexutil.Encode(d.Signature),
-		Index:                 fmt.Sprintf("%d", d.Index),
-	}
-}
-
-func (d *DepositRequest) ToConsensus() (*enginev1.DepositRequest, error) {
-	pubkey, err := bytesutil.DecodeHexWithLength(d.Pubkey, fieldparams.BLSPubkeyLength)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "Pubkey")
-	}
-	withdrawalCredentials, err := bytesutil.DecodeHexWithLength(d.WithdrawalCredentials, fieldparams.RootLength)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "WithdrawalCredentials")
-	}
-	amount, err := strconv.ParseUint(d.Amount, 10, 64)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "Amount")
-	}
-	sig, err := bytesutil.DecodeHexWithLength(d.Signature, fieldparams.BLSSignatureLength)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "Signature")
-	}
-	index, err := strconv.ParseUint(d.Index, 10, 64)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "Index")
-	}
-	return &enginev1.DepositRequest{
-		Pubkey:                pubkey,
-		WithdrawalCredentials: withdrawalCredentials,
-		Amount:                amount,
-		Signature:             sig,
-		Index:                 index,
-	}, nil
-}
-
 func ProposerSlashingsToConsensus(src []*ProposerSlashing) ([]*eth.ProposerSlashing, error) {
 	if src == nil {
 		return nil, errNilValue
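The paired FromConsensus/ToConsensus helpers are intended to round-trip: formatting a consensus value to its JSON string form and parsing it back should recover the original. A sketch with cut-down stand-in types (the real types carry more fields):

```go
package main

import (
	"fmt"
	"strconv"
)

// Cut-down stand-ins for the JSON-facing and consensus-facing request types.
type jsonWithdrawalRequest struct{ Amount string }
type consensusWithdrawalRequest struct{ Amount uint64 }

// fromConsensus formats the numeric field as a decimal string.
func fromConsensus(w *consensusWithdrawalRequest) *jsonWithdrawalRequest {
	return &jsonWithdrawalRequest{Amount: fmt.Sprintf("%d", w.Amount)}
}

// toConsensus parses the string back into the numeric form.
func (w *jsonWithdrawalRequest) toConsensus() (*consensusWithdrawalRequest, error) {
	amount, err := strconv.ParseUint(w.Amount, 10, 64)
	if err != nil {
		return nil, err
	}
	return &consensusWithdrawalRequest{Amount: amount}, nil
}

func main() {
	orig := &consensusWithdrawalRequest{Amount: 32_000_000_000}
	rt, err := fromConsensus(orig).toConsensus()
	fmt.Println(rt.Amount == orig.Amount, err) // true <nil>
}
```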
@@ -1158,138 +930,6 @@ func AttesterSlashingFromConsensus(src *eth.AttesterSlashing) *AttesterSlashing
 	}
 }
 
-func AttesterSlashingsElectraToConsensus(src []*AttesterSlashingElectra) ([]*eth.AttesterSlashingElectra, error) {
-	if src == nil {
-		return nil, errNilValue
-	}
-	err := slice.VerifyMaxLength(src, 2)
-	if err != nil {
-		return nil, err
-	}
-
-	attesterSlashings := make([]*eth.AttesterSlashingElectra, len(src))
-	for i, s := range src {
-		if s == nil {
-			return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d]", i))
-		}
-		if s.Attestation1 == nil {
-			return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d].Attestation1", i))
-		}
-		if s.Attestation2 == nil {
-			return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d].Attestation2", i))
-		}
-
-		a1Sig, err := bytesutil.DecodeHexWithLength(s.Attestation1.Signature, fieldparams.BLSSignatureLength)
-		if err != nil {
-			return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation1.Signature", i))
-		}
-		err = slice.VerifyMaxLength(s.Attestation1.AttestingIndices, 2048)
-		if err != nil {
-			return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation1.AttestingIndices", i))
-		}
-		a1AttestingIndices := make([]uint64, len(s.Attestation1.AttestingIndices))
-		for j, ix := range s.Attestation1.AttestingIndices {
-			attestingIndex, err := strconv.ParseUint(ix, 10, 64)
-			if err != nil {
-				return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation1.AttestingIndices[%d]", i, j))
-			}
-			a1AttestingIndices[j] = attestingIndex
-		}
-		a1Data, err := s.Attestation1.Data.ToConsensus()
-		if err != nil {
-			return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation1.Data", i))
-		}
-		a2Sig, err := bytesutil.DecodeHexWithLength(s.Attestation2.Signature, fieldparams.BLSSignatureLength)
-		if err != nil {
-			return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation2.Signature", i))
-		}
-		err = slice.VerifyMaxLength(s.Attestation2.AttestingIndices, 2048)
-		if err != nil {
-			return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation2.AttestingIndices", i))
-		}
-		a2AttestingIndices := make([]uint64, len(s.Attestation2.AttestingIndices))
-		for j, ix := range s.Attestation2.AttestingIndices {
-			attestingIndex, err := strconv.ParseUint(ix, 10, 64)
-			if err != nil {
-				return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation2.AttestingIndices[%d]", i, j))
-			}
-			a2AttestingIndices[j] = attestingIndex
-		}
-		a2Data, err := s.Attestation2.Data.ToConsensus()
-		if err != nil {
-			return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation2.Data", i))
-		}
-		attesterSlashings[i] = &eth.AttesterSlashingElectra{
-			Attestation_1: &eth.IndexedAttestationElectra{
-				AttestingIndices: a1AttestingIndices,
-				Data:             a1Data,
-				Signature:        a1Sig,
-			},
-			Attestation_2: &eth.IndexedAttestationElectra{
-				AttestingIndices: a2AttestingIndices,
-				Data:             a2Data,
-				Signature:        a2Sig,
-			},
-		}
-	}
-	return attesterSlashings, nil
-}
-
-func AttesterSlashingsElectraFromConsensus(src []*eth.AttesterSlashingElectra) []*AttesterSlashingElectra {
-	attesterSlashings := make([]*AttesterSlashingElectra, len(src))
-	for i, s := range src {
-		attesterSlashings[i] = AttesterSlashingElectraFromConsensus(s)
-	}
-	return attesterSlashings
-}
-
-func AttesterSlashingElectraFromConsensus(src *eth.AttesterSlashingElectra) *AttesterSlashingElectra {
-	a1AttestingIndices := make([]string, len(src.Attestation_1.AttestingIndices))
-	for j, ix := range src.Attestation_1.AttestingIndices {
-		a1AttestingIndices[j] = fmt.Sprintf("%d", ix)
-	}
-	a2AttestingIndices := make([]string, len(src.Attestation_2.AttestingIndices))
-	for j, ix := range src.Attestation_2.AttestingIndices {
-		a2AttestingIndices[j] = fmt.Sprintf("%d", ix)
-	}
-	return &AttesterSlashingElectra{
-		Attestation1: &IndexedAttestationElectra{
-			AttestingIndices: a1AttestingIndices,
-			Data: &AttestationData{
-				Slot:            fmt.Sprintf("%d", src.Attestation_1.Data.Slot),
-				CommitteeIndex:  fmt.Sprintf("%d", src.Attestation_1.Data.CommitteeIndex),
-				BeaconBlockRoot: hexutil.Encode(src.Attestation_1.Data.BeaconBlockRoot),
-				Source: &Checkpoint{
-					Epoch: fmt.Sprintf("%d", src.Attestation_1.Data.Source.Epoch),
-					Root:  hexutil.Encode(src.Attestation_1.Data.Source.Root),
-				},
-				Target: &Checkpoint{
-					Epoch: fmt.Sprintf("%d", src.Attestation_1.Data.Target.Epoch),
-					Root:  hexutil.Encode(src.Attestation_1.Data.Target.Root),
-				},
-			},
-			Signature: hexutil.Encode(src.Attestation_1.Signature),
-		},
-		Attestation2: &IndexedAttestationElectra{
-			AttestingIndices: a2AttestingIndices,
-			Data: &AttestationData{
-				Slot:            fmt.Sprintf("%d", src.Attestation_2.Data.Slot),
-				CommitteeIndex:  fmt.Sprintf("%d", src.Attestation_2.Data.CommitteeIndex),
-				BeaconBlockRoot: hexutil.Encode(src.Attestation_2.Data.BeaconBlockRoot),
-				Source: &Checkpoint{
-					Epoch: fmt.Sprintf("%d", src.Attestation_2.Data.Source.Epoch),
-					Root:  hexutil.Encode(src.Attestation_2.Data.Source.Root),
-				},
-				Target: &Checkpoint{
-					Epoch: fmt.Sprintf("%d", src.Attestation_2.Data.Target.Epoch),
-					Root:  hexutil.Encode(src.Attestation_2.Data.Target.Root),
-				},
-			},
-			Signature: hexutil.Encode(src.Attestation_2.Signature),
-		},
-	}
-}
-
 func AttsToConsensus(src []*Attestation) ([]*eth.Attestation, error) {
 	if src == nil {
 		return nil, errNilValue
@@ -1317,33 +957,6 @@ func AttsFromConsensus(src []*eth.Attestation) []*Attestation {
 	return atts
 }
 
-func AttsElectraToConsensus(src []*AttestationElectra) ([]*eth.AttestationElectra, error) {
-	if src == nil {
-		return nil, errNilValue
-	}
-	err := slice.VerifyMaxLength(src, 8)
-	if err != nil {
-		return nil, err
-	}
-
-	atts := make([]*eth.AttestationElectra, len(src))
-	for i, a := range src {
-		atts[i], err = a.ToConsensus()
-		if err != nil {
-			return nil, server.NewDecodeError(err, fmt.Sprintf("[%d]", i))
-		}
-	}
-	return atts, nil
-}
-
-func AttsElectraFromConsensus(src []*eth.AttestationElectra) []*AttestationElectra {
-	atts := make([]*AttestationElectra, len(src))
-	for i, a := range src {
-		atts[i] = AttElectraFromConsensus(a)
-	}
-	return atts
-}
-
 func DepositsToConsensus(src []*Deposit) ([]*eth.Deposit, error) {
 	if src == nil {
 		return nil, errNilValue
@@ -1474,37 +1087,3 @@ func DepositSnapshotFromConsensus(ds *eth.DepositSnapshot) *DepositSnapshot {
 		ExecutionBlockHeight: fmt.Sprintf("%d", ds.ExecutionDepth),
 	}
 }
 
-func PendingBalanceDepositsFromConsensus(ds []*eth.PendingBalanceDeposit) []*PendingBalanceDeposit {
-	deposits := make([]*PendingBalanceDeposit, len(ds))
-	for i, d := range ds {
-		deposits[i] = &PendingBalanceDeposit{
-			Index:  fmt.Sprintf("%d", d.Index),
-			Amount: fmt.Sprintf("%d", d.Amount),
-		}
-	}
-	return deposits
-}
-
-func PendingPartialWithdrawalsFromConsensus(ws []*eth.PendingPartialWithdrawal) []*PendingPartialWithdrawal {
-	withdrawals := make([]*PendingPartialWithdrawal, len(ws))
-	for i, w := range ws {
-		withdrawals[i] = &PendingPartialWithdrawal{
-			Index:             fmt.Sprintf("%d", w.Index),
-			Amount:            fmt.Sprintf("%d", w.Amount),
-			WithdrawableEpoch: fmt.Sprintf("%d", w.WithdrawableEpoch),
-		}
-	}
-	return withdrawals
-}
-
-func PendingConsolidationsFromConsensus(cs []*eth.PendingConsolidation) []*PendingConsolidation {
-	consolidations := make([]*PendingConsolidation, len(cs))
-	for i, c := range cs {
-		consolidations[i] = &PendingConsolidation{
-			SourceIndex: fmt.Sprintf("%d", c.SourceIndex),
-			TargetIndex: fmt.Sprintf("%d", c.TargetIndex),
-		}
-	}
-	return consolidations
-}
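The Pending*FromConsensus helpers all repeat the same allocate-loop-convert shape. A hedged generic sketch of that pattern (such a helper does not appear in this diff):

```go
package main

import "fmt"

// mapSlice captures the repeated "allocate, loop, convert" pattern used by
// PendingBalanceDepositsFromConsensus and friends.
func mapSlice[T, U any](in []T, f func(T) U) []U {
	out := make([]U, len(in))
	for i, v := range in {
		out[i] = f(v)
	}
	return out
}

func main() {
	amounts := []uint64{1, 2, 3}
	strs := mapSlice(amounts, func(a uint64) string { return fmt.Sprintf("%d", a) })
	fmt.Println(strs) // [1 2 3]
}
```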
File diff suppressed because it is too large
@@ -593,185 +593,3 @@ func BeaconStateDenebFromConsensus(st beaconState.BeaconState) (*BeaconStateDene
 		HistoricalSummaries:          hs,
 	}, nil
 }
 
-func BeaconStateElectraFromConsensus(st beaconState.BeaconState) (*BeaconStateElectra, error) {
-	srcBr := st.BlockRoots()
-	br := make([]string, len(srcBr))
-	for i, r := range srcBr {
-		br[i] = hexutil.Encode(r)
-	}
-	srcSr := st.StateRoots()
-	sr := make([]string, len(srcSr))
-	for i, r := range srcSr {
-		sr[i] = hexutil.Encode(r)
-	}
-	srcHr, err := st.HistoricalRoots()
-	if err != nil {
-		return nil, err
-	}
-	hr := make([]string, len(srcHr))
-	for i, r := range srcHr {
-		hr[i] = hexutil.Encode(r)
-	}
-	srcVotes := st.Eth1DataVotes()
-	votes := make([]*Eth1Data, len(srcVotes))
-	for i, e := range srcVotes {
-		votes[i] = Eth1DataFromConsensus(e)
-	}
-	srcVals := st.Validators()
-	vals := make([]*Validator, len(srcVals))
-	for i, v := range srcVals {
-		vals[i] = ValidatorFromConsensus(v)
-	}
-	srcBals := st.Balances()
-	bals := make([]string, len(srcBals))
-	for i, b := range srcBals {
-		bals[i] = fmt.Sprintf("%d", b)
-	}
-	srcRm := st.RandaoMixes()
-	rm := make([]string, len(srcRm))
-	for i, m := range srcRm {
-		rm[i] = hexutil.Encode(m)
-	}
-	srcSlashings := st.Slashings()
-	slashings := make([]string, len(srcSlashings))
-	for i, s := range srcSlashings {
-		slashings[i] = fmt.Sprintf("%d", s)
-	}
-	srcPrevPart, err := st.PreviousEpochParticipation()
-	if err != nil {
-		return nil, err
-	}
-	prevPart := make([]string, len(srcPrevPart))
-	for i, p := range srcPrevPart {
-		prevPart[i] = fmt.Sprintf("%d", p)
-	}
-	srcCurrPart, err := st.CurrentEpochParticipation()
-	if err != nil {
-		return nil, err
-	}
-	currPart := make([]string, len(srcCurrPart))
-	for i, p := range srcCurrPart {
-		currPart[i] = fmt.Sprintf("%d", p)
-	}
-	srcIs, err := st.InactivityScores()
-	if err != nil {
-		return nil, err
-	}
-	is := make([]string, len(srcIs))
-	for i, s := range srcIs {
-		is[i] = fmt.Sprintf("%d", s)
-	}
-	currSc, err := st.CurrentSyncCommittee()
-	if err != nil {
-		return nil, err
-	}
-	nextSc, err := st.NextSyncCommittee()
-	if err != nil {
-		return nil, err
-	}
-	execData, err := st.LatestExecutionPayloadHeader()
-	if err != nil {
-		return nil, err
-	}
-	srcPayload, ok := execData.Proto().(*enginev1.ExecutionPayloadHeaderElectra)
-	if !ok {
-		return nil, errPayloadHeaderNotFound
-	}
-	payload, err := ExecutionPayloadHeaderElectraFromConsensus(srcPayload)
-	if err != nil {
-		return nil, err
-	}
-	srcHs, err := st.HistoricalSummaries()
-	if err != nil {
-		return nil, err
-	}
-	hs := make([]*HistoricalSummary, len(srcHs))
-	for i, s := range srcHs {
-		hs[i] = HistoricalSummaryFromConsensus(s)
-	}
-	nwi, err := st.NextWithdrawalIndex()
-	if err != nil {
-		return nil, err
-	}
-	nwvi, err := st.NextWithdrawalValidatorIndex()
-	if err != nil {
-		return nil, err
-	}
-	drsi, err := st.DepositRequestsStartIndex()
-	if err != nil {
-		return nil, err
-	}
-	dbtc, err := st.DepositBalanceToConsume()
-	if err != nil {
-		return nil, err
-	}
-	ebtc, err := st.ExitBalanceToConsume()
-	if err != nil {
-		return nil, err
-	}
-	eee, err := st.EarliestExitEpoch()
-	if err != nil {
-		return nil, err
-	}
-	cbtc, err := st.ConsolidationBalanceToConsume()
-	if err != nil {
-		return nil, err
-	}
-	ece, err := st.EarliestConsolidationEpoch()
-	if err != nil {
-		return nil, err
-	}
-	pbd, err := st.PendingBalanceDeposits()
-	if err != nil {
-		return nil, err
-	}
-	ppw, err := st.PendingPartialWithdrawals()
-	if err != nil {
-		return nil, err
-	}
-	pc, err := st.PendingConsolidations()
-	if err != nil {
-		return nil, err
-	}
-
-	return &BeaconStateElectra{
-		GenesisTime:                   fmt.Sprintf("%d", st.GenesisTime()),
-		GenesisValidatorsRoot:         hexutil.Encode(st.GenesisValidatorsRoot()),
-		Slot:                          fmt.Sprintf("%d", st.Slot()),
-		Fork:                          ForkFromConsensus(st.Fork()),
-		LatestBlockHeader:             BeaconBlockHeaderFromConsensus(st.LatestBlockHeader()),
-		BlockRoots:                    br,
-		StateRoots:                    sr,
-		HistoricalRoots:               hr,
-		Eth1Data:                      Eth1DataFromConsensus(st.Eth1Data()),
-		Eth1DataVotes:                 votes,
-		Eth1DepositIndex:              fmt.Sprintf("%d", st.Eth1DepositIndex()),
-		Validators:                    vals,
-		Balances:                      bals,
-		RandaoMixes:                   rm,
-		Slashings:                     slashings,
-		PreviousEpochParticipation:    prevPart,
-		CurrentEpochParticipation:     currPart,
-		JustificationBits:             hexutil.Encode(st.JustificationBits()),
-		PreviousJustifiedCheckpoint:   CheckpointFromConsensus(st.PreviousJustifiedCheckpoint()),
-		CurrentJustifiedCheckpoint:    CheckpointFromConsensus(st.CurrentJustifiedCheckpoint()),
-		FinalizedCheckpoint:           CheckpointFromConsensus(st.FinalizedCheckpoint()),
-		InactivityScores:              is,
-		CurrentSyncCommittee:          SyncCommitteeFromConsensus(currSc),
-		NextSyncCommittee:             SyncCommitteeFromConsensus(nextSc),
-		LatestExecutionPayloadHeader:  payload,
-		NextWithdrawalIndex:           fmt.Sprintf("%d", nwi),
-		NextWithdrawalValidatorIndex:  fmt.Sprintf("%d", nwvi),
-		HistoricalSummaries:           hs,
-		DepositRequestsStartIndex:     fmt.Sprintf("%d", drsi),
-		DepositBalanceToConsume:       fmt.Sprintf("%d", dbtc),
-		ExitBalanceToConsume:          fmt.Sprintf("%d", ebtc),
-		EarliestExitEpoch:             fmt.Sprintf("%d", eee),
-		ConsolidationBalanceToConsume: fmt.Sprintf("%d", cbtc),
-		EarliestConsolidationEpoch:    fmt.Sprintf("%d", ece),
-		PendingBalanceDeposits:        PendingBalanceDepositsFromConsensus(pbd),
-		PendingPartialWithdrawals:     PendingPartialWithdrawalsFromConsensus(ppw),
-		PendingConsolidations:         PendingConsolidationsFromConsensus(pc),
-	}, nil
-}
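The removed converter strings together many state accessors, each returning a value and an error, and short-circuits on the first failure before formatting the result. A minimal sketch of that accessor-then-format shape with a stand-in state type:

```go
package main

import (
	"errors"
	"fmt"
)

// Stand-in for a beacon state whose accessors may fail on pre-Electra states.
type state struct{ electra bool }

func (s *state) DepositBalanceToConsume() (uint64, error) {
	if !s.electra {
		return 0, errors.New("DepositBalanceToConsume is not supported before Electra")
	}
	return 64_000_000_000, nil
}

// convert mirrors the shape of BeaconStateElectraFromConsensus: call each
// accessor, short-circuit on the first error, then format the result.
func convert(s *state) (string, error) {
	dbtc, err := s.DepositBalanceToConsume()
	if err != nil {
		return "", err
	}
	return fmt.Sprintf("%d", dbtc), nil
}

func main() {
	out, err := convert(&state{electra: true})
	fmt.Println(out, err) // 64000000000 <nil>
}
```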
@@ -225,19 +225,3 @@ type IndividualVote struct {
 	InclusionDistance string `json:"inclusion_distance"`
 	InactivityScore   string `json:"inactivity_score"`
 }
 
-type ChainHead struct {
-	HeadSlot                   string `json:"head_slot"`
-	HeadEpoch                  string `json:"head_epoch"`
-	HeadBlockRoot              string `json:"head_block_root"`
-	FinalizedSlot              string `json:"finalized_slot"`
-	FinalizedEpoch             string `json:"finalized_epoch"`
-	FinalizedBlockRoot         string `json:"finalized_block_root"`
-	JustifiedSlot              string `json:"justified_slot"`
-	JustifiedEpoch             string `json:"justified_epoch"`
-	JustifiedBlockRoot         string `json:"justified_block_root"`
-	PreviousJustifiedSlot      string `json:"previous_justified_slot"`
-	PreviousJustifiedEpoch     string `json:"previous_justified_epoch"`
-	PreviousJustifiedBlockRoot string `json:"previous_justified_block_root"`
-	OptimisticStatus           bool   `json:"optimistic_status"`
-}
@@ -1,16 +1,12 @@
 package structs
 
-type LightClientHeader struct {
-	Beacon *BeaconBlockHeader `json:"beacon"`
-}
-
 type LightClientBootstrapResponse struct {
 	Version string                `json:"version"`
 	Data    *LightClientBootstrap `json:"data"`
 }
 
 type LightClientBootstrap struct {
-	Header                     *LightClientHeader `json:"header"`
+	Header                     *BeaconBlockHeader `json:"header"`
 	CurrentSyncCommittee       *SyncCommittee     `json:"current_sync_committee"`
 	CurrentSyncCommitteeBranch []string           `json:"current_sync_committee_branch"`
 }
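The response structs above are plain encoding/json envelopes: a version string wrapping a data payload. A minimal decoding sketch using cut-down copies of the types:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Cut-down copies of the response envelope for illustration.
type bootstrap struct {
	CurrentSyncCommitteeBranch []string `json:"current_sync_committee_branch"`
}

type bootstrapResponse struct {
	Version string     `json:"version"`
	Data    *bootstrap `json:"data"`
}

func main() {
	raw := `{"version":"altair","data":{"current_sync_committee_branch":["0xaa"]}}`
	var resp bootstrapResponse
	if err := json.Unmarshal([]byte(raw), &resp); err != nil {
		panic(err)
	}
	fmt.Println(resp.Version, resp.Data.CurrentSyncCommitteeBranch) // altair [0xaa]
}
```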
@@ -118,34 +118,3 @@ type GetValidatorPerformanceResponse struct {
 	MissingValidators [][]byte `json:"missing_validators,omitempty"`
 	InactivityScores  []uint64 `json:"inactivity_scores,omitempty"`
 }
 
-type GetValidatorParticipationResponse struct {
-	Epoch         string                  `json:"epoch"`
-	Finalized     bool                    `json:"finalized"`
-	Participation *ValidatorParticipation `json:"participation"`
-}
-
-type ValidatorParticipation struct {
-	GlobalParticipationRate          string `json:"global_participation_rate" deprecated:"true"`
-	VotedEther                       string `json:"voted_ether" deprecated:"true"`
-	EligibleEther                    string `json:"eligible_ether" deprecated:"true"`
-	CurrentEpochActiveGwei           string `json:"current_epoch_active_gwei"`
-	CurrentEpochAttestingGwei        string `json:"current_epoch_attesting_gwei"`
-	CurrentEpochTargetAttestingGwei  string `json:"current_epoch_target_attesting_gwei"`
-	PreviousEpochActiveGwei          string `json:"previous_epoch_active_gwei"`
-	PreviousEpochAttestingGwei       string `json:"previous_epoch_attesting_gwei"`
-	PreviousEpochTargetAttestingGwei string `json:"previous_epoch_target_attesting_gwei"`
-	PreviousEpochHeadAttestingGwei   string `json:"previous_epoch_head_attesting_gwei"`
-}
-
-type ActiveSetChanges struct {
-	Epoch               string   `json:"epoch"`
-	ActivatedPublicKeys []string `json:"activated_public_keys"`
-	ActivatedIndices    []string `json:"activated_indices"`
-	ExitedPublicKeys    []string `json:"exited_public_keys"`
-	ExitedIndices       []string `json:"exited_indices"`
-	SlashedPublicKeys   []string `json:"slashed_public_keys"`
-	SlashedIndices      []string `json:"slashed_indices"`
-	EjectedPublicKeys   []string `json:"ejected_public_keys"`
-	EjectedIndices      []string `json:"ejected_indices"`
-}
@@ -29,13 +29,6 @@ type Attestation struct {
 	Signature string `json:"signature"`
 }
 
-type AttestationElectra struct {
-	AggregationBits string           `json:"aggregation_bits"`
-	Data            *AttestationData `json:"data"`
-	Signature       string           `json:"signature"`
-	CommitteeBits   string           `json:"committee_bits"`
-}
-
 type AttestationData struct {
 	Slot           string `json:"slot"`
 	CommitteeIndex string `json:"index"`
@@ -85,17 +78,6 @@ type AggregateAttestationAndProof struct {
 	SelectionProof string `json:"selection_proof"`
 }
 
-type SignedAggregateAttestationAndProofElectra struct {
-	Message   *AggregateAttestationAndProofElectra `json:"message"`
-	Signature string                               `json:"signature"`
-}
-
-type AggregateAttestationAndProofElectra struct {
-	AggregatorIndex string              `json:"aggregator_index"`
-	Aggregate       *AttestationElectra `json:"aggregate"`
-	SelectionProof  string              `json:"selection_proof"`
-}
-
 type SyncCommitteeSubscription struct {
 	ValidatorIndex       string   `json:"validator_index"`
 	SyncCommitteeIndices []string `json:"sync_committee_indices"`
@@ -196,11 +178,6 @@ type AttesterSlashing struct {
 	Attestation2 *IndexedAttestation `json:"attestation_2"`
 }
 
-type AttesterSlashingElectra struct {
-	Attestation1 *IndexedAttestationElectra `json:"attestation_1"`
-	Attestation2 *IndexedAttestationElectra `json:"attestation_2"`
-}
-
 type Deposit struct {
 	Proof []string     `json:"proof"`
 	Data  *DepositData `json:"data"`
@@ -219,12 +196,6 @@ type IndexedAttestation struct {
 	Signature string `json:"signature"`
 }
 
-type IndexedAttestationElectra struct {
-	AttestingIndices []string         `json:"attesting_indices"`
-	Data             *AttestationData `json:"data"`
-	Signature        string           `json:"signature"`
-}
-
 type SyncAggregate struct {
 	SyncCommitteeBits      string `json:"sync_committee_bits"`
 	SyncCommitteeSignature string `json:"sync_committee_signature"`
@@ -236,39 +207,3 @@ type Withdrawal struct {
 	ExecutionAddress string `json:"address"`
 	Amount           string `json:"amount"`
 }
 
-type DepositRequest struct {
-	Pubkey                string `json:"pubkey"`
-	WithdrawalCredentials string `json:"withdrawal_credentials"`
-	Amount                string `json:"amount"`
-	Signature             string `json:"signature"`
-	Index                 string `json:"index"`
-}
-
-type WithdrawalRequest struct {
-	SourceAddress   string `json:"source_address"`
-	ValidatorPubkey string `json:"validator_pubkey"`
-	Amount          string `json:"amount"`
-}
-
-type ConsolidationRequest struct {
-	SourceAddress string `json:"source_address"`
-	SourcePubkey  string `json:"source_pubkey"`
-	TargetPubkey  string `json:"target_pubkey"`
-}
-
-type PendingBalanceDeposit struct {
-	Index  string `json:"index"`
-	Amount string `json:"amount"`
-}
-
-type PendingPartialWithdrawal struct {
-	Index             string `json:"index"`
-	Amount            string `json:"amount"`
-	WithdrawableEpoch string `json:"withdrawable_epoch"`
-}
-
-type PendingConsolidation struct {
-	SourceIndex string `json:"source_index"`
-	TargetIndex string `json:"target_index"`
-}
@@ -140,43 +140,3 @@ type BeaconStateDeneb struct {
 	NextWithdrawalValidatorIndex string               `json:"next_withdrawal_validator_index"`
 	HistoricalSummaries          []*HistoricalSummary `json:"historical_summaries"`
 }
 
-type BeaconStateElectra struct {
-	GenesisTime                   string                         `json:"genesis_time"`
-	GenesisValidatorsRoot         string                         `json:"genesis_validators_root"`
-	Slot                          string                         `json:"slot"`
-	Fork                          *Fork                          `json:"fork"`
-	LatestBlockHeader             *BeaconBlockHeader             `json:"latest_block_header"`
-	BlockRoots                    []string                       `json:"block_roots"`
-	StateRoots                    []string                       `json:"state_roots"`
-	HistoricalRoots               []string                       `json:"historical_roots"`
-	Eth1Data                      *Eth1Data                      `json:"eth1_data"`
-	Eth1DataVotes                 []*Eth1Data                    `json:"eth1_data_votes"`
-	Eth1DepositIndex              string                         `json:"eth1_deposit_index"`
-	Validators                    []*Validator                   `json:"validators"`
-	Balances                      []string                       `json:"balances"`
-	RandaoMixes                   []string                       `json:"randao_mixes"`
-	Slashings                     []string                       `json:"slashings"`
-	PreviousEpochParticipation    []string                       `json:"previous_epoch_participation"`
-	CurrentEpochParticipation     []string                       `json:"current_epoch_participation"`
-	JustificationBits             string                         `json:"justification_bits"`
-	PreviousJustifiedCheckpoint   *Checkpoint                    `json:"previous_justified_checkpoint"`
-	CurrentJustifiedCheckpoint    *Checkpoint                    `json:"current_justified_checkpoint"`
-	FinalizedCheckpoint           *Checkpoint                    `json:"finalized_checkpoint"`
-	InactivityScores              []string                       `json:"inactivity_scores"`
-	CurrentSyncCommittee          *SyncCommittee                 `json:"current_sync_committee"`
-	NextSyncCommittee             *SyncCommittee                 `json:"next_sync_committee"`
-	LatestExecutionPayloadHeader  *ExecutionPayloadHeaderElectra `json:"latest_execution_payload_header"`
-	NextWithdrawalIndex           string                         `json:"next_withdrawal_index"`
-	NextWithdrawalValidatorIndex  string                         `json:"next_withdrawal_validator_index"`
-	HistoricalSummaries           []*HistoricalSummary           `json:"historical_summaries"`
-	DepositRequestsStartIndex     string                         `json:"deposit_requests_start_index"`
-	DepositBalanceToConsume       string                         `json:"deposit_balance_to_consume"`
-	ExitBalanceToConsume          string                         `json:"exit_balance_to_consume"`
-	EarliestExitEpoch             string                         `json:"earliest_exit_epoch"`
-	ConsolidationBalanceToConsume string                         `json:"consolidation_balance_to_consume"`
-	EarliestConsolidationEpoch    string                         `json:"earliest_consolidation_epoch"`
-	PendingBalanceDeposits        []*PendingBalanceDeposit       `json:"pending_balance_deposits"`
-	PendingPartialWithdrawals     []*PendingPartialWithdrawal    `json:"pending_partial_withdrawals"`
-	PendingConsolidations         []*PendingConsolidation        `json:"pending_consolidations"`
-}
@@ -13,6 +13,7 @@ go_library(
         "head.go",
         "head_sync_committee_info.go",
         "init_sync_process_block.go",
+        "lightclient.go",
        "log.go",
        "merge_ascii_art.go",
        "metrics.go",
@@ -25,6 +26,7 @@ go_library(
        "receive_attestation.go",
        "receive_blob.go",
        "receive_block.go",
+        "receive_data_column.go",
        "service.go",
        "tracked_proposer.go",
        "weak_subjectivity_checks.go",
@@ -47,7 +49,7 @@ go_library(
        "//beacon-chain/core/feed:go_default_library",
        "//beacon-chain/core/feed/state:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
+        "//beacon-chain/core/light-client:go_default_library",
+        "//beacon-chain/core/peerdas:go_default_library",
        "//beacon-chain/core/signing:go_default_library",
        "//beacon-chain/core/time:go_default_library",
        "//beacon-chain/core/transition:go_default_library",
@@ -84,6 +86,7 @@ go_library(
        "//proto/engine/v1:go_default_library",
        "//proto/eth/v1:go_default_library",
+        "//proto/eth/v2:go_default_library",
        "//proto/migration:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//proto/prysm/v1alpha1/attestation:go_default_library",
        "//runtime/version:go_default_library",
@@ -116,6 +119,7 @@ go_test(
        "head_test.go",
        "init_sync_process_block_test.go",
        "init_test.go",
+        "lightclient_test.go",
        "log_test.go",
        "metrics_test.go",
        "mock_test.go",
@@ -157,6 +161,7 @@ go_test(
        "//beacon-chain/operations/slashings:go_default_library",
        "//beacon-chain/operations/voluntaryexits:go_default_library",
        "//beacon-chain/p2p:go_default_library",
+        "//beacon-chain/p2p/testing:go_default_library",
        "//beacon-chain/startup:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//beacon-chain/state/state-native:go_default_library",
@@ -172,6 +177,7 @@ go_test(
        "//encoding/bytesutil:go_default_library",
        "//proto/engine/v1:go_default_library",
        "//proto/eth/v1:go_default_library",
+        "//proto/eth/v2:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//runtime/version:go_default_library",
        "//testing/assert:go_default_library",
@@ -22,6 +22,13 @@ func (s *Service) GetProposerHead() [32]byte {
	return s.cfg.ForkChoiceStore.GetProposerHead()
}

// ShouldOverrideFCU returns the corresponding value from forkchoice
func (s *Service) ShouldOverrideFCU() bool {
	s.cfg.ForkChoiceStore.RLock()
	defer s.cfg.ForkChoiceStore.RUnlock()
	return s.cfg.ForkChoiceStore.ShouldOverrideFCU()
}

// SetForkChoiceGenesisTime sets the genesis time in Forkchoice
func (s *Service) SetForkChoiceGenesisTime(timestamp uint64) {
	s.cfg.ForkChoiceStore.Lock()
@@ -92,10 +99,3 @@ func (s *Service) FinalizedBlockHash() [32]byte {
	defer s.cfg.ForkChoiceStore.RUnlock()
	return s.cfg.ForkChoiceStore.FinalizedPayloadBlockHash()
}

// ParentRoot wraps a call to the corresponding method in forkchoice
func (s *Service) ParentRoot(root [32]byte) ([32]byte, error) {
	s.cfg.ForkChoiceStore.RLock()
	defer s.cfg.ForkChoiceStore.RUnlock()
	return s.cfg.ForkChoiceStore.ParentRoot(root)
}
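Every accessor in this hunk follows the same shape: take the fork-choice store's read (or write) lock, release it on return, and delegate. A minimal, self-contained sketch of that pattern, using a hypothetical store type standing in for the real ForkChoiceStore:

package main

import (
    "fmt"
    "sync"
)

// store is a hypothetical stand-in for the fork-choice store: a value
// guarded by an RWMutex, exposed only through locking wrappers.
type store struct {
    sync.RWMutex
    head [32]byte
}

// Head takes the read lock for the duration of the call, mirroring the
// Service read wrappers such as ShouldOverrideFCU above.
func (s *store) Head() [32]byte {
    s.RLock()
    defer s.RUnlock()
    return s.head
}

// SetHead takes the write lock, as SetForkChoiceGenesisTime does.
func (s *store) SetHead(h [32]byte) {
    s.Lock()
    defer s.Unlock()
    s.head = h
}

func main() {
    s := &store{}
    s.SetHead([32]byte{1})
    fmt.Printf("%x\n", s.Head()[:4])
}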
@@ -33,6 +33,7 @@ var (
)

var errMaxBlobsExceeded = errors.New("Expected commitments in block exceeds MAX_BLOBS_PER_BLOCK")
var errMaxDataColumnsExceeded = errors.New("Expected data columns for node exceeds NUMBER_OF_COLUMNS")

// An invalid block is a block that fails the state transition under the core protocol rules.
// The beacon node shall neither accept nor build blocks that branch off from an invalid block.
@@ -3,6 +3,7 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
    name = "go_default_library",
    srcs = [
        "kzg.go",
        "trusted_setup.go",
        "validation.go",
    ],
@@ -12,6 +13,9 @@ go_library(
    deps = [
        "//consensus-types/blocks:go_default_library",
        "@com_github_crate_crypto_go_kzg_4844//:go_default_library",
        "@com_github_ethereum_c_kzg_4844//bindings/go:go_default_library",
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
        "@com_github_ethereum_go_ethereum//crypto/kzg4844:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
    ],
)
beacon-chain/blockchain/kzg/kzg.go (new file, 109 lines)
@@ -0,0 +1,109 @@
package kzg

import (
    "errors"

    ckzg4844 "github.com/ethereum/c-kzg-4844/bindings/go"
    "github.com/ethereum/go-ethereum/crypto/kzg4844"
)

// BytesPerBlob is the number of bytes in a single blob.
const BytesPerBlob = ckzg4844.BytesPerBlob

// Blob represents a serialized chunk of data.
type Blob [BytesPerBlob]byte

// BytesPerCell is the number of bytes in a single cell.
const BytesPerCell = ckzg4844.BytesPerCell

// Cell represents a chunk of an encoded Blob.
type Cell [BytesPerCell]byte

// Commitment represents a KZG commitment to a Blob.
type Commitment [48]byte

// Proof represents a KZG proof that attests to the validity of a Blob or parts of it.
type Proof [48]byte

// Bytes48 is a 48-byte array.
type Bytes48 = ckzg4844.Bytes48

// Bytes32 is a 32-byte array.
type Bytes32 = ckzg4844.Bytes32

// CellsAndProofs represents the Cells and Proofs corresponding to a single blob.
type CellsAndProofs struct {
    Cells  []Cell
    Proofs []Proof
}

func BlobToKZGCommitment(blob *Blob) (Commitment, error) {
    comm, err := kzg4844.BlobToCommitment(kzg4844.Blob(*blob))
    if err != nil {
        return Commitment{}, err
    }
    return Commitment(comm), nil
}

func ComputeBlobKZGProof(blob *Blob, commitment Commitment) (Proof, error) {
    proof, err := kzg4844.ComputeBlobProof(kzg4844.Blob(*blob), kzg4844.Commitment(commitment))
    if err != nil {
        return [48]byte{}, err
    }
    return Proof(proof), nil
}

func ComputeCellsAndKZGProofs(blob *Blob) (CellsAndProofs, error) {
    ckzgBlob := (*ckzg4844.Blob)(blob)
    ckzgCells, ckzgProofs, err := ckzg4844.ComputeCellsAndKZGProofs(ckzgBlob)
    if err != nil {
        return CellsAndProofs{}, err
    }

    return makeCellsAndProofs(ckzgCells[:], ckzgProofs[:])
}

func VerifyCellKZGProofBatch(commitmentsBytes []Bytes48, cellIndices []uint64, cells []Cell, proofsBytes []Bytes48) (bool, error) {
    // Convert `Cell` type to `ckzg4844.Cell`
    ckzgCells := make([]ckzg4844.Cell, len(cells))
    for i := range cells {
        ckzgCells[i] = ckzg4844.Cell(cells[i])
    }

    return ckzg4844.VerifyCellKZGProofBatch(commitmentsBytes, cellIndices, ckzgCells, proofsBytes)
}

func RecoverCellsAndKZGProofs(cellIndices []uint64, partialCells []Cell) (CellsAndProofs, error) {
    // Convert `Cell` type to `ckzg4844.Cell`
    ckzgPartialCells := make([]ckzg4844.Cell, len(partialCells))
    for i := range partialCells {
        ckzgPartialCells[i] = ckzg4844.Cell(partialCells[i])
    }

    ckzgCells, ckzgProofs, err := ckzg4844.RecoverCellsAndKZGProofs(cellIndices, ckzgPartialCells)
    if err != nil {
        return CellsAndProofs{}, err
    }

    return makeCellsAndProofs(ckzgCells[:], ckzgProofs[:])
}

// Convert cells/proofs to the CellsAndProofs type defined in this package.
func makeCellsAndProofs(ckzgCells []ckzg4844.Cell, ckzgProofs []ckzg4844.KZGProof) (CellsAndProofs, error) {
    if len(ckzgCells) != len(ckzgProofs) {
        return CellsAndProofs{}, errors.New("different number of cells/proofs")
    }

    var cells []Cell
    var proofs []Proof
    for i := range ckzgCells {
        cells = append(cells, Cell(ckzgCells[i]))
        proofs = append(proofs, Proof(ckzgProofs[i]))
    }

    return CellsAndProofs{
        Cells:  cells,
        Proofs: proofs,
    }, nil
}
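For context, a sketch of how these wrappers compose: commit to a blob, extend it into cells and proofs, then verify the whole batch against the single commitment. This is illustrative only, written as if it lived inside the kzg package above (so the errors import is already available), and it assumes the trusted setup has been loaded via Start().

// commitAndProve is a hypothetical caller of the kzg package above; it is
// not part of the diff.
func commitAndProve(blob *Blob) error {
    commitment, err := BlobToKZGCommitment(blob)
    if err != nil {
        return err
    }

    cp, err := ComputeCellsAndKZGProofs(blob)
    if err != nil {
        return err
    }

    // Verify every cell of the extended blob against the one commitment.
    commitments := make([]Bytes48, len(cp.Cells))
    indices := make([]uint64, len(cp.Cells))
    proofs := make([]Bytes48, len(cp.Cells))
    for i := range cp.Cells {
        commitments[i] = Bytes48(commitment)
        indices[i] = uint64(i)
        proofs[i] = Bytes48(cp.Proofs[i])
    }
    ok, err := VerifyCellKZGProofBatch(commitments, indices, cp.Cells, proofs)
    if err != nil {
        return err
    }
    if !ok {
        return errors.New("cell proof batch failed to verify")
    }
    return nil
}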
@@ -5,6 +5,8 @@ import (
	"encoding/json"

	GoKZG "github.com/crate-crypto/go-kzg-4844"
	CKZG "github.com/ethereum/c-kzg-4844/bindings/go"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/pkg/errors"
)

@@ -12,17 +14,53 @@ var (
	//go:embed trusted_setup.json
	embeddedTrustedSetup []byte // 1.2Mb
	kzgContext *GoKZG.Context
	kzgLoaded  bool
)

type TrustedSetup struct {
	G1Monomial [GoKZG.ScalarsPerBlob]GoKZG.G1CompressedHexStr `json:"g1_monomial"`
	G1Lagrange [GoKZG.ScalarsPerBlob]GoKZG.G1CompressedHexStr `json:"g1_lagrange"`
	G2Monomial [65]GoKZG.G2CompressedHexStr                   `json:"g2_monomial"`
}

func Start() error {
	parsedSetup := GoKZG.JSONTrustedSetup{}
	err := json.Unmarshal(embeddedTrustedSetup, &parsedSetup)
	trustedSetup := &TrustedSetup{}
	err := json.Unmarshal(embeddedTrustedSetup, trustedSetup)
	if err != nil {
		return errors.Wrap(err, "could not parse trusted setup JSON")
	}
	kzgContext, err = GoKZG.NewContext4096(&parsedSetup)
	kzgContext, err = GoKZG.NewContext4096(&GoKZG.JSONTrustedSetup{
		SetupG2:         trustedSetup.G2Monomial[:],
		SetupG1Lagrange: trustedSetup.G1Lagrange})
	if err != nil {
		return errors.Wrap(err, "could not initialize go-kzg context")
	}

	// Length of a G1 point, converted from hex to binary.
	g1MonomialBytes := make([]byte, len(trustedSetup.G1Monomial)*(len(trustedSetup.G1Monomial[0])-2)/2)
	for i, g1 := range &trustedSetup.G1Monomial {
		copy(g1MonomialBytes[i*(len(g1)-2)/2:], hexutil.MustDecode(g1))
	}
	// Length of a G1 point, converted from hex to binary.
	g1LagrangeBytes := make([]byte, len(trustedSetup.G1Lagrange)*(len(trustedSetup.G1Lagrange[0])-2)/2)
	for i, g1 := range &trustedSetup.G1Lagrange {
		copy(g1LagrangeBytes[i*(len(g1)-2)/2:], hexutil.MustDecode(g1))
	}
	// Length of a G2 point, converted from hex to binary.
	g2MonomialBytes := make([]byte, len(trustedSetup.G2Monomial)*(len(trustedSetup.G2Monomial[0])-2)/2)
	for i, g2 := range &trustedSetup.G2Monomial {
		copy(g2MonomialBytes[i*(len(g2)-2)/2:], hexutil.MustDecode(g2))
	}
	if !kzgLoaded {
		// TODO: Provide a configuration option for this.
		var precompute uint = 0

		// Free the current trusted setup before running this method. CKZG
		// panics if the same setup is run multiple times.
		if err = CKZG.LoadTrustedSetup(g1MonomialBytes, g1LagrangeBytes, g2MonomialBytes, precompute); err != nil {
			panic(err)
		}
	}
	kzgLoaded = true
	return nil
}
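The length arithmetic in Start() is easy to misread: each compressed point is a 0x-prefixed hex string, so (len(g1)-2)/2 is its binary size (48 bytes for G1), and the destination slice is points × 48 bytes. A self-contained sketch of the same computation with a synthetic point:

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/common/hexutil"
)

func main() {
    // A compressed G1 point: "0x" plus 96 hex characters (48 bytes).
    g1 := "0x" + fmt.Sprintf("%096x", 1)
    points := []string{g1, g1, g1}

    // Same sizing rule as Start(): strip the 0x prefix, halve the hex length.
    out := make([]byte, len(points)*(len(points[0])-2)/2)
    for i, p := range points {
        copy(out[i*(len(p)-2)/2:], hexutil.MustDecode(p))
    }
    fmt.Println(len(out)) // 144 = 3 points * 48 bytes
}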
File diff suppressed because it is too large
@@ -1,20 +1,18 @@
package light_client
package blockchain

import (
	"bytes"
	"context"
	"fmt"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
	ethpbv1 "github.com/prysmaticlabs/prysm/v5/proto/eth/v1"
	ethpbv2 "github.com/prysmaticlabs/prysm/v5/proto/eth/v2"
	"github.com/prysmaticlabs/prysm/v5/proto/migration"
	"github.com/prysmaticlabs/prysm/v5/time/slots"

	"context"

	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
)

const (
beacon-chain/blockchain/lightclient_test.go (new file, 160 lines)
@@ -0,0 +1,160 @@
package blockchain

import (
    "context"
    "testing"

    "github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
    "github.com/prysmaticlabs/prysm/v5/config/params"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
    v1 "github.com/prysmaticlabs/prysm/v5/proto/eth/v1"
    ethpbv2 "github.com/prysmaticlabs/prysm/v5/proto/eth/v2"
    ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/v5/testing/require"
    "github.com/prysmaticlabs/prysm/v5/testing/util"
)

type testlc struct {
    t              *testing.T
    ctx            context.Context
    state          state.BeaconState
    block          interfaces.ReadOnlySignedBeaconBlock
    attestedState  state.BeaconState
    attestedHeader *ethpb.BeaconBlockHeader
}

func newTestLc(t *testing.T) *testlc {
    return &testlc{t: t}
}

func (l *testlc) setupTest() *testlc {
    ctx := context.Background()

    slot := primitives.Slot(params.BeaconConfig().AltairForkEpoch * primitives.Epoch(params.BeaconConfig().SlotsPerEpoch)).Add(1)

    attestedState, err := util.NewBeaconStateCapella()
    require.NoError(l.t, err)
    err = attestedState.SetSlot(slot)
    require.NoError(l.t, err)

    parent := util.NewBeaconBlockCapella()
    parent.Block.Slot = slot

    signedParent, err := blocks.NewSignedBeaconBlock(parent)
    require.NoError(l.t, err)

    parentHeader, err := signedParent.Header()
    require.NoError(l.t, err)
    attestedHeader := parentHeader.Header

    err = attestedState.SetLatestBlockHeader(attestedHeader)
    require.NoError(l.t, err)
    attestedStateRoot, err := attestedState.HashTreeRoot(ctx)
    require.NoError(l.t, err)

    // get a new signed block so the root is updated with the new state root
    parent.Block.StateRoot = attestedStateRoot[:]
    signedParent, err = blocks.NewSignedBeaconBlock(parent)
    require.NoError(l.t, err)

    state, err := util.NewBeaconStateCapella()
    require.NoError(l.t, err)
    err = state.SetSlot(slot)
    require.NoError(l.t, err)

    parentRoot, err := signedParent.Block().HashTreeRoot()
    require.NoError(l.t, err)

    block := util.NewBeaconBlockCapella()
    block.Block.Slot = slot
    block.Block.ParentRoot = parentRoot[:]

    for i := uint64(0); i < params.BeaconConfig().MinSyncCommitteeParticipants; i++ {
        block.Block.Body.SyncAggregate.SyncCommitteeBits.SetBitAt(i, true)
    }

    signedBlock, err := blocks.NewSignedBeaconBlock(block)
    require.NoError(l.t, err)

    h, err := signedBlock.Header()
    require.NoError(l.t, err)

    err = state.SetLatestBlockHeader(h.Header)
    require.NoError(l.t, err)
    stateRoot, err := state.HashTreeRoot(ctx)
    require.NoError(l.t, err)

    // get a new signed block so the root is updated with the new state root
    block.Block.StateRoot = stateRoot[:]
    signedBlock, err = blocks.NewSignedBeaconBlock(block)
    require.NoError(l.t, err)

    l.state = state
    l.attestedState = attestedState
    l.attestedHeader = attestedHeader
    l.block = signedBlock
    l.ctx = ctx

    return l
}

func (l *testlc) checkAttestedHeader(update *ethpbv2.LightClientUpdate) {
    require.Equal(l.t, l.attestedHeader.Slot, update.AttestedHeader.Slot, "Attested header slot is not equal")
    require.Equal(l.t, l.attestedHeader.ProposerIndex, update.AttestedHeader.ProposerIndex, "Attested header proposer index is not equal")
    require.DeepSSZEqual(l.t, l.attestedHeader.ParentRoot, update.AttestedHeader.ParentRoot, "Attested header parent root is not equal")
    require.DeepSSZEqual(l.t, l.attestedHeader.BodyRoot, update.AttestedHeader.BodyRoot, "Attested header body root is not equal")

    attestedStateRoot, err := l.attestedState.HashTreeRoot(l.ctx)
    require.NoError(l.t, err)
    require.DeepSSZEqual(l.t, attestedStateRoot[:], update.AttestedHeader.StateRoot, "Attested header state root is not equal")
}

func (l *testlc) checkSyncAggregate(update *ethpbv2.LightClientUpdate) {
    syncAggregate, err := l.block.Block().Body().SyncAggregate()
    require.NoError(l.t, err)
    require.DeepSSZEqual(l.t, syncAggregate.SyncCommitteeBits, update.SyncAggregate.SyncCommitteeBits, "SyncAggregate bits is not equal")
    require.DeepSSZEqual(l.t, syncAggregate.SyncCommitteeSignature, update.SyncAggregate.SyncCommitteeSignature, "SyncAggregate signature is not equal")
}

func TestLightClient_NewLightClientOptimisticUpdateFromBeaconState(t *testing.T) {
    l := newTestLc(t).setupTest()

    update, err := NewLightClientOptimisticUpdateFromBeaconState(l.ctx, l.state, l.block, l.attestedState)
    require.NoError(t, err)
    require.NotNil(t, update, "update is nil")

    require.Equal(t, l.block.Block().Slot(), update.SignatureSlot, "Signature slot is not equal")

    l.checkSyncAggregate(update)
    l.checkAttestedHeader(update)

    require.Equal(t, (*v1.BeaconBlockHeader)(nil), update.FinalizedHeader, "Finalized header is not nil")
    require.DeepSSZEqual(t, ([][]byte)(nil), update.FinalityBranch, "Finality branch is not nil")
}

func TestLightClient_NewLightClientFinalityUpdateFromBeaconState(t *testing.T) {
    l := newTestLc(t).setupTest()

    update, err := NewLightClientFinalityUpdateFromBeaconState(l.ctx, l.state, l.block, l.attestedState, nil)
    require.NoError(t, err)
    require.NotNil(t, update, "update is nil")

    require.Equal(t, l.block.Block().Slot(), update.SignatureSlot, "Signature slot is not equal")

    l.checkSyncAggregate(update)
    l.checkAttestedHeader(update)

    zeroHash := params.BeaconConfig().ZeroHash[:]
    require.NotNil(t, update.FinalizedHeader, "Finalized header is nil")
    require.Equal(t, primitives.Slot(0), update.FinalizedHeader.Slot, "Finalized header slot is not zero")
    require.Equal(t, primitives.ValidatorIndex(0), update.FinalizedHeader.ProposerIndex, "Finalized header proposer index is not zero")
    require.DeepSSZEqual(t, zeroHash, update.FinalizedHeader.ParentRoot, "Finalized header parent root is not zero")
    require.DeepSSZEqual(t, zeroHash, update.FinalizedHeader.StateRoot, "Finalized header state root is not zero")
    require.DeepSSZEqual(t, zeroHash, update.FinalizedHeader.BodyRoot, "Finalized header body root is not zero")
    require.Equal(t, FinalityBranchNumOfLeaves, len(update.FinalityBranch), "Invalid finality branch leaves")
    for _, leaf := range update.FinalityBranch {
        require.DeepSSZEqual(t, zeroHash, leaf, "Leaf is not zero")
    }
}
@@ -118,9 +118,9 @@ func WithBLSToExecPool(p blstoexec.PoolManager) Option {
}

// WithP2PBroadcaster to broadcast messages after appropriate processing.
func WithP2PBroadcaster(p p2p.Broadcaster) Option {
func WithP2PBroadcaster(p p2p.Acceser) Option {
	return func(s *Service) error {
		s.cfg.P2p = p
		s.cfg.P2P = p
		return nil
	}
}
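WithP2PBroadcaster is one of the service's functional options: a constructor argument that mutates the config and may fail. A minimal sketch of the pattern with hypothetical names (Service, WithName, and NewService here are illustrative stand-ins, not the blockchain package's real API):

package main

import "fmt"

type config struct{ name string }

// Service is a hypothetical stand-in for the blockchain Service.
type Service struct{ cfg config }

// Option mirrors the shape of the blockchain package's Option type.
type Option func(*Service) error

// WithName is a hypothetical option in the style of WithP2PBroadcaster.
func WithName(n string) Option {
    return func(s *Service) error {
        s.cfg.name = n
        return nil
    }
}

func NewService(opts ...Option) (*Service, error) {
    s := &Service{}
    for _, o := range opts {
        if err := o(s); err != nil {
            return nil, err
        }
    }
    return s, nil
}

func main() {
    s, err := NewService(WithName("beacon"))
    fmt.Println(s.cfg.name, err)
}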
@@ -6,10 +6,14 @@ import (
	"time"

	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"go.opencensus.io/trace"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed"
	statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
	coreTime "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/das"
@@ -29,8 +33,6 @@ import (
	"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
	"github.com/prysmaticlabs/prysm/v5/runtime/version"
	"github.com/prysmaticlabs/prysm/v5/time/slots"
	"github.com/sirupsen/logrus"
	"go.opencensus.io/trace"
)

// A custom slot deadline for processing state slots in our cache.
@@ -142,7 +144,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
	b := blks[0].Block()

	// Retrieve incoming block's pre state.
	if err := s.verifyBlkPreState(ctx, b.ParentRoot()); err != nil {
	if err := s.verifyBlkPreState(ctx, b); err != nil {
		return err
	}
	preState, err := s.cfg.StateGen.StateByRootInitialSync(ctx, b.ParentRoot())
@@ -499,7 +501,7 @@ func missingIndices(bs *filesystem.BlobStorage, root [32]byte, expected [][]byte
	}
	indices, err := bs.Indices(root)
	if err != nil {
		return nil, err
		return nil, errors.Wrap(err, "indices")
	}
	missing := make(map[uint64]struct{}, len(expected))
	for i := range expected {
@@ -513,12 +515,39 @@
	return missing, nil
}

func missingDataColumns(bs *filesystem.BlobStorage, root [32]byte, expected map[uint64]bool) (map[uint64]bool, error) {
	if len(expected) == 0 {
		return nil, nil
	}

	if len(expected) > int(params.BeaconConfig().NumberOfColumns) {
		return nil, errMaxDataColumnsExceeded
	}

	indices, err := bs.ColumnIndices(root)
	if err != nil {
		return nil, err
	}

	missing := make(map[uint64]bool, len(expected))
	for col := range expected {
		if !indices[col] {
			missing[col] = true
		}
	}

	return missing, nil
}
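missingDataColumns is a set difference over map[uint64]bool: everything in expected that is not marked present in the store's indices. A standalone sketch of the same logic, runnable without the blob storage:

package main

import "fmt"

// missing returns the keys of expected that are absent from have,
// mirroring missingDataColumns without the storage lookup or bounds checks.
func missing(expected, have map[uint64]bool) map[uint64]bool {
    out := make(map[uint64]bool, len(expected))
    for col := range expected {
        if !have[col] {
            out[col] = true
        }
    }
    return out
}

func main() {
    expected := map[uint64]bool{0: true, 7: true, 12: true}
    have := map[uint64]bool{7: true}
    fmt.Println(missing(expected, have)) // map[0:true 12:true]
}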
// isDataAvailable blocks until all BlobSidecars committed to in the block are available,
// or an error or context cancellation occurs. A nil result means that the data availability check is successful.
// The function will first check the database to see if all sidecars have been persisted. If any
// sidecars are missing, it will then read from the blobNotifier channel for the given root until the channel is
// closed, the context hits cancellation/timeout, or notifications have been received for all the missing sidecars.
func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed interfaces.ReadOnlySignedBeaconBlock) error {
	if coreTime.PeerDASIsActive(signed.Block().Slot()) {
		return s.isDataColumnsAvailable(ctx, root, signed)
	}
	if signed.Version() < version.Deneb {
		return nil
	}
@@ -548,7 +577,7 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
	// get a map of BlobSidecar indices that are not currently available.
	missing, err := missingIndices(s.blobStorage, root, kzgCommitments)
	if err != nil {
		return err
		return errors.Wrap(err, "missing indices")
	}
	// If there are no missing indices, all BlobSidecars are available.
	if len(missing) == 0 {
@@ -567,8 +596,13 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
		if len(missing) == 0 {
			return
		}
		log.WithFields(daCheckLogFields(root, signed.Block().Slot(), expected, len(missing))).
			Error("Still waiting for DA check at slot end.")

		log.WithFields(logrus.Fields{
			"slot":          signed.Block().Slot(),
			"root":          fmt.Sprintf("%#x", root),
			"blobsExpected": expected,
			"blobsWaiting":  len(missing),
		}).Error("Still waiting for blobs DA check at slot end.")
	})
	defer nst.Stop()
}
@@ -590,12 +624,130 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
	}
}

func daCheckLogFields(root [32]byte, slot primitives.Slot, expected, missing int) logrus.Fields {
	return logrus.Fields{
		"slot":          slot,
		"root":          fmt.Sprintf("%#x", root),
		"blobsExpected": expected,
		"blobsWaiting":  missing,
func (s *Service) isDataColumnsAvailable(ctx context.Context, root [32]byte, signed interfaces.ReadOnlySignedBeaconBlock) error {
	if signed.Version() < version.Deneb {
		return nil
	}

	block := signed.Block()
	if block == nil {
		return errors.New("invalid nil beacon block")
	}
	// We are only required to check within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
	if !params.WithinDAPeriod(slots.ToEpoch(block.Slot()), slots.ToEpoch(s.CurrentSlot())) {
		return nil
	}

	body := block.Body()
	if body == nil {
		return errors.New("invalid nil beacon block body")
	}

	kzgCommitments, err := body.BlobKzgCommitments()
	if err != nil {
		return errors.Wrap(err, "blob KZG commitments")
	}

	// If the block has no commitments, there is nothing to wait for.
	if len(kzgCommitments) == 0 {
		return nil
	}

	colMap, err := peerdas.CustodyColumns(s.cfg.P2P.NodeID(), peerdas.CustodySubnetCount())
	if err != nil {
		return errors.Wrap(err, "custody columns")
	}

	// Expected is the number of custody data columns a node is expected to have.
	expected := len(colMap)
	if expected == 0 {
		return nil
	}

	// Subscribe to newly stored data columns in the database.
	rootIndexChan := make(chan filesystem.RootIndexPair)
	subscription := s.blobStorage.DataColumnFeed.Subscribe(rootIndexChan)
	defer subscription.Unsubscribe()

	// Get the count of data columns we already have in the store.
	retrievedDataColumns, err := s.blobStorage.ColumnIndices(root)
	if err != nil {
		return errors.Wrap(err, "column indices")
	}

	retrievedDataColumnsCount := uint64(len(retrievedDataColumns))

	// As soon as we have more than half of the data columns, we can reconstruct the missing ones.
	// We don't need to wait for the rest of the data columns to declare the block as available.
	if peerdas.CanSelfReconstruct(retrievedDataColumnsCount) {
		return nil
	}

	// Get a map of data column indices that are not currently available.
	missing, err := missingDataColumns(s.blobStorage, root, colMap)
	if err != nil {
		return err
	}

	// If there are no missing indices, all data column sidecars are available.
	// This is the happy path.
	if len(missing) == 0 {
		return nil
	}

	// Log for DA checks that cross over into the next slot; helpful for debugging.
	nextSlot := slots.BeginsAt(signed.Block().Slot()+1, s.genesisTime)
	// Avoid logging if DA check is called after next slot start.
	if nextSlot.After(time.Now()) {
		nst := time.AfterFunc(time.Until(nextSlot), func() {
			if len(missing) == 0 {
				return
			}

			log.WithFields(logrus.Fields{
				"slot":            signed.Block().Slot(),
				"root":            fmt.Sprintf("%#x", root),
				"columnsExpected": expected,
				"columnsWaiting":  len(missing),
			}).Error("Still waiting for data columns DA check at slot end.")
		})
		defer nst.Stop()
	}

	for {
		select {
		case rootIndex := <-rootIndexChan:
			if rootIndex.Root != root {
				// This is not the root we are looking for.
				continue
			}

			// This is a data column we are expecting.
			if _, ok := missing[rootIndex.Index]; ok {
				retrievedDataColumnsCount++
			}

			// As soon as we have more than half of the data columns, we can reconstruct the missing ones.
			// We don't need to wait for the rest of the data columns to declare the block as available.
			if peerdas.CanSelfReconstruct(retrievedDataColumnsCount) {
				return nil
			}

			// Remove the index from the missing map.
			delete(missing, rootIndex.Index)

			// Exit if there are no more missing data columns.
			if len(missing) == 0 {
				return nil
			}
		case <-ctx.Done():
			missingIndexes := make([]uint64, 0, len(missing))
			for val := range missing {
				copiedVal := val
				missingIndexes = append(missingIndexes, copiedVal)
			}
			return errors.Wrapf(ctx.Err(), "context deadline waiting for data column sidecars slot: %d, BlockRoot: %#x, missing %v", block.Slot(), root, missingIndexes)
		}
	}
}
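The two early exits through peerdas.CanSelfReconstruct encode the extended-blob property: once a node holds enough of the NUMBER_OF_COLUMNS columns, it can recover the rest locally, so the DA wait can end early. The exact threshold (at least half versus strictly more than half) lives in peerdas.CanSelfReconstruct; the sketch below uses the stricter reading from the comments above, with 128 columns assumed for illustration:

package main

import "fmt"

const numberOfColumns = 128 // assumed value of NUMBER_OF_COLUMNS

// canSelfReconstruct is a hedged stand-in for peerdas.CanSelfReconstruct:
// with a 2x extension, holding more than half the columns lets the node
// rebuild the remainder instead of waiting for gossip.
func canSelfReconstruct(retrieved uint64) bool {
    return 2*retrieved > numberOfColumns
}

func main() {
    for _, n := range []uint64{63, 64, 65} {
        fmt.Println(n, canSelfReconstruct(n)) // 63 false, 64 false, 65 true
    }
}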
@@ -678,7 +830,7 @@ func (s *Service) waitForSync() error {
	}
}

func (s *Service) handleInvalidExecutionError(ctx context.Context, err error, blockRoot [32]byte, parentRoot [32]byte) error {
func (s *Service) handleInvalidExecutionError(ctx context.Context, err error, blockRoot, parentRoot [32]byte) error {
	if IsInvalidBlock(err) && InvalidBlockLVH(err) != [32]byte{} {
		return s.pruneInvalidBlock(ctx, blockRoot, parentRoot, InvalidBlockLVH(err))
	}
@@ -5,8 +5,6 @@ import (
	"fmt"
	"time"

	lightclient "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/light-client"

	"github.com/ethereum/go-ethereum/common"
	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed"
@@ -16,7 +14,6 @@ import (
	forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/v5/config/features"
	field_params "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
@@ -178,7 +175,7 @@ func (s *Service) sendLightClientFinalityUpdate(ctx context.Context, signed inte
	}
}

	update, err := lightclient.NewLightClientFinalityUpdateFromBeaconState(
	update, err := NewLightClientFinalityUpdateFromBeaconState(
		ctx,
		postState,
		signed,
@@ -193,7 +190,7 @@ func (s *Service) sendLightClientFinalityUpdate(ctx context.Context, signed inte
	// Return the result
	result := &ethpbv2.LightClientFinalityUpdateWithVersion{
		Version: ethpbv2.Version(signed.Version()),
		Data:    lightclient.CreateLightClientFinalityUpdate(update),
		Data:    CreateLightClientFinalityUpdate(update),
	}

	// Send event
@@ -213,7 +210,7 @@ func (s *Service) sendLightClientOptimisticUpdate(ctx context.Context, signed in
		return 0, errors.Wrap(err, "could not get attested state")
	}

	update, err := lightclient.NewLightClientOptimisticUpdateFromBeaconState(
	update, err := NewLightClientOptimisticUpdateFromBeaconState(
		ctx,
		postState,
		signed,
@@ -227,7 +224,7 @@ func (s *Service) sendLightClientOptimisticUpdate(ctx context.Context, signed in
	// Return the result
	result := &ethpbv2.LightClientOptimisticUpdateWithVersion{
		Version: ethpbv2.Version(signed.Version()),
		Data:    lightclient.CreateLightClientOptimisticUpdate(update),
		Data:    CreateLightClientOptimisticUpdate(update),
	}

	return s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
@@ -288,7 +285,7 @@ func (s *Service) getBlockPreState(ctx context.Context, b interfaces.ReadOnlyBea
	defer span.End()

	// Verify incoming block has a valid pre state.
	if err := s.verifyBlkPreState(ctx, b.ParentRoot()); err != nil {
	if err := s.verifyBlkPreState(ctx, b); err != nil {
		return nil, err
	}

@@ -314,10 +311,11 @@ func (s *Service) getBlockPreState(ctx context.Context, b interfaces.ReadOnlyBea
}

// verifyBlkPreState validates input block has a valid pre-state.
func (s *Service) verifyBlkPreState(ctx context.Context, parentRoot [field_params.RootLength]byte) error {
func (s *Service) verifyBlkPreState(ctx context.Context, b interfaces.ReadOnlyBeaconBlock) error {
	ctx, span := trace.StartSpan(ctx, "blockChain.verifyBlkPreState")
	defer span.End()

	parentRoot := b.ParentRoot()
	// Loosen the check to HasBlock because state summary gets saved in batches
	// during initial syncing. There's no risk given a state summary object is just a
	// subset of the block object.
@@ -117,7 +117,7 @@ func TestCachedPreState_CanGetFromStateSummary(t *testing.T) {

	require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Slot: 1, Root: root[:]}))
	require.NoError(t, service.cfg.StateGen.SaveState(ctx, root, st))
	require.NoError(t, service.verifyBlkPreState(ctx, wsb.Block().ParentRoot()))
	require.NoError(t, service.verifyBlkPreState(ctx, wsb.Block()))
}

func TestFillForkChoiceMissingBlocks_CanSave(t *testing.T) {
@@ -2044,11 +2044,7 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {

	st, err = service.HeadState(ctx)
	require.NoError(t, err)
	defaultConfig := util.DefaultBlockGenConfig()
	defaultConfig.NumWithdrawalRequests = 1
	defaultConfig.NumDepositRequests = 2
	defaultConfig.NumConsolidationRequests = 1
	b, err := util.GenerateFullBlockElectra(st, keys, defaultConfig, 1)
	b, err := util.GenerateFullBlockElectra(st, keys, util.DefaultBlockGenConfig(), 1)
	require.NoError(t, err)
	wsb, err := consensusblocks.NewSignedBeaconBlock(b)
	require.NoError(t, err)
@@ -2063,7 +2059,7 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {

	st, err = service.HeadState(ctx)
	require.NoError(t, err)
	b, err = util.GenerateFullBlockElectra(st, keys, defaultConfig, 2)
	b, err = util.GenerateFullBlockElectra(st, keys, util.DefaultBlockGenConfig(), 2)
	require.NoError(t, err)
	wsb, err = consensusblocks.NewSignedBeaconBlock(b)
	require.NoError(t, err)
@@ -2071,7 +2067,7 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
	// prepare another block that is not inserted
	st3, err := transition.ExecuteStateTransition(ctx, st, wsb)
	require.NoError(t, err)
	b3, err := util.GenerateFullBlockElectra(st3, keys, defaultConfig, 3)
	b3, err := util.GenerateFullBlockElectra(st3, keys, util.DefaultBlockGenConfig(), 3)
	require.NoError(t, err)
	wsb3, err := consensusblocks.NewSignedBeaconBlock(b3)
	require.NoError(t, err)
@@ -13,7 +13,6 @@ import (
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v5/runtime/version"
	"github.com/prysmaticlabs/prysm/v5/time/slots"
	"github.com/sirupsen/logrus"
	"go.opencensus.io/trace"
@@ -191,26 +190,13 @@ func (s *Service) processAttestations(ctx context.Context, disparity time.Durati
	}

	if err := s.receiveAttestationNoPubsub(ctx, a, disparity); err != nil {
		var fields logrus.Fields
		if a.Version() >= version.Electra {
			fields = logrus.Fields{
				"slot":             a.GetData().Slot,
				"committeeCount":   a.CommitteeBitsVal().Count(),
				"committeeIndices": a.CommitteeBitsVal().BitIndices(),
				"beaconBlockRoot":  fmt.Sprintf("%#x", bytesutil.Trunc(a.GetData().BeaconBlockRoot)),
				"targetRoot":       fmt.Sprintf("%#x", bytesutil.Trunc(a.GetData().Target.Root)),
				"aggregatedCount":  a.GetAggregationBits().Count(),
			}
		} else {
			fields = logrus.Fields{
				"slot":            a.GetData().Slot,
				"committeeIndex":  a.GetData().CommitteeIndex,
				"beaconBlockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(a.GetData().BeaconBlockRoot)),
				"targetRoot":      fmt.Sprintf("%#x", bytesutil.Trunc(a.GetData().Target.Root)),
				"aggregatedCount": a.GetAggregationBits().Count(),
			}
		}
		log.WithFields(fields).WithError(err).Warn("Could not process attestation for fork choice")
		log.WithFields(logrus.Fields{
			"slot":             a.GetData().Slot,
			"committeeIndex":   a.GetData().CommitteeIndex,
			"beaconBlockRoot":  fmt.Sprintf("%#x", bytesutil.Trunc(a.GetData().BeaconBlockRoot)),
			"targetRoot":       fmt.Sprintf("%#x", bytesutil.Trunc(a.GetData().Target.Root)),
			"aggregationCount": a.GetAggregationBits().Count(),
		}).WithError(err).Warn("Could not process attestation for fork choice")
	}
}
@@ -51,6 +51,12 @@ type BlobReceiver interface {
	ReceiveBlob(context.Context, blocks.VerifiedROBlob) error
}

// DataColumnReceiver interface defines the methods of the chain service for
// receiving new data columns.
type DataColumnReceiver interface {
	ReceiveDataColumn(blocks.VerifiedRODataColumn) error
}

// SlashingReceiver interface defines the methods of chain service for receiving validated slashing over the wire.
type SlashingReceiver interface {
	ReceiveAttesterSlashing(ctx context.Context, slashing ethpb.AttSlashing)
@@ -77,20 +83,59 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
	if err != nil {
		return err
	}
	rob, err := blocks.NewROBlockWithRoot(block, blockRoot)
	if err != nil {
		return err
	}

	preState, err := s.getBlockPreState(ctx, blockCopy.Block())
	if err != nil {
		return errors.Wrap(err, "could not get block's prestate")
	}
	// Save current justified and finalized epochs for future use.
	currStoreJustifiedEpoch := s.CurrentJustifiedCheckpt().Epoch
	currStoreFinalizedEpoch := s.FinalizedCheckpt().Epoch
	currentEpoch := coreTime.CurrentEpoch(preState)

	currentCheckpoints := s.saveCurrentCheckpoints(preState)
	postState, isValidPayload, err := s.validateExecutionAndConsensus(ctx, preState, blockCopy, blockRoot)
	preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
	if err != nil {
		return err
	}
	daWaitedTime, err := s.handleDA(ctx, blockCopy, blockRoot, avs)
	if err != nil {
	eg, _ := errgroup.WithContext(ctx)
	var postState state.BeaconState
	eg.Go(func() error {
		var err error
		postState, err = s.validateStateTransition(ctx, preState, blockCopy)
		if err != nil {
			return errors.Wrap(err, "failed to validate consensus state transition function")
		}
		return nil
	})
	var isValidPayload bool
	eg.Go(func() error {
		var err error
		isValidPayload, err = s.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, blockCopy, blockRoot)
		if err != nil {
			return errors.Wrap(err, "could not notify the engine of the new payload")
		}
		return nil
	})
	if err := eg.Wait(); err != nil {
		return err
	}
	daStartTime := time.Now()
	if avs != nil {
		if err := avs.IsDataAvailable(ctx, s.CurrentSlot(), rob); err != nil {
			return errors.Wrap(err, "could not validate blob data availability (AvailabilityStore.IsDataAvailable)")
		}
	} else {
		if err := s.isDataAvailable(ctx, blockRoot, blockCopy); err != nil {
			return errors.Wrap(err, "could not validate blob data availability")
		}
	}
	daWaitedTime := time.Since(daStartTime)
	dataAvailWaitedTime.Observe(float64(daWaitedTime.Milliseconds()))

	// Defragment the state before continuing block processing.
	s.defragmentState(postState)
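The hunk above runs the consensus state transition and the engine newPayload call concurrently through errgroup: both goroutines write results into variables captured by closure, and eg.Wait() surfaces the first error from either side. A self-contained sketch of the same shape, with placeholder work standing in for the real validation calls:

package main

import (
    "context"
    "fmt"

    "golang.org/x/sync/errgroup"
)

func main() {
    eg, ctx := errgroup.WithContext(context.Background())

    var post string // stands in for postState
    var valid bool  // stands in for isValidPayload

    eg.Go(func() error {
        post = "post-state" // consensus-side work
        return nil
    })
    eg.Go(func() error {
        _ = ctx      // the real code would pass ctx to the engine call
        valid = true // execution-side work
        return nil
    })

    // Wait returns the first non-nil error from either goroutine.
    if err := eg.Wait(); err != nil {
        fmt.Println("validation failed:", err)
        return
    }
    fmt.Println(post, valid)
}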
@@ -112,9 +157,29 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
		tracing.AnnotateError(span, err)
		return err
	}
	if err := s.updateCheckpoints(ctx, currentCheckpoints, preState, postState, blockRoot); err != nil {
		return err
	if coreTime.CurrentEpoch(postState) > currentEpoch && s.cfg.ForkChoiceStore.IsCanonical(blockRoot) {
		headSt, err := s.HeadState(ctx)
		if err != nil {
			return errors.Wrap(err, "could not get head state")
		}
		if err := reportEpochMetrics(ctx, postState, headSt); err != nil {
			log.WithError(err).Error("could not report epoch metrics")
		}
	}
	if err := s.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch); err != nil {
		return errors.Wrap(err, "could not update justified checkpoint")
	}

	newFinalized, err := s.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
	if err != nil {
		return errors.Wrap(err, "could not update finalized checkpoint")
	}
	// Send finalized events and finalized deposits in the background
	if newFinalized {
		// hook to process all post state finalization tasks
		s.executePostFinalizationTasks(ctx, postState)
	}

	// If slasher is configured, forward the attestations in the block via an event feed for processing.
	if features.Get().EnableSlasher {
		go s.sendBlockAttestationsToSlasher(blockCopy, preState)
@@ -134,140 +199,31 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
	if err := s.handleCaches(); err != nil {
		return err
	}
	s.reportPostBlockProcessing(blockCopy, blockRoot, receivedTime, daWaitedTime)
	return nil
}

type ffgCheckpoints struct {
	j, f, c primitives.Epoch
}

func (s *Service) saveCurrentCheckpoints(state state.BeaconState) (cp ffgCheckpoints) {
	// Save current justified and finalized epochs for future use.
	cp.j = s.CurrentJustifiedCheckpt().Epoch
	cp.f = s.FinalizedCheckpt().Epoch
	cp.c = coreTime.CurrentEpoch(state)
	return
}

func (s *Service) updateCheckpoints(
	ctx context.Context,
	cp ffgCheckpoints,
	preState, postState state.BeaconState,
	blockRoot [32]byte,
) error {
	if coreTime.CurrentEpoch(postState) > cp.c && s.cfg.ForkChoiceStore.IsCanonical(blockRoot) {
		headSt, err := s.HeadState(ctx)
		if err != nil {
			return errors.Wrap(err, "could not get head state")
		}
		if err := reportEpochMetrics(ctx, postState, headSt); err != nil {
			log.WithError(err).Error("could not report epoch metrics")
		}
	}
	if err := s.updateJustificationOnBlock(ctx, preState, postState, cp.j); err != nil {
		return errors.Wrap(err, "could not update justified checkpoint")
	}

	newFinalized, err := s.updateFinalizationOnBlock(ctx, preState, postState, cp.f)
	if err != nil {
		return errors.Wrap(err, "could not update finalized checkpoint")
	}
	// Send finalized events and finalized deposits in the background
	if newFinalized {
		// hook to process all post state finalization tasks
		s.executePostFinalizationTasks(ctx, postState)
	}
	return nil
}

func (s *Service) validateExecutionAndConsensus(
	ctx context.Context,
	preState state.BeaconState,
	block interfaces.SignedBeaconBlock,
	blockRoot [32]byte,
) (state.BeaconState, bool, error) {
	preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
	if err != nil {
		return nil, false, err
	}
	eg, _ := errgroup.WithContext(ctx)
	var postState state.BeaconState
	eg.Go(func() error {
		var err error
		postState, err = s.validateStateTransition(ctx, preState, block)
		if err != nil {
			return errors.Wrap(err, "failed to validate consensus state transition function")
		}
		return nil
	})
	var isValidPayload bool
	eg.Go(func() error {
		var err error
		isValidPayload, err = s.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, block, blockRoot)
		if err != nil {
			return errors.Wrap(err, "could not notify the engine of the new payload")
		}
		return nil
	})
	if err := eg.Wait(); err != nil {
		return nil, false, err
	}
	return postState, isValidPayload, nil
}

func (s *Service) handleDA(
	ctx context.Context,
	block interfaces.SignedBeaconBlock,
	blockRoot [32]byte,
	avs das.AvailabilityStore,
) (time.Duration, error) {
	daStartTime := time.Now()
	if avs != nil {
		rob, err := blocks.NewROBlockWithRoot(block, blockRoot)
		if err != nil {
			return 0, err
		}
		if err := avs.IsDataAvailable(ctx, s.CurrentSlot(), rob); err != nil {
			return 0, errors.Wrap(err, "could not validate blob data availability (AvailabilityStore.IsDataAvailable)")
		}
	} else {
		if err := s.isDataAvailable(ctx, blockRoot, block); err != nil {
			return 0, errors.Wrap(err, "could not validate blob data availability")
		}
	}
	daWaitedTime := time.Since(daStartTime)
	dataAvailWaitedTime.Observe(float64(daWaitedTime.Milliseconds()))
	return daWaitedTime, nil
}

func (s *Service) reportPostBlockProcessing(
	block interfaces.SignedBeaconBlock,
	blockRoot [32]byte,
	receivedTime time.Time,
	daWaitedTime time.Duration,
) {
	// Reports on block and fork choice metrics.
	cp := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
	finalized := &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
	reportSlotMetrics(block.Block().Slot(), s.HeadSlot(), s.CurrentSlot(), finalized)
	reportSlotMetrics(blockCopy.Block().Slot(), s.HeadSlot(), s.CurrentSlot(), finalized)

	// Log block sync status.
	cp = s.cfg.ForkChoiceStore.JustifiedCheckpoint()
	justified := &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
	if err := logBlockSyncStatus(block.Block(), blockRoot, justified, finalized, receivedTime, uint64(s.genesisTime.Unix()), daWaitedTime); err != nil {
	if err := logBlockSyncStatus(blockCopy.Block(), blockRoot, justified, finalized, receivedTime, uint64(s.genesisTime.Unix()), daWaitedTime); err != nil {
		log.WithError(err).Error("Unable to log block sync status")
	}
	// Log payload data
	if err := logPayload(block.Block()); err != nil {
	if err := logPayload(blockCopy.Block()); err != nil {
		log.WithError(err).Error("Unable to log debug block payload data")
	}
	// Log state transition data.
	if err := logStateTransitionData(block.Block()); err != nil {
	if err := logStateTransitionData(blockCopy.Block()); err != nil {
		log.WithError(err).Error("Unable to log state transition data")
	}

	timeWithoutDaWait := time.Since(receivedTime) - daWaitedTime
	chainServiceProcessingTime.Observe(float64(timeWithoutDaWait.Milliseconds()))

	return nil
}

func (s *Service) executePostFinalizationTasks(ctx context.Context, finalizedState state.BeaconState) {
beacon-chain/blockchain/receive_data_column.go (new file, 14 lines)
@@ -0,0 +1,14 @@
package blockchain

import (
    "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
)

func (s *Service) ReceiveDataColumn(ds blocks.VerifiedRODataColumn) error {
    if err := s.blobStorage.SaveDataColumn(ds); err != nil {
        return errors.Wrap(err, "save data column")
    }

    return nil
}
@@ -81,7 +81,7 @@ type config struct {
	ExitPool        voluntaryexits.PoolManager
	SlashingPool    slashings.PoolManager
	BLSToExecPool   blstoexec.PoolManager
	P2p p2p.Broadcaster
	P2P p2p.Acceser
	MaxRoutines     int
	StateNotifier   statefeed.Notifier
	ForkChoiceStore f.ForkChoicer
@@ -106,15 +106,17 @@ var ErrMissingClockSetter = errors.New("blockchain Service initialized without a
type blobNotifierMap struct {
	sync.RWMutex
	notifiers map[[32]byte]chan uint64
	seenIndex map[[32]byte][fieldparams.MaxBlobsPerBlock]bool
	seenIndex map[[32]byte][fieldparams.NumberOfColumns]bool
}

// notifyIndex notifies a blob by its index for a given root.
// It uses internal maps to keep track of seen indices and notifier channels.
func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64) {
	if idx >= fieldparams.MaxBlobsPerBlock {
		return
	}
	// TODO: Separate Data Columns from blobs
	/*
		if idx >= fieldparams.MaxBlobsPerBlock {
			return
		}*/

	bn.Lock()
	seen := bn.seenIndex[root]
@@ -128,7 +130,7 @@ func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64) {
	// Retrieve or create the notifier channel for the given root.
	c, ok := bn.notifiers[root]
	if !ok {
		c = make(chan uint64, fieldparams.MaxBlobsPerBlock)
		c = make(chan uint64, fieldparams.NumberOfColumns)
		bn.notifiers[root] = c
	}

@@ -142,7 +144,7 @@ func (bn *blobNotifierMap) forRoot(root [32]byte) chan uint64 {
	defer bn.Unlock()
	c, ok := bn.notifiers[root]
	if !ok {
		c = make(chan uint64, fieldparams.MaxBlobsPerBlock)
		c = make(chan uint64, fieldparams.NumberOfColumns)
		bn.notifiers[root] = c
	}
	return c
@@ -168,7 +170,7 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
	ctx, cancel := context.WithCancel(ctx)
	bn := &blobNotifierMap{
		notifiers: make(map[[32]byte]chan uint64),
		seenIndex: make(map[[32]byte][fieldparams.MaxBlobsPerBlock]bool),
		seenIndex: make(map[[32]byte][fieldparams.NumberOfColumns]bool),
	}
	srv := &Service{
		ctx: ctx,
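blobNotifierMap fans out per-root notifications over buffered channels; the buffer must be able to hold every index that can arrive for one root, which is why its capacity moves from MaxBlobsPerBlock to NumberOfColumns in this hunk. A reduced sketch of the map's dedup-and-notify behavior (maxIndices is an illustrative stand-in for the fieldparams constant):

package main

import (
    "fmt"
    "sync"
)

const maxIndices = 4 // stands in for fieldparams.NumberOfColumns

type notifierMap struct {
    sync.Mutex
    notifiers map[[32]byte]chan uint64
    seen      map[[32]byte][maxIndices]bool
}

// notify records idx for root once and pushes it to the root's channel.
func (n *notifierMap) notify(root [32]byte, idx uint64) {
    n.Lock()
    defer n.Unlock()
    seen := n.seen[root]
    if seen[idx] {
        return // duplicate notification, drop it
    }
    seen[idx] = true
    n.seen[root] = seen
    c, ok := n.notifiers[root]
    if !ok {
        // Buffered to maxIndices so senders never block.
        c = make(chan uint64, maxIndices)
        n.notifiers[root] = c
    }
    c <- idx
}

func main() {
    n := &notifierMap{
        notifiers: make(map[[32]byte]chan uint64),
        seen:      make(map[[32]byte][maxIndices]bool),
    }
    root := [32]byte{1}
    n.notify(root, 2)
    n.notify(root, 2) // deduplicated
    fmt.Println(len(n.notifiers[root])) // 1
}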
@@ -97,7 +97,7 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
		WithAttestationPool(attestations.NewPool()),
		WithSlashingPool(slashings.NewPool()),
		WithExitPool(voluntaryexits.NewPool()),
		WithP2PBroadcaster(&mockBroadcaster{}),
		WithP2PBroadcaster(&mockAccesser{}),
		WithStateNotifier(&mockBeaconNode{}),
		WithForkChoiceStore(fc),
		WithAttestationService(attService),
@@ -579,7 +579,7 @@ func (s *MockClockSetter) SetClock(g *startup.Clock) error {
func TestNotifyIndex(t *testing.T) {
	// Initialize a blobNotifierMap
	bn := &blobNotifierMap{
		seenIndex: make(map[[32]byte][fieldparams.MaxBlobsPerBlock]bool),
		seenIndex: make(map[[32]byte][fieldparams.NumberOfColumns]bool),
		notifiers: make(map[[32]byte]chan uint64),
	}
@@ -19,6 +19,7 @@ import (
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/attestations"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/blstoexec"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
	p2pTesting "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/testing"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state/stategen"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
@@ -45,6 +46,11 @@ type mockBroadcaster struct {
	broadcastCalled bool
}

type mockAccesser struct {
	mockBroadcaster
	p2pTesting.MockPeerManager
}

func (mb *mockBroadcaster) Broadcast(_ context.Context, _ proto.Message) error {
	mb.broadcastCalled = true
	return nil
@@ -65,6 +71,11 @@ func (mb *mockBroadcaster) BroadcastBlob(_ context.Context, _ uint64, _ *ethpb.B
	return nil
}

func (mb *mockBroadcaster) BroadcastDataColumn(_ context.Context, _ uint64, _ *ethpb.DataColumnSidecar) error {
	mb.broadcastCalled = true
	return nil
}

func (mb *mockBroadcaster) BroadcastBLSChanges(_ context.Context, _ []*ethpb.SignedBLSToExecutionChange) {
}
@@ -628,6 +628,11 @@ func (c *ChainService) ReceiveBlob(_ context.Context, b blocks.VerifiedROBlob) e
	return nil
}

// ReceiveDataColumn implements the same method in chain service
func (*ChainService) ReceiveDataColumn(_ blocks.VerifiedRODataColumn) error {
	return nil
}

// TargetRootForEpoch mocks the same method in the chain service
func (c *ChainService) TargetRootForEpoch(_ [32]byte, _ primitives.Epoch) ([32]byte, error) {
	return c.TargetRoot, nil
beacon-chain/cache/BUILD.bazel (vendored)
@@ -8,6 +8,7 @@ go_library(
        "attestation_data.go",
        "balance_cache_key.go",
        "checkpoint_state.go",
        "column_subnet_ids.go",
        "committee.go",
        "committee_disabled.go",  # keep
        "committees.go",
beacon-chain/cache/column_subnet_ids.go (vendored, new file, 70 lines)
@@ -0,0 +1,70 @@
package cache

import (
    "sync"
    "time"

    "github.com/patrickmn/go-cache"
    "github.com/prysmaticlabs/prysm/v5/config/params"
)

type columnSubnetIDs struct {
    colSubCache *cache.Cache
    colSubLock  sync.RWMutex
}

// ColumnSubnetIDs for column subnet participants
var ColumnSubnetIDs = newColumnSubnetIDs()

const columnKey = "columns"

func newColumnSubnetIDs() *columnSubnetIDs {
    secondsPerSlot := params.BeaconConfig().SecondsPerSlot
    slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
    epochDuration := time.Duration(slotsPerEpoch.Mul(secondsPerSlot))

    // Set the default duration of a column subnet subscription as the column expiry period.
    minEpochsForDataColumnSidecarsRequest := time.Duration(params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest)
    subLength := epochDuration * minEpochsForDataColumnSidecarsRequest

    persistentCache := cache.New(subLength*time.Second, epochDuration*time.Second)
    return &columnSubnetIDs{colSubCache: persistentCache}
}

// GetColumnSubnets retrieves the data column subnets.
func (s *columnSubnetIDs) GetColumnSubnets() ([]uint64, bool, time.Time) {
    s.colSubLock.RLock()
    defer s.colSubLock.RUnlock()

    id, duration, ok := s.colSubCache.GetWithExpiration(columnKey)
    if !ok {
        return nil, false, time.Time{}
    }
    // Retrieve indices from the cache.
    idxs, ok := id.([]uint64)
    if !ok {
        return nil, false, time.Time{}
    }

    return idxs, ok, duration
}

// AddColumnSubnets adds the relevant data column subnets.
func (s *columnSubnetIDs) AddColumnSubnets(colIdx []uint64) {
    s.colSubLock.Lock()
    defer s.colSubLock.Unlock()

    s.colSubCache.Set(columnKey, colIdx, 0)
}

// EmptyAllCaches empties out all the related caches and flushes any stored
// entries on them. This should only ever be used for testing; in normal
// production, handling of the relevant subnets for each role is done
// separately.
func (s *columnSubnetIDs) EmptyAllCaches() {
    // Clear the cache.
    s.colSubLock.Lock()
    defer s.colSubLock.Unlock()

    s.colSubCache.Flush()
}
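The cache above keys everything under the single "columns" entry, with the TTL derived from the data-column retention window, so callers only ever write and read one slot. A usage sketch against the exported API this file introduces (runnable only on this branch, since the package is part of the diff):

package main

import (
    "fmt"

    "github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
)

func main() {
    // Persist the subnet indices this node should subscribe to.
    cache.ColumnSubnetIDs.AddColumnSubnets([]uint64{3, 17, 42})

    // Read them back along with the expiry of the cached entry.
    ids, ok, expiry := cache.ColumnSubnetIDs.GetColumnSubnets()
    fmt.Println(ids, ok, expiry) // [3 17 42] true <expiry time>
}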
@@ -110,7 +110,25 @@ func VerifyAttestationNoVerifySignature(

	var indexedAtt ethpb.IndexedAtt

	if att.Version() >= version.Electra {
	if att.Version() < version.Electra {
		if uint64(att.GetData().CommitteeIndex) >= c {
			return fmt.Errorf("committee index %d >= committee count %d", att.GetData().CommitteeIndex, c)
		}

		if err = helpers.VerifyAttestationBitfieldLengths(ctx, beaconState, att); err != nil {
			return errors.Wrap(err, "could not verify attestation bitfields")
		}

		// Verify attesting indices are correct.
		committee, err := helpers.BeaconCommitteeFromState(ctx, beaconState, att.GetData().Slot, att.GetData().CommitteeIndex)
		if err != nil {
			return err
		}
		indexedAtt, err = attestation.ConvertToIndexed(ctx, att, committee)
		if err != nil {
			return err
		}
	} else {
		if att.GetData().CommitteeIndex != 0 {
			return errors.New("committee index must be 0 post-Electra")
		}
@@ -136,29 +154,6 @@ func VerifyAttestationNoVerifySignature(
		if err != nil {
			return err
		}
	} else {
		if uint64(att.GetData().CommitteeIndex) >= c {
			return fmt.Errorf("committee index %d >= committee count %d", att.GetData().CommitteeIndex, c)
		}

		// Verify attesting indices are correct.
		committee, err := helpers.BeaconCommitteeFromState(ctx, beaconState, att.GetData().Slot, att.GetData().CommitteeIndex)
		if err != nil {
			return err
		}

		if committee == nil {
			return errors.New("no committee exist for this attestation")
		}

		if err := helpers.VerifyBitfieldLength(att.GetAggregationBits(), uint64(len(committee))); err != nil {
			return errors.Wrap(err, "failed to verify aggregation bitfield")
		}

		indexedAtt, err = attestation.ConvertToIndexed(ctx, att, committee)
		if err != nil {
			return err
		}
	}

	return attestation.IsValidAttestationIndices(ctx, indexedAtt)
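The reordering above swaps the branches so the pre-Electra path comes first, but the rule itself is unchanged: before Electra the attestation's committee index must be below the committee count; from Electra on it must be zero, with committees taken from the committee bits instead. A reduced sketch of just that gate, with the committee lookup elided and an assumed version ordinal:

package main

import (
    "errors"
    "fmt"
)

const electra = 6 // assumed ordinal for version.Electra, for illustration only

// checkCommitteeIndex mirrors the version gate in
// VerifyAttestationNoVerifySignature, stripped of the committee conversion.
func checkCommitteeIndex(version int, committeeIndex, committeeCount uint64) error {
    if version < electra {
        if committeeIndex >= committeeCount {
            return fmt.Errorf("committee index %d >= committee count %d", committeeIndex, committeeCount)
        }
        return nil
    }
    if committeeIndex != 0 {
        return errors.New("committee index must be 0 post-Electra")
    }
    return nil
}

func main() {
    fmt.Println(checkCommitteeIndex(5, 3, 4)) // <nil>
    fmt.Println(checkCommitteeIndex(6, 3, 4)) // committee index must be 0 post-Electra
}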
@@ -1,5 +1,3 @@
package blocks

var ProcessBLSToExecutionChange = processBLSToExecutionChange

var VerifyBlobCommitmentCount = verifyBlobCommitmentCount
@@ -2,13 +2,11 @@ package blocks

import (
    "bytes"
    "fmt"

    "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
    field_params "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
    consensus_types "github.com/prysmaticlabs/prysm/v5/consensus-types"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
@@ -202,13 +200,13 @@ func ValidatePayload(st state.BeaconState, payload interfaces.ExecutionData) err
//    block_hash=payload.block_hash,
//    transactions_root=hash_tree_root(payload.transactions),
//    )
func ProcessPayload(st state.BeaconState, body interfaces.ReadOnlyBeaconBlockBody) (state.BeaconState, error) {
    payload, err := body.Execution()
    if err != nil {
        return nil, err
    }
    if err := verifyBlobCommitmentCount(body); err != nil {
        return nil, err
func ProcessPayload(st state.BeaconState, payload interfaces.ExecutionData) (state.BeaconState, error) {
    var err error
    if st.Version() >= version.Capella {
        st, err = ProcessWithdrawals(st, payload)
        if err != nil {
            return nil, errors.Wrap(err, "could not process withdrawals")
        }
    }
    if err := ValidatePayloadWhenMergeCompletes(st, payload); err != nil {
        return nil, err
@@ -222,20 +220,70 @@ func ProcessPayload(st state.BeaconState, body interfaces.ReadOnlyBeaconBlockBod
    return st, nil
}

func verifyBlobCommitmentCount(body interfaces.ReadOnlyBeaconBlockBody) error {
    if body.Version() < version.Deneb {
        return nil
    }
    kzgs, err := body.BlobKzgCommitments()
// ValidatePayloadHeaderWhenMergeCompletes validates the payload header when the merge completes.
func ValidatePayloadHeaderWhenMergeCompletes(st state.BeaconState, header interfaces.ExecutionData) error {
    // Skip validation if the state is not merge compatible.
    complete, err := IsMergeTransitionComplete(st)
    if err != nil {
        return err
    }
    if len(kzgs) > field_params.MaxBlobsPerBlock {
        return fmt.Errorf("too many kzg commitments in block: %d", len(kzgs))
    if !complete {
        return nil
    }
    // Validate current header's parent hash matches state header's block hash.
    h, err := st.LatestExecutionPayloadHeader()
    if err != nil {
        return err
    }
    if !bytes.Equal(header.ParentHash(), h.BlockHash()) {
        return ErrInvalidPayloadBlockHash
    }
    return nil
}

// ValidatePayloadHeader validates the payload header.
func ValidatePayloadHeader(st state.BeaconState, header interfaces.ExecutionData) error {
    // Validate header's random mix matches with state in current epoch
    random, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
    if err != nil {
        return err
    }
    if !bytes.Equal(header.PrevRandao(), random) {
        return ErrInvalidPayloadPrevRandao
    }

    // Validate header's timestamp matches with state in current slot.
    t, err := slots.ToTime(st.GenesisTime(), st.Slot())
    if err != nil {
        return err
    }
    if header.Timestamp() != uint64(t.Unix()) {
        return ErrInvalidPayloadTimeStamp
    }
    return nil
}

// ProcessPayloadHeader processes the payload header.
func ProcessPayloadHeader(st state.BeaconState, header interfaces.ExecutionData) (state.BeaconState, error) {
    var err error
    if st.Version() >= version.Capella {
        st, err = ProcessWithdrawals(st, header)
        if err != nil {
            return nil, errors.Wrap(err, "could not process withdrawals")
        }
    }
    if err := ValidatePayloadHeaderWhenMergeCompletes(st, header); err != nil {
        return nil, err
    }
    if err := ValidatePayloadHeader(st, header); err != nil {
        return nil, err
    }
    if err := st.SetLatestExecutionPayloadHeader(header); err != nil {
        return nil, err
    }
    return st, nil
}

// GetBlockPayloadHash returns the hash of the execution payload of the block
func GetBlockPayloadHash(blk interfaces.ReadOnlyBeaconBlock) ([32]byte, error) {
    var payloadHash [32]byte
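ValidatePayloadHeader above pins the header's timestamp to the slot's wall-clock start, which reduces to simple arithmetic: the expected value is genesis_time + slot * SECONDS_PER_SLOT. A standalone sketch of that computation follows; the constants are mainnet values used purely for illustration.

package main

import "fmt"

const secondsPerSlot = 12 // mainnet SECONDS_PER_SLOT, illustrative

// slotStartTime mirrors what slots.ToTime computes in the diff above:
// the Unix time at which a given slot begins.
func slotStartTime(genesisTime, slot uint64) uint64 {
    return genesisTime + slot*secondsPerSlot
}

func main() {
    genesis := uint64(1606824023) // mainnet genesis time, illustrative
    slot := uint64(100)
    want := slotStartTime(genesis, slot)
    headerTimestamp := want
    // The payload-header validation above rejects any header whose
    // timestamp differs from the slot start.
    fmt.Println("valid:", headerTimestamp == want)
}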
@@ -1,7 +1,6 @@
package blocks_test

import (
    "fmt"
    "testing"

    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
@@ -14,7 +13,6 @@ import (
    "github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
    "github.com/prysmaticlabs/prysm/v5/encoding/ssz"
    enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
    ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/v5/testing/require"
    "github.com/prysmaticlabs/prysm/v5/testing/util"
    "github.com/prysmaticlabs/prysm/v5/time/slots"
@@ -583,18 +581,14 @@ func Test_ProcessPayload(t *testing.T) {
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            body, err := consensusblocks.NewBeaconBlockBody(&ethpb.BeaconBlockBodyBellatrix{
                ExecutionPayload: tt.payload,
            })
            wrappedPayload, err := consensusblocks.WrappedExecutionPayload(tt.payload)
            require.NoError(t, err)
            st, err := blocks.ProcessPayload(st, body)
            st, err := blocks.ProcessPayload(st, wrappedPayload)
            if err != nil {
                require.Equal(t, tt.err.Error(), err.Error())
            } else {
                require.Equal(t, tt.err, err)
                payload, err := body.Execution()
                require.NoError(t, err)
                want, err := consensusblocks.PayloadToHeader(payload)
                want, err := consensusblocks.PayloadToHeader(wrappedPayload)
                require.Equal(t, tt.err, err)
                h, err := st.LatestExecutionPayloadHeader()
                require.NoError(t, err)
@@ -615,15 +609,13 @@ func Test_ProcessPayloadCapella(t *testing.T) {
    random, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
    require.NoError(t, err)
    payload.PrevRandao = random
    body, err := consensusblocks.NewBeaconBlockBody(&ethpb.BeaconBlockBodyCapella{
        ExecutionPayload: payload,
    })
    wrapped, err := consensusblocks.WrappedExecutionPayloadCapella(payload)
    require.NoError(t, err)
    _, err = blocks.ProcessPayload(st, body)
    _, err = blocks.ProcessPayload(st, wrapped)
    require.NoError(t, err)
}

func Test_ProcessPayload_Blinded(t *testing.T) {
func Test_ProcessPayloadHeader(t *testing.T) {
    st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
    random, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
    require.NoError(t, err)
@@ -671,13 +663,7 @@ func Test_ProcessPayload_Blinded(t *testing.T) {
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            p, ok := tt.header.Proto().(*enginev1.ExecutionPayloadHeader)
            require.Equal(t, true, ok)
            body, err := consensusblocks.NewBeaconBlockBody(&ethpb.BlindedBeaconBlockBodyBellatrix{
                ExecutionPayloadHeader: p,
            })
            require.NoError(t, err)
            st, err := blocks.ProcessPayload(st, body)
            st, err := blocks.ProcessPayloadHeader(st, tt.header)
            if err != nil {
                require.Equal(t, tt.err.Error(), err.Error())
            } else {
@@ -742,7 +728,7 @@ func Test_ValidatePayloadHeader(t *testing.T) {
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            err = blocks.ValidatePayload(st, tt.header)
            err = blocks.ValidatePayloadHeader(st, tt.header)
            require.Equal(t, tt.err, err)
        })
    }
@@ -799,7 +785,7 @@ func Test_ValidatePayloadHeaderWhenMergeCompletes(t *testing.T) {
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            err = blocks.ValidatePayloadWhenMergeCompletes(tt.state, tt.header)
            err = blocks.ValidatePayloadHeaderWhenMergeCompletes(tt.state, tt.header)
            require.Equal(t, tt.err, err)
        })
    }
@@ -920,15 +906,3 @@ func emptyPayloadCapella() *enginev1.ExecutionPayloadCapella {
        Withdrawals: make([]*enginev1.Withdrawal, 0),
    }
}

func TestVerifyBlobCommitmentCount(t *testing.T) {
    b := &ethpb.BeaconBlockDeneb{Body: &ethpb.BeaconBlockBodyDeneb{}}
    rb, err := consensusblocks.NewBeaconBlock(b)
    require.NoError(t, err)
    require.NoError(t, blocks.VerifyBlobCommitmentCount(rb.Body()))

    b = &ethpb.BeaconBlockDeneb{Body: &ethpb.BeaconBlockBodyDeneb{BlobKzgCommitments: make([][]byte, fieldparams.MaxBlobsPerBlock+1)}}
    rb, err = consensusblocks.NewBeaconBlock(b)
    require.NoError(t, err)
    require.ErrorContains(t, fmt.Sprintf("too many kzg commitments in block: %d", fieldparams.MaxBlobsPerBlock+1), blocks.VerifyBlobCommitmentCount(rb.Body()))
}
@@ -96,6 +96,24 @@ func VerifyBlockHeaderSignature(beaconState state.BeaconState, header *ethpb.Sig
    return signing.VerifyBlockHeaderSigningRoot(header.Header, proposerPubKey, header.Signature, domain)
}

func VerifyBlockHeaderSignatureUsingCurrentFork(beaconState state.BeaconState, header *ethpb.SignedBeaconBlockHeader) error {
    currentEpoch := slots.ToEpoch(header.Header.Slot)
    fork, err := forks.Fork(currentEpoch)
    if err != nil {
        return err
    }
    domain, err := signing.Domain(fork, currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorsRoot())
    if err != nil {
        return err
    }
    proposer, err := beaconState.ValidatorAtIndex(header.Header.ProposerIndex)
    if err != nil {
        return err
    }
    proposerPubKey := proposer.PublicKey
    return signing.VerifyBlockHeaderSigningRoot(header.Header, proposerPubKey, header.Signature, domain)
}

// VerifyBlockSignatureUsingCurrentFork verifies the proposer signature of a beacon block. This differs
// from the above method by not using fork data from the state and instead retrieving it
// via the respective epoch.
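The new VerifyBlockHeaderSignatureUsingCurrentFork derives the fork version from the header's own slot rather than from the state's fork field, which matters when a verified header (for example, one inside a slashing) straddles a fork boundary. A standalone sketch of that epoch-to-fork lookup follows; the fork schedule below is invented for illustration and is not any network's real schedule.

package main

import "fmt"

// forkPoint pairs an activation epoch with a fork version, as in a fork schedule.
type forkPoint struct {
    epoch   uint64
    version [4]byte
}

// forkForEpoch returns the latest fork activated at or before the given epoch,
// mirroring what forks.Fork does in the diff above (schedule sorted by epoch).
func forkForEpoch(schedule []forkPoint, epoch uint64) forkPoint {
    current := schedule[0]
    for _, f := range schedule[1:] {
        if f.epoch <= epoch {
            current = f
        }
    }
    return current
}

func main() {
    // Invented schedule: genesis, then one fork activating at epoch 100.
    schedule := []forkPoint{
        {epoch: 0, version: [4]byte{0, 0, 0, 0}},
        {epoch: 100, version: [4]byte{1, 0, 0, 0}},
    }
    const slotsPerEpoch = 32
    headerSlot := uint64(3205) // epoch 100, so the second fork applies
    fmt.Println(forkForEpoch(schedule, headerSlot/slotsPerEpoch))
}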
@@ -49,7 +49,7 @@ func ProcessPendingConsolidations(ctx context.Context, st state.BeaconState) err
        return errors.New("nil state")
    }

    nextEpoch := slots.ToEpoch(st.Slot()) + 1
    currentEpoch := slots.ToEpoch(st.Slot())

    var nextPendingConsolidation uint64
    pendingConsolidations, err := st.PendingConsolidations()
@@ -66,7 +66,7 @@ func ProcessPendingConsolidations(ctx context.Context, st state.BeaconState) err
            nextPendingConsolidation++
            continue
        }
        if sourceValidator.WithdrawableEpoch > nextEpoch {
        if sourceValidator.WithdrawableEpoch > currentEpoch {
            break
        }

@@ -248,7 +248,7 @@ func ProcessPendingBalanceDeposits(ctx context.Context, st state.BeaconState, ac

    // constants
    ffe := params.BeaconConfig().FarFutureEpoch
    nextEpoch := slots.ToEpoch(st.Slot()) + 1
    curEpoch := slots.ToEpoch(st.Slot())

    for _, balanceDeposit := range deposits {
        v, err := st.ValidatorAtIndexReadOnly(balanceDeposit.Index)
@@ -259,7 +259,7 @@ func ProcessPendingBalanceDeposits(ctx context.Context, st state.BeaconState, ac
        // If the validator is currently exiting, postpone the deposit until after the withdrawable
        // epoch.
        if v.ExitEpoch() < ffe {
            if nextEpoch <= v.WithdrawableEpoch() {
            if curEpoch <= v.WithdrawableEpoch() {
                depositsToPostpone = append(depositsToPostpone, balanceDeposit)
            } else {
                // The deposited balance will never become active. Therefore, we increase the balance but do
@@ -22,31 +22,29 @@ var (
//
// Spec definition:
//
// def process_operations(state: BeaconState, body: BeaconBlockBody) -> None:
//     # [Modified in Electra:EIP6110]
//     # Disable former deposit mechanism once all prior deposits are processed
//     eth1_deposit_index_limit = min(state.eth1_data.deposit_count, state.deposit_requests_start_index)
//     if state.eth1_deposit_index < eth1_deposit_index_limit:
//         assert len(body.deposits) == min(MAX_DEPOSITS, eth1_deposit_index_limit - state.eth1_deposit_index)
//     else:
//         assert len(body.deposits) == 0
// def process_operations(state: BeaconState, body: BeaconBlockBody) -> None:
//     # [Modified in Electra:EIP6110]
//     # Disable former deposit mechanism once all prior deposits are processed
//     eth1_deposit_index_limit = min(state.eth1_data.deposit_count, state.deposit_requests_start_index)
//     if state.eth1_deposit_index < eth1_deposit_index_limit:
//         assert len(body.deposits) == min(MAX_DEPOSITS, eth1_deposit_index_limit - state.eth1_deposit_index)
//     else:
//         assert len(body.deposits) == 0
//
//     def for_ops(operations: Sequence[Any], fn: Callable[[BeaconState, Any], None]) -> None:
//         for operation in operations:
//             fn(state, operation)
//     def for_ops(operations: Sequence[Any], fn: Callable[[BeaconState, Any], None]) -> None:
//         for operation in operations:
//             fn(state, operation)
//
//     for_ops(body.proposer_slashings, process_proposer_slashing)
//     for_ops(body.attester_slashings, process_attester_slashing)
//     for_ops(body.attestations, process_attestation)  # [Modified in Electra:EIP7549]
//     for_ops(body.deposits, process_deposit)  # [Modified in Electra:EIP7251]
//     for_ops(body.voluntary_exits, process_voluntary_exit)  # [Modified in Electra:EIP7251]
//     for_ops(body.bls_to_execution_changes, process_bls_to_execution_change)
//     for_ops(body.execution_payload.deposit_requests, process_deposit_request)  # [New in Electra:EIP6110]
//     # [New in Electra:EIP7002:EIP7251]
//     for_ops(body.execution_payload.withdrawal_requests, process_withdrawal_request)
//     # [New in Electra:EIP7251]
//     for_ops(body.execution_payload.consolidation_requests, process_consolidation_request)

//     for_ops(body.proposer_slashings, process_proposer_slashing)
//     for_ops(body.attester_slashings, process_attester_slashing)
//     for_ops(body.attestations, process_attestation)  # [Modified in Electra:EIP7549]
//     for_ops(body.deposits, process_deposit)  # [Modified in Electra:EIP7251]
//     for_ops(body.voluntary_exits, process_voluntary_exit)  # [Modified in Electra:EIP7251]
//     for_ops(body.bls_to_execution_changes, process_bls_to_execution_change)
//     # [New in Electra:EIP7002:EIP7251]
//     for_ops(body.execution_payload.withdrawal_requests, process_execution_layer_withdrawal_request)
//     for_ops(body.execution_payload.deposit_requests, process_deposit_requests)  # [New in Electra:EIP6110]
//     for_ops(body.consolidations, process_consolidation)  # [New in Electra:EIP7251]
func ProcessOperations(
    ctx context.Context,
    st state.BeaconState,
@@ -86,14 +84,16 @@ func ProcessOperations(
        if !ok {
            return nil, errors.New("could not cast execution data to electra execution data")
        }
        st, err = ProcessDepositRequests(ctx, st, exe.DepositRequests())
        if err != nil {
            return nil, errors.Wrap(err, "could not process deposit receipts")
        }
        st, err = ProcessWithdrawalRequests(ctx, st, exe.WithdrawalRequests())
        if err != nil {
            return nil, errors.Wrap(err, "could not process execution layer withdrawal requests")
        }

        st, err = ProcessDepositRequests(ctx, st, exe.DepositRequests())
        if err != nil {
            return nil, errors.Wrap(err, "could not process deposit receipts")
        }

        if err := ProcessConsolidationRequests(ctx, st, exe.ConsolidationRequests()); err != nil {
            return nil, fmt.Errorf("could not process consolidation requests: %w", err)
        }
@@ -1,15 +1,11 @@
package electra_test

import (
    "context"
    "testing"

    "github.com/ethereum/go-ethereum/common"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
    "github.com/prysmaticlabs/prysm/v5/config/params"
    consensusblocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
    ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/v5/testing/require"
    "github.com/prysmaticlabs/prysm/v5/testing/util"
@@ -51,68 +47,3 @@ func TestVerifyOperationLengths_Electra(t *testing.T) {
        require.ErrorContains(t, "incorrect outstanding deposits in block body", err)
    })
}

func TestProcessEpoch_CanProcessElectra(t *testing.T) {
    st, _ := util.DeterministicGenesisStateElectra(t, params.BeaconConfig().MaxValidatorsPerCommittee)
    require.NoError(t, st.SetSlot(10*params.BeaconConfig().SlotsPerEpoch))
    require.NoError(t, st.SetDepositBalanceToConsume(100))
    amountAvailForProcessing := helpers.ActivationExitChurnLimit(1_000 * 1e9)
    deps := make([]*ethpb.PendingBalanceDeposit, 20)
    for i := 0; i < len(deps); i += 1 {
        deps[i] = &ethpb.PendingBalanceDeposit{
            Amount: uint64(amountAvailForProcessing) / 10,
            Index:  primitives.ValidatorIndex(i),
        }
    }
    require.NoError(t, st.SetPendingBalanceDeposits(deps))
    require.NoError(t, st.SetPendingConsolidations([]*ethpb.PendingConsolidation{
        {
            SourceIndex: 2,
            TargetIndex: 3,
        },
        {
            SourceIndex: 0,
            TargetIndex: 1,
        },
    }))
    err := electra.ProcessEpoch(context.Background(), st)
    require.NoError(t, err)
    require.Equal(t, uint64(0), st.Slashings()[2], "Unexpected slashed balance")

    b := st.Balances()
    require.Equal(t, params.BeaconConfig().MaxValidatorsPerCommittee, uint64(len(b)))
    require.Equal(t, uint64(44799839993), b[0])

    s, err := st.InactivityScores()
    require.NoError(t, err)
    require.Equal(t, params.BeaconConfig().MaxValidatorsPerCommittee, uint64(len(s)))

    p, err := st.PreviousEpochParticipation()
    require.NoError(t, err)
    require.Equal(t, params.BeaconConfig().MaxValidatorsPerCommittee, uint64(len(p)))

    p, err = st.CurrentEpochParticipation()
    require.NoError(t, err)
    require.Equal(t, params.BeaconConfig().MaxValidatorsPerCommittee, uint64(len(p)))

    sc, err := st.CurrentSyncCommittee()
    require.NoError(t, err)
    require.Equal(t, params.BeaconConfig().SyncCommitteeSize, uint64(len(sc.Pubkeys)))

    sc, err = st.NextSyncCommittee()
    require.NoError(t, err)
    require.Equal(t, params.BeaconConfig().SyncCommitteeSize, uint64(len(sc.Pubkeys)))

    res, err := st.DepositBalanceToConsume()
    require.NoError(t, err)
    require.Equal(t, primitives.Gwei(100), res)

    // Half of the balance deposits should have been processed.
    remaining, err := st.PendingBalanceDeposits()
    require.NoError(t, err)
    require.Equal(t, 10, len(remaining))

    num, err := st.NumPendingConsolidations()
    require.NoError(t, err)
    require.Equal(t, uint64(2), num)
}
@@ -40,7 +40,6 @@ import (
//    excess_blob_gas=pre.latest_execution_payload_header.excess_blob_gas,
//    deposit_requests_root=Root(),  # [New in Electra:EIP6110]
//    withdrawal_requests_root=Root(),  # [New in Electra:EIP7002],
//    consolidation_requests_root=Root(),  # [New in Electra:EIP7251]
//    )
//
// exit_epochs = [v.exit_epoch for v in pre.validators if v.exit_epoch != FAR_FUTURE_EPOCH]
@@ -22,6 +22,7 @@ go_library(
        "//consensus-types/primitives:go_default_library",
        "//math:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//proto/prysm/v1alpha1/attestation:go_default_library",
        "//runtime/version:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
    ],
@@ -52,6 +53,7 @@ go_test(
        "//testing/util:go_default_library",
        "@com_github_google_go_cmp//cmp:go_default_library",
        "@com_github_google_gofuzz//:go_default_library",
        "@com_github_prysmaticlabs_go_bitfield//:go_default_library",
        "@org_golang_google_protobuf//proto:go_default_library",
    ],
)
@@ -20,9 +20,32 @@ import (
    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
    "github.com/prysmaticlabs/prysm/v5/math"
    ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
    "github.com/prysmaticlabs/prysm/v5/runtime/version"
)

// AttestingBalance returns the total balance from all the attesting indices.
//
// WARNING: This method allocates a new copy of the attesting validator indices set and is
// considered to be very memory expensive. Avoid using this unless you really
// need to get attesting balance from attestations.
//
// Spec pseudocode definition:
//
// def get_attesting_balance(state: BeaconState, attestations: Sequence[PendingAttestation]) -> Gwei:
//     """
//     Return the combined effective balance of the set of unslashed validators participating in ``attestations``.
//     Note: ``get_total_balance`` returns ``EFFECTIVE_BALANCE_INCREMENT`` Gwei minimum to avoid divisions by zero.
//     """
//     return get_total_balance(state, get_unslashed_attesting_indices(state, attestations))
func AttestingBalance(ctx context.Context, state state.ReadOnlyBeaconState, atts []*ethpb.PendingAttestation) (uint64, error) {
    indices, err := UnslashedAttestingIndices(ctx, state, atts)
    if err != nil {
        return 0, errors.Wrap(err, "could not get attesting indices")
    }
    return helpers.TotalBalance(state, indices), nil
}

// ProcessRegistryUpdates rotates validators in and out of the active pool.
// The amount to rotate is determined by the churn limit.
//
@@ -432,3 +455,51 @@ func ProcessFinalUpdates(state state.BeaconState) (state.BeaconState, error) {

    return state, nil
}

// UnslashedAttestingIndices returns all the attesting indices from a list of attestations.
// It sorts the indices and filters out the slashed ones.
//
// Spec pseudocode definition:
//
// def get_unslashed_attesting_indices(state: BeaconState,
//                                     attestations: Sequence[PendingAttestation]) -> Set[ValidatorIndex]:
//     output = set()  # type: Set[ValidatorIndex]
//     for a in attestations:
//         output = output.union(get_attesting_indices(state, a.data, a.aggregation_bits))
//     return set(filter(lambda index: not state.validators[index].slashed, output))
func UnslashedAttestingIndices(ctx context.Context, state state.ReadOnlyBeaconState, atts []*ethpb.PendingAttestation) ([]primitives.ValidatorIndex, error) {
    var setIndices []primitives.ValidatorIndex
    seen := make(map[uint64]bool)

    for _, att := range atts {
        committee, err := helpers.BeaconCommitteeFromState(ctx, state, att.GetData().Slot, att.GetData().CommitteeIndex)
        if err != nil {
            return nil, err
        }
        attestingIndices, err := attestation.AttestingIndices(att, committee)
        if err != nil {
            return nil, err
        }
        // Create a set for attesting indices
        for _, index := range attestingIndices {
            if !seen[index] {
                setIndices = append(setIndices, primitives.ValidatorIndex(index))
            }
            seen[index] = true
        }
    }
    // Sort the attesting set indices by increasing order.
    sort.Slice(setIndices, func(i, j int) bool { return setIndices[i] < setIndices[j] })
    // Remove the slashed validator indices.
    for i := 0; i < len(setIndices); i++ {
        v, err := state.ValidatorAtIndexReadOnly(setIndices[i])
        if err != nil {
            return nil, errors.Wrap(err, "failed to look up validator")
        }
        if !v.IsNil() && v.Slashed() {
            setIndices = append(setIndices[:i], setIndices[i+1:]...)
            // Step back so the element shifted into slot i is also checked.
            i--
        }
    }

    return setIndices, nil
}
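UnslashedAttestingIndices above builds a deduplicated union with a seen-map, sorts it, then strips slashed validators. Here is a self-contained sketch of that dedupe-sort-filter pipeline over plain integers; the attestation index lists and the slashed set are arbitrary test data, not anything from the chain.

package main

import (
    "fmt"
    "sort"
)

// unslashedUnion mirrors the shape of UnslashedAttestingIndices: union the
// per-attestation index lists, sort, and drop slashed entries.
func unslashedUnion(attIndices [][]uint64, slashed map[uint64]bool) []uint64 {
    seen := make(map[uint64]bool)
    var out []uint64
    for _, indices := range attIndices {
        for _, idx := range indices {
            if !seen[idx] {
                out = append(out, idx)
            }
            seen[idx] = true
        }
    }
    sort.Slice(out, func(i, j int) bool { return out[i] < out[j] })
    // Filter by rewriting in place, which avoids the index bookkeeping that
    // deleting mid-iteration requires.
    filtered := out[:0]
    for _, idx := range out {
        if !slashed[idx] {
            filtered = append(filtered, idx)
        }
    }
    return filtered
}

func main() {
    atts := [][]uint64{{5, 1, 9}, {1, 9, 3}}
    slashed := map[uint64]bool{9: true}
    fmt.Println(unslashedUnion(atts, slashed)) // [1 3 5]
}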
@@ -6,6 +6,7 @@ import (
    "math"
    "testing"

    "github.com/prysmaticlabs/go-bitfield"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/epoch"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
@@ -23,6 +24,131 @@ import (
    "google.golang.org/protobuf/proto"
)

func TestUnslashedAttestingIndices_CanSortAndFilter(t *testing.T) {
    // Generate 2 attestations.
    atts := make([]*ethpb.PendingAttestation, 2)
    for i := 0; i < len(atts); i++ {
        atts[i] = &ethpb.PendingAttestation{
            Data: &ethpb.AttestationData{
                Source: &ethpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)},
                Target: &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, fieldparams.RootLength)},
            },
            AggregationBits: bitfield.Bitlist{0x00, 0xFF, 0xFF, 0xFF},
        }
    }

    // Generate validators and state for the 2 attestations.
    validatorCount := 1000
    validators := make([]*ethpb.Validator, validatorCount)
    for i := 0; i < len(validators); i++ {
        validators[i] = &ethpb.Validator{
            ExitEpoch: params.BeaconConfig().FarFutureEpoch,
        }
    }
    base := &ethpb.BeaconState{
        Validators:  validators,
        RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
    }
    beaconState, err := state_native.InitializeFromProtoPhase0(base)
    require.NoError(t, err)

    indices, err := epoch.UnslashedAttestingIndices(context.Background(), beaconState, atts)
    require.NoError(t, err)
    for i := 0; i < len(indices)-1; i++ {
        if indices[i] >= indices[i+1] {
            t.Error("sorted indices not sorted or duplicated")
        }
    }

    // Verify the slashed validator is filtered.
    slashedValidator := indices[0]
    validators = beaconState.Validators()
    validators[slashedValidator].Slashed = true
    require.NoError(t, beaconState.SetValidators(validators))
    indices, err = epoch.UnslashedAttestingIndices(context.Background(), beaconState, atts)
    require.NoError(t, err)
    for i := 0; i < len(indices); i++ {
        assert.NotEqual(t, slashedValidator, indices[i], "Slashed validator %d is not filtered", slashedValidator)
    }
}

func TestUnslashedAttestingIndices_DuplicatedAttestations(t *testing.T) {
    // Generate 5 of the same attestations.
    atts := make([]*ethpb.PendingAttestation, 5)
    for i := 0; i < len(atts); i++ {
        atts[i] = &ethpb.PendingAttestation{
            Data: &ethpb.AttestationData{
                Source: &ethpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)},
                Target: &ethpb.Checkpoint{Epoch: 0},
            },
            AggregationBits: bitfield.Bitlist{0x00, 0xFF, 0xFF, 0xFF},
        }
    }

    // Generate validators and state for the 5 attestations.
    validatorCount := 1000
    validators := make([]*ethpb.Validator, validatorCount)
    for i := 0; i < len(validators); i++ {
        validators[i] = &ethpb.Validator{
            ExitEpoch: params.BeaconConfig().FarFutureEpoch,
        }
    }
    base := &ethpb.BeaconState{
        Validators:  validators,
        RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
    }
    beaconState, err := state_native.InitializeFromProtoPhase0(base)
    require.NoError(t, err)

    indices, err := epoch.UnslashedAttestingIndices(context.Background(), beaconState, atts)
    require.NoError(t, err)

    for i := 0; i < len(indices)-1; i++ {
        if indices[i] >= indices[i+1] {
            t.Error("sorted indices not sorted or duplicated")
        }
    }
}

func TestAttestingBalance_CorrectBalance(t *testing.T) {
    helpers.ClearCache()
    // Generate 2 attestations.
    atts := make([]*ethpb.PendingAttestation, 2)
    for i := 0; i < len(atts); i++ {
        atts[i] = &ethpb.PendingAttestation{
            Data: &ethpb.AttestationData{
                Target: &ethpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)},
                Source: &ethpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)},
                Slot:   primitives.Slot(i),
            },
            AggregationBits: bitfield.Bitlist{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x01},
        }
    }

    // Generate validators with balances and state for the 2 attestations.
    validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
    balances := make([]uint64, params.BeaconConfig().MinGenesisActiveValidatorCount)
    for i := 0; i < len(validators); i++ {
        validators[i] = &ethpb.Validator{
            ExitEpoch:        params.BeaconConfig().FarFutureEpoch,
            EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
        }
        balances[i] = params.BeaconConfig().MaxEffectiveBalance
    }
    base := &ethpb.BeaconState{
        Slot:        2,
        RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
        Validators:  validators,
        Balances:    balances,
    }
    beaconState, err := state_native.InitializeFromProtoPhase0(base)
    require.NoError(t, err)

    balance, err := epoch.AttestingBalance(context.Background(), beaconState, atts)
    require.NoError(t, err)
    wanted := 256 * params.BeaconConfig().MaxEffectiveBalance
    assert.Equal(t, wanted, balance)
}

func TestProcessSlashings_NotSlashed(t *testing.T) {
    base := &ethpb.BeaconState{
        Slot: 0,
@@ -47,6 +47,7 @@ go_test(
    embed = [":go_default_library"],
    deps = [
        "//beacon-chain/core/altair:go_default_library",
        "//beacon-chain/core/epoch:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/core/time:go_default_library",
        "//beacon-chain/state:go_default_library",
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/epoch"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
@@ -58,6 +59,90 @@ func TestProcessRewardsAndPenaltiesPrecompute(t *testing.T) {
|
||||
assert.Equal(t, wanted, beaconState.Balances()[0], "Unexpected balance")
|
||||
}
|
||||
|
||||
func TestAttestationDeltaPrecompute(t *testing.T) {
|
||||
e := params.BeaconConfig().SlotsPerEpoch
|
||||
validatorCount := uint64(2048)
|
||||
base := buildState(e+2, validatorCount)
|
||||
atts := make([]*ethpb.PendingAttestation, 3)
|
||||
var emptyRoot [32]byte
|
||||
for i := 0; i < len(atts); i++ {
|
||||
atts[i] = ðpb.PendingAttestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Target: ðpb.Checkpoint{
|
||||
Root: emptyRoot[:],
|
||||
},
|
||||
Source: ðpb.Checkpoint{
|
||||
Root: emptyRoot[:],
|
||||
},
|
||||
BeaconBlockRoot: emptyRoot[:],
|
||||
},
|
||||
AggregationBits: bitfield.Bitlist{0xC0, 0xC0, 0xC0, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x01},
|
||||
InclusionDelay: 1,
|
||||
}
|
||||
}
|
||||
base.PreviousEpochAttestations = atts
|
||||
beaconState, err := state_native.InitializeFromProtoPhase0(base)
|
||||
require.NoError(t, err)
|
||||
slashedAttestedIndices := []primitives.ValidatorIndex{1413}
|
||||
for _, i := range slashedAttestedIndices {
|
||||
vs := beaconState.Validators()
|
||||
vs[i].Slashed = true
|
||||
require.Equal(t, nil, beaconState.SetValidators(vs))
|
||||
}
|
||||
|
||||
vp, bp, err := New(context.Background(), beaconState)
|
||||
require.NoError(t, err)
|
||||
vp, bp, err = ProcessAttestations(context.Background(), beaconState, vp, bp)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Add some variances to target and head balances.
|
||||
// See: https://github.com/prysmaticlabs/prysm/issues/5593
|
||||
bp.PrevEpochTargetAttested /= 2
|
||||
bp.PrevEpochHeadAttested = bp.PrevEpochHeadAttested * 2 / 3
|
||||
rewards, penalties, err := AttestationsDelta(beaconState, bp, vp)
|
||||
require.NoError(t, err)
|
||||
attestedBalance, err := epoch.AttestingBalance(context.Background(), beaconState, atts)
|
||||
require.NoError(t, err)
|
||||
totalBalance, err := helpers.TotalActiveBalance(beaconState)
|
||||
require.NoError(t, err)
|
||||
|
||||
attestedIndices := []primitives.ValidatorIndex{55, 1339, 1746, 1811, 1569}
|
||||
for _, i := range attestedIndices {
|
||||
base, err := baseReward(beaconState, i)
|
||||
require.NoError(t, err, "Could not get base reward")
|
||||
|
||||
// Base rewards for getting source right
|
||||
wanted := attestedBalance*base/totalBalance +
|
||||
bp.PrevEpochTargetAttested*base/totalBalance +
|
||||
bp.PrevEpochHeadAttested*base/totalBalance
|
||||
// Base rewards for proposer and attesters working together getting attestation
|
||||
// on chain in the fatest manner
|
||||
proposerReward := base / params.BeaconConfig().ProposerRewardQuotient
|
||||
wanted += (base-proposerReward)*uint64(params.BeaconConfig().MinAttestationInclusionDelay) - 1
|
||||
assert.Equal(t, wanted, rewards[i], "Unexpected reward balance for validator with index %d", i)
|
||||
// Since all these validators attested, they shouldn't get penalized.
|
||||
assert.Equal(t, uint64(0), penalties[i], "Unexpected penalty balance")
|
||||
}
|
||||
|
||||
for _, i := range slashedAttestedIndices {
|
||||
base, err := baseReward(beaconState, i)
|
||||
assert.NoError(t, err, "Could not get base reward")
|
||||
assert.Equal(t, uint64(0), rewards[i], "Unexpected slashed indices reward balance")
|
||||
assert.Equal(t, 3*base, penalties[i], "Unexpected slashed indices penalty balance")
|
||||
}
|
||||
|
||||
nonAttestedIndices := []primitives.ValidatorIndex{434, 677, 872, 791}
|
||||
for _, i := range nonAttestedIndices {
|
||||
base, err := baseReward(beaconState, i)
|
||||
assert.NoError(t, err, "Could not get base reward")
|
||||
wanted := 3 * base
|
||||
// Since all these validators did not attest, they shouldn't get rewarded.
|
||||
assert.Equal(t, uint64(0), rewards[i], "Unexpected reward balance")
|
||||
// Base penalties for not attesting.
|
||||
assert.Equal(t, wanted, penalties[i], "Unexpected penalty balance")
|
||||
}
|
||||
}
|
||||
|
||||
func TestAttestationDeltas_ZeroEpoch(t *testing.T) {
|
||||
e := params.BeaconConfig().SlotsPerEpoch
|
||||
validatorCount := uint64(2048)
|
||||
|
||||
@@ -32,6 +32,9 @@ const (

    // AttesterSlashingReceived is sent after an attester slashing is received from gossip or rpc
    AttesterSlashingReceived = 8

    // DataColumnSidecarReceived is sent after a data column sidecar is received from gossip or rpc.
    DataColumnSidecarReceived = 9
)

// UnAggregatedAttReceivedData is the data sent with UnaggregatedAttReceived events.
@@ -77,3 +80,7 @@ type ProposerSlashingReceivedData struct {
type AttesterSlashingReceivedData struct {
    AttesterSlashing ethpb.AttSlashing
}

type DataColumnSidecarReceivedData struct {
    DataColumn *blocks.VerifiedRODataColumn
}
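DataColumnSidecarReceived extends the operation-feed event set, so consumers dispatch on the new type tag and type-assert the payload, just as they do for the other event data structs above. The following is a generic Go sketch of that consumer-side pattern; it deliberately does not reproduce Prysm's actual feed or notifier API, and all names in it are invented.

package main

import "fmt"

// event is a minimal stand-in for a feed event carrying a type tag and payload.
type event struct {
    typ  int
    data interface{}
}

const dataColumnSidecarReceived = 9 // matches the constant added above

type dataColumnSidecarReceivedData struct{ columnIndex uint64 }

// handle dispatches on the event type, as a feed subscriber would.
func handle(ev event) {
    switch ev.typ {
    case dataColumnSidecarReceived:
        d, ok := ev.data.(*dataColumnSidecarReceivedData)
        if !ok {
            fmt.Println("unexpected payload type")
            return
        }
        fmt.Println("received data column", d.columnIndex)
    default:
        fmt.Println("ignoring event", ev.typ)
    }
}

func main() {
    handle(event{typ: dataColumnSidecarReceived, data: &dataColumnSidecarReceivedData{columnIndex: 17}})
}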
@@ -82,47 +82,6 @@ func AttestationCommittees(ctx context.Context, st state.ReadOnlyBeaconState, at
    return committees, nil
}

// BeaconCommittees returns the list of all beacon committees for a given state at a given slot.
func BeaconCommittees(ctx context.Context, state state.ReadOnlyBeaconState, slot primitives.Slot) ([][]primitives.ValidatorIndex, error) {
    epoch := slots.ToEpoch(slot)
    activeCount, err := ActiveValidatorCount(ctx, state, epoch)
    if err != nil {
        return nil, errors.Wrap(err, "could not compute active validator count")
    }
    committeesPerSlot := SlotCommitteeCount(activeCount)
    seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
    if err != nil {
        return nil, errors.Wrap(err, "could not get seed")
    }

    committees := make([][]primitives.ValidatorIndex, committeesPerSlot)
    var activeIndices []primitives.ValidatorIndex

    for idx := primitives.CommitteeIndex(0); idx < primitives.CommitteeIndex(len(committees)); idx++ {
        committee, err := committeeCache.Committee(ctx, slot, seed, idx)
        if err != nil {
            return nil, errors.Wrap(err, "could not interface with committee cache")
        }
        if committee != nil {
            committees[idx] = committee
            continue
        }

        if len(activeIndices) == 0 {
            activeIndices, err = ActiveValidatorIndices(ctx, state, epoch)
            if err != nil {
                return nil, errors.Wrap(err, "could not get active indices")
            }
        }
        committee, err = BeaconCommittee(ctx, activeIndices, seed, slot, idx)
        if err != nil {
            return nil, errors.Wrap(err, "could not compute beacon committee")
        }
        committees[idx] = committee
    }
    return committees, nil
}

// BeaconCommitteeFromState returns the crosslink committee of a given slot and committee index. This
// is a spec implementation where state is used as an argument. In case state retrieval
// becomes expensive, consider using BeaconCommittee below.
@@ -294,22 +253,36 @@ func CommitteeAssignments(ctx context.Context, state state.BeaconState, epoch pr
    if err := verifyAssignmentEpoch(epoch, state); err != nil {
        return nil, err
    }

    // Retrieve active validator count for the specified epoch.
    activeValidatorCount, err := ActiveValidatorCount(ctx, state, epoch)
    if err != nil {
        return nil, err
    }

    // Determine the number of committees per slot based on the number of active validator indices.
    numCommitteesPerSlot := SlotCommitteeCount(activeValidatorCount)

    startSlot, err := slots.EpochStart(epoch)
    if err != nil {
        return nil, err
    }

    assignments := make(map[primitives.ValidatorIndex]*CommitteeAssignment)
    vals := make(map[primitives.ValidatorIndex]struct{})
    for _, v := range validators {
        vals[v] = struct{}{}
    }
    assignments := make(map[primitives.ValidatorIndex]*CommitteeAssignment)

    // Compute committee assignments for each slot in the epoch.
    for slot := startSlot; slot < startSlot+params.BeaconConfig().SlotsPerEpoch; slot++ {
        committees, err := BeaconCommittees(ctx, state, slot)
        if err != nil {
            return nil, errors.Wrap(err, "could not compute beacon committees")
        }
        for j, committee := range committees {
        // Compute committees for the current slot.
        for j := uint64(0); j < numCommitteesPerSlot; j++ {
            committee, err := BeaconCommitteeFromState(ctx, state, slot, primitives.CommitteeIndex(j))
            if err != nil {
                return nil, err
            }

            for _, vIndex := range committee {
                if _, ok := vals[vIndex]; !ok { // Skip if the validator is not in the provided validators slice.
                    continue
@@ -323,6 +296,7 @@ func CommitteeAssignments(ctx context.Context, state state.BeaconState, epoch pr
            }
        }
    }

    return assignments, nil
}
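CommitteeAssignments filters every committee member against the requested validator set, so lookup cost matters: a struct{}-valued map gives constant-time membership tests instead of scanning a slice once per committee member. A standalone sketch of that idiom, with arbitrary example indices:

package main

import "fmt"

// filterMembers keeps only committee members present in the requested set,
// using a struct{}-valued map for O(1) membership checks.
func filterMembers(committee, requested []uint64) []uint64 {
    set := make(map[uint64]struct{}, len(requested))
    for _, v := range requested {
        set[v] = struct{}{}
    }
    var out []uint64
    for _, member := range committee {
        if _, ok := set[member]; ok {
            out = append(out, member)
        }
    }
    return out
}

func main() {
    committee := []uint64{4, 8, 15, 16, 23, 42}
    fmt.Println(filterMembers(committee, []uint64{15, 42, 99})) // [15 42]
}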
@@ -337,6 +311,24 @@ func VerifyBitfieldLength(bf bitfield.Bitfield, committeeSize uint64) error {
    return nil
}

// VerifyAttestationBitfieldLengths verifies that an attestation's aggregation bitfield is
// a valid length matching the size of the committee.
func VerifyAttestationBitfieldLengths(ctx context.Context, state state.ReadOnlyBeaconState, att ethpb.Att) error {
    committee, err := BeaconCommitteeFromState(ctx, state, att.GetData().Slot, att.GetData().CommitteeIndex)
    if err != nil {
        return errors.Wrap(err, "could not retrieve beacon committees")
    }

    if committee == nil {
        return errors.New("no committee exist for this attestation")
    }

    if err := VerifyBitfieldLength(att.GetAggregationBits(), uint64(len(committee))); err != nil {
        return errors.Wrap(err, "failed to verify aggregation bitfield")
    }
    return nil
}

// ShuffledIndices uses input beacon state and returns the shuffled indices of the input epoch;
// the shuffled indices can then be used to break up into committees.
func ShuffledIndices(s state.ReadOnlyBeaconState, epoch primitives.Epoch) ([]primitives.ValidatorIndex, error) {
@@ -18,7 +18,6 @@ import (
    ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/v5/testing/assert"
    "github.com/prysmaticlabs/prysm/v5/testing/require"
    "github.com/prysmaticlabs/prysm/v5/testing/util"
    "github.com/prysmaticlabs/prysm/v5/time/slots"
)

@@ -403,12 +402,7 @@ func TestVerifyAttestationBitfieldLengths_OK(t *testing.T) {
            helpers.ClearCache()

            require.NoError(t, state.SetSlot(tt.stateSlot))
            att := tt.attestation
            // Verify attesting indices are correct.
            committee, err := helpers.BeaconCommitteeFromState(context.Background(), state, att.GetData().Slot, att.GetData().CommitteeIndex)
            require.NoError(t, err)
            require.NotNil(t, committee)
            err = helpers.VerifyBitfieldLength(att.GetAggregationBits(), uint64(len(committee)))
            err := helpers.VerifyAttestationBitfieldLengths(context.Background(), state, tt.attestation)
            if tt.verificationFailure {
                assert.NotNil(t, err, "Verification succeeded when it was supposed to fail")
            } else {
@@ -755,27 +749,3 @@ func TestAttestationCommittees(t *testing.T) {
        assert.Equal(t, params.BeaconConfig().TargetCommitteeSize, uint64(len(committees[1])))
    })
}

func TestBeaconCommittees(t *testing.T) {
    prevConfig := params.BeaconConfig().Copy()
    defer params.OverrideBeaconConfig(prevConfig)
    c := params.BeaconConfig().Copy()
    c.MinGenesisActiveValidatorCount = 128
    c.SlotsPerEpoch = 4
    c.TargetCommitteeSize = 16
    params.OverrideBeaconConfig(c)

    state, _ := util.DeterministicGenesisState(t, 256)

    activeCount, err := helpers.ActiveValidatorCount(context.Background(), state, 0)
    require.NoError(t, err)
    committeesPerSlot := helpers.SlotCommitteeCount(activeCount)
    committees, err := helpers.BeaconCommittees(context.Background(), state, 0)
    require.NoError(t, err)
    require.Equal(t, committeesPerSlot, uint64(len(committees)))
    for idx := primitives.CommitteeIndex(0); idx < primitives.CommitteeIndex(len(committees)); idx++ {
        committee, err := helpers.BeaconCommitteeFromState(context.Background(), state, 0, idx)
        require.NoError(t, err)
        require.DeepEqual(t, committees[idx], committee)
    }
}
@@ -78,6 +78,7 @@ func TestIsCurrentEpochSyncCommittee_UsingCommittee(t *testing.T) {

func TestIsCurrentEpochSyncCommittee_DoesNotExist(t *testing.T) {
    helpers.ClearCache()
    params.SetupTestConfigCleanup(t)

    validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
    syncCommittee := &ethpb.SyncCommittee{
@@ -264,6 +265,7 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
}

func TestCurrentEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
    params.SetupTestConfigCleanup(t)
    helpers.ClearCache()

    validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
@@ -1,31 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = ["lightclient.go"],
    importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/light-client",
    visibility = ["//visibility:public"],
    deps = [
        "//beacon-chain/state:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/interfaces:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//proto/eth/v1:go_default_library",
        "//proto/eth/v2:go_default_library",
        "//proto/migration:go_default_library",
        "//time/slots:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["lightclient_test.go"],
    deps = [
        ":go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//proto/eth/v1:go_default_library",
        "//testing/require:go_default_library",
        "//testing/util:go_default_library",
    ],
)
@@ -1,54 +0,0 @@
package light_client_test

import (
    "testing"

    lightClient "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/light-client"
    "github.com/prysmaticlabs/prysm/v5/config/params"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
    "github.com/prysmaticlabs/prysm/v5/testing/require"
    "github.com/prysmaticlabs/prysm/v5/testing/util"

    v1 "github.com/prysmaticlabs/prysm/v5/proto/eth/v1"
)

func TestLightClient_NewLightClientOptimisticUpdateFromBeaconState(t *testing.T) {
    l := util.NewTestLightClient(t).SetupTest()

    update, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState)
    require.NoError(t, err)
    require.NotNil(t, update, "update is nil")

    require.Equal(t, l.Block.Block().Slot(), update.SignatureSlot, "Signature slot is not equal")

    l.CheckSyncAggregate(update)
    l.CheckAttestedHeader(update)

    require.Equal(t, (*v1.BeaconBlockHeader)(nil), update.FinalizedHeader, "Finalized header is not nil")
    require.DeepSSZEqual(t, ([][]byte)(nil), update.FinalityBranch, "Finality branch is not nil")
}

func TestLightClient_NewLightClientFinalityUpdateFromBeaconState(t *testing.T) {
    l := util.NewTestLightClient(t).SetupTest()

    update, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, nil)
    require.NoError(t, err)
    require.NotNil(t, update, "update is nil")

    require.Equal(t, l.Block.Block().Slot(), update.SignatureSlot, "Signature slot is not equal")

    l.CheckSyncAggregate(update)
    l.CheckAttestedHeader(update)

    zeroHash := params.BeaconConfig().ZeroHash[:]
    require.NotNil(t, update.FinalizedHeader, "Finalized header is nil")
    require.Equal(t, primitives.Slot(0), update.FinalizedHeader.Slot, "Finalized header slot is not zero")
    require.Equal(t, primitives.ValidatorIndex(0), update.FinalizedHeader.ProposerIndex, "Finalized header proposer index is not zero")
    require.DeepSSZEqual(t, zeroHash, update.FinalizedHeader.ParentRoot, "Finalized header parent root is not zero")
    require.DeepSSZEqual(t, zeroHash, update.FinalizedHeader.StateRoot, "Finalized header state root is not zero")
    require.DeepSSZEqual(t, zeroHash, update.FinalizedHeader.BodyRoot, "Finalized header body root is not zero")
    require.Equal(t, lightClient.FinalityBranchNumOfLeaves, len(update.FinalityBranch), "Invalid finality branch leaves")
    for _, leaf := range update.FinalityBranch {
        require.DeepSSZEqual(t, zeroHash, leaf, "Leaf is not zero")
    }
}
38  beacon-chain/core/peerdas/BUILD.bazel (new file)
@@ -0,0 +1,38 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = ["helpers.go"],
    importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas",
    visibility = ["//visibility:public"],
    deps = [
        "//beacon-chain/blockchain/kzg:go_default_library",
        "//cmd/beacon-chain/flags:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/blocks:go_default_library",
        "//consensus-types/interfaces:go_default_library",
        "//crypto/hash:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
        "@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
        "@com_github_holiman_uint256//:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["helpers_test.go"],
    deps = [
        ":go_default_library",
        "//beacon-chain/blockchain/kzg:go_default_library",
        "//consensus-types/blocks:go_default_library",
        "//testing/require:go_default_library",
        "//testing/util:go_default_library",
        "@com_github_consensys_gnark_crypto//ecc/bls12-381/fr:go_default_library",
        "@com_github_crate_crypto_go_kzg_4844//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
    ],
)
359  beacon-chain/core/peerdas/helpers.go (new file)
@@ -0,0 +1,359 @@
package peerdas

import (
    "encoding/binary"
    "math"
    "math/big"

    fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"

    "github.com/ethereum/go-ethereum/p2p/enode"
    "github.com/ethereum/go-ethereum/p2p/enr"
    "github.com/holiman/uint256"
    errors "github.com/pkg/errors"

    kzg "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"

    "github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
    "github.com/prysmaticlabs/prysm/v5/config/params"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
    "github.com/prysmaticlabs/prysm/v5/crypto/hash"
    "github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
    ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)

const (
    CustodySubnetCountEnrKey = "csc"
)

// https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/p2p-interface.md#the-discovery-domain-discv5
type Csc uint64

func (Csc) ENRKey() string { return CustodySubnetCountEnrKey }

var (
    // Custom errors
    errCustodySubnetCountTooLarge   = errors.New("custody subnet count larger than data column sidecar subnet count")
    errIndexTooLarge                = errors.New("column index is larger than the specified columns count")
    errMismatchLength               = errors.New("mismatch in the length of the commitments and proofs")
    errRecordNil                    = errors.New("record is nil")
    errCannotLoadCustodySubnetCount = errors.New("cannot load the custody subnet count from peer")

    // maxUint256 is the maximum value of a uint256.
    maxUint256 = &uint256.Int{math.MaxUint64, math.MaxUint64, math.MaxUint64, math.MaxUint64}
)

// CustodyColumnSubnets computes the subnets the node should participate in for custody.
func CustodyColumnSubnets(nodeId enode.ID, custodySubnetCount uint64) (map[uint64]bool, error) {
    dataColumnSidecarSubnetCount := params.BeaconConfig().DataColumnSidecarSubnetCount

    // Check if the custody subnet count is larger than the data column sidecar subnet count.
    if custodySubnetCount > dataColumnSidecarSubnetCount {
        return nil, errCustodySubnetCountTooLarge
    }

    // First, compute the subnet IDs that the node should participate in.
    subnetIds := make(map[uint64]bool, custodySubnetCount)

    one := uint256.NewInt(1)

    for currentId := new(uint256.Int).SetBytes(nodeId.Bytes()); uint64(len(subnetIds)) < custodySubnetCount; currentId.Add(currentId, one) {
        // Convert to big endian bytes.
        currentIdBytesBigEndian := currentId.Bytes32()

        // Convert to little endian.
        currentIdBytesLittleEndian := bytesutil.ReverseByteOrder(currentIdBytesBigEndian[:])

        // Hash the result.
        hashedCurrentId := hash.Hash(currentIdBytesLittleEndian)

        // Get the subnet ID.
        subnetId := binary.LittleEndian.Uint64(hashedCurrentId[:8]) % dataColumnSidecarSubnetCount

        // Add the subnet to the map.
        subnetIds[subnetId] = true

        // Overflow prevention.
        if currentId.Cmp(maxUint256) == 0 {
            currentId = uint256.NewInt(0)
        }
    }

    return subnetIds, nil
}
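CustodyColumnSubnets walks node-ID space deterministically: interpret the node ID as a 256-bit counter, hash each successive value, and take the first eight little-endian bytes of the digest modulo the subnet count until enough distinct subnets are collected. Below is a self-contained sketch of that derivation using SHA-256 and math/big from the standard library; Prysm uses its own hash package and uint256 arithmetic, and the counts here are toy values, so treat this as illustrative only.

package main

import (
    "crypto/sha256"
    "encoding/binary"
    "fmt"
    "math/big"
)

// custodySubnets mirrors the derivation above for a toy 32-byte node ID.
// Overflow wrap-around is omitted for brevity.
func custodySubnets(nodeID [32]byte, count, subnetCount uint64) map[uint64]bool {
    subnets := make(map[uint64]bool, count)
    current := new(big.Int).SetBytes(nodeID[:])
    one := big.NewInt(1)
    for uint64(len(subnets)) < count {
        // Serialize the counter as 32 big-endian bytes, then reverse to
        // little-endian before hashing, matching the byte order above.
        var be [32]byte
        current.FillBytes(be[:])
        le := make([]byte, 32)
        for i := range be {
            le[i] = be[31-i]
        }
        h := sha256.Sum256(le)
        subnets[binary.LittleEndian.Uint64(h[:8])%subnetCount] = true
        current.Add(current, one)
    }
    return subnets
}

func main() {
    var id [32]byte
    id[31] = 7 // arbitrary toy node ID
    fmt.Println(custodySubnets(id, 4, 32))
}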
// CustodyColumns computes the columns the node should custody.
// https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/das-core.md#helper-functions
func CustodyColumns(nodeId enode.ID, custodySubnetCount uint64) (map[uint64]bool, error) {
    dataColumnSidecarSubnetCount := params.BeaconConfig().DataColumnSidecarSubnetCount

    // Compute the custodied subnets.
    subnetIds, err := CustodyColumnSubnets(nodeId, custodySubnetCount)
    if err != nil {
        return nil, errors.Wrap(err, "custody subnets")
    }

    columnsPerSubnet := fieldparams.NumberOfColumns / dataColumnSidecarSubnetCount

    // Knowing the subnet ID and the number of columns per subnet, select all the columns the node should custody.
    // Columns belonging to the same subnet are spaced one full subnet count apart.
    columnIndices := make(map[uint64]bool, custodySubnetCount*columnsPerSubnet)
    for i := uint64(0); i < columnsPerSubnet; i++ {
        for subnetId := range subnetIds {
            columnIndex := dataColumnSidecarSubnetCount*i + subnetId
            columnIndices[columnIndex] = true
        }
    }

    return columnIndices, nil
}
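CustodyColumns expands each custodied subnet into column indices with columnIndex = subnetCount*i + subnetID, so a subnet's columns land in strides across the whole column space rather than in one contiguous run. A quick worked example follows; 128 columns and 32 subnets are assumed draft PeerDAS parameters, used only for illustration.

package main

import "fmt"

func main() {
    const (
        numberOfColumns = uint64(128) // assumed draft parameter
        subnetCount     = uint64(32)  // assumed draft parameter
    )
    columnsPerSubnet := numberOfColumns / subnetCount // 4

    // Columns custodied for subnet 3: one per stride of subnetCount.
    subnetID := uint64(3)
    for i := uint64(0); i < columnsPerSubnet; i++ {
        fmt.Println(subnetCount*i + subnetID) // 3, 35, 67, 99
    }
}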
||||
// DataColumnSidecars computes the data column sidecars from the signed block and blobs.
|
||||
// https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/das-core.md#recover_matrix
|
||||
func DataColumnSidecars(signedBlock interfaces.ReadOnlySignedBeaconBlock, blobs []kzg.Blob) ([]*ethpb.DataColumnSidecar, error) {
|
||||
blobsCount := len(blobs)
|
||||
if blobsCount == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Get the signed block header.
|
||||
signedBlockHeader, err := signedBlock.Header()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "signed block header")
|
||||
}
|
||||
|
||||
// Get the block body.
|
||||
block := signedBlock.Block()
|
||||
blockBody := block.Body()
|
||||
|
||||
// Get the blob KZG commitments.
|
||||
blobKzgCommitments, err := blockBody.BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "blob KZG commitments")
|
||||
}
|
||||
|
||||
// Compute the KZG commitments inclusion proof.
|
||||
kzgCommitmentsInclusionProof, err := blocks.MerkleProofKZGCommitments(blockBody)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "merkle proof ZKG commitments")
|
||||
}
|
||||
|
||||
// Compute cells and proofs.
|
||||
cellsAndProofs := make([]kzg.CellsAndProofs, 0, blobsCount)
|
||||
|
||||
for i := range blobs {
|
||||
blob := &blobs[i]
|
||||
blobCellsAndProofs, err := kzg.ComputeCellsAndKZGProofs(blob)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "compute cells and KZG proofs")
|
||||
}
|
||||
|
||||
cellsAndProofs = append(cellsAndProofs, blobCellsAndProofs)
|
||||
}
|
||||
|
||||
// Get the column sidecars.
|
||||
sidecars := make([]*ethpb.DataColumnSidecar, 0, fieldparams.NumberOfColumns)
|
||||
for columnIndex := uint64(0); columnIndex < fieldparams.NumberOfColumns; columnIndex++ {
|
||||
column := make([]kzg.Cell, 0, blobsCount)
|
||||
kzgProofOfColumn := make([]kzg.Proof, 0, blobsCount)
|
||||
|
||||
for rowIndex := 0; rowIndex < blobsCount; rowIndex++ {
|
||||
cellsForRow := cellsAndProofs[rowIndex].Cells
|
||||
proofsForRow := cellsAndProofs[rowIndex].Proofs
|
||||
|
||||
cell := cellsForRow[columnIndex]
|
||||
column = append(column, cell)
|
||||
|
||||
kzgProof := proofsForRow[columnIndex]
|
||||
kzgProofOfColumn = append(kzgProofOfColumn, kzgProof)
|
||||
}
|
||||
|
||||
columnBytes := make([][]byte, 0, blobsCount)
|
||||
for i := range column {
|
||||
columnBytes = append(columnBytes, column[i][:])
|
||||
}
|
||||
|
||||
kzgProofOfColumnBytes := make([][]byte, 0, blobsCount)
|
||||
for _, kzgProof := range kzgProofOfColumn {
|
||||
copiedProof := kzgProof
|
||||
kzgProofOfColumnBytes = append(kzgProofOfColumnBytes, copiedProof[:])
|
||||
}
|
||||
|
||||
sidecar := ðpb.DataColumnSidecar{
|
||||
ColumnIndex: columnIndex,
|
||||
DataColumn: columnBytes,
|
||||
KzgCommitments: blobKzgCommitments,
|
||||
KzgProof: kzgProofOfColumnBytes,
|
||||
SignedBlockHeader: signedBlockHeader,
|
||||
KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
|
||||
}
|
||||
|
||||
sidecars = append(sidecars, sidecar)
|
||||
}
|
||||
|
||||
return sidecars, nil
|
||||
}
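
Editor's note: the loop above effectively transposes a blobsCount x NumberOfColumns cell matrix, where row r holds the extended cells of blob r. Sidecar c collects cell (r, c) from every row, so each sidecar's DataColumn has one entry per blob while all sidecars share the block's commitment list. A toy sketch of that transposition, with strings standing in for cells:

    // rows[r][c] is the c-th cell of blob r; the sidecar for column c
    // gathers exactly one cell from each row.
    func columnCells(rows [][]string, c int) []string {
        column := make([]string, 0, len(rows))
        for r := range rows {
            column = append(column, rows[r][c])
        }
        return column
    }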

// DataColumnSidecarsForReconstruct is a TEMPORARY function until there is an official specification for it.
// It is scheduled for deletion.
func DataColumnSidecarsForReconstruct(
    blobKzgCommitments [][]byte,
    signedBlockHeader *ethpb.SignedBeaconBlockHeader,
    kzgCommitmentsInclusionProof [][]byte,
    cellsAndProofs []kzg.CellsAndProofs,
) ([]*ethpb.DataColumnSidecar, error) {
    // Each CellsAndProofs corresponds to a blob,
    // so the blob count is the length of cellsAndProofs.
    blobsCount := len(cellsAndProofs)
    if blobsCount == 0 {
        return nil, nil
    }

    // Get the column sidecars.
    sidecars := make([]*ethpb.DataColumnSidecar, 0, fieldparams.NumberOfColumns)
    for columnIndex := uint64(0); columnIndex < fieldparams.NumberOfColumns; columnIndex++ {
        column := make([]kzg.Cell, 0, blobsCount)
        kzgProofOfColumn := make([]kzg.Proof, 0, blobsCount)

        for rowIndex := 0; rowIndex < blobsCount; rowIndex++ {
            cellsForRow := cellsAndProofs[rowIndex].Cells
            proofsForRow := cellsAndProofs[rowIndex].Proofs

            cell := cellsForRow[columnIndex]
            column = append(column, cell)

            kzgProof := proofsForRow[columnIndex]
            kzgProofOfColumn = append(kzgProofOfColumn, kzgProof)
        }

        columnBytes := make([][]byte, 0, blobsCount)
        for i := range column {
            columnBytes = append(columnBytes, column[i][:])
        }

        kzgProofOfColumnBytes := make([][]byte, 0, blobsCount)
        for _, kzgProof := range kzgProofOfColumn {
            copiedProof := kzgProof
            kzgProofOfColumnBytes = append(kzgProofOfColumnBytes, copiedProof[:])
        }

        sidecar := &ethpb.DataColumnSidecar{
            ColumnIndex:                  columnIndex,
            DataColumn:                   columnBytes,
            KzgCommitments:               blobKzgCommitments,
            KzgProof:                     kzgProofOfColumnBytes,
            SignedBlockHeader:            signedBlockHeader,
            KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
        }

        sidecars = append(sidecars, sidecar)
    }

    return sidecars, nil
}

// VerifyDataColumnSidecarKZGProofs verifies the provided KZG Proofs for the particular
// data column.
func VerifyDataColumnSidecarKZGProofs(sc *ethpb.DataColumnSidecar) (bool, error) {
    if sc.ColumnIndex >= params.BeaconConfig().NumberOfColumns {
        return false, errIndexTooLarge
    }
    if len(sc.DataColumn) != len(sc.KzgCommitments) || len(sc.KzgCommitments) != len(sc.KzgProof) {
        return false, errMismatchLength
    }

    var commitments []kzg.Bytes48
    var indices []uint64
    var cells []kzg.Cell
    var proofs []kzg.Bytes48
    for i := range sc.DataColumn {
        commitments = append(commitments, kzg.Bytes48(sc.KzgCommitments[i]))
        indices = append(indices, sc.ColumnIndex)
        cells = append(cells, kzg.Cell(sc.DataColumn[i]))
        proofs = append(proofs, kzg.Bytes48(sc.KzgProof[i]))
    }

    return kzg.VerifyCellKZGProofBatch(commitments, indices, cells, proofs)
}

// CustodySubnetCount returns the number of subnets the node should participate in for custody.
func CustodySubnetCount() uint64 {
    count := params.BeaconConfig().CustodyRequirement
    if flags.Get().SubscribeToAllSubnets {
        count = params.BeaconConfig().DataColumnSidecarSubnetCount
    }
    return count
}

// HypergeomCDF computes the hypergeometric cumulative distribution function.
// https://en.wikipedia.org/wiki/Hypergeometric_distribution
func HypergeomCDF(k, M, n, N uint64) float64 {
    denominatorInt := new(big.Int).Binomial(int64(M), int64(N)) // lint:ignore uintcast
    denominator := new(big.Float).SetInt(denominatorInt)

    rBig := big.NewFloat(0)

    for i := uint64(0); i < k+1; i++ {
        a := new(big.Int).Binomial(int64(n), int64(i)) // lint:ignore uintcast
        b := new(big.Int).Binomial(int64(M-n), int64(N-i))
        numeratorInt := new(big.Int).Mul(a, b)
        numerator := new(big.Float).SetInt(numeratorInt)
        item := new(big.Float).Quo(numerator, denominator)
        rBig.Add(rBig, item)
    }

    r, _ := rBig.Float64()

    return r
}
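
Editor's note on the parameters: HypergeomCDF(k, M, n, N) is the probability that, drawing N samples without replacement from a population of M items of which n are marked, at most k of the drawn items are marked. A hedged usage sketch matching the call sites in this file:

    // Probability that none of 16 sampled columns (out of 128) land in a
    // 65-column missing set; ExtendedSampleCount below uses this as its
    // false positive threshold.
    p0 := peerdas.HypergeomCDF(0, 128, 65, 16)
    _ = p0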

// ExtendedSampleCount computes, for a given number of samples per slot and allowed failures, the
// number of samples we should actually query from peers.
// TODO: Add link to the specification once it is available.
func ExtendedSampleCount(samplesPerSlot, allowedFailures uint64) uint64 {
    // Retrieve the columns count.
    columnsCount := params.BeaconConfig().NumberOfColumns

    // If half of the columns are missing, we are able to reconstruct the data.
    // If half of the columns + 1 are missing, we are not able to reconstruct the data.
    // This is the smallest worst case.
    worstCaseMissing := columnsCount/2 + 1

    // Compute the false positive threshold.
    falsePositiveThreshold := HypergeomCDF(0, columnsCount, worstCaseMissing, samplesPerSlot)

    var sampleCount uint64

    // Finally, compute the extended sample count.
    for sampleCount = samplesPerSlot; sampleCount < columnsCount+1; sampleCount++ {
        if HypergeomCDF(allowedFailures, columnsCount, worstCaseMissing, sampleCount) <= falsePositiveThreshold {
            break
        }
    }

    return sampleCount
}
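
Editor's note: the loop grows the sample count until missing allowedFailures responses from the extended set is no more likely (in the unrecoverable worst case) than missing any response from the base set. A short usage sketch, with values taken from the test table further down:

    samples := peerdas.ExtendedSampleCount(16, 1) // 20, assuming 128 columns
    _ = samples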

func CustodyCountFromRecord(record *enr.Record) (uint64, error) {
    // By default, we assume the peer custodies the minimum number of subnets.
    if record == nil {
        return 0, errRecordNil
    }

    // Load the `custody_subnet_count`.
    var csc Csc
    if err := record.Load(&csc); err != nil {
        return 0, errCannotLoadCustodySubnetCount
    }

    return uint64(csc), nil
}

func CanSelfReconstruct(numCol uint64) bool {
    total := params.BeaconConfig().NumberOfColumns
    // If total is odd, we need total/2 + 1 columns to reconstruct.
    // If total is even, we need total/2 columns to reconstruct.
    columnsNeeded := total/2 + total%2
    return numCol >= columnsNeeded
}
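
Editor's note: for the default 128 columns the threshold is 64, i.e. half of the extended matrix suffices to reconstruct the rest. A quick check of the ceiling arithmetic, assuming totals of 128 and 127:

    // total/2 + total%2 is ceil(total/2): exactly half for the even case,
    // half rounded up for the odd case.
    evenNeeded := uint64(128)/2 + uint64(128)%2 // 64
    oddNeeded := uint64(127)/2 + uint64(127)%2  // 64
    _, _ = evenNeeded, oddNeeded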
144 beacon-chain/core/peerdas/helpers_test.go (new file)
@@ -0,0 +1,144 @@
package peerdas_test

import (
    "bytes"
    "crypto/sha256"
    "encoding/binary"
    "fmt"
    "testing"

    "github.com/consensys/gnark-crypto/ecc/bls12-381/fr"
    GoKZG "github.com/crate-crypto/go-kzg-4844"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
    "github.com/prysmaticlabs/prysm/v5/testing/require"
    "github.com/prysmaticlabs/prysm/v5/testing/util"
    "github.com/sirupsen/logrus"
)

func deterministicRandomness(seed int64) [32]byte {
    // Convert the int64 seed to a byte slice.
    buf := new(bytes.Buffer)
    err := binary.Write(buf, binary.BigEndian, seed)
    if err != nil {
        logrus.WithError(err).Error("Failed to write int64 to bytes buffer")
        return [32]byte{}
    }
    bytes := buf.Bytes()

    return sha256.Sum256(bytes)
}

// Returns a serialized random field element in big-endian.
func GetRandFieldElement(seed int64) [32]byte {
    bytes := deterministicRandomness(seed)
    var r fr.Element
    r.SetBytes(bytes[:])

    return GoKZG.SerializeScalar(r)
}

// Returns a random blob using the passed seed as entropy.
func GetRandBlob(seed int64) kzg.Blob {
    var blob kzg.Blob
    bytesPerBlob := GoKZG.ScalarsPerBlob * GoKZG.SerializedScalarSize
    for i := 0; i < bytesPerBlob; i += GoKZG.SerializedScalarSize {
        fieldElementBytes := GetRandFieldElement(seed + int64(i))
        copy(blob[i:i+GoKZG.SerializedScalarSize], fieldElementBytes[:])
    }
    return blob
}

func GenerateCommitmentAndProof(blob *kzg.Blob) (*kzg.Commitment, *kzg.Proof, error) {
    commitment, err := kzg.BlobToKZGCommitment(blob)
    if err != nil {
        return nil, nil, err
    }
    proof, err := kzg.ComputeBlobKZGProof(blob, commitment)
    if err != nil {
        return nil, nil, err
    }
    return &commitment, &proof, err
}

func TestVerifyDataColumnSidecarKZGProofs(t *testing.T) {
    dbBlock := util.NewBeaconBlockDeneb()
    require.NoError(t, kzg.Start())

    var (
        comms [][]byte
        blobs []kzg.Blob
    )
    for i := int64(0); i < 6; i++ {
        blob := GetRandBlob(i)
        commitment, _, err := GenerateCommitmentAndProof(&blob)
        require.NoError(t, err)
        comms = append(comms, commitment[:])
        blobs = append(blobs, blob)
    }

    dbBlock.Block.Body.BlobKzgCommitments = comms
    sBlock, err := blocks.NewSignedBeaconBlock(dbBlock)
    require.NoError(t, err)
    sCars, err := peerdas.DataColumnSidecars(sBlock, blobs)
    require.NoError(t, err)

    for i, sidecar := range sCars {
        verified, err := peerdas.VerifyDataColumnSidecarKZGProofs(sidecar)
        require.NoError(t, err)
        require.Equal(t, true, verified, fmt.Sprintf("sidecar %d failed", i))
    }
}

func TestHypergeomCDF(t *testing.T) {
    // See https://en.wikipedia.org/wiki/Hypergeometric_distribution
    // Population size: 128, number of successes in population: 65,
    // sample size: 16, at most 5 successes in the sample.
    const (
        expected = 0.0796665913283742
        margin   = 0.000001
    )

    actual := peerdas.HypergeomCDF(5, 128, 65, 16)
    require.Equal(t, true, expected-margin <= actual && actual <= expected+margin)
}

func TestExtendedSampleCount(t *testing.T) {
    const samplesPerSlot = 16

    testCases := []struct {
        name                string
        allowedMissings     uint64
        extendedSampleCount uint64
    }{
        {name: "allowedMissings=0", allowedMissings: 0, extendedSampleCount: 16},
        {name: "allowedMissings=1", allowedMissings: 1, extendedSampleCount: 20},
        {name: "allowedMissings=2", allowedMissings: 2, extendedSampleCount: 24},
        {name: "allowedMissings=3", allowedMissings: 3, extendedSampleCount: 27},
        {name: "allowedMissings=4", allowedMissings: 4, extendedSampleCount: 29},
        {name: "allowedMissings=5", allowedMissings: 5, extendedSampleCount: 32},
        {name: "allowedMissings=6", allowedMissings: 6, extendedSampleCount: 35},
        {name: "allowedMissings=7", allowedMissings: 7, extendedSampleCount: 37},
        {name: "allowedMissings=8", allowedMissings: 8, extendedSampleCount: 40},
        {name: "allowedMissings=9", allowedMissings: 9, extendedSampleCount: 42},
        {name: "allowedMissings=10", allowedMissings: 10, extendedSampleCount: 44},
        {name: "allowedMissings=11", allowedMissings: 11, extendedSampleCount: 47},
        {name: "allowedMissings=12", allowedMissings: 12, extendedSampleCount: 49},
        {name: "allowedMissings=13", allowedMissings: 13, extendedSampleCount: 51},
        {name: "allowedMissings=14", allowedMissings: 14, extendedSampleCount: 53},
        {name: "allowedMissings=15", allowedMissings: 15, extendedSampleCount: 55},
        {name: "allowedMissings=16", allowedMissings: 16, extendedSampleCount: 57},
        {name: "allowedMissings=17", allowedMissings: 17, extendedSampleCount: 59},
        {name: "allowedMissings=18", allowedMissings: 18, extendedSampleCount: 61},
        {name: "allowedMissings=19", allowedMissings: 19, extendedSampleCount: 63},
        {name: "allowedMissings=20", allowedMissings: 20, extendedSampleCount: 65},
    }

    for _, tc := range testCases {
        t.Run(tc.name, func(t *testing.T) {
            result := peerdas.ExtendedSampleCount(samplesPerSlot, tc.allowedMissings)
            require.Equal(t, tc.extendedSampleCount, result)
        })
    }
}

@@ -53,6 +53,11 @@ func HigherEqualThanAltairVersionAndEpoch(s state.BeaconState, e primitives.Epoc
    return s.Version() >= version.Altair && e >= params.BeaconConfig().AltairForkEpoch
}

// PeerDASIsActive checks whether peerDAS is active at the provided slot.
func PeerDASIsActive(slot primitives.Slot) bool {
    return params.PeerDASEnabled() && slots.ToEpoch(slot) >= params.BeaconConfig().Eip7594ForkEpoch
}

// CanUpgradeToAltair returns true if the input `slot` can upgrade to Altair.
// Spec code:
// If state.slot % SLOTS_PER_EPOCH == 0 and compute_epoch_at_slot(state.slot) == ALTAIR_FORK_EPOCH

@@ -29,8 +29,6 @@ import (
    "go.opencensus.io/trace"
)

type customProcessingFn func(context.Context, state.BeaconState) error

// ExecuteStateTransition defines the procedure for a state transition function.
//
// Note: This method differs from the spec pseudocode as it uses a batch signature verification.
@@ -175,7 +173,18 @@ func ProcessSlotsIfPossible(ctx context.Context, state state.BeaconState, target
    return state, nil
}

// ProcessSlots includes core slot processing as well as a cache
// ProcessSlots process through skip slots and apply epoch transition when it's needed
//
// Spec pseudocode definition:
//
//  def process_slots(state: BeaconState, slot: Slot) -> None:
//      assert state.slot < slot
//      while state.slot < slot:
//          process_slot(state)
//          # Process epoch on the start slot of the next epoch
//          if (state.slot + 1) % SLOTS_PER_EPOCH == 0:
//              process_epoch(state)
//          state.slot = Slot(state.slot + 1)
func ProcessSlots(ctx context.Context, state state.BeaconState, slot primitives.Slot) (state.BeaconState, error) {
    ctx, span := trace.StartSpan(ctx, "core.state.ProcessSlots")
    defer span.End()
@@ -222,63 +231,42 @@ func ProcessSlots(ctx context.Context, state state.BeaconState, slot primitives.
    defer func() {
        SkipSlotCache.MarkNotInProgress(key)
    }()
    state, err = ProcessSlotsCore(ctx, span, state, slot, cacheBestBeaconStateOnErrFn(highestSlot, key))
    if err != nil {
        return nil, err
    }
    if highestSlot < state.Slot() {
        SkipSlotCache.Put(ctx, key, state)
    }

    return state, nil
}

func cacheBestBeaconStateOnErrFn(highestSlot primitives.Slot, key [32]byte) customProcessingFn {
    return func(ctx context.Context, state state.BeaconState) error {
        for state.Slot() < slot {
            if ctx.Err() != nil {
                tracing.AnnotateError(span, ctx.Err())
                // Cache last best value.
                if highestSlot < state.Slot() {
                    SkipSlotCache.Put(ctx, key, state)
                }
                return ctx.Err()
            }
        return nil
    }
}

// ProcessSlotsCore process through skip slots and apply epoch transition when it's needed
//
// Spec pseudocode definition:
//
//  def process_slots(state: BeaconState, slot: Slot) -> None:
//      assert state.slot < slot
//      while state.slot < slot:
//          process_slot(state)
//          # Process epoch on the start slot of the next epoch
//          if (state.slot + 1) % SLOTS_PER_EPOCH == 0:
//              process_epoch(state)
//          state.slot = Slot(state.slot + 1)
func ProcessSlotsCore(ctx context.Context, span *trace.Span, state state.BeaconState, slot primitives.Slot, fn customProcessingFn) (state.BeaconState, error) {
    var err error
    for state.Slot() < slot {
        if fn != nil {
            if err = fn(ctx, state); err != nil {
                tracing.AnnotateError(span, err)
                return nil, err
                if SkipSlotCache.Put(ctx, key, state); err != nil {
                    log.WithError(err).Error("Failed to put skip slot cache value")
                }
            }
            return nil, ctx.Err()
        }
        state, err = ProcessSlot(ctx, state)
        if err != nil {
            tracing.AnnotateError(span, err)
            return nil, errors.Wrap(err, "could not process slot")
        }

        state, err = ProcessEpoch(ctx, state)
        if err != nil {
            tracing.AnnotateError(span, err)
            return nil, err
        if time.CanProcessEpoch(state) {
            if state.Version() == version.Phase0 {
                state, err = ProcessEpochPrecompute(ctx, state)
                if err != nil {
                    tracing.AnnotateError(span, err)
                    return nil, errors.Wrap(err, "could not process epoch with optimizations")
                }
            } else if state.Version() <= version.Deneb {
                if err = altair.ProcessEpoch(ctx, state); err != nil {
                    tracing.AnnotateError(span, err)
                    return nil, errors.Wrap(err, fmt.Sprintf("could not process %s epoch", version.String(state.Version())))
                }
            } else {
                if err = electra.ProcessEpoch(ctx, state); err != nil {
                    tracing.AnnotateError(span, err)
                    return nil, errors.Wrap(err, fmt.Sprintf("could not process %s epoch", version.String(state.Version())))
                }
            }
        }

        if err := state.SetSlot(state.Slot() + 1); err != nil {
            tracing.AnnotateError(span, err)
            return nil, errors.Wrap(err, "failed to increment state slot")
@@ -290,46 +278,25 @@ func ProcessSlotsCore(ctx context.Context, span *trace.Span, state state.BeaconS
            return nil, errors.Wrap(err, "failed to upgrade state")
        }
    }
    return state, nil
}

// ProcessEpoch is a wrapper on fork specific epoch processing
func ProcessEpoch(ctx context.Context, state state.BeaconState) (state.BeaconState, error) {
    var err error
    if time.CanProcessEpoch(state) {
        if state.Version() == version.Electra {
            if err = electra.ProcessEpoch(ctx, state); err != nil {
                return nil, errors.Wrap(err, fmt.Sprintf("could not process %s epoch", version.String(state.Version())))
            }
        } else if state.Version() >= version.Altair {
            if err = altair.ProcessEpoch(ctx, state); err != nil {
                return nil, errors.Wrap(err, fmt.Sprintf("could not process %s epoch", version.String(state.Version())))
            }
        } else {
            state, err = ProcessEpochPrecompute(ctx, state)
            if err != nil {
                return nil, errors.Wrap(err, "could not process epoch with optimizations")
            }
        }
    }
    if highestSlot < state.Slot() {
        SkipSlotCache.Put(ctx, key, state)
    }
    return state, err

    return state, nil
}

// UpgradeState upgrades the state to the next version if possible.
func UpgradeState(ctx context.Context, state state.BeaconState) (state.BeaconState, error) {
    ctx, span := trace.StartSpan(ctx, "core.state.UpgradeState")
    defer span.End()

    var err error
    upgraded := false

    if time.CanUpgradeToAltair(state.Slot()) {
        state, err = altair.UpgradeToAltair(ctx, state)
        if err != nil {
            tracing.AnnotateError(span, err)
            return nil, err
        }
        upgraded = true
    }

    if time.CanUpgradeToBellatrix(state.Slot()) {
@@ -338,7 +305,6 @@ func UpgradeState(ctx context.Context, state state.BeaconState) (state.BeaconSta
            tracing.AnnotateError(span, err)
            return nil, err
        }
        upgraded = true
    }

    if time.CanUpgradeToCapella(state.Slot()) {
@@ -347,7 +313,6 @@ func UpgradeState(ctx context.Context, state state.BeaconState) (state.BeaconSta
            tracing.AnnotateError(span, err)
            return nil, err
        }
        upgraded = true
    }

    if time.CanUpgradeToDeneb(state.Slot()) {
@@ -356,7 +321,6 @@ func UpgradeState(ctx context.Context, state state.BeaconState) (state.BeaconSta
            tracing.AnnotateError(span, err)
            return nil, err
        }
        upgraded = true
    }

    if time.CanUpgradeToElectra(state.Slot()) {
@@ -365,13 +329,7 @@ func UpgradeState(ctx context.Context, state state.BeaconState) (state.BeaconSta
            tracing.AnnotateError(span, err)
            return nil, err
        }
        upgraded = true
    }

    if upgraded {
        log.Debugf("upgraded state to %s", version.String(state.Version()))
    }

    return state, nil
}

@@ -12,6 +12,7 @@ import (
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition/interop"
    v "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/validators"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
    field_params "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
    "github.com/prysmaticlabs/prysm/v5/crypto/bls"
@@ -327,18 +328,20 @@ func ProcessBlockForStateRoot(
    if err != nil {
        return nil, err
    }
    if state.Version() >= version.Capella {
        state, err = b.ProcessWithdrawals(state, executionData)
        if err != nil {
            return nil, errors.Wrap(err, "could not process withdrawals")
        }
        if blk.IsBlinded() {
            state, err = b.ProcessPayloadHeader(state, executionData)
        } else {
            state, err = b.ProcessPayload(state, executionData)
        }
        state, err = b.ProcessPayload(state, blk.Body())
        if err != nil {
            return nil, errors.Wrap(err, "could not process execution data")
        }
    }

    if err := VerifyBlobCommitmentCount(blk); err != nil {
        return nil, err
    }

    randaoReveal := signed.Block().Body().RandaoReveal()
    state, err = b.ProcessRandaoNoVerify(state, randaoReveal[:])
    if err != nil {
@@ -374,6 +377,20 @@ func ProcessBlockForStateRoot(
    return state, nil
}

func VerifyBlobCommitmentCount(blk interfaces.ReadOnlyBeaconBlock) error {
    if blk.Version() < version.Deneb {
        return nil
    }
    kzgs, err := blk.Body().BlobKzgCommitments()
    if err != nil {
        return err
    }
    if len(kzgs) > field_params.MaxBlobsPerBlock {
        return fmt.Errorf("too many kzg commitments in block: %d", len(kzgs))
    }
    return nil
}

// This calls altair block operations.
func altairOperations(
    ctx context.Context,

@@ -2,11 +2,13 @@ package transition_test

import (
    "context"
    "fmt"
    "testing"

    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
    field_params "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
    "github.com/prysmaticlabs/prysm/v5/config/params"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
    "github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
@@ -210,3 +212,15 @@ func TestProcessBlockDifferentVersion(t *testing.T) {
    _, _, err = transition.ProcessBlockNoVerifyAnySig(context.Background(), beaconState, wsb)
    require.ErrorContains(t, "state and block are different version. 0 != 1", err)
}

func TestVerifyBlobCommitmentCount(t *testing.T) {
    b := &ethpb.BeaconBlockDeneb{Body: &ethpb.BeaconBlockBodyDeneb{}}
    rb, err := blocks.NewBeaconBlock(b)
    require.NoError(t, err)
    require.NoError(t, transition.VerifyBlobCommitmentCount(rb))

    b = &ethpb.BeaconBlockDeneb{Body: &ethpb.BeaconBlockBodyDeneb{BlobKzgCommitments: make([][]byte, field_params.MaxBlobsPerBlock+1)}}
    rb, err = blocks.NewBeaconBlock(b)
    require.NoError(t, err)
    require.ErrorContains(t, fmt.Sprintf("too many kzg commitments in block: %d", field_params.MaxBlobsPerBlock+1), transition.VerifyBlobCommitmentCount(rb))
}

@@ -4,6 +4,7 @@ go_library(
    name = "go_default_library",
    srcs = [
        "availability.go",
        "availability_columns.go",
        "cache.go",
        "iface.go",
        "mock.go",
@@ -20,6 +21,7 @@ go_library(
        "//runtime/logging:go_default_library",
        "//runtime/version:go_default_library",
        "//time/slots:go_default_library",
        "@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
    ],

152 beacon-chain/das/availability_columns.go (new file)
@@ -0,0 +1,152 @@
package das

import (
    "context"
    "fmt"

    "github.com/ethereum/go-ethereum/p2p/enode"
    errors "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
    "github.com/prysmaticlabs/prysm/v5/config/params"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
    "github.com/prysmaticlabs/prysm/v5/runtime/version"
    "github.com/prysmaticlabs/prysm/v5/time/slots"
    log "github.com/sirupsen/logrus"
)

// LazilyPersistentStoreColumn is an implementation of AvailabilityStore to be used when batch syncing data columns.
// This implementation will hold any columns passed to Persist until IsDataAvailable is called for their
// block, at which time they will undergo full verification and be saved to the disk.
type LazilyPersistentStoreColumn struct {
    store    *filesystem.BlobStorage
    cache    *cache
    verifier ColumnBatchVerifier
    nodeID   enode.ID
}

type ColumnBatchVerifier interface {
    VerifiedRODataColumns(ctx context.Context, blk blocks.ROBlock, sc []blocks.RODataColumn) ([]blocks.VerifiedRODataColumn, error)
}

func NewLazilyPersistentStoreColumn(store *filesystem.BlobStorage, verifier ColumnBatchVerifier, id enode.ID) *LazilyPersistentStoreColumn {
    return &LazilyPersistentStoreColumn{
        store:    store,
        cache:    newCache(),
        verifier: verifier,
        nodeID:   id,
    }
}

// Persist does nothing at the moment.
// TODO: Very ugly, change the interface to allow for columns and blobs.
func (*LazilyPersistentStoreColumn) Persist(_ primitives.Slot, _ ...blocks.ROBlob) error {
    return nil
}

// PersistColumns adds columns to the working column cache. Columns stored in this cache will be persisted
// for at least as long as the node is running. Once IsDataAvailable succeeds, all columns referenced
// by the given block are guaranteed to be persisted for the remainder of the retention period.
func (s *LazilyPersistentStoreColumn) PersistColumns(current primitives.Slot, sc ...blocks.RODataColumn) error {
    if len(sc) == 0 {
        return nil
    }
    if len(sc) > 1 {
        first := sc[0].BlockRoot()
        for i := 1; i < len(sc); i++ {
            if first != sc[i].BlockRoot() {
                return errMixedRoots
            }
        }
    }
    if !params.WithinDAPeriod(slots.ToEpoch(sc[0].Slot()), slots.ToEpoch(current)) {
        return nil
    }
    key := keyFromColumn(sc[0])
    entry := s.cache.ensure(key)
    for i := range sc {
        if err := entry.stashColumns(&sc[i]); err != nil {
            return err
        }
    }
    return nil
}

// IsDataAvailable returns nil if all the commitments in the given block are persisted to the db and have been verified.
// DataColumnSidecars already in the db are assumed to have been previously verified against the block.
func (s *LazilyPersistentStoreColumn) IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error {
    blockCommitments, err := fullCommitmentsToCheck(b, current)
    if err != nil {
        return errors.Wrapf(err, "could not check data availability for block %#x", b.Root())
    }
    // Return early for blocks that are pre-deneb or which do not have any commitments.
    if blockCommitments.count() == 0 {
        return nil
    }

    key := keyFromBlock(b)
    entry := s.cache.ensure(key)
    defer s.cache.delete(key)
    root := b.Root()
    sumz, err := s.store.WaitForSummarizer(ctx)
    if err != nil {
        log.WithField("root", fmt.Sprintf("%#x", b.Root())).
            WithError(err).
            Debug("Failed to receive BlobStorageSummarizer within IsDataAvailable")
    } else {
        entry.setDiskSummary(sumz.Summary(root))
    }

    // Verify we have all the expected sidecars, and fail fast if any are missing or inconsistent.
    // We don't try to salvage problematic batches because this indicates a misbehaving peer and we'd rather
    // ignore their response and decrease their peer score.
    sidecars, err := entry.filterColumns(root, &blockCommitments)
    if err != nil {
        return errors.Wrap(err, "incomplete BlobSidecar batch")
    }
    // Do thorough verifications of each column sidecar for the block.
    // Same as above, we don't save column sidecars if there are any problems with the batch.
    vscs, err := s.verifier.VerifiedRODataColumns(ctx, b, sidecars)
    if err != nil {
        var me verification.VerificationMultiError
        ok := errors.As(err, &me)
        if ok {
            fails := me.Failures()
            lf := make(log.Fields, len(fails))
            for i := range fails {
                lf[fmt.Sprintf("fail_%d", i)] = fails[i].Error()
            }
            log.WithFields(lf).
                Debug("invalid ColumnSidecars received")
        }
        return errors.Wrapf(err, "invalid ColumnSidecars received for block %#x", root)
    }
    // Ensure that each column sidecar is written to disk.
    for i := range vscs {
        if err := s.store.SaveDataColumn(vscs[i]); err != nil {
            return errors.Wrapf(err, "failed to save ColumnSidecar index %d for block %#x", vscs[i].ColumnIndex, root)
        }
    }
    // All ColumnSidecars are persisted - the DA check succeeds.
    return nil
}

func fullCommitmentsToCheck(b blocks.ROBlock, current primitives.Slot) (safeCommitmentsArray, error) {
    var ar safeCommitmentsArray
    if b.Version() < version.Deneb {
        return ar, nil
    }
    // We are only required to check within MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS.
    if !params.WithinDAPeriod(slots.ToEpoch(b.Block().Slot()), slots.ToEpoch(current)) {
        return ar, nil
    }
    kc, err := b.Block().Body().BlobKzgCommitments()
    if err != nil {
        return ar, err
    }
    // Assign the block's commitment list to every column entry; copying into
    // the nil slices of a fresh array would be a no-op.
    for i := range ar {
        ar[i] = kc
    }
    return ar, nil
}
@@ -2,6 +2,7 @@ package das

import (
    "bytes"
    "reflect"

    "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
@@ -38,6 +39,10 @@ func keyFromSidecar(sc blocks.ROBlob) cacheKey {
    return cacheKey{slot: sc.Slot(), root: sc.BlockRoot()}
}

func keyFromColumn(sc blocks.RODataColumn) cacheKey {
    return cacheKey{slot: sc.Slot(), root: sc.BlockRoot()}
}

// keyFromBlock is a convenience method for constructing a cacheKey from a ROBlock value.
func keyFromBlock(b blocks.ROBlock) cacheKey {
    return cacheKey{slot: b.Block().Slot(), root: b.Root()}
@@ -61,6 +66,7 @@ func (c *cache) delete(key cacheKey) {
// cacheEntry holds a fixed-length cache of BlobSidecars.
type cacheEntry struct {
    scs         [fieldparams.MaxBlobsPerBlock]*blocks.ROBlob
    colScs      [fieldparams.NumberOfColumns]*blocks.RODataColumn
    diskSummary filesystem.BlobStorageSummary
}

@@ -82,6 +88,17 @@ func (e *cacheEntry) stash(sc *blocks.ROBlob) error {
    return nil
}

func (e *cacheEntry) stashColumns(sc *blocks.RODataColumn) error {
    if sc.ColumnIndex >= fieldparams.NumberOfColumns {
        return errors.Wrapf(errIndexOutOfBounds, "index=%d", sc.ColumnIndex)
    }
    if e.colScs[sc.ColumnIndex] != nil {
        return errors.Wrapf(ErrDuplicateSidecar, "root=%#x, index=%d, commitment=%#x", sc.BlockRoot(), sc.ColumnIndex, sc.KzgCommitments)
    }
    e.colScs[sc.ColumnIndex] = sc
    return nil
}

// filter evicts sidecars that are not committed to by the block and returns custom
// errors if the cache is missing any of the commitments, or if the commitments in
// the cache do not match those found in the block. If err is nil, then all expected
@@ -117,6 +134,35 @@ func (e *cacheEntry) filter(root [32]byte, kc safeCommitmentArray) ([]blocks.ROB
    return scs, nil
}

func (e *cacheEntry) filterColumns(root [32]byte, kc *safeCommitmentsArray) ([]blocks.RODataColumn, error) {
    if e.diskSummary.AllAvailable(kc.count()) {
        return nil, nil
    }
    scs := make([]blocks.RODataColumn, 0, kc.count())
    for i := uint64(0); i < fieldparams.NumberOfColumns; i++ {
        // We already have this column on disk; we don't need to write it or validate it.
        if e.diskSummary.HasIndex(i) {
            continue
        }
        if kc[i] == nil {
            if e.colScs[i] != nil {
                return nil, errors.Wrapf(errCommitmentMismatch, "root=%#x, index=%#x, commitment=%#x, no block commitment", root, i, e.scs[i].KzgCommitment)
            }
            continue
        }

        if e.colScs[i] == nil {
            return nil, errors.Wrapf(errMissingSidecar, "root=%#x, index=%#x", root, i)
        }
        if !reflect.DeepEqual(kc[i], e.colScs[i].KzgCommitments) {
            return nil, errors.Wrapf(errCommitmentMismatch, "root=%#x, index=%#x, commitment=%#x, block commitment=%#x", root, i, e.colScs[i].KzgCommitments, kc[i])
        }
        scs = append(scs, *e.colScs[i])
    }

    return scs, nil
}

// safeCommitmentArray is a fixed size array of commitment byte slices. This is helpful for avoiding
// gratuitous bounds checks.
type safeCommitmentArray [fieldparams.MaxBlobsPerBlock][]byte
@@ -129,3 +175,14 @@ func (s safeCommitmentArray) count() int {
    }
    return fieldparams.MaxBlobsPerBlock
}

type safeCommitmentsArray [fieldparams.NumberOfColumns][][]byte

func (s *safeCommitmentsArray) count() int {
    for i := range s {
        if s[i] == nil {
            return i
        }
    }
    return fieldparams.NumberOfColumns
}

@@ -13,6 +13,7 @@ go_library(
    importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem",
    visibility = ["//visibility:public"],
    deps = [
        "//async/event:go_default_library",
        "//beacon-chain/verification:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",

@@ -12,6 +12,7 @@ import (

    "github.com/ethereum/go-ethereum/common/hexutil"
    "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/v5/async/event"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
    fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
@@ -39,8 +40,15 @@ const (
    directoryPermissions = 0700
)

// BlobStorageOption is a functional option for configuring a BlobStorage.
type BlobStorageOption func(*BlobStorage) error
type (
    // BlobStorageOption is a functional option for configuring a BlobStorage.
    BlobStorageOption func(*BlobStorage) error

    RootIndexPair struct {
        Root  [fieldparams.RootLength]byte
        Index uint64
    }
)

// WithBasePath is a required option that sets the base path of blob storage.
func WithBasePath(base string) BlobStorageOption {
@@ -70,7 +78,10 @@ func WithSaveFsync(fsync bool) BlobStorageOption {
// attempt to hold a file lock to guarantee exclusive control of the blob storage directory, so this should only be
// initialized once per beacon node.
func NewBlobStorage(opts ...BlobStorageOption) (*BlobStorage, error) {
    b := &BlobStorage{}
    b := &BlobStorage{
        DataColumnFeed: new(event.Feed),
    }

    for _, o := range opts {
        if err := o(b); err != nil {
            return nil, errors.Wrap(err, "failed to create blob storage")
@@ -99,6 +110,7 @@ type BlobStorage struct {
    fsync          bool
    fs             afero.Fs
    pruner         *blobPruner
    DataColumnFeed *event.Feed
}

// WarmCache runs the prune routine with an expiration of slot of 0, so nothing will be pruned, but the pruner's cache
@@ -221,6 +233,110 @@ func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error {
    return nil
}

// SaveDataColumn saves a data column to our local filesystem.
func (bs *BlobStorage) SaveDataColumn(column blocks.VerifiedRODataColumn) error {
    startTime := time.Now()
    fname := namerForDataColumn(column)
    sszPath := fname.path()
    exists, err := afero.Exists(bs.fs, sszPath)
    if err != nil {
        return err
    }

    if exists {
        log.Trace("Ignoring a duplicate data column sidecar save attempt")
        return nil
    }

    if bs.pruner != nil {
        hRoot, err := column.SignedBlockHeader.Header.HashTreeRoot()
        if err != nil {
            return err
        }
        if err := bs.pruner.notify(hRoot, column.SignedBlockHeader.Header.Slot, column.ColumnIndex); err != nil {
            return errors.Wrapf(err, "problem maintaining pruning cache/metrics for sidecar with root=%#x", hRoot)
        }
    }

    // Serialize the ethpb.DataColumnSidecar to binary data using SSZ.
    sidecarData, err := column.MarshalSSZ()
    if err != nil {
        return errors.Wrap(err, "failed to serialize sidecar data")
    } else if len(sidecarData) == 0 {
        return errSidecarEmptySSZData
    }

    if err := bs.fs.MkdirAll(fname.dir(), directoryPermissions); err != nil {
        return err
    }
    partPath := fname.partPath(fmt.Sprintf("%p", sidecarData))

    partialMoved := false
    // Ensure the partial file is deleted.
    defer func() {
        if partialMoved {
            return
        }
        // It's expected to error if the save is successful.
        err = bs.fs.Remove(partPath)
        if err == nil {
            log.WithFields(logrus.Fields{
                "partPath": partPath,
            }).Debugf("Removed partial file")
        }
    }()

    // Create a partial file and write the serialized data to it.
    partialFile, err := bs.fs.Create(partPath)
    if err != nil {
        return errors.Wrap(err, "failed to create partial file")
    }

    n, err := partialFile.Write(sidecarData)
    if err != nil {
        closeErr := partialFile.Close()
        if closeErr != nil {
            return closeErr
        }
        return errors.Wrap(err, "failed to write to partial file")
    }
    if bs.fsync {
        if err := partialFile.Sync(); err != nil {
            return err
        }
    }

    if err := partialFile.Close(); err != nil {
        return err
    }

    if n != len(sidecarData) {
        return fmt.Errorf("failed to write the full bytes of sidecarData, wrote only %d of %d bytes", n, len(sidecarData))
    }

    if n == 0 {
        return errEmptyBlobWritten
    }

    // Atomically rename the partial file to its final name.
    err = bs.fs.Rename(partPath, sszPath)
    if err != nil {
        return errors.Wrap(err, "failed to rename partial file to final name")
    }
    partialMoved = true

    // Notify the data column feed that a new data column has been saved.
    bs.DataColumnFeed.Send(RootIndexPair{
        Root:  column.BlockRoot(),
        Index: column.ColumnIndex,
    })

    // TODO: Use new metrics for data columns.
    blobsWrittenCounter.Inc()
    blobSaveLatency.Observe(float64(time.Since(startTime).Milliseconds()))
    return nil
}

// Get retrieves a single BlobSidecar by its root and index.
// Since BlobStorage only writes blobs that have undergone full verification, the return
// value is always a VerifiedROBlob.
@@ -246,6 +362,20 @@ func (bs *BlobStorage) Get(root [32]byte, idx uint64) (blocks.VerifiedROBlob, er
    return verification.BlobSidecarNoop(ro)
}

// GetColumn retrieves a single DataColumnSidecar by its root and index.
func (bs *BlobStorage) GetColumn(root [32]byte, idx uint64) (*ethpb.DataColumnSidecar, error) {
    expected := blobNamer{root: root, index: idx}
    encoded, err := afero.ReadFile(bs.fs, expected.path())
    if err != nil {
        return nil, err
    }
    s := &ethpb.DataColumnSidecar{}
    if err := s.UnmarshalSSZ(encoded); err != nil {
        return nil, err
    }
    return s, nil
}

// Remove removes all blobs for a given root.
func (bs *BlobStorage) Remove(root [32]byte) error {
    rootDir := blobNamer{root: root}.dir()
@@ -289,6 +419,61 @@ func (bs *BlobStorage) Indices(root [32]byte) ([fieldparams.MaxBlobsPerBlock]boo
    return mask, nil
}

// ColumnIndices retrieves the stored column indices from our filesystem.
func (bs *BlobStorage) ColumnIndices(root [32]byte) (map[uint64]bool, error) {
    custody := make(map[uint64]bool, fieldparams.NumberOfColumns)

    // Get all the files in the directory.
    rootDir := blobNamer{root: root}.dir()
    entries, err := afero.ReadDir(bs.fs, rootDir)
    if err != nil {
        // If the directory does not exist, we do not custody any columns.
        if os.IsNotExist(err) {
            return nil, nil
        }

        return nil, errors.Wrap(err, "read directory")
    }

    // Iterate over all the entries in the directory.
    for _, entry := range entries {
        // If the entry is a directory, skip it.
        if entry.IsDir() {
            continue
        }

        // If the entry does not have the correct extension, skip it.
        name := entry.Name()
        if !strings.HasSuffix(name, sszExt) {
            continue
        }

        // The file should be in the `<index>.<extension>` format.
        // Skip the file if it does not match the format.
        parts := strings.Split(name, ".")
        if len(parts) != 2 {
            continue
        }

        // Get the column index from the file name.
        columnIndexStr := parts[0]
        columnIndex, err := strconv.ParseUint(columnIndexStr, 10, 64)
        if err != nil {
            return nil, errors.Wrapf(err, "unexpected directory entry breaks listing, %s", parts[0])
        }

        // If the column index is out of bounds, return an error.
        if columnIndex >= fieldparams.NumberOfColumns {
            return nil, errors.Wrapf(errIndexOutOfBounds, "invalid index %d", columnIndex)
        }

        // Mark the column index as in custody.
        custody[columnIndex] = true
    }

    return custody, nil
}

// Clear deletes all files on the filesystem.
func (bs *BlobStorage) Clear() error {
    dirs, err := listDir(bs.fs, ".")
@@ -321,6 +506,10 @@ func namerForSidecar(sc blocks.VerifiedROBlob) blobNamer {
    return blobNamer{root: sc.BlockRoot(), index: sc.Index}
}

func namerForDataColumn(col blocks.VerifiedRODataColumn) blobNamer {
    return blobNamer{root: col.BlockRoot(), index: col.ColumnIndex}
}

func (p blobNamer) dir() string {
    return rootString(p.root)
}

@@ -9,7 +9,7 @@ import (
)

// blobIndexMask is a bitmask representing the set of blob indices that are currently set.
type blobIndexMask [fieldparams.MaxBlobsPerBlock]bool
type blobIndexMask [fieldparams.NumberOfColumns]bool

// BlobStorageSummary represents cached information about the BlobSidecars on disk for each root the cache knows about.
type BlobStorageSummary struct {
@@ -26,6 +26,15 @@ func (s BlobStorageSummary) HasIndex(idx uint64) bool {
    return s.mask[idx]
}

// HasDataColumnIndex returns true if the DataColumnSidecar at the given index is available in the filesystem.
func (s BlobStorageSummary) HasDataColumnIndex(idx uint64) bool {
    // Protect from panic, but assume callers are sophisticated enough to not need an error telling them they have an invalid idx.
    if idx >= fieldparams.NumberOfColumns {
        return false
    }
    return s.mask[idx]
}

// AllAvailable returns true if we have all blobs for all indices from 0 to count-1.
func (s BlobStorageSummary) AllAvailable(count int) bool {
    if count > fieldparams.MaxBlobsPerBlock {
@@ -39,6 +48,21 @@ func (s BlobStorageSummary) AllAvailable(count int) bool {
    return true
}

// AllDataColumnsAvailable returns true if we have all data columns for the corresponding indices.
func (s BlobStorageSummary) AllDataColumnsAvailable(indices map[uint64]bool) bool {
    if uint64(len(indices)) > fieldparams.NumberOfColumns {
        return false
    }

    for index := range indices {
        if !s.mask[index] {
            return false
        }
    }

    return true
}

// BlobStorageSummarizer can be used to receive a summary of metadata about blobs on disk for a given root.
// The BlobStorageSummary can be used to check which indices (if any) are available for a given block by root.
type BlobStorageSummarizer interface {
@@ -68,9 +92,12 @@ func (s *blobStorageCache) Summary(root [32]byte) BlobStorageSummary {
}

func (s *blobStorageCache) ensure(key [32]byte, slot primitives.Slot, idx uint64) error {
    if idx >= fieldparams.MaxBlobsPerBlock {
        return errIndexOutOfBounds
    }
    // TODO: Separate blob index checks from data column index checks.
    /*
        if idx >= fieldparams.MaxBlobsPerBlock {
            return errIndexOutOfBounds
        }
    */
    s.mu.Lock()
    defer s.mu.Unlock()
    v := s.cache[key]

@@ -9,6 +9,7 @@ import (
)

func TestSlotByRoot_Summary(t *testing.T) {
    t.Skip("Use new test for data columns")
    var noneSet, allSet, firstSet, lastSet, oneSet blobIndexMask
    firstSet[0] = true
    lastSet[len(lastSet)-1] = true
@@ -148,3 +149,108 @@ func TestAllAvailable(t *testing.T) {
        })
    }
}

func TestHasDataColumnIndex(t *testing.T) {
    storedIndices := map[uint64]bool{
        1: true,
        3: true,
        5: true,
    }

    cases := []struct {
        name     string
        idx      uint64
        expected bool
    }{
        {
            name:     "index is too high",
            idx:      fieldparams.NumberOfColumns,
            expected: false,
        },
        {
            name:     "non existing index",
            idx:      2,
            expected: false,
        },
        {
            name:     "existing index",
            idx:      3,
            expected: true,
        },
    }

    for _, c := range cases {
        t.Run(c.name, func(t *testing.T) {
            var mask blobIndexMask

            for idx := range storedIndices {
                mask[idx] = true
            }

            sum := BlobStorageSummary{mask: mask}
            require.Equal(t, c.expected, sum.HasDataColumnIndex(c.idx))
        })
    }
}

func TestAllDataColumnAvailable(t *testing.T) {
    tooManyColumns := make(map[uint64]bool, fieldparams.NumberOfColumns+1)
    for i := uint64(0); i < fieldparams.NumberOfColumns+1; i++ {
        tooManyColumns[i] = true
    }

    columns346 := map[uint64]bool{
        3: true,
        4: true,
        6: true,
    }

    columns36 := map[uint64]bool{
        3: true,
        6: true,
    }

    cases := []struct {
        name          string
        storedIndices map[uint64]bool
        testedIndices map[uint64]bool
        expected      bool
    }{
        {
            name:          "no tested indices",
            storedIndices: columns346,
            testedIndices: map[uint64]bool{},
            expected:      true,
        },
        {
            name:          "too many tested indices",
            storedIndices: columns346,
            testedIndices: tooManyColumns,
            expected:      false,
        },
        {
            name:          "not all tested indices are stored",
            storedIndices: columns36,
            testedIndices: columns346,
            expected:      false,
        },
        {
            name:          "all tested indices are stored",
            storedIndices: columns346,
            testedIndices: columns36,
            expected:      true,
        },
    }
    for _, c := range cases {
        t.Run(c.name, func(t *testing.T) {
            var mask blobIndexMask

            for idx := range c.storedIndices {
                mask[idx] = true
            }

            sum := BlobStorageSummary{mask: mask}
            require.Equal(t, c.expected, sum.AllDataColumnsAvailable(c.testedIndices))
        })
    }
}

@@ -64,6 +64,10 @@ func newBlobPruner(fs afero.Fs, retain primitives.Epoch, opts ...prunerOpt) (*bl
// notify updates the pruner's view of root->blob mappings. This allows the pruner to build a cache
// of root->slot mappings and decide when to evict old blobs based on the age of present blobs.
func (p *blobPruner) notify(root [32]byte, latest primitives.Slot, idx uint64) error {
    for i := range 42 {
        log.WithField("index", i).Info("test")
    }

    if err := p.cache.ensure(root, latest, idx); err != nil {
        return err
    }

@@ -18,7 +18,6 @@ go_library(
        "//consensus-types/primitives:go_default_library",
        "//monitoring/backup:go_default_library",
        "//proto/dbval:go_default_library",
        "//proto/eth/v2:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "@com_github_ethereum_go_ethereum//common:go_default_library",
    ],

@@ -7,8 +7,6 @@ import (
    "context"
    "io"

    ethpbv2 "github.com/prysmaticlabs/prysm/v5/proto/eth/v2"

    "github.com/ethereum/go-ethereum/common"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filters"
    slashertypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/slasher/types"
@@ -58,9 +56,6 @@ type ReadOnlyDatabase interface {
    // Fee recipients operations.
    FeeRecipientByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (common.Address, error)
    RegistrationByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (*ethpb.ValidatorRegistrationV1, error)
    // light client operations
    LightClientUpdates(ctx context.Context, startPeriod, endPeriod uint64) (map[uint64]*ethpbv2.LightClientUpdateWithVersion, error)
    LightClientUpdate(ctx context.Context, period uint64) (*ethpbv2.LightClientUpdateWithVersion, error)

    // origin checkpoint sync support
    OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error)
@@ -97,8 +92,6 @@ type NoHeadAccessDatabase interface {
    // Fee recipients operations.
    SaveFeeRecipientsByValidatorIDs(ctx context.Context, ids []primitives.ValidatorIndex, addrs []common.Address) error
    SaveRegistrationsByValidatorIDs(ctx context.Context, ids []primitives.ValidatorIndex, regs []*ethpb.ValidatorRegistrationV1) error
    // light client operations
    SaveLightClientUpdate(ctx context.Context, period uint64, update *ethpbv2.LightClientUpdateWithVersion) error

    CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint primitives.Slot) error
}

@@ -16,7 +16,6 @@ go_library(
        "genesis.go",
        "key.go",
        "kv.go",
        "lightclient.go",
        "log.go",
        "migration.go",
        "migration_archived_index.go",
@@ -52,7 +51,6 @@ go_library(
        "//monitoring/progress:go_default_library",
        "//monitoring/tracing:go_default_library",
        "//proto/dbval:go_default_library",
        "//proto/eth/v2:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//runtime/version:go_default_library",
        "//time:go_default_library",
@@ -89,7 +87,6 @@ go_test(
        "genesis_test.go",
        "init_test.go",
        "kv_test.go",
        "lightclient_test.go",
        "migration_archived_index_test.go",
        "migration_block_slot_index_test.go",
        "migration_state_validators_test.go",
@@ -116,7 +113,6 @@ go_test(
        "//encoding/bytesutil:go_default_library",
        "//proto/dbval:go_default_library",
        "//proto/engine/v1:go_default_library",
        "//proto/eth/v2:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//proto/testing:go_default_library",
        "//runtime/version:go_default_library",

@@ -23,10 +23,10 @@ import (
    "go.opencensus.io/trace"
)

// used to represent errors for inconsistent slot ranges.
// Used to represent errors for inconsistent slot ranges.
var errInvalidSlotRange = errors.New("invalid end slot and start slot provided")

// Block retrieval by root.
// Block retrieval by root. Return nil if block is not found.
func (s *Store) Block(ctx context.Context, blockRoot [32]byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
    ctx, span := trace.StartSpan(ctx, "BeaconDB.Block")
    defer span.End()

@@ -107,7 +107,6 @@ var Buckets = [][]byte{
    powchainBucket,
    stateSummaryBucket,
    stateValidatorsBucket,
    lightClientUpdatesBucket,
    // Indices buckets.
    blockSlotIndicesBucket,
    stateSlotIndicesBucket,

@@ -1,79 +0,0 @@
package kv

import (
	"context"
	"encoding/binary"
	"fmt"

	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
	ethpbv2 "github.com/prysmaticlabs/prysm/v5/proto/eth/v2"
	bolt "go.etcd.io/bbolt"
	"go.opencensus.io/trace"
)

func (s *Store) SaveLightClientUpdate(ctx context.Context, period uint64, update *ethpbv2.LightClientUpdateWithVersion) error {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.saveLightClientUpdate")
	defer span.End()

	return s.db.Update(func(tx *bolt.Tx) error {
		bkt := tx.Bucket(lightClientUpdatesBucket)
		updateMarshalled, err := encode(ctx, update)
		if err != nil {
			return err
		}
		return bkt.Put(bytesutil.Uint64ToBytesBigEndian(period), updateMarshalled)
	})
}

func (s *Store) LightClientUpdates(ctx context.Context, startPeriod, endPeriod uint64) (map[uint64]*ethpbv2.LightClientUpdateWithVersion, error) {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.LightClientUpdates")
	defer span.End()

	if startPeriod > endPeriod {
		return nil, fmt.Errorf("start period %d is greater than end period %d", startPeriod, endPeriod)
	}

	updates := make(map[uint64]*ethpbv2.LightClientUpdateWithVersion)
	err := s.db.View(func(tx *bolt.Tx) error {
		bkt := tx.Bucket(lightClientUpdatesBucket)
		c := bkt.Cursor()

		firstPeriodInDb, _ := c.First()
		if firstPeriodInDb == nil {
			return nil
		}

		for k, v := c.Seek(bytesutil.Uint64ToBytesBigEndian(startPeriod)); k != nil && binary.BigEndian.Uint64(k) <= endPeriod; k, v = c.Next() {
			currentPeriod := binary.BigEndian.Uint64(k)

			var update ethpbv2.LightClientUpdateWithVersion
			if err := decode(ctx, v, &update); err != nil {
				return err
			}
			updates[currentPeriod] = &update
		}

		return nil
	})

	if err != nil {
		return nil, err
	}
	return updates, err
}

func (s *Store) LightClientUpdate(ctx context.Context, period uint64) (*ethpbv2.LightClientUpdateWithVersion, error) {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.LightClientUpdate")
	defer span.End()

	var update ethpbv2.LightClientUpdateWithVersion
	err := s.db.View(func(tx *bolt.Tx) error {
		bkt := tx.Bucket(lightClientUpdatesBucket)
		updateBytes := bkt.Get(bytesutil.Uint64ToBytesBigEndian(period))
		if updateBytes == nil {
			return nil
		}
		return decode(ctx, updateBytes, &update)
	})
	return &update, err
}
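Note: the range scan above is only correct because periods are stored under big-endian keys. bbolt cursors iterate keys in byte-wise lexicographic order, and big-endian encoding makes that order coincide with numeric order, so Seek(startPeriod) followed by Next() walks periods ascending. A minimal standalone sketch of the property (the helper mirrors bytesutil.Uint64ToBytesBigEndian; everything else is illustrative):

    package main

    import (
        "bytes"
        "encoding/binary"
        "fmt"
    )

    // uint64ToBytesBigEndian mirrors the key encoding the store uses.
    func uint64ToBytesBigEndian(v uint64) []byte {
        b := make([]byte, 8)
        binary.BigEndian.PutUint64(b, v)
        return b
    }

    func main() {
        // Byte-wise comparison of big-endian keys matches numeric comparison.
        fmt.Println(bytes.Compare(uint64ToBytesBigEndian(2), uint64ToBytesBigEndian(10))) // -1

        // A little-endian encoding would break cursor ordering: 256 sorts before 1.
        le := func(v uint64) []byte {
            b := make([]byte, 8)
            binary.LittleEndian.PutUint64(b, v)
            return b
        }
        fmt.Println(bytes.Compare(le(256), le(1))) // -1, even though 256 > 1
    }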
@@ -1,648 +0,0 @@
package kv

import (
	"context"
	"testing"

	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	ethpbv2 "github.com/prysmaticlabs/prysm/v5/proto/eth/v2"
	"github.com/prysmaticlabs/prysm/v5/testing/require"
)

func TestStore_LightclientUpdate_CanSaveRetrieve(t *testing.T) {
	db := setupDB(t)
	ctx := context.Background()
	update := &ethpbv2.LightClientUpdate{
		AttestedHeader:          nil,
		NextSyncCommittee:       nil,
		NextSyncCommitteeBranch: nil,
		FinalizedHeader:         nil,
		FinalityBranch:          nil,
		SyncAggregate:           nil,
		SignatureSlot:           7,
	}

	period := uint64(1)
	err := db.SaveLightClientUpdate(ctx, period, &ethpbv2.LightClientUpdateWithVersion{
		Version: 1,
		Data:    update,
	})
	require.NoError(t, err)

	// Retrieve the update
	retrievedUpdate, err := db.LightClientUpdate(ctx, period)
	require.NoError(t, err)
	require.Equal(t, update.SignatureSlot, retrievedUpdate.Data.SignatureSlot, "retrieved update does not match saved update")
}

func TestStore_LightclientUpdates_canRetrieveRange(t *testing.T) {
	db := setupDB(t)
	ctx := context.Background()
	updates := []*ethpbv2.LightClientUpdateWithVersion{
		{
			Version: 1,
			Data: &ethpbv2.LightClientUpdate{
				AttestedHeader:          nil,
				NextSyncCommittee:       nil,
				NextSyncCommitteeBranch: nil,
				FinalizedHeader:         nil,
				FinalityBranch:          nil,
				SyncAggregate:           nil,
				SignatureSlot:           7,
			},
		},
		{
			Version: 1,
			Data: &ethpbv2.LightClientUpdate{
				AttestedHeader:          nil,
				NextSyncCommittee:       nil,
				NextSyncCommitteeBranch: nil,
				FinalizedHeader:         nil,
				FinalityBranch:          nil,
				SyncAggregate:           nil,
				SignatureSlot:           8,
			},
		},
		{
			Version: 1,
			Data: &ethpbv2.LightClientUpdate{
				AttestedHeader:          nil,
				NextSyncCommittee:       nil,
				NextSyncCommitteeBranch: nil,
				FinalizedHeader:         nil,
				FinalityBranch:          nil,
				SyncAggregate:           nil,
				SignatureSlot:           9,
			},
		},
	}

	for i, update := range updates {
		err := db.SaveLightClientUpdate(ctx, uint64(i+1), update)
		require.NoError(t, err)
	}

	// Retrieve the updates
	retrievedUpdatesMap, err := db.LightClientUpdates(ctx, 1, 3)
	require.NoError(t, err)
	require.Equal(t, len(updates), len(retrievedUpdatesMap), "retrieved updates do not match saved updates")
	for i, update := range updates {
		require.Equal(t, update.Data.SignatureSlot, retrievedUpdatesMap[uint64(i+1)].Data.SignatureSlot, "retrieved update does not match saved update")
	}
}

func TestStore_LightClientUpdate_EndPeriodSmallerThanStartPeriod(t *testing.T) {
	db := setupDB(t)
	ctx := context.Background()
	updates := []*ethpbv2.LightClientUpdateWithVersion{
		{
			Version: 1,
			Data: &ethpbv2.LightClientUpdate{
				AttestedHeader:          nil,
				NextSyncCommittee:       nil,
				NextSyncCommitteeBranch: nil,
				FinalizedHeader:         nil,
				FinalityBranch:          nil,
				SyncAggregate:           nil,
				SignatureSlot:           7,
			},
		},
		{
			Version: 1,
			Data: &ethpbv2.LightClientUpdate{
				AttestedHeader:          nil,
				NextSyncCommittee:       nil,
				NextSyncCommitteeBranch: nil,
				FinalizedHeader:         nil,
				FinalityBranch:          nil,
				SyncAggregate:           nil,
				SignatureSlot:           8,
			},
		},
		{
			Version: 1,
			Data: &ethpbv2.LightClientUpdate{
				AttestedHeader:          nil,
				NextSyncCommittee:       nil,
				NextSyncCommitteeBranch: nil,
				FinalizedHeader:         nil,
				FinalityBranch:          nil,
				SyncAggregate:           nil,
				SignatureSlot:           9,
			},
		},
	}

	for i, update := range updates {
		err := db.SaveLightClientUpdate(ctx, uint64(i+1), update)
		require.NoError(t, err)
	}

	// Retrieve the updates
	retrievedUpdates, err := db.LightClientUpdates(ctx, 3, 1)
	require.NotNil(t, err)
	require.Equal(t, err.Error(), "start period 3 is greater than end period 1")
	require.IsNil(t, retrievedUpdates)
}

func TestStore_LightClientUpdate_EndPeriodEqualToStartPeriod(t *testing.T) {
	db := setupDB(t)
	ctx := context.Background()
	updates := []*ethpbv2.LightClientUpdateWithVersion{
		{
			Version: 1,
			Data: &ethpbv2.LightClientUpdate{
				AttestedHeader:          nil,
				NextSyncCommittee:       nil,
				NextSyncCommitteeBranch: nil,
				FinalizedHeader:         nil,
				FinalityBranch:          nil,
				SyncAggregate:           nil,
				SignatureSlot:           7,
			},
		},
		{
			Version: 1,
			Data: &ethpbv2.LightClientUpdate{
				AttestedHeader:          nil,
				NextSyncCommittee:       nil,
				NextSyncCommitteeBranch: nil,
				FinalizedHeader:         nil,
				FinalityBranch:          nil,
				SyncAggregate:           nil,
				SignatureSlot:           8,
			},
		},
		{
			Version: 1,
			Data: &ethpbv2.LightClientUpdate{
				AttestedHeader:          nil,
				NextSyncCommittee:       nil,
				NextSyncCommitteeBranch: nil,
				FinalizedHeader:         nil,
				FinalityBranch:          nil,
				SyncAggregate:           nil,
				SignatureSlot:           9,
			},
		},
	}

	for i, update := range updates {
		err := db.SaveLightClientUpdate(ctx, uint64(i+1), update)
		require.NoError(t, err)
	}

	// Retrieve the updates
	retrievedUpdates, err := db.LightClientUpdates(ctx, 2, 2)
	require.NoError(t, err)
	require.Equal(t, 1, len(retrievedUpdates))
	require.Equal(t, updates[1].Data.SignatureSlot, retrievedUpdates[2].Data.SignatureSlot, "retrieved update does not match saved update")
}

func TestStore_LightClientUpdate_StartPeriodBeforeFirstUpdate(t *testing.T) {
	db := setupDB(t)
	ctx := context.Background()
	updates := []*ethpbv2.LightClientUpdateWithVersion{
		{
			Version: 1,
			Data: &ethpbv2.LightClientUpdate{
				AttestedHeader:          nil,
				NextSyncCommittee:       nil,
				NextSyncCommitteeBranch: nil,
				FinalizedHeader:         nil,
				FinalityBranch:          nil,
				SyncAggregate:           nil,
				SignatureSlot:           7,
			},
		},
		{
			Version: 1,
			Data: &ethpbv2.LightClientUpdate{
				AttestedHeader:          nil,
				NextSyncCommittee:       nil,
				NextSyncCommitteeBranch: nil,
				FinalizedHeader:         nil,
				FinalityBranch:          nil,
				SyncAggregate:           nil,
				SignatureSlot:           8,
			},
		},
		{
			Version: 1,
			Data: &ethpbv2.LightClientUpdate{
				AttestedHeader:          nil,
				NextSyncCommittee:       nil,
				NextSyncCommitteeBranch: nil,
				FinalizedHeader:         nil,
				FinalityBranch:          nil,
				SyncAggregate:           nil,
				SignatureSlot:           9,
			},
		},
	}

	for i, update := range updates {
		err := db.SaveLightClientUpdate(ctx, uint64(i+2), update)
		require.NoError(t, err)
	}

	// Retrieve the updates
	retrievedUpdates, err := db.LightClientUpdates(ctx, 0, 4)
	require.NoError(t, err)
	require.Equal(t, 3, len(retrievedUpdates))
	for i, update := range updates {
		require.Equal(t, update.Data.SignatureSlot, retrievedUpdates[uint64(i+2)].Data.SignatureSlot, "retrieved update does not match saved update")
	}
}

func TestStore_LightClientUpdate_EndPeriodAfterLastUpdate(t *testing.T) {
	db := setupDB(t)
	ctx := context.Background()
	updates := []*ethpbv2.LightClientUpdateWithVersion{
		{
			Version: 1,
			Data: &ethpbv2.LightClientUpdate{
				AttestedHeader:          nil,
				NextSyncCommittee:       nil,
				NextSyncCommitteeBranch: nil,
				FinalizedHeader:         nil,
				FinalityBranch:          nil,
				SyncAggregate:           nil,
				SignatureSlot:           7,
			},
		},
		{
			Version: 1,
			Data: &ethpbv2.LightClientUpdate{
				AttestedHeader:          nil,
				NextSyncCommittee:       nil,
				NextSyncCommitteeBranch: nil,
				FinalizedHeader:         nil,
				FinalityBranch:          nil,
				SyncAggregate:           nil,
				SignatureSlot:           8,
			},
		},
		{
			Version: 1,
			Data: &ethpbv2.LightClientUpdate{
				AttestedHeader:          nil,
				NextSyncCommittee:       nil,
				NextSyncCommitteeBranch: nil,
				FinalizedHeader:         nil,
				FinalityBranch:          nil,
				SyncAggregate:           nil,
				SignatureSlot:           9,
			},
		},
	}

	for i, update := range updates {
		err := db.SaveLightClientUpdate(ctx, uint64(i+1), update)
		require.NoError(t, err)
	}

	// Retrieve the updates
	retrievedUpdates, err := db.LightClientUpdates(ctx, 1, 6)
	require.NoError(t, err)
	require.Equal(t, 3, len(retrievedUpdates))
	for i, update := range updates {
		require.Equal(t, update.Data.SignatureSlot, retrievedUpdates[uint64(i+1)].Data.SignatureSlot, "retrieved update does not match saved update")
	}
}

func TestStore_LightClientUpdate_PartialUpdates(t *testing.T) {
	db := setupDB(t)
	ctx := context.Background()
	updates := []*ethpbv2.LightClientUpdateWithVersion{
		{
			Version: 1,
			Data: &ethpbv2.LightClientUpdate{
				AttestedHeader:          nil,
				NextSyncCommittee:       nil,
				NextSyncCommitteeBranch: nil,
				FinalizedHeader:         nil,
				FinalityBranch:          nil,
				SyncAggregate:           nil,
				SignatureSlot:           7,
			},
		},
		{
			Version: 1,
			Data: &ethpbv2.LightClientUpdate{
				AttestedHeader:          nil,
				NextSyncCommittee:       nil,
				NextSyncCommitteeBranch: nil,
				FinalizedHeader:         nil,
				FinalityBranch:          nil,
				SyncAggregate:           nil,
				SignatureSlot:           8,
			},
		},
		{
			Version: 1,
			Data: &ethpbv2.LightClientUpdate{
				AttestedHeader:          nil,
				NextSyncCommittee:       nil,
				NextSyncCommitteeBranch: nil,
				FinalizedHeader:         nil,
				FinalityBranch:          nil,
				SyncAggregate:           nil,
				SignatureSlot:           9,
			},
		},
	}

	for i, update := range updates {
		err := db.SaveLightClientUpdate(ctx, uint64(i+1), update)
		require.NoError(t, err)
	}

	// Retrieve the updates
	retrievedUpdates, err := db.LightClientUpdates(ctx, 1, 2)
	require.NoError(t, err)
	require.Equal(t, 2, len(retrievedUpdates))
	for i, update := range updates[:2] {
		require.Equal(t, update.Data.SignatureSlot, retrievedUpdates[uint64(i+1)].Data.SignatureSlot, "retrieved update does not match saved update")
	}
}

func TestStore_LightClientUpdate_MissingPeriods_SimpleData(t *testing.T) {
	db := setupDB(t)
	ctx := context.Background()
	updates := []*ethpbv2.LightClientUpdateWithVersion{
		{
			Version: 1,
			Data: &ethpbv2.LightClientUpdate{
				AttestedHeader:          nil,
				NextSyncCommittee:       nil,
				NextSyncCommitteeBranch: nil,
				FinalizedHeader:         nil,
				FinalityBranch:          nil,
				SyncAggregate:           nil,
				SignatureSlot:           7,
			},
		},
		{
			Version: 1,
			Data: &ethpbv2.LightClientUpdate{
				AttestedHeader:          nil,
				NextSyncCommittee:       nil,
				NextSyncCommitteeBranch: nil,
				FinalizedHeader:         nil,
				FinalityBranch:          nil,
				SyncAggregate:           nil,
				SignatureSlot:           8,
			},
		},
		{
			Version: 1,
			Data: &ethpbv2.LightClientUpdate{
				AttestedHeader:          nil,
				NextSyncCommittee:       nil,
				NextSyncCommitteeBranch: nil,
				FinalizedHeader:         nil,
				FinalityBranch:          nil,
				SyncAggregate:           nil,
				SignatureSlot:           11,
			},
		},
		{
			Version: 1,
			Data: &ethpbv2.LightClientUpdate{
				AttestedHeader:          nil,
				NextSyncCommittee:       nil,
				NextSyncCommitteeBranch: nil,
				FinalizedHeader:         nil,
				FinalityBranch:          nil,
				SyncAggregate:           nil,
				SignatureSlot:           12,
			},
		},
	}

	for _, update := range updates {
		err := db.SaveLightClientUpdate(ctx, uint64(update.Data.SignatureSlot), update)
		require.NoError(t, err)
	}

	// Retrieve the updates
	retrievedUpdates, err := db.LightClientUpdates(ctx, 7, 12)
	require.NoError(t, err)
	require.Equal(t, 4, len(retrievedUpdates))
	for _, update := range updates {
		require.Equal(t, update.Data.SignatureSlot, retrievedUpdates[uint64(update.Data.SignatureSlot)].Data.SignatureSlot, "retrieved update does not match saved update")
	}

	// Retrieve the updates from the middle
	retrievedUpdates, err = db.LightClientUpdates(ctx, 8, 12)
	require.NoError(t, err)
	require.Equal(t, 3, len(retrievedUpdates))
	require.Equal(t, updates[1].Data.SignatureSlot, retrievedUpdates[8].Data.SignatureSlot, "retrieved update does not match saved update")
	require.Equal(t, updates[2].Data.SignatureSlot, retrievedUpdates[11].Data.SignatureSlot, "retrieved update does not match saved update")
	require.Equal(t, updates[3].Data.SignatureSlot, retrievedUpdates[12].Data.SignatureSlot, "retrieved update does not match saved update")

	// Retrieve the updates from after the missing period
	retrievedUpdates, err = db.LightClientUpdates(ctx, 11, 12)
	require.NoError(t, err)
	require.Equal(t, 2, len(retrievedUpdates))
	require.Equal(t, updates[2].Data.SignatureSlot, retrievedUpdates[11].Data.SignatureSlot, "retrieved update does not match saved update")
	require.Equal(t, updates[3].Data.SignatureSlot, retrievedUpdates[12].Data.SignatureSlot, "retrieved update does not match saved update")

	// Retrieve the updates from before the missing period to after the missing period
	retrievedUpdates, err = db.LightClientUpdates(ctx, 3, 15)
	require.NoError(t, err)
	require.Equal(t, 4, len(retrievedUpdates))
	require.Equal(t, updates[0].Data.SignatureSlot, retrievedUpdates[7].Data.SignatureSlot, "retrieved update does not match saved update")
	require.Equal(t, updates[1].Data.SignatureSlot, retrievedUpdates[8].Data.SignatureSlot, "retrieved update does not match saved update")
	require.Equal(t, updates[2].Data.SignatureSlot, retrievedUpdates[11].Data.SignatureSlot, "retrieved update does not match saved update")
	require.Equal(t, updates[3].Data.SignatureSlot, retrievedUpdates[12].Data.SignatureSlot, "retrieved update does not match saved update")
}

func TestStore_LightClientUpdate_EmptyDB(t *testing.T) {
	db := setupDB(t)
	ctx := context.Background()

	// Retrieve the updates
	retrievedUpdates, err := db.LightClientUpdates(ctx, 1, 3)
	require.IsNil(t, err)
	require.Equal(t, 0, len(retrievedUpdates))
}

func TestStore_LightClientUpdate_MissingPeriodsAtTheEnd_SimpleData(t *testing.T) {
	db := setupDB(t)
	ctx := context.Background()

	for i := 1; i < 4; i++ {
		update := &ethpbv2.LightClientUpdateWithVersion{
			Version: 1,
			Data: &ethpbv2.LightClientUpdate{
				AttestedHeader:          nil,
				NextSyncCommittee:       nil,
				NextSyncCommitteeBranch: nil,
				FinalizedHeader:         nil,
				FinalityBranch:          nil,
				SyncAggregate:           nil,
				SignatureSlot:           primitives.Slot(uint64(i)),
			},
		}
		err := db.SaveLightClientUpdate(ctx, uint64(i), update)
		require.NoError(t, err)
	}
	for i := 7; i < 10; i++ {
		update := &ethpbv2.LightClientUpdateWithVersion{
			Version: 1,
			Data: &ethpbv2.LightClientUpdate{
				AttestedHeader:          nil,
				NextSyncCommittee:       nil,
				NextSyncCommitteeBranch: nil,
				FinalizedHeader:         nil,
				FinalityBranch:          nil,
				SyncAggregate:           nil,
				SignatureSlot:           primitives.Slot(uint64(i)),
			},
		}
		err := db.SaveLightClientUpdate(ctx, uint64(i), update)
		require.NoError(t, err)
	}

	// Retrieve the updates from 1 to 5
	retrievedUpdates, err := db.LightClientUpdates(ctx, 1, 5)
	require.NoError(t, err)
	require.Equal(t, 3, len(retrievedUpdates))
	require.Equal(t, primitives.Slot(1), retrievedUpdates[1].Data.SignatureSlot, "retrieved update does not match saved update")
	require.Equal(t, primitives.Slot(2), retrievedUpdates[2].Data.SignatureSlot, "retrieved update does not match saved update")
	require.Equal(t, primitives.Slot(3), retrievedUpdates[3].Data.SignatureSlot, "retrieved update does not match saved update")
}

func setupLightClientTestDB(t *testing.T) (*Store, context.Context) {
	db := setupDB(t)
	ctx := context.Background()

	for i := 10; i < 101; i++ { // 10 to 100
		update := &ethpbv2.LightClientUpdateWithVersion{
			Version: 1,
			Data: &ethpbv2.LightClientUpdate{
				AttestedHeader:          nil,
				NextSyncCommittee:       nil,
				NextSyncCommitteeBranch: nil,
				FinalizedHeader:         nil,
				FinalityBranch:          nil,
				SyncAggregate:           nil,
				SignatureSlot:           primitives.Slot(uint64(i)),
			},
		}
		err := db.SaveLightClientUpdate(ctx, uint64(i), update)
		require.NoError(t, err)
	}

	for i := 110; i < 201; i++ { // 110 to 200
		update := &ethpbv2.LightClientUpdateWithVersion{
			Version: 1,
			Data: &ethpbv2.LightClientUpdate{
				AttestedHeader:          nil,
				NextSyncCommittee:       nil,
				NextSyncCommitteeBranch: nil,
				FinalizedHeader:         nil,
				FinalityBranch:          nil,
				SyncAggregate:           nil,
				SignatureSlot:           primitives.Slot(uint64(i)),
			},
		}
		err := db.SaveLightClientUpdate(ctx, uint64(i), update)
		require.NoError(t, err)
	}

	return db, ctx
}

func TestStore_LightClientUpdate_MissingPeriodsInTheMiddleDistributed(t *testing.T) {
	db, ctx := setupLightClientTestDB(t)

	// Retrieve the updates - should succeed even with missing periods in the middle
	retrievedUpdates, err := db.LightClientUpdates(ctx, 1, 300)
	require.NoError(t, err)
	require.Equal(t, 91*2, len(retrievedUpdates))
	for i := 10; i < 101; i++ {
		require.Equal(t, primitives.Slot(uint64(i)), retrievedUpdates[uint64(i)].Data.SignatureSlot, "retrieved update does not match saved update")
	}
	for i := 110; i < 201; i++ {
		require.Equal(t, primitives.Slot(uint64(i)), retrievedUpdates[uint64(i)].Data.SignatureSlot, "retrieved update does not match saved update")
	}
}

func TestStore_LightClientUpdate_RetrieveValidRangeFromStart(t *testing.T) {
	db, ctx := setupLightClientTestDB(t)

	// retrieve 1 to 100 - should work even though the start period is before the first period in the DB
	retrievedUpdates, err := db.LightClientUpdates(ctx, 1, 100)
	require.NoError(t, err)
	require.Equal(t, 91, len(retrievedUpdates))
	for i := 10; i < 101; i++ {
		require.Equal(t, primitives.Slot(uint64(i)), retrievedUpdates[uint64(i)].Data.SignatureSlot, "retrieved update does not match saved update")
	}
}

func TestStore_LightClientUpdate_RetrieveValidRangeInTheMiddle(t *testing.T) {
	db, ctx := setupLightClientTestDB(t)

	// retrieve 110 to 200 - should work because all periods are present
	retrievedUpdates, err := db.LightClientUpdates(ctx, 110, 200)
	require.NoError(t, err)
	require.Equal(t, 91, len(retrievedUpdates))
	for i := 110; i < 201; i++ {
		require.Equal(t, primitives.Slot(uint64(i)), retrievedUpdates[uint64(i)].Data.SignatureSlot, "retrieved update does not match saved update")
	}
}

func TestStore_LightClientUpdate_MissingPeriodInTheMiddleConcentrated(t *testing.T) {
	db, ctx := setupLightClientTestDB(t)

	// retrieve 100 to 200
	retrievedUpdates, err := db.LightClientUpdates(ctx, 100, 200)
	require.NoError(t, err)
	require.Equal(t, 92, len(retrievedUpdates))
	require.Equal(t, primitives.Slot(100), retrievedUpdates[100].Data.SignatureSlot, "retrieved update does not match saved update")
	for i := 110; i < 201; i++ {
		require.Equal(t, primitives.Slot(uint64(i)), retrievedUpdates[uint64(i)].Data.SignatureSlot, "retrieved update does not match saved update")
	}
}

func TestStore_LightClientUpdate_MissingPeriodsAtTheEnd(t *testing.T) {
	db, ctx := setupLightClientTestDB(t)

	// retrieve 10 to 109
	retrievedUpdates, err := db.LightClientUpdates(ctx, 10, 109)
	require.NoError(t, err)
	require.Equal(t, 91, len(retrievedUpdates))
	for i := 10; i < 101; i++ {
		require.Equal(t, primitives.Slot(uint64(i)), retrievedUpdates[uint64(i)].Data.SignatureSlot, "retrieved update does not match saved update")
	}
}

func TestStore_LightClientUpdate_MissingPeriodsAtTheBeginning(t *testing.T) {
	db, ctx := setupLightClientTestDB(t)

	// retrieve 105 to 200
	retrievedUpdates, err := db.LightClientUpdates(ctx, 105, 200)
	require.NoError(t, err)
	require.Equal(t, 91, len(retrievedUpdates))
	for i := 110; i < 201; i++ {
		require.Equal(t, primitives.Slot(uint64(i)), retrievedUpdates[uint64(i)].Data.SignatureSlot, "retrieved update does not match saved update")
	}
}

func TestStore_LightClientUpdate_StartPeriodGreaterThanLastPeriod(t *testing.T) {
	db, ctx := setupLightClientTestDB(t)

	// retrieve 300 to 400
	retrievedUpdates, err := db.LightClientUpdates(ctx, 300, 400)
	require.NoError(t, err)
	require.Equal(t, 0, len(retrievedUpdates))
}
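Note: every test in this file re-declares the same all-nil fixture by hand, varying only the signature slot. If this file is ever resurrected, a small constructor would keep the tables readable; fakeUpdate below is a hypothetical helper name, not something that exists in the package:

    // fakeUpdate builds a versioned update whose only distinguishing field is
    // the signature slot, which is all the assertions above ever compare.
    func fakeUpdate(slot primitives.Slot) *ethpbv2.LightClientUpdateWithVersion {
        return &ethpbv2.LightClientUpdateWithVersion{
            Version: 1,
            Data:    &ethpbv2.LightClientUpdate{SignatureSlot: slot},
        }
    }

With it, a fixture slice collapses to: updates := []*ethpbv2.LightClientUpdateWithVersion{fakeUpdate(7), fakeUpdate(8), fakeUpdate(9)}.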
@@ -17,9 +17,6 @@ var (
	feeRecipientBucket = []byte("fee-recipient")
	registrationBucket = []byte("registration")

-	// Light Client Updates Bucket
-	lightClientUpdatesBucket = []byte("light-client-updates")
-
	// Deprecated: This bucket was migrated in PR 6461. Do not use, except for migrations.
	slotsHasObjectBucket = []byte("slots-has-objects")
	// Deprecated: This bucket was migrated in PR 6461. Do not use, except for migrations.

@@ -84,15 +84,11 @@ func (s *mockEngine) callCount(method string) int {
}

func mockParseUintList(t *testing.T, data json.RawMessage) []uint64 {
-	var list []string
+	var list []uint64
	if err := json.Unmarshal(data, &list); err != nil {
		t.Fatalf("failed to parse uint list: %v", err)
	}
-	uints := make([]uint64, len(list))
-	for i, u := range list {
-		uints[i] = hexutil.MustDecodeUint64(u)
-	}
-	return uints
+	return list
}

func mockParseHexByteList(t *testing.T, data json.RawMessage) []hexutil.Bytes {
@@ -121,7 +117,7 @@ func TestParseRequest(t *testing.T) {
	ctx := context.Background()
	cases := []struct {
		method   string
-		hexArgs  []string // uint64 as hex
+		uintArgs []uint64
		byteArgs []hexutil.Bytes
	}{
		{
@@ -139,28 +135,26 @@ func TestParseRequest(t *testing.T) {
			},
		},
		{
-			method:  GetPayloadBodiesByRangeV1,
-			hexArgs: []string{hexutil.EncodeUint64(0), hexutil.EncodeUint64(1)},
+			method:   GetPayloadBodiesByRangeV1,
+			uintArgs: []uint64{0, 1},
		},
		{
-			method:  GetPayloadBodiesByRangeV2,
-			hexArgs: []string{hexutil.EncodeUint64(math.MaxUint64), hexutil.EncodeUint64(1)},
+			method:   GetPayloadBodiesByRangeV2,
+			uintArgs: []uint64{math.MaxUint64, 1},
		},
	}
	for _, c := range cases {
		t.Run(c.method, func(t *testing.T) {
			cli, srv := newMockEngine(t)
-			srv.register(c.method, func(msg *jsonrpcMessage, w http.ResponseWriter, _ *http.Request) {
+			srv.register(c.method, func(msg *jsonrpcMessage, w http.ResponseWriter, r *http.Request) {
				require.Equal(t, c.method, msg.Method)
				nr := uint64(len(c.byteArgs))
				if len(c.byteArgs) > 0 {
					require.DeepEqual(t, c.byteArgs, mockParseHexByteList(t, msg.Params))
				}
-				if len(c.hexArgs) > 0 {
+				if len(c.uintArgs) > 0 {
					rang := mockParseUintList(t, msg.Params)
-					for i, r := range rang {
-						require.Equal(t, c.hexArgs[i], hexutil.EncodeUint64(r))
-					}
+					require.DeepEqual(t, c.uintArgs, rang)
					nr = rang[1]
				}
				mockWriteResult(t, w, msg, make([]*pb.ExecutionPayloadBody, nr))
@@ -171,18 +165,18 @@ func TestParseRequest(t *testing.T) {
			if len(c.byteArgs) > 0 {
				args = []interface{}{c.byteArgs}
			}
-			if len(c.hexArgs) > 0 {
-				args = make([]interface{}, len(c.hexArgs))
-				for i := range c.hexArgs {
-					args[i] = c.hexArgs[i]
+			if len(c.uintArgs) > 0 {
+				args = make([]interface{}, len(c.uintArgs))
+				for i := range c.uintArgs {
+					args[i] = c.uintArgs[i]
				}
			}
			require.NoError(t, cli.CallContext(ctx, &result, c.method, args...))
			if len(c.byteArgs) > 0 {
				require.Equal(t, len(c.byteArgs), len(result))
			}
-			if len(c.hexArgs) > 0 {
-				require.Equal(t, int(hexutil.MustDecodeUint64(c.hexArgs[1])), len(result))
+			if len(c.uintArgs) > 0 {
+				require.Equal(t, int(c.uintArgs[1]), len(result))
			}
		})
	}
@@ -209,7 +203,7 @@ func TestCallCount(t *testing.T) {
	for _, c := range cases {
		t.Run(c.method, func(t *testing.T) {
			cli, srv := newMockEngine(t)
-			srv.register(c.method, func(msg *jsonrpcMessage, w http.ResponseWriter, _ *http.Request) {
+			srv.register(c.method, func(msg *jsonrpcMessage, w http.ResponseWriter, r *http.Request) {
				mockWriteResult(t, w, msg, nil)
			})
			for i := 0; i < c.count; i++ {

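Note: the switch from hexArgs to uintArgs changes what actually goes over the wire. hexutil.EncodeUint64 produces a 0x-prefixed QUANTITY string, while a bare uint64 is marshalled as a plain JSON number; whether a given endpoint accepts numbers depends on its parameter schema. A standalone sketch of the difference, using only the standard library and go-ethereum's hexutil (already a dependency here):

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/ethereum/go-ethereum/common/hexutil"
    )

    func main() {
        // Old style: params pre-encoded as hex QUANTITY strings.
        hexParams, _ := json.Marshal([]interface{}{hexutil.EncodeUint64(0), hexutil.EncodeUint64(1)})
        fmt.Println(string(hexParams)) // ["0x0","0x1"]

        // New style: raw uint64 values, marshalled as JSON numbers.
        uintParams, _ := json.Marshal([]interface{}{uint64(0), uint64(1)})
        fmt.Println(string(uintParams)) // [0,1]
    }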
@@ -5,7 +5,6 @@ import (
	"sort"

	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
@@ -161,7 +160,7 @@ func computeRanges(hbns []hashBlockNumber) []byRangeReq {

func (r *blindedBlockReconstructor) requestBodiesByRange(ctx context.Context, client RPCClient, method string, req byRangeReq) error {
	result := make([]*pb.ExecutionPayloadBody, 0)
-	if err := client.CallContext(ctx, &result, method, hexutil.EncodeUint64(req.start), hexutil.EncodeUint64(req.count)); err != nil {
+	if err := client.CallContext(ctx, &result, method, req.start, req.count); err != nil {
		return err
	}
	if uint64(len(result)) != req.count {

@@ -676,18 +676,3 @@ func (f *ForkChoice) TargetRootForEpoch(root [32]byte, epoch primitives.Epoch) (
	}
	return f.TargetRootForEpoch(targetNode.root, epoch)
}
-
-// ParentRoot returns the block root of the parent node if it is in forkchoice.
-// The exception is the finalized checkpoint root, for which we return the zero
-// hash.
-func (f *ForkChoice) ParentRoot(root [32]byte) ([32]byte, error) {
-	n, ok := f.store.nodeByRoot[root]
-	if !ok || n == nil {
-		return [32]byte{}, ErrNilNode
-	}
-	// Return the zero hash for the tree root
-	if n.parent == nil {
-		return [32]byte{}, nil
-	}
-	return n.parent.root, nil
-}

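Note: ParentRoot is a single-step lookup; a caller that wants a full ancestry chain has to iterate it until the zero-hash sentinel that marks the tree root comes back. A hypothetical standalone helper (not part of the package) sketching that pattern against the signatures above:

    // ancestry collects ancestor roots by repeatedly applying a ParentRoot-style
    // lookup until the zero-hash sentinel is returned for the tree root.
    func ancestry(parentRoot func([32]byte) ([32]byte, error), root [32]byte) ([][32]byte, error) {
        var chain [][32]byte
        for {
            parent, err := parentRoot(root)
            if err != nil {
                return nil, err // unknown root, e.g. ErrNilNode
            }
            if parent == ([32]byte{}) {
                return chain, nil // reached the tree root
            }
            chain = append(chain, parent)
            root = parent
        }
    }

Usage would look like: chain, err := ancestry(f.ParentRoot, someRoot).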
@@ -861,29 +861,3 @@ func TestForkChoiceSlot(t *testing.T) {
	require.NoError(t, err)
	require.Equal(t, primitives.Slot(3), slot)
}
-
-func TestForkchoiceParentRoot(t *testing.T) {
-	f := setup(0, 0)
-	ctx := context.Background()
-	root1 := [32]byte{'a'}
-	st, root, err := prepareForkchoiceState(ctx, 3, root1, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 0, 0)
-	require.NoError(t, err)
-	require.NoError(t, f.InsertNode(ctx, st, root))
-
-	root2 := [32]byte{'b'}
-	st, root, err = prepareForkchoiceState(ctx, 3, root2, root1, [32]byte{'A'}, 0, 0)
-	require.NoError(t, err)
-	require.NoError(t, f.InsertNode(ctx, st, root))
-
-	root, err = f.ParentRoot(root2)
-	require.NoError(t, err)
-	require.Equal(t, root1, root)
-
-	_, err = f.ParentRoot([32]byte{'c'})
-	require.ErrorIs(t, err, ErrNilNode)
-
-	zeroHash := [32]byte{}
-	root, err = f.ParentRoot(zeroHash)
-	require.NoError(t, err)
-	require.Equal(t, zeroHash, root)
-}

@@ -80,7 +80,6 @@ type FastGetter interface {
	TargetRootForEpoch([32]byte, primitives.Epoch) ([32]byte, error)
	UnrealizedJustifiedPayloadBlockHash() [32]byte
	Weight(root [32]byte) (uint64, error)
-	ParentRoot(root [32]byte) ([32]byte, error)
}

// Setter allows setting forkchoice information
@@ -169,10 +169,3 @@ func (ro *ROForkChoice) TargetRootForEpoch(root [32]byte, epoch primitives.Epoch
	defer ro.l.RUnlock()
	return ro.getter.TargetRootForEpoch(root, epoch)
}
-
-// ParentRoot delegates to the underlying forkchoice call, under a lock.
-func (ro *ROForkChoice) ParentRoot(root [32]byte) ([32]byte, error) {
-	ro.l.RLock()
-	defer ro.l.RUnlock()
-	return ro.getter.ParentRoot(root)
-}

@@ -37,7 +37,6 @@ const (
	slotCalled
	lastRootCalled
	targetRootForEpochCalled
-	parentRootCalled
)

func _discard(t *testing.T, e error) {
@@ -292,8 +291,3 @@ func (ro *mockROForkchoice) TargetRootForEpoch(_ [32]byte, _ primitives.Epoch) (
	ro.calls = append(ro.calls, targetRootForEpochCalled)
	return [32]byte{}, nil
}
-
-func (ro *mockROForkchoice) ParentRoot(_ [32]byte) ([32]byte, error) {
-	ro.calls = append(ro.calls, parentRootCalled)
-	return [32]byte{}, nil
-}

@@ -73,21 +73,6 @@ func configureBuilderCircuitBreaker(cliCtx *cli.Context) error {
			return err
		}
	}
-	if cliCtx.IsSet(flags.MinBuilderBid.Name) {
-		c := params.BeaconConfig().Copy()
-		c.MinBuilderBid = cliCtx.Uint64(flags.MinBuilderBid.Name)
-		if err := params.SetActive(c); err != nil {
-			return err
-		}
-	}
-	if cliCtx.IsSet(flags.MinBuilderDiff.Name) {
-		c := params.BeaconConfig().Copy()
-		c.MinBuilderDiff = cliCtx.Uint64(flags.MinBuilderDiff.Name)
-		if err := params.SetActive(c); err != nil {
-			return err
-		}
-	}

	return nil
}

@@ -132,7 +117,7 @@ func configureEth1Config(cliCtx *cli.Context) error {
}

func configureNetwork(cliCtx *cli.Context) {
-	if cliCtx.IsSet(cmd.BootstrapNode.Name) {
+	if len(cliCtx.StringSlice(cmd.BootstrapNode.Name)) > 0 {
		c := params.BeaconNetworkConfig()
		c.BootstrapNodes = cliCtx.StringSlice(cmd.BootstrapNode.Name)
		params.OverrideBeaconNetworkConfig(c)

@@ -988,6 +988,7 @@ func (b *BeaconNode) registerRPCService(router *mux.Router) error {
		FinalizationFetcher: chainService,
		BlockReceiver:       chainService,
		BlobReceiver:        chainService,
+		DataColumnReceiver:  chainService,
		AttestationReceiver: chainService,
		GenesisTimeFetcher:  chainService,
		GenesisFetcher:      chainService,

@@ -7,6 +7,7 @@ go_library(
        "broadcaster.go",
        "config.go",
        "connection_gater.go",
+       "custody.go",
        "dial_relay_node.go",
        "discovery.go",
        "doc.go",
@@ -46,6 +47,7 @@ go_library(
        "//beacon-chain/core/altair:go_default_library",
        "//beacon-chain/core/feed/state:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
+       "//beacon-chain/core/peerdas:go_default_library",
        "//beacon-chain/core/time:go_default_library",
        "//beacon-chain/db:go_default_library",
        "//beacon-chain/p2p/encoder:go_default_library",
@@ -56,6 +58,7 @@ go_library(
        "//beacon-chain/startup:go_default_library",
        "//cmd/beacon-chain/flags:go_default_library",
        "//config/features:go_default_library",
+       "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//consensus-types/wrapper:go_default_library",
@@ -74,6 +77,8 @@ go_library(
        "//runtime/version:go_default_library",
        "//time:go_default_library",
        "//time/slots:go_default_library",
+       "@com_github_btcsuite_btcd_btcec_v2//:go_default_library",
+       "@com_github_ethereum_go_ethereum//crypto:go_default_library",
        "@com_github_ethereum_go_ethereum//p2p/discover:go_default_library",
        "@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
        "@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
@@ -115,6 +120,7 @@ go_test(
        "addr_factory_test.go",
        "broadcaster_test.go",
        "connection_gater_test.go",
+       "custody_test.go",
        "dial_relay_node_test.go",
        "discovery_test.go",
        "fork_test.go",
@@ -136,9 +142,11 @@ go_test(
    flaky = True,
    tags = ["requires-network"],
    deps = [
+       "//beacon-chain/blockchain/kzg:go_default_library",
        "//beacon-chain/blockchain/testing:go_default_library",
        "//beacon-chain/cache:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
+       "//beacon-chain/core/peerdas:go_default_library",
        "//beacon-chain/core/signing:go_default_library",
        "//beacon-chain/db/testing:go_default_library",
        "//beacon-chain/p2p/encoder:go_default_library",
@@ -151,6 +159,7 @@ go_test(
        "//cmd/beacon-chain/flags:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
+       "//consensus-types/blocks:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//consensus-types/wrapper:go_default_library",
        "//container/leaky-bucket:go_default_library",
@@ -161,13 +170,12 @@ go_test(
        "//network/forks:go_default_library",
        "//proto/eth/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//proto/prysm/v1alpha1/metadata:go_default_library",
        "//proto/testing:go_default_library",
        "//runtime/version:go_default_library",
        "//testing/assert:go_default_library",
        "//testing/require:go_default_library",
        "//testing/util:go_default_library",
        "//time:go_default_library",
        "//time/slots:go_default_library",
        "@com_github_ethereum_go_ethereum//crypto:go_default_library",
        "@com_github_ethereum_go_ethereum//p2p/discover:go_default_library",
        "@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",

@@ -9,16 +9,18 @@ import (

	"github.com/pkg/errors"
	ssz "github.com/prysmaticlabs/fastssz"
-	"github.com/sirupsen/logrus"
-	"go.opencensus.io/trace"
-	"google.golang.org/protobuf/proto"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/altair"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
+	fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/crypto/hash"
	"github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v5/time/slots"
+	"github.com/sirupsen/logrus"
+	"go.opencensus.io/trace"
+	"google.golang.org/protobuf/proto"
)

// ErrMessageNotMapped occurs on a Broadcast attempt when a message has not been defined in the
@@ -96,7 +98,12 @@ func (s *Service) BroadcastSyncCommitteeMessage(ctx context.Context, subnet uint
	return nil
}

-func (s *Service) internalBroadcastAttestation(ctx context.Context, subnet uint64, att ethpb.Att, forkDigest [4]byte) {
+func (s *Service) internalBroadcastAttestation(
+	ctx context.Context,
+	subnet uint64,
+	att ethpb.Att,
+	forkDigest [fieldparams.VersionLength]byte,
+) {
	_, span := trace.StartSpan(ctx, "p2p.internalBroadcastAttestation")
	defer span.End()
	ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.
@@ -152,7 +159,7 @@ func (s *Service) internalBroadcastAttestation(ctx context.Context, subnet uint6
	}
}

-func (s *Service) broadcastSyncCommittee(ctx context.Context, subnet uint64, sMsg *ethpb.SyncCommitteeMessage, forkDigest [4]byte) {
+func (s *Service) broadcastSyncCommittee(ctx context.Context, subnet uint64, sMsg *ethpb.SyncCommitteeMessage, forkDigest [fieldparams.VersionLength]byte) {
	_, span := trace.StartSpan(ctx, "p2p.broadcastSyncCommittee")
	defer span.End()
	ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.
@@ -228,7 +235,12 @@ func (s *Service) BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.
	return nil
}

-func (s *Service) internalBroadcastBlob(ctx context.Context, subnet uint64, blobSidecar *ethpb.BlobSidecar, forkDigest [4]byte) {
+func (s *Service) internalBroadcastBlob(
+	ctx context.Context,
+	subnet uint64,
+	blobSidecar *ethpb.BlobSidecar,
+	forkDigest [fieldparams.VersionLength]byte,
+) {
	_, span := trace.StartSpan(ctx, "p2p.internalBroadcastBlob")
	defer span.End()
	ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.
@@ -243,7 +255,7 @@ func (s *Service) internalBroadcastBlob(ctx context.Context, subnet uint64, blob
	s.subnetLocker(wrappedSubIdx).RUnlock()

	if !hasPeer {
-		blobSidecarCommitteeBroadcastAttempts.Inc()
+		blobSidecarBroadcastAttempts.Inc()
		if err := func() error {
			s.subnetLocker(wrappedSubIdx).Lock()
			defer s.subnetLocker(wrappedSubIdx).Unlock()
@@ -252,7 +264,7 @@ func (s *Service) internalBroadcastBlob(ctx context.Context, subnet uint64, blob
			return err
		}
		if ok {
-			blobSidecarCommitteeBroadcasts.Inc()
+			blobSidecarBroadcasts.Inc()
			return nil
		}
		return errors.New("failed to find peers for subnet")
@@ -268,6 +280,99 @@ func (s *Service) internalBroadcastBlob(ctx context.Context, subnet uint64, blob
	}
}
+
+// BroadcastDataColumn broadcasts a data column sidecar to the p2p network; the message is
+// broadcast on the current fork and to the input column subnet.
+// TODO: Add tests
+func (s *Service) BroadcastDataColumn(ctx context.Context, columnSubnet uint64, dataColumnSidecar *ethpb.DataColumnSidecar) error {
+	// Add tracing to the function.
+	ctx, span := trace.StartSpan(ctx, "p2p.BroadcastDataColumn")
+	defer span.End()
+
+	// Ensure the data column sidecar is not nil.
+	if dataColumnSidecar == nil {
+		return errors.Errorf("attempted to broadcast nil data column sidecar at subnet %d", columnSubnet)
+	}
+
+	// Retrieve the current fork digest.
+	forkDigest, err := s.currentForkDigest()
+	if err != nil {
+		err := errors.Wrap(err, "current fork digest")
+		tracing.AnnotateError(span, err)
+		return err
+	}
+
+	// Non-blocking broadcast, with attempts to discover a column subnet peer if none available.
+	go s.internalBroadcastDataColumn(ctx, columnSubnet, dataColumnSidecar, forkDigest)
+
+	return nil
+}
+
+func (s *Service) internalBroadcastDataColumn(
+	ctx context.Context,
+	columnSubnet uint64,
+	dataColumnSidecar *ethpb.DataColumnSidecar,
+	forkDigest [fieldparams.VersionLength]byte,
+) {
+	// Add tracing to the function.
+	_, span := trace.StartSpan(ctx, "p2p.internalBroadcastDataColumn")
+	defer span.End()
+
+	// Increase the number of broadcast attempts.
+	dataColumnSidecarBroadcastAttempts.Inc()
+
+	// Clear parent context / deadline.
+	ctx = trace.NewContext(context.Background(), span)
+
+	// Define a one-slot length context timeout.
+	oneSlot := time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second
+	ctx, cancel := context.WithTimeout(ctx, oneSlot)
+	defer cancel()
+
+	// Build the topic corresponding to this column subnet and this fork digest.
+	topic := dataColumnSubnetToTopic(columnSubnet, forkDigest)
+
+	// Compute the wrapped subnet index.
+	wrappedSubIdx := columnSubnet + dataColumnSubnetVal
+
+	// Check if we have peers with this subnet.
+	hasPeer := func() bool {
+		s.subnetLocker(wrappedSubIdx).RLock()
+		defer s.subnetLocker(wrappedSubIdx).RUnlock()
+
+		return s.hasPeerWithSubnet(topic)
+	}()
+
+	// If no peers are found, attempt to find peers with this subnet.
+	if !hasPeer {
+		if err := func() error {
+			s.subnetLocker(wrappedSubIdx).Lock()
+			defer s.subnetLocker(wrappedSubIdx).Unlock()
+
+			ok, err := s.FindPeersWithSubnet(ctx, topic, columnSubnet, 1 /*threshold*/)
+			if err != nil {
+				return errors.Wrap(err, "find peers for subnet")
+			}
+
+			if ok {
+				return nil
+			}
+			return errors.New("failed to find peers for subnet")
+		}(); err != nil {
+			log.WithError(err).Error("Failed to find peers")
+			tracing.AnnotateError(span, err)
+		}
+	}
+
+	// Broadcast the data column sidecar to the network.
+	if err := s.broadcastObject(ctx, dataColumnSidecar, topic); err != nil {
+		log.WithError(err).Error("Failed to broadcast data column sidecar")
+		tracing.AnnotateError(span, err)
+	}
+
+	// Increase the number of successful broadcasts.
+	dataColumnSidecarBroadcasts.Inc()
+}

// method to broadcast messages to other peers in our gossip mesh.
func (s *Service) broadcastObject(ctx context.Context, obj ssz.Marshaler, topic string) error {
	ctx, span := trace.StartSpan(ctx, "p2p.broadcastObject")
@@ -297,14 +402,18 @@ func (s *Service) broadcastObject(ctx context.Context, obj ssz.Marshaler, topic
	return nil
}

-func attestationToTopic(subnet uint64, forkDigest [4]byte) string {
+func attestationToTopic(subnet uint64, forkDigest [fieldparams.VersionLength]byte) string {
	return fmt.Sprintf(AttestationSubnetTopicFormat, forkDigest, subnet)
}

-func syncCommitteeToTopic(subnet uint64, forkDigest [4]byte) string {
+func syncCommitteeToTopic(subnet uint64, forkDigest [fieldparams.VersionLength]byte) string {
	return fmt.Sprintf(SyncCommitteeSubnetTopicFormat, forkDigest, subnet)
}

-func blobSubnetToTopic(subnet uint64, forkDigest [4]byte) string {
+func blobSubnetToTopic(subnet uint64, forkDigest [fieldparams.VersionLength]byte) string {
	return fmt.Sprintf(BlobSubnetTopicFormat, forkDigest, subnet)
}
+
+func dataColumnSubnetToTopic(subnet uint64, forkDigest [fieldparams.VersionLength]byte) string {
+	return fmt.Sprintf(DataColumnSubnetTopicFormat, forkDigest, subnet)
+}
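Note: internalBroadcastBlob and internalBroadcastDataColumn share the same shape: a cheap check for a subnet peer under the read lock, escalating to the write lock only when discovery has to run. A distilled standalone sketch of that pattern (the subnetFinder interface and function name are invented here to make the snippet self-contained; they are not package APIs):

    // subnetFinder abstracts the two service calls the broadcast helpers rely on.
    type subnetFinder interface {
        HasPeerWithSubnet(topic string) bool
        FindPeersWithSubnet(ctx context.Context, topic string, subnet, threshold uint64) (bool, error)
    }

    // ensureSubnetPeer distills the double-checked locking used above: read-locked
    // fast path first, write lock only when peer discovery must run.
    func ensureSubnetPeer(ctx context.Context, l *sync.RWMutex, f subnetFinder, topic string, subnet uint64) error {
        l.RLock()
        hasPeer := f.HasPeerWithSubnet(topic)
        l.RUnlock()
        if hasPeer {
            return nil
        }

        l.Lock()
        defer l.Unlock()
        ok, err := f.FindPeersWithSubnet(ctx, topic, subnet, 1 /*threshold*/)
        if err != nil {
            return err
        }
        if !ok {
            return errors.New("failed to find peers for subnet")
        }
        return nil
    }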
@@ -13,11 +13,16 @@ import (
	pubsub "github.com/libp2p/go-libp2p-pubsub"
	"github.com/libp2p/go-libp2p/core/host"
	"github.com/prysmaticlabs/go-bitfield"
+	"google.golang.org/protobuf/proto"

+	"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
+	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/scorers"
	p2ptest "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/testing"
+	fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
+	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/wrapper"
	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
@@ -25,7 +30,6 @@ import (
	"github.com/prysmaticlabs/prysm/v5/testing/assert"
	"github.com/prysmaticlabs/prysm/v5/testing/require"
	"github.com/prysmaticlabs/prysm/v5/testing/util"
-	"google.golang.org/protobuf/proto"
)

func TestService_Broadcast(t *testing.T) {
@@ -520,3 +524,70 @@ func TestService_BroadcastBlob(t *testing.T) {
	require.NoError(t, p.BroadcastBlob(ctx, subnet, blobSidecar))
	require.Equal(t, false, util.WaitTimeout(&wg, 1*time.Second), "Failed to receive pubsub within 1s")
}
+
+func TestService_BroadcastDataColumn(t *testing.T) {
+	require.NoError(t, kzg.Start())
+	p1 := p2ptest.NewTestP2P(t)
+	p2 := p2ptest.NewTestP2P(t)
+	p1.Connect(p2)
+	require.NotEqual(t, 0, len(p1.BHost.Network().Peers()), "No peers")
+
+	p := &Service{
+		host:                  p1.BHost,
+		pubsub:                p1.PubSub(),
+		joinedTopics:          map[string]*pubsub.Topic{},
+		cfg:                   &Config{},
+		genesisTime:           time.Now(),
+		genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
+		subnetsLock:           make(map[uint64]*sync.RWMutex),
+		subnetsLockLock:       sync.Mutex{},
+		peers: peers.NewStatus(context.Background(), &peers.StatusConfig{
+			ScorerParams: &scorers.Config{},
+		}),
+	}
+
+	b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlockElectra())
+	require.NoError(t, err)
+	blobs := make([]kzg.Blob, fieldparams.MaxBlobsPerBlock)
+	sidecars, err := peerdas.DataColumnSidecars(b, blobs)
+	require.NoError(t, err)
+
+	sidecar := sidecars[0]
+	subnet := uint64(0)
+	topic := DataColumnSubnetTopicFormat
+	GossipTypeMapping[reflect.TypeOf(sidecar)] = topic
+	digest, err := p.currentForkDigest()
+	require.NoError(t, err)
+	topic = fmt.Sprintf(topic, digest, subnet)
+
+	// External peer subscribes to the topic.
+	topic += p.Encoding().ProtocolSuffix()
+	sub, err := p2.SubscribeToTopic(topic)
+	require.NoError(t, err)
+
+	time.Sleep(50 * time.Millisecond) // libp2p fails without this delay...
+
+	// Async listen for the pubsub, must be before the broadcast.
+	var wg sync.WaitGroup
+	wg.Add(1)
+	go func(tt *testing.T) {
+		defer wg.Done()
+		ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
+		defer cancel()
+
+		msg, err := sub.Next(ctx)
+		require.NoError(t, err)
+
+		result := &ethpb.DataColumnSidecar{}
+		require.NoError(t, p.Encoding().DecodeGossip(msg.Data, result))
+		require.DeepEqual(t, result, sidecar)
+	}(t)
+
+	// Attempt to broadcast nil object should fail.
+	ctx := context.Background()
+	require.ErrorContains(t, "attempted to broadcast nil", p.BroadcastDataColumn(ctx, subnet, nil))
+
+	// Broadcast to peers and wait.
+	require.NoError(t, p.BroadcastDataColumn(ctx, subnet, sidecar))
+	require.Equal(t, false, util.WaitTimeout(&wg, 1*time.Second), "Failed to receive pubsub within 1s")
+}

beacon-chain/p2p/custody.go (new file, 118 lines)
@@ -0,0 +1,118 @@
package p2p

import (
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
	"github.com/prysmaticlabs/prysm/v5/config/params"
)

// GetValidCustodyPeers returns a list of peers that custody a superset of the local node's custody columns.
func (s *Service) GetValidCustodyPeers(peers []peer.ID) ([]peer.ID, error) {
	// Get the total number of columns.
	numberOfColumns := params.BeaconConfig().NumberOfColumns

	localCustodySubnetCount := peerdas.CustodySubnetCount()
	localCustodyColumns, err := peerdas.CustodyColumns(s.NodeID(), localCustodySubnetCount)
	if err != nil {
		return nil, errors.Wrap(err, "custody columns for local node")
	}

	localCustodyColumnsCount := uint64(len(localCustodyColumns))

	// Find the valid peers.
	validPeers := make([]peer.ID, 0, len(peers))

loop:
	for _, pid := range peers {
		// Get the custody subnets count of the remote peer.
		remoteCustodySubnetCount := s.CustodyCountFromRemotePeer(pid)

		// Get the remote node ID from the peer ID.
		remoteNodeID, err := ConvertPeerIDToNodeID(pid)
		if err != nil {
			return nil, errors.Wrap(err, "convert peer ID to node ID")
		}

		// Get the custody columns of the remote peer.
		remoteCustodyColumns, err := peerdas.CustodyColumns(remoteNodeID, remoteCustodySubnetCount)
		if err != nil {
			return nil, errors.Wrap(err, "custody columns")
		}

		remoteCustodyColumnsCount := uint64(len(remoteCustodyColumns))

		// If the remote peer custodies fewer columns than the local node, skip it.
		if remoteCustodyColumnsCount < localCustodyColumnsCount {
			continue
		}

		// If the remote peer custodies all the possible columns, add it to the list.
		if remoteCustodyColumnsCount == numberOfColumns {
			copiedId := pid
			validPeers = append(validPeers, copiedId)
			continue
		}

		// Filter out invalid peers.
		for c := range localCustodyColumns {
			if !remoteCustodyColumns[c] {
				continue loop
			}
		}

		copiedId := pid

		// Add valid peer to list.
		validPeers = append(validPeers, copiedId)
	}

	return validPeers, nil
}

// CustodyCountFromRemotePeer retrieves the custody count from a remote peer.
func (s *Service) CustodyCountFromRemotePeer(pid peer.ID) uint64 {
	// By default, we assume the peer custodies the minimum number of subnets.
	custodyRequirement := params.BeaconConfig().CustodyRequirement

	// First, try to get the custody count from the peer's metadata.
	metadata, err := s.peers.Metadata(pid)
	if err != nil {
		log.WithError(err).WithField("peerID", pid).Debug("Failed to retrieve metadata for peer, defaulting to the ENR value")
	}

	if metadata != nil {
		custodyCount := metadata.CustodySubnetCount()
		if custodyCount > 0 {
			return custodyCount
		}
	}

	log.WithField("peerID", pid).Debug("Failed to retrieve custody count from metadata for peer, defaulting to the ENR value")

	// Retrieve the ENR of the peer.
	record, err := s.peers.ENR(pid)
	if err != nil {
		log.WithError(err).WithFields(logrus.Fields{
			"peerID":       pid,
			"defaultValue": custodyRequirement,
		}).Debug("Failed to retrieve ENR for peer, defaulting to the default value")

		return custodyRequirement
	}

	// Retrieve the custody subnets count from the ENR.
	custodyCount, err := peerdas.CustodyCountFromRecord(record)
	if err != nil {
		log.WithError(err).WithFields(logrus.Fields{
			"peerID":       pid,
			"defaultValue": custodyRequirement,
		}).Debug("Failed to retrieve custody count from ENR for peer, defaulting to the default value")

		return custodyRequirement
	}

	return custodyCount
}
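Note: the labeled loop in GetValidCustodyPeers is a subset test: a peer is kept only if every locally custodied column also appears in its custody map. The same check in isolation, assuming the map[uint64]bool shape implied by the usage above (this helper does not exist in the package):

    // custodiesSuperset reports whether remote custodies every column local does.
    func custodiesSuperset(local, remote map[uint64]bool) bool {
        for column := range local {
            if !remote[column] {
                return false
            }
        }
        return true
    }

The count comparisons before the loop are just cheap early exits: a peer with fewer columns can never pass the subset test, and a peer custodying every column always does.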
196
beacon-chain/p2p/custody_test.go
Normal file
196
beacon-chain/p2p/custody_test.go
Normal file
@@ -0,0 +1,196 @@
package p2p

import (
    "context"
    "crypto/ecdsa"
    "net"
    "testing"
    "time"

    "github.com/ethereum/go-ethereum/p2p/enode"
    "github.com/ethereum/go-ethereum/p2p/enr"
    "github.com/libp2p/go-libp2p/core/crypto"
    "github.com/libp2p/go-libp2p/core/network"
    "github.com/libp2p/go-libp2p/core/peer"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/scorers"
    "github.com/prysmaticlabs/prysm/v5/config/params"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/wrapper"
    ecdsaprysm "github.com/prysmaticlabs/prysm/v5/crypto/ecdsa"
    prysmNetwork "github.com/prysmaticlabs/prysm/v5/network"
    pb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/metadata"
    "github.com/prysmaticlabs/prysm/v5/testing/require"
)

func createPeer(t *testing.T, privateKeyOffset int, custodyCount uint64) (*enr.Record, peer.ID, *ecdsa.PrivateKey) {
    privateKeyBytes := make([]byte, 32)
    for i := 0; i < 32; i++ {
        privateKeyBytes[i] = byte(privateKeyOffset + i)
    }

    unmarshalledPrivateKey, err := crypto.UnmarshalSecp256k1PrivateKey(privateKeyBytes)
    require.NoError(t, err)

    privateKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(unmarshalledPrivateKey)
    require.NoError(t, err)

    peerID, err := peer.IDFromPrivateKey(unmarshalledPrivateKey)
    require.NoError(t, err)

    record := &enr.Record{}
    record.Set(peerdas.Csc(custodyCount))
    record.Set(enode.Secp256k1(privateKey.PublicKey))

    return record, peerID, privateKey
}

func TestGetValidCustodyPeers(t *testing.T) {
    genesisValidatorRoot := make([]byte, 32)

    for i := 0; i < 32; i++ {
        genesisValidatorRoot[i] = byte(i)
    }

    service := &Service{
        cfg:                   &Config{},
        genesisTime:           time.Now(),
        genesisValidatorsRoot: genesisValidatorRoot,
        peers: peers.NewStatus(context.Background(), &peers.StatusConfig{
            ScorerParams: &scorers.Config{},
        }),
    }

    ipAddrString, err := prysmNetwork.ExternalIPv4()
    require.NoError(t, err)
    ipAddr := net.ParseIP(ipAddrString)

    custodyRequirement := params.BeaconConfig().CustodyRequirement
    dataColumnSidecarSubnetCount := params.BeaconConfig().DataColumnSidecarSubnetCount

    // Peer 1 custodies exactly the same columns as us.
    // (We use the same key pair as ours for simplicity.)
    peer1Record, peer1ID, localPrivateKey := createPeer(t, 1, custodyRequirement)

    // Peer 2 custodies all the columns.
    peer2Record, peer2ID, _ := createPeer(t, 2, dataColumnSidecarSubnetCount)

    // Peer 3 custodies different columns than us (but the same count).
    // (We use the same public key as peer 2 for simplicity.)
    peer3Record, peer3ID, _ := createPeer(t, 3, custodyRequirement)

    // Peer 4 custodies fewer columns than us.
    peer4Record, peer4ID, _ := createPeer(t, 4, custodyRequirement-1)

    listener, err := service.createListener(ipAddr, localPrivateKey)
    require.NoError(t, err)

    service.dv5Listener = listener

    service.peers.Add(peer1Record, peer1ID, nil, network.DirOutbound)
    service.peers.Add(peer2Record, peer2ID, nil, network.DirOutbound)
    service.peers.Add(peer3Record, peer3ID, nil, network.DirOutbound)
    service.peers.Add(peer4Record, peer4ID, nil, network.DirOutbound)

    actual, err := service.GetValidCustodyPeers([]peer.ID{peer1ID, peer2ID, peer3ID, peer4ID})
    require.NoError(t, err)

    expected := []peer.ID{peer1ID, peer2ID}
    require.DeepSSZEqual(t, expected, actual)
}

func TestCustodyCountFromRemotePeer(t *testing.T) {
    const (
        expectedENR      uint64 = 7
        expectedMetadata uint64 = 8
        pid                     = "test-id"
    )

    csc := peerdas.Csc(expectedENR)

    // Define a nil record.
    var nilRecord *enr.Record = nil

    // Define an empty record (a record with no `csc` entry).
    emptyRecord := &enr.Record{}

    // Define a nominal record.
    nominalRecord := &enr.Record{}
    nominalRecord.Set(csc)

    // Define a metadata with zero custody.
    zeroMetadata := wrapper.WrappedMetadataV2(&pb.MetaDataV2{
        CustodySubnetCount: 0,
    })

    // Define a nominal metadata.
    nominalMetadata := wrapper.WrappedMetadataV2(&pb.MetaDataV2{
        CustodySubnetCount: expectedMetadata,
    })

    testCases := []struct {
        name     string
        record   *enr.Record
        metadata metadata.Metadata
        expected uint64
    }{
        {
            name:     "No metadata - No ENR",
            record:   nilRecord,
            expected: params.BeaconConfig().CustodyRequirement,
        },
        {
            name:     "No metadata - Empty ENR",
            record:   emptyRecord,
            expected: params.BeaconConfig().CustodyRequirement,
        },
        {
            name:     "No metadata - ENR",
            record:   nominalRecord,
            expected: expectedENR,
        },
        {
            name:     "Metadata with 0 value - ENR",
            record:   nominalRecord,
            metadata: zeroMetadata,
            expected: expectedENR,
        },
        {
            name:     "Metadata - ENR",
            record:   nominalRecord,
            metadata: nominalMetadata,
            expected: expectedMetadata,
        },
    }

    for _, tc := range testCases {
        t.Run(tc.name, func(t *testing.T) {
            // Create peers status.
            peers := peers.NewStatus(context.Background(), &peers.StatusConfig{
                ScorerParams: &scorers.Config{},
            })

            // Set the metadata.
            if tc.metadata != nil {
                peers.SetMetadata(pid, tc.metadata)
            }

            // Add a new peer with the record.
            peers.Add(tc.record, pid, nil, network.DirOutbound)

            // Create a new service.
            service := &Service{
                peers:    peers,
                metaData: tc.metadata,
            }

            // Retrieve the custody count from the remote peer.
            actual := service.CustodyCountFromRemotePeer(pid)

            // Verify the result.
            require.Equal(t, tc.expected, actual)
        })
    }
}
@@ -15,7 +15,9 @@ import (
    ma "github.com/multiformats/go-multiaddr"
    "github.com/pkg/errors"
    "github.com/prysmaticlabs/go-bitfield"

    "github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
    "github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
    "github.com/prysmaticlabs/prysm/v5/config/features"
    "github.com/prysmaticlabs/prysm/v5/config/params"
@@ -42,67 +44,148 @@ const (
    udp6
)

const quickProtocolEnrKey = "quic"

type quicProtocol uint16

// quicProtocol is the "quic" key, which holds the QUIC port of the node.
func (quicProtocol) ENRKey() string { return "quic" }
func (quicProtocol) ENRKey() string { return quickProtocolEnrKey }
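For readers unfamiliar with go-ethereum's typed ENR entries: a value implementing enr.Entry names its key through ENRKey(), and the enr package RLP-encodes it on Set and decodes it on Load. A standalone round-trip sketch of the pattern quicProtocol follows (illustrative; the port value is made up):

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/p2p/enr"
)

// quicProtocol mirrors the typed entry above: the method names the key,
// and the enr package handles the RLP encoding of the value.
type quicProtocol uint16

func (quicProtocol) ENRKey() string { return "quic" }

func main() {
    record := &enr.Record{}
    record.Set(quicProtocol(9001)) // write the entry

    var port quicProtocol
    if err := record.Load(&port); err != nil { // read it back
        panic(err)
    }
    fmt.Println(port) // 9001
}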
// RefreshENR uses an epoch to refresh the enr entry for our node
// with the tracked committee ids for the epoch, allowing our node
// to be dynamically discoverable by others given our tracked committee ids.
func (s *Service) RefreshENR() {
    // return early if discv5 isn't running
// RefreshPersistentSubnets checks that we are tracking our local persistent subnets for a variety of gossip topics.
// This routine checks for our attestation, sync committee and data column subnets and updates them if they have
// been rotated.
func (s *Service) RefreshPersistentSubnets() {
    // Return early if discv5 service isn't running.
    if s.dv5Listener == nil || !s.isInitialized() {
        return
    }
    currEpoch := slots.ToEpoch(slots.CurrentSlot(uint64(s.genesisTime.Unix())))
    if err := initializePersistentSubnets(s.dv5Listener.LocalNode().ID(), currEpoch); err != nil {

    // Get the current epoch.
    currentSlot := slots.CurrentSlot(uint64(s.genesisTime.Unix()))
    currentEpoch := slots.ToEpoch(currentSlot)

    // Get our node ID.
    nodeID := s.dv5Listener.LocalNode().ID()

    // Get our node record.
    record := s.dv5Listener.Self().Record()

    // Get the version of our metadata.
    metadataVersion := s.Metadata().Version()

    // Initialize persistent subnets.
    if err := initializePersistentSubnets(nodeID, currentEpoch); err != nil {
        log.WithError(err).Error("Could not initialize persistent subnets")
        return
    }

    // Initialize persistent column subnets.
    if err := initializePersistentColumnSubnets(nodeID); err != nil {
        log.WithError(err).Error("Could not initialize persistent column subnets")
        return
    }

    // Get the current attestation subnet bitfield.
    bitV := bitfield.NewBitvector64()
    committees := cache.SubnetIDs.GetAllSubnets()
    for _, idx := range committees {
    attestationCommittees := cache.SubnetIDs.GetAllSubnets()
    for _, idx := range attestationCommittees {
        bitV.SetBitAt(idx, true)
    }
    currentBitV, err := attBitvector(s.dv5Listener.Self().Record())

    // Get the attestation subnet bitfield we store in our record.
    inRecordBitV, err := attBitvector(record)
    if err != nil {
        log.WithError(err).Error("Could not retrieve att bitfield")
        return
    }

    // Compare current epoch with our fork epochs
    // Get the attestation subnet bitfield in our metadata.
    inMetadataBitV := s.Metadata().AttnetsBitfield()

    // Is our attestation bitvector record up to date?
    isBitVUpToDate := bytes.Equal(bitV, inRecordBitV) && bytes.Equal(bitV, inMetadataBitV)

    // Compare current epoch with Altair fork epoch
    altairForkEpoch := params.BeaconConfig().AltairForkEpoch
    switch {
    case currEpoch < altairForkEpoch:

    if currentEpoch < altairForkEpoch {
        // Phase 0 behaviour.
        if bytes.Equal(bitV, currentBitV) {
            // return early if bitfield hasn't changed
        if isBitVUpToDate {
            // Return early if bitfield hasn't changed.
            return
        }

        // Some data changed. Update the record and the metadata.
        s.updateSubnetRecordWithMetadata(bitV)
    default:
        // Retrieve sync subnets from application level
        // cache.
        bitS := bitfield.Bitvector4{byte(0x00)}
        committees = cache.SyncSubnetIDs.GetAllSubnets(currEpoch)
        for _, idx := range committees {
            bitS.SetBitAt(idx, true)
        }
        currentBitS, err := syncBitvector(s.dv5Listener.Self().Record())
        if err != nil {
            log.WithError(err).Error("Could not retrieve sync bitfield")
            return
        }
        if bytes.Equal(bitV, currentBitV) && bytes.Equal(bitS, currentBitS) &&
            s.Metadata().Version() == version.Altair {
            // return early if bitfields haven't changed
            return
        }
        s.updateSubnetRecordWithMetadataV2(bitV, bitS)

        // Ping all peers.
        s.pingPeers()

        return
    }
    // ping all peers to inform them of new metadata

    // Get the current sync subnet bitfield.
    bitS := bitfield.Bitvector4{byte(0x00)}
    syncCommittees := cache.SyncSubnetIDs.GetAllSubnets(currentEpoch)
    for _, idx := range syncCommittees {
        bitS.SetBitAt(idx, true)
    }

    // Get the sync subnet bitfield we store in our record.
    inRecordBitS, err := syncBitvector(record)
    if err != nil {
        log.WithError(err).Error("Could not retrieve sync bitfield")
        return
    }

    // Get the sync subnet bitfield in our metadata.
    currentBitSInMetadata := s.Metadata().SyncnetsBitfield()

    isBitSUpToDate := bytes.Equal(bitS, inRecordBitS) && bytes.Equal(bitS, currentBitSInMetadata)

    // Compare current epoch with EIP-7594 fork epoch.
    eip7594ForkEpoch := params.BeaconConfig().Eip7594ForkEpoch

    if currentEpoch < eip7594ForkEpoch {
        // Altair behaviour.
        if metadataVersion == version.Altair && isBitVUpToDate && isBitSUpToDate {
            // Nothing to do, return early.
            return
        }

        // Some data have changed, update our record and metadata.
        s.updateSubnetRecordWithMetadataV2(bitV, bitS)

        // Ping all peers to inform them of new metadata
        s.pingPeers()

        return
    }

    // Get the current custody subnet count.
    custodySubnetCount := peerdas.CustodySubnetCount()

    // Get the custody subnet count we store in our record.
    inRecordCustodySubnetCount, err := peerdas.CustodyCountFromRecord(record)
    if err != nil {
        log.WithError(err).Error("Could not retrieve custody subnet count")
        return
    }

    // Get the custody subnet count in our metadata.
    inMetadataCustodySubnetCount := s.Metadata().CustodySubnetCount()

    isCustodySubnetCountUpToDate := (custodySubnetCount == inRecordCustodySubnetCount && custodySubnetCount == inMetadataCustodySubnetCount)

    if isBitVUpToDate && isBitSUpToDate && isCustodySubnetCountUpToDate {
        // Nothing to do, return early.
        return
    }

    // Some data changed. Update the record and the metadata.
    s.updateSubnetRecordWithMetadataV3(bitV, bitS, custodySubnetCount)

    // Ping all peers.
    s.pingPeers()
}
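The isBitVUpToDate, isBitSUpToDate and isCustodySubnetCountUpToDate checks above share one shape: a value is fresh only when the newly computed version matches both the copy in the ENR record and the copy in the local metadata; otherwise the record and metadata are republished and peers are pinged. A small standalone sketch of that predicate; the helper name is illustrative, not a Prysm API:

package main

import (
    "bytes"
    "fmt"
)

// upToDate reports whether a freshly computed bitfield matches both the
// ENR copy and the metadata copy, the condition under which
// RefreshPersistentSubnets returns without republishing.
func upToDate(computed, inRecord, inMetadata []byte) bool {
    return bytes.Equal(computed, inRecord) && bytes.Equal(computed, inMetadata)
}

func main() {
    fmt.Println(upToDate([]byte{1}, []byte{1}, []byte{1})) // true: nothing to do
    fmt.Println(upToDate([]byte{1}, []byte{1}, []byte{0})) // false: metadata stale
}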
@@ -258,6 +341,10 @@ func (s *Service) createLocalNode(
        localNode.Set(quicEntry)
    }

    if params.PeerDASEnabled() {
        localNode.Set(peerdas.Csc(peerdas.CustodySubnetCount()))
    }

    localNode.SetFallbackIP(ipAddr)
    localNode.SetFallbackUDP(udpPort)
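On the consuming side, an optional entry like the custody subnet count written above is read back with enr.WithEntry, falling back to a default when the entry is absent (for example, a remote peer without PeerDAS enabled), which is the behaviour CustodyCountFromRemotePeer and the tests rely on. A standalone sketch, assuming the key name is "csc"; the helper is illustrative:

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/p2p/enr"
)

// readUint64Entry loads a uint64 entry from a record, returning def when
// the key is missing or cannot be decoded. Illustrative sketch only.
func readUint64Entry(record *enr.Record, key string, def uint64) uint64 {
    var value uint64
    if err := record.Load(enr.WithEntry(key, &value)); err != nil {
        return def
    }
    return value
}

func main() {
    record := &enr.Record{}
    fmt.Println(readUint64Entry(record, "csc", 4)) // 4: entry absent, default used

    record.Set(enr.WithEntry("csc", uint64(8)))
    fmt.Println(readUint64Entry(record, "csc", 4)) // 8: entry present
}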
@@ -346,6 +433,8 @@ func (s *Service) filterPeer(node *enode.Node) bool {
    // Ignore nodes that are already active.
    if s.peers.IsActive(peerData.ID) {
        // Constantly update enr for known peers
        s.peers.UpdateENR(node.Record(), peerData.ID)
        return false
    }
@@ -16,12 +16,15 @@ import (
    "github.com/ethereum/go-ethereum/p2p/discover"
    "github.com/ethereum/go-ethereum/p2p/enode"
    "github.com/ethereum/go-ethereum/p2p/enr"
    "github.com/libp2p/go-libp2p/core/crypto"
    "github.com/libp2p/go-libp2p/core/host"
    "github.com/libp2p/go-libp2p/core/network"
    "github.com/libp2p/go-libp2p/core/peer"
    swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing"
    "github.com/prysmaticlabs/go-bitfield"
    mock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/peerdata"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/scorers"
@@ -30,13 +33,12 @@ import (
    "github.com/prysmaticlabs/prysm/v5/config/params"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/wrapper"
    leakybucket "github.com/prysmaticlabs/prysm/v5/container/leaky-bucket"
    ecdsaprysm "github.com/prysmaticlabs/prysm/v5/crypto/ecdsa"
    "github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
    prysmNetwork "github.com/prysmaticlabs/prysm/v5/network"
    ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/v5/runtime/version"
    "github.com/prysmaticlabs/prysm/v5/testing/assert"
    "github.com/prysmaticlabs/prysm/v5/testing/require"
    "github.com/prysmaticlabs/prysm/v5/time/slots"
    logTest "github.com/sirupsen/logrus/hooks/test"
)
@@ -131,6 +133,10 @@ func TestStartDiscV5_DiscoverAllPeers(t *testing.T) {
}

func TestCreateLocalNode(t *testing.T) {
    params.SetupTestConfigCleanup(t)
    cfg := params.BeaconConfig()
    cfg.Eip7594ForkEpoch = 1
    params.OverrideBeaconConfig(cfg)
    testCases := []struct {
        name string
        cfg  *Config
@@ -227,6 +233,11 @@ func TestCreateLocalNode(t *testing.T) {
            syncSubnets := new([]byte)
            require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(syncCommsSubnetEnrKey, syncSubnets)))
            require.DeepSSZEqual(t, []byte{0}, *syncSubnets)

            // Check custody_subnet_count config.
            custodySubnetCount := new(uint64)
            require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(peerdas.CustodySubnetCountEnrKey, custodySubnetCount)))
            require.Equal(t, uint64(1), *custodySubnetCount)
        })
    }
}
@@ -435,177 +446,314 @@ func addPeer(t *testing.T, p *peers.Status, state peerdata.PeerConnectionState)
    return id
}

func TestRefreshENR_ForkBoundaries(t *testing.T) {
func createAndConnectPeer(t *testing.T, p2pService *testp2p.TestP2P, offset int) {
    // Create the private key.
    privateKeyBytes := make([]byte, 32)
    for i := 0; i < 32; i++ {
        privateKeyBytes[i] = byte(offset + i)
    }

    privateKey, err := crypto.UnmarshalSecp256k1PrivateKey(privateKeyBytes)
    require.NoError(t, err)

    // Create the peer.
    peer := testp2p.NewTestP2P(t, swarmt.OptPeerPrivateKey(privateKey))

    // Add the peer and connect it.
    p2pService.Peers().Add(&enr.Record{}, peer.PeerID(), nil, network.DirOutbound)
    p2pService.Peers().SetConnectionState(peer.PeerID(), peers.PeerConnected)
    p2pService.Connect(peer)
}

// Define the ping count.
var actualPingCount int

type check struct {
    pingCount              int
    metadataSequenceNumber uint64
    attestationSubnets     []uint64
    syncSubnets            []uint64
    custodySubnetCount     *uint64
}

func checkPingCountCacheMetadataRecord(
    t *testing.T,
    service *Service,
    expected check,
) {
    // Check the ping count.
    require.Equal(t, expected.pingCount, actualPingCount)

    // Check the attestation subnets in the cache.
    actualAttestationSubnets := cache.SubnetIDs.GetAllSubnets()
    require.DeepSSZEqual(t, expected.attestationSubnets, actualAttestationSubnets)

    // Check the metadata sequence number.
    actualMetadataSequenceNumber := service.metaData.SequenceNumber()
    require.Equal(t, expected.metadataSequenceNumber, actualMetadataSequenceNumber)

    // Compute expected attestation subnets bits.
    expectedBitV := bitfield.NewBitvector64()
    exists := false

    for _, idx := range expected.attestationSubnets {
        exists = true
        expectedBitV.SetBitAt(idx, true)
    }

    // Check attnets in ENR.
    var actualBitVENR bitfield.Bitvector64
    err := service.dv5Listener.LocalNode().Node().Record().Load(enr.WithEntry(attSubnetEnrKey, &actualBitVENR))
    require.NoError(t, err)
    require.DeepSSZEqual(t, expectedBitV, actualBitVENR)

    // Check attnets in metadata.
    if !exists {
        expectedBitV = nil
    }

    actualBitVMetadata := service.metaData.AttnetsBitfield()
    require.DeepSSZEqual(t, expectedBitV, actualBitVMetadata)

    if expected.syncSubnets != nil {
        // Compute expected sync subnets bits.
        expectedBitS := bitfield.NewBitvector4()
        exists = false

        for _, idx := range expected.syncSubnets {
            exists = true
            expectedBitS.SetBitAt(idx, true)
        }

        // Check syncnets in ENR.
        var actualBitSENR bitfield.Bitvector4
        err := service.dv5Listener.LocalNode().Node().Record().Load(enr.WithEntry(syncCommsSubnetEnrKey, &actualBitSENR))
        require.NoError(t, err)
        require.DeepSSZEqual(t, expectedBitS, actualBitSENR)

        // Check syncnets in metadata.
        if !exists {
            expectedBitS = nil
        }

        actualBitSMetadata := service.metaData.SyncnetsBitfield()
        require.DeepSSZEqual(t, expectedBitS, actualBitSMetadata)
    }

    if expected.custodySubnetCount != nil {
        // Check custody subnet count in ENR.
        var actualCustodySubnetCount uint64
        err := service.dv5Listener.LocalNode().Node().Record().Load(enr.WithEntry(peerdas.CustodySubnetCountEnrKey, &actualCustodySubnetCount))
        require.NoError(t, err)
        require.Equal(t, *expected.custodySubnetCount, actualCustodySubnetCount)

        // Check custody subnet count in metadata.
        actualCustodySubnetCountMetadata := service.metaData.CustodySubnetCount()
        require.Equal(t, *expected.custodySubnetCount, actualCustodySubnetCountMetadata)
    }
}

func TestRefreshPersistentSubnets(t *testing.T) {
    params.SetupTestConfigCleanup(t)

    // Clean up caches after usage.
    defer cache.SubnetIDs.EmptyAllCaches()
    defer cache.SyncSubnetIDs.EmptyAllCaches()

    tests := []struct {
        name           string
        svcBuilder     func(t *testing.T) *Service
        postValidation func(t *testing.T, s *Service)
    const (
        altairForkEpoch  = 5
        eip7594ForkEpoch = 10
    )

    custodySubnetCount := uint64(1)

    // Set up epochs.
    defaultCfg := params.BeaconConfig()
    cfg := defaultCfg.Copy()
    cfg.AltairForkEpoch = altairForkEpoch
    cfg.Eip7594ForkEpoch = eip7594ForkEpoch
    params.OverrideBeaconConfig(cfg)

    // Compute the number of seconds per epoch.
    secondsPerSlot := params.BeaconConfig().SecondsPerSlot
    slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
    secondsPerEpoch := secondsPerSlot * uint64(slotsPerEpoch)

    testCases := []struct {
        name              string
        epochSinceGenesis uint64
        checks            []check
    }{
        {
            name: "metadata no change",
            svcBuilder: func(t *testing.T) *Service {
                port := 2000
                ipAddr, pkey := createAddrAndPrivKey(t)
                s := &Service{
                    genesisTime:           time.Now(),
                    genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
                    cfg:                   &Config{UDPPort: uint(port)},
                }
                listener, err := s.createListener(ipAddr, pkey)
                assert.NoError(t, err)
                s.dv5Listener = listener
                s.metaData = wrapper.WrappedMetadataV0(new(ethpb.MetaDataV0))
                s.updateSubnetRecordWithMetadata([]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00})
                return s
            },
            postValidation: func(t *testing.T, s *Service) {
                currEpoch := slots.ToEpoch(slots.CurrentSlot(uint64(s.genesisTime.Unix())))
                subs, err := computeSubscribedSubnets(s.dv5Listener.LocalNode().ID(), currEpoch)
                assert.NoError(t, err)

                bitV := bitfield.NewBitvector64()
                for _, idx := range subs {
                    bitV.SetBitAt(idx, true)
                }
                assert.DeepEqual(t, bitV, s.metaData.AttnetsBitfield())
            name:              "Phase0",
            epochSinceGenesis: 0,
            checks: []check{
                {
                    pingCount:              0,
                    metadataSequenceNumber: 0,
                    attestationSubnets:     []uint64{},
                },
                {
                    pingCount:              1,
                    metadataSequenceNumber: 1,
                    attestationSubnets:     []uint64{40, 41},
                },
                {
                    pingCount:              1,
                    metadataSequenceNumber: 1,
                    attestationSubnets:     []uint64{40, 41},
                },
                {
                    pingCount:              1,
                    metadataSequenceNumber: 1,
                    attestationSubnets:     []uint64{40, 41},
                },
            },
        },
        {
            name: "metadata updated",
            svcBuilder: func(t *testing.T) *Service {
                port := 2000
                ipAddr, pkey := createAddrAndPrivKey(t)
                s := &Service{
                    genesisTime:           time.Now(),
                    genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
                    cfg:                   &Config{UDPPort: uint(port)},
                }
                listener, err := s.createListener(ipAddr, pkey)
                assert.NoError(t, err)
                s.dv5Listener = listener
                s.metaData = wrapper.WrappedMetadataV0(new(ethpb.MetaDataV0))
                s.updateSubnetRecordWithMetadata([]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01})
                cache.SubnetIDs.AddPersistentCommittee([]uint64{1, 2, 3, 23}, 0)
                return s
            },
            postValidation: func(t *testing.T, s *Service) {
                assert.DeepEqual(t, bitfield.Bitvector64{0xe, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0}, s.metaData.AttnetsBitfield())
            name:              "Altair",
            epochSinceGenesis: altairForkEpoch,
            checks: []check{
                {
                    pingCount:              0,
                    metadataSequenceNumber: 0,
                    attestationSubnets:     []uint64{},
                    syncSubnets:            nil,
                },
                {
                    pingCount:              1,
                    metadataSequenceNumber: 1,
                    attestationSubnets:     []uint64{40, 41},
                    syncSubnets:            nil,
                },
                {
                    pingCount:              2,
                    metadataSequenceNumber: 2,
                    attestationSubnets:     []uint64{40, 41},
                    syncSubnets:            []uint64{1, 2},
                },
                {
                    pingCount:              2,
                    metadataSequenceNumber: 2,
                    attestationSubnets:     []uint64{40, 41},
                    syncSubnets:            []uint64{1, 2},
                },
            },
        },
        {
            name: "metadata updated at fork epoch",
            svcBuilder: func(t *testing.T) *Service {
                port := 2000
                ipAddr, pkey := createAddrAndPrivKey(t)
                s := &Service{
                    genesisTime:           time.Now().Add(-5 * oneEpochDuration()),
                    genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
                    cfg:                   &Config{UDPPort: uint(port)},
                }
                listener, err := s.createListener(ipAddr, pkey)
                assert.NoError(t, err)

                // Update params
                cfg := params.BeaconConfig().Copy()
                cfg.AltairForkEpoch = 5
                params.OverrideBeaconConfig(cfg)
                params.BeaconConfig().InitializeForkSchedule()

                s.dv5Listener = listener
                s.metaData = wrapper.WrappedMetadataV0(new(ethpb.MetaDataV0))
                s.updateSubnetRecordWithMetadata([]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01})
                cache.SubnetIDs.AddPersistentCommittee([]uint64{1, 2, 3, 23}, 0)
                return s
            },
            postValidation: func(t *testing.T, s *Service) {
                assert.Equal(t, version.Altair, s.metaData.Version())
                assert.DeepEqual(t, bitfield.Bitvector4{0x00}, s.metaData.MetadataObjV1().Syncnets)
                assert.DeepEqual(t, bitfield.Bitvector64{0xe, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0}, s.metaData.AttnetsBitfield())
            },
        },
        {
            name: "metadata updated at fork epoch with no bitfield",
            svcBuilder: func(t *testing.T) *Service {
                port := 2000
                ipAddr, pkey := createAddrAndPrivKey(t)
                s := &Service{
                    genesisTime:           time.Now().Add(-5 * oneEpochDuration()),
                    genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
                    cfg:                   &Config{UDPPort: uint(port)},
                }
                listener, err := s.createListener(ipAddr, pkey)
                assert.NoError(t, err)

                // Update params
                cfg := params.BeaconConfig().Copy()
                cfg.AltairForkEpoch = 5
                params.OverrideBeaconConfig(cfg)
                params.BeaconConfig().InitializeForkSchedule()

                s.dv5Listener = listener
                s.metaData = wrapper.WrappedMetadataV0(new(ethpb.MetaDataV0))
                s.updateSubnetRecordWithMetadata([]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00})
                return s
            },
            postValidation: func(t *testing.T, s *Service) {
                assert.Equal(t, version.Altair, s.metaData.Version())
                assert.DeepEqual(t, bitfield.Bitvector4{0x00}, s.metaData.MetadataObjV1().Syncnets)
                currEpoch := slots.ToEpoch(slots.CurrentSlot(uint64(s.genesisTime.Unix())))
                subs, err := computeSubscribedSubnets(s.dv5Listener.LocalNode().ID(), currEpoch)
                assert.NoError(t, err)

                bitV := bitfield.NewBitvector64()
                for _, idx := range subs {
                    bitV.SetBitAt(idx, true)
                }
                assert.DeepEqual(t, bitV, s.metaData.AttnetsBitfield())
            },
        },
        {
            name: "metadata updated past fork epoch with bitfields",
            svcBuilder: func(t *testing.T) *Service {
                port := 2000
                ipAddr, pkey := createAddrAndPrivKey(t)
                s := &Service{
                    genesisTime:           time.Now().Add(-6 * oneEpochDuration()),
                    genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
                    cfg:                   &Config{UDPPort: uint(port)},
                }
                listener, err := s.createListener(ipAddr, pkey)
                assert.NoError(t, err)

                // Update params
                cfg := params.BeaconConfig().Copy()
                cfg.AltairForkEpoch = 5
                params.OverrideBeaconConfig(cfg)
                params.BeaconConfig().InitializeForkSchedule()

                s.dv5Listener = listener
                s.metaData = wrapper.WrappedMetadataV0(new(ethpb.MetaDataV0))
                s.updateSubnetRecordWithMetadata([]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00})
                cache.SubnetIDs.AddPersistentCommittee([]uint64{1, 2, 3, 23}, 0)
                cache.SyncSubnetIDs.AddSyncCommitteeSubnets([]byte{'A'}, 0, []uint64{0, 1}, 0)
                return s
            },
            postValidation: func(t *testing.T, s *Service) {
                assert.Equal(t, version.Altair, s.metaData.Version())
                assert.DeepEqual(t, bitfield.Bitvector4{0x03}, s.metaData.MetadataObjV1().Syncnets)
                assert.DeepEqual(t, bitfield.Bitvector64{0xe, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0}, s.metaData.AttnetsBitfield())
            name:              "PeerDAS",
            epochSinceGenesis: eip7594ForkEpoch,
            checks: []check{
                {
                    pingCount:              0,
                    metadataSequenceNumber: 0,
                    attestationSubnets:     []uint64{},
                    syncSubnets:            nil,
                },
                {
                    pingCount:              1,
                    metadataSequenceNumber: 1,
                    attestationSubnets:     []uint64{40, 41},
                    syncSubnets:            nil,
                    custodySubnetCount:     &custodySubnetCount,
                },
                {
                    pingCount:              2,
                    metadataSequenceNumber: 2,
                    attestationSubnets:     []uint64{40, 41},
                    syncSubnets:            []uint64{1, 2},
                    custodySubnetCount:     &custodySubnetCount,
                },
                {
                    pingCount:              2,
                    metadataSequenceNumber: 2,
                    attestationSubnets:     []uint64{40, 41},
                    syncSubnets:            []uint64{1, 2},
                    custodySubnetCount:     &custodySubnetCount,
                },
            },
        },
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            s := tt.svcBuilder(t)
            s.RefreshENR()
            tt.postValidation(t, s)
            s.dv5Listener.Close()

    for _, tc := range testCases {
        t.Run(tc.name, func(t *testing.T) {
            const peerOffset = 1

            // Initialize the ping count.
            actualPingCount = 0

            // Create the private key.
            privateKeyBytes := make([]byte, 32)
            for i := 0; i < 32; i++ {
                privateKeyBytes[i] = byte(i)
            }

            unmarshalledPrivateKey, err := crypto.UnmarshalSecp256k1PrivateKey(privateKeyBytes)
            require.NoError(t, err)

            privateKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(unmarshalledPrivateKey)
            require.NoError(t, err)

            // Create a p2p service.
            p2p := testp2p.NewTestP2P(t)

            // Create and connect a peer.
            createAndConnectPeer(t, p2p, peerOffset)

            // Create a service.
            service := &Service{
                pingMethod: func(_ context.Context, _ peer.ID) error {
                    actualPingCount++
                    return nil
                },
                cfg:                   &Config{UDPPort: 2000},
                peers:                 p2p.Peers(),
                genesisTime:           time.Now().Add(-time.Duration(tc.epochSinceGenesis*secondsPerEpoch) * time.Second),
                genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
            }

            // Create a listener.
            listener, err := service.createListener(nil, privateKey)
            require.NoError(t, err)

            // Set the listener and the metadata.
            service.dv5Listener = listener
            service.metaData = wrapper.WrappedMetadataV0(new(ethpb.MetaDataV0))

            // Run a check.
            checkPingCountCacheMetadataRecord(t, service, tc.checks[0])

            // Refresh the persistent subnets.
            service.RefreshPersistentSubnets()
            time.Sleep(10 * time.Millisecond)

            // Run a check.
            checkPingCountCacheMetadataRecord(t, service, tc.checks[1])

            // Add a sync committee subnet.
            cache.SyncSubnetIDs.AddSyncCommitteeSubnets([]byte{'a'}, altairForkEpoch, []uint64{1, 2}, 1*time.Hour)

            // Refresh the persistent subnets.
            service.RefreshPersistentSubnets()
            time.Sleep(10 * time.Millisecond)

            // Run a check.
            checkPingCountCacheMetadataRecord(t, service, tc.checks[2])

            // Refresh the persistent subnets.
            service.RefreshPersistentSubnets()
            time.Sleep(10 * time.Millisecond)

            // Run a check.
            checkPingCountCacheMetadataRecord(t, service, tc.checks[3])

            // Clean the test.
            service.dv5Listener.Close()
            cache.SubnetIDs.EmptyAllCaches()
            cache.SyncSubnetIDs.EmptyAllCaches()
        })
    }

    // Reset the config.
    params.OverrideBeaconConfig(defaultCfg)
}
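The test selects Phase0, Altair, or PeerDAS behaviour purely by back-dating genesisTime: placing genesis N epochs in the past pins the node's current epoch to N, on either side of the configured fork epochs. A standalone sketch of that arithmetic, assuming mainnet-style timing (12-second slots, 32 slots per epoch):

package main

import (
    "fmt"
    "time"
)

func main() {
    // Mainnet-style values assumed: 12-second slots, 32 slots per epoch.
    const secondsPerEpoch = 12 * 32

    // Back-date genesis by n epochs, as the test does via epochSinceGenesis.
    const n = 5
    genesisTime := time.Now().Add(-time.Duration(n*secondsPerEpoch) * time.Second)

    // The current epoch is then the number of whole epochs since genesis.
    currentEpoch := uint64(time.Since(genesisTime).Seconds()) / secondsPerEpoch
    fmt.Println(currentEpoch) // 5
}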
@@ -121,7 +121,7 @@ func (s *Service) topicScoreParams(topic string) (*pubsub.TopicScoreParams, error) {
        return defaultAttesterSlashingTopicParams(), nil
    case strings.Contains(topic, GossipBlsToExecutionChangeMessage):
        return defaultBlsToExecutionChangeTopicParams(), nil
    case strings.Contains(topic, GossipBlobSidecarMessage):
    case strings.Contains(topic, GossipBlobSidecarMessage), strings.Contains(topic, GossipDataColumnSidecarMessage):
        // TODO(Deneb): Using the default block scoring. But this should be updated.
        return defaultBlockTopicParams(), nil
    default:
Some files were not shown because too many files have changed in this diff.