Mirror of https://github.com/OffchainLabs/prysm.git, synced 2026-01-10 22:07:59 -05:00

Compare commits: lost-data-...disablingD (62 commits)
Commits:

c979fb57cc, f4f596aa05, bc3ff69448, 8055b5e639, 3fb1fa47d1, 158f4aec9d,
addb69b804, cf029aaadf, ca92a955b8, 7945cb8477, bb803a2cf4, 42c6bbc4ec,
1f91169554, bc1e0d1dd5, ec7bbc856b, 00daa4ad23, e05217bcef, 036c10a12f,
4329c511ab, e6cb7d032f, 625cb88d78, bc428fe3d7, 2bbab6d945, ecd37bf584,
6be17b151b, 769f4bd69f, a713d69f6e, e92b0e9cc2, 81bad0ec82, 889396c44c,
c2d2d8d727, 89871ec39d, 5d85e47968, ded08c87e5, 69a2a7e71d, 6653f835b1,
364c95362b, 85f9820e31, ab0cd8e0b6, 2e82b31d2e, a42d48265d, da9a60427a,
b2b4efe84d, 5083b23483, 74e2fb43e3, ab5da74e73, aaf69101e1, fa1b02926b,
dc88dcea92, 5784e94dc0, 3d2cf28624, e2e68f57e0, 581f8f505e, 61208142ff,
a910c370e2, b4536d4494, e176dfa613, b049018c0f, fe8c86e0db, 1eb0e50746,
d8bcce7689, ff15d86ff3
.github/PULL_REQUEST_TEMPLATE.md (vendored, 7 lines changed)

@@ -10,7 +10,6 @@
 in review.
 4. Note that PRs updating dependencies and new Go versions are not accepted.
    Please file an issue instead.
-5. A changelog entry is required for user facing issues.
 -->

 **What type of PR is this?**
@@ -29,9 +28,3 @@
 Fixes #

 **Other notes for review**
-
-**Acknowledgements**
-
-- [ ] I have read [CONTRIBUTING.md](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md).
-- [ ] I have made an appropriate entry to [CHANGELOG.md](https://github.com/prysmaticlabs/prysm/blob/develop/CHANGELOG.md).
-- [ ] I have added a description to this PR with sufficient context for reviewers to understand this PR.
.github/workflows/changelog.yml (vendored, 33 lines changed, file deleted)

@@ -1,33 +0,0 @@
-name: CI
-
-on:
-  pull_request:
-    branches:
-      - develop
-
-jobs:
-  changed_files:
-    runs-on: ubuntu-latest
-    name: Check CHANGELOG.md
-    steps:
-      - uses: actions/checkout@v4
-      - name: changelog modified
-        id: changelog-modified
-        uses: tj-actions/changed-files@v45
-        with:
-          files: CHANGELOG.md
-
-      - name: List all changed files
-        env:
-          ALL_CHANGED_FILES: ${{ steps.changelog-modified.outputs.all_changed_files }}
-        run: |
-          if [[ ${ALL_CHANGED_FILES[*]} =~ (^|[[:space:]])"CHANGELOG.md"($|[[:space:]]) ]];
-          then
-            echo "CHANGELOG.md was modified.";
-            exit 0;
-          else
-            echo "CHANGELOG.md was not modified.";
-            echo "Please see CHANGELOG.md and follow the instructions to add your changes to that file."
-            echo "In some rare scenarios, a changelog entry is not required and this CI check can be ignored."
-            exit 1;
-          fi
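The deleted workflow's shell step tests whether CHANGELOG.md appears in a whitespace-separated list of changed file paths. A minimal Go sketch of the same membership check, for readers unpicking the bash regex (the file lists here are illustrative, not from the workflow):

```go
package main

import (
	"fmt"
	"slices"
	"strings"
)

// changelogModified mimics the workflow's test: ALL_CHANGED_FILES is a single
// whitespace-separated string of paths, and the regex
// (^|[[:space:]])"CHANGELOG.md"($|[[:space:]]) is an exact-token membership test.
func changelogModified(changed string) bool {
	return slices.Contains(strings.Fields(changed), "CHANGELOG.md")
}

func main() {
	fmt.Println(changelogModified("README.md CHANGELOG.md main.go")) // true
	fmt.Println(changelogModified("README.md docs/CHANGELOG.md"))    // false: not an exact token match
}
```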
.github/workflows/go.yml (vendored, 22 lines changed)

@@ -14,7 +14,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v2

       - name: Go mod tidy checker
         id: gomodtidy
@@ -27,11 +27,11 @@ jobs:
       GO111MODULE: on
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v2
       - name: Set up Go 1.22
-        uses: actions/setup-go@v4
+        uses: actions/setup-go@v3
         with:
-          go-version: '1.22.6'
+          go-version: '1.22.4'
       - name: Run Gosec Security Scanner
         run: | # https://github.com/securego/gosec/issues/469
           export PATH=$PATH:$(go env GOPATH)/bin
@@ -43,16 +43,16 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v2

       - name: Set up Go 1.22
-        uses: actions/setup-go@v4
+        uses: actions/setup-go@v3
         with:
-          go-version: '1.22.6'
+          go-version: '1.22.4'
         id: go

       - name: Golangci-lint
-        uses: golangci/golangci-lint-action@v5
+        uses: golangci/golangci-lint-action@v3
         with:
           version: v1.56.1
           args: --config=.golangci.yml --out-${NO_FUTURE}format colored-line-number
@@ -62,13 +62,13 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Set up Go 1.x
-        uses: actions/setup-go@v4
+        uses: actions/setup-go@v2
         with:
-          go-version: '1.22.6'
+          go-version: '1.22.4'
         id: go

       - name: Check out code into the Go module directory
-        uses: actions/checkout@v4
+        uses: actions/checkout@v2

       - name: Get dependencies
         run: |

.golangci.yml

@@ -6,7 +6,7 @@ run:
     - proto
     - tools/analyzers
   timeout: 10m
-  go: '1.22.6'
+  go: '1.22.4'

 linters:
   enable-all: true
CHANGELOG.md (2582 lines changed): file diff suppressed because it is too large.
CONTRIBUTING.md

@@ -6,9 +6,6 @@ Excited by our work and want to get involved in building out our sharding releas
 You can explore our [Open Issues](https://github.com/prysmaticlabs/prysm/issues) in-the works for our different releases. Feel free to fork our repo and start creating PR’s after assigning yourself to an issue of interest. We are always chatting on [Discord](https://discord.gg/CTYGPUJ) drop us a line there if you want to get more involved or have any questions on our implementation!

-> [!IMPORTANT]
-> Please, **do not send pull requests for trivial changes**, such as typos, these will be rejected. These types of pull requests incur a cost to reviewers and do not provide much value to the project. If you are unsure, please open an issue first to discuss the change.
-
 ## Contribution Steps

 **1. Set up Prysm following the instructions in README.md.**
@@ -123,19 +120,15 @@ $ git push myrepo feature-in-progress-branch

 Navigate to your fork of the repo on GitHub. On the upper left where the current branch is listed, change the branch to your feature-in-progress-branch. Open the files that you have worked on and check to make sure they include your changes.

-**16. Add an entry to CHANGELOG.md.**
+**16. Create a pull request.**

-If your change is user facing, you must include a CHANGELOG.md entry. See the [Maintaining CHANGELOG.md](#maintaining-changelogmd) section for more information.
+Navigate your browser to https://github.com/prysmaticlabs/prysm and click on the new pull request button. In the “base” box on the left, leave the default selection “base master”, the branch that you want your changes to be applied to. In the “compare” box on the right, select feature-in-progress-branch, the branch containing the changes you want to apply. You will then be asked to answer a few questions about your pull request. After you complete the questionnaire, the pull request will appear in the list of pull requests at https://github.com/prysmaticlabs/prysm/pulls.

-**17. Create a pull request.**
-
-Navigate your browser to https://github.com/prysmaticlabs/prysm and click on the new pull request button. In the “base” box on the left, leave the default selection “base master”, the branch that you want your changes to be applied to. In the “compare” box on the right, select feature-in-progress-branch, the branch containing the changes you want to apply. You will then be asked to answer a few questions about your pull request. After you complete the questionnaire, the pull request will appear in the list of pull requests at https://github.com/prysmaticlabs/prysm/pulls. Ensure that you have added an entry to CHANGELOG.md if your PR is a user-facing change. See the [Maintaining CHANGELOG.md](#maintaining-changelogmd) section for more information.
-
-**18. Respond to comments by Core Contributors.**
+**17. Respond to comments by Core Contributors.**

 Core Contributors may ask questions and request that you make edits. If you set notifications at the top of the page to “not watching,” you will still be notified by email whenever someone comments on the page of a pull request you have created. If you are asked to modify your pull request, repeat steps 8 through 15, then leave a comment to notify the Core Contributors that the pull request is ready for further review.

-**19. If the number of commits becomes excessive, you may be asked to squash your commits.**
+**18. If the number of commits becomes excessive, you may be asked to squash your commits.**

 You can do this with an interactive rebase. Start by running the following command to determine the commit that is the base of your branch...
@@ -143,7 +136,7 @@ Core Contributors may ask questions and request that you make edits. If you set
 $ git merge-base feature-in-progress-branch prysm/master
 ```

-**20. The previous command will return a commit-hash that you should use in the following command.**
+**19. The previous command will return a commit-hash that you should use in the following command.**

 ```
 $ git rebase -i commit-hash
@@ -167,30 +160,13 @@ squash hash add a feature

 Save and close the file, then a commit command will appear in the terminal that squashes the smaller commits into one. Check to be sure the commit message accurately reflects your changes and then hit enter to execute it.

-**21. Update your pull request with the following command.**
+**20. Update your pull request with the following command.**

 ```
 $ git push myrepo feature-in-progress-branch -f
 ```

-**22. Finally, again leave a comment to the Core Contributors on the pull request to let them know that the pull request has been updated.**
-
-## Maintaining CHANGELOG.md
-
-This project follows the changelog guidelines from [keepachangelog.com](https://keepachangelog.com/en/1.1.0/).
-
-All PRs with user facing changes should have an entry in the CHANGELOG.md file and the change should be categorized in the appropriate category within the "Unreleased" section. The categories are:
-
-- `Added` for new features.
-- `Changed` for changes in existing functionality.
-- `Deprecated` for soon-to-be removed features.
-- `Removed` for now removed features.
-- `Fixed` for any bug fixes.
-- `Security` in case of vulnerabilities. Please see the [Security Policy](SECURITY.md) for responsible disclosure before adding a change with this category.
-
-### Releasing
-
-When a new release is made, the "Unreleased" section should be moved to a new section with the release version and the current date. Then a new "Unreleased" section is made at the top of the file with the categories listed above.
+**21. Finally, again leave a comment to the Core Contributors on the pull request to let them know that the pull request has been updated.**

 ## Contributor Responsibilities
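The removed "Maintaining CHANGELOG.md" section describes the entry format only in prose. For reference, a hypothetical "Unreleased" fragment in the keepachangelog style it mandates (the item text and PR number are invented for illustration):

```
## [Unreleased]

### Added

- Added a light client bootstrap endpoint. (#0000)

### Fixed

- Fixed a flaky data column availability check. (#0000)
```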
@@ -21,7 +21,6 @@ go_library(
         "//encoding/bytesutil:go_default_library",
         "//math:go_default_library",
         "//monitoring/tracing:go_default_library",
-        "//monitoring/tracing/trace:go_default_library",
         "//proto/engine/v1:go_default_library",
         "//proto/prysm/v1alpha1:go_default_library",
         "//runtime/version:go_default_library",
@@ -29,6 +28,7 @@ go_library(
         "@com_github_pkg_errors//:go_default_library",
         "@com_github_prysmaticlabs_fastssz//:go_default_library",
         "@com_github_sirupsen_logrus//:go_default_library",
+        "@io_opencensus_go//trace:go_default_library",
         "@org_golang_google_protobuf//proto:go_default_library",
     ],
 )
@@ -19,11 +19,11 @@ import (
 	"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
 	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
 	"github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
-	"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
 	v1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
 	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
 	"github.com/prysmaticlabs/prysm/v5/runtime/version"
 	log "github.com/sirupsen/logrus"
+	"go.opencensus.io/trace"
 )

 const (
@@ -1,16 +1,12 @@
 package structs

-type LightClientHeader struct {
-	Beacon *BeaconBlockHeader `json:"beacon"`
-}
-
 type LightClientBootstrapResponse struct {
 	Version string                `json:"version"`
 	Data    *LightClientBootstrap `json:"data"`
 }

 type LightClientBootstrap struct {
-	Header                     *LightClientHeader `json:"header"`
+	Header                     *BeaconBlockHeader `json:"header"`
 	CurrentSyncCommittee       *SyncCommittee     `json:"current_sync_committee"`
 	CurrentSyncCommitteeBranch []string           `json:"current_sync_committee_branch"`
 }
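The Header type swap above changes the JSON wire shape of the bootstrap: with the wrapper type the header is nested under a "beacon" key, while with *BeaconBlockHeader it is flat. A self-contained sketch of the difference (the structs are trimmed to one illustrative field, not the full API types):

```go
package main

import (
	"encoding/json"
	"fmt"
)

type BeaconBlockHeader struct {
	Slot string `json:"slot"`
}

// LightClientHeader is the wrapper form: the beacon header nests one level deeper.
type LightClientHeader struct {
	Beacon *BeaconBlockHeader `json:"beacon"`
}

func main() {
	hdr := &BeaconBlockHeader{Slot: "123"}

	wrapped, _ := json.Marshal(struct {
		Header *LightClientHeader `json:"header"`
	}{Header: &LightClientHeader{Beacon: hdr}})

	flat, _ := json.Marshal(struct {
		Header *BeaconBlockHeader `json:"header"`
	}{Header: hdr})

	fmt.Println(string(wrapped)) // {"header":{"beacon":{"slot":"123"}}}
	fmt.Println(string(flat))    // {"header":{"slot":"123"}}
}
```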
@@ -13,6 +13,7 @@ go_library(
         "head.go",
         "head_sync_committee_info.go",
         "init_sync_process_block.go",
+        "lightclient.go",
         "log.go",
         "merge_ascii_art.go",
         "metrics.go",
@@ -48,7 +49,6 @@ go_library(
         "//beacon-chain/core/feed:go_default_library",
         "//beacon-chain/core/feed/state:go_default_library",
         "//beacon-chain/core/helpers:go_default_library",
-        "//beacon-chain/core/light-client:go_default_library",
         "//beacon-chain/core/peerdas:go_default_library",
         "//beacon-chain/core/signing:go_default_library",
         "//beacon-chain/core/time:go_default_library",
@@ -83,10 +83,10 @@ go_library(
         "//encoding/bytesutil:go_default_library",
         "//math:go_default_library",
         "//monitoring/tracing:go_default_library",
-        "//monitoring/tracing/trace:go_default_library",
         "//proto/engine/v1:go_default_library",
         "//proto/eth/v1:go_default_library",
         "//proto/eth/v2:go_default_library",
+        "//proto/migration:go_default_library",
         "//proto/prysm/v1alpha1:go_default_library",
         "//proto/prysm/v1alpha1/attestation:go_default_library",
         "//runtime/version:go_default_library",
@@ -119,6 +119,7 @@ go_test(
         "head_test.go",
         "init_sync_process_block_test.go",
         "init_test.go",
+        "lightclient_test.go",
         "log_test.go",
         "metrics_test.go",
         "mock_test.go",
@@ -176,6 +177,7 @@ go_test(
         "//encoding/bytesutil:go_default_library",
         "//proto/engine/v1:go_default_library",
+        "//proto/eth/v1:go_default_library",
         "//proto/eth/v2:go_default_library",
         "//proto/prysm/v1alpha1:go_default_library",
         "//runtime/version:go_default_library",
         "//testing/assert:go_default_library",
@@ -16,9 +16,9 @@ import (
 	"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
 	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
 	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
-	"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
 	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
 	"github.com/prysmaticlabs/prysm/v5/time/slots"
+	"go.opencensus.io/trace"
 )

 // ChainInfoFetcher defines a common interface for methods in blockchain service which
@@ -21,11 +21,11 @@ import (
 	payloadattribute "github.com/prysmaticlabs/prysm/v5/consensus-types/payload-attribute"
 	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
 	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
-	"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
 	enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
 	"github.com/prysmaticlabs/prysm/v5/runtime/version"
 	"github.com/prysmaticlabs/prysm/v5/time/slots"
 	"github.com/sirupsen/logrus"
+	"go.opencensus.io/trace"
 )

 const blobCommitmentVersionKZG uint8 = 0x01
@@ -18,11 +18,11 @@ import (
 	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
 	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
 	"github.com/prysmaticlabs/prysm/v5/math"
-	"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
 	ethpbv1 "github.com/prysmaticlabs/prysm/v5/proto/eth/v1"
 	"github.com/prysmaticlabs/prysm/v5/runtime/version"
 	"github.com/prysmaticlabs/prysm/v5/time/slots"
 	"github.com/sirupsen/logrus"
+	"go.opencensus.io/trace"
 )

 // UpdateAndSaveHeadWithBalances updates the beacon state head after getting justified balanced from cache.
@@ -53,7 +53,7 @@ func Start() error {
 	}
 	if !kzgLoaded {
 		// TODO: Provide a configuration option for this.
-		var precompute uint = 8
+		var precompute uint = 0

 		// Free the current trusted setup before running this method. CKZG
 		// panics if the same setup is run multiple times.
@@ -1,20 +1,18 @@
-package light_client
+package blockchain

 import (
 	"bytes"
-	"context"
 	"fmt"

 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
 	"github.com/prysmaticlabs/prysm/v5/config/params"
 	"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
-	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
 	ethpbv1 "github.com/prysmaticlabs/prysm/v5/proto/eth/v1"
 	ethpbv2 "github.com/prysmaticlabs/prysm/v5/proto/eth/v2"
 	"github.com/prysmaticlabs/prysm/v5/proto/migration"
 	"github.com/prysmaticlabs/prysm/v5/time/slots"
+
+	"context"
+
+	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
 )

 const (
beacon-chain/blockchain/lightclient_test.go (new file, 160 lines)

@@ -0,0 +1,160 @@
+package blockchain
+
+import (
+	"context"
+	"testing"
+
+	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
+	"github.com/prysmaticlabs/prysm/v5/config/params"
+	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
+	"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
+	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
+	v1 "github.com/prysmaticlabs/prysm/v5/proto/eth/v1"
+	ethpbv2 "github.com/prysmaticlabs/prysm/v5/proto/eth/v2"
+	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
+	"github.com/prysmaticlabs/prysm/v5/testing/require"
+	"github.com/prysmaticlabs/prysm/v5/testing/util"
+)
+
+type testlc struct {
+	t              *testing.T
+	ctx            context.Context
+	state          state.BeaconState
+	block          interfaces.ReadOnlySignedBeaconBlock
+	attestedState  state.BeaconState
+	attestedHeader *ethpb.BeaconBlockHeader
+}
+
+func newTestLc(t *testing.T) *testlc {
+	return &testlc{t: t}
+}
+
+func (l *testlc) setupTest() *testlc {
+	ctx := context.Background()
+
+	slot := primitives.Slot(params.BeaconConfig().AltairForkEpoch * primitives.Epoch(params.BeaconConfig().SlotsPerEpoch)).Add(1)
+
+	attestedState, err := util.NewBeaconStateCapella()
+	require.NoError(l.t, err)
+	err = attestedState.SetSlot(slot)
+	require.NoError(l.t, err)
+
+	parent := util.NewBeaconBlockCapella()
+	parent.Block.Slot = slot
+
+	signedParent, err := blocks.NewSignedBeaconBlock(parent)
+	require.NoError(l.t, err)
+
+	parentHeader, err := signedParent.Header()
+	require.NoError(l.t, err)
+	attestedHeader := parentHeader.Header
+
+	err = attestedState.SetLatestBlockHeader(attestedHeader)
+	require.NoError(l.t, err)
+	attestedStateRoot, err := attestedState.HashTreeRoot(ctx)
+	require.NoError(l.t, err)
+
+	// get a new signed block so the root is updated with the new state root
+	parent.Block.StateRoot = attestedStateRoot[:]
+	signedParent, err = blocks.NewSignedBeaconBlock(parent)
+	require.NoError(l.t, err)
+
+	state, err := util.NewBeaconStateCapella()
+	require.NoError(l.t, err)
+	err = state.SetSlot(slot)
+	require.NoError(l.t, err)
+
+	parentRoot, err := signedParent.Block().HashTreeRoot()
+	require.NoError(l.t, err)
+
+	block := util.NewBeaconBlockCapella()
+	block.Block.Slot = slot
+	block.Block.ParentRoot = parentRoot[:]
+
+	for i := uint64(0); i < params.BeaconConfig().MinSyncCommitteeParticipants; i++ {
+		block.Block.Body.SyncAggregate.SyncCommitteeBits.SetBitAt(i, true)
+	}
+
+	signedBlock, err := blocks.NewSignedBeaconBlock(block)
+	require.NoError(l.t, err)
+
+	h, err := signedBlock.Header()
+	require.NoError(l.t, err)
+
+	err = state.SetLatestBlockHeader(h.Header)
+	require.NoError(l.t, err)
+	stateRoot, err := state.HashTreeRoot(ctx)
+	require.NoError(l.t, err)
+
+	// get a new signed block so the root is updated with the new state root
+	block.Block.StateRoot = stateRoot[:]
+	signedBlock, err = blocks.NewSignedBeaconBlock(block)
+	require.NoError(l.t, err)
+
+	l.state = state
+	l.attestedState = attestedState
+	l.attestedHeader = attestedHeader
+	l.block = signedBlock
+	l.ctx = ctx
+
+	return l
+}
+
+func (l *testlc) checkAttestedHeader(update *ethpbv2.LightClientUpdate) {
+	require.Equal(l.t, l.attestedHeader.Slot, update.AttestedHeader.Slot, "Attested header slot is not equal")
+	require.Equal(l.t, l.attestedHeader.ProposerIndex, update.AttestedHeader.ProposerIndex, "Attested header proposer index is not equal")
+	require.DeepSSZEqual(l.t, l.attestedHeader.ParentRoot, update.AttestedHeader.ParentRoot, "Attested header parent root is not equal")
+	require.DeepSSZEqual(l.t, l.attestedHeader.BodyRoot, update.AttestedHeader.BodyRoot, "Attested header body root is not equal")
+
+	attestedStateRoot, err := l.attestedState.HashTreeRoot(l.ctx)
+	require.NoError(l.t, err)
+	require.DeepSSZEqual(l.t, attestedStateRoot[:], update.AttestedHeader.StateRoot, "Attested header state root is not equal")
+}
+
+func (l *testlc) checkSyncAggregate(update *ethpbv2.LightClientUpdate) {
+	syncAggregate, err := l.block.Block().Body().SyncAggregate()
+	require.NoError(l.t, err)
+	require.DeepSSZEqual(l.t, syncAggregate.SyncCommitteeBits, update.SyncAggregate.SyncCommitteeBits, "SyncAggregate bits is not equal")
+	require.DeepSSZEqual(l.t, syncAggregate.SyncCommitteeSignature, update.SyncAggregate.SyncCommitteeSignature, "SyncAggregate signature is not equal")
+}
+
+func TestLightClient_NewLightClientOptimisticUpdateFromBeaconState(t *testing.T) {
+	l := newTestLc(t).setupTest()
+
+	update, err := NewLightClientOptimisticUpdateFromBeaconState(l.ctx, l.state, l.block, l.attestedState)
+	require.NoError(t, err)
+	require.NotNil(t, update, "update is nil")
+
+	require.Equal(t, l.block.Block().Slot(), update.SignatureSlot, "Signature slot is not equal")
+
+	l.checkSyncAggregate(update)
+	l.checkAttestedHeader(update)
+
+	require.Equal(t, (*v1.BeaconBlockHeader)(nil), update.FinalizedHeader, "Finalized header is not nil")
+	require.DeepSSZEqual(t, ([][]byte)(nil), update.FinalityBranch, "Finality branch is not nil")
+}
+
+func TestLightClient_NewLightClientFinalityUpdateFromBeaconState(t *testing.T) {
+	l := newTestLc(t).setupTest()
+
+	update, err := NewLightClientFinalityUpdateFromBeaconState(l.ctx, l.state, l.block, l.attestedState, nil)
+	require.NoError(t, err)
+	require.NotNil(t, update, "update is nil")
+
+	require.Equal(t, l.block.Block().Slot(), update.SignatureSlot, "Signature slot is not equal")
+
+	l.checkSyncAggregate(update)
+	l.checkAttestedHeader(update)
+
+	zeroHash := params.BeaconConfig().ZeroHash[:]
+	require.NotNil(t, update.FinalizedHeader, "Finalized header is nil")
+	require.Equal(t, primitives.Slot(0), update.FinalizedHeader.Slot, "Finalized header slot is not zero")
+	require.Equal(t, primitives.ValidatorIndex(0), update.FinalizedHeader.ProposerIndex, "Finalized header proposer index is not zero")
+	require.DeepSSZEqual(t, zeroHash, update.FinalizedHeader.ParentRoot, "Finalized header parent root is not zero")
+	require.DeepSSZEqual(t, zeroHash, update.FinalizedHeader.StateRoot, "Finalized header state root is not zero")
+	require.DeepSSZEqual(t, zeroHash, update.FinalizedHeader.BodyRoot, "Finalized header body root is not zero")
+	require.Equal(t, FinalityBranchNumOfLeaves, len(update.FinalityBranch), "Invalid finality branch leaves")
+	for _, leaf := range update.FinalityBranch {
+		require.DeepSSZEqual(t, zeroHash, leaf, "Leaf is not zero")
+	}
+}
@@ -3,10 +3,12 @@ package blockchain
 import (
 	"context"
 	"fmt"
-	"slices"
 	"time"

 	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+	"go.opencensus.io/trace"

 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed"
 	statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
@@ -27,12 +29,10 @@ import (
 	"github.com/prysmaticlabs/prysm/v5/crypto/bls"
 	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
 	"github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
-	"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
 	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
 	"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
 	"github.com/prysmaticlabs/prysm/v5/runtime/version"
 	"github.com/prysmaticlabs/prysm/v5/time/slots"
-	"github.com/sirupsen/logrus"
 )

 // A custom slot deadline for processing state slots in our cache.
@@ -546,9 +546,8 @@ func missingDataColumns(bs *filesystem.BlobStorage, root [32]byte, expected map[
 // closed, the context hits cancellation/timeout, or notifications have been received for all the missing sidecars.
 func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed interfaces.ReadOnlySignedBeaconBlock) error {
 	if coreTime.PeerDASIsActive(signed.Block().Slot()) {
-		return s.areDataColumnsAvailable(ctx, root, signed)
+		return s.isDataColumnsAvailable(ctx, root, signed)
 	}
-
 	if signed.Version() < version.Deneb {
 		return nil
 	}
@@ -625,17 +624,7 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
 		}
 	}

-// uint64MapToSortedSlice produces a sorted uint64 slice from a map.
-func uint64MapToSortedSlice(input map[uint64]bool) []uint64 {
-	output := make([]uint64, 0, len(input))
-	for idx := range input {
-		output = append(output, idx)
-	}
-	slices.Sort[[]uint64](output)
-	return output
-}
-
-func (s *Service) areDataColumnsAvailable(ctx context.Context, root [32]byte, signed interfaces.ReadOnlySignedBeaconBlock) error {
+func (s *Service) isDataColumnsAvailable(ctx context.Context, root [32]byte, signed interfaces.ReadOnlySignedBeaconBlock) error {
 	if signed.Version() < version.Deneb {
 		return nil
 	}
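The helper removed above sorts a map's keys so that log output is deterministic. A standalone, runnable version of the same pattern:

```go
package main

import (
	"fmt"
	"slices"
)

// sortedKeys collects the keys of a set-like map into a slice and sorts them,
// giving stable output when the map is printed or logged.
func sortedKeys(input map[uint64]bool) []uint64 {
	output := make([]uint64, 0, len(input))
	for idx := range input {
		output = append(output, idx)
	}
	slices.Sort(output)
	return output
}

func main() {
	missing := map[uint64]bool{42: true, 7: true, 19: true}
	fmt.Println(sortedKeys(missing)) // [7 19 42]
}
```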
@@ -664,18 +653,14 @@ func (s *Service) areDataColumnsAvailable(ctx context.Context, root [32]byte, si
 		return nil
 	}

-	// All columns to sample need to be available for the block to be considered available.
-	// https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/das-core.md#subnet-sampling
-	nodeID := s.cfg.P2P.NodeID()
-	subnetSamplingSize := peerdas.SubnetSamplingSize()
-
-	colMap, err := peerdas.CustodyColumns(nodeID, subnetSamplingSize)
+	colMap, err := peerdas.CustodyColumns(s.cfg.P2P.NodeID(), peerdas.CustodySubnetCount())
 	if err != nil {
 		return errors.Wrap(err, "custody columns")
 	}

-	if len(colMap) == 0 {
+	// colMap represents the data columnns a node is expected to custody.
+	// Expected is the number of custody data columnns a node is expected to have.
+	expected := len(colMap)
+	if expected == 0 {
 		return nil
 	}
@@ -699,14 +684,14 @@ func (s *Service) areDataColumnsAvailable(ctx context.Context, root [32]byte, si
 	}

 	// Get a map of data column indices that are not currently available.
-	missingMap, err := missingDataColumns(s.blobStorage, root, colMap)
+	missing, err := missingDataColumns(s.blobStorage, root, colMap)
 	if err != nil {
 		return err
 	}

 	// If there are no missing indices, all data column sidecars are available.
 	// This is the happy path.
-	if len(missingMap) == 0 {
+	if len(missing) == 0 {
 		return nil
 	}
@@ -715,36 +700,17 @@ func (s *Service) areDataColumnsAvailable(ctx context.Context, root [32]byte, si
 	// Avoid logging if DA check is called after next slot start.
 	if nextSlot.After(time.Now()) {
 		nst := time.AfterFunc(time.Until(nextSlot), func() {
-			missingMapCount := uint64(len(missingMap))
-
-			if missingMapCount == 0 {
+			if len(missing) == 0 {
 				return
 			}

-			var (
-				expected interface{} = "all"
-				missing  interface{} = "all"
-			)
-
-			numberOfColumns := params.BeaconConfig().NumberOfColumns
-			colMapCount := uint64(len(colMap))
-
-			if colMapCount < numberOfColumns {
-				expected = uint64MapToSortedSlice(colMap)
-			}
-
-			if missingMapCount < numberOfColumns {
-				missing = uint64MapToSortedSlice(missingMap)
-			}
-
 			log.WithFields(logrus.Fields{
 				"slot": signed.Block().Slot(),
 				"root": fmt.Sprintf("%#x", root),
-				"columnsExpected": expected,
-				"columnsWaiting":  missing,
-			}).Error("Some data columns are still unavailable at slot end")
+				"columnsWaiting": len(missing),
+			}).Error("Still waiting for data columns DA check at slot end.")
 		})

 		defer nst.Stop()
 	}
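Both sides of this hunk rely on the same timing pattern: schedule a diagnostic callback for the slot boundary with time.AfterFunc, and cancel it with defer Stop once the wait finishes first. A minimal standalone sketch of the pattern (durations and messages are illustrative):

```go
package main

import (
	"fmt"
	"time"
)

func waitForData(deadline time.Time) {
	// Fire a diagnostic log only if we are still waiting when the deadline passes.
	t := time.AfterFunc(time.Until(deadline), func() {
		fmt.Println("still waiting at slot end")
	})
	// Cancel the timer when the wait returns first; a stopped timer never fires.
	defer t.Stop()

	time.Sleep(50 * time.Millisecond) // stand-in for the real wait loop
}

func main() {
	waitForData(time.Now().Add(time.Second))
	fmt.Println("done before deadline, no log emitted")
}
```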
@@ -757,7 +723,7 @@ func (s *Service) areDataColumnsAvailable(ctx context.Context, root [32]byte, si
 			}

 			// This is a data column we are expecting.
-			if _, ok := missingMap[rootIndex.Index]; ok {
+			if _, ok := missing[rootIndex.Index]; ok {
 				retrievedDataColumnsCount++
 			}
@@ -768,22 +734,19 @@ func (s *Service) areDataColumnsAvailable(ctx context.Context, root [32]byte, si
 			}

 			// Remove the index from the missing map.
-			delete(missingMap, rootIndex.Index)
+			delete(missing, rootIndex.Index)

 			// Exit if there is no more missing data columns.
-			if len(missingMap) == 0 {
+			if len(missing) == 0 {
 				return nil
 			}
 		case <-ctx.Done():
-			var missingIndices interface{} = "all"
-			numberOfColumns := params.BeaconConfig().NumberOfColumns
-			missingIndicesCount := uint64(len(missingMap))
-
-			if missingIndicesCount < numberOfColumns {
-				missingIndices = uint64MapToSortedSlice(missingMap)
+			missingIndexes := make([]uint64, 0, len(missing))
+			for val := range missing {
+				copiedVal := val
+				missingIndexes = append(missingIndexes, copiedVal)
 			}

-			return errors.Wrapf(ctx.Err(), "context deadline waiting for data column sidecars slot: %d, BlockRoot: %#x, missing %v", block.Slot(), root, missingIndices)
+			return errors.Wrapf(ctx.Err(), "context deadline waiting for data column sidecars slot: %d, BlockRoot: %#x, missing %v", block.Slot(), root, missingIndexes)
 		}
 	}
 }
@@ -5,8 +5,6 @@ import (
 	"fmt"
 	"time"

-	lightclient "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/light-client"
-
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/pkg/errors"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed"
@@ -22,11 +20,11 @@ import (
 	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
 	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
 	mathutil "github.com/prysmaticlabs/prysm/v5/math"
-	"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
 	ethpbv2 "github.com/prysmaticlabs/prysm/v5/proto/eth/v2"
 	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
 	"github.com/prysmaticlabs/prysm/v5/time/slots"
 	"github.com/sirupsen/logrus"
+	"go.opencensus.io/trace"
 )

 // CurrentSlot returns the current slot based on time.
@@ -178,7 +176,7 @@ func (s *Service) sendLightClientFinalityUpdate(ctx context.Context, signed inte
 		}
 	}

-	update, err := lightclient.NewLightClientFinalityUpdateFromBeaconState(
+	update, err := NewLightClientFinalityUpdateFromBeaconState(
 		ctx,
 		postState,
 		signed,
@@ -193,7 +191,7 @@ func (s *Service) sendLightClientFinalityUpdate(ctx context.Context, signed inte
 	// Return the result
 	result := &ethpbv2.LightClientFinalityUpdateWithVersion{
 		Version: ethpbv2.Version(signed.Version()),
-		Data:    lightclient.CreateLightClientFinalityUpdate(update),
+		Data:    CreateLightClientFinalityUpdate(update),
 	}

 	// Send event
@@ -213,7 +211,7 @@ func (s *Service) sendLightClientOptimisticUpdate(ctx context.Context, signed in
 		return 0, errors.Wrap(err, "could not get attested state")
 	}

-	update, err := lightclient.NewLightClientOptimisticUpdateFromBeaconState(
+	update, err := NewLightClientOptimisticUpdateFromBeaconState(
 		ctx,
 		postState,
 		signed,
@@ -227,7 +225,7 @@ func (s *Service) sendLightClientOptimisticUpdate(ctx context.Context, signed in
 	// Return the result
 	result := &ethpbv2.LightClientOptimisticUpdateWithVersion{
 		Version: ethpbv2.Version(signed.Version()),
-		Data:    lightclient.CreateLightClientOptimisticUpdate(update),
+		Data:    CreateLightClientOptimisticUpdate(update),
 	}

 	return s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
@@ -12,11 +12,11 @@ import (
 	"github.com/prysmaticlabs/prysm/v5/config/params"
 	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
 	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
-	"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
 	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
 	"github.com/prysmaticlabs/prysm/v5/runtime/version"
 	"github.com/prysmaticlabs/prysm/v5/time/slots"
 	"github.com/sirupsen/logrus"
+	"go.opencensus.io/trace"
 )

 // reorgLateBlockCountAttestations is the time until the end of the slot in which we count
@@ -21,12 +21,12 @@ import (
 	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
 	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
 	"github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
-	"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
 	ethpbv1 "github.com/prysmaticlabs/prysm/v5/proto/eth/v1"
 	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
 	"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
 	"github.com/prysmaticlabs/prysm/v5/runtime/version"
 	"github.com/prysmaticlabs/prysm/v5/time/slots"
+	"go.opencensus.io/trace"
 	"golang.org/x/sync/errgroup"
 )
@@ -39,10 +39,10 @@ import (
 	"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
 	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
 	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
-	"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
 	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
 	prysmTime "github.com/prysmaticlabs/prysm/v5/time"
 	"github.com/prysmaticlabs/prysm/v5/time/slots"
+	"go.opencensus.io/trace"
 )

 // Service represents a service that handles the internal
@@ -34,13 +34,13 @@ go_library(
         "//crypto/hash:go_default_library",
         "//encoding/bytesutil:go_default_library",
         "//math:go_default_library",
-        "//monitoring/tracing/trace:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
         "//proto/prysm/v1alpha1/attestation:go_default_library",
         "//runtime/version:go_default_library",
         "//time/slots:go_default_library",
         "@com_github_pkg_errors//:go_default_library",
         "@com_github_sirupsen_logrus//:go_default_library",
+        "@io_opencensus_go//trace:go_default_library",
     ],
 )
@@ -14,9 +14,9 @@ import (
 	consensusblocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
 	"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
 	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
-	"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
 	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
 	"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
+	"go.opencensus.io/trace"
 )

 // ProcessAttestationsNoVerifySignature applies processing operations to a block's inner attestation
@@ -10,7 +10,7 @@ import (
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
 	"github.com/prysmaticlabs/prysm/v5/config/params"
 	"github.com/prysmaticlabs/prysm/v5/math"
-	"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
+	"go.opencensus.io/trace"
 )

 // AttDelta contains rewards and penalties for a single attestation.
@@ -7,7 +7,7 @@ import (
 	e "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/epoch"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/epoch/precompute"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
-	"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
+	"go.opencensus.io/trace"
 )

 // ProcessEpoch describes the per epoch operations that are performed on the beacon state.
@@ -40,7 +40,6 @@ go_library(
         "//encoding/bytesutil:go_default_library",
         "//encoding/ssz:go_default_library",
         "//math:go_default_library",
-        "//monitoring/tracing/trace:go_default_library",
         "//network/forks:go_default_library",
         "//proto/engine/v1:go_default_library",
         "//proto/prysm/v1alpha1:go_default_library",
@@ -50,6 +49,7 @@ go_library(
         "//time/slots:go_default_library",
         "@com_github_pkg_errors//:go_default_library",
         "@com_github_sirupsen_logrus//:go_default_library",
+        "@io_opencensus_go//trace:go_default_library",
         "@org_golang_google_protobuf//proto:go_default_library",
     ],
 )
@@ -14,10 +14,10 @@ import (
 	"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
 	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
 	"github.com/prysmaticlabs/prysm/v5/crypto/bls"
-	"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
 	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
 	"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
 	"github.com/prysmaticlabs/prysm/v5/runtime/version"
+	"go.opencensus.io/trace"
 )

 // ProcessAttestationsNoVerifySignature applies processing operations to a block's inner attestation
@@ -24,7 +24,6 @@ go_library(
         "//consensus-types/primitives:go_default_library",
         "//math:go_default_library",
         "//monitoring/tracing:go_default_library",
-        "//monitoring/tracing/trace:go_default_library",
         "//proto/prysm/v1alpha1:go_default_library",
         "//proto/prysm/v1alpha1/attestation:go_default_library",
         "//runtime/version:go_default_library",
@@ -11,10 +11,10 @@ import (
 	"github.com/prysmaticlabs/prysm/v5/config/params"
 	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
 	"github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
-	"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
 	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
 	"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
 	"github.com/prysmaticlabs/prysm/v5/runtime/version"
+	"go.opencensus.io/trace"
 )

 // ProcessAttestations process the attestations in state and update individual validator's pre computes,
@@ -33,7 +33,6 @@ go_library(
         "//crypto/hash:go_default_library",
         "//encoding/bytesutil:go_default_library",
         "//math:go_default_library",
-        "//monitoring/tracing/trace:go_default_library",
         "//proto/prysm/v1alpha1:go_default_library",
         "//runtime/version:go_default_library",
         "//time:go_default_library",
@@ -43,6 +42,7 @@ go_library(
         "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
         "@com_github_prysmaticlabs_go_bitfield//:go_default_library",
         "@com_github_sirupsen_logrus//:go_default_library",
+        "@io_opencensus_go//trace:go_default_library",
     ],
 )
@@ -16,11 +16,11 @@ import (
 	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
 	"github.com/prysmaticlabs/prysm/v5/crypto/hash"
 	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
-	"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
 	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
 	"github.com/prysmaticlabs/prysm/v5/runtime/version"
 	"github.com/prysmaticlabs/prysm/v5/time/slots"
 	log "github.com/sirupsen/logrus"
+	"go.opencensus.io/trace"
 )

 var (
beacon-chain/core/light-client/BUILD.bazel (file deleted)

@@ -1,31 +0,0 @@
-load("@prysm//tools/go:def.bzl", "go_library", "go_test")
-
-go_library(
-    name = "go_default_library",
-    srcs = ["lightclient.go"],
-    importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/light-client",
-    visibility = ["//visibility:public"],
-    deps = [
-        "//beacon-chain/state:go_default_library",
-        "//config/params:go_default_library",
-        "//consensus-types/interfaces:go_default_library",
-        "//encoding/bytesutil:go_default_library",
-        "//proto/eth/v1:go_default_library",
-        "//proto/eth/v2:go_default_library",
-        "//proto/migration:go_default_library",
-        "//time/slots:go_default_library",
-    ],
-)
-
-go_test(
-    name = "go_default_test",
-    srcs = ["lightclient_test.go"],
-    deps = [
-        ":go_default_library",
-        "//config/params:go_default_library",
-        "//consensus-types/primitives:go_default_library",
-        "//proto/eth/v1:go_default_library",
-        "//testing/require:go_default_library",
-        "//testing/util:go_default_library",
-    ],
-)
beacon-chain/core/light-client/lightclient_test.go (file deleted)

@@ -1,54 +0,0 @@
-package light_client_test
-
-import (
-	"testing"
-
-	lightClient "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/light-client"
-	"github.com/prysmaticlabs/prysm/v5/config/params"
-	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
-	"github.com/prysmaticlabs/prysm/v5/testing/require"
-	"github.com/prysmaticlabs/prysm/v5/testing/util"
-
-	v1 "github.com/prysmaticlabs/prysm/v5/proto/eth/v1"
-)
-
-func TestLightClient_NewLightClientOptimisticUpdateFromBeaconState(t *testing.T) {
-	l := util.NewTestLightClient(t).SetupTest()
-
-	update, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState)
-	require.NoError(t, err)
-	require.NotNil(t, update, "update is nil")
-
-	require.Equal(t, l.Block.Block().Slot(), update.SignatureSlot, "Signature slot is not equal")
-
-	l.CheckSyncAggregate(update)
-	l.CheckAttestedHeader(update)
-
-	require.Equal(t, (*v1.BeaconBlockHeader)(nil), update.FinalizedHeader, "Finalized header is not nil")
-	require.DeepSSZEqual(t, ([][]byte)(nil), update.FinalityBranch, "Finality branch is not nil")
-}
-
-func TestLightClient_NewLightClientFinalityUpdateFromBeaconState(t *testing.T) {
-	l := util.NewTestLightClient(t).SetupTest()
-
-	update, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, nil)
-	require.NoError(t, err)
-	require.NotNil(t, update, "update is nil")
-
-	require.Equal(t, l.Block.Block().Slot(), update.SignatureSlot, "Signature slot is not equal")
-
-	l.CheckSyncAggregate(update)
-	l.CheckAttestedHeader(update)
-
-	zeroHash := params.BeaconConfig().ZeroHash[:]
-	require.NotNil(t, update.FinalizedHeader, "Finalized header is nil")
-	require.Equal(t, primitives.Slot(0), update.FinalizedHeader.Slot, "Finalized header slot is not zero")
-	require.Equal(t, primitives.ValidatorIndex(0), update.FinalizedHeader.ProposerIndex, "Finalized header proposer index is not zero")
-	require.DeepSSZEqual(t, zeroHash, update.FinalizedHeader.ParentRoot, "Finalized header parent root is not zero")
-	require.DeepSSZEqual(t, zeroHash, update.FinalizedHeader.StateRoot, "Finalized header state root is not zero")
-	require.DeepSSZEqual(t, zeroHash, update.FinalizedHeader.BodyRoot, "Finalized header body root is not zero")
-	require.Equal(t, lightClient.FinalityBranchNumOfLeaves, len(update.FinalityBranch), "Invalid finality branch leaves")
-	for _, leaf := range update.FinalityBranch {
-		require.DeepSSZEqual(t, zeroHash, leaf, "Leaf is not zero")
-	}
-}
@@ -5,7 +5,6 @@ go_library(
     srcs = [
         "helpers.go",
         "log.go",
-        "metrics.go",
     ],
     importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas",
     visibility = ["//visibility:public"],
@@ -23,8 +22,6 @@ go_library(
         "@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
         "@com_github_holiman_uint256//:go_default_library",
         "@com_github_pkg_errors//:go_default_library",
-        "@com_github_prometheus_client_golang//prometheus:go_default_library",
-        "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
         "@com_github_sirupsen_logrus//:go_default_library",
         "@org_golang_x_sync//errgroup:go_default_library",
     ],
@@ -1,7 +1,6 @@
 package peerdas

 import (
-	"context"
 	"encoding/binary"
 	"fmt"
 	"math"
@@ -34,7 +33,7 @@ const (
 )

 // https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/p2p-interface.md#the-discovery-domain-discv5
-type Csc uint64
+type Csc uint8

 func (Csc) ENRKey() string { return CustodySubnetCountEnrKey }
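The Csc type above is an ENR entry: any type with an ENRKey method can be stored in a node record, and the integer width matters because the value is RLP-decoded back into that type. A minimal sketch using go-ethereum's enr package (the key string "csc" is illustrative, not the constant from the diff):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/p2p/enr"
)

// Csc mirrors the diff: a custody subnet count stored under a fixed ENR key.
type Csc uint8

func (Csc) ENRKey() string { return "csc" }

func main() {
	var r enr.Record
	r.Set(Csc(4)) // write the entry into the record

	var c Csc
	if err := r.Load(&c); err != nil { // read it back, RLP-decoding into uint8
		panic(err)
	}
	fmt.Println(c) // 4
}
```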
@@ -118,7 +117,6 @@ func CustodyColumns(nodeId enode.ID, custodySubnetCount uint64) (map[uint64]bool
 // DataColumnSidecars computes the data column sidecars from the signed block and blobs.
 // https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/das-core.md#recover_matrix
 func DataColumnSidecars(signedBlock interfaces.ReadOnlySignedBeaconBlock, blobs []kzg.Blob) ([]*ethpb.DataColumnSidecar, error) {
-	startTime := time.Now()
 	blobsCount := len(blobs)
 	if blobsCount == 0 {
 		return nil, nil
@@ -147,24 +145,16 @@ func DataColumnSidecars(signedBlock interfaces.ReadOnlySignedBeaconBlock, blobs
 	}

 	// Compute cells and proofs.
-	cellsAndProofs := make([]kzg.CellsAndProofs, blobsCount)
+	cellsAndProofs := make([]kzg.CellsAndProofs, 0, blobsCount)

-	eg, _ := errgroup.WithContext(context.Background())
 	for i := range blobs {
-		blobIndex := i
-		eg.Go(func() error {
-			blob := &blobs[blobIndex]
-			blobCellsAndProofs, err := kzg.ComputeCellsAndKZGProofs(blob)
-			if err != nil {
-				return errors.Wrap(err, "compute cells and KZG proofs")
-			}
+		blob := &blobs[i]
+		blobCellsAndProofs, err := kzg.ComputeCellsAndKZGProofs(blob)
+		if err != nil {
+			return nil, errors.Wrap(err, "compute cells and KZG proofs")
+		}

-			cellsAndProofs[blobIndex] = blobCellsAndProofs
-			return nil
-		})
-	}
-	if err := eg.Wait(); err != nil {
-		return nil, err
+		cellsAndProofs = append(cellsAndProofs, blobCellsAndProofs)
 	}

 	// Get the column sidecars.
@@ -206,7 +196,7 @@ func DataColumnSidecars(signedBlock interfaces.ReadOnlySignedBeaconBlock, blobs

 		sidecars = append(sidecars, sidecar)
 	}
-	dataColumnComputationTime.Observe(float64(time.Since(startTime).Milliseconds()))
+
 	return sidecars, nil
 }
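The parallel variant on the removed side of this hunk is a common Go pattern: preallocate a results slice, launch one goroutine per item with errgroup, and have each goroutine write only its own index so no locking is needed. A self-contained sketch of the pattern (the squaring work stands in for the KZG computation):

```go
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	inputs := []int{1, 2, 3, 4}
	results := make([]int, len(inputs)) // one slot per goroutine, no mutex needed

	eg, _ := errgroup.WithContext(context.Background())
	for i := range inputs {
		i := i // pin the loop variable (required before Go 1.22)
		eg.Go(func() error {
			results[i] = inputs[i] * inputs[i] // each goroutine writes only its own index
			return nil
		})
	}
	if err := eg.Wait(); err != nil {
		panic(err)
	}
	fmt.Println(results) // [1 4 9 16]
}
```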
@@ -431,20 +421,11 @@ func VerifyDataColumnSidecarKZGProofs(sc blocks.RODataColumn) (bool, error) {

 // CustodySubnetCount returns the number of subnets the node should participate in for custody.
 func CustodySubnetCount() uint64 {
-	count := params.BeaconConfig().CustodyRequirement
 	if flags.Get().SubscribeToAllSubnets {
-		count = params.BeaconConfig().DataColumnSidecarSubnetCount
+		return params.BeaconConfig().DataColumnSidecarSubnetCount
 	}
-
-	return count
-}
-
-// SubnetSamplingSize returns the number of subnets the node should sample from.
-// https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/das-core.md#subnet-sampling
-func SubnetSamplingSize() uint64 {
-	samplesPerSlot := params.BeaconConfig().SamplesPerSlot
-	custodySubnetCount := CustodySubnetCount()
-
-	return max(samplesPerSlot, custodySubnetCount)
+	return params.BeaconConfig().CustodyRequirement
 }

 // CustodyColumnCount returns the number of columns the node should custody.
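The removed SubnetSamplingSize means the deleted side sampled max(SamplesPerSlot, custody subnet count) subnets, while the surviving side uses the custody count alone. A tiny sketch of that relationship (the parameter values are illustrative, not mainnet constants):

```go
package main

import "fmt"

func main() {
	const samplesPerSlot = 8     // illustrative value
	const custodyRequirement = 4 // illustrative value

	// Removed side: sample the larger of the two, per the subnet-sampling spec.
	subnetSamplingSize := max(samplesPerSlot, custodyRequirement)

	fmt.Println(subnetSamplingSize) // 8: subnets sampled on the removed side
	fmt.Println(custodyRequirement) // 4: subnets custodied on the surviving side
}
```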
beacon-chain/core/peerdas/metrics.go (file deleted)

@@ -1,14 +0,0 @@
-package peerdas
-
-import (
-	"github.com/prometheus/client_golang/prometheus"
-	"github.com/prometheus/client_golang/prometheus/promauto"
-)
-
-var dataColumnComputationTime = promauto.NewHistogram(
-	prometheus.HistogramOpts{
-		Name:    "data_column_sidecar_computation_milliseconds",
-		Help:    "Captures the time taken to compute data column sidecars from blobs.",
-		Buckets: []float64{100, 250, 500, 750, 1000, 1500, 2000, 4000, 8000, 12000, 16000},
-	},
-)
@@ -41,7 +41,6 @@ go_library(
         "//crypto/hash:go_default_library",
         "//encoding/bytesutil:go_default_library",
         "//monitoring/tracing:go_default_library",
-        "//monitoring/tracing/trace:go_default_library",
         "//proto/engine/v1:go_default_library",
         "//proto/prysm/v1alpha1:go_default_library",
         "//runtime/version:go_default_library",
@@ -25,9 +25,8 @@ import (
 	"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
 	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
 	"github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
-	"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
 	"github.com/prysmaticlabs/prysm/v5/runtime/version"
-	goTrace "go.opencensus.io/trace"
+	"go.opencensus.io/trace"
 )

 type customProcessingFn func(context.Context, state.BeaconState) error
@@ -259,7 +258,7 @@ func cacheBestBeaconStateOnErrFn(highestSlot primitives.Slot, key [32]byte) cust
 //	if (state.slot + 1) % SLOTS_PER_EPOCH == 0:
 //	    process_epoch(state)
 //	state.slot = Slot(state.slot + 1)
-func ProcessSlotsCore(ctx context.Context, span *goTrace.Span, state state.BeaconState, slot primitives.Slot, fn customProcessingFn) (state.BeaconState, error) {
+func ProcessSlotsCore(ctx context.Context, span *trace.Span, state state.BeaconState, slot primitives.Slot, fn customProcessingFn) (state.BeaconState, error) {
 	var err error
 	for state.Slot() < slot {
 		if fn != nil {
@@ -16,8 +16,8 @@ import (
 	"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
 	"github.com/prysmaticlabs/prysm/v5/crypto/bls"
 	"github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
-	"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
 	"github.com/prysmaticlabs/prysm/v5/runtime/version"
+	"go.opencensus.io/trace"
 )

 // ExecuteStateTransitionNoVerifyAnySig defines the procedure for a state transition function.
@@ -18,7 +18,6 @@ go_library(
         "//consensus-types/primitives:go_default_library",
         "//monitoring/backup:go_default_library",
         "//proto/dbval:go_default_library",
-        "//proto/eth/v2:go_default_library",
         "//proto/prysm/v1alpha1:go_default_library",
         "@com_github_ethereum_go_ethereum//common:go_default_library",
     ],
@@ -7,8 +7,6 @@ import (
 	"context"
 	"io"

-	ethpbv2 "github.com/prysmaticlabs/prysm/v5/proto/eth/v2"
-
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filters"
 	slashertypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/slasher/types"
@@ -58,9 +56,6 @@ type ReadOnlyDatabase interface {
 	// Fee recipients operations.
 	FeeRecipientByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (common.Address, error)
 	RegistrationByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (*ethpb.ValidatorRegistrationV1, error)
-	// light client operations
-	LightClientUpdates(ctx context.Context, startPeriod, endPeriod uint64) (map[uint64]*ethpbv2.LightClientUpdateWithVersion, error)
-	LightClientUpdate(ctx context.Context, period uint64) (*ethpbv2.LightClientUpdateWithVersion, error)

 	// origin checkpoint sync support
 	OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error)
@@ -97,8 +92,6 @@ type NoHeadAccessDatabase interface {
 	// Fee recipients operations.
 	SaveFeeRecipientsByValidatorIDs(ctx context.Context, ids []primitives.ValidatorIndex, addrs []common.Address) error
 	SaveRegistrationsByValidatorIDs(ctx context.Context, ids []primitives.ValidatorIndex, regs []*ethpb.ValidatorRegistrationV1) error
-	// light client operations
-	SaveLightClientUpdate(ctx context.Context, period uint64, update *ethpbv2.LightClientUpdateWithVersion) error

 	CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint primitives.Slot) error
 }
@@ -16,7 +16,6 @@ go_library(
         "genesis.go",
         "key.go",
         "kv.go",
-        "lightclient.go",
         "log.go",
         "migration.go",
         "migration_archived_index.go",
@@ -51,9 +50,7 @@ go_library(
         "//io/file:go_default_library",
         "//monitoring/progress:go_default_library",
         "//monitoring/tracing:go_default_library",
-        "//monitoring/tracing/trace:go_default_library",
         "//proto/dbval:go_default_library",
-        "//proto/eth/v2:go_default_library",
         "//proto/prysm/v1alpha1:go_default_library",
         "//runtime/version:go_default_library",
         "//time:go_default_library",
@@ -90,7 +87,6 @@ go_test(
         "genesis_test.go",
         "init_test.go",
         "kv_test.go",
-        "lightclient_test.go",
         "migration_archived_index_test.go",
         "migration_block_slot_index_test.go",
         "migration_state_validators_test.go",
@@ -117,7 +113,6 @@ go_test(
         "//encoding/bytesutil:go_default_library",
         "//proto/dbval:go_default_library",
         "//proto/engine/v1:go_default_library",
-        "//proto/eth/v2:go_default_library",
         "//proto/prysm/v1alpha1:go_default_library",
         "//proto/testing:go_default_library",
         "//runtime/version:go_default_library",
@@ -8,8 +8,8 @@ import (
    "github.com/prysmaticlabs/prysm/v5/config/params"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
    "github.com/prysmaticlabs/prysm/v5/io/file"
    "github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
    bolt "go.etcd.io/bbolt"
    "go.opencensus.io/trace"
)

const backupsDirectoryName = "backups"

@@ -16,11 +16,11 @@ import (
    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
    "github.com/prysmaticlabs/prysm/v5/container/slice"
    "github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
    "github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
    ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/v5/runtime/version"
    "github.com/prysmaticlabs/prysm/v5/time/slots"
    bolt "go.etcd.io/bbolt"
    "go.opencensus.io/trace"
)

// Used to represent errors for inconsistent slot ranges.

@@ -8,9 +8,9 @@ import (
    "github.com/prysmaticlabs/prysm/v5/config/params"
    "github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
    "github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
    "github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
    ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
    bolt "go.etcd.io/bbolt"
    "go.opencensus.io/trace"
)

var errMissingStateForCheckpoint = errors.New("missing state summary for checkpoint root")

@@ -5,9 +5,9 @@ import (
    "errors"

    "github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
    "github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
    v2 "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
    bolt "go.etcd.io/bbolt"
    "go.opencensus.io/trace"
    "google.golang.org/protobuf/proto"
)

@@ -10,9 +10,9 @@ import (
    "github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
    "github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
    "github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
    "github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
    ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
    bolt "go.etcd.io/bbolt"
    "go.opencensus.io/trace"
)

var previousFinalizedCheckpointKey = []byte("previous-finalized-checkpoint")

@@ -107,7 +107,6 @@ var Buckets = [][]byte{
    powchainBucket,
    stateSummaryBucket,
    stateValidatorsBucket,
    lightClientUpdatesBucket,
    // Indices buckets.
    blockSlotIndicesBucket,
    stateSlotIndicesBucket,

@@ -1,79 +0,0 @@
package kv

import (
    "context"
    "encoding/binary"
    "fmt"

    "github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
    ethpbv2 "github.com/prysmaticlabs/prysm/v5/proto/eth/v2"
    bolt "go.etcd.io/bbolt"
    "go.opencensus.io/trace"
)

func (s *Store) SaveLightClientUpdate(ctx context.Context, period uint64, update *ethpbv2.LightClientUpdateWithVersion) error {
    ctx, span := trace.StartSpan(ctx, "BeaconDB.saveLightClientUpdate")
    defer span.End()

    return s.db.Update(func(tx *bolt.Tx) error {
        bkt := tx.Bucket(lightClientUpdatesBucket)
        updateMarshalled, err := encode(ctx, update)
        if err != nil {
            return err
        }
        return bkt.Put(bytesutil.Uint64ToBytesBigEndian(period), updateMarshalled)
    })
}

func (s *Store) LightClientUpdates(ctx context.Context, startPeriod, endPeriod uint64) (map[uint64]*ethpbv2.LightClientUpdateWithVersion, error) {
    ctx, span := trace.StartSpan(ctx, "BeaconDB.LightClientUpdates")
    defer span.End()

    if startPeriod > endPeriod {
        return nil, fmt.Errorf("start period %d is greater than end period %d", startPeriod, endPeriod)
    }

    updates := make(map[uint64]*ethpbv2.LightClientUpdateWithVersion)
    err := s.db.View(func(tx *bolt.Tx) error {
        bkt := tx.Bucket(lightClientUpdatesBucket)
        c := bkt.Cursor()

        firstPeriodInDb, _ := c.First()
        if firstPeriodInDb == nil {
            return nil
        }

        for k, v := c.Seek(bytesutil.Uint64ToBytesBigEndian(startPeriod)); k != nil && binary.BigEndian.Uint64(k) <= endPeriod; k, v = c.Next() {
            currentPeriod := binary.BigEndian.Uint64(k)

            var update ethpbv2.LightClientUpdateWithVersion
            if err := decode(ctx, v, &update); err != nil {
                return err
            }
            updates[currentPeriod] = &update
        }

        return nil
    })

    if err != nil {
        return nil, err
    }
    return updates, err
}

func (s *Store) LightClientUpdate(ctx context.Context, period uint64) (*ethpbv2.LightClientUpdateWithVersion, error) {
    ctx, span := trace.StartSpan(ctx, "BeaconDB.LightClientUpdate")
    defer span.End()

    var update ethpbv2.LightClientUpdateWithVersion
    err := s.db.View(func(tx *bolt.Tx) error {
        bkt := tx.Bucket(lightClientUpdatesBucket)
        updateBytes := bkt.Get(bytesutil.Uint64ToBytesBigEndian(period))
        if updateBytes == nil {
            return nil
        }
        return decode(ctx, updateBytes, &update)
    })
    return &update, err
}
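Review note: the deleted store keys updates by big-endian period so bolt's cursor walks them in ascending order, and range reads Seek to the first stored period >= startPeriod, silently skipping gaps. A minimal standalone sketch of that pattern follows (it uses bbolt directly with opaque payloads; putUpdate/rangeUpdates and the file name are illustrative, not Prysm APIs):

    package main

    import (
        "encoding/binary"
        "fmt"
        "log"

        bolt "go.etcd.io/bbolt"
    )

    var bucket = []byte("light-client-updates")

    func putUpdate(db *bolt.DB, period uint64, payload []byte) error {
        key := make([]byte, 8)
        binary.BigEndian.PutUint64(key, period) // big-endian keys keep the cursor in period order
        return db.Update(func(tx *bolt.Tx) error {
            bkt, err := tx.CreateBucketIfNotExists(bucket)
            if err != nil {
                return err
            }
            return bkt.Put(key, payload)
        })
    }

    func rangeUpdates(db *bolt.DB, start, end uint64) (map[uint64][]byte, error) {
        out := make(map[uint64][]byte)
        err := db.View(func(tx *bolt.Tx) error {
            bkt := tx.Bucket(bucket)
            if bkt == nil {
                return nil
            }
            c := bkt.Cursor()
            from := make([]byte, 8)
            binary.BigEndian.PutUint64(from, start)
            // Seek lands on the first stored period >= start, so missing periods are skipped.
            for k, v := c.Seek(from); k != nil && binary.BigEndian.Uint64(k) <= end; k, v = c.Next() {
                out[binary.BigEndian.Uint64(k)] = append([]byte(nil), v...)
            }
            return nil
        })
        return out, err
    }

    func main() {
        db, err := bolt.Open("lc.db", 0600, nil)
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()
        for _, p := range []uint64{7, 8, 11, 12} {
            if err := putUpdate(db, p, []byte(fmt.Sprintf("update-%d", p))); err != nil {
                log.Fatal(err)
            }
        }
        got, _ := rangeUpdates(db, 8, 12)
        fmt.Println(len(got)) // 3: periods 8, 11, 12; the 9-10 gap is simply absent
    }

This is the same behavior the deleted tests below exercise (partial ranges, gaps in the middle, empty DB).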
@@ -1,648 +0,0 @@
package kv

import (
    "context"
    "testing"

    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
    ethpbv2 "github.com/prysmaticlabs/prysm/v5/proto/eth/v2"
    "github.com/prysmaticlabs/prysm/v5/testing/require"
)

func TestStore_LightclientUpdate_CanSaveRetrieve(t *testing.T) {
    db := setupDB(t)
    ctx := context.Background()
    update := &ethpbv2.LightClientUpdate{
        AttestedHeader: nil,
        NextSyncCommittee: nil,
        NextSyncCommitteeBranch: nil,
        FinalizedHeader: nil,
        FinalityBranch: nil,
        SyncAggregate: nil,
        SignatureSlot: 7,
    }

    period := uint64(1)
    err := db.SaveLightClientUpdate(ctx, period, &ethpbv2.LightClientUpdateWithVersion{
        Version: 1,
        Data: update,
    })
    require.NoError(t, err)

    // Retrieve the update
    retrievedUpdate, err := db.LightClientUpdate(ctx, period)
    require.NoError(t, err)
    require.Equal(t, update.SignatureSlot, retrievedUpdate.Data.SignatureSlot, "retrieved update does not match saved update")
}

func TestStore_LightclientUpdates_canRetrieveRange(t *testing.T) {
    db := setupDB(t)
    ctx := context.Background()
    updates := []*ethpbv2.LightClientUpdateWithVersion{
        {
            Version: 1,
            Data: &ethpbv2.LightClientUpdate{
                AttestedHeader: nil,
                NextSyncCommittee: nil,
                NextSyncCommitteeBranch: nil,
                FinalizedHeader: nil,
                FinalityBranch: nil,
                SyncAggregate: nil,
                SignatureSlot: 7,
            },
        },
        {
            Version: 1,
            Data: &ethpbv2.LightClientUpdate{
                AttestedHeader: nil,
                NextSyncCommittee: nil,
                NextSyncCommitteeBranch: nil,
                FinalizedHeader: nil,
                FinalityBranch: nil,
                SyncAggregate: nil,
                SignatureSlot: 8,
            },
        },
        {
            Version: 1,
            Data: &ethpbv2.LightClientUpdate{
                AttestedHeader: nil,
                NextSyncCommittee: nil,
                NextSyncCommitteeBranch: nil,
                FinalizedHeader: nil,
                FinalityBranch: nil,
                SyncAggregate: nil,
                SignatureSlot: 9,
            },
        },
    }

    for i, update := range updates {
        err := db.SaveLightClientUpdate(ctx, uint64(i+1), update)
        require.NoError(t, err)
    }

    // Retrieve the updates
    retrievedUpdatesMap, err := db.LightClientUpdates(ctx, 1, 3)
    require.NoError(t, err)
    require.Equal(t, len(updates), len(retrievedUpdatesMap), "retrieved updates do not match saved updates")
    for i, update := range updates {
        require.Equal(t, update.Data.SignatureSlot, retrievedUpdatesMap[uint64(i+1)].Data.SignatureSlot, "retrieved update does not match saved update")
    }
}

func TestStore_LightClientUpdate_EndPeriodSmallerThanStartPeriod(t *testing.T) {
    db := setupDB(t)
    ctx := context.Background()
    updates := []*ethpbv2.LightClientUpdateWithVersion{
        {
            Version: 1,
            Data: &ethpbv2.LightClientUpdate{
                AttestedHeader: nil,
                NextSyncCommittee: nil,
                NextSyncCommitteeBranch: nil,
                FinalizedHeader: nil,
                FinalityBranch: nil,
                SyncAggregate: nil,
                SignatureSlot: 7,
            },
        },
        {
            Version: 1,
            Data: &ethpbv2.LightClientUpdate{
                AttestedHeader: nil,
                NextSyncCommittee: nil,
                NextSyncCommitteeBranch: nil,
                FinalizedHeader: nil,
                FinalityBranch: nil,
                SyncAggregate: nil,
                SignatureSlot: 8,
            },
        },
        {
            Version: 1,
            Data: &ethpbv2.LightClientUpdate{
                AttestedHeader: nil,
                NextSyncCommittee: nil,
                NextSyncCommitteeBranch: nil,
                FinalizedHeader: nil,
                FinalityBranch: nil,
                SyncAggregate: nil,
                SignatureSlot: 9,
            },
        },
    }

    for i, update := range updates {
        err := db.SaveLightClientUpdate(ctx, uint64(i+1), update)
        require.NoError(t, err)
    }

    // Retrieve the updates
    retrievedUpdates, err := db.LightClientUpdates(ctx, 3, 1)
    require.NotNil(t, err)
    require.Equal(t, err.Error(), "start period 3 is greater than end period 1")
    require.IsNil(t, retrievedUpdates)
}

func TestStore_LightClientUpdate_EndPeriodEqualToStartPeriod(t *testing.T) {
    db := setupDB(t)
    ctx := context.Background()
    updates := []*ethpbv2.LightClientUpdateWithVersion{
        {
            Version: 1,
            Data: &ethpbv2.LightClientUpdate{
                AttestedHeader: nil,
                NextSyncCommittee: nil,
                NextSyncCommitteeBranch: nil,
                FinalizedHeader: nil,
                FinalityBranch: nil,
                SyncAggregate: nil,
                SignatureSlot: 7,
            },
        },
        {
            Version: 1,
            Data: &ethpbv2.LightClientUpdate{
                AttestedHeader: nil,
                NextSyncCommittee: nil,
                NextSyncCommitteeBranch: nil,
                FinalizedHeader: nil,
                FinalityBranch: nil,
                SyncAggregate: nil,
                SignatureSlot: 8,
            },
        },
        {
            Version: 1,
            Data: &ethpbv2.LightClientUpdate{
                AttestedHeader: nil,
                NextSyncCommittee: nil,
                NextSyncCommitteeBranch: nil,
                FinalizedHeader: nil,
                FinalityBranch: nil,
                SyncAggregate: nil,
                SignatureSlot: 9,
            },
        },
    }

    for i, update := range updates {
        err := db.SaveLightClientUpdate(ctx, uint64(i+1), update)
        require.NoError(t, err)
    }

    // Retrieve the updates
    retrievedUpdates, err := db.LightClientUpdates(ctx, 2, 2)
    require.NoError(t, err)
    require.Equal(t, 1, len(retrievedUpdates))
    require.Equal(t, updates[1].Data.SignatureSlot, retrievedUpdates[2].Data.SignatureSlot, "retrieved update does not match saved update")
}

func TestStore_LightClientUpdate_StartPeriodBeforeFirstUpdate(t *testing.T) {
    db := setupDB(t)
    ctx := context.Background()
    updates := []*ethpbv2.LightClientUpdateWithVersion{
        {
            Version: 1,
            Data: &ethpbv2.LightClientUpdate{
                AttestedHeader: nil,
                NextSyncCommittee: nil,
                NextSyncCommitteeBranch: nil,
                FinalizedHeader: nil,
                FinalityBranch: nil,
                SyncAggregate: nil,
                SignatureSlot: 7,
            },
        },
        {
            Version: 1,
            Data: &ethpbv2.LightClientUpdate{
                AttestedHeader: nil,
                NextSyncCommittee: nil,
                NextSyncCommitteeBranch: nil,
                FinalizedHeader: nil,
                FinalityBranch: nil,
                SyncAggregate: nil,
                SignatureSlot: 8,
            },
        },
        {
            Version: 1,
            Data: &ethpbv2.LightClientUpdate{
                AttestedHeader: nil,
                NextSyncCommittee: nil,
                NextSyncCommitteeBranch: nil,
                FinalizedHeader: nil,
                FinalityBranch: nil,
                SyncAggregate: nil,
                SignatureSlot: 9,
            },
        },
    }

    for i, update := range updates {
        err := db.SaveLightClientUpdate(ctx, uint64(i+2), update)
        require.NoError(t, err)
    }

    // Retrieve the updates
    retrievedUpdates, err := db.LightClientUpdates(ctx, 0, 4)
    require.NoError(t, err)
    require.Equal(t, 3, len(retrievedUpdates))
    for i, update := range updates {
        require.Equal(t, update.Data.SignatureSlot, retrievedUpdates[uint64(i+2)].Data.SignatureSlot, "retrieved update does not match saved update")
    }
}

func TestStore_LightClientUpdate_EndPeriodAfterLastUpdate(t *testing.T) {
    db := setupDB(t)
    ctx := context.Background()
    updates := []*ethpbv2.LightClientUpdateWithVersion{
        {
            Version: 1,
            Data: &ethpbv2.LightClientUpdate{
                AttestedHeader: nil,
                NextSyncCommittee: nil,
                NextSyncCommitteeBranch: nil,
                FinalizedHeader: nil,
                FinalityBranch: nil,
                SyncAggregate: nil,
                SignatureSlot: 7,
            },
        },
        {
            Version: 1,
            Data: &ethpbv2.LightClientUpdate{
                AttestedHeader: nil,
                NextSyncCommittee: nil,
                NextSyncCommitteeBranch: nil,
                FinalizedHeader: nil,
                FinalityBranch: nil,
                SyncAggregate: nil,
                SignatureSlot: 8,
            },
        },
        {
            Version: 1,
            Data: &ethpbv2.LightClientUpdate{
                AttestedHeader: nil,
                NextSyncCommittee: nil,
                NextSyncCommitteeBranch: nil,
                FinalizedHeader: nil,
                FinalityBranch: nil,
                SyncAggregate: nil,
                SignatureSlot: 9,
            },
        },
    }

    for i, update := range updates {
        err := db.SaveLightClientUpdate(ctx, uint64(i+1), update)
        require.NoError(t, err)
    }

    // Retrieve the updates
    retrievedUpdates, err := db.LightClientUpdates(ctx, 1, 6)
    require.NoError(t, err)
    require.Equal(t, 3, len(retrievedUpdates))
    for i, update := range updates {
        require.Equal(t, update.Data.SignatureSlot, retrievedUpdates[uint64(i+1)].Data.SignatureSlot, "retrieved update does not match saved update")
    }
}

func TestStore_LightClientUpdate_PartialUpdates(t *testing.T) {
    db := setupDB(t)
    ctx := context.Background()
    updates := []*ethpbv2.LightClientUpdateWithVersion{
        {
            Version: 1,
            Data: &ethpbv2.LightClientUpdate{
                AttestedHeader: nil,
                NextSyncCommittee: nil,
                NextSyncCommitteeBranch: nil,
                FinalizedHeader: nil,
                FinalityBranch: nil,
                SyncAggregate: nil,
                SignatureSlot: 7,
            },
        },
        {
            Version: 1,
            Data: &ethpbv2.LightClientUpdate{
                AttestedHeader: nil,
                NextSyncCommittee: nil,
                NextSyncCommitteeBranch: nil,
                FinalizedHeader: nil,
                FinalityBranch: nil,
                SyncAggregate: nil,
                SignatureSlot: 8,
            },
        },
        {
            Version: 1,
            Data: &ethpbv2.LightClientUpdate{
                AttestedHeader: nil,
                NextSyncCommittee: nil,
                NextSyncCommitteeBranch: nil,
                FinalizedHeader: nil,
                FinalityBranch: nil,
                SyncAggregate: nil,
                SignatureSlot: 9,
            },
        },
    }

    for i, update := range updates {
        err := db.SaveLightClientUpdate(ctx, uint64(i+1), update)
        require.NoError(t, err)
    }

    // Retrieve the updates
    retrievedUpdates, err := db.LightClientUpdates(ctx, 1, 2)
    require.NoError(t, err)
    require.Equal(t, 2, len(retrievedUpdates))
    for i, update := range updates[:2] {
        require.Equal(t, update.Data.SignatureSlot, retrievedUpdates[uint64(i+1)].Data.SignatureSlot, "retrieved update does not match saved update")
    }
}

func TestStore_LightClientUpdate_MissingPeriods_SimpleData(t *testing.T) {
    db := setupDB(t)
    ctx := context.Background()
    updates := []*ethpbv2.LightClientUpdateWithVersion{
        {
            Version: 1,
            Data: &ethpbv2.LightClientUpdate{
                AttestedHeader: nil,
                NextSyncCommittee: nil,
                NextSyncCommitteeBranch: nil,
                FinalizedHeader: nil,
                FinalityBranch: nil,
                SyncAggregate: nil,
                SignatureSlot: 7,
            },
        },
        {
            Version: 1,
            Data: &ethpbv2.LightClientUpdate{
                AttestedHeader: nil,
                NextSyncCommittee: nil,
                NextSyncCommitteeBranch: nil,
                FinalizedHeader: nil,
                FinalityBranch: nil,
                SyncAggregate: nil,
                SignatureSlot: 8,
            },
        },
        {
            Version: 1,
            Data: &ethpbv2.LightClientUpdate{
                AttestedHeader: nil,
                NextSyncCommittee: nil,
                NextSyncCommitteeBranch: nil,
                FinalizedHeader: nil,
                FinalityBranch: nil,
                SyncAggregate: nil,
                SignatureSlot: 11,
            },
        },
        {
            Version: 1,
            Data: &ethpbv2.LightClientUpdate{
                AttestedHeader: nil,
                NextSyncCommittee: nil,
                NextSyncCommitteeBranch: nil,
                FinalizedHeader: nil,
                FinalityBranch: nil,
                SyncAggregate: nil,
                SignatureSlot: 12,
            },
        },
    }

    for _, update := range updates {
        err := db.SaveLightClientUpdate(ctx, uint64(update.Data.SignatureSlot), update)
        require.NoError(t, err)
    }

    // Retrieve the updates
    retrievedUpdates, err := db.LightClientUpdates(ctx, 7, 12)
    require.NoError(t, err)
    require.Equal(t, 4, len(retrievedUpdates))
    for _, update := range updates {
        require.Equal(t, update.Data.SignatureSlot, retrievedUpdates[uint64(update.Data.SignatureSlot)].Data.SignatureSlot, "retrieved update does not match saved update")
    }

    // Retrieve the updates from the middle
    retrievedUpdates, err = db.LightClientUpdates(ctx, 8, 12)
    require.NoError(t, err)
    require.Equal(t, 3, len(retrievedUpdates))
    require.Equal(t, updates[1].Data.SignatureSlot, retrievedUpdates[8].Data.SignatureSlot, "retrieved update does not match saved update")
    require.Equal(t, updates[2].Data.SignatureSlot, retrievedUpdates[11].Data.SignatureSlot, "retrieved update does not match saved update")
    require.Equal(t, updates[3].Data.SignatureSlot, retrievedUpdates[12].Data.SignatureSlot, "retrieved update does not match saved update")

    // Retrieve the updates from after the missing period
    retrievedUpdates, err = db.LightClientUpdates(ctx, 11, 12)
    require.NoError(t, err)
    require.Equal(t, 2, len(retrievedUpdates))
    require.Equal(t, updates[2].Data.SignatureSlot, retrievedUpdates[11].Data.SignatureSlot, "retrieved update does not match saved update")
    require.Equal(t, updates[3].Data.SignatureSlot, retrievedUpdates[12].Data.SignatureSlot, "retrieved update does not match saved update")

    // Retrieve the updates from before the missing period to after the missing period
    retrievedUpdates, err = db.LightClientUpdates(ctx, 3, 15)
    require.NoError(t, err)
    require.Equal(t, 4, len(retrievedUpdates))
    require.Equal(t, updates[0].Data.SignatureSlot, retrievedUpdates[7].Data.SignatureSlot, "retrieved update does not match saved update")
    require.Equal(t, updates[1].Data.SignatureSlot, retrievedUpdates[8].Data.SignatureSlot, "retrieved update does not match saved update")
    require.Equal(t, updates[2].Data.SignatureSlot, retrievedUpdates[11].Data.SignatureSlot, "retrieved update does not match saved update")
    require.Equal(t, updates[3].Data.SignatureSlot, retrievedUpdates[12].Data.SignatureSlot, "retrieved update does not match saved update")
}

func TestStore_LightClientUpdate_EmptyDB(t *testing.T) {
    db := setupDB(t)
    ctx := context.Background()

    // Retrieve the updates
    retrievedUpdates, err := db.LightClientUpdates(ctx, 1, 3)
    require.IsNil(t, err)
    require.Equal(t, 0, len(retrievedUpdates))
}

func TestStore_LightClientUpdate_MissingPeriodsAtTheEnd_SimpleData(t *testing.T) {
    db := setupDB(t)
    ctx := context.Background()

    for i := 1; i < 4; i++ {
        update := &ethpbv2.LightClientUpdateWithVersion{
            Version: 1,
            Data: &ethpbv2.LightClientUpdate{
                AttestedHeader: nil,
                NextSyncCommittee: nil,
                NextSyncCommitteeBranch: nil,
                FinalizedHeader: nil,
                FinalityBranch: nil,
                SyncAggregate: nil,
                SignatureSlot: primitives.Slot(uint64(i)),
            },
        }
        err := db.SaveLightClientUpdate(ctx, uint64(i), update)
        require.NoError(t, err)
    }
    for i := 7; i < 10; i++ {
        update := &ethpbv2.LightClientUpdateWithVersion{
            Version: 1,
            Data: &ethpbv2.LightClientUpdate{
                AttestedHeader: nil,
                NextSyncCommittee: nil,
                NextSyncCommitteeBranch: nil,
                FinalizedHeader: nil,
                FinalityBranch: nil,
                SyncAggregate: nil,
                SignatureSlot: primitives.Slot(uint64(i)),
            },
        }
        err := db.SaveLightClientUpdate(ctx, uint64(i), update)
        require.NoError(t, err)
    }

    // Retrieve the updates from 1 to 5
    retrievedUpdates, err := db.LightClientUpdates(ctx, 1, 5)
    require.NoError(t, err)
    require.Equal(t, 3, len(retrievedUpdates))
    require.Equal(t, primitives.Slot(1), retrievedUpdates[1].Data.SignatureSlot, "retrieved update does not match saved update")
    require.Equal(t, primitives.Slot(2), retrievedUpdates[2].Data.SignatureSlot, "retrieved update does not match saved update")
    require.Equal(t, primitives.Slot(3), retrievedUpdates[3].Data.SignatureSlot, "retrieved update does not match saved update")
}

func setupLightClientTestDB(t *testing.T) (*Store, context.Context) {
    db := setupDB(t)
    ctx := context.Background()

    for i := 10; i < 101; i++ { // 10 to 100
        update := &ethpbv2.LightClientUpdateWithVersion{
            Version: 1,
            Data: &ethpbv2.LightClientUpdate{
                AttestedHeader: nil,
                NextSyncCommittee: nil,
                NextSyncCommitteeBranch: nil,
                FinalizedHeader: nil,
                FinalityBranch: nil,
                SyncAggregate: nil,
                SignatureSlot: primitives.Slot(uint64(i)),
            },
        }
        err := db.SaveLightClientUpdate(ctx, uint64(i), update)
        require.NoError(t, err)
    }

    for i := 110; i < 201; i++ { // 110 to 200
        update := &ethpbv2.LightClientUpdateWithVersion{
            Version: 1,
            Data: &ethpbv2.LightClientUpdate{
                AttestedHeader: nil,
                NextSyncCommittee: nil,
                NextSyncCommitteeBranch: nil,
                FinalizedHeader: nil,
                FinalityBranch: nil,
                SyncAggregate: nil,
                SignatureSlot: primitives.Slot(uint64(i)),
            },
        }
        err := db.SaveLightClientUpdate(ctx, uint64(i), update)
        require.NoError(t, err)
    }

    return db, ctx
}

func TestStore_LightClientUpdate_MissingPeriodsInTheMiddleDistributed(t *testing.T) {
    db, ctx := setupLightClientTestDB(t)

    // Retrieve the updates - should fail because of missing periods in the middle
    retrievedUpdates, err := db.LightClientUpdates(ctx, 1, 300)
    require.NoError(t, err)
    require.Equal(t, 91*2, len(retrievedUpdates))
    for i := 10; i < 101; i++ {
        require.Equal(t, primitives.Slot(uint64(i)), retrievedUpdates[uint64(i)].Data.SignatureSlot, "retrieved update does not match saved update")
    }
    for i := 110; i < 201; i++ {
        require.Equal(t, primitives.Slot(uint64(i)), retrievedUpdates[uint64(i)].Data.SignatureSlot, "retrieved update does not match saved update")
    }
}

func TestStore_LightClientUpdate_RetrieveValidRangeFromStart(t *testing.T) {
    db, ctx := setupLightClientTestDB(t)

    // retrieve 1 to 100 - should work because all periods are present after the firstPeriodInDB > startPeriod
    retrievedUpdates, err := db.LightClientUpdates(ctx, 1, 100)
    require.NoError(t, err)
    require.Equal(t, 91, len(retrievedUpdates))
    for i := 10; i < 101; i++ {
        require.Equal(t, primitives.Slot(uint64(i)), retrievedUpdates[uint64(i)].Data.SignatureSlot, "retrieved update does not match saved update")
    }
}

func TestStore_LightClientUpdate_RetrieveValidRangeInTheMiddle(t *testing.T) {
    db, ctx := setupLightClientTestDB(t)

    // retrieve 110 to 200 - should work because all periods are present
    retrievedUpdates, err := db.LightClientUpdates(ctx, 110, 200)
    require.NoError(t, err)
    require.Equal(t, 91, len(retrievedUpdates))
    for i := 110; i < 201; i++ {
        require.Equal(t, primitives.Slot(uint64(i)), retrievedUpdates[uint64(i)].Data.SignatureSlot, "retrieved update does not match saved update")
    }
}

func TestStore_LightClientUpdate_MissingPeriodInTheMiddleConcentrated(t *testing.T) {
    db, ctx := setupLightClientTestDB(t)

    // retrieve 100 to 200
    retrievedUpdates, err := db.LightClientUpdates(ctx, 100, 200)
    require.NoError(t, err)
    require.Equal(t, 92, len(retrievedUpdates))
    require.Equal(t, primitives.Slot(100), retrievedUpdates[100].Data.SignatureSlot, "retrieved update does not match saved update")
    for i := 110; i < 201; i++ {
        require.Equal(t, primitives.Slot(uint64(i)), retrievedUpdates[uint64(i)].Data.SignatureSlot, "retrieved update does not match saved update")
    }
}

func TestStore_LightClientUpdate_MissingPeriodsAtTheEnd(t *testing.T) {
    db, ctx := setupLightClientTestDB(t)

    // retrieve 10 to 109
    retrievedUpdates, err := db.LightClientUpdates(ctx, 10, 109)
    require.NoError(t, err)
    require.Equal(t, 91, len(retrievedUpdates))
    for i := 10; i < 101; i++ {
        require.Equal(t, primitives.Slot(uint64(i)), retrievedUpdates[uint64(i)].Data.SignatureSlot, "retrieved update does not match saved update")
    }
}

func TestStore_LightClientUpdate_MissingPeriodsAtTheBeginning(t *testing.T) {
    db, ctx := setupLightClientTestDB(t)

    // retrieve 105 to 200
    retrievedUpdates, err := db.LightClientUpdates(ctx, 105, 200)
    require.NoError(t, err)
    require.Equal(t, 91, len(retrievedUpdates))
    for i := 110; i < 201; i++ {
        require.Equal(t, primitives.Slot(uint64(i)), retrievedUpdates[uint64(i)].Data.SignatureSlot, "retrieved update does not match saved update")
    }
}

func TestStore_LightClientUpdate_StartPeriodGreaterThanLastPeriod(t *testing.T) {
    db, ctx := setupLightClientTestDB(t)

    // retrieve 300 to 400
    retrievedUpdates, err := db.LightClientUpdates(ctx, 300, 400)
    require.NoError(t, err)
    require.Equal(t, 0, len(retrievedUpdates))
}
@@ -17,9 +17,6 @@ var (
    feeRecipientBucket = []byte("fee-recipient")
    registrationBucket = []byte("registration")

    // Light Client Updates Bucket
    lightClientUpdatesBucket = []byte("light-client-updates")

    // Deprecated: This bucket was migrated in PR 6461. Do not use, except for migrations.
    slotsHasObjectBucket = []byte("slots-has-objects")
    // Deprecated: This bucket was migrated in PR 6461. Do not use, except for migrations.

@@ -4,9 +4,9 @@ import (
    "context"

    "github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
    "github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
    ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
    bolt "go.etcd.io/bbolt"
    "go.opencensus.io/trace"
)

// SaveStateSummary saves a state summary object to the DB.

@@ -6,8 +6,8 @@ import (

    "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
    "github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
    bolt "go.etcd.io/bbolt"
    "go.opencensus.io/trace"
)

// lookupValuesForIndices takes in a list of indices and looks up

@@ -3,9 +3,9 @@ package kv
import (
    "context"

    "github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
    ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
    bolt "go.etcd.io/bbolt"
    "go.opencensus.io/trace"
)

// LastValidatedCheckpoint returns the latest fully validated checkpoint in beacon chain.

@@ -50,7 +50,6 @@ go_library(
        "//io/logs:go_default_library",
        "//monitoring/clientstats:go_default_library",
        "//monitoring/tracing:go_default_library",
        "//monitoring/tracing/trace:go_default_library",
        "//network:go_default_library",
        "//network/authorization:go_default_library",
        "//proto/engine/v1:go_default_library",
@@ -71,6 +70,7 @@ go_library(
        "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@io_k8s_client_go//tools/cache:go_default_library",
        "@io_opencensus_go//trace:go_default_library",
        "@org_golang_google_protobuf//proto:go_default_library",
    ],
)

@@ -10,7 +10,7 @@ import (
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/execution/types"
    "github.com/prysmaticlabs/prysm/v5/config/params"
    "github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
    "github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
    "go.opencensus.io/trace"
)

// searchThreshold to apply for when searching for blocks of a particular time. If the buffer

@@ -21,11 +21,11 @@ import (
    payloadattribute "github.com/prysmaticlabs/prysm/v5/consensus-types/payload-attribute"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
    "github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
    "github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
    pb "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
    "github.com/prysmaticlabs/prysm/v5/runtime/version"
    "github.com/prysmaticlabs/prysm/v5/time/slots"
    "github.com/sirupsen/logrus"
    "go.opencensus.io/trace"
    "google.golang.org/protobuf/proto"
)

@@ -33,7 +33,6 @@ go_library(
        "//consensus-types/forkchoice:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//monitoring/tracing/trace:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//runtime/version:go_default_library",
        "//time/slots:go_default_library",
@@ -41,6 +40,7 @@ go_library(
        "@com_github_prometheus_client_golang//prometheus:go_default_library",
        "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@io_opencensus_go//trace:go_default_library",
    ],
)

@@ -15,11 +15,11 @@ import (
    forkchoice2 "github.com/prysmaticlabs/prysm/v5/consensus-types/forkchoice"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
    "github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
    "github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
    ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/v5/runtime/version"
    "github.com/prysmaticlabs/prysm/v5/time/slots"
    "github.com/sirupsen/logrus"
    "go.opencensus.io/trace"
)

// New initializes a new fork choice store.

@@ -9,8 +9,8 @@ import (
    fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
    "github.com/prysmaticlabs/prysm/v5/config/params"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
    "github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
    "github.com/prysmaticlabs/prysm/v5/time/slots"
    "go.opencensus.io/trace"
)

// head starts from justified root and then follows the best descendant links

@@ -132,7 +132,7 @@ func configureEth1Config(cliCtx *cli.Context) error {
}

func configureNetwork(cliCtx *cli.Context) {
    if cliCtx.IsSet(cmd.BootstrapNode.Name) {
    if len(cliCtx.StringSlice(cmd.BootstrapNode.Name)) > 0 {
        c := params.BeaconNetworkConfig()
        c.BootstrapNodes = cliCtx.StringSlice(cmd.BootstrapNode.Name)
        params.OverrideBeaconNetworkConfig(c)

@@ -18,6 +18,7 @@ go_library(
        "handshake.go",
        "info.go",
        "interfaces.go",
        "iterator.go",
        "log.go",
        "message_id.go",
        "monitoring.go",
@@ -68,7 +69,6 @@ go_library(
        "//io/file:go_default_library",
        "//math:go_default_library",
        "//monitoring/tracing:go_default_library",
        "//monitoring/tracing/trace:go_default_library",
        "//network:go_default_library",
        "//network/forks:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",

@@ -9,17 +9,18 @@ import (

    "github.com/pkg/errors"
    ssz "github.com/prysmaticlabs/fastssz"
    "github.com/sirupsen/logrus"
    "go.opencensus.io/trace"
    "google.golang.org/protobuf/proto"

    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/altair"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
    fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
    "github.com/prysmaticlabs/prysm/v5/config/params"
    "github.com/prysmaticlabs/prysm/v5/crypto/hash"
    "github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
    "github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
    ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/v5/time/slots"
    "github.com/sirupsen/logrus"
    "google.golang.org/protobuf/proto"
)

// ErrMessageNotMapped occurs on a Broadcast attempt when a message has not been defined in the

@@ -230,11 +230,11 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
    require.NoError(t, err)
    defer bootListener.Close()

    // Use smaller batch size for testing.
    currentBatchSize := batchSize
    batchSize = 2
    // Use shorter period for testing.
    currentPeriod := pollingPeriod
    pollingPeriod = 1 * time.Second
    defer func() {
        batchSize = currentBatchSize
        pollingPeriod = currentPeriod
    }()

    bootNode := bootListener.Self()

@@ -33,7 +33,7 @@ func (*Service) InterceptPeerDial(_ peer.ID) (allow bool) {
// multiaddr for the given peer.
func (s *Service) InterceptAddrDial(pid peer.ID, m multiaddr.Multiaddr) (allow bool) {
    // Disallow bad peers from dialing in.
    if s.peers.IsBad(pid) != nil {
    if s.peers.IsBad(pid) {
        return false
    }
    return filterConnections(s.addrFilter, m)

@@ -72,10 +72,26 @@ loop:
    return validPeers, nil
}

func (s *Service) custodyCountFromRemotePeerEnr(pid peer.ID) uint64 {
// CustodyCountFromRemotePeer retrieves the custody count from a remote peer.
func (s *Service) CustodyCountFromRemotePeer(pid peer.ID) uint64 {
    // By default, we assume the peer custodies the minimum number of subnets.
    custodyRequirement := params.BeaconConfig().CustodyRequirement

    // First, try to get the custody count from the peer's metadata.
    metadata, err := s.peers.Metadata(pid)
    if err != nil {
        log.WithError(err).WithField("peerID", pid).Debug("Failed to retrieve metadata for peer, defaulting to the ENR value")
    }

    if metadata != nil {
        custodyCount := metadata.CustodySubnetCount()
        if custodyCount > 0 {
            return uint64(custodyCount)
        }
    }

    log.WithField("peerID", pid).Debug("Failed to retrieve custody count from metadata for peer, defaulting to the ENR value")

    // Retrieve the ENR of the peer.
    record, err := s.peers.ENR(pid)
    if err != nil {
@@ -100,30 +116,3 @@ func (s *Service) custodyCountFromRemotePeerEnr(pid peer.ID) uint64 {

    return custodyCount
}

// CustodyCountFromRemotePeer retrieves the custody count from a remote peer.
func (s *Service) CustodyCountFromRemotePeer(pid peer.ID) uint64 {
    // Try to get the custody count from the peer's metadata.
    metadata, err := s.peers.Metadata(pid)
    if err != nil {
        log.WithError(err).WithField("peerID", pid).Debug("Failed to retrieve metadata for peer, defaulting to the ENR value")
        return s.custodyCountFromRemotePeerEnr(pid)
    }

    // If the metadata is nil, default to the ENR value.
    if metadata == nil {
        log.WithField("peerID", pid).Debug("Metadata is nil, defaulting to the ENR value")
        return s.custodyCountFromRemotePeerEnr(pid)
    }

    // Get the custody subnets count from the metadata.
    custodyCount := metadata.CustodySubnetCount()

    // If the custody count is null, default to the ENR value.
    if custodyCount == 0 {
        log.WithField("peerID", pid).Debug("The custody count extracted from the metadata equals to 0, defaulting to the ENR value")
        return s.custodyCountFromRemotePeerEnr(pid)
    }

    return custodyCount
}
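Review note: the refactor collapses the two-function version into a single lookup with a fixed precedence: non-zero metadata first, then the peer's ENR, then the configured minimum (CustodyRequirement). A minimal sketch of that precedence, with assumed stand-in types rather than Prysm's real interfaces:

    package main

    import "fmt"

    // getter reports a custody count and whether a value was available.
    type getter func() (custodyCount uint64, ok bool)

    func custodyCount(fromMetadata, fromENR getter, minimum uint64) uint64 {
        // 1. Metadata wins when present and non-zero.
        if c, ok := fromMetadata(); ok && c > 0 {
            return c
        }
        // 2. Otherwise fall back to the value advertised in the ENR.
        if c, ok := fromENR(); ok && c > 0 {
            return c
        }
        // 3. Otherwise assume the protocol minimum.
        return minimum
    }

    func main() {
        noMeta := func() (uint64, bool) { return 0, false } // metadata missing or zero
        enr := func() (uint64, bool) { return 4, true }     // ENR advertises 4 subnets
        fmt.Println(custodyCount(noMeta, enr, 1))           // prints 4
    }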
@@ -121,12 +121,12 @@ func TestCustodyCountFromRemotePeer(t *testing.T) {

    // Define a metadata with zero custody.
    zeroMetadata := wrapper.WrappedMetadataV2(&pb.MetaDataV2{
        CustodySubnetCount: 0,
        CustodySubnetCount: []byte{0},
    })

    // Define a nominal metadata.
    nominalMetadata := wrapper.WrappedMetadataV2(&pb.MetaDataV2{
        CustodySubnetCount: expectedMetadata,
        CustodySubnetCount: []byte{uint8(expectedMetadata)},
    })

    testCases := []struct {

@@ -15,7 +15,6 @@ import (
    ma "github.com/multiformats/go-multiaddr"
    "github.com/pkg/errors"
    "github.com/prysmaticlabs/go-bitfield"
    "github.com/sirupsen/logrus"

    "github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
@@ -120,7 +119,7 @@ func (s *Service) RefreshPersistentSubnets() {
    s.updateSubnetRecordWithMetadata(bitV)

    // Ping all peers.
    s.pingPeersAndLogEnr()
    s.pingPeers()

    return
}
@@ -158,7 +157,7 @@ func (s *Service) RefreshPersistentSubnets() {
    s.updateSubnetRecordWithMetadataV2(bitV, bitS)

    // Ping all peers to inform them of new metadata
    s.pingPeersAndLogEnr()
    s.pingPeers()

    return
}
@@ -176,7 +175,7 @@ func (s *Service) RefreshPersistentSubnets() {
    // Get the custody subnet count in our metadata.
    inMetadataCustodySubnetCount := s.Metadata().CustodySubnetCount()

    isCustodySubnetCountUpToDate := (custodySubnetCount == inRecordCustodySubnetCount && custodySubnetCount == inMetadataCustodySubnetCount)
    isCustodySubnetCountUpToDate := custodySubnetCount == inRecordCustodySubnetCount && custodySubnetCount == uint64(inMetadataCustodySubnetCount)

    if isBitVUpToDate && isBitSUpToDate && isCustodySubnetCountUpToDate {
        // Nothing to do, return early.
@@ -187,31 +186,12 @@ func (s *Service) RefreshPersistentSubnets() {
    s.updateSubnetRecordWithMetadataV3(bitV, bitS, custodySubnetCount)

    // Ping all peers.
    s.pingPeersAndLogEnr()
    s.pingPeers()
}

// listen for new nodes watches for new nodes in the network and adds them to the peerstore.
func (s *Service) listenForNewNodes() {
    const minLogInterval = 1 * time.Minute

    peersSummary := func(threshold uint) (uint, uint) {
        // Retrieve how many active peers we have.
        activePeers := s.Peers().Active()
        activePeerCount := uint(len(activePeers))

        // Compute how many peers we are missing to reach the threshold.
        if activePeerCount >= threshold {
            return activePeerCount, 0
        }

        missingPeerCount := threshold - activePeerCount

        return activePeerCount, missingPeerCount
    }

    var lastLogTime time.Time

    iterator := s.dv5Listener.RandomNodes()
    iterator := filterNodes(s.ctx, s.dv5Listener.RandomNodes(), s.filterPeer)
    defer iterator.Close()

    for {
@@ -227,35 +207,17 @@ func (s *Service) listenForNewNodes() {
            time.Sleep(pollingPeriod)
            continue
        }

        // Compute the number of new peers we want to dial.
        activePeerCount, missingPeerCount := peersSummary(s.cfg.MaxPeers)

        fields := logrus.Fields{
            "currentPeerCount": activePeerCount,
            "targetPeerCount":  s.cfg.MaxPeers,
        }

        if missingPeerCount == 0 {
        wantedCount := s.wantedPeerDials()
        if wantedCount == 0 {
            log.Trace("Not looking for peers, at peer limit")
            time.Sleep(pollingPeriod)
            continue
        }

        if time.Since(lastLogTime) > minLogInterval {
            lastLogTime = time.Now()
            log.WithFields(fields).Debug("Searching for new active peers")
        }

        // Restrict dials if limit is applied.
        if flags.MaxDialIsActive() {
            maxConcurrentDials := uint(flags.Get().MaxConcurrentDials)
            missingPeerCount = min(missingPeerCount, maxConcurrentDials)
            wantedCount = min(wantedCount, flags.Get().MaxConcurrentDials)
        }

        // Search for new peers.
        wantedNodes := searchForPeers(iterator, batchSize, missingPeerCount, s.filterPeer)

        wantedNodes := enode.ReadNodes(iterator, wantedCount)
        wg := new(sync.WaitGroup)
        for i := 0; i < len(wantedNodes); i++ {
            node := wantedNodes[i]
@@ -380,8 +342,7 @@ func (s *Service) createLocalNode(
    }

    if params.PeerDASEnabled() {
        custodySubnetCount := peerdas.CustodySubnetCount()
        localNode.Set(peerdas.Csc(custodySubnetCount))
        localNode.Set(peerdas.Csc(peerdas.CustodySubnetCount()))
    }

    localNode.SetFallbackIP(ipAddr)
@@ -466,7 +427,7 @@ func (s *Service) filterPeer(node *enode.Node) bool {
    }

    // Ignore bad nodes.
    if s.peers.IsBad(peerData.ID) != nil {
    if s.peers.IsBad(peerData.ID) {
        return false
    }

@@ -526,6 +487,17 @@ func (s *Service) isPeerAtLimit(inbound bool) bool {
    return activePeers >= maxPeers || numOfConns >= maxPeers
}

func (s *Service) wantedPeerDials() int {
    maxPeers := int(s.cfg.MaxPeers)

    activePeers := len(s.Peers().Active())
    wantedCount := 0
    if maxPeers > activePeers {
        wantedCount = maxPeers - activePeers
    }
    return wantedCount
}
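Review note: the new discovery loop replaces the searchForPeers/batchSize machinery with a simpler pipeline: compute the dial budget, optionally cap it by the max-concurrent-dials flag, and read that many nodes from an iterator that is already filtered. A compact sketch of just the budgeting arithmetic (values are illustrative; min requires Go 1.21+, which the diff itself relies on):

    package main

    import "fmt"

    func wantedPeerDials(maxPeers, activePeers int) int {
        wanted := 0
        if maxPeers > activePeers {
            wanted = maxPeers - activePeers
        }
        return wanted
    }

    func main() {
        wanted := wantedPeerDials(70, 64) // 6 peers short of the target
        maxConcurrentDials := 2           // stand-in for a MaxConcurrentDials flag value
        if maxConcurrentDials > 0 {
            wanted = min(wanted, maxConcurrentDials)
        }
        // enode.ReadNodes(iterator, wanted) would then pull this many candidates.
        fmt.Println(wanted) // 2
    }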
// PeersFromStringAddrs converts peer raw ENRs into multiaddrs for p2p.
func PeersFromStringAddrs(addrs []string) ([]ma.Multiaddr, error) {
    var allAddrs []ma.Multiaddr

@@ -473,7 +473,7 @@ type check struct {
    metadataSequenceNumber uint64
    attestationSubnets     []uint64
    syncSubnets            []uint64
    custodySubnetCount     *uint64
    custodySubnetCount     *uint8
}

func checkPingCountCacheMetadataRecord(
@@ -542,7 +542,7 @@ func checkPingCountCacheMetadataRecord(

    if expected.custodySubnetCount != nil {
        // Check custody subnet count in ENR.
        var actualCustodySubnetCount uint64
        var actualCustodySubnetCount uint8
        err := service.dv5Listener.LocalNode().Node().Record().Load(enr.WithEntry(peerdas.CustodySubnetCountEnrKey, &actualCustodySubnetCount))
        require.NoError(t, err)
        require.Equal(t, *expected.custodySubnetCount, actualCustodySubnetCount)
@@ -565,7 +565,7 @@ func TestRefreshPersistentSubnets(t *testing.T) {
    eip7594ForkEpoch = 10
)

    custodySubnetCount := params.BeaconConfig().CustodyRequirement
    custodySubnetCount := uint8(params.BeaconConfig().CustodyRequirement)

    // Set up epochs.
    defaultCfg := params.BeaconConfig()

@@ -2,6 +2,7 @@ package p2p

import (
    "context"
    "errors"
    "fmt"
    "io"
    "sync"
@@ -9,7 +10,6 @@ import (

    "github.com/libp2p/go-libp2p/core/network"
    "github.com/libp2p/go-libp2p/core/peer"
    "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/peerdata"
    prysmTime "github.com/prysmaticlabs/prysm/v5/time"
@@ -22,57 +22,7 @@ const (
)

func peerMultiaddrString(conn network.Conn) string {
    remoteMultiaddr := conn.RemoteMultiaddr().String()
    remotePeerID := conn.RemotePeer().String()
    return fmt.Sprintf("%s/p2p/%s", remoteMultiaddr, remotePeerID)
}

func (s *Service) connectToPeer(conn network.Conn) {
    s.peers.SetConnectionState(conn.RemotePeer(), peers.PeerConnected)
    // Go through the handshake process.
    log.WithFields(logrus.Fields{
        "direction":   conn.Stat().Direction.String(),
        "multiAddr":   peerMultiaddrString(conn),
        "activePeers": len(s.peers.Active()),
    }).Debug("Initiate peer connection")
}

func (s *Service) disconnectFromPeer(
    conn network.Conn,
    goodByeFunc func(ctx context.Context, id peer.ID) error,
    badPeerErr error,
) {
    // Get the remote peer ID.
    remotePeerID := conn.RemotePeer()

    // Get the direction of the connection.
    direction := conn.Stat().Direction.String()

    // Get the remote peer multiaddr.
    remotePeerMultiAddr := peerMultiaddrString(conn)

    // Set the peer to disconnecting state.
    s.peers.SetConnectionState(remotePeerID, peers.PeerDisconnecting)

    // Only attempt a goodbye if we are still connected to the peer.
    if s.host.Network().Connectedness(remotePeerID) == network.Connected {
        if err := goodByeFunc(context.TODO(), remotePeerID); err != nil {
            log.WithError(err).Error("Unable to disconnect from peer")
        }
    }

    // Get the remaining active peers.
    activePeerCount := len(s.peers.Active())
    log.
        WithError(badPeerErr).
        WithFields(logrus.Fields{
            "multiaddr":            remotePeerMultiAddr,
            "direction":            direction,
            "remainingActivePeers": activePeerCount,
        }).
        Debug("Initiate peer disconnection")

    s.peers.SetConnectionState(remotePeerID, peers.PeerDisconnected)
    return fmt.Sprintf("%s/p2p/%s", conn.RemoteMultiaddr().String(), conn.RemotePeer().String())
}

// AddConnectionHandler adds a callback function which handles the connection with a
@@ -109,7 +59,16 @@ func (s *Service) AddConnectionHandler(reqFunc, goodByeFunc func(ctx context.Con
    s.host.Network().Notify(&network.NotifyBundle{
        ConnectedF: func(net network.Network, conn network.Conn) {
            remotePeer := conn.RemotePeer()

            disconnectFromPeer := func() {
                s.peers.SetConnectionState(remotePeer, peers.PeerDisconnecting)
                // Only attempt a goodbye if we are still connected to the peer.
                if s.host.Network().Connectedness(remotePeer) == network.Connected {
                    if err := goodByeFunc(context.TODO(), remotePeer); err != nil {
                        log.WithError(err).Error("Unable to disconnect from peer")
                    }
                }
                s.peers.SetConnectionState(remotePeer, peers.PeerDisconnected)
            }
            // Connection handler must be non-blocking as part of libp2p design.
            go func() {
                if peerHandshaking(remotePeer) {
@@ -118,21 +77,28 @@ func (s *Service) AddConnectionHandler(reqFunc, goodByeFunc func(ctx context.Con
                    return
                }
                defer peerFinished(remotePeer)

                // Handle the various pre-existing conditions that will result in us not handshaking.
                peerConnectionState, err := s.peers.ConnectionState(remotePeer)
                if err == nil && (peerConnectionState == peers.PeerConnected || peerConnectionState == peers.PeerConnecting) {
                    log.WithField("currentState", peerConnectionState).WithField("reason", "already active").Trace("Ignoring connection request")
                    return
                }

                s.peers.Add(nil /* ENR */, remotePeer, conn.RemoteMultiaddr(), conn.Stat().Direction)

                // Defensive check in the event we still get a bad peer.
                if err := s.peers.IsBad(remotePeer); err != nil {
                    s.disconnectFromPeer(conn, goodByeFunc, err)
                if s.peers.IsBad(remotePeer) {
                    log.WithField("reason", "bad peer").Trace("Ignoring connection request")
                    disconnectFromPeer()
                    return
                }
                validPeerConnection := func() {
                    s.peers.SetConnectionState(conn.RemotePeer(), peers.PeerConnected)
                    // Go through the handshake process.
                    log.WithFields(logrus.Fields{
                        "direction":   conn.Stat().Direction,
                        "multiAddr":   peerMultiaddrString(conn),
                        "activePeers": len(s.peers.Active()),
                    }).Debug("Peer connected")
                }

                // Do not perform handshake on inbound dials.
                if conn.Stat().Direction == network.DirInbound {
@@ -151,83 +117,63 @@ func (s *Service) AddConnectionHandler(reqFunc, goodByeFunc func(ctx context.Con
                    // If peer hasn't sent a status request, we disconnect with them
                    if _, err := s.peers.ChainState(remotePeer); errors.Is(err, peerdata.ErrPeerUnknown) || errors.Is(err, peerdata.ErrNoPeerStatus) {
                        statusMessageMissing.Inc()
                        s.disconnectFromPeer(conn, goodByeFunc, errors.Wrap(err, "chain state"))
                        disconnectFromPeer()
                        return
                    }

                    if peerExists {
                        updated, err := s.peers.ChainStateLastUpdated(remotePeer)
                        if err != nil {
                            s.disconnectFromPeer(conn, goodByeFunc, errors.Wrap(err, "chain state last updated"))
                            disconnectFromPeer()
                            return
                        }

                        // Exit if we don't receive any current status messages from peer.
                        if updated.IsZero() {
                            s.disconnectFromPeer(conn, goodByeFunc, errors.New("is zero"))
                            return
                        }

                        if !updated.After(currentTime) {
                            s.disconnectFromPeer(conn, goodByeFunc, errors.New("did not update"))
                        // exit if we don't receive any current status messages from
                        // peer.
                        if updated.IsZero() || !updated.After(currentTime) {
                            disconnectFromPeer()
                            return
                        }
                    }

                    s.connectToPeer(conn)
                    validPeerConnection()
                    return
                }

                s.peers.SetConnectionState(conn.RemotePeer(), peers.PeerConnecting)
                if err := reqFunc(context.TODO(), conn.RemotePeer()); err != nil && !errors.Is(err, io.EOF) {
                    s.disconnectFromPeer(conn, goodByeFunc, err)
                    log.WithError(err).Trace("Handshake failed")
                    disconnectFromPeer()
                    return
                }

                s.connectToPeer(conn)
                validPeerConnection()
            }()
        },
    })
}

// AddDisconnectionHandler disconnects from peers. It handles updating the peer status.
// This also calls the handler responsible for maintaining other parts of the sync or p2p system.
func (s *Service) AddDisconnectionHandler(handler func(ctx context.Context, id peer.ID) error) {
    s.host.Network().Notify(&network.NotifyBundle{
        DisconnectedF: func(net network.Network, conn network.Conn) {
            remotePeerMultiAddr := peerMultiaddrString(conn)
            peerID := conn.RemotePeer()
            direction := conn.Stat().Direction.String()

            log := log.WithFields(logrus.Fields{
                "multiAddr": remotePeerMultiAddr,
                "direction": direction,
            })

            log := log.WithField("multiAddr", peerMultiaddrString(conn))
            // Must be handled in a goroutine as this callback cannot be blocking.
            go func() {
                // Exit early if we are still connected to the peer.
                if net.Connectedness(peerID) == network.Connected {
                if net.Connectedness(conn.RemotePeer()) == network.Connected {
                    return
                }

                priorState, err := s.peers.ConnectionState(peerID)
                priorState, err := s.peers.ConnectionState(conn.RemotePeer())
                if err != nil {
                    // Can happen if the peer has already disconnected, so...
                    priorState = peers.PeerDisconnected
                }

                s.peers.SetConnectionState(peerID, peers.PeerDisconnecting)
                s.peers.SetConnectionState(conn.RemotePeer(), peers.PeerDisconnecting)
                if err := handler(context.TODO(), conn.RemotePeer()); err != nil {
                    log.WithError(err).Error("Disconnect handler failed")
                }

                s.peers.SetConnectionState(peerID, peers.PeerDisconnected)

                s.peers.SetConnectionState(conn.RemotePeer(), peers.PeerDisconnected)
                // Only log disconnections if we were fully connected.
                if priorState == peers.PeerConnected {
                    activePeersCount := len(s.peers.Active())
                    log.WithField("remainingActivePeers", activePeersCount).Debug("Peer disconnected")
                    log.WithField("activePeers", len(s.peers.Active())).Debug("Peer disconnected")
                }
            }()
        },

beacon-chain/p2p/iterator.go (new file, 36 lines)
@@ -0,0 +1,36 @@
package p2p

import (
    "context"

    "github.com/ethereum/go-ethereum/p2p/enode"
)

// filterNodes wraps an iterator such that Next only returns nodes for which
// the 'check' function returns true. This custom implementation also
// checks for context deadlines so that in the event the parent context has
// expired, we do exit from the search rather than perform more network
// lookups for additional peers.
func filterNodes(ctx context.Context, it enode.Iterator, check func(*enode.Node) bool) enode.Iterator {
    return &filterIter{ctx, it, check}
}

type filterIter struct {
    context.Context
    enode.Iterator
    check func(*enode.Node) bool
}

// Next looks up for the next valid node according to our
// filter criteria.
func (f *filterIter) Next() bool {
    for f.Iterator.Next() {
        if f.Context.Err() != nil {
            return false
        }
        if f.check(f.Node()) {
            return true
        }
    }
    return false
}
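Review note: a standalone sketch of the same filtering-iterator pattern, using a plain slice-backed iterator instead of go-ethereum's enode.Iterator so it runs on its own; all names here are illustrative:

    package main

    import (
        "context"
        "fmt"
    )

    // intIter walks a fixed slice of ints.
    type intIter struct {
        vals []int
        cur  int
        pos  int
    }

    func (it *intIter) Next() bool {
        if it.pos >= len(it.vals) {
            return false
        }
        it.cur = it.vals[it.pos]
        it.pos++
        return true
    }

    // filterIntIter skips values failing check and stops once ctx is done,
    // mirroring filterIter above.
    type filterIntIter struct {
        ctx   context.Context
        inner *intIter
        check func(int) bool
    }

    func (f *filterIntIter) Next() bool {
        for f.inner.Next() {
            if f.ctx.Err() != nil { // bail out once the parent context expires
                return false
            }
            if f.check(f.inner.cur) {
                return true
            }
        }
        return false
    }

    func main() {
        it := &filterIntIter{
            ctx:   context.Background(),
            inner: &intIter{vals: []int{1, 2, 3, 4, 5, 6}},
            check: func(v int) bool { return v%2 == 0 },
        }
        for it.Next() {
            fmt.Println(it.inner.cur) // prints 2, 4, 6
        }
    }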
@@ -20,7 +20,6 @@ go_library(
        "//crypto/rand:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "@com_github_libp2p_go_libp2p//core/peer:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
    ],
)

@@ -4,7 +4,6 @@ import (
    "time"

    "github.com/libp2p/go-libp2p/core/peer"
    "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/peerdata"
)

@@ -62,7 +61,7 @@ func (s *BadResponsesScorer) Score(pid peer.ID) float64 {

// scoreNoLock is a lock-free version of Score.
func (s *BadResponsesScorer) scoreNoLock(pid peer.ID) float64 {
-   if s.isBadPeerNoLock(pid) != nil {
+   if s.isBadPeerNoLock(pid) {
        return BadPeerScore
    }
    score := float64(0)
@@ -117,24 +116,18 @@ func (s *BadResponsesScorer) Increment(pid peer.ID) {

// IsBadPeer states if the peer is to be considered bad.
// If the peer is unknown this will return `false`, which makes using this function easier than returning an error.
-func (s *BadResponsesScorer) IsBadPeer(pid peer.ID) error {
+func (s *BadResponsesScorer) IsBadPeer(pid peer.ID) bool {
    s.store.RLock()
    defer s.store.RUnlock()

    return s.isBadPeerNoLock(pid)
}

// isBadPeerNoLock is a lock-free version of IsBadPeer.
-func (s *BadResponsesScorer) isBadPeerNoLock(pid peer.ID) error {
+func (s *BadResponsesScorer) isBadPeerNoLock(pid peer.ID) bool {
    if peerData, ok := s.store.PeerData(pid); ok {
-       if peerData.BadResponses >= s.config.Threshold {
-           return errors.Errorf("peer exceeded bad responses threshold: got %d, threshold %d", peerData.BadResponses, s.config.Threshold)
-       }
-
-       return nil
+       return peerData.BadResponses >= s.config.Threshold
    }

-   return nil
+   return false
}
// BadPeers returns the peers that are considered bad.
@@ -144,7 +137,7 @@ func (s *BadResponsesScorer) BadPeers() []peer.ID {

    badPeers := make([]peer.ID, 0)
    for pid := range s.store.Peers() {
-       if s.isBadPeerNoLock(pid) != nil {
+       if s.isBadPeerNoLock(pid) {
            badPeers = append(badPeers, pid)
        }
    }
@@ -33,19 +33,19 @@ func TestScorers_BadResponses_Score(t *testing.T) {
    assert.Equal(t, 0., scorer.Score(pid), "Unexpected score for unregistered peer")

    scorer.Increment(pid)
-   assert.NoError(t, scorer.IsBadPeer(pid))
+   assert.Equal(t, false, scorer.IsBadPeer(pid))
    assert.Equal(t, -2.5, scorer.Score(pid))

    scorer.Increment(pid)
-   assert.NoError(t, scorer.IsBadPeer(pid))
+   assert.Equal(t, false, scorer.IsBadPeer(pid))
    assert.Equal(t, float64(-5), scorer.Score(pid))

    scorer.Increment(pid)
-   assert.NoError(t, scorer.IsBadPeer(pid))
+   assert.Equal(t, false, scorer.IsBadPeer(pid))
    assert.Equal(t, float64(-7.5), scorer.Score(pid))

    scorer.Increment(pid)
-   assert.NotNil(t, scorer.IsBadPeer(pid))
+   assert.Equal(t, true, scorer.IsBadPeer(pid))
    assert.Equal(t, -100.0, scorer.Score(pid))
}
@@ -152,17 +152,17 @@ func TestScorers_BadResponses_IsBadPeer(t *testing.T) {
    })
    scorer := peerStatuses.Scorers().BadResponsesScorer()
    pid := peer.ID("peer1")
-   assert.NoError(t, scorer.IsBadPeer(pid))
+   assert.Equal(t, false, scorer.IsBadPeer(pid))

    peerStatuses.Add(nil, pid, nil, network.DirUnknown)
-   assert.NoError(t, scorer.IsBadPeer(pid))
+   assert.Equal(t, false, scorer.IsBadPeer(pid))

    for i := 0; i < scorers.DefaultBadResponsesThreshold; i++ {
        scorer.Increment(pid)
        if i == scorers.DefaultBadResponsesThreshold-1 {
-           assert.NotNil(t, scorer.IsBadPeer(pid), "Unexpected peer status")
+           assert.Equal(t, true, scorer.IsBadPeer(pid), "Unexpected peer status")
        } else {
-           assert.NoError(t, scorer.IsBadPeer(pid), "Unexpected peer status")
+           assert.Equal(t, false, scorer.IsBadPeer(pid), "Unexpected peer status")
        }
    }
}
@@ -185,11 +185,11 @@ func TestScorers_BadResponses_BadPeers(t *testing.T) {
        scorer.Increment(pids[2])
        scorer.Increment(pids[4])
    }
-   assert.NoError(t, scorer.IsBadPeer(pids[0]), "Invalid peer status")
-   assert.NotNil(t, scorer.IsBadPeer(pids[1]), "Invalid peer status")
-   assert.NotNil(t, scorer.IsBadPeer(pids[2]), "Invalid peer status")
-   assert.NoError(t, scorer.IsBadPeer(pids[3]), "Invalid peer status")
-   assert.NotNil(t, scorer.IsBadPeer(pids[4]), "Invalid peer status")
+   assert.Equal(t, false, scorer.IsBadPeer(pids[0]), "Invalid peer status")
+   assert.Equal(t, true, scorer.IsBadPeer(pids[1]), "Invalid peer status")
+   assert.Equal(t, true, scorer.IsBadPeer(pids[2]), "Invalid peer status")
+   assert.Equal(t, false, scorer.IsBadPeer(pids[3]), "Invalid peer status")
+   assert.Equal(t, true, scorer.IsBadPeer(pids[4]), "Invalid peer status")
    want := []peer.ID{pids[1], pids[2], pids[4]}
    badPeers := scorer.BadPeers()
    sort.Slice(badPeers, func(i, j int) bool {
@@ -177,8 +177,8 @@ func (s *BlockProviderScorer) processedBlocksNoLock(pid peer.ID) uint64 {
// Block provider scorer cannot guarantee that a lower score of a peer is indeed a sign of a bad peer.
// Therefore this scorer never marks peers as bad, and relies on scores to probabilistically sort
// out low-scorers (see the WeightSorted method).
-func (*BlockProviderScorer) IsBadPeer(_ peer.ID) error {
-   return nil
+func (*BlockProviderScorer) IsBadPeer(_ peer.ID) bool {
+   return false
}

// BadPeers returns the peers that are considered bad.
@@ -481,8 +481,8 @@ func TestScorers_BlockProvider_BadPeerMarking(t *testing.T) {
    })
    scorer := peerStatuses.Scorers().BlockProviderScorer()

-   assert.NoError(t, scorer.IsBadPeer("peer1"), "Unexpected status for unregistered peer")
+   assert.Equal(t, false, scorer.IsBadPeer("peer1"), "Unexpected status for unregistered peer")
    scorer.IncrementProcessedBlocks("peer1", 64)
-   assert.NoError(t, scorer.IsBadPeer("peer1"))
+   assert.Equal(t, false, scorer.IsBadPeer("peer1"))
    assert.Equal(t, 0, len(scorer.BadPeers()))
}
@@ -2,7 +2,6 @@ package scorers

import (
    "github.com/libp2p/go-libp2p/core/peer"
-   "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/peerdata"
    pbrpc "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)
@@ -52,24 +51,19 @@ func (s *GossipScorer) scoreNoLock(pid peer.ID) float64 {
}

// IsBadPeer states if the peer is to be considered bad.
-func (s *GossipScorer) IsBadPeer(pid peer.ID) error {
+func (s *GossipScorer) IsBadPeer(pid peer.ID) bool {
    s.store.RLock()
    defer s.store.RUnlock()
    return s.isBadPeerNoLock(pid)
}

// isBadPeerNoLock is a lock-free version of IsBadPeer.
-func (s *GossipScorer) isBadPeerNoLock(pid peer.ID) error {
+func (s *GossipScorer) isBadPeerNoLock(pid peer.ID) bool {
    peerData, ok := s.store.PeerData(pid)
    if !ok {
-       return nil
+       return false
    }

-   if peerData.GossipScore < gossipThreshold {
-       return errors.Errorf("gossip score below threshold: got %f - threshold %f", peerData.GossipScore, gossipThreshold)
-   }
-
-   return nil
+   return peerData.GossipScore < gossipThreshold
}

// BadPeers returns the peers that are considered bad.
@@ -79,7 +73,7 @@ func (s *GossipScorer) BadPeers() []peer.ID {

    badPeers := make([]peer.ID, 0)
    for pid := range s.store.Peers() {
-       if s.isBadPeerNoLock(pid) != nil {
+       if s.isBadPeerNoLock(pid) {
            badPeers = append(badPeers, pid)
        }
    }
@@ -34,7 +34,7 @@ func TestScorers_Gossip_Score(t *testing.T) {
            },
            check: func(scorer *scorers.GossipScorer) {
                assert.Equal(t, -101.0, scorer.Score("peer1"), "Unexpected score")
-               assert.NotNil(t, scorer.IsBadPeer("peer1"), "Unexpected good peer")
+               assert.Equal(t, true, scorer.IsBadPeer("peer1"), "Unexpected good peer")
            },
        },
        {
@@ -44,7 +44,7 @@ func TestScorers_Gossip_Score(t *testing.T) {
            },
            check: func(scorer *scorers.GossipScorer) {
                assert.Equal(t, 10.0, scorer.Score("peer1"), "Unexpected score")
-               assert.NoError(t, scorer.IsBadPeer("peer1"), "Unexpected bad peer")
+               assert.Equal(t, false, scorer.IsBadPeer("peer1"), "Unexpected bad peer")
                _, _, topicMap, err := scorer.GossipData("peer1")
                assert.NoError(t, err)
                assert.Equal(t, uint64(100), topicMap["a"].TimeInMesh, "incorrect time in mesh")
@@ -46,7 +46,7 @@ func (s *PeerStatusScorer) Score(pid peer.ID) float64 {

// scoreNoLock is a lock-free version of Score.
func (s *PeerStatusScorer) scoreNoLock(pid peer.ID) float64 {
-   if s.isBadPeerNoLock(pid) != nil {
+   if s.isBadPeerNoLock(pid) {
        return BadPeerScore
    }
    score := float64(0)
@@ -67,34 +67,30 @@ func (s *PeerStatusScorer) scoreNoLock(pid peer.ID) float64 {
}

// IsBadPeer states if the peer is to be considered bad.
-func (s *PeerStatusScorer) IsBadPeer(pid peer.ID) error {
+func (s *PeerStatusScorer) IsBadPeer(pid peer.ID) bool {
    s.store.RLock()
    defer s.store.RUnlock()

    return s.isBadPeerNoLock(pid)
}

// isBadPeerNoLock is a lock-free version of IsBadPeer.
-func (s *PeerStatusScorer) isBadPeerNoLock(pid peer.ID) error {
+func (s *PeerStatusScorer) isBadPeerNoLock(pid peer.ID) bool {
    peerData, ok := s.store.PeerData(pid)
    if !ok {
-       return nil
+       return false
    }

    // Mark peer as bad, if the latest error is one of the terminal ones.
    terminalErrs := []error{
        p2ptypes.ErrWrongForkDigestVersion,
        p2ptypes.ErrInvalidFinalizedRoot,
        p2ptypes.ErrInvalidRequest,
    }

    for _, err := range terminalErrs {
        if errors.Is(peerData.ChainStateValidationError, err) {
-           return err
+           return true
        }
    }

-   return nil
+   return false
}

// BadPeers returns the peers that are considered bad.
@@ -104,7 +100,7 @@ func (s *PeerStatusScorer) BadPeers() []peer.ID {

    badPeers := make([]peer.ID, 0)
    for pid := range s.store.Peers() {
-       if s.isBadPeerNoLock(pid) != nil {
+       if s.isBadPeerNoLock(pid) {
            badPeers = append(badPeers, pid)
        }
    }
@@ -140,12 +140,12 @@ func TestScorers_PeerStatus_IsBadPeer(t *testing.T) {
        ScorerParams: &scorers.Config{},
    })
    pid := peer.ID("peer1")
-   assert.NoError(t, peerStatuses.Scorers().IsBadPeer(pid))
-   assert.NoError(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid))
+   assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer(pid))
+   assert.Equal(t, false, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid))

    peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid, &pb.Status{}, p2ptypes.ErrWrongForkDigestVersion)
-   assert.NotNil(t, peerStatuses.Scorers().IsBadPeer(pid))
-   assert.NotNil(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid))
+   assert.Equal(t, true, peerStatuses.Scorers().IsBadPeer(pid))
+   assert.Equal(t, true, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid))
}

func TestScorers_PeerStatus_BadPeers(t *testing.T) {
@@ -155,22 +155,22 @@ func TestScorers_PeerStatus_BadPeers(t *testing.T) {
    pid1 := peer.ID("peer1")
    pid2 := peer.ID("peer2")
    pid3 := peer.ID("peer3")
-   assert.NoError(t, peerStatuses.Scorers().IsBadPeer(pid1))
-   assert.NoError(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid1))
-   assert.NoError(t, peerStatuses.Scorers().IsBadPeer(pid2))
-   assert.NoError(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid2))
-   assert.NoError(t, peerStatuses.Scorers().IsBadPeer(pid3))
-   assert.NoError(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid3))
+   assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer(pid1))
+   assert.Equal(t, false, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid1))
+   assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer(pid2))
+   assert.Equal(t, false, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid2))
+   assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer(pid3))
+   assert.Equal(t, false, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid3))

    peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid1, &pb.Status{}, p2ptypes.ErrWrongForkDigestVersion)
    peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid2, &pb.Status{}, nil)
    peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid3, &pb.Status{}, p2ptypes.ErrWrongForkDigestVersion)
-   assert.NotNil(t, peerStatuses.Scorers().IsBadPeer(pid1))
-   assert.NotNil(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid1))
-   assert.NoError(t, peerStatuses.Scorers().IsBadPeer(pid2))
-   assert.NoError(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid2))
-   assert.NotNil(t, peerStatuses.Scorers().IsBadPeer(pid3))
-   assert.NotNil(t, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid3))
+   assert.Equal(t, true, peerStatuses.Scorers().IsBadPeer(pid1))
+   assert.Equal(t, true, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid1))
+   assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer(pid2))
+   assert.Equal(t, false, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid2))
+   assert.Equal(t, true, peerStatuses.Scorers().IsBadPeer(pid3))
+   assert.Equal(t, true, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid3))
    assert.Equal(t, 2, len(peerStatuses.Scorers().PeerStatusScorer().BadPeers()))
    assert.Equal(t, 2, len(peerStatuses.Scorers().BadPeers()))
}
@@ -6,7 +6,6 @@ import (
    "time"

    "github.com/libp2p/go-libp2p/core/peer"
-   "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/peerdata"
    "github.com/prysmaticlabs/prysm/v5/config/features"
)
@@ -25,7 +24,7 @@ const BadPeerScore = gossipThreshold

// Scorer defines the minimum set of methods every peer scorer must expose.
type Scorer interface {
    Score(pid peer.ID) float64
-   IsBadPeer(pid peer.ID) error
+   IsBadPeer(pid peer.ID) bool
    BadPeers() []peer.ID
}
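To make the interface change concrete, here is a minimal sketch of a scorer satisfying the new bool-returning contract; the fixedScorer type is hypothetical, written only to illustrate the shape the refactor enforces:

package scorers

import "github.com/libp2p/go-libp2p/core/peer"

// fixedScorer is a toy Scorer: it never penalizes anyone. After this
// refactor, "not bad" is expressed as false rather than a nil error,
// so callers no longer inspect error values for a yes/no question.
type fixedScorer struct{}

func (fixedScorer) Score(_ peer.ID) float64  { return 0 }
func (fixedScorer) IsBadPeer(_ peer.ID) bool { return false }
func (fixedScorer) BadPeers() []peer.ID      { return nil }

// Compile-time check that fixedScorer implements the new interface.
var _ Scorer = fixedScorer{}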
@@ -125,29 +124,26 @@ func (s *Service) ScoreNoLock(pid peer.ID) float64 {
}

// IsBadPeer traverses all the scorers to see if any of them classifies the peer as bad.
-func (s *Service) IsBadPeer(pid peer.ID) error {
+func (s *Service) IsBadPeer(pid peer.ID) bool {
    s.store.RLock()
    defer s.store.RUnlock()
    return s.IsBadPeerNoLock(pid)
}

// IsBadPeerNoLock is a lock-free version of IsBadPeer.
-func (s *Service) IsBadPeerNoLock(pid peer.ID) error {
-   if err := s.scorers.badResponsesScorer.isBadPeerNoLock(pid); err != nil {
-       return errors.Wrap(err, "bad responses scorer")
+func (s *Service) IsBadPeerNoLock(pid peer.ID) bool {
+   if s.scorers.badResponsesScorer.isBadPeerNoLock(pid) {
+       return true
    }

-   if err := s.scorers.peerStatusScorer.isBadPeerNoLock(pid); err != nil {
-       return errors.Wrap(err, "peer status scorer")
+   if s.scorers.peerStatusScorer.isBadPeerNoLock(pid) {
+       return true
    }

    if features.Get().EnablePeerScorer {
-       if err := s.scorers.gossipScorer.isBadPeerNoLock(pid); err != nil {
-           return errors.Wrap(err, "gossip scorer")
+       if s.scorers.gossipScorer.isBadPeerNoLock(pid) {
+           return true
        }
    }

-   return nil
+   return false
}

// BadPeers returns the peers that are considered bad by any of the registered scorers.
@@ -157,7 +153,7 @@ func (s *Service) BadPeers() []peer.ID {

    badPeers := make([]peer.ID, 0)
    for pid := range s.store.Peers() {
-       if s.IsBadPeerNoLock(pid) != nil {
+       if s.IsBadPeerNoLock(pid) {
            badPeers = append(badPeers, pid)
        }
    }
@@ -237,7 +237,7 @@ func TestScorers_Service_loop(t *testing.T) {
    for i := 0; i < s1.Params().Threshold+5; i++ {
        s1.Increment(pid1)
    }
-   assert.NotNil(t, s1.IsBadPeer(pid1), "Peer should be marked as bad")
+   assert.Equal(t, true, s1.IsBadPeer(pid1), "Peer should be marked as bad")

    s2.IncrementProcessedBlocks("peer1", 221)
    assert.Equal(t, uint64(221), s2.ProcessedBlocks("peer1"))
@@ -252,7 +252,7 @@ func TestScorers_Service_loop(t *testing.T) {
    for {
        select {
        case <-ticker.C:
-           if s1.IsBadPeer(pid1) == nil && s2.ProcessedBlocks("peer1") == 0 {
+           if s1.IsBadPeer(pid1) == false && s2.ProcessedBlocks("peer1") == 0 {
                return
            }
        case <-ctx.Done():
@@ -263,7 +263,7 @@ func TestScorers_Service_loop(t *testing.T) {
    }()

    <-done
-   assert.NoError(t, s1.IsBadPeer(pid1), "Peer should not be marked as bad")
+   assert.Equal(t, false, s1.IsBadPeer(pid1), "Peer should not be marked as bad")
    assert.Equal(t, uint64(0), s2.ProcessedBlocks("peer1"), "No blocks are expected")
}
@@ -278,10 +278,10 @@ func TestScorers_Service_IsBadPeer(t *testing.T) {
        },
    })

-   assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer1"))
+   assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer("peer1"))
    peerStatuses.Scorers().BadResponsesScorer().Increment("peer1")
    peerStatuses.Scorers().BadResponsesScorer().Increment("peer1")
-   assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer1"))
+   assert.Equal(t, true, peerStatuses.Scorers().IsBadPeer("peer1"))
}

func TestScorers_Service_BadPeers(t *testing.T) {
@@ -295,16 +295,16 @@ func TestScorers_Service_BadPeers(t *testing.T) {
        },
    })

-   assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer1"))
-   assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer2"))
-   assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer3"))
+   assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer("peer1"))
+   assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer("peer2"))
+   assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer("peer3"))
    assert.Equal(t, 0, len(peerStatuses.Scorers().BadPeers()))
    for _, pid := range []peer.ID{"peer1", "peer3"} {
        peerStatuses.Scorers().BadResponsesScorer().Increment(pid)
        peerStatuses.Scorers().BadResponsesScorer().Increment(pid)
    }
-   assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer1"))
-   assert.NoError(t, peerStatuses.Scorers().IsBadPeer("peer2"))
-   assert.NotNil(t, peerStatuses.Scorers().IsBadPeer("peer3"))
+   assert.Equal(t, true, peerStatuses.Scorers().IsBadPeer("peer1"))
+   assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer("peer2"))
+   assert.Equal(t, true, peerStatuses.Scorers().IsBadPeer("peer3"))
    assert.Equal(t, 2, len(peerStatuses.Scorers().BadPeers()))
}
@@ -34,7 +34,6 @@ import (
    "github.com/libp2p/go-libp2p/core/peer"
    ma "github.com/multiformats/go-multiaddr"
    manet "github.com/multiformats/go-multiaddr/net"
-   "github.com/pkg/errors"
    "github.com/prysmaticlabs/go-bitfield"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/peerdata"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/scorers"
@@ -344,29 +343,19 @@ func (p *Status) ChainStateLastUpdated(pid peer.ID) (time.Time, error) {

// IsBad states if the peer is to be considered bad (by *any* of the registered scorers).
// If the peer is unknown this will return `false`, which makes using this function easier than returning an error.
-func (p *Status) IsBad(pid peer.ID) error {
+func (p *Status) IsBad(pid peer.ID) bool {
    p.store.RLock()
    defer p.store.RUnlock()

    return p.isBad(pid)
}

// isBad is the lock-free version of IsBad.
-func (p *Status) isBad(pid peer.ID) error {
+func (p *Status) isBad(pid peer.ID) bool {
    // Do not disconnect from trusted peers.
    if p.store.IsTrustedPeer(pid) {
-       return nil
+       return false
    }

-   if err := p.isfromBadIP(pid); err != nil {
-       return errors.Wrap(err, "peer is from a bad IP")
-   }
-
-   if err := p.scorers.IsBadPeerNoLock(pid); err != nil {
-       return errors.Wrap(err, "is bad peer no lock")
-   }
-
-   return nil
+   return p.isfromBadIP(pid) || p.scorers.IsBadPeerNoLock(pid)
}

// NextValidTime gets the earliest possible time it is to contact/dial
@@ -611,7 +600,7 @@ func (p *Status) Prune() {
        return
    }
    notBadPeer := func(pid peer.ID) bool {
-       return p.isBad(pid) == nil
+       return !p.isBad(pid)
    }
    notTrustedPeer := func(pid peer.ID) bool {
        return !p.isTrustedPeers(pid)
@@ -1001,28 +990,24 @@ func (p *Status) isTrustedPeers(pid peer.ID) bool {

// this method assumes the store lock is acquired before
// executing the method.
-func (p *Status) isfromBadIP(pid peer.ID) error {
+func (p *Status) isfromBadIP(pid peer.ID) bool {
    peerData, ok := p.store.PeerData(pid)
    if !ok {
-       return nil
+       return false
    }

    if peerData.Address == nil {
-       return nil
+       return false
    }

    ip, err := manet.ToIP(peerData.Address)
    if err != nil {
-       return errors.Wrap(err, "to ip")
+       return true
    }

    if val, ok := p.ipTracker[ip.String()]; ok {
        if val > CollocationLimit {
-           return errors.Errorf("colocation limit exceeded: got %d - limit %d", val, CollocationLimit)
+           return true
        }
    }

-   return nil
+   return false
}

func (p *Status) addIpToTracker(pid peer.ID) {
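The bool version drops the detail of why an IP was flagged, but the underlying check is just a counter compared against CollocationLimit. A self-contained sketch of that bookkeeping (the ipTracker type here is hypothetical; the real code keeps the map on *Status):

package main

import "fmt"

// collocationLimit mirrors CollocationLimit above: the number of peers
// tolerated behind one IP before additional ones count as bad.
const collocationLimit = 5

type ipTracker map[string]uint64

func (t ipTracker) add(ip string) { t[ip]++ }

// isBadIP mirrors the final check in isfromBadIP: strictly over the
// limit means bad; unknown IPs are fine.
func (t ipTracker) isBadIP(ip string) bool {
    return t[ip] > collocationLimit
}

func main() {
    t := ipTracker{}
    for i := 0; i < 6; i++ {
        t.add("203.0.113.7")
    }
    fmt.Println(t.isBadIP("203.0.113.7")) // true: 6 > 5
}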
@@ -347,7 +347,7 @@ func TestPeerBadResponses(t *testing.T) {
        require.NoError(t, err)
    }

-   assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")
+   assert.Equal(t, false, p.IsBad(id), "Peer marked as bad when should be good")

    address, err := ma.NewMultiaddr("/ip4/213.202.254.180/tcp/13000")
    require.NoError(t, err, "Failed to create address")
@@ -358,25 +358,25 @@ func TestPeerBadResponses(t *testing.T) {
    resBadResponses, err := scorer.Count(id)
    require.NoError(t, err)
    assert.Equal(t, 0, resBadResponses, "Unexpected bad responses")
-   assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")
+   assert.Equal(t, false, p.IsBad(id), "Peer marked as bad when should be good")

    scorer.Increment(id)
    resBadResponses, err = scorer.Count(id)
    require.NoError(t, err)
    assert.Equal(t, 1, resBadResponses, "Unexpected bad responses")
-   assert.NoError(t, p.IsBad(id), "Peer marked as bad when should be good")
+   assert.Equal(t, false, p.IsBad(id), "Peer marked as bad when should be good")

    scorer.Increment(id)
    resBadResponses, err = scorer.Count(id)
    require.NoError(t, err)
    assert.Equal(t, 2, resBadResponses, "Unexpected bad responses")
-   assert.NotNil(t, p.IsBad(id), "Peer not marked as bad when it should be")
+   assert.Equal(t, true, p.IsBad(id), "Peer not marked as bad when it should be")

    scorer.Increment(id)
    resBadResponses, err = scorer.Count(id)
    require.NoError(t, err)
    assert.Equal(t, 3, resBadResponses, "Unexpected bad responses")
-   assert.NotNil(t, p.IsBad(id), "Peer not marked as bad when it should be")
+   assert.Equal(t, true, p.IsBad(id), "Peer not marked as bad when it should be")
}

func TestAddMetaData(t *testing.T) {
@@ -574,7 +574,7 @@ func TestPeerIPTracker(t *testing.T) {
        badPeers = append(badPeers, createPeer(t, p, addr, network.DirUnknown, peerdata.PeerConnectionState(ethpb.ConnectionState_DISCONNECTED)))
    }
    for _, pr := range badPeers {
-       assert.NotNil(t, p.IsBad(pr), "peer with bad ip is not bad")
+       assert.Equal(t, true, p.IsBad(pr), "peer with bad ip is not bad")
    }

    // Add in bad peers, so that our records are trimmed out
@@ -587,7 +587,7 @@ func TestPeerIPTracker(t *testing.T) {
    p.Prune()

    for _, pr := range badPeers {
-       assert.NoError(t, p.IsBad(pr), "peer with good ip is regarded as bad")
+       assert.Equal(t, false, p.IsBad(pr), "peer with good ip is regarded as bad")
    }
}
@@ -10,27 +10,16 @@ import (
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/encoder"
    "github.com/prysmaticlabs/prysm/v5/config/params"
    "github.com/prysmaticlabs/prysm/v5/network/forks"
-   "github.com/sirupsen/logrus"
)

var _ pubsub.SubscriptionFilter = (*Service)(nil)

// It is set at this limit to handle the possibility
// of double topic subscriptions at fork boundaries.
-// -> BeaconBlock * 2 = 2
-// -> BeaconAggregateAndProof * 2 = 2
-// -> VoluntaryExit * 2 = 2
-// -> ProposerSlashing * 2 = 2
-// -> AttesterSlashing * 2 = 2
-// -> 64 Beacon Attestation * 2 = 128
-// -> SyncContributionAndProof * 2 = 2
-// -> 4 SyncCommitteeSubnets * 2 = 8
-// -> BlsToExecutionChange * 2 = 2
-// -> 128 DataColumnSidecar * 2 = 256
-// -------------------------------------
-// TOTAL = 406
-// (Note: BlobSidecar is not included in this list since it is superseded by DataColumnSidecar)
-const pubsubSubscriptionRequestLimit = 500
+// -> 64 Attestation Subnets * 2.
+// -> 4 Sync Committee Subnets * 2.
+// -> Block,Aggregate,ProposerSlashing,AttesterSlashing,Exits,SyncContribution * 2.
+const pubsubSubscriptionRequestLimit = 200

// CanSubscribe returns true if the topic is of interest and we could subscribe to it.
func (s *Service) CanSubscribe(topic string) bool {
@@ -106,15 +95,8 @@ func (s *Service) CanSubscribe(topic string) bool {
// FilterIncomingSubscriptions is invoked for all RPCs containing subscription notifications.
// This method returns only the topics of interest and may return an error if the subscription
// request contains too many topics.
-func (s *Service) FilterIncomingSubscriptions(peerID peer.ID, subs []*pubsubpb.RPC_SubOpts) ([]*pubsubpb.RPC_SubOpts, error) {
+func (s *Service) FilterIncomingSubscriptions(_ peer.ID, subs []*pubsubpb.RPC_SubOpts) ([]*pubsubpb.RPC_SubOpts, error) {
    if len(subs) > pubsubSubscriptionRequestLimit {
-       subsCount := len(subs)
-       log.WithFields(logrus.Fields{
-           "peerID":             peerID,
-           "subscriptionCounts": subsCount,
-           "subscriptionLimit":  pubsubSubscriptionRequestLimit,
-       }).Debug("Too many incoming subscriptions, filtering them")
-
        return nil, pubsub.ErrTooManySubscriptions
    }
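As a quick sanity check on the restored constant (editor's arithmetic, not part of the diff), the topic counts in the new comment sum well under the limit:

package main

import "fmt"

func main() {
    attestationSubnets := 64 * 2 // attestation subnets, doubled at a fork boundary
    syncSubnets := 4 * 2         // sync committee subnets, doubled
    coreTopics := 6 * 2          // block, aggregate, both slashings, exits, sync contribution
    fmt.Println(attestationSubnets + syncSubnets + coreTopics) // 148, under the 200 request limit
}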
@@ -27,12 +27,12 @@ import (
    "github.com/prysmaticlabs/prysm/v5/config/features"
    "github.com/prysmaticlabs/prysm/v5/config/params"
    leakybucket "github.com/prysmaticlabs/prysm/v5/container/leaky-bucket"
-   "github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
    prysmnetwork "github.com/prysmaticlabs/prysm/v5/network"
    "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/metadata"
    "github.com/prysmaticlabs/prysm/v5/runtime"
    "github.com/prysmaticlabs/prysm/v5/time/slots"
    "github.com/sirupsen/logrus"
+   "go.opencensus.io/trace"
)

var _ runtime.Service = (*Service)(nil)
@@ -43,10 +43,6 @@ var _ runtime.Service = (*Service)(nil)
// defined below.
var pollingPeriod = 6 * time.Second

-// When looking for new nodes, if not enough nodes are found,
-// we stop after this amount of iterations.
-var batchSize = 2_000
-
// Refresh rate of ENR set at twice per slot.
var refreshRate = slots.DivideSlotBy(2)
@@ -206,13 +202,12 @@ func (s *Service) Start() {
        s.startupErr = err
        return
    }

-   if err := s.connectToBootnodes(); err != nil {
-       log.WithError(err).Error("Could not connect to boot nodes")
+   err = s.connectToBootnodes()
+   if err != nil {
+       log.WithError(err).Error("Could not add bootnode to the exclusion list")
        s.startupErr = err
        return
    }

    s.dv5Listener = listener
    go s.listenForNewNodes()
}
@@ -398,17 +393,12 @@ func (s *Service) AddPingMethod(reqFunc func(ctx context.Context, id peer.ID) er
    s.pingMethodLock.Unlock()
}

-func (s *Service) pingPeersAndLogEnr() {
+func (s *Service) pingPeers() {
    s.pingMethodLock.RLock()
    defer s.pingMethodLock.RUnlock()

-   localENR := s.dv5Listener.Self()
-   log.WithField("ENR", localENR).Info("New node record")
-
    if s.pingMethod == nil {
        return
    }

    for _, pid := range s.peers.Connected() {
        go func(id peer.ID) {
            if err := s.pingMethod(s.ctx, id); err != nil {
@@ -481,8 +471,8 @@ func (s *Service) connectWithPeer(ctx context.Context, info peer.AddrInfo) error
    if info.ID == s.host.ID() {
        return nil
    }
-   if err := s.Peers().IsBad(info.ID); err != nil {
-       return errors.Wrap(err, "refused to connect to bad peer")
+   if s.Peers().IsBad(info.ID) {
+       return errors.New("refused to connect to bad peer")
    }
    ctx, cancel := context.WithTimeout(ctx, maxDialTimeout)
    defer cancel()
@@ -201,11 +201,11 @@ func TestListenForNewNodes(t *testing.T) {
    require.NoError(t, err)
    defer bootListener.Close()

-   // Use shorter batch size for testing.
-   currentBatchSize := batchSize
-   batchSize = 5
    // Use shorter period for testing.
    currentPeriod := pollingPeriod
    pollingPeriod = 1 * time.Second
    defer func() {
-       batchSize = currentBatchSize
        pollingPeriod = currentPeriod
    }()

    bootNode := bootListener.Self()
@@ -2,7 +2,6 @@ package p2p

import (
    "context"
-   "math"
    "strings"
    "sync"
    "time"
@@ -12,6 +11,8 @@ import (
    "github.com/holiman/uint256"
    "github.com/pkg/errors"
    "github.com/prysmaticlabs/go-bitfield"
+   "go.opencensus.io/trace"
+
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
@@ -21,9 +22,8 @@ import (
    "github.com/prysmaticlabs/prysm/v5/consensus-types/wrapper"
    "github.com/prysmaticlabs/prysm/v5/crypto/hash"
    "github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
-   "github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
+   mathutil "github.com/prysmaticlabs/prysm/v5/math"
    pb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
-   "github.com/sirupsen/logrus"
)

var attestationSubnetCount = params.BeaconConfig().AttestationSubnetCount
@@ -54,79 +54,6 @@ const blobSubnetLockerVal = 110
// chosen more than sync, attestation and blob subnet (6) combined.
const dataColumnSubnetVal = 150

-// nodeFilter returns a function that filters nodes based on the subnet topic and subnet index.
-func (s *Service) nodeFilter(topic string, index uint64) (func(node *enode.Node) bool, error) {
-   switch {
-   case strings.Contains(topic, GossipAttestationMessage):
-       return s.filterPeerForAttSubnet(index), nil
-   case strings.Contains(topic, GossipSyncCommitteeMessage):
-       return s.filterPeerForSyncSubnet(index), nil
-   case strings.Contains(topic, GossipDataColumnSidecarMessage):
-       return s.filterPeerForDataColumnsSubnet(index), nil
-   default:
-       return nil, errors.Errorf("no subnet exists for provided topic: %s", topic)
-   }
-}
-
-// searchForPeers performs a network search for peers subscribed to a particular subnet.
-// It exits as soon as one of these conditions is met:
-// - It looped through `batchSize` nodes.
-// - It found `peersToFindCount` peers corresponding to the `filter` criteria.
-// - The iterator is exhausted.
-func searchForPeers(
-   iterator enode.Iterator,
-   batchSize int,
-   peersToFindCount uint,
-   filter func(node *enode.Node) bool,
-) []*enode.Node {
-   nodeFromNodeID := make(map[enode.ID]*enode.Node, batchSize)
-   for i := 0; i < batchSize && uint(len(nodeFromNodeID)) <= peersToFindCount && iterator.Next(); i++ {
-       node := iterator.Node()
-
-       // Filter out nodes that do not meet the criteria.
-       if !filter(node) {
-           continue
-       }
-
-       // Remove duplicates, keeping the node with the higher seq.
-       prevNode, ok := nodeFromNodeID[node.ID()]
-       if ok && prevNode.Seq() > node.Seq() {
-           continue
-       }
-
-       nodeFromNodeID[node.ID()] = node
-   }
-
-   // Convert the map to a slice.
-   nodes := make([]*enode.Node, 0, len(nodeFromNodeID))
-   for _, node := range nodeFromNodeID {
-       nodes = append(nodes, node)
-   }
-
-   return nodes
-}
-
-// dialPeer dials a peer in a separate goroutine.
-func (s *Service) dialPeer(ctx context.Context, wg *sync.WaitGroup, node *enode.Node) {
-   info, _, err := convertToAddrInfo(node)
-   if err != nil {
-       return
-   }
-
-   if info == nil {
-       return
-   }
-
-   wg.Add(1)
-   go func() {
-       if err := s.connectWithPeer(ctx, *info); err != nil {
-           log.WithError(err).Tracef("Could not connect with peer %s", info.String())
-       }
-
-       wg.Done()
-   }()
-}
-
// FindPeersWithSubnet performs a network search for peers
// subscribed to a particular subnet. Then it tries to connect
// with those peers. This method will block until either:
@@ -135,104 +62,69 @@ func (s *Service) dialPeer(ctx context.Context, wg *sync.WaitGroup, node *enode.
// On some edge cases, this method may hang indefinitely while peers
// are actually found. In such a case, the user should cancel the context
// and re-run the method again.
-func (s *Service) FindPeersWithSubnet(
-   ctx context.Context,
-   topic string,
-   index uint64,
-   threshold int,
-) (bool, error) {
-   const minLogInterval = 1 * time.Minute
-
+func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string,
+   index uint64, threshold int) (bool, error) {
    ctx, span := trace.StartSpan(ctx, "p2p.FindPeersWithSubnet")
    defer span.End()

    span.AddAttributes(trace.Int64Attribute("index", int64(index))) // lint:ignore uintcast -- It's safe to do this for tracing.

    if s.dv5Listener == nil {
-       // Return if discovery isn't set
+       // return if discovery isn't set
        return false, nil
    }

    topic += s.Encoding().ProtocolSuffix()
    iterator := s.dv5Listener.RandomNodes()
    defer iterator.Close()

-   filter, err := s.nodeFilter(topic, index)
-   if err != nil {
-       return false, errors.Wrap(err, "node filter")
-   }
-
-   peersSummary := func(topic string, threshold int) (int, int) {
-       // Retrieve how many peers we have for this topic.
-       peerCountForTopic := len(s.pubsub.ListPeers(topic))
-
-       // Compute how many peers we are missing to reach the threshold.
-       missingPeerCountForTopic := max(0, threshold-peerCountForTopic)
-
-       return peerCountForTopic, missingPeerCountForTopic
-   }
-
-   // Compute how many peers we are missing to reach the threshold.
-   peerCountForTopic, missingPeerCountForTopic := peersSummary(topic, threshold)
-
-   // Exit early if we have enough peers.
-   if missingPeerCountForTopic == 0 {
-       return true, nil
-   }
-
-   log := log.WithFields(logrus.Fields{
-       "topic":           topic,
-       "targetPeerCount": threshold,
-   })
-
-   log.WithField("currentPeerCount", peerCountForTopic).Debug("Searching for new peers for a subnet - start")
-
-   lastLogTime := time.Now()
+   switch {
+   case strings.Contains(topic, GossipAttestationMessage):
+       iterator = filterNodes(ctx, iterator, s.filterPeerForAttSubnet(index))
+   case strings.Contains(topic, GossipSyncCommitteeMessage):
+       iterator = filterNodes(ctx, iterator, s.filterPeerForSyncSubnet(index))
+   case strings.Contains(topic, GossipDataColumnSidecarMessage):
+       iterator = filterNodes(ctx, iterator, s.filterPeerForDataColumnsSubnet(index))
+   default:
+       return false, errors.Errorf("no subnet exists for provided topic: %s", topic)
+   }

    wg := new(sync.WaitGroup)
    for {
-       // If the context is done, we can exit the loop. This is the unhappy path.
-       if err := ctx.Err(); err != nil {
-           return false, errors.Errorf(
-               "unable to find requisite number of peers for topic %s - only %d out of %d peers available after searching",
-               topic, peerCountForTopic, threshold,
-           )
-       }
-
-       // Search for new peers in the network.
-       nodes := searchForPeers(iterator, batchSize, uint(missingPeerCountForTopic), filter)
-
-       // Restrict dials if limit is applied.
-       maxConcurrentDials := math.MaxInt
-       if flags.MaxDialIsActive() {
-           maxConcurrentDials = flags.Get().MaxConcurrentDials
-       }
-
-       // Dial the peers in batches.
-       for start := 0; start < len(nodes); start += maxConcurrentDials {
-           stop := min(start+maxConcurrentDials, len(nodes))
-           for _, node := range nodes[start:stop] {
-               s.dialPeer(ctx, wg, node)
-           }
-
-           // Wait for all dials to be completed.
-           wg.Wait()
-       }
-
-       peerCountForTopic, missingPeerCountForTopic := peersSummary(topic, threshold)
-
-       // If we have enough peers, we can exit the loop. This is the happy path.
-       if missingPeerCountForTopic == 0 {
-           break
-       }
-
-       if time.Since(lastLogTime) > minLogInterval {
-           lastLogTime = time.Now()
-           log.WithField("currentPeerCount", peerCountForTopic).Debug("Searching for new peers for a subnet - continue")
-       }
-   }
-
-   log.WithField("currentPeerCount", threshold).Debug("Searching for new peers for a subnet - success")
+       currNum := len(s.pubsub.ListPeers(topic))
+       if currNum >= threshold {
+           break
+       }
+       if err := ctx.Err(); err != nil {
+           return false, errors.Errorf("unable to find requisite number of peers for topic %s - "+
+               "only %d out of %d peers were able to be found", topic, currNum, threshold)
+       }
+       nodeCount := int(params.BeaconNetworkConfig().MinimumPeersInSubnetSearch)
+       // Restrict dials if limit is applied.
+       if flags.MaxDialIsActive() {
+           nodeCount = min(nodeCount, flags.Get().MaxConcurrentDials)
+       }
+       nodes := enode.ReadNodes(iterator, nodeCount)
+       for _, node := range nodes {
+           info, _, err := convertToAddrInfo(node)
+           if err != nil {
+               continue
+           }
+           if info == nil {
+               continue
+           }
+           wg.Add(1)
+           go func() {
+               if err := s.connectWithPeer(ctx, *info); err != nil {
+                   log.WithError(err).Tracef("Could not connect with peer %s", info.String())
+               }
+               wg.Done()
+           }()
+       }
+       // Wait for all dials to be completed.
+       wg.Wait()
    }
    return true, nil
}
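Since the doc comment warns the search can hang until the context is cancelled, the intended calling pattern is to bound it with a deadline. A minimal sketch (ensureSubnetPeers is a hypothetical wrapper; it assumes the package-level log entry used elsewhere in this file):

package p2p

import (
    "context"
    "time"
)

// ensureSubnetPeers bounds the search with a deadline and treats an error
// (including context expiry) as "not enough peers yet" rather than fatal.
func ensureSubnetPeers(parent context.Context, s *Service, topic string, subnet uint64, want int) bool {
    ctx, cancel := context.WithTimeout(parent, 30*time.Second)
    defer cancel()

    ok, err := s.FindPeersWithSubnet(ctx, topic, subnet, want)
    if err != nil {
        log.WithError(err).Debug("Subnet peer search ended early")
        return false
    }
    return ok
}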
@@ -292,17 +184,11 @@ func (s *Service) filterPeerForDataColumnsSubnet(index uint64) func(node *enode.
// lower threshold to broadcast object compared to searching
// for a subnet. So that even in the event of poor peer
// connectivity, we can still broadcast an attestation.
-func (s *Service) hasPeerWithSubnet(subnetTopic string) bool {
+func (s *Service) hasPeerWithSubnet(topic string) bool {
    // In the event peer threshold is lower, we will choose the lower
    // threshold.
-   minPeers := min(1, flags.Get().MinimumPeersPerSubnet)
-   topic := subnetTopic + s.Encoding().ProtocolSuffix()
-   peersWithSubnet := s.pubsub.ListPeers(topic)
-   peersWithSubnetCount := len(peersWithSubnet)
-
-   enoughPeers := peersWithSubnetCount >= minPeers
-
-   return enoughPeers
+   minPeers := mathutil.Min(1, uint64(flags.Get().MinimumPeersPerSubnet))
+   return len(s.pubsub.ListPeers(topic+s.Encoding().ProtocolSuffix())) >= int(minPeers) // lint:ignore uintcast -- Min peers can be safely cast to int.
}

// Updates the service's discv5 listener record's attestation subnet
@@ -354,12 +240,13 @@ func (s *Service) updateSubnetRecordWithMetadataV3(
    localNode.Set(custodySubnetCountEntry)

    newSeqNumber := s.metaData.SequenceNumber() + 1
+   cscBytes := []byte{uint8(custodySubnetCount)}

    s.metaData = wrapper.WrappedMetadataV2(&pb.MetaDataV2{
        SeqNumber: newSeqNumber,
        Attnets:   bitVAtt,
        Syncnets:  bitVSync,
-       CustodySubnetCount: custodySubnetCount,
+       CustodySubnetCount: cscBytes,
    })
}
@@ -377,29 +264,22 @@ func initializePersistentSubnets(id enode.ID, epoch primitives.Epoch) error {
    return nil
}

// initializePersistentColumnSubnets initializes persistent column subnets.
func initializePersistentColumnSubnets(id enode.ID) error {
    // Check if the column subnets are already cached.
    _, ok, expTime := cache.ColumnSubnetIDs.GetColumnSubnets()
    if ok && expTime.After(time.Now()) {
        return nil
    }

    // Retrieve the subnets we should be subscribed to.
-   subnetSamplingSize := peerdas.SubnetSamplingSize()
-   subnetsMap, err := peerdas.CustodyColumnSubnets(id, subnetSamplingSize)
+   subsMap, err := peerdas.CustodyColumnSubnets(id, peerdas.CustodySubnetCount())
    if err != nil {
-       return errors.Wrap(err, "custody column subnets")
+       return err
    }

-   subnets := make([]uint64, 0, len(subnetsMap))
-   for subnet := range subnetsMap {
-       subnets = append(subnets, subnet)
+   subs := make([]uint64, 0, len(subsMap))
+   for sub := range subsMap {
+       subs = append(subs, sub)
    }

    // Add the subnets to the cache.
-   cache.ColumnSubnetIDs.AddColumnSubnets(subnets)
-
+   cache.ColumnSubnetIDs.AddColumnSubnets(subs)
    return nil
}
@@ -290,7 +290,7 @@ func (*TestP2P) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
// AddConnectionHandler handles the connection with a newly connected peer.
func (p *TestP2P) AddConnectionHandler(f, _ func(ctx context.Context, id peer.ID) error) {
    p.BHost.Network().Notify(&network.NotifyBundle{
-       ConnectedF: func(_ network.Network, conn network.Conn) {
+       ConnectedF: func(net network.Network, conn network.Conn) {
            // Must be handled in a goroutine as this callback cannot be blocking.
            go func() {
                p.peers.Add(new(enr.Record), conn.RemotePeer(), conn.RemoteMultiaddr(), conn.Stat().Direction)
@@ -314,7 +314,7 @@ func (p *TestP2P) AddConnectionHandler(f, _ func(ctx context.Context, id peer.ID
// AddDisconnectionHandler --
func (p *TestP2P) AddDisconnectionHandler(f func(ctx context.Context, id peer.ID) error) {
    p.BHost.Network().Notify(&network.NotifyBundle{
-       DisconnectedF: func(_ network.Network, conn network.Conn) {
+       DisconnectedF: func(net network.Network, conn network.Conn) {
            // Must be handled in a goroutine as this callback cannot be blocking.
            go func() {
                p.peers.SetConnectionState(conn.RemotePeer(), peers.PeerDisconnecting)
@@ -815,7 +815,6 @@ func (s *Service) lightClientEndpoints(blocker lookup.Blocker, stater lookup.Sta
        Blocker:     blocker,
        Stater:      stater,
        HeadFetcher: s.cfg.HeadFetcher,
-       BeaconDB:    s.cfg.BeaconDB,
    }

    const namespace = "lightclient"
@@ -286,7 +286,7 @@ func (s *Server) PublishBlindedBlockV2(w http.ResponseWriter, r *http.Request) {
    }
}

-func (s *Server) publishBlindedBlockSSZ(ctx context.Context, w http.ResponseWriter, r *http.Request, versionRequired bool) { // nolint:gocognit
+func (s *Server) publishBlindedBlockSSZ(ctx context.Context, w http.ResponseWriter, r *http.Request, versionRequired bool) {
    body, err := io.ReadAll(r.Body)
    if err != nil {
        httputil.HandleError(w, "Could not read request body: "+err.Error(), http.StatusInternalServerError)
@@ -297,29 +297,6 @@ func (s *Server) publishBlindedBlockSSZ(ctx context.Context, w http.ResponseWrit
        httputil.HandleError(w, api.VersionHeader+" header is required", http.StatusBadRequest)
    }

-   electraBlock := &eth.SignedBlindedBeaconBlockElectra{}
-   if err = electraBlock.UnmarshalSSZ(body); err == nil {
-       genericBlock := &eth.GenericSignedBeaconBlock{
-           Block: &eth.GenericSignedBeaconBlock_BlindedElectra{
-               BlindedElectra: electraBlock,
-           },
-       }
-       if err = s.validateBroadcast(ctx, r, genericBlock); err != nil {
-           httputil.HandleError(w, err.Error(), http.StatusBadRequest)
-           return
-       }
-       s.proposeBlock(ctx, w, genericBlock)
-       return
-   }
-   if versionHeader == version.String(version.Electra) {
-       httputil.HandleError(
-           w,
-           fmt.Sprintf("Could not decode request body into %s consensus block: %v", version.String(version.Electra), err.Error()),
-           http.StatusBadRequest,
-       )
-       return
-   }
-
    denebBlock := &eth.SignedBlindedBeaconBlockDeneb{}
    if err = denebBlock.UnmarshalSSZ(body); err == nil {
        genericBlock := &eth.GenericSignedBeaconBlock{
@@ -438,7 +415,7 @@ func (s *Server) publishBlindedBlockSSZ(ctx context.Context, w http.ResponseWrit
    httputil.HandleError(w, "Body does not represent a valid block type", http.StatusBadRequest)
}
-func (s *Server) publishBlindedBlock(ctx context.Context, w http.ResponseWriter, r *http.Request, versionRequired bool) { // nolint:gocognit
+func (s *Server) publishBlindedBlock(ctx context.Context, w http.ResponseWriter, r *http.Request, versionRequired bool) {
    body, err := io.ReadAll(r.Body)
    if err != nil {
        httputil.HandleError(w, "Could not read request body", http.StatusInternalServerError)
@@ -451,27 +428,6 @@ func (s *Server) publishBlindedBlock(ctx context.Context, w http.ResponseWriter,

    var consensusBlock *eth.GenericSignedBeaconBlock

-   var electraBlock *structs.SignedBlindedBeaconBlockElectra
-   if err = unmarshalStrict(body, &electraBlock); err == nil {
-       consensusBlock, err = electraBlock.ToGeneric()
-       if err == nil {
-           if err = s.validateBroadcast(ctx, r, consensusBlock); err != nil {
-               httputil.HandleError(w, err.Error(), http.StatusBadRequest)
-               return
-           }
-           s.proposeBlock(ctx, w, consensusBlock)
-           return
-       }
-   }
-   if versionHeader == version.String(version.Electra) {
-       httputil.HandleError(
-           w,
-           fmt.Sprintf("Could not decode request body into %s consensus block: %v", version.String(version.Electra), err.Error()),
-           http.StatusBadRequest,
-       )
-       return
-   }
-
    var denebBlock *structs.SignedBlindedBeaconBlockDeneb
    if err = unmarshalStrict(body, &denebBlock); err == nil {
        consensusBlock, err = denebBlock.ToGeneric()
@@ -623,7 +579,7 @@ func (s *Server) PublishBlockV2(w http.ResponseWriter, r *http.Request) {
    }
}

-func (s *Server) publishBlockSSZ(ctx context.Context, w http.ResponseWriter, r *http.Request, versionRequired bool) { // nolint:gocognit
+func (s *Server) publishBlockSSZ(ctx context.Context, w http.ResponseWriter, r *http.Request, versionRequired bool) {
    body, err := io.ReadAll(r.Body)
    if err != nil {
        httputil.HandleError(w, "Could not read request body", http.StatusInternalServerError)
@@ -635,39 +591,6 @@ func (s *Server) publishBlockSSZ(ctx context.Context, w http.ResponseWriter, r *
        return
    }

-   electraBlock := &eth.SignedBeaconBlockContentsElectra{}
-   if err = electraBlock.UnmarshalSSZ(body); err == nil {
-       genericBlock := &eth.GenericSignedBeaconBlock{
-           Block: &eth.GenericSignedBeaconBlock_Electra{
-               Electra: electraBlock,
-           },
-       }
-       if err = s.validateBroadcast(ctx, r, genericBlock); err != nil {
-           if errors.Is(err, errEquivocatedBlock) {
-               b, err := blocks.NewSignedBeaconBlock(genericBlock)
-               if err != nil {
-                   httputil.HandleError(w, err.Error(), http.StatusBadRequest)
-                   return
-               }
-               if err := s.broadcastSeenBlockSidecars(ctx, b, genericBlock.GetElectra().Blobs, genericBlock.GetElectra().KzgProofs); err != nil {
-                   log.WithError(err).Error("Failed to broadcast blob sidecars")
-               }
-           }
-           httputil.HandleError(w, err.Error(), http.StatusBadRequest)
-           return
-       }
-       s.proposeBlock(ctx, w, genericBlock)
-       return
-   }
-   if versionHeader == version.String(version.Electra) {
-       httputil.HandleError(
-           w,
-           fmt.Sprintf("Could not decode request body into %s consensus block: %v", version.String(version.Electra), err.Error()),
-           http.StatusBadRequest,
-       )
-       return
-   }
-
    denebBlock := &eth.SignedBeaconBlockContentsDeneb{}
    if err = denebBlock.UnmarshalSSZ(body); err == nil {
        genericBlock := &eth.GenericSignedBeaconBlock{
@@ -796,7 +719,7 @@ func (s *Server) publishBlockSSZ(ctx context.Context, w http.ResponseWriter, r *
    httputil.HandleError(w, "Body does not represent a valid block type", http.StatusBadRequest)
}
-func (s *Server) publishBlock(ctx context.Context, w http.ResponseWriter, r *http.Request, versionRequired bool) { // nolint:gocognit
+func (s *Server) publishBlock(ctx context.Context, w http.ResponseWriter, r *http.Request, versionRequired bool) {
    body, err := io.ReadAll(r.Body)
    if err != nil {
        httputil.HandleError(w, "Could not read request body", http.StatusInternalServerError)
@@ -810,37 +733,6 @@ func (s *Server) publishBlock(ctx context.Context, w http.ResponseWriter, r *htt

    var consensusBlock *eth.GenericSignedBeaconBlock

-   var electraBlockContents *structs.SignedBeaconBlockContentsElectra
-   if err = unmarshalStrict(body, &electraBlockContents); err == nil {
-       consensusBlock, err = electraBlockContents.ToGeneric()
-       if err == nil {
-           if err = s.validateBroadcast(ctx, r, consensusBlock); err != nil {
-               if errors.Is(err, errEquivocatedBlock) {
-                   b, err := blocks.NewSignedBeaconBlock(consensusBlock)
-                   if err != nil {
-                       httputil.HandleError(w, err.Error(), http.StatusBadRequest)
-                       return
-                   }
-                   if err := s.broadcastSeenBlockSidecars(ctx, b, consensusBlock.GetElectra().Blobs, consensusBlock.GetElectra().KzgProofs); err != nil {
-                       log.WithError(err).Error("Failed to broadcast blob sidecars")
-                   }
-               }
-               httputil.HandleError(w, err.Error(), http.StatusBadRequest)
-               return
-           }
-           s.proposeBlock(ctx, w, consensusBlock)
-           return
-       }
-   }
-   if versionHeader == version.String(version.Electra) {
-       httputil.HandleError(
-           w,
-           fmt.Sprintf("Could not decode request body into %s consensus block: %v", version.String(version.Electra), err.Error()),
-           http.StatusBadRequest,
-       )
-       return
-   }
-
    var denebBlockContents *structs.SignedBeaconBlockContentsDeneb
    if err = unmarshalStrict(body, &denebBlockContents); err == nil {
        consensusBlock, err = denebBlockContents.ToGeneric()
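The handlers above all follow one pattern: attempt to decode the body as the newest fork first, fall back to older forks, and only surface a decode error when the client pinned a version via the version header. A compact sketch of that cascade, with hypothetical decode functions standing in for the per-fork UnmarshalSSZ / unmarshalStrict calls:

package main

import (
    "errors"
    "fmt"
)

type block struct{ fork string }

// decoderFor returns a hypothetical per-fork decoder; in the real handlers
// these are UnmarshalSSZ or unmarshalStrict calls on fork-specific types.
func decoderFor(fork string) func([]byte) (*block, error) {
    return func(body []byte) (*block, error) {
        if string(body) == fork {
            return &block{fork: fork}, nil
        }
        return nil, errors.New("not a " + fork + " block")
    }
}

// decodeBlock tries forks newest-first. If the caller pinned a version and
// that fork fails to decode, the error is returned instead of falling through.
func decodeBlock(body []byte, pinned string) (*block, error) {
    for _, fork := range []string{"deneb", "capella", "bellatrix"} {
        b, err := decoderFor(fork)(body)
        if err == nil {
            return b, nil
        }
        if pinned == fork {
            return nil, fmt.Errorf("could not decode request body into %s consensus block: %w", fork, err)
        }
    }
    return nil, errors.New("body does not represent a valid block type")
}

func main() {
    b, err := decodeBlock([]byte("capella"), "")
    fmt.Println(b.fork, err) // capella <nil>
}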
@@ -1071,29 +1071,6 @@ func TestPublishBlock(t *testing.T) {
        server.PublishBlock(writer, request)
        assert.Equal(t, http.StatusOK, writer.Code)
    })
-   t.Run("Electra", func(t *testing.T) {
-       v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
-       v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), mock.MatchedBy(func(req *eth.GenericSignedBeaconBlock) bool {
-           block, ok := req.Block.(*eth.GenericSignedBeaconBlock_Electra)
-           converted, err := structs.SignedBeaconBlockContentsElectraFromConsensus(block.Electra)
-           require.NoError(t, err)
-           var signedblock *structs.SignedBeaconBlockContentsElectra
-           err = json.Unmarshal([]byte(rpctesting.ElectraBlockContents), &signedblock)
-           require.NoError(t, err)
-           require.DeepEqual(t, converted, signedblock)
-           return ok
-       }))
-       server := &Server{
-           V1Alpha1ValidatorServer: v1alpha1Server,
-           SyncChecker:             &mockSync.Sync{IsSyncing: false},
-       }
-       request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader([]byte(rpctesting.ElectraBlockContents)))
-       request.Header.Set(api.VersionHeader, version.String(version.Electra))
-       writer := httptest.NewRecorder()
-       writer.Body = &bytes.Buffer{}
-       server.PublishBlock(writer, request)
-       assert.Equal(t, http.StatusOK, writer.Code)
-   })
    t.Run("invalid block", func(t *testing.T) {
        server := &Server{
            SyncChecker: &mockSync.Sync{IsSyncing: false},
@@ -1277,32 +1254,6 @@ func TestPublishBlockSSZ(t *testing.T) {
        server.PublishBlock(writer, request)
        assert.Equal(t, http.StatusOK, writer.Code)
    })
-   t.Run("Electra", func(t *testing.T) {
-       v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
-       v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), mock.MatchedBy(func(req *eth.GenericSignedBeaconBlock) bool {
-           _, ok := req.Block.(*eth.GenericSignedBeaconBlock_Electra)
-           return ok
-       }))
-       server := &Server{
-           V1Alpha1ValidatorServer: v1alpha1Server,
-           SyncChecker:             &mockSync.Sync{IsSyncing: false},
-       }
-
-       var blk structs.SignedBeaconBlockContentsElectra
-       err := json.Unmarshal([]byte(rpctesting.ElectraBlockContents), &blk)
-       require.NoError(t, err)
-       genericBlock, err := blk.ToGeneric()
-       require.NoError(t, err)
-       ssz, err := genericBlock.GetElectra().MarshalSSZ()
-       require.NoError(t, err)
-       request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader(ssz))
-       request.Header.Set("Content-Type", api.OctetStreamMediaType)
-       request.Header.Set(api.VersionHeader, version.String(version.Electra))
-       writer := httptest.NewRecorder()
-       writer.Body = &bytes.Buffer{}
-       server.PublishBlock(writer, request)
-       assert.Equal(t, http.StatusOK, writer.Code)
-   })
    t.Run("invalid block", func(t *testing.T) {
        server := &Server{
            SyncChecker: &mockSync.Sync{IsSyncing: false},
@@ -1482,30 +1433,6 @@ func TestPublishBlindedBlock(t *testing.T) {
        server.PublishBlindedBlock(writer, request)
        assert.Equal(t, http.StatusOK, writer.Code)
    })
-   t.Run("Blinded Electra", func(t *testing.T) {
-       v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
-       v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), mock.MatchedBy(func(req *eth.GenericSignedBeaconBlock) bool {
-           block, ok := req.Block.(*eth.GenericSignedBeaconBlock_BlindedElectra)
-           converted, err := structs.BlindedBeaconBlockElectraFromConsensus(block.BlindedElectra.Message)
-           require.NoError(t, err)
-           var signedblock *structs.SignedBlindedBeaconBlockElectra
-           err = json.Unmarshal([]byte(rpctesting.BlindedElectraBlock), &signedblock)
-           require.NoError(t, err)
-           require.DeepEqual(t, converted, signedblock.Message)
-           return ok
-       }))
-       server := &Server{
-           V1Alpha1ValidatorServer: v1alpha1Server,
-           SyncChecker:             &mockSync.Sync{IsSyncing: false},
-       }
-
-       request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader([]byte(rpctesting.BlindedElectraBlock)))
-       request.Header.Set(api.VersionHeader, version.String(version.Electra))
-       writer := httptest.NewRecorder()
-       writer.Body = &bytes.Buffer{}
-       server.PublishBlindedBlock(writer, request)
-       assert.Equal(t, http.StatusOK, writer.Code)
-   })
    t.Run("invalid block", func(t *testing.T) {
        server := &Server{
            SyncChecker: &mockSync.Sync{IsSyncing: false},
@@ -1690,32 +1617,6 @@ func TestPublishBlindedBlockSSZ(t *testing.T) {
		server.PublishBlindedBlock(writer, request)
		assert.Equal(t, http.StatusOK, writer.Code)
	})
-	t.Run("Electra", func(t *testing.T) {
-		v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
-		v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), mock.MatchedBy(func(req *eth.GenericSignedBeaconBlock) bool {
-			_, ok := req.Block.(*eth.GenericSignedBeaconBlock_BlindedElectra)
-			return ok
-		}))
-		server := &Server{
-			V1Alpha1ValidatorServer: v1alpha1Server,
-			SyncChecker:             &mockSync.Sync{IsSyncing: false},
-		}
-
-		var blk structs.SignedBlindedBeaconBlockElectra
-		err := json.Unmarshal([]byte(rpctesting.BlindedElectraBlock), &blk)
-		require.NoError(t, err)
-		genericBlock, err := blk.ToGeneric()
-		require.NoError(t, err)
-		ssz, err := genericBlock.GetBlindedElectra().MarshalSSZ()
-		require.NoError(t, err)
-		request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader(ssz))
-		request.Header.Set("Content-Type", api.OctetStreamMediaType)
-		request.Header.Set(api.VersionHeader, version.String(version.Electra))
-		writer := httptest.NewRecorder()
-		writer.Body = &bytes.Buffer{}
-		server.PublishBlindedBlock(writer, request)
-		assert.Equal(t, http.StatusOK, writer.Code)
-	})
	t.Run("invalid block", func(t *testing.T) {
		server := &Server{
			SyncChecker: &mockSync.Sync{IsSyncing: false},
@@ -1887,30 +1788,6 @@ func TestPublishBlockV2(t *testing.T) {
		server.PublishBlockV2(writer, request)
		assert.Equal(t, http.StatusOK, writer.Code)
	})
-	t.Run("Electra", func(t *testing.T) {
-		v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
-		v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), mock.MatchedBy(func(req *eth.GenericSignedBeaconBlock) bool {
-			block, ok := req.Block.(*eth.GenericSignedBeaconBlock_Electra)
-			converted, err := structs.SignedBeaconBlockContentsElectraFromConsensus(block.Electra)
-			require.NoError(t, err)
-			var signedblock *structs.SignedBeaconBlockContentsElectra
-			err = json.Unmarshal([]byte(rpctesting.ElectraBlockContents), &signedblock)
-			require.NoError(t, err)
-			require.DeepEqual(t, converted, signedblock)
-			return ok
-		}))
-		server := &Server{
-			V1Alpha1ValidatorServer: v1alpha1Server,
-			SyncChecker:             &mockSync.Sync{IsSyncing: false},
-		}
-
-		request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader([]byte(rpctesting.ElectraBlockContents)))
-		request.Header.Set(api.VersionHeader, version.String(version.Electra))
-		writer := httptest.NewRecorder()
-		writer.Body = &bytes.Buffer{}
-		server.PublishBlockV2(writer, request)
-		assert.Equal(t, http.StatusOK, writer.Code)
-	})
	t.Run("invalid block", func(t *testing.T) {
		server := &Server{
			SyncChecker: &mockSync.Sync{IsSyncing: false},
@@ -2107,32 +1984,6 @@ func TestPublishBlockV2SSZ(t *testing.T) {
		server.PublishBlockV2(writer, request)
		assert.Equal(t, http.StatusOK, writer.Code)
	})
-	t.Run("Electra", func(t *testing.T) {
-		v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
-		v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), mock.MatchedBy(func(req *eth.GenericSignedBeaconBlock) bool {
-			_, ok := req.Block.(*eth.GenericSignedBeaconBlock_Electra)
-			return ok
-		}))
-		server := &Server{
-			V1Alpha1ValidatorServer: v1alpha1Server,
-			SyncChecker:             &mockSync.Sync{IsSyncing: false},
-		}
-
-		var blk structs.SignedBeaconBlockContentsElectra
-		err := json.Unmarshal([]byte(rpctesting.ElectraBlockContents), &blk)
-		require.NoError(t, err)
-		genericBlock, err := blk.ToGeneric()
-		require.NoError(t, err)
-		ssz, err := genericBlock.GetElectra().MarshalSSZ()
-		require.NoError(t, err)
-		request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader(ssz))
-		request.Header.Set("Content-Type", api.OctetStreamMediaType)
-		request.Header.Set(api.VersionHeader, version.String(version.Electra))
-		writer := httptest.NewRecorder()
-		writer.Body = &bytes.Buffer{}
-		server.PublishBlockV2(writer, request)
-		assert.Equal(t, http.StatusOK, writer.Code)
-	})
	t.Run("invalid block", func(t *testing.T) {
		server := &Server{
			SyncChecker: &mockSync.Sync{IsSyncing: false},
@@ -2325,30 +2176,6 @@ func TestPublishBlindedBlockV2(t *testing.T) {
		server.PublishBlindedBlockV2(writer, request)
		assert.Equal(t, http.StatusOK, writer.Code)
	})
-	t.Run("Blinded Electra", func(t *testing.T) {
-		v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
-		v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), mock.MatchedBy(func(req *eth.GenericSignedBeaconBlock) bool {
-			block, ok := req.Block.(*eth.GenericSignedBeaconBlock_BlindedElectra)
-			converted, err := structs.BlindedBeaconBlockElectraFromConsensus(block.BlindedElectra.Message)
-			require.NoError(t, err)
-			var signedblock *structs.SignedBlindedBeaconBlockElectra
-			err = json.Unmarshal([]byte(rpctesting.BlindedElectraBlock), &signedblock)
-			require.NoError(t, err)
-			require.DeepEqual(t, converted, signedblock.Message)
-			return ok
-		}))
-		server := &Server{
-			V1Alpha1ValidatorServer: v1alpha1Server,
-			SyncChecker:             &mockSync.Sync{IsSyncing: false},
-		}
-
-		request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader([]byte(rpctesting.BlindedElectraBlock)))
-		request.Header.Set(api.VersionHeader, version.String(version.Electra))
-		writer := httptest.NewRecorder()
-		writer.Body = &bytes.Buffer{}
-		server.PublishBlindedBlockV2(writer, request)
-		assert.Equal(t, http.StatusOK, writer.Code)
-	})
	t.Run("invalid block", func(t *testing.T) {
		server := &Server{
			SyncChecker: &mockSync.Sync{IsSyncing: false},
@@ -2545,32 +2372,6 @@ func TestPublishBlindedBlockV2SSZ(t *testing.T) {
		server.PublishBlindedBlock(writer, request)
		assert.Equal(t, http.StatusOK, writer.Code)
	})
-	t.Run("Electra", func(t *testing.T) {
-		v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
-		v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), mock.MatchedBy(func(req *eth.GenericSignedBeaconBlock) bool {
-			_, ok := req.Block.(*eth.GenericSignedBeaconBlock_BlindedElectra)
-			return ok
-		}))
-		server := &Server{
-			V1Alpha1ValidatorServer: v1alpha1Server,
-			SyncChecker:             &mockSync.Sync{IsSyncing: false},
-		}
-
-		var blk structs.SignedBlindedBeaconBlockElectra
-		err := json.Unmarshal([]byte(rpctesting.BlindedElectraBlock), &blk)
-		require.NoError(t, err)
-		genericBlock, err := blk.ToGeneric()
-		require.NoError(t, err)
-		ssz, err := genericBlock.GetBlindedElectra().MarshalSSZ()
-		require.NoError(t, err)
-		request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader(ssz))
-		request.Header.Set("Content-Type", api.OctetStreamMediaType)
-		request.Header.Set(api.VersionHeader, version.String(version.Electra))
-		writer := httptest.NewRecorder()
-		writer.Body = &bytes.Buffer{}
-		server.PublishBlindedBlock(writer, request)
-		assert.Equal(t, http.StatusOK, writer.Code)
-	})
	t.Run("invalid block", func(t *testing.T) {
		server := &Server{
			SyncChecker: &mockSync.Sync{IsSyncing: false},

@@ -20,7 +20,6 @@ go_library(
        "//beacon-chain/core/time:go_default_library",
        "//beacon-chain/core/transition:go_default_library",
        "//config/params:go_default_library",
-       "//monitoring/tracing/trace:go_default_library",
        "//network/httputil:go_default_library",
        "//proto/eth/v1:go_default_library",
        "//proto/eth/v2:go_default_library",
@@ -29,6 +28,7 @@ go_library(
        "//time/slots:go_default_library",
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
+       "@io_opencensus_go//trace:go_default_library",
    ],
)
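The dependency churn in these two hunks swaps Prysm's tracing wrapper (//monitoring/tracing/trace) for a direct opencensus dependency (@io_opencensus_go//trace); the import hunk below makes the same move in the Go source. Both packages expose a StartSpan-style API, so call sites keep the same shape. A minimal opencensus usage sketch for orientation — the span name is illustrative, not taken from the diff:

package example

import (
	"context"

	"go.opencensus.io/trace"
)

func handle(ctx context.Context) {
	// StartSpan derives a context carrying the new span; End records it.
	ctx, span := trace.StartSpan(ctx, "beacon.publishBlock")
	defer span.End()
	_ = ctx // thread ctx through downstream calls so their spans nest here
}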
@@ -19,13 +19,13 @@ import (
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
	"github.com/prysmaticlabs/prysm/v5/config/params"
-	"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
	"github.com/prysmaticlabs/prysm/v5/network/httputil"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/eth/v1"
	ethpbv2 "github.com/prysmaticlabs/prysm/v5/proto/eth/v2"
	eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v5/runtime/version"
	"github.com/prysmaticlabs/prysm/v5/time/slots"
+	"go.opencensus.io/trace"
)

const (

@@ -12,8 +12,6 @@ go_library(
    deps = [
        "//api/server/structs:go_default_library",
        "//beacon-chain/blockchain:go_default_library",
        "//beacon-chain/core/light-client:go_default_library",
        "//beacon-chain/db:go_default_library",
        "//beacon-chain/rpc/eth/shared:go_default_library",
        "//beacon-chain/rpc/lookup:go_default_library",
        "//beacon-chain/state:go_default_library",
@@ -43,9 +41,9 @@ go_test(
    embed = [":go_default_library"],
    deps = [
        "//api/server/structs:go_default_library",
        "//beacon-chain/blockchain:go_default_library",
        "//beacon-chain/blockchain/testing:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/core/light-client:go_default_library",
        "//beacon-chain/rpc/testutil:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//config/fieldparams:go_default_library",
@@ -73,7 +73,7 @@ func TestLightClientHandler_GetLightClientBootstrap(t *testing.T) {
	resp := &structs.LightClientBootstrapResponse{}
	require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
	require.Equal(t, "capella", resp.Version)
-	require.Equal(t, hexutil.Encode(header.Header.BodyRoot), resp.Data.Header.Beacon.BodyRoot)
+	require.Equal(t, hexutil.Encode(header.Header.BodyRoot), resp.Data.Header.BodyRoot)
	require.NotNil(t, resp.Data)
}

@@ -6,10 +6,9 @@ import (
	"reflect"
	"strconv"

-	lightclient "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/light-client"
-
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/prysmaticlabs/prysm/v5/api/server/structs"
+	"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
	fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
	"github.com/prysmaticlabs/prysm/v5/config/params"
@@ -67,20 +66,17 @@ func createLightClientBootstrap(ctx context.Context, state state.BeaconState) (*
		branch[i] = hexutil.Encode(proof)
	}

-	beacon := structs.BeaconBlockHeaderFromConsensus(latestBlockHeader)
-	if beacon == nil {
+	header := structs.BeaconBlockHeaderFromConsensus(latestBlockHeader)
+	if header == nil {
		return nil, fmt.Errorf("could not get beacon block header")
	}
-	header := &structs.LightClientHeader{
-		Beacon: beacon,
-	}

	// Above shared util function won't calculate state root, so we need to do it manually
	stateRoot, err := state.HashTreeRoot(ctx)
	if err != nil {
		return nil, fmt.Errorf("could not get state root: %s", err.Error())
	}
-	header.Beacon.StateRoot = hexutil.Encode(stateRoot[:])
+	header.StateRoot = hexutil.Encode(stateRoot[:])

	// Return result
	result := &structs.LightClientBootstrap{
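One step both versions of createLightClientBootstrap keep is worth spelling out: the header comes from the state's latest block header, whose state root is zeroed while its own block is being processed (the real root is only known at the next slot), which is why the handler recomputes the root from the state and patches it in rather than trusting the copied value. A reduced sketch of that fixup, with plain hex encoding standing in for hexutil.Encode:

package example

import "encoding/hex"

// patchStateRoot mirrors the manual fixup above: the header copied from the
// beacon state carries a zeroed state root, so the handler substitutes the
// root freshly computed from the current state.
func patchStateRoot(computed [32]byte) string {
	return "0x" + hex.EncodeToString(computed[:])
}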
@@ -156,7 +152,7 @@ func createLightClientUpdate(
	block interfaces.ReadOnlySignedBeaconBlock,
	attestedState state.BeaconState,
	finalizedBlock interfaces.ReadOnlySignedBeaconBlock) (*structs.LightClientUpdate, error) {
-	result, err := lightclient.NewLightClientFinalityUpdateFromBeaconState(ctx, state, block, attestedState, finalizedBlock)
+	result, err := blockchain.NewLightClientFinalityUpdateFromBeaconState(ctx, state, block, attestedState, finalizedBlock)
	if err != nil {
		return nil, err
	}
@@ -214,7 +210,7 @@ func newLightClientFinalityUpdateFromBeaconState(
	block interfaces.ReadOnlySignedBeaconBlock,
	attestedState state.BeaconState,
	finalizedBlock interfaces.ReadOnlySignedBeaconBlock) (*structs.LightClientUpdate, error) {
-	result, err := lightclient.NewLightClientFinalityUpdateFromBeaconState(ctx, state, block, attestedState, finalizedBlock)
+	result, err := blockchain.NewLightClientFinalityUpdateFromBeaconState(ctx, state, block, attestedState, finalizedBlock)
	if err != nil {
		return nil, err
	}
@@ -227,7 +223,7 @@ func newLightClientOptimisticUpdateFromBeaconState(
	state state.BeaconState,
	block interfaces.ReadOnlySignedBeaconBlock,
	attestedState state.BeaconState) (*structs.LightClientUpdate, error) {
-	result, err := lightclient.NewLightClientOptimisticUpdateFromBeaconState(ctx, state, block, attestedState)
+	result, err := blockchain.NewLightClientOptimisticUpdateFromBeaconState(ctx, state, block, attestedState)
	if err != nil {
		return nil, err
	}
@@ -240,7 +236,7 @@ func NewLightClientBootstrapFromJSON(bootstrapJSON *structs.LightClientBootstrap

	var err error

-	v1Alpha1Header, err := bootstrapJSON.Header.Beacon.ToConsensus()
+	v1Alpha1Header, err := bootstrapJSON.Header.ToConsensus()
	if err != nil {
		return nil, err
	}
@@ -323,7 +319,7 @@ func IsSyncCommitteeUpdate(update *v2.LightClientUpdate) bool {
}

func IsFinalityUpdate(update *v2.LightClientUpdate) bool {
-	finalityBranch := make([][]byte, lightclient.FinalityBranchNumOfLeaves)
+	finalityBranch := make([][]byte, blockchain.FinalityBranchNumOfLeaves)
	return !reflect.DeepEqual(update.FinalityBranch, finalityBranch)
}

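IsFinalityUpdate encodes a simple convention that the test helpers below rely on as well: an update carries finality information exactly when its finality branch differs from an all-zero, freshly allocated branch of the expected depth. A self-contained sketch of that check, with branchDepth as a stand-in for FinalityBranchNumOfLeaves:

package example

import "reflect"

const branchDepth = 6 // stand-in for FinalityBranchNumOfLeaves

// isFinalityUpdate reports whether a finality branch is non-empty by comparing
// it against a newly allocated branch, whose entries are all nil.
func isFinalityUpdate(finalityBranch [][]byte) bool {
	return !reflect.DeepEqual(finalityBranch, make([][]byte, branchDepth))
}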
@@ -3,8 +3,7 @@ package lightclient
import (
	"testing"

-	lightclient "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/light-client"
-
+	"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
	fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
	ethpbv1 "github.com/prysmaticlabs/prysm/v5/proto/eth/v1"
	ethpbv2 "github.com/prysmaticlabs/prysm/v5/proto/eth/v2"
@@ -20,7 +19,7 @@ func createNonEmptySyncCommitteeBranch() [][]byte {

// When the update has finality
func createNonEmptyFinalityBranch() [][]byte {
-	res := make([][]byte, lightclient.FinalityBranchNumOfLeaves)
+	res := make([][]byte, blockchain.FinalityBranchNumOfLeaves)
	res[0] = []byte("xyz")
	return res
}
@@ -147,7 +146,7 @@ func TestIsBetterUpdate(t *testing.T) {
			},
			NextSyncCommitteeBranch: createNonEmptySyncCommitteeBranch(),
			SignatureSlot:           9999,
-			FinalityBranch:          make([][]byte, lightclient.FinalityBranchNumOfLeaves),
+			FinalityBranch:          make([][]byte, blockchain.FinalityBranchNumOfLeaves),
		},
		newUpdate: &ethpbv2.LightClientUpdate{
			SyncAggregate: &ethpbv1.SyncAggregate{
@@ -184,7 +183,7 @@ func TestIsBetterUpdate(t *testing.T) {
			},
			NextSyncCommitteeBranch: createNonEmptySyncCommitteeBranch(),
			SignatureSlot:           9999,
-			FinalityBranch:          make([][]byte, lightclient.FinalityBranchNumOfLeaves),
+			FinalityBranch:          make([][]byte, blockchain.FinalityBranchNumOfLeaves),
		},
		expectedResult: false,
	},

@@ -2,7 +2,6 @@ package lightclient

import (
-	"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/db"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/lookup"
)

@@ -10,5 +9,4 @@ type Server struct {
	Blocker     lookup.Blocker
	Stater      lookup.Stater
-	HeadFetcher blockchain.HeadFetcher
	BeaconDB    db.HeadAccessDatabase
}

@@ -3,11 +3,7 @@ load("@prysm//tools/go:def.bzl", "go_library")
go_library(
    name = "go_default_library",
    testonly = True,
-    srcs = [
-        "json.go",
-        "json_mainnet.go",
-        "json_minimal.go",  # keep
-    ],
+    srcs = ["json.go"],
    importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/shared/testing",
    visibility = ["//visibility:public"],
)

Some files were not shown because too many files have changed in this diff.