Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-11 06:18:05 -05:00)

Compare commits: peerDAS-el ... expStateHa (2 commits)

Commits:
- ef92530045
- 7879732c55
.bazelrc (1 change)

@@ -22,7 +22,6 @@ coverage --define=coverage_enabled=1
 build --workspace_status_command=./hack/workspace_status.sh

 build --define blst_disabled=false
-build --compilation_mode=opt
 run --define blst_disabled=false

 build:blst_disabled --define blst_disabled=true
.github/workflows/go.yml (2 changes, vendored)

@@ -54,7 +54,7 @@ jobs:
       - name: Golangci-lint
         uses: golangci/golangci-lint-action@v5
         with:
-          version: v1.56.1
+          version: v1.55.2
           args: --config=.golangci.yml --out-${NO_FUTURE}format colored-line-number

   build:
@@ -73,7 +73,6 @@ linters:
   - promlinter
   - protogetter
   - revive
-  - spancheck
   - staticcheck
   - stylecheck
   - tagalign
CHANGELOG.md (40 changes)

@@ -8,27 +8,16 @@ The format is based on Keep a Changelog, and this project adheres to Semantic Ve

### Added

-- Electra EIP6110: Queue deposit [pr](https://github.com/prysmaticlabs/prysm/pull/14430).
+- Electra EIP6110: Queue deposit [pr](https://github.com/prysmaticlabs/prysm/pull/14430)
- Add Bellatrix tests for light client functions.
- Add Discovery Rebooter Feature.
- Added GetBlockAttestationsV2 endpoint.
-- Light client support: Consensus types for Electra.
+- Light client support: Consensus types for Electra
- Added SubmitPoolAttesterSlashingV2 endpoint.
- Added SubmitAggregateAndProofsRequestV2 endpoint.
- Updated the `beacon-chain/monitor` package to Electra. [PR](https://github.com/prysmaticlabs/prysm/pull/14562)
- Added ListAttestationsV2 endpoint.
- Add ability to rollback node's internal state during processing.
- Change how unsafe protobuf state is created to prevent unnecessary copies.
- Added benchmarks for process slots for Capella, Deneb, Electra.
- Add helper to cast bytes to string without allocating memory.
- Added GetAggregatedAttestationV2 endpoint.
- Added SubmitAttestationsV2 endpoint.
- Validator REST mode Electra block support.
- Added validator index label to `validator_statuses` metric.
- Added Validator REST mode use of Attestation V2 endpoints and Electra attestations.
- PeerDAS: Added proto for `DataColumnIdentifier`, `DataColumnSidecar`, `DataColumnSidecarsByRangeRequest` and `MetadataV2`.
- Better attestation packing for Electra. [PR](https://github.com/prysmaticlabs/prysm/pull/14534)
- P2P: Add logs when a peer is (dis)connected. Add the reason of the disconnection when we initiate it.

### Changed
@@ -43,28 +32,12 @@ The format is based on Keep a Changelog, and this project adheres to Semantic Ve
- Use read only validator for core processing to avoid unnecessary copying.
- Use ROBlock across block processing pipeline.
- Added missing Eth-Consensus-Version headers to GetBlockAttestationsV2 and GetAttesterSlashingsV2 endpoints.
- When instantiating new validators, explicit set `Slashed` to false and move `EffectiveBalance` to match struct definition.
- Updated pgo profile for beacon chain with holesky data. This improves the profile guided
  optimizations in the go compiler.
- Use read only state when computing the active validator list.
- Simplified `ExitedValidatorIndices`.
- Simplified `EjectedValidatorIndices`.
- `engine_newPayloadV4`,`engine_getPayloadV4` are changes due to new execution request serialization decisions, [PR](https://github.com/prysmaticlabs/prysm/pull/14580)
- Fixed various small things in state-native code.
- Use ROBlock earlier in block syncing pipeline.
- Changed the signature of `ProcessPayload`.
- Only Build the Protobuf state once during serialization.
- Capella blocks are execution.
- Fixed panic when http request to subscribe to event stream fails.
- Return early for blob reconstructor during capella fork.
- Updated block endpoint from V1 to V2.
- Rename instances of "deposit receipts" to "deposit requests".
- Non-blocking payload attribute event handling in beacon api [pr](https://github.com/prysmaticlabs/prysm/pull/14644).
- Updated light client protobufs. [PR](https://github.com/prysmaticlabs/prysm/pull/14650)
- Added `Eth-Consensus-Version` header to `ListAttestationsV2` and `GetAggregateAttestationV2` endpoints.
- Updated light client consensus types. [PR](https://github.com/prysmaticlabs/prysm/pull/14652)
- Fixed pending deposits processing on Electra.
- Modified `ListAttestationsV2`, `GetAttesterSlashingsV2` and `GetAggregateAttestationV2` endpoints to use slot to determine fork version.

### Deprecated
@@ -74,8 +47,6 @@ The format is based on Keep a Changelog, and this project adheres to Semantic Ve

- Removed finalized validator index cache, no longer needed.
- Removed validator queue position log on key reload and wait for activation.
- Removed outdated spectest exclusions for EIP-6110.
- Removed kzg proof check from blob reconstructor.

### Fixed
@@ -90,12 +61,6 @@ The format is based on Keep a Changelog, and this project adheres to Semantic Ve
- Fix keymanager API so that get keys returns an empty response instead of a 500 error when using an unsupported keystore.
- Small log improvement, removing some redundant or duplicate logs
- EIP7521 - Fixes withdrawal bug by accounting for pending partial withdrawals and deducting already withdrawn amounts from the sweep balance. [PR](https://github.com/prysmaticlabs/prysm/pull/14578)
- unskip electra merkle spec test
- Fix panic in validator REST mode when checking status after removing all keys
- Fix panic on attestation interface since we call data before validation
- corrects nil check on some interface attestation types
- temporary solution to handling electra attestation and attester_slashing events. [pr](14655)
- Diverse log improvements and comment additions.


### Security
@@ -210,7 +175,6 @@ Updating to this release is recommended at your convenience.
- Light client support: fix light client attested header execution fields' wrong version bug.
- Testing: added custom matcher for better push settings testing.
- Registered `GetDepositSnapshot` Beacon API endpoint.
-- Fix rolling back of a block due to a context deadline.

### Security
@@ -93,7 +93,6 @@ func (h *EventStream) Subscribe(eventsChannel chan<- *Event) {
 			EventType: EventConnectionError,
 			Data:      []byte(errors.Wrap(err, client.ErrConnectionIssue.Error()).Error()),
 		}
-		return
 	}

 	defer func() {
@@ -40,7 +40,7 @@ func TestNewEventStream(t *testing.T) {

 func TestEventStream(t *testing.T) {
 	mux := http.NewServeMux()
-	mux.HandleFunc("/eth/v1/events", func(w http.ResponseWriter, _ *http.Request) {
+	mux.HandleFunc("/eth/v1/events", func(w http.ResponseWriter, r *http.Request) {
 		flusher, ok := w.(http.Flusher)
 		require.Equal(t, true, ok)
 		for i := 1; i <= 3; i++ {
@@ -79,23 +79,3 @@ func TestEventStream(t *testing.T) {
 		}
 	}
 }
-
-func TestEventStreamRequestError(t *testing.T) {
-	topics := []string{"head"}
-	eventsChannel := make(chan *Event, 1)
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
-
-	// use valid url that will result in failed request with nil body
-	stream, err := NewEventStream(ctx, http.DefaultClient, "http://badhost:1234", topics)
-	require.NoError(t, err)
-
-	// error will happen when request is made, should be received over events channel
-	go stream.Subscribe(eventsChannel)
-
-	event := <-eventsChannel
-	if event.EventType != EventConnectionError {
-		t.Errorf("Expected event type %q, got %q", EventConnectionError, event.EventType)
-	}
-}
@@ -26,7 +26,7 @@ type ListAttestationsResponse struct {
 }

 type SubmitAttestationsRequest struct {
-	Data json.RawMessage `json:"data"`
+	Data []*Attestation `json:"data"`
 }

 type ListVoluntaryExitsResponse struct {
@@ -7,8 +7,7 @@ import (
 )

 type AggregateAttestationResponse struct {
-	Version string          `json:"version,omitempty"`
-	Data    json.RawMessage `json:"data"`
+	Data *Attestation `json:"data"`
 }

 type SubmitContributionAndProofsRequest struct {
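The two hunks above swap lazily-decoded `json.RawMessage` fields for eagerly-typed ones (and drop the `Version` discriminator). A minimal standalone sketch of the two decode styles; the `Attestation` shape here is a hypothetical stand-in, not Prysm's real struct:

package main

import (
	"encoding/json"
	"fmt"
)

// Hypothetical stand-in for the API's attestation payload.
type Attestation struct {
	Slot string `json:"slot"`
}

// RawMessage keeps the payload undecoded so a caller can pick the concrete
// type per fork version after reading Version (the shape removed above).
type versionedResponse struct {
	Version string          `json:"version,omitempty"`
	Data    json.RawMessage `json:"data"`
}

func main() {
	body := []byte(`{"version":"electra","data":{"slot":"42"}}`)
	var resp versionedResponse
	if err := json.Unmarshal(body, &resp); err != nil {
		panic(err)
	}
	// Second-stage decode, chosen once the version is known.
	var att Attestation
	if err := json.Unmarshal(resp.Data, &att); err != nil {
		panic(err)
	}
	fmt.Println(resp.Version, att.Slot)
}

With the typed field, the second `json.Unmarshal` disappears but the struct can only represent one fork's shape.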
@@ -25,7 +25,6 @@ go_library(
         "receive_attestation.go",
         "receive_blob.go",
         "receive_block.go",
-        "receive_data_column.go",
         "service.go",
         "tracked_proposer.go",
         "weak_subjectivity_checks.go",
@@ -49,7 +48,6 @@ go_library(
         "//beacon-chain/core/feed/state:go_default_library",
         "//beacon-chain/core/helpers:go_default_library",
         "//beacon-chain/core/light-client:go_default_library",
-        "//beacon-chain/core/peerdas:go_default_library",
         "//beacon-chain/core/signing:go_default_library",
         "//beacon-chain/core/time:go_default_library",
         "//beacon-chain/core/transition:go_default_library",
@@ -159,7 +157,6 @@ go_test(
         "//beacon-chain/operations/slashings:go_default_library",
         "//beacon-chain/operations/voluntaryexits:go_default_library",
         "//beacon-chain/p2p:go_default_library",
-        "//beacon-chain/p2p/testing:go_default_library",
         "//beacon-chain/startup:go_default_library",
         "//beacon-chain/state:go_default_library",
         "//beacon-chain/state/state-native:go_default_library",
@@ -33,7 +33,6 @@ var (
 )

 var errMaxBlobsExceeded = errors.New("Expected commitments in block exceeds MAX_BLOBS_PER_BLOCK")
-var errMaxDataColumnsExceeded = errors.New("Expected data columns for node exceeds NUMBER_OF_COLUMNS")

 // An invalid block is the block that fails state transition based on the core protocol rules.
 // The beacon node shall not be accepting nor building blocks that branch off from an invalid block.
@@ -6,11 +6,8 @@ import (

 	"github.com/ethereum/go-ethereum/common"
 	"github.com/pkg/errors"
-	"github.com/prysmaticlabs/prysm/v5/async/event"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
-	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed"
-	statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
@@ -72,7 +69,6 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
 	if arg.attributes == nil {
 		arg.attributes = payloadattribute.EmptyWithVersion(headBlk.Version())
 	}
-	go firePayloadAttributesEvent(ctx, s.cfg.StateNotifier.StateFeed(), arg)
 	payloadID, lastValidHash, err := s.cfg.ExecutionEngineCaller.ForkchoiceUpdated(ctx, fcs, arg.attributes)
 	if err != nil {
 		switch {
@@ -171,38 +167,6 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
 	return payloadID, nil
 }

-func firePayloadAttributesEvent(ctx context.Context, f event.SubscriberSender, cfg *fcuConfig) {
-	pidx, err := helpers.BeaconProposerIndex(ctx, cfg.headState)
-	if err != nil {
-		log.WithError(err).
-			WithField("head_root", cfg.headRoot[:]).
-			Error("Could not get proposer index for PayloadAttributes event")
-		return
-	}
-	evd := payloadattribute.EventData{
-		ProposerIndex:   pidx,
-		ProposalSlot:    cfg.headState.Slot(),
-		ParentBlockRoot: cfg.headRoot[:],
-		Attributer:      cfg.attributes,
-		HeadRoot:        cfg.headRoot,
-		HeadState:       cfg.headState,
-		HeadBlock:       cfg.headBlock,
-	}
-	if cfg.headBlock != nil && !cfg.headBlock.IsNil() {
-		headPayload, err := cfg.headBlock.Block().Body().Execution()
-		if err != nil {
-			log.WithError(err).Error("Could not get execution payload for head block")
-			return
-		}
-		evd.ParentBlockHash = headPayload.BlockHash()
-		evd.ParentBlockNumber = headPayload.BlockNumber()
-	}
-	f.Send(&feed.Event{
-		Type: statefeed.PayloadAttributes,
-		Data: evd,
-	})
-}
-
 // getPayloadHash returns the payload hash given the block root.
 // if the block is before bellatrix fork epoch, it returns the zero hash.
 func (s *Service) getPayloadHash(ctx context.Context, root []byte) ([32]byte, error) {
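The deleted helper was invoked as `go firePayloadAttributesEvent(...)` so that feed subscribers could never block a forkchoice update. A generic sketch of that fire-and-forget pattern, using a buffered channel in place of Prysm's event feed (all names here are illustrative, not Prysm APIs):

package main

import "fmt"

type event struct {
	typ  string
	data any
}

// fireAsync publishes without blocking the caller: the goroutine performs
// the send, and the buffered channel absorbs slow subscribers.
func fireAsync(feed chan<- event, ev event) {
	go func() { feed <- ev }()
}

func main() {
	feed := make(chan event, 16)
	fireAsync(feed, event{typ: "payload_attributes", data: 42})
	fmt.Println(<-feed)
}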
@@ -3,7 +3,6 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
 go_library(
     name = "go_default_library",
     srcs = [
-        "kzg.go",
         "trusted_setup.go",
         "validation.go",
     ],
@@ -13,9 +12,6 @@ go_library(
     deps = [
         "//consensus-types/blocks:go_default_library",
         "@com_github_crate_crypto_go_kzg_4844//:go_default_library",
-        "@com_github_ethereum_c_kzg_4844//bindings/go:go_default_library",
-        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
-        "@com_github_ethereum_go_ethereum//crypto/kzg4844:go_default_library",
         "@com_github_pkg_errors//:go_default_library",
     ],
 )
@@ -1,109 +0,0 @@
-package kzg
-
-import (
-	"errors"
-
-	ckzg4844 "github.com/ethereum/c-kzg-4844/v2/bindings/go"
-	"github.com/ethereum/go-ethereum/crypto/kzg4844"
-)
-
-// BytesPerBlob is the number of bytes in a single blob.
-const BytesPerBlob = ckzg4844.BytesPerBlob
-
-// Blob represents a serialized chunk of data.
-type Blob [BytesPerBlob]byte
-
-// BytesPerCell is the number of bytes in a single cell.
-const BytesPerCell = ckzg4844.BytesPerCell
-
-// Cell represents a chunk of an encoded Blob.
-type Cell [BytesPerCell]byte
-
-// Commitment represent a KZG commitment to a Blob.
-type Commitment [48]byte
-
-// Proof represents a KZG proof that attests to the validity of a Blob or parts of it.
-type Proof [48]byte
-
-// Bytes48 is a 48-byte array.
-type Bytes48 = ckzg4844.Bytes48
-
-// Bytes32 is a 32-byte array.
-type Bytes32 = ckzg4844.Bytes32
-
-// CellsAndProofs represents the Cells and Proofs corresponding to
-// a single blob.
-type CellsAndProofs struct {
-	Cells  []Cell
-	Proofs []Proof
-}
-
-func BlobToKZGCommitment(blob *Blob) (Commitment, error) {
-	comm, err := kzg4844.BlobToCommitment(kzg4844.Blob(*blob))
-	if err != nil {
-		return Commitment{}, err
-	}
-	return Commitment(comm), nil
-}
-
-func ComputeBlobKZGProof(blob *Blob, commitment Commitment) (Proof, error) {
-	proof, err := kzg4844.ComputeBlobProof(kzg4844.Blob(*blob), kzg4844.Commitment(commitment))
-	if err != nil {
-		return [48]byte{}, err
-	}
-	return Proof(proof), nil
-}
-
-func ComputeCellsAndKZGProofs(blob *Blob) (CellsAndProofs, error) {
-	ckzgBlob := (*ckzg4844.Blob)(blob)
-	ckzgCells, ckzgProofs, err := ckzg4844.ComputeCellsAndKZGProofs(ckzgBlob)
-	if err != nil {
-		return CellsAndProofs{}, err
-	}
-
-	return makeCellsAndProofs(ckzgCells[:], ckzgProofs[:])
-}
-
-func VerifyCellKZGProofBatch(commitmentsBytes []Bytes48, cellIndices []uint64, cells []Cell, proofsBytes []Bytes48) (bool, error) {
-	// Convert `Cell` type to `ckzg4844.Cell`
-	ckzgCells := make([]ckzg4844.Cell, len(cells))
-	for i := range cells {
-		ckzgCells[i] = ckzg4844.Cell(cells[i])
-	}
-
-	return ckzg4844.VerifyCellKZGProofBatch(commitmentsBytes, cellIndices, ckzgCells, proofsBytes)
-}
-
-func RecoverCellsAndKZGProofs(cellIndices []uint64, partialCells []Cell) (CellsAndProofs, error) {
-	// Convert `Cell` type to `ckzg4844.Cell`
-	ckzgPartialCells := make([]ckzg4844.Cell, len(partialCells))
-	for i := range partialCells {
-		ckzgPartialCells[i] = ckzg4844.Cell(partialCells[i])
-	}
-
-	ckzgCells, ckzgProofs, err := ckzg4844.RecoverCellsAndKZGProofs(cellIndices, ckzgPartialCells)
-	if err != nil {
-		return CellsAndProofs{}, err
-	}
-
-	return makeCellsAndProofs(ckzgCells[:], ckzgProofs[:])
-}
-
-// Convert cells/proofs to the CellsAndProofs type defined in this package.
-func makeCellsAndProofs(ckzgCells []ckzg4844.Cell, ckzgProofs []ckzg4844.KZGProof) (CellsAndProofs, error) {
-	if len(ckzgCells) != len(ckzgProofs) {
-		return CellsAndProofs{}, errors.New("different number of cells/proofs")
-	}
-
-	var cells []Cell
-	var proofs []Proof
-	for i := range ckzgCells {
-		cells = append(cells, Cell(ckzgCells[i]))
-		proofs = append(proofs, Proof(ckzgProofs[i]))
-	}
-
-	return CellsAndProofs{
-		Cells:  cells,
-		Proofs: proofs,
-	}, nil
-}
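For context on how the deleted helpers composed: a sketch that erasure-extends a blob into cells plus proofs, drops half the cells, and recovers the full set, as a node custodying more than half of the columns would. It assumes the trusted setup has been loaded (`Start()` from `trusted_setup.go`), uses only functions defined in the deleted file, and the zero-valued blob exists purely to exercise the API; this is not part of the diff.

// Hypothetical usage of the deleted package (sketch, not the diff's code).
func exampleCellsRoundTrip() error {
	var blob Blob // zero blob, purely to exercise the API
	cp, err := ComputeCellsAndKZGProofs(&blob)
	if err != nil {
		return err
	}
	// Keep only the first half of the cells...
	half := len(cp.Cells) / 2
	indices := make([]uint64, 0, half)
	partial := make([]Cell, 0, half)
	for i := 0; i < half; i++ {
		indices = append(indices, uint64(i))
		partial = append(partial, cp.Cells[i])
	}
	// ...then recover the full set from the surviving half.
	_, err = RecoverCellsAndKZGProofs(indices, partial)
	return err
}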
@@ -5,8 +5,6 @@ import (
 	"encoding/json"

 	GoKZG "github.com/crate-crypto/go-kzg-4844"
-	CKZG "github.com/ethereum/c-kzg-4844/v2/bindings/go"
-	"github.com/ethereum/go-ethereum/common/hexutil"
 	"github.com/pkg/errors"
 )

@@ -14,53 +12,17 @@ var (
 	//go:embed trusted_setup.json
 	embeddedTrustedSetup []byte // 1.2Mb
 	kzgContext           *GoKZG.Context
-	kzgLoaded            bool
 )

-type TrustedSetup struct {
-	G1Monomial [GoKZG.ScalarsPerBlob]GoKZG.G1CompressedHexStr `json:"g1_monomial"`
-	G1Lagrange [GoKZG.ScalarsPerBlob]GoKZG.G1CompressedHexStr `json:"g1_lagrange"`
-	G2Monomial [65]GoKZG.G2CompressedHexStr                   `json:"g2_monomial"`
-}
-
 func Start() error {
-	trustedSetup := &TrustedSetup{}
-	err := json.Unmarshal(embeddedTrustedSetup, trustedSetup)
+	parsedSetup := GoKZG.JSONTrustedSetup{}
+	err := json.Unmarshal(embeddedTrustedSetup, &parsedSetup)
 	if err != nil {
 		return errors.Wrap(err, "could not parse trusted setup JSON")
 	}
-	kzgContext, err = GoKZG.NewContext4096(&GoKZG.JSONTrustedSetup{
-		SetupG2:         trustedSetup.G2Monomial[:],
-		SetupG1Lagrange: trustedSetup.G1Lagrange})
+	kzgContext, err = GoKZG.NewContext4096(&parsedSetup)
 	if err != nil {
 		return errors.Wrap(err, "could not initialize go-kzg context")
 	}
-
-	// Length of a G1 point, converted from hex to binary.
-	g1MonomialBytes := make([]byte, len(trustedSetup.G1Monomial)*(len(trustedSetup.G1Monomial[0])-2)/2)
-	for i, g1 := range &trustedSetup.G1Monomial {
-		copy(g1MonomialBytes[i*(len(g1)-2)/2:], hexutil.MustDecode(g1))
-	}
-	// Length of a G1 point, converted from hex to binary.
-	g1LagrangeBytes := make([]byte, len(trustedSetup.G1Lagrange)*(len(trustedSetup.G1Lagrange[0])-2)/2)
-	for i, g1 := range &trustedSetup.G1Lagrange {
-		copy(g1LagrangeBytes[i*(len(g1)-2)/2:], hexutil.MustDecode(g1))
-	}
-	// Length of a G2 point, converted from hex to binary.
-	g2MonomialBytes := make([]byte, len(trustedSetup.G2Monomial)*(len(trustedSetup.G2Monomial[0])-2)/2)
-	for i, g2 := range &trustedSetup.G2Monomial {
-		copy(g2MonomialBytes[i*(len(g2)-2)/2:], hexutil.MustDecode(g2))
-	}
-	if !kzgLoaded {
-		// TODO: Provide a configuration option for this.
-		var precompute uint = 8
-
-		// Free the current trusted setup before running this method. CKZG
-		// panics if the same setup is run multiple times.
-		if err = CKZG.LoadTrustedSetup(g1MonomialBytes, g1LagrangeBytes, g2MonomialBytes, precompute); err != nil {
-			panic(err)
-		}
-	}
-	kzgLoaded = true
 	return nil
 }
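A note on the deleted buffer sizing, since the arithmetic is easy to misread: each setup point is a 0x-prefixed hex string, so its binary length is (len - 2)/2 bytes, and each buffer is sized as point count times that size before `hexutil.MustDecode` fills the slots. A tiny sketch of the same step (hypothetical helper name):

// hexPointSize returns the binary size of a 0x-prefixed hex string:
// strip the two-character "0x" prefix, then two hex characters per byte.
func hexPointSize(s string) int {
	return (len(s) - 2) / 2
}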
File diff suppressed because it is too large.
@@ -118,9 +118,9 @@ func WithBLSToExecPool(p blstoexec.PoolManager) Option {
 }

 // WithP2PBroadcaster to broadcast messages after appropriate processing.
-func WithP2PBroadcaster(p p2p.Acceser) Option {
+func WithP2PBroadcaster(p p2p.Broadcaster) Option {
 	return func(s *Service) error {
-		s.cfg.P2P = p
+		s.cfg.P2p = p
 		return nil
 	}
 }
@@ -92,12 +92,12 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
 		{
 			name:      "process nil attestation",
 			a:         nil,
-			wantedErr: "attestation is nil",
+			wantedErr: "attestation can't be nil",
 		},
 		{
 			name:      "process nil field (a.Data) in attestation",
 			a:         &ethpb.Attestation{},
-			wantedErr: "attestation is nil",
+			wantedErr: "attestation's data can't be nil",
 		},
 		{
 			name: "process nil field (a.Target) in attestation",
@@ -3,13 +3,13 @@ package blockchain
 import (
 	"context"
 	"fmt"
 	"slices"
 	"time"

 	"github.com/pkg/errors"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed"
 	statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
-	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
 	coreTime "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/das"
@@ -76,8 +76,6 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {

 	err := s.cfg.ForkChoiceStore.InsertNode(ctx, cfg.postState, cfg.roblock)
 	if err != nil {
-		// Do not use parent context in the event it deadlined
-		ctx = trace.NewContext(context.Background(), span)
 		s.rollbackBlock(ctx, cfg.roblock.Root())
 		return errors.Wrapf(err, "could not insert block %d to fork choice store", cfg.roblock.Block().Slot())
 	}
@@ -233,9 +231,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
 			return err
 		}
 	}
-
-	nodeID := s.cfg.P2P.NodeID()
-	if err := avs.IsDataAvailable(ctx, nodeID, s.CurrentSlot(), b); err != nil {
+	if err := avs.IsDataAvailable(ctx, s.CurrentSlot(), b); err != nil {
 		return errors.Wrapf(err, "could not validate blob data availability at slot %d", b.Block().Slot())
 	}
 	args := &forkchoicetypes.BlockAndCheckpoints{Block: b,
@@ -507,7 +503,7 @@ func missingIndices(bs *filesystem.BlobStorage, root [32]byte, expected [][]byte
 	}
 	indices, err := bs.Indices(root)
 	if err != nil {
-		return nil, errors.Wrap(err, "indices")
+		return nil, err
 	}
 	missing := make(map[uint64]struct{}, len(expected))
 	for i := range expected {
@@ -521,40 +517,12 @@ func missingIndices(bs *filesystem.BlobStorage, root [32]byte, expected [][]byte
 	return missing, nil
 }

-func missingDataColumns(bs *filesystem.BlobStorage, root [32]byte, expected map[uint64]bool) (map[uint64]bool, error) {
-	if len(expected) == 0 {
-		return nil, nil
-	}
-
-	if len(expected) > int(params.BeaconConfig().NumberOfColumns) {
-		return nil, errMaxDataColumnsExceeded
-	}
-
-	indices, err := bs.ColumnIndices(root)
-	if err != nil {
-		return nil, err
-	}
-
-	missing := make(map[uint64]bool, len(expected))
-	for col := range expected {
-		if !indices[col] {
-			missing[col] = true
-		}
-	}
-
-	return missing, nil
-}
-
 // isDataAvailable blocks until all BlobSidecars committed to in the block are available,
 // or an error or context cancellation occurs. A nil result means that the data availability check is successful.
 // The function will first check the database to see if all sidecars have been persisted. If any
 // sidecars are missing, it will then read from the blobNotifier channel for the given root until the channel is
 // closed, the context hits cancellation/timeout, or notifications have been received for all the missing sidecars.
 func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed interfaces.ReadOnlySignedBeaconBlock) error {
-	if coreTime.PeerDASIsActive(signed.Block().Slot()) {
-		return s.areDataColumnsAvailable(ctx, root, signed)
-	}
-
 	if signed.Version() < version.Deneb {
 		return nil
 	}
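The deleted `missingDataColumns` reduces to a set difference over `map[uint64]bool`. A standalone sketch of that core step, with illustrative names:

// missingColumns returns the expected column indices not present in stored.
func missingColumns(stored, expected map[uint64]bool) map[uint64]bool {
	missing := make(map[uint64]bool, len(expected))
	for col := range expected {
		if !stored[col] {
			missing[col] = true
		}
	}
	return missing
}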
@@ -584,7 +552,7 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
 	// get a map of BlobSidecar indices that are not currently available.
 	missing, err := missingIndices(s.blobStorage, root, kzgCommitments)
 	if err != nil {
-		return errors.Wrap(err, "missing indices")
+		return err
 	}
 	// If there are no missing indices, all BlobSidecars are available.
 	if len(missing) == 0 {
@@ -603,13 +571,8 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
 			if len(missing) == 0 {
 				return
 			}
-
-			log.WithFields(logrus.Fields{
-				"slot":          signed.Block().Slot(),
-				"root":          fmt.Sprintf("%#x", root),
-				"blobsExpected": expected,
-				"blobsWaiting":  len(missing),
-			}).Error("Still waiting for blobs DA check at slot end.")
+			log.WithFields(daCheckLogFields(root, signed.Block().Slot(), expected, len(missing))).
+				Error("Still waiting for DA check at slot end.")
 		})
 		defer nst.Stop()
 	}
@@ -631,166 +594,12 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
 		}
 	}

-// uint64MapToSortedSlice produces a sorted uint64 slice from a map.
-func uint64MapToSortedSlice(input map[uint64]bool) []uint64 {
-	output := make([]uint64, 0, len(input))
-	for idx := range input {
-		output = append(output, idx)
-	}
-	slices.Sort[[]uint64](output)
-	return output
-}
-
-func (s *Service) areDataColumnsAvailable(ctx context.Context, root [32]byte, signed interfaces.ReadOnlySignedBeaconBlock) error {
-	if signed.Version() < version.Deneb {
-		return nil
-	}
-
-	block := signed.Block()
-	if block == nil {
-		return errors.New("invalid nil beacon block")
-	}
-	// We are only required to check within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
-	if !params.WithinDAPeriod(slots.ToEpoch(block.Slot()), slots.ToEpoch(s.CurrentSlot())) {
-		return nil
-	}
-
-	body := block.Body()
-	if body == nil {
-		return errors.New("invalid nil beacon block body")
-	}
-
-	kzgCommitments, err := body.BlobKzgCommitments()
-	if err != nil {
-		return errors.Wrap(err, "blob KZG commitments")
-	}
-
-	// If the block has no commitments there is nothing to wait for.
-	if len(kzgCommitments) == 0 {
-		return nil
-	}
-
-	// All columns to sample need to be available for the block to be considered available.
-	// https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/das-core.md#subnet-sampling
-	nodeID := s.cfg.P2P.NodeID()
-	subnetSamplingSize := peerdas.SubnetSamplingSize()
-
-	colMap, err := peerdas.CustodyColumns(nodeID, subnetSamplingSize)
-	if err != nil {
-		return errors.Wrap(err, "custody columns")
-	}
-
-	// colMap represents the data columns a node is expected to custody.
-	if len(colMap) == 0 {
-		return nil
-	}
-
-	// Subscribe to newly stored data columns in the database.
-	rootIndexChan := make(chan filesystem.RootIndexPair)
-	subscription := s.blobStorage.DataColumnFeed.Subscribe(rootIndexChan)
-	defer subscription.Unsubscribe()
-
-	// Get the count of data columns we already have in the store.
-	retrievedDataColumns, err := s.blobStorage.ColumnIndices(root)
-	if err != nil {
-		return errors.Wrap(err, "column indices")
-	}
-
-	retrievedDataColumnsCount := uint64(len(retrievedDataColumns))
-
-	// As soon as we have more than half of the data columns, we can reconstruct the missing ones.
-	// We don't need to wait for the rest of the data columns to declare the block as available.
-	if peerdas.CanSelfReconstruct(retrievedDataColumnsCount) {
-		return nil
-	}
-
-	// Get a map of data column indices that are not currently available.
-	missingMap, err := missingDataColumns(s.blobStorage, root, colMap)
-	if err != nil {
-		return err
-	}
-
-	// If there are no missing indices, all data column sidecars are available.
-	// This is the happy path.
-	if len(missingMap) == 0 {
-		return nil
-	}
-
-	// Log for DA checks that cross over into the next slot; helpful for debugging.
-	nextSlot := slots.BeginsAt(signed.Block().Slot()+1, s.genesisTime)
-	// Avoid logging if DA check is called after next slot start.
-	if nextSlot.After(time.Now()) {
-		nst := time.AfterFunc(time.Until(nextSlot), func() {
-			missingMapCount := uint64(len(missingMap))
-
-			if missingMapCount == 0 {
-				return
-			}
-
-			var (
-				expected interface{} = "all"
-				missing  interface{} = "all"
-			)
-
-			numberOfColumns := params.BeaconConfig().NumberOfColumns
-			colMapCount := uint64(len(colMap))
-
-			if colMapCount < numberOfColumns {
-				expected = uint64MapToSortedSlice(colMap)
-			}
-
-			if missingMapCount < numberOfColumns {
-				missing = uint64MapToSortedSlice(missingMap)
-			}
-
-			log.WithFields(logrus.Fields{
-				"slot":            signed.Block().Slot(),
-				"root":            fmt.Sprintf("%#x", root),
-				"columnsExpected": expected,
-				"columnsWaiting":  missing,
-			}).Error("Some data columns are still unavailable at slot end")
-		})
-
-		defer nst.Stop()
-	}
-
-	for {
-		select {
-		case rootIndex := <-rootIndexChan:
-			if rootIndex.Root != root {
-				// This is not the root we are looking for.
-				continue
-			}
-
-			// This is a data column we are expecting.
-			if _, ok := missingMap[rootIndex.Index]; ok {
-				retrievedDataColumnsCount++
-			}
-
-			// As soon as we have more than half of the data columns, we can reconstruct the missing ones.
-			// We don't need to wait for the rest of the data columns to declare the block as available.
-			if peerdas.CanSelfReconstruct(retrievedDataColumnsCount) {
-				return nil
-			}
-
-			// Remove the index from the missing map.
-			delete(missingMap, rootIndex.Index)
-
-			// Exit if there are no more missing data columns.
-			if len(missingMap) == 0 {
-				return nil
-			}
-		case <-ctx.Done():
-			var missingIndices interface{} = "all"
-			numberOfColumns := params.BeaconConfig().NumberOfColumns
-			missingIndicesCount := uint64(len(missingMap))
-
-			if missingIndicesCount < numberOfColumns {
-				missingIndices = uint64MapToSortedSlice(missingMap)
-			}
-
-			return errors.Wrapf(ctx.Err(), "data column sidecars slot: %d, BlockRoot: %#x, missing %v", block.Slot(), root, missingIndices)
-		}
-	}
-}
+
+func daCheckLogFields(root [32]byte, slot primitives.Slot, expected, missing int) logrus.Fields {
+	return logrus.Fields{
+		"slot":          slot,
+		"root":          fmt.Sprintf("%#x", root),
+		"blobsExpected": expected,
+		"blobsWaiting":  missing,
+	}
+}
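Stripped of the peerDAS specifics, the deleted wait loop in `areDataColumnsAvailable` is a common Go idiom: drain a notification channel until the missing set is empty or the context ends. A minimal generic sketch (hypothetical names; assumes "context" is imported):

// waitForAll consumes index notifications until nothing is missing or the
// context is cancelled, mirroring the structure of the deleted select loop.
func waitForAll(ctx context.Context, notify <-chan uint64, missing map[uint64]bool) error {
	for len(missing) > 0 {
		select {
		case idx := <-notify:
			delete(missing, idx) // deleting an absent key is a no-op
		case <-ctx.Done():
			return ctx.Err()
		}
	}
	return nil
}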
@@ -809,6 +618,9 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
 	if !s.inRegularSync() {
 		return
 	}
+	s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
+		Type: statefeed.MissedSlot,
+	})
 	s.headLock.RLock()
 	headRoot := s.headRoot()
 	headState := s.headState(ctx)
@@ -836,13 +648,6 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
 	attribute := s.getPayloadAttribute(ctx, headState, s.CurrentSlot()+1, headRoot[:])
 	// return early if we are not proposing next slot
 	if attribute.IsEmpty() {
-		fcuArgs := &fcuConfig{
-			headState:  headState,
-			headRoot:   headRoot,
-			headBlock:  nil,
-			attributes: attribute,
-		}
-		go firePayloadAttributesEvent(ctx, s.cfg.StateNotifier.StateFeed(), fcuArgs)
 		return
 	}
@@ -877,7 +682,7 @@ func (s *Service) waitForSync() error {
 	}
 }

-func (s *Service) handleInvalidExecutionError(ctx context.Context, err error, blockRoot, parentRoot [32]byte) error {
+func (s *Service) handleInvalidExecutionError(ctx context.Context, err error, blockRoot [32]byte, parentRoot [32]byte) error {
 	if IsInvalidBlock(err) && InvalidBlockLVH(err) != [32]byte{} {
 		return s.pruneInvalidBlock(ctx, blockRoot, parentRoot, InvalidBlockLVH(err))
 	}
@@ -1520,9 +1520,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
 	require.NoError(t, err)
 	preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
 	require.NoError(t, err)
-	rowsb, err := consensusblocks.NewROBlockWithRoot(wsb, root)
-	require.NoError(t, err)
-	_, err = service.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, rowsb)
+	_, err = service.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, wsb, root)
 	require.ErrorContains(t, "received an INVALID payload from execution engine", err)
 	// Check that forkchoice's head and store's headroot are the previous head (since the invalid block did
 	// not finish importing and it was never imported to forkchoice). Check
@@ -1716,9 +1714,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
 	require.NoError(t, err)
 	preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
 	require.NoError(t, err)
-	rowsb, err := consensusblocks.NewROBlockWithRoot(wsb, root)
-	require.NoError(t, err)
-	_, err = service.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, rowsb)
+	_, err = service.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, wsb, root)
 	require.ErrorContains(t, "received an INVALID payload from execution engine", err)

 	// Check that forkchoice's head and store's headroot are the previous head (since the invalid block did
@@ -1968,9 +1964,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
 	require.NoError(t, err)
 	preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
 	require.NoError(t, err)
-	rowsb, err := consensusblocks.NewROBlockWithRoot(wsb, root)
-	require.NoError(t, err)
-	_, err = service.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, rowsb)
+	_, err = service.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, wsb, root)
 	require.ErrorContains(t, "received an INVALID payload from execution engine", err)

 	// Check that the headroot/state are not in DB and restart the node
@@ -2352,85 +2346,6 @@ func TestRollbackBlock(t *testing.T) {
 	require.Equal(t, false, hasState)
 }

-func TestRollbackBlock_ContextDeadline(t *testing.T) {
-	service, tr := minimalTestService(t)
-	ctx := tr.ctx
-
-	st, keys := util.DeterministicGenesisState(t, 64)
-	stateRoot, err := st.HashTreeRoot(ctx)
-	require.NoError(t, err, "Could not hash genesis state")
-
-	require.NoError(t, service.saveGenesisData(ctx, st))
-
-	genesis := blocks.NewGenesisBlock(stateRoot[:])
-	wsb, err := consensusblocks.NewSignedBeaconBlock(genesis)
-	require.NoError(t, err)
-	require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb), "Could not save genesis block")
-	parentRoot, err := genesis.Block.HashTreeRoot()
-	require.NoError(t, err, "Could not get signing root")
-	require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, parentRoot), "Could not save genesis state")
-	require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
-	require.NoError(t, service.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, &ethpb.Checkpoint{Root: parentRoot[:]}))
-	require.NoError(t, service.cfg.BeaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Root: parentRoot[:]}))
-
-	st, err = service.HeadState(ctx)
-	require.NoError(t, err)
-	b, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 33)
-	require.NoError(t, err)
-	wsb, err = consensusblocks.NewSignedBeaconBlock(b)
-	require.NoError(t, err)
-	root, err := b.Block.HashTreeRoot()
-	require.NoError(t, err)
-	preState, err := service.getBlockPreState(ctx, wsb.Block())
-	require.NoError(t, err)
-	postState, err := service.validateStateTransition(ctx, preState, wsb)
-	require.NoError(t, err)
-	require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
-	roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
-	require.NoError(t, err)
-	require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
-
-	b, err = util.GenerateFullBlock(postState, keys, util.DefaultBlockGenConfig(), 34)
-	require.NoError(t, err)
-	wsb, err = consensusblocks.NewSignedBeaconBlock(b)
-	require.NoError(t, err)
-	root, err = b.Block.HashTreeRoot()
-	require.NoError(t, err)
-	preState, err = service.getBlockPreState(ctx, wsb.Block())
-	require.NoError(t, err)
-	postState, err = service.validateStateTransition(ctx, preState, wsb)
-	require.NoError(t, err)
-	require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
-
-	require.Equal(t, true, service.cfg.BeaconDB.HasBlock(ctx, root))
-	hasState, err := service.cfg.StateGen.HasState(ctx, root)
-	require.NoError(t, err)
-	require.Equal(t, true, hasState)
-
-	// Set deadlined context when processing the block
-	cancCtx, canc := context.WithCancel(context.Background())
-	canc()
-	roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
-	require.NoError(t, err)
-
-	parentRoot = roblock.Block().ParentRoot()
-
-	cj := &ethpb.Checkpoint{}
-	cj.Epoch = 1
-	cj.Root = parentRoot[:]
-	require.NoError(t, postState.SetCurrentJustifiedCheckpoint(cj))
-	require.NoError(t, postState.SetFinalizedCheckpoint(cj))
-
-	// Rollback block insertion into db and caches.
-	require.ErrorContains(t, "context canceled", service.postBlockProcess(&postBlockProcessConfig{cancCtx, roblock, [32]byte{}, postState, false}))
-
-	// The block should no longer exist.
-	require.Equal(t, false, service.cfg.BeaconDB.HasBlock(ctx, root))
-	hasState, err = service.cfg.StateGen.HasState(ctx, root)
-	require.NoError(t, err)
-	require.Equal(t, false, hasState)
-}

 func fakeCommitments(n int) [][]byte {
 	f := make([][]byte, n)
 	for i := range f {
@@ -18,7 +18,6 @@ import (
 	"github.com/prysmaticlabs/prysm/v5/config/features"
 	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
 	consensus_blocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
-	consensusblocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
 	"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
 	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
 	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
@@ -53,12 +52,6 @@ type BlobReceiver interface {
 	ReceiveBlob(context.Context, blocks.VerifiedROBlob) error
 }

-// DataColumnReceiver interface defines the methods of chain service for receiving new
-// data columns
-type DataColumnReceiver interface {
-	ReceiveDataColumn(blocks.VerifiedRODataColumn) error
-}
-
 // SlashingReceiver interface defines the methods of chain service for receiving validated slashing over the wire.
 type SlashingReceiver interface {
 	ReceiveAttesterSlashing(ctx context.Context, slashing ethpb.AttSlashing)
@@ -77,7 +70,6 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
 		log.WithField("blockRoot", fmt.Sprintf("%#x", blockRoot)).Debug("Ignoring already synced block")
 		return nil
 	}
-
 	receivedTime := time.Now()
 	s.blockBeingSynced.set(blockRoot)
 	defer s.blockBeingSynced.unset(blockRoot)
@@ -86,28 +78,20 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
 	if err != nil {
 		return err
 	}

 	preState, err := s.getBlockPreState(ctx, blockCopy.Block())
 	if err != nil {
 		return errors.Wrap(err, "could not get block's prestate")
 	}

 	currentCheckpoints := s.saveCurrentCheckpoints(preState)
-	roblock, err := consensus_blocks.NewROBlockWithRoot(blockCopy, blockRoot)
+	postState, isValidPayload, err := s.validateExecutionAndConsensus(ctx, preState, blockCopy, blockRoot)
 	if err != nil {
 		return err
 	}
-
-	postState, isValidPayload, err := s.validateExecutionAndConsensus(ctx, preState, roblock)
-	if err != nil {
-		return err
-	}
-
 	daWaitedTime, err := s.handleDA(ctx, blockCopy, blockRoot, avs)
 	if err != nil {
 		return err
 	}

 	// Defragment the state before continuing block processing.
 	s.defragmentState(postState)
@@ -117,6 +101,10 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
 	if err := s.savePostStateInfo(ctx, blockRoot, blockCopy, postState); err != nil {
 		return errors.Wrap(err, "could not save post state info")
 	}
+	roblock, err := consensus_blocks.NewROBlockWithRoot(blockCopy, blockRoot)
+	if err != nil {
+		return err
+	}
 	args := &postBlockProcessConfig{
 		ctx:     ctx,
 		roblock: roblock,
@@ -200,7 +188,8 @@ func (s *Service) updateCheckpoints(
 func (s *Service) validateExecutionAndConsensus(
 	ctx context.Context,
 	preState state.BeaconState,
-	block consensusblocks.ROBlock,
+	block interfaces.SignedBeaconBlock,
+	blockRoot [32]byte,
 ) (state.BeaconState, bool, error) {
 	preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
 	if err != nil {
@@ -219,7 +208,7 @@ func (s *Service) validateExecutionAndConsensus(
 	var isValidPayload bool
 	eg.Go(func() error {
 		var err error
-		isValidPayload, err = s.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, block)
+		isValidPayload, err = s.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, block, blockRoot)
 		if err != nil {
 			return errors.Wrap(err, "could not notify the engine of the new payload")
 		}
@@ -243,14 +232,12 @@ func (s *Service) handleDA(
 		if err != nil {
 			return 0, err
 		}
-
-		nodeID := s.cfg.P2P.NodeID()
-		if err := avs.IsDataAvailable(ctx, nodeID, s.CurrentSlot(), rob); err != nil {
+		if err := avs.IsDataAvailable(ctx, s.CurrentSlot(), rob); err != nil {
 			return 0, errors.Wrap(err, "could not validate blob data availability (AvailabilityStore.IsDataAvailable)")
 		}
 	} else {
 		if err := s.isDataAvailable(ctx, blockRoot, block); err != nil {
-			return 0, errors.Wrap(err, "is data available")
+			return 0, errors.Wrap(err, "could not validate blob data availability")
 		}
 	}
 	daWaitedTime := time.Since(daStartTime)
@@ -572,16 +559,16 @@ func (s *Service) sendBlockAttestationsToSlasher(signed interfaces.ReadOnlySigne
 }

 // validateExecutionOnBlock notifies the engine of the incoming block execution payload and returns true if the payload is valid
-func (s *Service) validateExecutionOnBlock(ctx context.Context, ver int, header interfaces.ExecutionData, block consensusblocks.ROBlock) (bool, error) {
-	isValidPayload, err := s.notifyNewPayload(ctx, ver, header, block)
+func (s *Service) validateExecutionOnBlock(ctx context.Context, ver int, header interfaces.ExecutionData, signed interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) (bool, error) {
+	isValidPayload, err := s.notifyNewPayload(ctx, ver, header, signed)
 	if err != nil {
 		s.cfg.ForkChoiceStore.Lock()
-		err = s.handleInvalidExecutionError(ctx, err, block.Root(), block.Block().ParentRoot())
+		err = s.handleInvalidExecutionError(ctx, err, blockRoot, signed.Block().ParentRoot())
 		s.cfg.ForkChoiceStore.Unlock()
 		return false, err
 	}
-	if block.Block().Version() < version.Capella && isValidPayload {
-		if err := s.validateMergeTransitionBlock(ctx, ver, header, block); err != nil {
+	if signed.Version() < version.Capella && isValidPayload {
+		if err := s.validateMergeTransitionBlock(ctx, ver, header, signed); err != nil {
 			return isValidPayload, err
 		}
 	}
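`validateExecutionAndConsensus` (partially shown above) runs the execution-engine check inside `eg.Go`, concurrently with the consensus state transition. A self-contained sketch of that concurrency shape using `golang.org/x/sync/errgroup`; the function names are placeholders, not Prysm's:

package main

import (
	"context"

	"golang.org/x/sync/errgroup"
)

// validateBoth runs two validations concurrently and returns the first error;
// the derived ctx is cancelled as soon as either check fails.
func validateBoth(ctx context.Context, execCheck, consensusCheck func(context.Context) error) error {
	eg, ctx := errgroup.WithContext(ctx)
	eg.Go(func() error { return execCheck(ctx) })
	eg.Go(func() error { return consensusCheck(ctx) })
	return eg.Wait()
}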
@@ -1,14 +0,0 @@
-package blockchain
-
-import (
-	"github.com/pkg/errors"
-	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
-)
-
-func (s *Service) ReceiveDataColumn(ds blocks.VerifiedRODataColumn) error {
-	if err := s.blobStorage.SaveDataColumn(ds); err != nil {
-		return errors.Wrap(err, "save data column")
-	}
-
-	return nil
-}
@@ -82,7 +82,7 @@ type config struct {
 	ExitPool           voluntaryexits.PoolManager
 	SlashingPool       slashings.PoolManager
 	BLSToExecPool      blstoexec.PoolManager
-	P2P                p2p.Acceser
+	P2p                p2p.Broadcaster
 	MaxRoutines        int
 	StateNotifier      statefeed.Notifier
 	ForkChoiceStore    f.ForkChoicer
@@ -107,17 +107,15 @@ var ErrMissingClockSetter = errors.New("blockchain Service initialized without a
 type blobNotifierMap struct {
 	sync.RWMutex
 	notifiers map[[32]byte]chan uint64
-	seenIndex map[[32]byte][fieldparams.NumberOfColumns]bool
+	seenIndex map[[32]byte][fieldparams.MaxBlobsPerBlock]bool
 }

 // notifyIndex notifies a blob by its index for a given root.
 // It uses internal maps to keep track of seen indices and notifier channels.
 func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64) {
-	// TODO: Separate Data Columns from blobs
-	/*
-		if idx >= fieldparams.MaxBlobsPerBlock {
-			return
-		}*/
+	if idx >= fieldparams.MaxBlobsPerBlock {
+		return
+	}

 	bn.Lock()
 	seen := bn.seenIndex[root]
@@ -131,7 +129,7 @@ func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64) {
 	// Retrieve or create the notifier channel for the given root.
 	c, ok := bn.notifiers[root]
 	if !ok {
-		c = make(chan uint64, fieldparams.NumberOfColumns)
+		c = make(chan uint64, fieldparams.MaxBlobsPerBlock)
 		bn.notifiers[root] = c
 	}
@@ -145,7 +143,7 @@ func (bn *blobNotifierMap) forRoot(root [32]byte) chan uint64 {
 	defer bn.Unlock()
 	c, ok := bn.notifiers[root]
 	if !ok {
-		c = make(chan uint64, fieldparams.NumberOfColumns)
+		c = make(chan uint64, fieldparams.MaxBlobsPerBlock)
 		bn.notifiers[root] = c
 	}
 	return c
@@ -171,7 +169,7 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
 	ctx, cancel := context.WithCancel(ctx)
 	bn := &blobNotifierMap{
 		notifiers: make(map[[32]byte]chan uint64),
-		seenIndex: make(map[[32]byte][fieldparams.NumberOfColumns]bool),
+		seenIndex: make(map[[32]byte][fieldparams.MaxBlobsPerBlock]bool),
 	}
 	srv := &Service{
 		ctx: ctx,
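The channel capacity swapped back and forth above matters because `notifyIndex` should never block: the seen-index guard means each distinct index is sent at most once per root, so a buffer sized to the index bound (`MaxBlobsPerBlock` here, `NumberOfColumns` on the peerDAS branch) lets every send complete immediately. A reduced, single-goroutine sketch of that invariant (not the locked production structure):

const maxIndices = 6 // stand-in for fieldparams.MaxBlobsPerBlock

// One buffered slot per possible index: with duplicates filtered by the
// seen set, at most maxIndices sends ever happen, so sends cannot block.
func newNotifier() (chan uint64, map[uint64]bool) {
	return make(chan uint64, maxIndices), make(map[uint64]bool, maxIndices)
}

func notify(c chan uint64, seen map[uint64]bool, idx uint64) {
	if idx >= maxIndices || seen[idx] {
		return
	}
	seen[idx] = true
	c <- idx // never blocks: capacity covers every distinct index
}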
@@ -97,14 +97,13 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
 		WithAttestationPool(attestations.NewPool()),
 		WithSlashingPool(slashings.NewPool()),
 		WithExitPool(voluntaryexits.NewPool()),
-		WithP2PBroadcaster(&mockAccesser{}),
+		WithP2PBroadcaster(&mockBroadcaster{}),
 		WithStateNotifier(&mockBeaconNode{}),
 		WithForkChoiceStore(fc),
 		WithAttestationService(attService),
 		WithStateGen(stateGen),
 		WithPayloadIDCache(cache.NewPayloadIDCache()),
 		WithClockSynchronizer(startup.NewClockSynchronizer()),
-		WithP2PBroadcaster(&mockAccesser{}),
 	}

 	chainService, err := NewService(ctx, opts...)
@@ -588,7 +587,7 @@ func (s *MockClockSetter) SetClock(g *startup.Clock) error {
 func TestNotifyIndex(t *testing.T) {
 	// Initialize a blobNotifierMap
 	bn := &blobNotifierMap{
-		seenIndex: make(map[[32]byte][fieldparams.NumberOfColumns]bool),
+		seenIndex: make(map[[32]byte][fieldparams.MaxBlobsPerBlock]bool),
 		notifiers: make(map[[32]byte]chan uint64),
 	}
@@ -19,10 +19,8 @@ import (
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/attestations"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/blstoexec"
-	"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
-	p2pTesting "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/testing"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state/stategen"
 	fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
 	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
 	"github.com/prysmaticlabs/prysm/v5/testing/require"
 	"google.golang.org/protobuf/proto"
@@ -47,11 +45,6 @@ type mockBroadcaster struct {
 	broadcastCalled bool
 }

-type mockAccesser struct {
-	mockBroadcaster
-	p2pTesting.MockPeerManager
-}
-
 func (mb *mockBroadcaster) Broadcast(_ context.Context, _ proto.Message) error {
 	mb.broadcastCalled = true
 	return nil
@@ -72,11 +65,6 @@ func (mb *mockBroadcaster) BroadcastBlob(_ context.Context, _ uint64, _ *ethpb.B
 	return nil
 }

-func (mb *mockBroadcaster) BroadcastDataColumn(_ context.Context, _ [fieldparams.RootLength]byte, _ uint64, _ *ethpb.DataColumnSidecar) error {
-	mb.broadcastCalled = true
-	return nil
-}
-
 func (mb *mockBroadcaster) BroadcastBLSChanges(_ context.Context, _ []*ethpb.SignedBLSToExecutionChange) {
 }

@@ -134,7 +122,6 @@ func minimalTestService(t *testing.T, opts ...Option) (*Service, *testServiceReq
 		WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)),
 		WithSyncChecker(mock.MockChecker{}),
 		WithExecutionEngineCaller(&mockExecution.EngineClient{}),
-		WithP2PBroadcaster(&mockAccesser{}),
 	}
 	// append the variadic opts so they override the defaults by being processed afterwards
 	opts = append(defOpts, opts...)
@@ -702,11 +702,6 @@ func (c *ChainService) ReceiveBlob(_ context.Context, b blocks.VerifiedROBlob) e
 	return nil
 }

-// ReceiveDataColumn implements the same method in chain service
-func (*ChainService) ReceiveDataColumn(_ blocks.VerifiedRODataColumn) error {
-	return nil
-}
-
 // TargetRootForEpoch mocks the same method in the chain service
 func (c *ChainService) TargetRootForEpoch(_ [32]byte, _ primitives.Epoch) ([32]byte, error) {
 	return c.TargetRoot, nil
beacon-chain/cache/BUILD.bazel (1 change, vendored)

@@ -8,7 +8,6 @@ go_library(
         "attestation_data.go",
         "balance_cache_key.go",
         "checkpoint_state.go",
-        "column_subnet_ids.go",
         "committee.go",
         "committee_disabled.go",  # keep
         "committees.go",
beacon-chain/cache/column_subnet_ids.go (70 changes, vendored)

@@ -1,70 +0,0 @@
-package cache
-
-import (
-	"sync"
-	"time"
-
-	"github.com/patrickmn/go-cache"
-	"github.com/prysmaticlabs/prysm/v5/config/params"
-)
-
-type columnSubnetIDs struct {
-	colSubCache *cache.Cache
-	colSubLock  sync.RWMutex
-}
-
-// ColumnSubnetIDs for column subnet participants
-var ColumnSubnetIDs = newColumnSubnetIDs()
-
-const columnKey = "columns"
-
-func newColumnSubnetIDs() *columnSubnetIDs {
-	secondsPerSlot := params.BeaconConfig().SecondsPerSlot
-	slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
-	epochDuration := time.Duration(slotsPerEpoch.Mul(secondsPerSlot))
-
-	// Set the default duration of a column subnet subscription as the column expiry period.
-	minEpochsForDataColumnSidecarsRequest := time.Duration(params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest)
-	subLength := epochDuration * minEpochsForDataColumnSidecarsRequest
-
-	persistentCache := cache.New(subLength*time.Second, epochDuration*time.Second)
-	return &columnSubnetIDs{colSubCache: persistentCache}
-}
-
-// GetColumnSubnets retrieves the data column subnets.
-func (s *columnSubnetIDs) GetColumnSubnets() ([]uint64, bool, time.Time) {
-	s.colSubLock.RLock()
-	defer s.colSubLock.RUnlock()
-
-	id, duration, ok := s.colSubCache.GetWithExpiration(columnKey)
-	if !ok {
-		return nil, false, time.Time{}
-	}
-	// Retrieve indices from the cache.
-	idxs, ok := id.([]uint64)
-	if !ok {
-		return nil, false, time.Time{}
-	}
-
-	return idxs, ok, duration
-}
-
-// AddColumnSubnets adds the relevant data column subnets.
-func (s *columnSubnetIDs) AddColumnSubnets(colIdx []uint64) {
-	s.colSubLock.Lock()
-	defer s.colSubLock.Unlock()
-
-	s.colSubCache.Set(columnKey, colIdx, 0)
-}
-
-// EmptyAllCaches empties out all the related caches and flushes any stored
-// entries on them. This should only ever be used for testing, in normal
-// production, handling of the relevant subnets for each role is done
-// separately.
-func (s *columnSubnetIDs) EmptyAllCaches() {
-	// Clear the cache.
-	s.colSubLock.Lock()
-	defer s.colSubLock.Unlock()
-
-	s.colSubCache.Flush()
-}
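The deleted cache wraps `github.com/patrickmn/go-cache` around a single well-known key with a default TTL. A minimal standalone sketch of that usage pattern (the durations and values here are arbitrary illustrations):

package main

import (
	"fmt"
	"time"

	"github.com/patrickmn/go-cache"
)

func main() {
	// Default TTL and cleanup interval are fixed at construction, as in
	// newColumnSubnetIDs above.
	c := cache.New(5*time.Minute, time.Minute)
	c.Set("columns", []uint64{1, 5, 9}, 0) // 0 => use the default TTL

	if v, exp, ok := c.GetWithExpiration("columns"); ok {
		fmt.Println(v.([]uint64), "expires at", exp)
	}
}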
@@ -187,12 +187,11 @@ func AddValidatorToRegistry(beaconState state.BeaconState, pubKey []byte, withdr
 //	return Validator(
 //	    pubkey=pubkey,
 //	    withdrawal_credentials=withdrawal_credentials,
-//	    effective_balance=effective_balance,
-//	    slashed=False,
 //	    activation_eligibility_epoch=FAR_FUTURE_EPOCH,
 //	    activation_epoch=FAR_FUTURE_EPOCH,
 //	    exit_epoch=FAR_FUTURE_EPOCH,
 //	    withdrawable_epoch=FAR_FUTURE_EPOCH,
+//	    effective_balance=effective_balance,
 //	)
 func GetValidatorFromDeposit(pubKey []byte, withdrawalCredentials []byte, amount uint64) *ethpb.Validator {
 	effectiveBalance := amount - (amount % params.BeaconConfig().EffectiveBalanceIncrement)
@@ -203,11 +202,10 @@ func GetValidatorFromDeposit(pubKey []byte, withdrawalCredentials []byte, amount
 	return &ethpb.Validator{
 		PublicKey:                  pubKey,
 		WithdrawalCredentials:      withdrawalCredentials,
-		EffectiveBalance:           effectiveBalance,
-		Slashed:                    false,
 		ActivationEligibilityEpoch: params.BeaconConfig().FarFutureEpoch,
 		ActivationEpoch:            params.BeaconConfig().FarFutureEpoch,
 		ExitEpoch:                  params.BeaconConfig().FarFutureEpoch,
 		WithdrawableEpoch:          params.BeaconConfig().FarFutureEpoch,
+		EffectiveBalance:           effectiveBalance,
 	}
 }
@@ -448,7 +448,6 @@ func TestValidateIndexedAttestation_AboveMaxLength(t *testing.T) {
            Target: &ethpb.Checkpoint{
                Epoch: primitives.Epoch(i),
            },
            Source: &ethpb.Checkpoint{},
        }
    }

@@ -490,7 +489,6 @@ func TestValidateIndexedAttestation_BadAttestationsSignatureSet(t *testing.T) {
                Target: &ethpb.Checkpoint{
                    Root: []byte{},
                },
                Source: &ethpb.Checkpoint{},
            },
            Signature:       sig.Marshal(),
            AggregationBits: list,

@@ -61,9 +61,6 @@ func IsExecutionBlock(body interfaces.ReadOnlyBeaconBlockBody) (bool, error) {
    if body == nil {
        return false, errors.New("nil block body")
    }
    if body.Version() >= version.Capella {
        return true, nil
    }
    payload, err := body.Execution()
    switch {
    case errors.Is(err, consensus_types.ErrUnsupportedField):
@@ -205,24 +202,24 @@ func ValidatePayload(st state.BeaconState, payload interfaces.ExecutionData) err
//	    block_hash=payload.block_hash,
//	    transactions_root=hash_tree_root(payload.transactions),
//	)
func ProcessPayload(st state.BeaconState, body interfaces.ReadOnlyBeaconBlockBody) error {
func ProcessPayload(st state.BeaconState, body interfaces.ReadOnlyBeaconBlockBody) (state.BeaconState, error) {
    payload, err := body.Execution()
    if err != nil {
        return err
        return nil, err
    }
    if err := verifyBlobCommitmentCount(body); err != nil {
        return err
        return nil, err
    }
    if err := ValidatePayloadWhenMergeCompletes(st, payload); err != nil {
        return err
        return nil, err
    }
    if err := ValidatePayload(st, payload); err != nil {
        return err
        return nil, err
    }
    if err := st.SetLatestExecutionPayloadHeader(payload); err != nil {
        return err
        return nil, err
    }
    return nil
    return st, nil
}

func verifyBlobCommitmentCount(body interfaces.ReadOnlyBeaconBlockBody) error {
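The hunk above changes ProcessPayload to return the resulting state alongside the error, so callers now rebind the state instead of only checking the error. A minimal sketch of the updated call pattern, as also reflected in the test hunks below (the wrapping message is illustrative):

    st, err := blocks.ProcessPayload(st, body)
    if err != nil {
        return nil, errors.Wrap(err, "could not process execution payload")
    }
    // Continue using the state value returned by ProcessPayload.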
@@ -253,8 +253,7 @@ func Test_IsExecutionBlockCapella(t *testing.T) {
    require.NoError(t, err)
    got, err := blocks.IsExecutionBlock(wrappedBlock.Body())
    require.NoError(t, err)
    // #14614
    require.Equal(t, true, got)
    require.Equal(t, false, got)
}

func Test_IsExecutionEnabled(t *testing.T) {
@@ -588,7 +587,8 @@ func Test_ProcessPayload(t *testing.T) {
                ExecutionPayload: tt.payload,
            })
            require.NoError(t, err)
            if err := blocks.ProcessPayload(st, body); err != nil {
            st, err := blocks.ProcessPayload(st, body)
            if err != nil {
                require.Equal(t, tt.err.Error(), err.Error())
            } else {
                require.Equal(t, tt.err, err)
@@ -619,7 +619,8 @@ func Test_ProcessPayloadCapella(t *testing.T) {
        ExecutionPayload: payload,
    })
    require.NoError(t, err)
    require.NoError(t, blocks.ProcessPayload(st, body))
    _, err = blocks.ProcessPayload(st, body)
    require.NoError(t, err)
}

func Test_ProcessPayload_Blinded(t *testing.T) {
@@ -676,7 +677,8 @@ func Test_ProcessPayload_Blinded(t *testing.T) {
            ExecutionPayloadHeader: p,
        })
        require.NoError(t, err)
        if err := blocks.ProcessPayload(st, body); err != nil {
        st, err := blocks.ProcessPayload(st, body)
        if err != nil {
            require.Equal(t, tt.err.Error(), err.Error())
        } else {
            require.Equal(t, tt.err, err)
@@ -96,24 +96,6 @@ func VerifyBlockHeaderSignature(beaconState state.BeaconState, header *ethpb.Sig
    return signing.VerifyBlockHeaderSigningRoot(header.Header, proposerPubKey, header.Signature, domain)
}

func VerifyBlockHeaderSignatureUsingCurrentFork(beaconState state.BeaconState, header *ethpb.SignedBeaconBlockHeader) error {
    currentEpoch := slots.ToEpoch(header.Header.Slot)
    fork, err := forks.Fork(currentEpoch)
    if err != nil {
        return err
    }
    domain, err := signing.Domain(fork, currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorsRoot())
    if err != nil {
        return err
    }
    proposer, err := beaconState.ValidatorAtIndex(header.Header.ProposerIndex)
    if err != nil {
        return err
    }
    proposerPubKey := proposer.PublicKey
    return signing.VerifyBlockHeaderSigningRoot(header.Header, proposerPubKey, header.Signature, domain)
}

// VerifyBlockSignatureUsingCurrentFork verifies the proposer signature of a beacon block. This differs
// from the above method by not using fork data from the state and instead retrieving it
// via the respective epoch.
@@ -193,7 +193,7 @@ func ProcessWithdrawals(st state.BeaconState, executionData interfaces.Execution
    }

    if st.Version() >= version.Electra {
        if err := st.DequeuePendingPartialWithdrawals(processedPartialWithdrawalsCount); err != nil {
        if err := st.DequeuePartialWithdrawals(processedPartialWithdrawalsCount); err != nil {
            return nil, fmt.Errorf("unable to dequeue partial withdrawals from state: %w", err)
        }
    }
@@ -386,14 +386,8 @@ func batchProcessNewPendingDeposits(ctx context.Context, state state.BeaconState
        return errors.Wrap(err, "batch signature verification failed")
    }

    pubKeyMap := make(map[[48]byte]struct{}, len(pendingDeposits))

    // Process each deposit individually
    for _, pendingDeposit := range pendingDeposits {
        _, found := pubKeyMap[bytesutil.ToBytes48(pendingDeposit.PublicKey)]
        if !found {
            pubKeyMap[bytesutil.ToBytes48(pendingDeposit.PublicKey)] = struct{}{}
        }
        validSignature := allSignaturesVerified

        // If batch verification failed, check the individual deposit signature
@@ -411,16 +405,9 @@ func batchProcessNewPendingDeposits(ctx context.Context, state state.BeaconState

        // Add validator to the registry if the signature is valid
        if validSignature {
            if found {
                index, _ := state.ValidatorIndexByPubkey(bytesutil.ToBytes48(pendingDeposit.PublicKey))
                if err := helpers.IncreaseBalance(state, index, pendingDeposit.Amount); err != nil {
                    return errors.Wrap(err, "could not increase balance")
                }
            } else {
                err = AddValidatorToRegistry(state, pendingDeposit.PublicKey, pendingDeposit.WithdrawalCredentials, pendingDeposit.Amount)
                if err != nil {
                    return errors.Wrap(err, "failed to add validator to registry")
                }
            err = AddValidatorToRegistry(state, pendingDeposit.PublicKey, pendingDeposit.WithdrawalCredentials, pendingDeposit.Amount)
            if err != nil {
                return errors.Wrap(err, "failed to add validator to registry")
            }
        }
    }
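One side of this hunk deduplicates pending deposits by public key: the first deposit for a key adds a new validator, and later deposits for the same key only increase that validator's balance. A rough sketch of that idea in isolation, with the signature handling elided and helper names as they appear in the hunk:

    seen := make(map[[48]byte]struct{}, len(pendingDeposits))
    for _, dep := range pendingDeposits {
        key := bytesutil.ToBytes48(dep.PublicKey)
        if _, found := seen[key]; found {
            // Key already registered earlier in this batch: top up the balance only.
            index, _ := state.ValidatorIndexByPubkey(key)
            if err := helpers.IncreaseBalance(state, index, dep.Amount); err != nil {
                return errors.Wrap(err, "could not increase balance")
            }
            continue
        }
        seen[key] = struct{}{}
        if err := AddValidatorToRegistry(state, dep.PublicKey, dep.WithdrawalCredentials, dep.Amount); err != nil {
            return errors.Wrap(err, "failed to add validator to registry")
        }
    }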
@@ -521,12 +508,11 @@ func AddValidatorToRegistry(beaconState state.BeaconState, pubKey []byte, withdr
//	validator = Validator(
//	    pubkey=pubkey,
//	    withdrawal_credentials=withdrawal_credentials,
//	    effective_balance=Gwei(0),
//	    slashed=False,
//	    activation_eligibility_epoch=FAR_FUTURE_EPOCH,
//	    activation_epoch=FAR_FUTURE_EPOCH,
//	    exit_epoch=FAR_FUTURE_EPOCH,
//	    withdrawable_epoch=FAR_FUTURE_EPOCH,
//	    effective_balance=Gwei(0),
//	)
//
//	# [Modified in Electra:EIP7251]
@@ -538,12 +524,11 @@ func GetValidatorFromDeposit(pubKey []byte, withdrawalCredentials []byte, amount
    validator := &ethpb.Validator{
        PublicKey:                  pubKey,
        WithdrawalCredentials:      withdrawalCredentials,
        EffectiveBalance:           0,
        Slashed:                    false,
        ActivationEligibilityEpoch: params.BeaconConfig().FarFutureEpoch,
        ActivationEpoch:            params.BeaconConfig().FarFutureEpoch,
        ExitEpoch:                  params.BeaconConfig().FarFutureEpoch,
        WithdrawableEpoch:          params.BeaconConfig().FarFutureEpoch,
        EffectiveBalance:           0,
    }
    v, err := state_native.NewValidator(validator)
    if err != nil {
@@ -573,7 +558,7 @@ func ProcessDepositRequests(ctx context.Context, beaconState state.BeaconState,
    return beaconState, nil
}

// processDepositRequest processes the specific deposit request
// processDepositRequest processes the specific deposit receipt
//
//	def process_deposit_request(state: BeaconState, deposit_request: DepositRequest) -> None:
//
//	    # Set deposit request start index
@@ -603,8 +588,8 @@ func processDepositRequest(beaconState state.BeaconState, request *enginev1.Depo
    }
    if err := beaconState.AppendPendingDeposit(&ethpb.PendingDeposit{
        PublicKey:             bytesutil.SafeCopyBytes(request.Pubkey),
        WithdrawalCredentials: bytesutil.SafeCopyBytes(request.WithdrawalCredentials),
        Amount:                request.Amount,
        WithdrawalCredentials: bytesutil.SafeCopyBytes(request.WithdrawalCredentials),
        Signature:             bytesutil.SafeCopyBytes(request.Signature),
        Slot:                  beaconState.Slot(),
    }); err != nil {
@@ -22,40 +22,6 @@ import (
    "github.com/prysmaticlabs/prysm/v5/testing/util"
)

func TestProcessPendingDepositsMultiplesSameDeposits(t *testing.T) {
    st := stateWithActiveBalanceETH(t, 1000)
    deps := make([]*eth.PendingDeposit, 2) // Make same deposit twice
    validators := st.Validators()
    sk, err := bls.RandKey()
    require.NoError(t, err)
    for i := 0; i < len(deps); i += 1 {
        wc := make([]byte, 32)
        wc[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
        wc[31] = byte(i)
        validators[i].PublicKey = sk.PublicKey().Marshal()
        validators[i].WithdrawalCredentials = wc
        deps[i] = stateTesting.GeneratePendingDeposit(t, sk, 32, bytesutil.ToBytes32(wc), 0)
    }
    require.NoError(t, st.SetPendingDeposits(deps))

    err = electra.ProcessPendingDeposits(context.TODO(), st, 10000)
    require.NoError(t, err)

    val := st.Validators()
    seenPubkeys := make(map[string]struct{})
    for i := 0; i < len(val); i += 1 {
        if len(val[i].PublicKey) == 0 {
            continue
        }
        _, ok := seenPubkeys[string(val[i].PublicKey)]
        if ok {
            t.Fatalf("duplicated pubkeys")
        } else {
            seenPubkeys[string(val[i].PublicKey)] = struct{}{}
        }
    }
}

func TestProcessPendingDeposits(t *testing.T) {
    tests := []struct {
        name string
@@ -319,7 +285,7 @@ func TestBatchProcessNewPendingDeposits(t *testing.T) {
    wc[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
    wc[31] = byte(0)
    validDep := stateTesting.GeneratePendingDeposit(t, sk, params.BeaconConfig().MinActivationBalance, bytesutil.ToBytes32(wc), 0)
    invalidDep := &eth.PendingDeposit{PublicKey: make([]byte, 48)}
    invalidDep := &eth.PendingDeposit{}
    // have a combination of valid and invalid deposits
    deps := []*eth.PendingDeposit{validDep, invalidDep}
    require.NoError(t, electra.BatchProcessNewPendingDeposits(context.Background(), st, deps))
@@ -29,6 +29,7 @@ var (
    ProcessParticipationFlagUpdates = altair.ProcessParticipationFlagUpdates
    ProcessSyncCommitteeUpdates     = altair.ProcessSyncCommitteeUpdates
    AttestationsDelta               = altair.AttestationsDelta
    ProcessSyncAggregate            = altair.ProcessSyncAggregate
)

// ProcessEpoch describes the per epoch operations that are performed on the beacon state.
@@ -84,11 +84,11 @@ func ProcessOperations(
    }
    st, err = ProcessDepositRequests(ctx, st, requests.Deposits)
    if err != nil {
        return nil, errors.Wrap(err, "could not process deposit requests")
        return nil, errors.Wrap(err, "could not process deposit receipts")
    }
    st, err = ProcessWithdrawalRequests(ctx, st, requests.Withdrawals)
    if err != nil {
        return nil, errors.Wrap(err, "could not process withdrawal requests")
        return nil, errors.Wrap(err, "could not process execution layer withdrawal requests")
    }
    if err := ProcessConsolidationRequests(ctx, st, requests.Consolidations); err != nil {
        return nil, fmt.Errorf("could not process consolidation requests: %w", err)
@@ -32,9 +32,6 @@ const (

    // AttesterSlashingReceived is sent after an attester slashing is received from gossip or rpc
    AttesterSlashingReceived = 8

    // DataColumnSidecarReceived is sent after a data column sidecar is received from gossip or rpc.
    DataColumnSidecarReceived = 9
)

// UnAggregatedAttReceivedData is the data sent with UnaggregatedAttReceived events.
@@ -80,7 +77,3 @@ type ProposerSlashingReceivedData struct {
type AttesterSlashingReceivedData struct {
    AttesterSlashing ethpb.AttSlashing
}

type DataColumnSidecarReceivedData struct {
    DataColumn *blocks.VerifiedRODataColumn
}

@@ -31,8 +31,6 @@ const (
    LightClientFinalityUpdate
    // LightClientOptimisticUpdate event
    LightClientOptimisticUpdate
    // PayloadAttributes events are fired upon a missed slot or new head.
    PayloadAttributes
)

// BlockProcessedData is the data sent with BlockProcessed events.

@@ -23,8 +23,11 @@ var (
// Access to these nil fields will result in run time panic,
// it is recommended to run these checks as first line of defense.
func ValidateNilAttestation(attestation ethpb.Att) error {
    if attestation == nil || attestation.IsNil() {
        return errors.New("attestation is nil")
    if attestation == nil {
        return errors.New("attestation can't be nil")
    }
    if attestation.GetData() == nil {
        return errors.New("attestation's data can't be nil")
    }
    if attestation.GetData().Source == nil {
        return errors.New("attestation's source can't be nil")

@@ -260,12 +260,12 @@ func TestValidateNilAttestation(t *testing.T) {
        {
            name:        "nil attestation",
            attestation: nil,
            errString:   "attestation is nil",
            errString:   "attestation can't be nil",
        },
        {
            name:        "nil attestation data",
            attestation: &ethpb.Attestation{},
            errString:   "attestation is nil",
            errString:   "attestation's data can't be nil",
        },
        {
            name: "nil attestation source",

@@ -78,7 +78,6 @@ func TestIsCurrentEpochSyncCommittee_UsingCommittee(t *testing.T) {

func TestIsCurrentEpochSyncCommittee_DoesNotExist(t *testing.T) {
    helpers.ClearCache()
    params.SetupTestConfigCleanup(t)

    validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
    syncCommittee := &ethpb.SyncCommittee{
@@ -265,7 +264,6 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
}

func TestCurrentEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
    params.SetupTestConfigCleanup(t)
    helpers.ClearCache()

    validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)

@@ -1,51 +0,0 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"helpers.go",
|
||||
"log.go",
|
||||
"metrics.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//crypto/hash:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
|
||||
"@com_github_holiman_uint256//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@org_golang_x_sync//errgroup:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["helpers_test.go"],
|
||||
deps = [
|
||||
":go_default_library",
|
||||
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"@com_github_consensys_gnark_crypto//ecc/bls12-381/fr:go_default_library",
|
||||
"@com_github_crate_crypto_go_kzg_4844//:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
)
|
||||
@@ -1,631 +0,0 @@
|
||||
package peerdas
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"math"
|
||||
"math/big"
|
||||
"slices"
|
||||
"time"
|
||||
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/sync/errgroup"
|
||||
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
"github.com/holiman/uint256"
|
||||
errors "github.com/pkg/errors"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/hash"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
const (
|
||||
CustodySubnetCountEnrKey = "csc"
|
||||
)
|
||||
|
||||
// https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/p2p-interface.md#the-discovery-domain-discv5
|
||||
type Csc uint64
|
||||
|
||||
func (Csc) ENRKey() string { return CustodySubnetCountEnrKey }
|
||||
|
||||
var (
|
||||
// Custom errors
|
||||
errCustodySubnetCountTooLarge = errors.New("custody subnet count larger than data column sidecar subnet count")
|
||||
errIndexTooLarge = errors.New("column index is larger than the specified columns count")
|
||||
errMismatchLength = errors.New("mismatch in the length of the commitments and proofs")
|
||||
errRecordNil = errors.New("record is nil")
|
||||
errCannotLoadCustodySubnetCount = errors.New("cannot load the custody subnet count from peer")
|
||||
|
||||
// maxUint256 is the maximum value of a uint256.
|
||||
maxUint256 = &uint256.Int{math.MaxUint64, math.MaxUint64, math.MaxUint64, math.MaxUint64}
|
||||
)
|
||||
|
||||
// CustodyColumnSubnets computes the subnets the node should participate in for custody.
|
||||
func CustodyColumnSubnets(nodeId enode.ID, custodySubnetCount uint64) (map[uint64]bool, error) {
|
||||
dataColumnSidecarSubnetCount := params.BeaconConfig().DataColumnSidecarSubnetCount
|
||||
|
||||
// Check if the custody subnet count is larger than the data column sidecar subnet count.
|
||||
if custodySubnetCount > dataColumnSidecarSubnetCount {
|
||||
return nil, errCustodySubnetCountTooLarge
|
||||
}
|
||||
|
||||
// First, compute the subnet IDs that the node should participate in.
|
||||
subnetIds := make(map[uint64]bool, custodySubnetCount)
|
||||
|
||||
one := uint256.NewInt(1)
|
||||
|
||||
for currentId := new(uint256.Int).SetBytes(nodeId.Bytes()); uint64(len(subnetIds)) < custodySubnetCount; currentId.Add(currentId, one) {
|
||||
// Convert to big endian bytes.
|
||||
currentIdBytesBigEndian := currentId.Bytes32()
|
||||
|
||||
// Convert to little endian.
|
||||
currentIdBytesLittleEndian := bytesutil.ReverseByteOrder(currentIdBytesBigEndian[:])
|
||||
|
||||
// Hash the result.
|
||||
hashedCurrentId := hash.Hash(currentIdBytesLittleEndian)
|
||||
|
||||
// Get the subnet ID.
|
||||
subnetId := binary.LittleEndian.Uint64(hashedCurrentId[:8]) % dataColumnSidecarSubnetCount
|
||||
|
||||
// Add the subnet to the map.
|
||||
subnetIds[subnetId] = true
|
||||
|
||||
// Overflow prevention.
|
||||
if currentId.Cmp(maxUint256) == 0 {
|
||||
currentId = uint256.NewInt(0)
|
||||
}
|
||||
}
|
||||
|
||||
return subnetIds, nil
|
||||
}
|
||||
|
||||
// CustodyColumns computes the columns the node should custody.
|
||||
// https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/das-core.md#helper-functions
|
||||
func CustodyColumns(nodeId enode.ID, custodySubnetCount uint64) (map[uint64]bool, error) {
|
||||
dataColumnSidecarSubnetCount := params.BeaconConfig().DataColumnSidecarSubnetCount
|
||||
|
||||
// Compute the custody subnets.
|
||||
subnetIds, err := CustodyColumnSubnets(nodeId, custodySubnetCount)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "custody subnets")
|
||||
}
|
||||
|
||||
columnsPerSubnet := fieldparams.NumberOfColumns / dataColumnSidecarSubnetCount
|
||||
|
||||
// Knowing the subnet ID and the number of columns per subnet, select all the columns the node should custody.
|
||||
// Columns belonging to the same subnet are contiguous.
|
||||
columnIndices := make(map[uint64]bool, custodySubnetCount*columnsPerSubnet)
|
||||
for i := uint64(0); i < columnsPerSubnet; i++ {
|
||||
for subnetId := range subnetIds {
|
||||
columnIndex := dataColumnSidecarSubnetCount*i + subnetId
|
||||
columnIndices[columnIndex] = true
|
||||
}
|
||||
}
|
||||
|
||||
return columnIndices, nil
|
||||
}
|
||||
|
||||
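As a usage sketch only, with the function names taken from this file and the surrounding wiring purely illustrative: given a node's discovery identity, the helpers above derive which column subnets and column indices that node is expected to custody.

    // Illustrative only: derive the custody column indices for a node.
    func custodyColumnsForNode(nodeID enode.ID) (map[uint64]bool, error) {
        subnetCount := CustodySubnetCount() // defined further down in this file
        columns, err := CustodyColumns(nodeID, subnetCount)
        if err != nil {
            return nil, errors.Wrap(err, "custody columns")
        }
        return columns, nil
    }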
// DataColumnSidecars computes the data column sidecars from the signed block and blobs.
|
||||
// https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/das-core.md#recover_matrix
|
||||
func DataColumnSidecars(signedBlock interfaces.ReadOnlySignedBeaconBlock, blobs []kzg.Blob) ([]*ethpb.DataColumnSidecar, error) {
|
||||
startTime := time.Now()
|
||||
blobsCount := len(blobs)
|
||||
if blobsCount == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Get the signed block header.
|
||||
signedBlockHeader, err := signedBlock.Header()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "signed block header")
|
||||
}
|
||||
|
||||
// Get the block body.
|
||||
block := signedBlock.Block()
|
||||
blockBody := block.Body()
|
||||
|
||||
// Get the blob KZG commitments.
|
||||
blobKzgCommitments, err := blockBody.BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "blob KZG commitments")
|
||||
}
|
||||
|
||||
// Compute the KZG commitments inclusion proof.
|
||||
kzgCommitmentsInclusionProof, err := blocks.MerkleProofKZGCommitments(blockBody)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "merkle proof ZKG commitments")
|
||||
}
|
||||
|
||||
// Compute cells and proofs.
|
||||
cellsAndProofs := make([]kzg.CellsAndProofs, blobsCount)
|
||||
|
||||
eg, _ := errgroup.WithContext(context.Background())
|
||||
for i := range blobs {
|
||||
blobIndex := i
|
||||
eg.Go(func() error {
|
||||
blob := &blobs[blobIndex]
|
||||
blobCellsAndProofs, err := kzg.ComputeCellsAndKZGProofs(blob)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "compute cells and KZG proofs")
|
||||
}
|
||||
|
||||
cellsAndProofs[blobIndex] = blobCellsAndProofs
|
||||
return nil
|
||||
})
|
||||
}
|
||||
if err := eg.Wait(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get the column sidecars.
|
||||
sidecars := make([]*ethpb.DataColumnSidecar, 0, fieldparams.NumberOfColumns)
|
||||
for columnIndex := uint64(0); columnIndex < fieldparams.NumberOfColumns; columnIndex++ {
|
||||
column := make([]kzg.Cell, 0, blobsCount)
|
||||
kzgProofOfColumn := make([]kzg.Proof, 0, blobsCount)
|
||||
|
||||
for rowIndex := 0; rowIndex < blobsCount; rowIndex++ {
|
||||
cellsForRow := cellsAndProofs[rowIndex].Cells
|
||||
proofsForRow := cellsAndProofs[rowIndex].Proofs
|
||||
|
||||
cell := cellsForRow[columnIndex]
|
||||
column = append(column, cell)
|
||||
|
||||
kzgProof := proofsForRow[columnIndex]
|
||||
kzgProofOfColumn = append(kzgProofOfColumn, kzgProof)
|
||||
}
|
||||
|
||||
columnBytes := make([][]byte, 0, blobsCount)
|
||||
for i := range column {
|
||||
columnBytes = append(columnBytes, column[i][:])
|
||||
}
|
||||
|
||||
kzgProofOfColumnBytes := make([][]byte, 0, blobsCount)
|
||||
for _, kzgProof := range kzgProofOfColumn {
|
||||
copiedProof := kzgProof
|
||||
kzgProofOfColumnBytes = append(kzgProofOfColumnBytes, copiedProof[:])
|
||||
}
|
||||
|
||||
sidecar := &ethpb.DataColumnSidecar{
|
||||
ColumnIndex: columnIndex,
|
||||
DataColumn: columnBytes,
|
||||
KzgCommitments: blobKzgCommitments,
|
||||
KzgProof: kzgProofOfColumnBytes,
|
||||
SignedBlockHeader: signedBlockHeader,
|
||||
KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
|
||||
}
|
||||
|
||||
sidecars = append(sidecars, sidecar)
|
||||
}
|
||||
dataColumnComputationTime.Observe(float64(time.Since(startTime).Milliseconds()))
|
||||
return sidecars, nil
|
||||
}
|
||||
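In the layout produced above, blobs are the rows and cells are the columns of the extended matrix, so the sidecar for a given column index carries one cell per blob. A single cell can therefore be addressed as in this illustrative snippet:

    // Illustrative: the cell of blob `blobIndex` held by the sidecar for `columnIndex`.
    cell := sidecars[columnIndex].DataColumn[blobIndex]
    _ = cell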
|
||||
// populateAndFilterIndices returns a sorted slice of indices, setting all indices if none are provided,
|
||||
// and filtering out indices higher than the blob count.
|
||||
func populateAndFilterIndices(indices map[uint64]bool, blobCount uint64) []uint64 {
|
||||
// If no indices are provided, provide all blobs.
|
||||
if len(indices) == 0 {
|
||||
for i := range blobCount {
|
||||
indices[i] = true
|
||||
}
|
||||
}
|
||||
|
||||
// Filter blobs index higher than the blob count.
|
||||
filteredIndices := make(map[uint64]bool, len(indices))
|
||||
for i := range indices {
|
||||
if i < blobCount {
|
||||
filteredIndices[i] = true
|
||||
}
|
||||
}
|
||||
|
||||
// Transform set to slice.
|
||||
indicesSlice := make([]uint64, 0, len(filteredIndices))
|
||||
for i := range filteredIndices {
|
||||
indicesSlice = append(indicesSlice, i)
|
||||
}
|
||||
|
||||
// Sort the indices.
|
||||
slices.Sort[[]uint64](indicesSlice)
|
||||
|
||||
return indicesSlice
|
||||
}
|
||||
|
||||
// Blobs extracts blobs from `dataColumnsSidecar`.
|
||||
// This can be seen as the reciprocal function of DataColumnSidecars.
|
||||
// `dataColumnsSidecar` needs to contain the data columns corresponding to the non-extended matrix,
|
||||
// else an error will be returned.
|
||||
// (`dataColumnsSidecar` can contain extra columns, but they will be ignored.)
|
||||
func Blobs(indices map[uint64]bool, dataColumnsSidecar []*ethpb.DataColumnSidecar) ([]*blocks.VerifiedROBlob, error) {
|
||||
columnCount := fieldparams.NumberOfColumns
|
||||
|
||||
neededColumnCount := columnCount / 2
|
||||
|
||||
// Check if all needed columns are present.
|
||||
sliceIndexFromColumnIndex := make(map[uint64]int, len(dataColumnsSidecar))
|
||||
for i := range dataColumnsSidecar {
|
||||
dataColumnSideCar := dataColumnsSidecar[i]
|
||||
columnIndex := dataColumnSideCar.ColumnIndex
|
||||
|
||||
if columnIndex < uint64(neededColumnCount) {
|
||||
sliceIndexFromColumnIndex[columnIndex] = i
|
||||
}
|
||||
}
|
||||
|
||||
actualColumnCount := len(sliceIndexFromColumnIndex)
|
||||
|
||||
// Get missing columns.
|
||||
if actualColumnCount < neededColumnCount {
|
||||
missingColumns := make(map[int]bool, neededColumnCount-actualColumnCount)
|
||||
for i := range neededColumnCount {
|
||||
if _, ok := sliceIndexFromColumnIndex[uint64(i)]; !ok {
|
||||
missingColumns[i] = true
|
||||
}
|
||||
}
|
||||
|
||||
missingColumnsSlice := make([]int, 0, len(missingColumns))
|
||||
for i := range missingColumns {
|
||||
missingColumnsSlice = append(missingColumnsSlice, i)
|
||||
}
|
||||
|
||||
slices.Sort[[]int](missingColumnsSlice)
|
||||
return nil, errors.Errorf("some columns are missing: %v", missingColumnsSlice)
|
||||
}
|
||||
|
||||
// It is safe to retrieve the first column since we already checked that `dataColumnsSidecar` is not empty.
|
||||
firstDataColumnSidecar := dataColumnsSidecar[0]
|
||||
|
||||
blobCount := uint64(len(firstDataColumnSidecar.DataColumn))
|
||||
|
||||
// Check all columns have the same length.
|
||||
for i := range dataColumnsSidecar {
|
||||
if uint64(len(dataColumnsSidecar[i].DataColumn)) != blobCount {
|
||||
return nil, errors.Errorf("mismatch in the length of the data columns, expected %d, got %d", blobCount, len(dataColumnsSidecar[i].DataColumn))
|
||||
}
|
||||
}
|
||||
|
||||
// Reconstruct verified RO blobs from columns.
|
||||
verifiedROBlobs := make([]*blocks.VerifiedROBlob, 0, blobCount)
|
||||
|
||||
// Populate and filter indices.
|
||||
indicesSlice := populateAndFilterIndices(indices, blobCount)
|
||||
|
||||
for _, blobIndex := range indicesSlice {
|
||||
var blob kzg.Blob
|
||||
|
||||
// Compute the content of the blob.
|
||||
for columnIndex := range neededColumnCount {
|
||||
sliceIndex, ok := sliceIndexFromColumnIndex[uint64(columnIndex)]
|
||||
if !ok {
|
||||
return nil, errors.Errorf("missing column %d, this should never happen", columnIndex)
|
||||
}
|
||||
|
||||
dataColumnSideCar := dataColumnsSidecar[sliceIndex]
|
||||
cell := dataColumnSideCar.DataColumn[blobIndex]
|
||||
|
||||
for i := 0; i < len(cell); i++ {
|
||||
blob[columnIndex*kzg.BytesPerCell+i] = cell[i]
|
||||
}
|
||||
}
|
||||
|
||||
// Retrieve the blob KZG commitment.
|
||||
blobKZGCommitment := kzg.Commitment(firstDataColumnSidecar.KzgCommitments[blobIndex])
|
||||
|
||||
// Compute the blob KZG proof.
|
||||
blobKzgProof, err := kzg.ComputeBlobKZGProof(&blob, blobKZGCommitment)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "compute blob KZG proof")
|
||||
}
|
||||
|
||||
blobSidecar := &ethpb.BlobSidecar{
|
||||
Index: blobIndex,
|
||||
Blob: blob[:],
|
||||
KzgCommitment: blobKZGCommitment[:],
|
||||
KzgProof: blobKzgProof[:],
|
||||
SignedBlockHeader: firstDataColumnSidecar.SignedBlockHeader,
|
||||
CommitmentInclusionProof: firstDataColumnSidecar.KzgCommitmentsInclusionProof,
|
||||
}
|
||||
|
||||
roBlob, err := blocks.NewROBlob(blobSidecar)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "new RO blob")
|
||||
}
|
||||
|
||||
verifiedROBlob := blocks.NewVerifiedROBlob(roBlob)
|
||||
verifiedROBlobs = append(verifiedROBlobs, &verifiedROBlob)
|
||||
}
|
||||
|
||||
return verifiedROBlobs, nil
|
||||
}
|
||||
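Reassembly in Blobs above is a byte-wise concatenation of the first NumberOfColumns/2 cells for a given blob index; the inner copy loop is equivalent to this illustrative slice copy:

    // Illustrative: place cell `columnIndex` of the blob at its byte offset.
    copy(blob[columnIndex*kzg.BytesPerCell:(columnIndex+1)*kzg.BytesPerCell], cell)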
|
||||
// DataColumnSidecarsForReconstruct is a TEMPORARY function until there is an official specification for it.
|
||||
// It is scheduled for deletion.
|
||||
func DataColumnSidecarsForReconstruct(
|
||||
blobKzgCommitments [][]byte,
|
||||
signedBlockHeader *ethpb.SignedBeaconBlockHeader,
|
||||
kzgCommitmentsInclusionProof [][]byte,
|
||||
cellsAndProofs []kzg.CellsAndProofs,
|
||||
) ([]*ethpb.DataColumnSidecar, error) {
|
||||
// Each CellsAndProofs corresponds to a Blob
|
||||
// So we can get the BlobCount by checking the length of CellsAndProofs
|
||||
blobsCount := len(cellsAndProofs)
|
||||
if blobsCount == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Get the column sidecars.
|
||||
sidecars := make([]*ethpb.DataColumnSidecar, 0, fieldparams.NumberOfColumns)
|
||||
for columnIndex := uint64(0); columnIndex < fieldparams.NumberOfColumns; columnIndex++ {
|
||||
column := make([]kzg.Cell, 0, blobsCount)
|
||||
kzgProofOfColumn := make([]kzg.Proof, 0, blobsCount)
|
||||
|
||||
for rowIndex := 0; rowIndex < blobsCount; rowIndex++ {
|
||||
cellsForRow := cellsAndProofs[rowIndex].Cells
|
||||
proofsForRow := cellsAndProofs[rowIndex].Proofs
|
||||
|
||||
cell := cellsForRow[columnIndex]
|
||||
column = append(column, cell)
|
||||
|
||||
kzgProof := proofsForRow[columnIndex]
|
||||
kzgProofOfColumn = append(kzgProofOfColumn, kzgProof)
|
||||
}
|
||||
|
||||
columnBytes := make([][]byte, 0, blobsCount)
|
||||
for i := range column {
|
||||
columnBytes = append(columnBytes, column[i][:])
|
||||
}
|
||||
|
||||
kzgProofOfColumnBytes := make([][]byte, 0, blobsCount)
|
||||
for _, kzgProof := range kzgProofOfColumn {
|
||||
copiedProof := kzgProof
|
||||
kzgProofOfColumnBytes = append(kzgProofOfColumnBytes, copiedProof[:])
|
||||
}
|
||||
|
||||
sidecar := &ethpb.DataColumnSidecar{
|
||||
ColumnIndex: columnIndex,
|
||||
DataColumn: columnBytes,
|
||||
KzgCommitments: blobKzgCommitments,
|
||||
KzgProof: kzgProofOfColumnBytes,
|
||||
SignedBlockHeader: signedBlockHeader,
|
||||
KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
|
||||
}
|
||||
|
||||
sidecars = append(sidecars, sidecar)
|
||||
}
|
||||
|
||||
return sidecars, nil
|
||||
}
|
||||
|
||||
// VerifyDataColumnsSidecarKZGProofs verifies the provided KZG Proofs of data columns.
|
||||
func VerifyDataColumnsSidecarKZGProofs(sidecars []blocks.RODataColumn) (bool, error) {
|
||||
// Retrieve the number of columns.
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
|
||||
// Compute the total count.
|
||||
count := 0
|
||||
for _, sidecar := range sidecars {
|
||||
count += len(sidecar.DataColumn)
|
||||
}
|
||||
|
||||
commitments := make([]kzg.Bytes48, 0, count)
|
||||
indices := make([]uint64, 0, count)
|
||||
cells := make([]kzg.Cell, 0, count)
|
||||
proofs := make([]kzg.Bytes48, 0, count)
|
||||
|
||||
for _, sidecar := range sidecars {
|
||||
// Check if the column index is not too large
|
||||
if sidecar.ColumnIndex >= numberOfColumns {
|
||||
return false, errIndexTooLarge
|
||||
}
|
||||
|
||||
// Check if the KZG commitments size and data column size match.
|
||||
if len(sidecar.DataColumn) != len(sidecar.KzgCommitments) {
|
||||
return false, errMismatchLength
|
||||
}
|
||||
|
||||
// Check if the KZG proofs size and data column size match.
|
||||
if len(sidecar.DataColumn) != len(sidecar.KzgProof) {
|
||||
return false, errMismatchLength
|
||||
}
|
||||
|
||||
for i := range sidecar.DataColumn {
|
||||
commitments = append(commitments, kzg.Bytes48(sidecar.KzgCommitments[i]))
|
||||
indices = append(indices, sidecar.ColumnIndex)
|
||||
cells = append(cells, kzg.Cell(sidecar.DataColumn[i]))
|
||||
proofs = append(proofs, kzg.Bytes48(sidecar.KzgProof[i]))
|
||||
}
|
||||
}
|
||||
|
||||
// Verify all the batch at once.
|
||||
verified, err := kzg.VerifyCellKZGProofBatch(commitments, indices, cells, proofs)
|
||||
if err != nil {
|
||||
return false, errors.Wrap(err, "verify cell KZG proof batch")
|
||||
}
|
||||
|
||||
return verified, nil
|
||||
}
|
||||
|
||||
// CustodySubnetCount returns the number of subnets the node should participate in for custody.
|
||||
func CustodySubnetCount() uint64 {
|
||||
if flags.Get().SubscribeToAllSubnets {
|
||||
return params.BeaconConfig().DataColumnSidecarSubnetCount
|
||||
}
|
||||
|
||||
return params.BeaconConfig().CustodyRequirement
|
||||
}
|
||||
|
||||
// SubnetSamplingSize returns the number of subnets the node should sample from.
|
||||
// https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/das-core.md#subnet-sampling
|
||||
func SubnetSamplingSize() uint64 {
|
||||
samplesPerSlot := params.BeaconConfig().SamplesPerSlot
|
||||
custodySubnetCount := CustodySubnetCount()
|
||||
|
||||
return max(samplesPerSlot, custodySubnetCount)
|
||||
}
|
||||
|
||||
// CustodyColumnCount returns the number of columns the node should custody.
|
||||
func CustodyColumnCount() uint64 {
|
||||
// Get the number of subnets.
|
||||
dataColumnSidecarSubnetCount := params.BeaconConfig().DataColumnSidecarSubnetCount
|
||||
|
||||
// Compute the number of columns per subnet.
|
||||
columnsPerSubnet := fieldparams.NumberOfColumns / dataColumnSidecarSubnetCount
|
||||
|
||||
// Get the number of subnets we custody
|
||||
custodySubnetCount := CustodySubnetCount()
|
||||
|
||||
// Finally, compute the number of columns we should custody.
|
||||
custodyColumnCount := custodySubnetCount * columnsPerSubnet
|
||||
|
||||
return custodyColumnCount
|
||||
}
|
||||
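For a concrete feel of the arithmetic, using the values from TestCustodyColumnCount further down and assuming 128 total columns:

    custodyColumnCount = CustodyRequirement * (NumberOfColumns / DataColumnSidecarSubnetCount)
                       = 2 * (128 / 32) = 8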
|
||||
// HypergeomCDF computes the hypergeometric cumulative distribution function.
|
||||
// https://en.wikipedia.org/wiki/Hypergeometric_distribution
|
||||
func HypergeomCDF(k, M, n, N uint64) float64 {
|
||||
denominatorInt := new(big.Int).Binomial(int64(M), int64(N)) // lint:ignore uintcast
|
||||
denominator := new(big.Float).SetInt(denominatorInt)
|
||||
|
||||
rBig := big.NewFloat(0)
|
||||
|
||||
for i := uint64(0); i < k+1; i++ {
|
||||
a := new(big.Int).Binomial(int64(n), int64(i)) // lint:ignore uintcast
|
||||
b := new(big.Int).Binomial(int64(M-n), int64(N-i))
|
||||
numeratorInt := new(big.Int).Mul(a, b)
|
||||
numerator := new(big.Float).SetInt(numeratorInt)
|
||||
item := new(big.Float).Quo(numerator, denominator)
|
||||
rBig.Add(rBig, item)
|
||||
}
|
||||
|
||||
r, _ := rBig.Float64()
|
||||
|
||||
return r
|
||||
}
|
||||
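For reference, with the parameter names used above (M is the population size, n the number of successes in the population, N the sample size), the function sums the standard hypergeometric probability mass up to k:

    P(X <= k) = sum_{i=0..k} C(n, i) * C(M-n, N-i) / C(M, N)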
|
||||
// ExtendedSampleCount computes, for a given number of samples per slot and allowed failures the
|
||||
// number of samples we should actually query from peers.
|
||||
// TODO: Add link to the specification once it is available.
|
||||
func ExtendedSampleCount(samplesPerSlot, allowedFailures uint64) uint64 {
|
||||
// Retrieve the columns count
|
||||
columnsCount := params.BeaconConfig().NumberOfColumns
|
||||
|
||||
// If half of the columns are missing, we are able to reconstruct the data.
|
||||
// If half of the columns + 1 are missing, we are not able to reconstruct the data.
|
||||
// This is the smallest worst case.
|
||||
worstCaseMissing := columnsCount/2 + 1
|
||||
|
||||
// Compute the false positive threshold.
|
||||
falsePositiveThreshold := HypergeomCDF(0, columnsCount, worstCaseMissing, samplesPerSlot)
|
||||
|
||||
var sampleCount uint64
|
||||
|
||||
// Finally, compute the extended sample count.
|
||||
for sampleCount = samplesPerSlot; sampleCount < columnsCount+1; sampleCount++ {
|
||||
if HypergeomCDF(allowedFailures, columnsCount, worstCaseMissing, sampleCount) <= falsePositiveThreshold {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return sampleCount
|
||||
}
|
||||
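A quick usage sketch, with the expected value taken from the TestExtendedSampleCount table below: with 16 samples per slot, tolerating 2 failed samples raises the number of columns to query to 24.

    // Illustrative: how many columns to actually query from peers.
    samples := ExtendedSampleCount(16, 2) // 24, per the test table below
    _ = samples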
|
||||
func CustodyCountFromRecord(record *enr.Record) (uint64, error) {
|
||||
// By default, we assume the peer custodies the minimum number of subnets.
|
||||
if record == nil {
|
||||
return 0, errRecordNil
|
||||
}
|
||||
|
||||
// Load the `custody_subnet_count`
|
||||
var csc Csc
|
||||
if err := record.Load(&csc); err != nil {
|
||||
return 0, errCannotLoadCustodySubnetCount
|
||||
}
|
||||
|
||||
return uint64(csc), nil
|
||||
}
|
||||
|
||||
func CanSelfReconstruct(numCol uint64) bool {
|
||||
total := params.BeaconConfig().NumberOfColumns
|
||||
// if total is odd, then we need total / 2 + 1 columns to reconstruct
|
||||
// if total is even, then we need total / 2 columns to reconstruct
|
||||
columnsNeeded := total/2 + total%2
|
||||
return numCol >= columnsNeeded
|
||||
}
|
||||
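The threshold above is simply the ceiling of half the column count:

    columnsNeeded = total/2 + total%2 = ceil(total / 2)

So with 64 columns a node needs at least 32 of them, and with 65 columns it needs at least 33, matching the cases in TestCanSelfReconstruct further down.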
|
||||
// RecoverCellsAndProofs recovers the cells and proofs from the data column sidecars.
|
||||
func RecoverCellsAndProofs(
|
||||
dataColumnSideCars []*ethpb.DataColumnSidecar,
|
||||
blockRoot [fieldparams.RootLength]byte,
|
||||
) ([]kzg.CellsAndProofs, error) {
|
||||
var wg errgroup.Group
|
||||
|
||||
dataColumnSideCarsCount := len(dataColumnSideCars)
|
||||
|
||||
if dataColumnSideCarsCount == 0 {
|
||||
return nil, errors.New("no data column sidecars")
|
||||
}
|
||||
|
||||
// Check if all columns have the same length.
|
||||
blobCount := len(dataColumnSideCars[0].DataColumn)
|
||||
for _, sidecar := range dataColumnSideCars {
|
||||
length := len(sidecar.DataColumn)
|
||||
|
||||
if length != blobCount {
|
||||
return nil, errors.New("columns do not have the same length")
|
||||
}
|
||||
}
|
||||
|
||||
// Recover cells and compute proofs in parallel.
|
||||
recoveredCellsAndProofs := make([]kzg.CellsAndProofs, blobCount)
|
||||
|
||||
for blobIndex := 0; blobIndex < blobCount; blobIndex++ {
|
||||
bIndex := blobIndex
|
||||
wg.Go(func() error {
|
||||
start := time.Now()
|
||||
|
||||
cellsIndices := make([]uint64, 0, dataColumnSideCarsCount)
|
||||
cells := make([]kzg.Cell, 0, dataColumnSideCarsCount)
|
||||
|
||||
for _, sidecar := range dataColumnSideCars {
|
||||
// Build the cell indices.
|
||||
cellsIndices = append(cellsIndices, sidecar.ColumnIndex)
|
||||
|
||||
// Get the cell.
|
||||
column := sidecar.DataColumn
|
||||
cell := column[bIndex]
|
||||
|
||||
cells = append(cells, kzg.Cell(cell))
|
||||
}
|
||||
|
||||
// Recover the cells and proofs for the corresponding blob
|
||||
cellsAndProofs, err := kzg.RecoverCellsAndKZGProofs(cellsIndices, cells)
|
||||
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "recover cells and KZG proofs for blob %d", bIndex)
|
||||
}
|
||||
|
||||
recoveredCellsAndProofs[bIndex] = cellsAndProofs
|
||||
log.WithFields(logrus.Fields{
|
||||
"elapsed": time.Since(start),
|
||||
"index": bIndex,
|
||||
"root": fmt.Sprintf("%x", blockRoot),
|
||||
}).Debug("Recovered cells and proofs")
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
if err := wg.Wait(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return recoveredCellsAndProofs, nil
|
||||
}
|
||||
@@ -1,544 +0,0 @@
|
||||
package peerdas_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha256"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/consensys/gnark-crypto/ecc/bls12-381/fr"
|
||||
GoKZG "github.com/crate-crypto/go-kzg-4844"
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
|
||||
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func deterministicRandomness(seed int64) [32]byte {
|
||||
// Converts an int64 to a byte slice
|
||||
buf := new(bytes.Buffer)
|
||||
err := binary.Write(buf, binary.BigEndian, seed)
|
||||
if err != nil {
|
||||
logrus.WithError(err).Error("Failed to write int64 to bytes buffer")
|
||||
return [32]byte{}
|
||||
}
|
||||
bytes := buf.Bytes()
|
||||
|
||||
return sha256.Sum256(bytes)
|
||||
}
|
||||
|
||||
// Returns a serialized random field element in big-endian
|
||||
func GetRandFieldElement(seed int64) [32]byte {
|
||||
bytes := deterministicRandomness(seed)
|
||||
var r fr.Element
|
||||
r.SetBytes(bytes[:])
|
||||
|
||||
return GoKZG.SerializeScalar(r)
|
||||
}
|
||||
|
||||
// Returns a random blob using the passed seed as entropy
|
||||
func GetRandBlob(seed int64) kzg.Blob {
|
||||
var blob kzg.Blob
|
||||
bytesPerBlob := GoKZG.ScalarsPerBlob * GoKZG.SerializedScalarSize
|
||||
for i := 0; i < bytesPerBlob; i += GoKZG.SerializedScalarSize {
|
||||
fieldElementBytes := GetRandFieldElement(seed + int64(i))
|
||||
copy(blob[i:i+GoKZG.SerializedScalarSize], fieldElementBytes[:])
|
||||
}
|
||||
return blob
|
||||
}
|
||||
|
||||
func GenerateCommitmentAndProof(blob *kzg.Blob) (*kzg.Commitment, *kzg.Proof, error) {
|
||||
commitment, err := kzg.BlobToKZGCommitment(blob)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
proof, err := kzg.ComputeBlobKZGProof(blob, commitment)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return &commitment, &proof, err
|
||||
}
|
||||
|
||||
func TestVerifyDataColumnSidecarKZGProofs(t *testing.T) {
|
||||
dbBlock := util.NewBeaconBlockDeneb()
|
||||
require.NoError(t, kzg.Start())
|
||||
|
||||
var (
|
||||
comms [][]byte
|
||||
blobs []kzg.Blob
|
||||
)
|
||||
for i := int64(0); i < 6; i++ {
|
||||
blob := GetRandBlob(i)
|
||||
commitment, _, err := GenerateCommitmentAndProof(&blob)
|
||||
require.NoError(t, err)
|
||||
comms = append(comms, commitment[:])
|
||||
blobs = append(blobs, blob)
|
||||
}
|
||||
|
||||
dbBlock.Block.Body.BlobKzgCommitments = comms
|
||||
sBlock, err := blocks.NewSignedBeaconBlock(dbBlock)
|
||||
require.NoError(t, err)
|
||||
sCars, err := peerdas.DataColumnSidecars(sBlock, blobs)
|
||||
require.NoError(t, err)
|
||||
|
||||
for i, sidecar := range sCars {
|
||||
roCol, err := blocks.NewRODataColumn(sidecar)
|
||||
require.NoError(t, err)
|
||||
verified, err := peerdas.VerifyDataColumnsSidecarKZGProofs([]blocks.RODataColumn{roCol})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, verified, fmt.Sprintf("sidecar %d failed", i))
|
||||
}
|
||||
}
|
||||
|
||||
func TestDataColumnSidecars(t *testing.T) {
|
||||
var expected []*ethpb.DataColumnSidecar = nil
|
||||
actual, err := peerdas.DataColumnSidecars(nil, []kzg.Blob{})
|
||||
require.NoError(t, err)
|
||||
|
||||
require.DeepSSZEqual(t, expected, actual)
|
||||
}
|
||||
|
||||
func TestBlobs(t *testing.T) {
|
||||
blobsIndice := map[uint64]bool{}
|
||||
|
||||
almostAllColumns := make([]*ethpb.DataColumnSidecar, 0, fieldparams.NumberOfColumns/2)
|
||||
for i := 2; i < fieldparams.NumberOfColumns/2+2; i++ {
|
||||
almostAllColumns = append(almostAllColumns, &ethpb.DataColumnSidecar{
|
||||
ColumnIndex: uint64(i),
|
||||
})
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
input []*ethpb.DataColumnSidecar
|
||||
expected []*blocks.VerifiedROBlob
|
||||
err error
|
||||
}{
|
||||
{
|
||||
name: "empty input",
|
||||
input: []*ethpb.DataColumnSidecar{},
|
||||
expected: nil,
|
||||
err: errors.New("some columns are missing: [0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63]"),
|
||||
},
|
||||
{
|
||||
name: "missing columns",
|
||||
input: almostAllColumns,
|
||||
expected: nil,
|
||||
err: errors.New("some columns are missing: [0 1]"),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
actual, err := peerdas.Blobs(blobsIndice, tc.input)
|
||||
if tc.err != nil {
|
||||
require.Equal(t, tc.err.Error(), err.Error())
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
require.DeepSSZEqual(t, tc.expected, actual)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestDataColumnsSidecarsBlobsRoundtrip(t *testing.T) {
|
||||
const blobCount = 5
|
||||
blobsIndex := map[uint64]bool{}
|
||||
|
||||
// Start the trusted setup.
|
||||
err := kzg.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create a protobuf signed beacon block.
|
||||
signedBeaconBlockPb := util.NewBeaconBlockDeneb()
|
||||
|
||||
// Generate random blobs and their corresponding commitments and proofs.
|
||||
blobs := make([]kzg.Blob, 0, blobCount)
|
||||
blobKzgCommitments := make([]*kzg.Commitment, 0, blobCount)
|
||||
blobKzgProofs := make([]*kzg.Proof, 0, blobCount)
|
||||
|
||||
for blobIndex := range blobCount {
|
||||
// Create a random blob.
|
||||
blob := GetRandBlob(int64(blobIndex))
|
||||
blobs = append(blobs, blob)
|
||||
|
||||
// Generate a blobKZGCommitment for the blob.
|
||||
blobKZGCommitment, proof, err := GenerateCommitmentAndProof(&blob)
|
||||
require.NoError(t, err)
|
||||
|
||||
blobKzgCommitments = append(blobKzgCommitments, blobKZGCommitment)
|
||||
blobKzgProofs = append(blobKzgProofs, proof)
|
||||
}
|
||||
|
||||
// Set the commitments into the block.
|
||||
blobZkgCommitmentsBytes := make([][]byte, 0, blobCount)
|
||||
for _, blobKZGCommitment := range blobKzgCommitments {
|
||||
blobZkgCommitmentsBytes = append(blobZkgCommitmentsBytes, blobKZGCommitment[:])
|
||||
}
|
||||
|
||||
signedBeaconBlockPb.Block.Body.BlobKzgCommitments = blobZkgCommitmentsBytes
|
||||
|
||||
// Generate verified RO blobs.
|
||||
verifiedROBlobs := make([]*blocks.VerifiedROBlob, 0, blobCount)
|
||||
|
||||
// Create a signed beacon block from the protobuf.
|
||||
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
|
||||
require.NoError(t, err)
|
||||
|
||||
commitmentInclusionProof, err := blocks.MerkleProofKZGCommitments(signedBeaconBlock.Block().Body())
|
||||
require.NoError(t, err)
|
||||
|
||||
for blobIndex := range blobCount {
|
||||
blob := blobs[blobIndex]
|
||||
blobKZGCommitment := blobKzgCommitments[blobIndex]
|
||||
blobKzgProof := blobKzgProofs[blobIndex]
|
||||
|
||||
// Get the signed beacon block header.
|
||||
signedBeaconBlockHeader, err := signedBeaconBlock.Header()
|
||||
require.NoError(t, err)
|
||||
|
||||
blobSidecar := &ethpb.BlobSidecar{
|
||||
Index: uint64(blobIndex),
|
||||
Blob: blob[:],
|
||||
KzgCommitment: blobKZGCommitment[:],
|
||||
KzgProof: blobKzgProof[:],
|
||||
SignedBlockHeader: signedBeaconBlockHeader,
|
||||
CommitmentInclusionProof: commitmentInclusionProof,
|
||||
}
|
||||
|
||||
roBlob, err := blocks.NewROBlob(blobSidecar)
|
||||
require.NoError(t, err)
|
||||
|
||||
verifiedROBlob := blocks.NewVerifiedROBlob(roBlob)
|
||||
verifiedROBlobs = append(verifiedROBlobs, &verifiedROBlob)
|
||||
}
|
||||
|
||||
// Compute data columns sidecars from the signed beacon block and from the blobs.
|
||||
dataColumnsSidecar, err := peerdas.DataColumnSidecars(signedBeaconBlock, blobs)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Compute the blobs from the data columns sidecar.
|
||||
roundtripBlobs, err := peerdas.Blobs(blobsIndex, dataColumnsSidecar)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Check that the blobs are the same.
|
||||
require.DeepSSZEqual(t, verifiedROBlobs, roundtripBlobs)
|
||||
}
|
||||
|
||||
func TestCustodySubnetCount(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
subscribeToAllSubnets bool
|
||||
expected uint64
|
||||
}{
|
||||
{
|
||||
name: "subscribeToAllSubnets=false",
|
||||
subscribeToAllSubnets: false,
|
||||
expected: params.BeaconConfig().CustodyRequirement,
|
||||
},
|
||||
{
|
||||
name: "subscribeToAllSubnets=true",
|
||||
subscribeToAllSubnets: true,
|
||||
expected: params.BeaconConfig().DataColumnSidecarSubnetCount,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
// Set flags.
|
||||
resetFlags := flags.Get()
|
||||
defer func() {
|
||||
flags.Init(resetFlags)
|
||||
}()
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
gFlags := new(flags.GlobalFlags)
|
||||
gFlags.SubscribeToAllSubnets = tc.subscribeToAllSubnets
|
||||
flags.Init(gFlags)
|
||||
|
||||
// Get the custody subnet count.
|
||||
actual := peerdas.CustodySubnetCount()
|
||||
require.Equal(t, tc.expected, actual)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCustodyColumnCount(t *testing.T) {
|
||||
const expected uint64 = 8
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
config := params.BeaconConfig().Copy()
|
||||
config.DataColumnSidecarSubnetCount = 32
|
||||
config.CustodyRequirement = 2
|
||||
params.OverrideBeaconConfig(config)
|
||||
|
||||
actual := peerdas.CustodyColumnCount()
|
||||
require.Equal(t, expected, actual)
|
||||
}
|
||||
|
||||
func TestHypergeomCDF(t *testing.T) {
|
||||
// Test case from https://en.wikipedia.org/wiki/Hypergeometric_distribution
|
||||
// Population size: 1000, number of successes in population: 500, sample size: 10, number of successes in sample: 5
|
||||
// Expected result: 0.072
|
||||
const (
|
||||
expected = 0.0796665913283742
|
||||
margin = 0.000001
|
||||
)
|
||||
|
||||
actual := peerdas.HypergeomCDF(5, 128, 65, 16)
|
||||
require.Equal(t, true, expected-margin <= actual && actual <= expected+margin)
|
||||
}
|
||||
|
||||
func TestExtendedSampleCount(t *testing.T) {
|
||||
const samplesPerSlot = 16
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
allowedMissings uint64
|
||||
extendedSampleCount uint64
|
||||
}{
|
||||
{name: "allowedMissings=0", allowedMissings: 0, extendedSampleCount: 16},
|
||||
{name: "allowedMissings=1", allowedMissings: 1, extendedSampleCount: 20},
|
||||
{name: "allowedMissings=2", allowedMissings: 2, extendedSampleCount: 24},
|
||||
{name: "allowedMissings=3", allowedMissings: 3, extendedSampleCount: 27},
|
||||
{name: "allowedMissings=4", allowedMissings: 4, extendedSampleCount: 29},
|
||||
{name: "allowedMissings=5", allowedMissings: 5, extendedSampleCount: 32},
|
||||
{name: "allowedMissings=6", allowedMissings: 6, extendedSampleCount: 35},
|
||||
{name: "allowedMissings=7", allowedMissings: 7, extendedSampleCount: 37},
|
||||
{name: "allowedMissings=8", allowedMissings: 8, extendedSampleCount: 40},
|
||||
{name: "allowedMissings=9", allowedMissings: 9, extendedSampleCount: 42},
|
||||
{name: "allowedMissings=10", allowedMissings: 10, extendedSampleCount: 44},
|
||||
{name: "allowedMissings=11", allowedMissings: 11, extendedSampleCount: 47},
|
||||
{name: "allowedMissings=12", allowedMissings: 12, extendedSampleCount: 49},
|
||||
{name: "allowedMissings=13", allowedMissings: 13, extendedSampleCount: 51},
|
||||
{name: "allowedMissings=14", allowedMissings: 14, extendedSampleCount: 53},
|
||||
{name: "allowedMissings=15", allowedMissings: 15, extendedSampleCount: 55},
|
||||
{name: "allowedMissings=16", allowedMissings: 16, extendedSampleCount: 57},
|
||||
{name: "allowedMissings=17", allowedMissings: 17, extendedSampleCount: 59},
|
||||
{name: "allowedMissings=18", allowedMissings: 18, extendedSampleCount: 61},
|
||||
{name: "allowedMissings=19", allowedMissings: 19, extendedSampleCount: 63},
|
||||
{name: "allowedMissings=20", allowedMissings: 20, extendedSampleCount: 65},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
result := peerdas.ExtendedSampleCount(samplesPerSlot, tc.allowedMissings)
|
||||
require.Equal(t, tc.extendedSampleCount, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCustodyCountFromRecord(t *testing.T) {
|
||||
const expected uint64 = 7
|
||||
|
||||
// Create an Ethereum record.
|
||||
record := &enr.Record{}
|
||||
record.Set(peerdas.Csc(expected))
|
||||
|
||||
actual, err := peerdas.CustodyCountFromRecord(record)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expected, actual)
|
||||
}
|
||||
|
||||
func TestCanSelfReconstruct(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
totalNumberOfColumns uint64
|
||||
custodyNumberOfColumns uint64
|
||||
expected bool
|
||||
}{
|
||||
{
|
||||
name: "totalNumberOfColumns=64, custodyNumberOfColumns=31",
|
||||
totalNumberOfColumns: 64,
|
||||
custodyNumberOfColumns: 31,
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "totalNumberOfColumns=64, custodyNumberOfColumns=32",
|
||||
totalNumberOfColumns: 64,
|
||||
custodyNumberOfColumns: 32,
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "totalNumberOfColumns=65, custodyNumberOfColumns=32",
|
||||
totalNumberOfColumns: 65,
|
||||
custodyNumberOfColumns: 32,
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "totalNumberOfColumns=63, custodyNumberOfColumns=33",
|
||||
totalNumberOfColumns: 65,
|
||||
custodyNumberOfColumns: 33,
|
||||
expected: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
// Set the total number of columns.
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.NumberOfColumns = tc.totalNumberOfColumns
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
// Check if reconstruction is possible.
|
||||
actual := peerdas.CanSelfReconstruct(tc.custodyNumberOfColumns)
|
||||
require.Equal(t, tc.expected, actual)
|
||||
})
|
||||
}
|
||||
}

func TestReconstructionRoundTrip(t *testing.T) {
	params.SetupTestConfigCleanup(t)

	const blobCount = 5

	var blockRoot [fieldparams.RootLength]byte

	signedBeaconBlockPb := util.NewBeaconBlockDeneb()
	require.NoError(t, kzg.Start())

	// Generate random blobs and their corresponding commitments.
	var (
		blobsKzgCommitments [][]byte
		blobs               []kzg.Blob
	)
	for i := range blobCount {
		blob := GetRandBlob(int64(i))
		commitment, _, err := GenerateCommitmentAndProof(&blob)
		require.NoError(t, err)

		blobsKzgCommitments = append(blobsKzgCommitments, commitment[:])
		blobs = append(blobs, blob)
	}

	// Generate a signed beacon block.
	signedBeaconBlockPb.Block.Body.BlobKzgCommitments = blobsKzgCommitments
	signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
	require.NoError(t, err)

	// Get the signed beacon block header.
	signedBeaconBlockHeader, err := signedBeaconBlock.Header()
	require.NoError(t, err)

	// Compute data column sidecars from the signed block and blobs.
	dataColumnSidecars, err := peerdas.DataColumnSidecars(signedBeaconBlock, blobs)
	require.NoError(t, err)

	// Create verified RO data columns.
	verifiedRoDataColumns := make([]*blocks.VerifiedRODataColumn, 0, blobCount)
	for _, dataColumnSidecar := range dataColumnSidecars {
		roDataColumn, err := blocks.NewRODataColumn(dataColumnSidecar)
		require.NoError(t, err)

		verifiedRoDataColumn := blocks.NewVerifiedRODataColumn(roDataColumn)
		verifiedRoDataColumns = append(verifiedRoDataColumns, &verifiedRoDataColumn)
	}

	verifiedRoDataColumn := verifiedRoDataColumns[0]

	numberOfColumns := params.BeaconConfig().NumberOfColumns

	var noDataColumns []*ethpb.DataColumnSidecar
	dataColumnsWithDifferentLengths := []*ethpb.DataColumnSidecar{
		{DataColumn: [][]byte{{}, {}}},
		{DataColumn: [][]byte{{}}},
	}
	notEnoughDataColumns := dataColumnSidecars[:numberOfColumns/2-1]
	originalDataColumns := dataColumnSidecars[:numberOfColumns/2]
	extendedDataColumns := dataColumnSidecars[numberOfColumns/2:]
	evenDataColumns := make([]*ethpb.DataColumnSidecar, 0, numberOfColumns/2)
	oddDataColumns := make([]*ethpb.DataColumnSidecar, 0, numberOfColumns/2)
	allDataColumns := dataColumnSidecars

	for i, dataColumn := range dataColumnSidecars {
		if i%2 == 0 {
			evenDataColumns = append(evenDataColumns, dataColumn)
		} else {
			oddDataColumns = append(oddDataColumns, dataColumn)
		}
	}

	testCases := []struct {
		name               string
		dataColumnsSidecar []*ethpb.DataColumnSidecar
		isError            bool
	}{
		{
			name:               "No data columns sidecars",
			dataColumnsSidecar: noDataColumns,
			isError:            true,
		},
		{
			name:               "Data columns sidecar with different lengths",
			dataColumnsSidecar: dataColumnsWithDifferentLengths,
			isError:            true,
		},
		{
			name:               "All columns are present (no actual need to reconstruct)",
			dataColumnsSidecar: allDataColumns,
			isError:            false,
		},
		{
			name:               "Only original columns are present",
			dataColumnsSidecar: originalDataColumns,
			isError:            false,
		},
		{
			name:               "Only extended columns are present",
			dataColumnsSidecar: extendedDataColumns,
			isError:            false,
		},
		{
			name:               "Only even columns are present",
			dataColumnsSidecar: evenDataColumns,
			isError:            false,
		},
		{
			name:               "Only odd columns are present",
			dataColumnsSidecar: oddDataColumns,
			isError:            false,
		},
		{
			name:               "Not enough columns to reconstruct",
			dataColumnsSidecar: notEnoughDataColumns,
			isError:            true,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			// Recover cells and proofs from the available data column sidecars.
			cellsAndProofs, err := peerdas.RecoverCellsAndProofs(tc.dataColumnsSidecar, blockRoot)
			isError := (err != nil)
			require.Equal(t, tc.isError, isError)

			if isError {
				return
			}

			// Recover all data column sidecars from the cells and proofs.
			reconstructedDataColumnsSideCars, err := peerdas.DataColumnSidecarsForReconstruct(
				blobsKzgCommitments,
				signedBeaconBlockHeader,
				verifiedRoDataColumn.KzgCommitmentsInclusionProof,
				cellsAndProofs,
			)
			require.NoError(t, err)

			expected := dataColumnSidecars
			actual := reconstructedDataColumnsSideCars
			require.DeepSSZEqual(t, expected, actual)
		})
	}
}
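
Condensed, the round trip above is a two-step API: recover the full cell/proof matrix from any half-or-more subset of columns, then rebuild every sidecar from it. A sketch using only the signatures exercised by the test:

	// Sketch only: `availableSidecars` is any >= 50% subset of the columns.
	cellsAndProofs, err := peerdas.RecoverCellsAndProofs(availableSidecars, blockRoot)
	if err != nil {
		return err // fewer than half the columns: reconstruction is impossible
	}
	sidecars, err := peerdas.DataColumnSidecarsForReconstruct(
		blobsKzgCommitments,
		signedBeaconBlockHeader,
		kzgCommitmentsInclusionProof,
		cellsAndProofs,
	)
	if err != nil {
		return err
	}
	// `sidecars` now holds all columns, byte-identical to the originals.
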
@@ -1,5 +0,0 @@
package peerdas

import "github.com/sirupsen/logrus"

var log = logrus.WithField("prefix", "peerdas")
@@ -1,14 +0,0 @@
package peerdas

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

var dataColumnComputationTime = promauto.NewHistogram(
	prometheus.HistogramOpts{
		Name:    "data_column_sidecar_computation_milliseconds",
		Help:    "Captures the time taken to compute data column sidecars from blobs.",
		Buckets: []float64{100, 250, 500, 750, 1000, 1500, 2000, 4000, 8000, 12000, 16000},
	},
)
@@ -53,11 +53,6 @@ func HigherEqualThanAltairVersionAndEpoch(s state.BeaconState, e primitives.Epoc
	return s.Version() >= version.Altair && e >= params.BeaconConfig().AltairForkEpoch
}

// PeerDASIsActive checks whether peerDAS is active at the provided slot.
func PeerDASIsActive(slot primitives.Slot) bool {
	return params.PeerDASEnabled() && slots.ToEpoch(slot) >= params.BeaconConfig().Eip7594ForkEpoch
}

// CanUpgradeToAltair returns true if the input `slot` can upgrade to Altair.
// Spec code:
// If state.slot % SLOTS_PER_EPOCH == 0 and compute_epoch_at_slot(state.slot) == ALTAIR_FORK_EPOCH

@@ -333,7 +333,8 @@ func ProcessBlockForStateRoot(
			return nil, errors.Wrap(err, "could not process withdrawals")
		}
	}
	if err = b.ProcessPayload(state, blk.Body()); err != nil {
	state, err = b.ProcessPayload(state, blk.Body())
	if err != nil {
		return nil, errors.Wrap(err, "could not process execution data")
	}
}

@@ -698,45 +698,3 @@ func TestProcessSlotsConditionally(t *testing.T) {
		assert.Equal(t, primitives.Slot(6), s.Slot())
	})
}

func BenchmarkProcessSlots_Capella(b *testing.B) {
	st, _ := util.DeterministicGenesisStateCapella(b, params.BeaconConfig().MaxValidatorsPerCommittee)

	var err error

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		st, err = transition.ProcessSlots(context.Background(), st, st.Slot()+1)
		if err != nil {
			b.Fatalf("Failed to process slot %v", err)
		}
	}
}

func BenchmarkProcessSlots_Deneb(b *testing.B) {
	st, _ := util.DeterministicGenesisStateDeneb(b, params.BeaconConfig().MaxValidatorsPerCommittee)

	var err error

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		st, err = transition.ProcessSlots(context.Background(), st, st.Slot()+1)
		if err != nil {
			b.Fatalf("Failed to process slot %v", err)
		}
	}
}

func BenchmarkProcessSlots_Electra(b *testing.B) {
	st, _ := util.DeterministicGenesisStateElectra(b, params.BeaconConfig().MaxValidatorsPerCommittee)

	var err error

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		st, err = transition.ProcessSlots(context.Background(), st, st.Slot()+1)
		if err != nil {
			b.Fatalf("Failed to process slot %v", err)
		}
	}
}
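
These follow the standard Go benchmark harness, so they can be run in isolation with something like `go test -run '^$' -bench BenchmarkProcessSlots ./beacon-chain/core/transition/` (the package path is assumed from the functions under test).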

@@ -4,7 +4,6 @@ go_library(
    name = "go_default_library",
    srcs = [
        "availability.go",
        "availability_columns.go",
        "cache.go",
        "iface.go",
        "mock.go",
@@ -12,7 +11,6 @@ go_library(
    importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/das",
    visibility = ["//visibility:public"],
    deps = [
        "//beacon-chain/core/peerdas:go_default_library",
        "//beacon-chain/db/filesystem:go_default_library",
        "//beacon-chain/verification:go_default_library",
        "//config/fieldparams:go_default_library",
@@ -22,7 +20,6 @@ go_library(
        "//runtime/logging:go_default_library",
        "//runtime/version:go_default_library",
        "//time/slots:go_default_library",
        "@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
    ],
@@ -31,7 +28,6 @@ go_library(
go_test(
    name = "go_default_test",
    srcs = [
        "availability_columns_test.go",
        "availability_test.go",
        "cache_test.go",
    ],
@@ -39,7 +35,6 @@ go_test(
    deps = [
        "//beacon-chain/db/filesystem:go_default_library",
        "//beacon-chain/verification:go_default_library",
        "//cmd/beacon-chain/flags:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/blocks:go_default_library",
@@ -48,7 +43,6 @@ go_test(
        "//testing/require:go_default_library",
        "//testing/util:go_default_library",
        "//time/slots:go_default_library",
        "@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
    ],
)

@@ -4,7 +4,6 @@ import (
	"context"
	"fmt"

	"github.com/ethereum/go-ethereum/p2p/enode"
	errors "github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
@@ -81,7 +80,7 @@ func (s *LazilyPersistentStore) Persist(current primitives.Slot, sc ...blocks.RO

// IsDataAvailable returns nil if all the commitments in the given block are persisted to the db and have been verified.
// BlobSidecars already in the db are assumed to have been previously verified against the block.
func (s *LazilyPersistentStore) IsDataAvailable(ctx context.Context, _ enode.ID, current primitives.Slot, b blocks.ROBlock) error {
func (s *LazilyPersistentStore) IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error {
	blockCommitments, err := commitmentsToCheck(b, current)
	if err != nil {
		return errors.Wrapf(err, "could not check data availability for block %#x", b.Root())

@@ -1,182 +0,0 @@
package das

import (
	"context"
	"fmt"

	"github.com/ethereum/go-ethereum/p2p/enode"
	errors "github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v5/runtime/version"
	"github.com/prysmaticlabs/prysm/v5/time/slots"
	log "github.com/sirupsen/logrus"
)

// LazilyPersistentStoreColumn is an implementation of AvailabilityStore to be used when batch syncing data columns.
// This implementation will hold any blobs passed to Persist until IsDataAvailable is called for their
// block, at which time they will undergo full verification and be saved to the disk.
type LazilyPersistentStoreColumn struct {
	store *filesystem.BlobStorage
	cache *cache
}

func NewLazilyPersistentStoreColumn(store *filesystem.BlobStorage) *LazilyPersistentStoreColumn {
	return &LazilyPersistentStoreColumn{
		store: store,
		cache: newCache(),
	}
}

// Persist does nothing at the moment.
// TODO: Very ugly, change the interface to allow for columns and blobs
func (*LazilyPersistentStoreColumn) Persist(_ primitives.Slot, _ ...blocks.ROBlob) error {
	return nil
}

// PersistColumns adds columns to the working column cache. Columns stored in this cache will be persisted
// for at least as long as the node is running. Once IsDataAvailable succeeds, all blobs referenced
// by the given block are guaranteed to be persisted for the remainder of the retention period.
func (s *LazilyPersistentStoreColumn) PersistColumns(current primitives.Slot, sc ...blocks.RODataColumn) error {
	if len(sc) == 0 {
		return nil
	}
	if len(sc) > 1 {
		first := sc[0].BlockRoot()
		for i := 1; i < len(sc); i++ {
			if first != sc[i].BlockRoot() {
				return errMixedRoots
			}
		}
	}
	if !params.WithinDAPeriod(slots.ToEpoch(sc[0].Slot()), slots.ToEpoch(current)) {
		return nil
	}
	key := keyFromColumn(sc[0])
	entry := s.cache.ensure(key)
	for i := range sc {
		if err := entry.stashColumns(&sc[i]); err != nil {
			return err
		}
	}
	return nil
}
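
Taken together with IsDataAvailable below, the intended batch-sync flow is stash-then-check. A short sketch using only the methods defined in this file:

	// Sketch only: columns arriving from the network are stashed per block
	// root, and the later availability check verifies and persists them.
	if err := store.PersistColumns(currentSlot, columns...); err != nil {
		return err // mixed roots or a duplicate column index
	}
	if err := store.IsDataAvailable(ctx, nodeID, currentSlot, block); err != nil {
		return err // missing or mismatched sidecars: reject the batch
	}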

// IsDataAvailable returns nil if all the commitments in the given block are persisted to the db and have been verified.
// BlobSidecars already in the db are assumed to have been previously verified against the block.
func (s *LazilyPersistentStoreColumn) IsDataAvailable(
	ctx context.Context,
	nodeID enode.ID,
	currentSlot primitives.Slot,
	block blocks.ROBlock,
) error {
	blockCommitments, err := fullCommitmentsToCheck(nodeID, block, currentSlot)
	if err != nil {
		return errors.Wrapf(err, "full commitments to check with block root `%#x` and current slot `%d`", block.Root(), currentSlot)
	}

	// Return early for blocks that do not have any commitments.
	if blockCommitments.count() == 0 {
		return nil
	}

	// Build the cache key for the block.
	key := keyFromBlock(block)

	// Retrieve the cache entry for the block, or create an empty one if it doesn't exist.
	entry := s.cache.ensure(key)

	// Delete the cache entry for the block at the end.
	defer s.cache.delete(key)

	// Get the root of the block.
	blockRoot := block.Root()

	// Wait for the summarizer to be ready before proceeding.
	summarizer, err := s.store.WaitForSummarizer(ctx)
	if err != nil {
		log.
			WithField("root", fmt.Sprintf("%#x", blockRoot)).
			WithError(err).
			Debug("Failed to receive BlobStorageSummarizer within IsDataAvailable")
	} else {
		// Get the summary for the block, and set it in the cache entry.
		summary := summarizer.Summary(blockRoot)
		entry.setDiskSummary(summary)
	}

	// Verify we have all the expected sidecars, and fail fast if any are missing or inconsistent.
	// We don't try to salvage problematic batches because this indicates a misbehaving peer and we'd rather
	// ignore their response and decrease their peer score.
	roDataColumns, err := entry.filterColumns(blockRoot, blockCommitments)
	if err != nil {
		return errors.Wrap(err, "incomplete BlobSidecar batch")
	}

	// Create verified RO data columns from RO data columns.
	verifiedRODataColumns := make([]blocks.VerifiedRODataColumn, 0, len(roDataColumns))

	for _, roDataColumn := range roDataColumns {
		verifiedRODataColumn := blocks.NewVerifiedRODataColumn(roDataColumn)
		verifiedRODataColumns = append(verifiedRODataColumns, verifiedRODataColumn)
	}

	// Ensure that each column sidecar is written to disk.
	for _, verifiedRODataColumn := range verifiedRODataColumns {
		if err := s.store.SaveDataColumn(verifiedRODataColumn); err != nil {
			return errors.Wrapf(err, "save data columns for index `%d` for block `%#x`", verifiedRODataColumn.ColumnIndex, blockRoot)
		}
	}

	// All ColumnSidecars are persisted - data availability check succeeds.
	return nil
}

// fullCommitmentsToCheck returns the commitments to check for a given block.
func fullCommitmentsToCheck(nodeID enode.ID, block blocks.ROBlock, currentSlot primitives.Slot) (*safeCommitmentsArray, error) {
	// Return early for blocks that are pre-Deneb.
	if block.Version() < version.Deneb {
		return &safeCommitmentsArray{}, nil
	}

	// Compute the block epoch.
	blockSlot := block.Block().Slot()
	blockEpoch := slots.ToEpoch(blockSlot)

	// Compute the current epoch.
	currentEpoch := slots.ToEpoch(currentSlot)

	// Return early if the request is out of the MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS window.
	if !params.WithinDAPeriod(blockEpoch, currentEpoch) {
		return &safeCommitmentsArray{}, nil
	}

	// Retrieve the KZG commitments for the block.
	kzgCommitments, err := block.Block().Body().BlobKzgCommitments()
	if err != nil {
		return nil, errors.Wrap(err, "blob KZG commitments")
	}

	// Return early if there are no commitments in the block.
	if len(kzgCommitments) == 0 {
		return &safeCommitmentsArray{}, nil
	}

	// Retrieve the custody columns.
	custodySubnetCount := peerdas.CustodySubnetCount()
	custodyColumns, err := peerdas.CustodyColumns(nodeID, custodySubnetCount)
	if err != nil {
		return nil, errors.Wrap(err, "custody columns")
	}

	// Create a safe commitments array for the custody columns.
	commitmentsArray := &safeCommitmentsArray{}
	for column := range custodyColumns {
		commitmentsArray[column] = kzgCommitments
	}

	return commitmentsArray, nil
}
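
The returned array is sparse by construction: only custodied indices carry the block's commitment list. A short sketch of what a caller observes (names from this file):

	// Sketch only: for a node custodying columns {3, 7}, arr[3] and arr[7]
	// hold the block's KZG commitments and every other entry is nil, so
	// arr.count() reports 2.
	arr, err := fullCommitmentsToCheck(nodeID, block, currentSlot)
	if err != nil {
		return err
	}
	_ = arr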
@@ -1,94 +0,0 @@
package das

import (
	"testing"

	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
	"github.com/prysmaticlabs/prysm/v5/testing/require"
	"github.com/prysmaticlabs/prysm/v5/testing/util"
	"github.com/prysmaticlabs/prysm/v5/time/slots"
)

func TestFullCommitmentsToCheck(t *testing.T) {
	windowSlots, err := slots.EpochEnd(params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest)
	require.NoError(t, err)
	commits := [][]byte{
		bytesutil.PadTo([]byte("a"), 48),
		bytesutil.PadTo([]byte("b"), 48),
		bytesutil.PadTo([]byte("c"), 48),
		bytesutil.PadTo([]byte("d"), 48),
	}
	cases := []struct {
		name    string
		commits [][]byte
		block   func(*testing.T) blocks.ROBlock
		slot    primitives.Slot
		err     error
	}{
		{
			name: "pre deneb",
			block: func(t *testing.T) blocks.ROBlock {
				bb := util.NewBeaconBlockBellatrix()
				sb, err := blocks.NewSignedBeaconBlock(bb)
				require.NoError(t, err)
				rb, err := blocks.NewROBlock(sb)
				require.NoError(t, err)
				return rb
			},
		},
		{
			name: "commitments within da",
			block: func(t *testing.T) blocks.ROBlock {
				d := util.NewBeaconBlockDeneb()
				d.Block.Body.BlobKzgCommitments = commits
				d.Block.Slot = 100
				sb, err := blocks.NewSignedBeaconBlock(d)
				require.NoError(t, err)
				rb, err := blocks.NewROBlock(sb)
				require.NoError(t, err)
				return rb
			},
			commits: commits,
			slot:    100,
		},
		{
			name: "commitments outside da",
			block: func(t *testing.T) blocks.ROBlock {
				d := util.NewBeaconBlockDeneb()
				// block is from slot 0, "current slot" is window size +1 (so outside the window)
				d.Block.Body.BlobKzgCommitments = commits
				sb, err := blocks.NewSignedBeaconBlock(d)
				require.NoError(t, err)
				rb, err := blocks.NewROBlock(sb)
				require.NoError(t, err)
				return rb
			},
			slot: windowSlots + 1,
		},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			resetFlags := flags.Get()
			gFlags := new(flags.GlobalFlags)
			gFlags.SubscribeToAllSubnets = true
			flags.Init(gFlags)
			defer flags.Init(resetFlags)

			b := c.block(t)
			co, err := fullCommitmentsToCheck(enode.ID{}, b, c.slot)
			if c.err != nil {
				require.ErrorIs(t, err, c.err)
			} else {
				require.NoError(t, err)
			}
			for i := 0; i < len(co); i++ {
				require.DeepEqual(t, c.commits, co[i])
			}
		})
	}
}
@@ -5,7 +5,6 @@ import (
	"context"
	"testing"

	"github.com/ethereum/go-ethereum/p2p/enode"
	errors "github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
@@ -125,18 +124,18 @@ func TestLazilyPersistent_Missing(t *testing.T) {

	// Only one commitment persisted, should return error with other indices
	require.NoError(t, as.Persist(1, scs[2]))
	err := as.IsDataAvailable(ctx, enode.ID{}, 1, blk)
	err := as.IsDataAvailable(ctx, 1, blk)
	require.ErrorIs(t, err, errMissingSidecar)

	// All but one persisted, return missing idx
	require.NoError(t, as.Persist(1, scs[0]))
	err = as.IsDataAvailable(ctx, enode.ID{}, 1, blk)
	err = as.IsDataAvailable(ctx, 1, blk)
	require.ErrorIs(t, err, errMissingSidecar)

	// All persisted, return nil
	require.NoError(t, as.Persist(1, scs...))

	require.NoError(t, as.IsDataAvailable(ctx, enode.ID{}, 1, blk))
	require.NoError(t, as.IsDataAvailable(ctx, 1, blk))
}

func TestLazilyPersistent_Mismatch(t *testing.T) {
@@ -151,7 +150,7 @@ func TestLazilyPersistent_Mismatch(t *testing.T) {

	// Only one commitment persisted, should return error with other indices
	require.NoError(t, as.Persist(1, scs[0]))
	err := as.IsDataAvailable(ctx, enode.ID{}, 1, blk)
	err := as.IsDataAvailable(ctx, 1, blk)
	require.NotNil(t, err)
	require.ErrorIs(t, err, errCommitmentMismatch)
}

@@ -2,7 +2,6 @@ package das

import (
	"bytes"
	"reflect"

	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
@@ -39,10 +38,6 @@ func keyFromSidecar(sc blocks.ROBlob) cacheKey {
	return cacheKey{slot: sc.Slot(), root: sc.BlockRoot()}
}

func keyFromColumn(sc blocks.RODataColumn) cacheKey {
	return cacheKey{slot: sc.Slot(), root: sc.BlockRoot()}
}

// keyFromBlock is a convenience method for constructing a cacheKey from a ROBlock value.
func keyFromBlock(b blocks.ROBlock) cacheKey {
	return cacheKey{slot: b.Block().Slot(), root: b.Root()}
@@ -66,7 +61,6 @@ func (c *cache) delete(key cacheKey) {
// cacheEntry holds a fixed-length cache of BlobSidecars.
type cacheEntry struct {
	scs         [fieldparams.MaxBlobsPerBlock]*blocks.ROBlob
	colScs      [fieldparams.NumberOfColumns]*blocks.RODataColumn
	diskSummary filesystem.BlobStorageSummary
}

@@ -88,17 +82,6 @@ func (e *cacheEntry) stash(sc *blocks.ROBlob) error {
	return nil
}

func (e *cacheEntry) stashColumns(sc *blocks.RODataColumn) error {
	if sc.ColumnIndex >= fieldparams.NumberOfColumns {
		return errors.Wrapf(errIndexOutOfBounds, "index=%d", sc.ColumnIndex)
	}
	if e.colScs[sc.ColumnIndex] != nil {
		return errors.Wrapf(ErrDuplicateSidecar, "root=%#x, index=%d, commitment=%#x", sc.BlockRoot(), sc.ColumnIndex, sc.KzgCommitments)
	}
	e.colScs[sc.ColumnIndex] = sc
	return nil
}

// filter evicts sidecars that are not committed to by the block and returns custom
// errors if the cache is missing any of the commitments, or if the commitments in
// the cache do not match those found in the block. If err is nil, then all expected
@@ -134,39 +117,6 @@ func (e *cacheEntry) filter(root [32]byte, kc safeCommitmentArray) ([]blocks.ROB
	return scs, nil
}

func (e *cacheEntry) filterColumns(root [32]byte, commitmentsArray *safeCommitmentsArray) ([]blocks.RODataColumn, error) {
	nonEmptyIndices := commitmentsArray.nonEmptyIndices()
	if e.diskSummary.AllDataColumnsAvailable(nonEmptyIndices) {
		return nil, nil
	}

	commitmentsCount := commitmentsArray.count()
	sidecars := make([]blocks.RODataColumn, 0, commitmentsCount)

	for i := uint64(0); i < fieldparams.NumberOfColumns; i++ {
		// Skip if we already store this data column.
		if e.diskSummary.HasIndex(i) {
			continue
		}

		if commitmentsArray[i] == nil {
			continue
		}

		if e.colScs[i] == nil {
			return nil, errors.Wrapf(errMissingSidecar, "root=%#x, index=%#x", root, i)
		}

		if !reflect.DeepEqual(commitmentsArray[i], e.colScs[i].KzgCommitments) {
			return nil, errors.Wrapf(errCommitmentMismatch, "root=%#x, index=%#x, commitment=%#x, block commitment=%#x", root, i, e.colScs[i].KzgCommitments, commitmentsArray[i])
		}

		sidecars = append(sidecars, *e.colScs[i])
	}

	return sidecars, nil
}

// safeCommitmentArray is a fixed size array of commitment byte slices. This is helpful for avoiding
// gratuitous bounds checks.
type safeCommitmentArray [fieldparams.MaxBlobsPerBlock][]byte
@@ -179,33 +129,3 @@ func (s safeCommitmentArray) count() int {
	}
	return fieldparams.MaxBlobsPerBlock
}

// safeCommitmentsArray is a fixed size array of commitments.
// This is helpful for avoiding gratuitous bounds checks.
type safeCommitmentsArray [fieldparams.NumberOfColumns][][]byte

// count returns the number of commitments in the array.
func (s *safeCommitmentsArray) count() int {
	count := 0

	for i := range s {
		if s[i] != nil {
			count++
		}
	}

	return count
}

// nonEmptyIndices returns a map of indices that are non-nil in the array.
func (s *safeCommitmentsArray) nonEmptyIndices() map[uint64]bool {
	columns := make(map[uint64]bool)

	for i := range s {
		if s[i] != nil {
			columns[uint64(i)] = true
		}
	}

	return columns
}

@@ -3,7 +3,6 @@ package das
import (
	"context"

	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)
@@ -15,6 +14,6 @@ import (
// IsDataAvailable guarantees that all blobs committed to in the block have been
// durably persisted before returning a non-error value.
type AvailabilityStore interface {
	IsDataAvailable(ctx context.Context, nodeID enode.ID, current primitives.Slot, b blocks.ROBlock) error
	IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error
	Persist(current primitives.Slot, sc ...blocks.ROBlob) error
}

@@ -3,7 +3,6 @@ package das
import (
	"context"

	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)
@@ -17,7 +16,7 @@ type MockAvailabilityStore struct {
var _ AvailabilityStore = &MockAvailabilityStore{}

// IsDataAvailable satisfies the corresponding method of the AvailabilityStore interface in a way that is useful for tests.
func (m *MockAvailabilityStore) IsDataAvailable(ctx context.Context, _ enode.ID, current primitives.Slot, b blocks.ROBlock) error {
func (m *MockAvailabilityStore) IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error {
	if m.VerifyAvailabilityCallback != nil {
		return m.VerifyAvailabilityCallback(ctx, current, b)
	}

@@ -13,7 +13,6 @@ go_library(
    importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem",
    visibility = ["//visibility:public"],
    deps = [
        "//async/event:go_default_library",
        "//beacon-chain/verification:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",

@@ -12,7 +12,6 @@ import (

	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v5/async/event"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
	fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
@@ -40,15 +39,8 @@ const (
	directoryPermissions = 0700
)

type (
	// BlobStorageOption is a functional option for configuring a BlobStorage.
	BlobStorageOption func(*BlobStorage) error

	RootIndexPair struct {
		Root  [fieldparams.RootLength]byte
		Index uint64
	}
)
// BlobStorageOption is a functional option for configuring a BlobStorage.
type BlobStorageOption func(*BlobStorage) error

// WithBasePath is a required option that sets the base path of blob storage.
func WithBasePath(base string) BlobStorageOption {
@@ -78,10 +70,7 @@ func WithSaveFsync(fsync bool) BlobStorageOption {
// attempt to hold a file lock to guarantee exclusive control of the blob storage directory, so this should only be
// initialized once per beacon node.
func NewBlobStorage(opts ...BlobStorageOption) (*BlobStorage, error) {
	b := &BlobStorage{
		DataColumnFeed: new(event.Feed),
	}

	b := &BlobStorage{}
	for _, o := range opts {
		if err := o(b); err != nil {
			return nil, errors.Wrap(err, "failed to create blob storage")
@@ -110,7 +99,6 @@ type BlobStorage struct {
	fsync          bool
	fs             afero.Fs
	pruner         *blobPruner
	DataColumnFeed *event.Feed
}

// WarmCache runs the prune routine with an expiration slot of 0, so nothing will be pruned, but the pruner's cache
@@ -233,112 +221,6 @@ func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error {
	return nil
}

// SaveDataColumn saves a data column to our local filesystem.
func (bs *BlobStorage) SaveDataColumn(column blocks.VerifiedRODataColumn) error {
	startTime := time.Now()
	fname := namerForDataColumn(column)
	sszPath := fname.path()
	exists, err := afero.Exists(bs.fs, sszPath)
	if err != nil {
		return err
	}

	if exists {
		log.Trace("Ignoring a duplicate data column sidecar save attempt")
		return nil
	}

	if bs.pruner != nil {
		hRoot, err := column.SignedBlockHeader.Header.HashTreeRoot()
		if err != nil {
			return err
		}
		if err := bs.pruner.notify(hRoot, column.SignedBlockHeader.Header.Slot, column.ColumnIndex); err != nil {
			return errors.Wrapf(err, "problem maintaining pruning cache/metrics for sidecar with root=%#x", hRoot)
		}
	}

	// Serialize the ethpb.DataColumnSidecar to binary data using SSZ.
	sidecarData, err := column.MarshalSSZ()
	if err != nil {
		return errors.Wrap(err, "failed to serialize sidecar data")
	} else if len(sidecarData) == 0 {
		return errSidecarEmptySSZData
	}

	if err := bs.fs.MkdirAll(fname.dir(), directoryPermissions); err != nil {
		return err
	}
	partPath := fname.partPath(fmt.Sprintf("%p", sidecarData))

	partialMoved := false
	// Ensure the partial file is deleted.
	defer func() {
		if partialMoved {
			return
		}
		// It's expected to error if the save is successful.
		err = bs.fs.Remove(partPath)
		if err == nil {
			log.WithFields(logrus.Fields{
				"partPath": partPath,
			}).Debugf("Removed partial file")
		}
	}()

	// Create a partial file and write the serialized data to it.
	partialFile, err := bs.fs.Create(partPath)
	if err != nil {
		return errors.Wrap(err, "failed to create partial file")
	}

	n, err := partialFile.Write(sidecarData)
	if err != nil {
		closeErr := partialFile.Close()
		if closeErr != nil {
			return closeErr
		}
		return errors.Wrap(err, "failed to write to partial file")
	}
	if bs.fsync {
		if err := partialFile.Sync(); err != nil {
			return err
		}
	}

	if err := partialFile.Close(); err != nil {
		return err
	}

	if n != len(sidecarData) {
		return fmt.Errorf("failed to write the full bytes of sidecarData, wrote only %d of %d bytes", n, len(sidecarData))
	}

	if n == 0 {
		return errEmptyBlobWritten
	}

	// Atomically rename the partial file to its final name.
	err = bs.fs.Rename(partPath, sszPath)
	if err != nil {
		return errors.Wrap(err, "failed to rename partial file to final name")
	}
	partialMoved = true

	// Notify the data column feed that a new data column has been saved.
	if bs.DataColumnFeed != nil {
		bs.DataColumnFeed.Send(RootIndexPair{
			Root:  column.BlockRoot(),
			Index: column.ColumnIndex,
		})
	}

	// TODO: Use new metrics for data columns
	blobsWrittenCounter.Inc()
	blobSaveLatency.Observe(float64(time.Since(startTime).Milliseconds()))
	return nil
}
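
The save path above is the classic write-to-partial-then-rename recipe, so concurrent readers never observe a half-written sidecar. The essence, as a standalone sketch over an afero filesystem (names are illustrative, not part of this package):

	// Sketch only: atomic save of `data` to finalPath via a partial file.
	func atomicWrite(fs afero.Fs, partPath, finalPath string, data []byte, fsync bool) error {
		f, err := fs.Create(partPath)
		if err != nil {
			return err
		}
		if _, err := f.Write(data); err != nil {
			_ = f.Close()
			return err
		}
		if fsync { // flush to stable storage before the rename becomes visible
			if err := f.Sync(); err != nil {
				_ = f.Close()
				return err
			}
		}
		if err := f.Close(); err != nil {
			return err
		}
		// Rename is atomic on POSIX filesystems: readers see the old file or
		// the complete new one, never a partial write.
		return fs.Rename(partPath, finalPath)
	}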

// Get retrieves a single BlobSidecar by its root and index.
// Since BlobStorage only writes blobs that have undergone full verification, the return
// value is always a VerifiedROBlob.
@@ -364,20 +246,6 @@ func (bs *BlobStorage) Get(root [32]byte, idx uint64) (blocks.VerifiedROBlob, er
	return verification.BlobSidecarNoop(ro)
}

// GetColumn retrieves a single DataColumnSidecar by its root and index.
func (bs *BlobStorage) GetColumn(root [32]byte, idx uint64) (*ethpb.DataColumnSidecar, error) {
	expected := blobNamer{root: root, index: idx}
	encoded, err := afero.ReadFile(bs.fs, expected.path())
	if err != nil {
		return nil, err
	}
	s := &ethpb.DataColumnSidecar{}
	if err := s.UnmarshalSSZ(encoded); err != nil {
		return nil, err
	}
	return s, nil
}

// Remove removes all blobs for a given root.
func (bs *BlobStorage) Remove(root [32]byte) error {
	rootDir := blobNamer{root: root}.dir()
@@ -421,61 +289,6 @@ func (bs *BlobStorage) Indices(root [32]byte) ([fieldparams.MaxBlobsPerBlock]boo
	return mask, nil
}

// ColumnIndices retrieves the stored column indexes from our filesystem.
func (bs *BlobStorage) ColumnIndices(root [32]byte) (map[uint64]bool, error) {
	custody := make(map[uint64]bool, fieldparams.NumberOfColumns)

	// Get all the files in the directory.
	rootDir := blobNamer{root: root}.dir()
	entries, err := afero.ReadDir(bs.fs, rootDir)
	if err != nil {
		// If the directory does not exist, we do not custody any columns.
		if os.IsNotExist(err) {
			return nil, nil
		}

		return nil, errors.Wrap(err, "read directory")
	}

	// Iterate over all the entries in the directory.
	for _, entry := range entries {
		// If the entry is a directory, skip it.
		if entry.IsDir() {
			continue
		}

		// If the entry does not have the correct extension, skip it.
		name := entry.Name()
		if !strings.HasSuffix(name, sszExt) {
			continue
		}

		// The file should be in the `<index>.<extension>` format.
		// Skip the file if it does not match the format.
		parts := strings.Split(name, ".")
		if len(parts) != 2 {
			continue
		}

		// Get the column index from the file name.
		columnIndexStr := parts[0]
		columnIndex, err := strconv.ParseUint(columnIndexStr, 10, 64)
		if err != nil {
			return nil, errors.Wrapf(err, "unexpected directory entry breaks listing, %s", parts[0])
		}

		// If the column index is out of bounds, return an error.
		if columnIndex >= fieldparams.NumberOfColumns {
			return nil, errors.Wrapf(errIndexOutOfBounds, "invalid index %d", columnIndex)
		}

		// Mark the column index as in custody.
		custody[columnIndex] = true
	}

	return custody, nil
}

// Clear deletes all files on the filesystem.
func (bs *BlobStorage) Clear() error {
	dirs, err := listDir(bs.fs, ".")
@@ -508,10 +321,6 @@ func namerForSidecar(sc blocks.VerifiedROBlob) blobNamer {
	return blobNamer{root: sc.BlockRoot(), index: sc.Index}
}

func namerForDataColumn(col blocks.VerifiedRODataColumn) blobNamer {
	return blobNamer{root: col.BlockRoot(), index: col.ColumnIndex}
}

func (p blobNamer) dir() string {
	return rootString(p.root)
}

@@ -9,7 +9,7 @@ import (
)

// blobIndexMask is a bitmask representing the set of blob indices that are currently set.
type blobIndexMask [fieldparams.NumberOfColumns]bool
type blobIndexMask [fieldparams.MaxBlobsPerBlock]bool

// BlobStorageSummary represents cached information about the BlobSidecars on disk for each root the cache knows about.
type BlobStorageSummary struct {
@@ -26,15 +26,6 @@ func (s BlobStorageSummary) HasIndex(idx uint64) bool {
	return s.mask[idx]
}

// HasDataColumnIndex returns true if the DataColumnSidecar at the given index is available in the filesystem.
func (s BlobStorageSummary) HasDataColumnIndex(idx uint64) bool {
	// Protect from panic, but assume callers are sophisticated enough to not need an error telling them they have an invalid idx.
	if idx >= fieldparams.NumberOfColumns {
		return false
	}
	return s.mask[idx]
}

// AllAvailable returns true if we have all blobs for all indices from 0 to count-1.
func (s BlobStorageSummary) AllAvailable(count int) bool {
	if count > fieldparams.MaxBlobsPerBlock {
@@ -48,21 +39,6 @@ func (s BlobStorageSummary) AllAvailable(count int) bool {
	return true
}

// AllDataColumnsAvailable returns true if we have all data columns for the corresponding indices.
func (s BlobStorageSummary) AllDataColumnsAvailable(indices map[uint64]bool) bool {
	if uint64(len(indices)) > fieldparams.NumberOfColumns {
		return false
	}

	for indice := range indices {
		if !s.mask[indice] {
			return false
		}
	}

	return true
}
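
A quick usage sketch tying the disk summary to a set of required column indices, mirroring how filterColumns consumes it (names from this package):

	// Sketch only: skip re-fetching when everything required is already on disk.
	required := map[uint64]bool{3: true, 6: true}
	sum := summarizer.Summary(blockRoot)
	if sum.AllDataColumnsAvailable(required) {
		return nil // all custodied columns already persisted
	}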

// BlobStorageSummarizer can be used to receive a summary of metadata about blobs on disk for a given root.
// The BlobStorageSummary can be used to check which indices (if any) are available for a given block by root.
type BlobStorageSummarizer interface {
@@ -92,12 +68,9 @@ func (s *blobStorageCache) Summary(root [32]byte) BlobStorageSummary {
}

func (s *blobStorageCache) ensure(key [32]byte, slot primitives.Slot, idx uint64) error {
	// TODO: Separate blob index checks from data column index checks
	/*
		if idx >= fieldparams.MaxBlobsPerBlock {
			return errIndexOutOfBounds
		}
	*/
	if idx >= fieldparams.MaxBlobsPerBlock {
		return errIndexOutOfBounds
	}
	s.mu.Lock()
	defer s.mu.Unlock()
	v := s.cache[key]

@@ -9,7 +9,6 @@ import (
)

func TestSlotByRoot_Summary(t *testing.T) {
	t.Skip("Use new test for data columns")
	var noneSet, allSet, firstSet, lastSet, oneSet blobIndexMask
	firstSet[0] = true
	lastSet[len(lastSet)-1] = true
@@ -149,108 +148,3 @@ func TestAllAvailable(t *testing.T) {
		})
	}
}

func TestHasDataColumnIndex(t *testing.T) {
	storedIndices := map[uint64]bool{
		1: true,
		3: true,
		5: true,
	}

	cases := []struct {
		name     string
		idx      uint64
		expected bool
	}{
		{
			name:     "index is too high",
			idx:      fieldparams.NumberOfColumns,
			expected: false,
		},
		{
			name:     "non existing index",
			idx:      2,
			expected: false,
		},
		{
			name:     "existing index",
			idx:      3,
			expected: true,
		},
	}

	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			var mask blobIndexMask

			for idx := range storedIndices {
				mask[idx] = true
			}

			sum := BlobStorageSummary{mask: mask}
			require.Equal(t, c.expected, sum.HasDataColumnIndex(c.idx))
		})
	}
}

func TestAllDataColumnAvailable(t *testing.T) {
	tooManyColumns := make(map[uint64]bool, fieldparams.NumberOfColumns+1)
	for i := uint64(0); i < fieldparams.NumberOfColumns+1; i++ {
		tooManyColumns[i] = true
	}

	columns346 := map[uint64]bool{
		3: true,
		4: true,
		6: true,
	}

	columns36 := map[uint64]bool{
		3: true,
		6: true,
	}

	cases := []struct {
		name          string
		storedIndices map[uint64]bool
		testedIndices map[uint64]bool
		expected      bool
	}{
		{
			name:          "no tested indices",
			storedIndices: columns346,
			testedIndices: map[uint64]bool{},
			expected:      true,
		},
		{
			name:          "too many tested indices",
			storedIndices: columns346,
			testedIndices: tooManyColumns,
			expected:      false,
		},
		{
			name:          "not all tested indices are stored",
			storedIndices: columns36,
			testedIndices: columns346,
			expected:      false,
		},
		{
			name:          "all tested indices are stored",
			storedIndices: columns346,
			testedIndices: columns36,
			expected:      true,
		},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			var mask blobIndexMask

			for idx := range c.storedIndices {
				mask[idx] = true
			}

			sum := BlobStorageSummary{mask: mask}
			require.Equal(t, c.expected, sum.AllDataColumnsAvailable(c.testedIndices))
		})
	}
}

@@ -23,10 +23,10 @@ import (
	bolt "go.etcd.io/bbolt"
)

// Used to represent errors for inconsistent slot ranges.
// used to represent errors for inconsistent slot ranges.
var errInvalidSlotRange = errors.New("invalid end slot and start slot provided")

// Block retrieval by root. Return nil if block is not found.
// Block retrieval by root.
func (s *Store) Block(ctx context.Context, blockRoot [32]byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.Block")
	defer span.End()

@@ -18,7 +18,6 @@ import (
	"github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
	"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v5/runtime/version"
	"github.com/prysmaticlabs/prysm/v5/time"
	"github.com/prysmaticlabs/prysm/v5/time/slots"
	bolt "go.etcd.io/bbolt"
@@ -140,6 +139,7 @@ func (s *Store) SaveStates(ctx context.Context, states []state.ReadOnlyBeaconSta
		}
		multipleEncs[i] = stateBytes
	}
	log.Infof("Total serialization: %s", time.Since(startTime).String())

	if err := s.db.Update(func(tx *bolt.Tx) error {
		bucket := tx.Bucket(stateBucket)
@@ -604,14 +604,14 @@ func (s *Store) unmarshalState(_ context.Context, enc []byte, validatorEntries [

// marshal versioned state from struct type down to bytes.
func marshalState(ctx context.Context, st state.ReadOnlyBeaconState) ([]byte, error) {
	switch st.Version() {
	case version.Phase0:
	switch st.ToProtoUnsafe().(type) {
	case *ethpb.BeaconState:
		rState, ok := st.ToProtoUnsafe().(*ethpb.BeaconState)
		if !ok {
			return nil, errors.New("non valid inner state")
		}
		return encode(ctx, rState)
	case version.Altair:
	case *ethpb.BeaconStateAltair:
		rState, ok := st.ToProtoUnsafe().(*ethpb.BeaconStateAltair)
		if !ok {
			return nil, errors.New("non valid inner state")
@@ -624,7 +624,7 @@ func marshalState(ctx context.Context, st state.ReadOnlyBeaconState) ([]byte, er
			return nil, err
		}
		return snappy.Encode(nil, append(altairKey, rawObj...)), nil
	case version.Bellatrix:
	case *ethpb.BeaconStateBellatrix:
		rState, ok := st.ToProtoUnsafe().(*ethpb.BeaconStateBellatrix)
		if !ok {
			return nil, errors.New("non valid inner state")
@@ -637,7 +637,7 @@ func marshalState(ctx context.Context, st state.ReadOnlyBeaconState) ([]byte, er
			return nil, err
		}
		return snappy.Encode(nil, append(bellatrixKey, rawObj...)), nil
	case version.Capella:
	case *ethpb.BeaconStateCapella:
		rState, ok := st.ToProtoUnsafe().(*ethpb.BeaconStateCapella)
		if !ok {
			return nil, errors.New("non valid inner state")
@@ -650,7 +650,7 @@ func marshalState(ctx context.Context, st state.ReadOnlyBeaconState) ([]byte, er
			return nil, err
		}
		return snappy.Encode(nil, append(capellaKey, rawObj...)), nil
	case version.Deneb:
	case *ethpb.BeaconStateDeneb:
		rState, ok := st.ToProtoUnsafe().(*ethpb.BeaconStateDeneb)
		if !ok {
			return nil, errors.New("non valid inner state")
@@ -663,7 +663,7 @@ func marshalState(ctx context.Context, st state.ReadOnlyBeaconState) ([]byte, er
			return nil, err
		}
		return snappy.Encode(nil, append(denebKey, rawObj...)), nil
	case version.Electra:
	case *ethpb.BeaconStateElectra:
		rState, ok := st.ToProtoUnsafe().(*ethpb.BeaconStateElectra)
		if !ok {
			return nil, errors.New("non valid inner state")

@@ -688,7 +688,7 @@ func decodeSlasherChunk(enc []byte) ([]uint16, error) {
// Encode attestation record to bytes.
// The output encoded attestation record consists of the signing root concatenated with the compressed attestation record.
func encodeAttestationRecord(att *slashertypes.IndexedAttestationWrapper) ([]byte, error) {
	if att == nil || att.IndexedAttestation == nil || att.IndexedAttestation.IsNil() {
	if att == nil || att.IndexedAttestation == nil {
		return []byte{}, errors.New("nil proposal record")
	}

@@ -623,7 +623,13 @@ func (s *Service) ReconstructBlobSidecars(ctx context.Context, block interfaces.
			continue
		}

		// Verify the sidecar KZG proof.
		v := s.blobVerifier(roBlob, verification.ELMemPoolRequirements)
		if err := v.SidecarKzgProofVerified(); err != nil {
			log.WithError(err).WithField("index", i).Error("failed to verify KZG proof for sidecar")
			continue
		}

		verifiedBlob, err := v.VerifiedROBlob()
		if err != nil {
			log.WithError(err).WithField("index", i).Error("failed to verify RO blob")

@@ -108,7 +108,7 @@ func testBlindedBlockFixtures(t *testing.T) *blindedBlockFixtures {
}

func TestPayloadBodiesViaUnblinder(t *testing.T) {
	defer util.HackElectraEIP7549Maxuint(t)()
	defer util.HackElectraMaxuint(t)()
	fx := testBlindedBlockFixtures(t)
	t.Run("mix of non-empty and empty", func(t *testing.T) {
		cli, srv := newMockEngine(t)
@@ -145,7 +145,7 @@ func TestPayloadBodiesViaUnblinder(t *testing.T) {
}

func TestFixtureEquivalence(t *testing.T) {
	defer util.HackElectraEIP7549Maxuint(t)()
	defer util.HackElectraMaxuint(t)()
	fx := testBlindedBlockFixtures(t)
	t.Run("full and blinded block equivalence", func(t *testing.T) {
		testAssertReconstructedEquivalent(t, fx.denebBlock.blinded.block, fx.denebBlock.full)
@@ -248,7 +248,7 @@ func TestComputeRanges(t *testing.T) {
}

func TestReconstructBlindedBlockBatchFallbackToRange(t *testing.T) {
	defer util.HackElectraEIP7549Maxuint(t)()
	defer util.HackElectraMaxuint(t)()
	ctx := context.Background()
	t.Run("fallback fails", func(t *testing.T) {
		cli, srv := newMockEngine(t)
@@ -334,7 +334,7 @@ func TestReconstructBlindedBlockBatchFallbackToRange(t *testing.T) {
}

func TestReconstructBlindedBlockBatchDenebAndElectra(t *testing.T) {
	defer util.HackElectraEIP7549Maxuint(t)()
	defer util.HackElectraMaxuint(t)()
	t.Run("deneb and electra", func(t *testing.T) {
		cli, srv := newMockEngine(t)
		fx := testBlindedBlockFixtures(t)

@@ -53,7 +53,7 @@ func (f *ForkChoice) ShouldOverrideFCU() (override bool) {
	// Only reorg blocks that arrive late
	early, err := head.arrivedEarly(f.store.genesisTime)
	if err != nil {
		log.WithError(err).Error("Could not check if block arrived early")
		log.WithError(err).Error("could not check if block arrived early")
		return
	}
	if early {

@@ -192,13 +192,20 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
	beacon.verifyInitWaiter = verification.NewInitializerWaiter(
		beacon.clockWaiter, forkchoice.NewROForkChoice(beacon.forkChoicer), beacon.stateGen)

	pa := peers.NewAssigner(beacon.fetchP2P().Peers(), beacon.forkChoicer)

	beacon.BackfillOpts = append(
		beacon.BackfillOpts,
		backfill.WithVerifierWaiter(beacon.verifyInitWaiter),
		backfill.WithInitSyncWaiter(initSyncWaiter(ctx, beacon.initialSyncComplete)),
	)

	if err := registerServices(cliCtx, beacon, synchronizer, bfs); err != nil {
	bf, err := backfill.NewService(ctx, bfs, beacon.BlobStorage, beacon.clockWaiter, beacon.fetchP2P(), pa, beacon.BackfillOpts...)
	if err != nil {
		return nil, errors.Wrap(err, "error initializing backfill service")
	}

	if err := registerServices(cliCtx, beacon, synchronizer, bf, bfs); err != nil {
		return nil, errors.Wrap(err, "could not register services")
	}

@@ -285,6 +292,11 @@ func startBaseServices(cliCtx *cli.Context, beacon *BeaconNode, depositAddress s
		return nil, errors.Wrap(err, "could not start slashing DB")
	}

	log.Debugln("Registering P2P Service")
	if err := beacon.registerP2P(cliCtx); err != nil {
		return nil, errors.Wrap(err, "could not register P2P service")
	}

	bfs, err := backfill.NewUpdater(ctx, beacon.db)
	if err != nil {
		return nil, errors.Wrap(err, "could not create backfill updater")
@@ -303,15 +315,9
	return bfs, nil
}

func registerServices(cliCtx *cli.Context, beacon *BeaconNode, synchronizer *startup.ClockSynchronizer, bfs *backfill.Store) error {
	log.Debugln("Registering P2P Service")
	if err := beacon.registerP2P(cliCtx); err != nil {
		return errors.Wrap(err, "could not register P2P service")
	}

	log.Debugln("Registering Backfill Service")
	if err := beacon.RegisterBackfillService(cliCtx, bfs); err != nil {
		return errors.Wrap(err, "could not register Back Fill service")
func registerServices(cliCtx *cli.Context, beacon *BeaconNode, synchronizer *startup.ClockSynchronizer, bf *backfill.Service, bfs *backfill.Store) error {
	if err := beacon.services.RegisterService(bf); err != nil {
		return errors.Wrap(err, "could not register backfill service")
	}

	log.Debugln("Registering POW Chain Service")
@@ -969,7 +975,6 @@ func (b *BeaconNode) registerRPCService(router *http.ServeMux) error {
		FinalizationFetcher: chainService,
		BlockReceiver:       chainService,
		BlobReceiver:        chainService,
		DataColumnReceiver:  chainService,
		AttestationReceiver: chainService,
		GenesisTimeFetcher:  chainService,
		GenesisFetcher:      chainService,
@@ -1131,16 +1136,6 @@ func (b *BeaconNode) registerBuilderService(cliCtx *cli.Context) error {
	return b.services.RegisterService(svc)
}

func (b *BeaconNode) RegisterBackfillService(cliCtx *cli.Context, bfs *backfill.Store) error {
	pa := peers.NewAssigner(b.fetchP2P().Peers(), b.forkChoicer)
	bf, err := backfill.NewService(cliCtx.Context, bfs, b.BlobStorage, b.clockWaiter, b.fetchP2P(), pa, b.BackfillOpts...)
	if err != nil {
		return errors.Wrap(err, "error initializing backfill service")
	}

	return b.services.RegisterService(bf)
}

func hasNetworkFlag(cliCtx *cli.Context) bool {
	for _, flag := range features.NetworkFlags {
		for _, name := range flag.Names() {

@@ -49,12 +49,12 @@ func TestKV_Aggregated_SaveAggregatedAttestation(t *testing.T) {
		{
			name:          "nil attestation",
			att:           nil,
			wantErrString: "attestation is nil",
			wantErrString: "attestation can't be nil",
		},
		{
			name:          "nil attestation data",
			att:           &ethpb.Attestation{},
			wantErrString: "attestation is nil",
			wantErrString: "attestation's data can't be nil",
		},
		{
			name: "not aggregated",
@@ -206,7 +206,7 @@ func TestKV_Aggregated_AggregatedAttestations(t *testing.T) {
func TestKV_Aggregated_DeleteAggregatedAttestation(t *testing.T) {
	t.Run("nil attestation", func(t *testing.T) {
		cache := NewAttCaches()
		assert.ErrorContains(t, "attestation is nil", cache.DeleteAggregatedAttestation(nil))
		assert.ErrorContains(t, "attestation can't be nil", cache.DeleteAggregatedAttestation(nil))
		att := util.HydrateAttestation(&ethpb.Attestation{AggregationBits: bitfield.Bitlist{0b10101}, Data: &ethpb.AttestationData{Slot: 2}})
		assert.NoError(t, cache.DeleteAggregatedAttestation(att))
	})
@@ -288,7 +288,7 @@ func TestKV_Aggregated_HasAggregatedAttestation(t *testing.T) {
			name:  "nil attestation",
			input: nil,
			want:  false,
			err:   errors.New("is nil"),
			err:   errors.New("can't be nil"),
		},
		{
			name: "nil attestation data",
@@ -296,7 +296,7 @@ func TestKV_Aggregated_HasAggregatedAttestation(t *testing.T) {
				AggregationBits: bitfield.Bitlist{0b1111},
			},
			want: false,
			err:  errors.New("is nil"),
			err:  errors.New("can't be nil"),
		},
		{
			name: "empty cache aggregated",
|
||||
|
||||
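The tests above follow Go's table-driven pattern: each case pairs an input attestation with the error substring it should produce, and only the error messages change in this diff. A self-contained sketch of the same shape, with a hypothetical validate helper rather than the pool's real implementation:

    package kv_test

    import (
            "errors"
            "strings"
            "testing"
    )

    type attestation struct{ data *string }

    // validate is a hypothetical helper mirroring the nil checks exercised above.
    func validate(att *attestation) error {
            if att == nil {
                    return errors.New("attestation can't be nil")
            }
            if att.data == nil {
                    return errors.New("attestation's data can't be nil")
            }
            return nil
    }

    func TestValidate(t *testing.T) {
            data := "root"
            tests := []struct {
                    name          string
                    att           *attestation
                    wantErrString string
            }{
                    {name: "nil attestation", att: nil, wantErrString: "attestation can't be nil"},
                    {name: "nil attestation data", att: &attestation{}, wantErrString: "attestation's data can't be nil"},
                    {name: "valid", att: &attestation{data: &data}},
            }
            for _, tt := range tests {
                    t.Run(tt.name, func(t *testing.T) {
                            err := validate(tt.att)
                            if tt.wantErrString == "" {
                                    if err != nil {
                                            t.Fatalf("unexpected error: %v", err)
                                    }
                                    return
                            }
                            if err == nil || !strings.Contains(err.Error(), tt.wantErrString) {
                                    t.Fatalf("want error containing %q, got %v", tt.wantErrString, err)
                            }
                    })
            }
    }
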
@@ -8,7 +8,7 @@ import (

// SaveBlockAttestation saves a block attestation in cache.
func (c *AttCaches) SaveBlockAttestation(att ethpb.Att) error {
if att == nil || att.IsNil() {
if att == nil {
return nil
}

@@ -53,9 +53,10 @@ func (c *AttCaches) BlockAttestations() []ethpb.Att {

// DeleteBlockAttestation deletes a block attestation in cache.
func (c *AttCaches) DeleteBlockAttestation(att ethpb.Att) error {
if att == nil || att.IsNil() {
if att == nil {
return nil
}

id, err := attestation.NewId(att, attestation.Data)
if err != nil {
return errors.Wrap(err, "could not create attestation ID")

@@ -8,7 +8,7 @@ import (

// SaveForkchoiceAttestation saves a forkchoice attestation in cache.
func (c *AttCaches) SaveForkchoiceAttestation(att ethpb.Att) error {
if att == nil || att.IsNil() {
if att == nil {
return nil
}

@@ -50,7 +50,7 @@ func (c *AttCaches) ForkchoiceAttestations() []ethpb.Att {

// DeleteForkchoiceAttestation deletes a forkchoice attestation in cache.
func (c *AttCaches) DeleteForkchoiceAttestation(att ethpb.Att) error {
if att == nil || att.IsNil() {
if att == nil {
return nil
}

@@ -14,7 +14,7 @@ import (

// SaveUnaggregatedAttestation saves an unaggregated attestation in cache.
func (c *AttCaches) SaveUnaggregatedAttestation(att ethpb.Att) error {
if att == nil || att.IsNil() {
if att == nil {
return nil
}
if helpers.IsAggregated(att) {
@@ -130,10 +130,9 @@ func (c *AttCaches) UnaggregatedAttestationsBySlotIndexElectra(

// DeleteUnaggregatedAttestation deletes the unaggregated attestations in cache.
func (c *AttCaches) DeleteUnaggregatedAttestation(att ethpb.Att) error {
if att == nil || att.IsNil() {
if att == nil {
return nil
}

if helpers.IsAggregated(att) {
return errors.New("attestation is aggregated")
}
@@ -162,7 +161,7 @@ func (c *AttCaches) DeleteSeenUnaggregatedAttestations() (int, error) {

count := 0
for r, att := range c.unAggregatedAtt {
if att == nil || att.IsNil() || helpers.IsAggregated(att) {
if att == nil || helpers.IsAggregated(att) {
continue
}
if seen, err := c.hasSeenBit(att); err == nil && seen {

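The repeated `att == nil || att.IsNil()` checks being simplified throughout these hunks exist because, for interface-typed values like ethpb.Att, a plain nil comparison is not enough: an interface holding a typed nil pointer compares non-nil. A minimal sketch of that pitfall, with stand-in types:

    package main

    import "fmt"

    // Att is a stand-in for the ethpb.Att interface used above.
    type Att interface {
            IsNil() bool
    }

    type attestation struct{ data *int }

    // IsNil reports whether the receiver or its payload is nil.
    func (a *attestation) IsNil() bool { return a == nil || a.data == nil }

    func main() {
            var concrete *attestation // typed nil pointer
            var att Att = concrete    // interface now holds (type, nil) — not nil itself

            fmt.Println(att == nil)  // false: the interface carries a concrete type
            fmt.Println(att.IsNil()) // true: the underlying pointer is nil
    }

The branch that keeps only `att == nil` can do so because it works with the concrete *ethpb.Attestation pointer type, where the plain comparison is sufficient.
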
@@ -7,7 +7,6 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/attestations/mock",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/operations/attestations:go_default_library",
"//consensus-types/primitives:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
],

@@ -3,17 +3,13 @@ package mock
import (
"context"

"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)

var _ attestations.Pool = &PoolMock{}

// PoolMock --
type PoolMock struct {
AggregatedAtts []ethpb.Att
UnaggregatedAtts []ethpb.Att
AggregatedAtts []*ethpb.Attestation
}

// AggregateUnaggregatedAttestations --
@@ -27,18 +23,18 @@ func (*PoolMock) AggregateUnaggregatedAttestationsBySlotIndex(_ context.Context,
}

// SaveAggregatedAttestation --
func (*PoolMock) SaveAggregatedAttestation(_ ethpb.Att) error {
func (*PoolMock) SaveAggregatedAttestation(_ *ethpb.Attestation) error {
panic("implement me")
}

// SaveAggregatedAttestations --
func (m *PoolMock) SaveAggregatedAttestations(atts []ethpb.Att) error {
func (m *PoolMock) SaveAggregatedAttestations(atts []*ethpb.Attestation) error {
m.AggregatedAtts = append(m.AggregatedAtts, atts...)
return nil
}

// AggregatedAttestations --
func (m *PoolMock) AggregatedAttestations() []ethpb.Att {
func (m *PoolMock) AggregatedAttestations() []*ethpb.Attestation {
return m.AggregatedAtts
}

@@ -47,18 +43,13 @@ func (*PoolMock) AggregatedAttestationsBySlotIndex(_ context.Context, _ primitiv
panic("implement me")
}

// AggregatedAttestationsBySlotIndexElectra --
func (*PoolMock) AggregatedAttestationsBySlotIndexElectra(_ context.Context, _ primitives.Slot, _ primitives.CommitteeIndex) []*ethpb.AttestationElectra {
panic("implement me")
}

// DeleteAggregatedAttestation --
func (*PoolMock) DeleteAggregatedAttestation(_ ethpb.Att) error {
func (*PoolMock) DeleteAggregatedAttestation(_ *ethpb.Attestation) error {
panic("implement me")
}

// HasAggregatedAttestation --
func (*PoolMock) HasAggregatedAttestation(_ ethpb.Att) (bool, error) {
func (*PoolMock) HasAggregatedAttestation(_ *ethpb.Attestation) (bool, error) {
panic("implement me")
}

@@ -68,19 +59,18 @@ func (*PoolMock) AggregatedAttestationCount() int {
}

// SaveUnaggregatedAttestation --
func (*PoolMock) SaveUnaggregatedAttestation(_ ethpb.Att) error {
func (*PoolMock) SaveUnaggregatedAttestation(_ *ethpb.Attestation) error {
panic("implement me")
}

// SaveUnaggregatedAttestations --
func (m *PoolMock) SaveUnaggregatedAttestations(atts []ethpb.Att) error {
m.UnaggregatedAtts = append(m.UnaggregatedAtts, atts...)
return nil
func (*PoolMock) SaveUnaggregatedAttestations(_ []*ethpb.Attestation) error {
panic("implement me")
}

// UnaggregatedAttestations --
func (m *PoolMock) UnaggregatedAttestations() ([]ethpb.Att, error) {
return m.UnaggregatedAtts, nil
func (*PoolMock) UnaggregatedAttestations() ([]*ethpb.Attestation, error) {
panic("implement me")
}

// UnaggregatedAttestationsBySlotIndex --
@@ -88,13 +78,8 @@ func (*PoolMock) UnaggregatedAttestationsBySlotIndex(_ context.Context, _ primit
panic("implement me")
}

// UnaggregatedAttestationsBySlotIndexElectra --
func (*PoolMock) UnaggregatedAttestationsBySlotIndexElectra(_ context.Context, _ primitives.Slot, _ primitives.CommitteeIndex) []*ethpb.AttestationElectra {
panic("implement me")
}

// DeleteUnaggregatedAttestation --
func (*PoolMock) DeleteUnaggregatedAttestation(_ ethpb.Att) error {
func (*PoolMock) DeleteUnaggregatedAttestation(_ *ethpb.Attestation) error {
panic("implement me")
}

@@ -109,42 +94,42 @@ func (*PoolMock) UnaggregatedAttestationCount() int {
}

// SaveBlockAttestation --
func (*PoolMock) SaveBlockAttestation(_ ethpb.Att) error {
func (*PoolMock) SaveBlockAttestation(_ *ethpb.Attestation) error {
panic("implement me")
}

// SaveBlockAttestations --
func (*PoolMock) SaveBlockAttestations(_ []ethpb.Att) error {
func (*PoolMock) SaveBlockAttestations(_ []*ethpb.Attestation) error {
panic("implement me")
}

// BlockAttestations --
func (*PoolMock) BlockAttestations() []ethpb.Att {
func (*PoolMock) BlockAttestations() []*ethpb.Attestation {
panic("implement me")
}

// DeleteBlockAttestation --
func (*PoolMock) DeleteBlockAttestation(_ ethpb.Att) error {
func (*PoolMock) DeleteBlockAttestation(_ *ethpb.Attestation) error {
panic("implement me")
}

// SaveForkchoiceAttestation --
func (*PoolMock) SaveForkchoiceAttestation(_ ethpb.Att) error {
func (*PoolMock) SaveForkchoiceAttestation(_ *ethpb.Attestation) error {
panic("implement me")
}

// SaveForkchoiceAttestations --
func (*PoolMock) SaveForkchoiceAttestations(_ []ethpb.Att) error {
func (*PoolMock) SaveForkchoiceAttestations(_ []*ethpb.Attestation) error {
panic("implement me")
}

// ForkchoiceAttestations --
func (*PoolMock) ForkchoiceAttestations() []ethpb.Att {
func (*PoolMock) ForkchoiceAttestations() []*ethpb.Attestation {
panic("implement me")
}

// DeleteForkchoiceAttestation --
func (*PoolMock) DeleteForkchoiceAttestation(_ ethpb.Att) error {
func (*PoolMock) DeleteForkchoiceAttestation(_ *ethpb.Attestation) error {
panic("implement me")
}

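The `var _ attestations.Pool = &PoolMock{}` line kept through this diff is Go's standard compile-time conformance check: if the mock drifts from the interface (as the ethpb.Att versus *ethpb.Attestation signatures above do between branches), the build fails rather than a test. A minimal sketch of the idiom with stand-in types:

    package main

    // Pool is a stand-in for the attestations.Pool interface.
    type Pool interface {
            AggregatedAttestationCount() int
    }

    // PoolMock panics on use, like the mock above, but still proves at
    // compile time that it implements Pool.
    type PoolMock struct{}

    func (*PoolMock) AggregatedAttestationCount() int { panic("implement me") }

    // Compile-time assertion: removing or mis-typing a method breaks the build.
    var _ Pool = (*PoolMock)(nil)

    func main() {}
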
@@ -7,7 +7,6 @@ go_library(
"broadcaster.go",
"config.go",
"connection_gater.go",
"custody.go",
"dial_relay_node.go",
"discovery.go",
"doc.go",
@@ -18,6 +17,7 @@ go_library(
"handshake.go",
"info.go",
"interfaces.go",
"iterator.go",
"log.go",
"message_id.go",
"monitoring.go",
@@ -46,7 +46,6 @@ go_library(
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/p2p/encoder:go_default_library",
@@ -57,7 +56,6 @@ go_library(
"//beacon-chain/startup:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//consensus-types/wrapper:go_default_library",
@@ -77,8 +75,6 @@ go_library(
"//runtime/version:go_default_library",
"//time:go_default_library",
"//time/slots:go_default_library",
"@com_github_btcsuite_btcd_btcec_v2//:go_default_library",
"@com_github_ethereum_go_ethereum//crypto:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/discover:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
@@ -119,7 +115,6 @@ go_test(
"addr_factory_test.go",
"broadcaster_test.go",
"connection_gater_test.go",
"custody_test.go",
"dial_relay_node_test.go",
"discovery_test.go",
"fork_test.go",
@@ -141,11 +136,9 @@ go_test(
flaky = True,
tags = ["requires-network"],
deps = [
"//beacon-chain/blockchain/kzg:go_default_library",
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/cache:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/p2p/encoder:go_default_library",
@@ -158,7 +151,6 @@ go_test(
"//cmd/beacon-chain/flags:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/primitives:go_default_library",
"//consensus-types/wrapper:go_default_library",
"//container/leaky-bucket:go_default_library",
@@ -169,12 +161,13 @@ go_test(
"//network/forks:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/metadata:go_default_library",
"//proto/testing:go_default_library",
"//runtime/version:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"//time:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//crypto:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/discover:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",

@@ -11,7 +11,6 @@ import (
ssz "github.com/prysmaticlabs/fastssz"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/altair"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/crypto/hash"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
@@ -97,12 +96,7 @@ func (s *Service) BroadcastSyncCommitteeMessage(ctx context.Context, subnet uint
return nil
}

func (s *Service) internalBroadcastAttestation(
ctx context.Context,
subnet uint64,
att ethpb.Att,
forkDigest [fieldparams.VersionLength]byte,
) {
func (s *Service) internalBroadcastAttestation(ctx context.Context, subnet uint64, att ethpb.Att, forkDigest [4]byte) {
_, span := trace.StartSpan(ctx, "p2p.internalBroadcastAttestation")
defer span.End()
ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.
@@ -158,7 +152,7 @@ func (s *Service) internalBroadcastAttestation(
}
}

func (s *Service) broadcastSyncCommittee(ctx context.Context, subnet uint64, sMsg *ethpb.SyncCommitteeMessage, forkDigest [fieldparams.VersionLength]byte) {
func (s *Service) broadcastSyncCommittee(ctx context.Context, subnet uint64, sMsg *ethpb.SyncCommitteeMessage, forkDigest [4]byte) {
_, span := trace.StartSpan(ctx, "p2p.broadcastSyncCommittee")
defer span.End()
ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.
@@ -234,12 +228,7 @@ func (s *Service) BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.
return nil
}

func (s *Service) internalBroadcastBlob(
ctx context.Context,
subnet uint64,
blobSidecar *ethpb.BlobSidecar,
forkDigest [fieldparams.VersionLength]byte,
) {
func (s *Service) internalBroadcastBlob(ctx context.Context, subnet uint64, blobSidecar *ethpb.BlobSidecar, forkDigest [4]byte) {
_, span := trace.StartSpan(ctx, "p2p.internalBroadcastBlob")
defer span.End()
ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.
@@ -254,7 +243,7 @@ func (s *Service) internalBroadcastBlob(
s.subnetLocker(wrappedSubIdx).RUnlock()

if !hasPeer {
blobSidecarBroadcastAttempts.Inc()
blobSidecarCommitteeBroadcastAttempts.Inc()
if err := func() error {
s.subnetLocker(wrappedSubIdx).Lock()
defer s.subnetLocker(wrappedSubIdx).Unlock()
@@ -263,7 +252,7 @@ func (s *Service) internalBroadcastBlob(
return err
}
if ok {
blobSidecarBroadcasts.Inc()
blobSidecarCommitteeBroadcasts.Inc()
return nil
}
return errors.New("failed to find peers for subnet")
@@ -279,120 +268,6 @@ func (s *Service) internalBroadcastBlob(
}
}

// BroadcastDataColumn broadcasts a data column to the p2p network; the message is assumed to be
// broadcast on the current fork and on the input column subnet.
// TODO: Add tests
func (s *Service) BroadcastDataColumn(
ctx context.Context,
root [fieldparams.RootLength]byte,
columnSubnet uint64,
dataColumnSidecar *ethpb.DataColumnSidecar,
) error {
// Add tracing to the function.
ctx, span := trace.StartSpan(ctx, "p2p.BroadcastDataColumn")
defer span.End()

// Ensure the data column sidecar is not nil.
if dataColumnSidecar == nil {
return errors.Errorf("attempted to broadcast nil data column sidecar at subnet %d", columnSubnet)
}

// Retrieve the current fork digest.
forkDigest, err := s.currentForkDigest()
if err != nil {
err := errors.Wrap(err, "current fork digest")
tracing.AnnotateError(span, err)
return err
}

// Non-blocking broadcast, with attempts to discover a column subnet peer if none available.
go s.internalBroadcastDataColumn(ctx, root, columnSubnet, dataColumnSidecar, forkDigest)

return nil
}

func (s *Service) internalBroadcastDataColumn(
ctx context.Context,
root [fieldparams.RootLength]byte,
columnSubnet uint64,
dataColumnSidecar *ethpb.DataColumnSidecar,
forkDigest [fieldparams.VersionLength]byte,
) {
// Add tracing to the function.
_, span := trace.StartSpan(ctx, "p2p.internalBroadcastDataColumn")
defer span.End()

// Increase the number of broadcast attempts.
dataColumnSidecarBroadcastAttempts.Inc()

// Clear parent context / deadline.
ctx = trace.NewContext(context.Background(), span)

// Define a one-slot length context timeout.
oneSlot := time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second
ctx, cancel := context.WithTimeout(ctx, oneSlot)
defer cancel()

// Build the topic corresponding to this column subnet and this fork digest.
topic := dataColumnSubnetToTopic(columnSubnet, forkDigest)

// Compute the wrapped subnet index.
wrappedSubIdx := columnSubnet + dataColumnSubnetVal

// Check if we have peers with this subnet.
hasPeer := func() bool {
s.subnetLocker(wrappedSubIdx).RLock()
defer s.subnetLocker(wrappedSubIdx).RUnlock()

return s.hasPeerWithSubnet(topic)
}()

// If no peers are found, attempt to find peers with this subnet.
if !hasPeer {
if err := func() error {
s.subnetLocker(wrappedSubIdx).Lock()
defer s.subnetLocker(wrappedSubIdx).Unlock()

ok, err := s.FindPeersWithSubnet(ctx, topic, columnSubnet, 1 /*threshold*/)
if err != nil {
return errors.Wrap(err, "find peers for subnet")
}

if ok {
return nil
}
return errors.New("failed to find peers for subnet")
}(); err != nil {
log.WithError(err).Error("Failed to find peers")
tracing.AnnotateError(span, err)
}
}

// Broadcast the data column sidecar to the network.
if err := s.broadcastObject(ctx, dataColumnSidecar, topic); err != nil {
log.WithError(err).Error("Failed to broadcast data column sidecar")
tracing.AnnotateError(span, err)
}

header := dataColumnSidecar.SignedBlockHeader.GetHeader()
slot := header.GetSlot()

slotStartTime, err := slots.ToTime(uint64(s.genesisTime.Unix()), slot)
if err != nil {
log.WithError(err).Error("Failed to convert slot to time")
}

log.WithFields(logrus.Fields{
"slot": slot,
"timeSinceSlotStart": time.Since(slotStartTime),
"root": fmt.Sprintf("%#x", root),
"columnSubnet": columnSubnet,
}).Debug("Broadcasted data column sidecar")

// Increase the number of successful broadcasts.
dataColumnSidecarBroadcasts.Inc()
}

// method to broadcast messages to other peers in our gossip mesh.
func (s *Service) broadcastObject(ctx context.Context, obj ssz.Marshaler, topic string) error {
ctx, span := trace.StartSpan(ctx, "p2p.broadcastObject")
@@ -422,18 +297,14 @@ func (s *Service) broadcastObject(ctx context.Context, obj ssz.Marshaler, topic
return nil
}

func attestationToTopic(subnet uint64, forkDigest [fieldparams.VersionLength]byte) string {
func attestationToTopic(subnet uint64, forkDigest [4]byte) string {
return fmt.Sprintf(AttestationSubnetTopicFormat, forkDigest, subnet)
}

func syncCommitteeToTopic(subnet uint64, forkDigest [fieldparams.VersionLength]byte) string {
func syncCommitteeToTopic(subnet uint64, forkDigest [4]byte) string {
return fmt.Sprintf(SyncCommitteeSubnetTopicFormat, forkDigest, subnet)
}

func blobSubnetToTopic(subnet uint64, forkDigest [fieldparams.VersionLength]byte) string {
func blobSubnetToTopic(subnet uint64, forkDigest [4]byte) string {
return fmt.Sprintf(BlobSubnetTopicFormat, forkDigest, subnet)
}

func dataColumnSubnetToTopic(subnet uint64, forkDigest [fieldparams.VersionLength]byte) string {
return fmt.Sprintf(DataColumnSubnetTopicFormat, forkDigest, subnet)
}

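The *ToTopic helpers above all follow one scheme: a gossip topic is a printf-style format string filled with the 4-byte fork digest and the subnet index. A short sketch of that mechanism; the format constant below is an illustrative stand-in, not the package's real DataColumnSubnetTopicFormat value, and the real code appends an encoding suffix separately.

    package main

    import "fmt"

    // Hypothetical stand-in for a subnet topic format string.
    const dataColumnSubnetTopicFormat = "/eth2/%x/data_column_sidecar_%d"

    func main() {
            forkDigest := [4]byte{0xde, 0xad, 0xbe, 0xef} // example digest, not a real network value
            subnet := uint64(3)

            // %x on a byte array prints lowercase hex, so the digest lands in the topic name.
            topic := fmt.Sprintf(dataColumnSubnetTopicFormat, forkDigest, subnet)
            fmt.Println(topic) // /eth2/deadbeef/data_column_sidecar_3
    }
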
@@ -12,16 +12,11 @@ import (
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p/core/host"
"github.com/prysmaticlabs/go-bitfield"
"google.golang.org/protobuf/proto"

"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/scorers"
p2ptest "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/testing"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/wrapper"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
@@ -29,6 +24,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
"google.golang.org/protobuf/proto"
)

func TestService_Broadcast(t *testing.T) {
@@ -229,11 +225,11 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
require.NoError(t, err)
defer bootListener.Close()

// Use smaller batch size for testing.
currentBatchSize := batchSize
batchSize = 2
// Use shorter period for testing.
currentPeriod := pollingPeriod
pollingPeriod = 1 * time.Second
defer func() {
batchSize = currentBatchSize
pollingPeriod = currentPeriod
}()

bootNode := bootListener.Self()
@@ -523,71 +519,3 @@ func TestService_BroadcastBlob(t *testing.T) {
require.NoError(t, p.BroadcastBlob(ctx, subnet, blobSidecar))
require.Equal(t, false, util.WaitTimeout(&wg, 1*time.Second), "Failed to receive pubsub within 1s")
}

func TestService_BroadcastDataColumn(t *testing.T) {
require.NoError(t, kzg.Start())
p1 := p2ptest.NewTestP2P(t)
p2 := p2ptest.NewTestP2P(t)
p1.Connect(p2)
require.NotEqual(t, 0, len(p1.BHost.Network().Peers()), "No peers")

p := &Service{
host: p1.BHost,
pubsub: p1.PubSub(),
joinedTopics: map[string]*pubsub.Topic{},
cfg: &Config{},
genesisTime: time.Now(),
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
subnetsLock: make(map[uint64]*sync.RWMutex),
subnetsLockLock: sync.Mutex{},
peers: peers.NewStatus(context.Background(), &peers.StatusConfig{
ScorerParams: &scorers.Config{},
}),
}

b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlockElectra())
require.NoError(t, err)
blobs := make([]kzg.Blob, fieldparams.MaxBlobsPerBlock)
sidecars, err := peerdas.DataColumnSidecars(b, blobs)
require.NoError(t, err)

sidecar := sidecars[0]
subnet := uint64(0)
topic := DataColumnSubnetTopicFormat
GossipTypeMapping[reflect.TypeOf(sidecar)] = topic
digest, err := p.currentForkDigest()
require.NoError(t, err)
topic = fmt.Sprintf(topic, digest, subnet)

// External peer subscribes to the topic.
topic += p.Encoding().ProtocolSuffix()
sub, err := p2.SubscribeToTopic(topic)
require.NoError(t, err)

time.Sleep(50 * time.Millisecond) // libp2p fails without this delay...

// Async listen for the pubsub, must be before the broadcast.
var wg sync.WaitGroup
wg.Add(1)
go func(tt *testing.T) {
defer wg.Done()
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
defer cancel()

msg, err := sub.Next(ctx)
require.NoError(t, err)

result := &ethpb.DataColumnSidecar{}
require.NoError(t, p.Encoding().DecodeGossip(msg.Data, result))
require.DeepEqual(t, result, sidecar)
}(t)

// Attempt to broadcast nil object should fail.
ctx := context.Background()
var root [fieldparams.RootLength]byte
require.ErrorContains(t, "attempted to broadcast nil", p.BroadcastDataColumn(ctx, root, subnet, nil))

// Broadcast to peers and wait.
require.NoError(t, p.BroadcastDataColumn(ctx, root, subnet, sidecar))
require.Equal(t, false, util.WaitTimeout(&wg, 1*time.Second), "Failed to receive pubsub within 1s")
}

@@ -33,7 +33,7 @@ func (*Service) InterceptPeerDial(_ peer.ID) (allow bool) {
// multiaddr for the given peer.
func (s *Service) InterceptAddrDial(pid peer.ID, m multiaddr.Multiaddr) (allow bool) {
// Disallow bad peers from dialing in.
if s.peers.IsBad(pid) != nil {
if s.peers.IsBad(pid) {
return false
}
return filterConnections(s.addrFilter, m)

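InterceptAddrDial is one hook of libp2p's connection gater: returning false vetoes the dial before a connection is made. The branch difference above is only the shape of the bad-peer query (an error-returning IsBad versus a boolean one). A minimal sketch of the veto logic, with a stand-in peer store rather than the real status tracker:

    package main

    import "fmt"

    // badPeers stands in for the peer-status store consulted above.
    var badPeers = map[string]bool{"peer-bad": true}

    // interceptAddrDial mirrors the gate: disallow dials to known-bad peers.
    func interceptAddrDial(pid string) (allow bool) {
            if badPeers[pid] {
                    return false
            }
            return true // address filtering would run here in the real service
    }

    func main() {
            fmt.Println(interceptAddrDial("peer-bad"))  // false: dial vetoed
            fmt.Println(interceptAddrDial("peer-good")) // true: dial allowed
    }
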
@@ -50,7 +50,7 @@ func TestPeer_AtMaxLimit(t *testing.T) {
}()

for i := 0; i < highWatermarkBuffer; i++ {
addPeer(t, s.peers, peers.Connected, false)
addPeer(t, s.peers, peers.PeerConnected, false)
}

// create alternate host
@@ -159,7 +159,7 @@ func TestService_RejectInboundPeersBeyondLimit(t *testing.T) {
inboundLimit += 1
// Add in up to inbound peer limit.
for i := 0; i < int(inboundLimit); i++ {
addPeer(t, s.peers, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED), false)
addPeer(t, s.peers, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED), false)
}
valid = s.InterceptAccept(&maEndpoints{raddr: multiAddress})
if valid {

@@ -1,146 +0,0 @@
package p2p

import (
"github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"

"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
"github.com/prysmaticlabs/prysm/v5/config/params"
)

// DataColumnsAdmissibleCustodyPeers returns a list of peers that custody a superset of the local node's custody columns.
func (s *Service) DataColumnsAdmissibleCustodyPeers(peers []peer.ID) ([]peer.ID, error) {
localCustodySubnetCount := peerdas.CustodySubnetCount()
return s.dataColumnsAdmissiblePeers(peers, localCustodySubnetCount)
}

// DataColumnsAdmissibleSubnetSamplingPeers returns a list of peers that custody a superset of the local node's sampling columns.
func (s *Service) DataColumnsAdmissibleSubnetSamplingPeers(peers []peer.ID) ([]peer.ID, error) {
localSubnetSamplingSize := peerdas.SubnetSamplingSize()
return s.dataColumnsAdmissiblePeers(peers, localSubnetSamplingSize)
}

// dataColumnsAdmissiblePeers computes the first columns of the local node corresponding to `subnetCount`, then
// filters out `peers` that do not custody a superset of these columns.
func (s *Service) dataColumnsAdmissiblePeers(peers []peer.ID, subnetCount uint64) ([]peer.ID, error) {
// Get the total number of columns.
numberOfColumns := params.BeaconConfig().NumberOfColumns

// Retrieve the local node ID.
localNodeId := s.NodeID()

// Retrieve the needed columns.
neededColumns, err := peerdas.CustodyColumns(localNodeId, subnetCount)
if err != nil {
return nil, errors.Wrap(err, "custody columns for local node")
}

// Get the number of needed columns.
localneededColumnsCount := uint64(len(neededColumns))

// Find the valid peers.
validPeers := make([]peer.ID, 0, len(peers))

loop:
for _, pid := range peers {
// Get the custody subnets count of the remote peer.
remoteCustodySubnetCount := s.DataColumnsCustodyCountFromRemotePeer(pid)

// Get the remote node ID from the peer ID.
remoteNodeID, err := ConvertPeerIDToNodeID(pid)
if err != nil {
return nil, errors.Wrap(err, "convert peer ID to node ID")
}

// Get the custody columns of the remote peer.
remoteCustodyColumns, err := peerdas.CustodyColumns(remoteNodeID, remoteCustodySubnetCount)
if err != nil {
return nil, errors.Wrap(err, "custody columns")
}

remoteCustodyColumnsCount := uint64(len(remoteCustodyColumns))

// If the remote peer custodies fewer columns than the local node needs, skip it.
if remoteCustodyColumnsCount < localneededColumnsCount {
continue
}

// If the remote peer custodies all possible columns, add it to the list.
if remoteCustodyColumnsCount == numberOfColumns {
copiedId := pid
validPeers = append(validPeers, copiedId)
continue
}

// Filter out invalid peers.
for c := range neededColumns {
if !remoteCustodyColumns[c] {
continue loop
}
}

copiedId := pid

// Add valid peer to the list.
validPeers = append(validPeers, copiedId)
}

return validPeers, nil
}

func (s *Service) custodyCountFromRemotePeerEnr(pid peer.ID) uint64 {
// By default, we assume the peer custodies the minimum number of subnets.
custodyRequirement := params.BeaconConfig().CustodyRequirement

// Retrieve the ENR of the peer.
record, err := s.peers.ENR(pid)
if err != nil {
log.WithError(err).WithFields(logrus.Fields{
"peerID": pid,
"defaultValue": custodyRequirement,
}).Debug("Failed to retrieve ENR for peer, defaulting to the default value")

return custodyRequirement
}

// Retrieve the custody subnets count from the ENR.
custodyCount, err := peerdas.CustodyCountFromRecord(record)
if err != nil {
log.WithError(err).WithFields(logrus.Fields{
"peerID": pid,
"defaultValue": custodyRequirement,
}).Debug("Failed to retrieve custody count from ENR for peer, defaulting to the default value")

return custodyRequirement
}

return custodyCount
}

// DataColumnsCustodyCountFromRemotePeer retrieves the custody count from a remote peer.
func (s *Service) DataColumnsCustodyCountFromRemotePeer(pid peer.ID) uint64 {
// Try to get the custody count from the peer's metadata.
metadata, err := s.peers.Metadata(pid)
if err != nil {
log.WithError(err).WithField("peerID", pid).Debug("Failed to retrieve metadata for peer, defaulting to the ENR value")
return s.custodyCountFromRemotePeerEnr(pid)
}

// If the metadata is nil, default to the ENR value.
if metadata == nil {
log.WithField("peerID", pid).Debug("Metadata is nil, defaulting to the ENR value")
return s.custodyCountFromRemotePeerEnr(pid)
}

// Get the custody subnets count from the metadata.
custodyCount := metadata.CustodySubnetCount()

// If the custody count is zero, default to the ENR value.
if custodyCount == 0 {
log.WithField("peerID", pid).Debug("The custody count extracted from the metadata equals to 0, defaulting to the ENR value")
return s.custodyCountFromRemotePeerEnr(pid)
}

return custodyCount
}

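The labeled-continue loop in dataColumnsAdmissiblePeers above is a superset test: a peer is admissible only if its custody columns cover every column the local node needs, with a fast path when it custodies all columns. The same core condition in isolation, using plain sets:

    package main

    import "fmt"

    // custodiesSuperset reports whether the remote peer's columns cover all
    // needed columns — the condition the `continue loop` above enforces.
    func custodiesSuperset(remote, needed map[uint64]bool) bool {
            for column := range needed {
                    if !remote[column] {
                            return false
                    }
            }
            return true
    }

    func main() {
            needed := map[uint64]bool{2: true, 17: true}

            fmt.Println(custodiesSuperset(map[uint64]bool{2: true, 17: true, 40: true}, needed)) // true
            fmt.Println(custodiesSuperset(map[uint64]bool{2: true, 40: true}, needed))           // false: column 17 missing
    }
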
@@ -1,201 +0,0 @@
package p2p

import (
"context"
"crypto/ecdsa"
"net"
"testing"
"time"

"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
"github.com/libp2p/go-libp2p/core/crypto"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/scorers"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/wrapper"
ecdsaprysm "github.com/prysmaticlabs/prysm/v5/crypto/ecdsa"
prysmNetwork "github.com/prysmaticlabs/prysm/v5/network"
pb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/metadata"
"github.com/prysmaticlabs/prysm/v5/testing/require"
)

func createPeer(t *testing.T, privateKeyOffset int, custodyCount uint64) (*enr.Record, peer.ID, *ecdsa.PrivateKey) {
privateKeyBytes := make([]byte, 32)
for i := 0; i < 32; i++ {
privateKeyBytes[i] = byte(privateKeyOffset + i)
}

unmarshalledPrivateKey, err := crypto.UnmarshalSecp256k1PrivateKey(privateKeyBytes)
require.NoError(t, err)

privateKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(unmarshalledPrivateKey)
require.NoError(t, err)

peerID, err := peer.IDFromPrivateKey(unmarshalledPrivateKey)
require.NoError(t, err)

record := &enr.Record{}
record.Set(peerdas.Csc(custodyCount))
record.Set(enode.Secp256k1(privateKey.PublicKey))

return record, peerID, privateKey
}

func TestDataColumnsAdmissibleCustodyPeers(t *testing.T) {
genesisValidatorRoot := make([]byte, 32)

for i := 0; i < 32; i++ {
genesisValidatorRoot[i] = byte(i)
}

service := &Service{
cfg: &Config{},
genesisTime: time.Now(),
genesisValidatorsRoot: genesisValidatorRoot,
peers: peers.NewStatus(context.Background(), &peers.StatusConfig{
ScorerParams: &scorers.Config{},
}),
}

ipAddrString, err := prysmNetwork.ExternalIPv4()
require.NoError(t, err)
ipAddr := net.ParseIP(ipAddrString)

custodyRequirement := params.BeaconConfig().CustodyRequirement
dataColumnSidecarSubnetCount := params.BeaconConfig().DataColumnSidecarSubnetCount

// Peer 1 custodies exactly the same columns as us.
// (We use the same key pair as ours for simplicity.)
peer1Record, peer1ID, localPrivateKey := createPeer(t, 1, custodyRequirement)

// Peer 2 custodies all the columns.
peer2Record, peer2ID, _ := createPeer(t, 2, dataColumnSidecarSubnetCount)

// Peer 3 custodies different columns than us (but the same count).
// (We use the same public key as peer 2 for simplicity.)
peer3Record, peer3ID, _ := createPeer(t, 3, custodyRequirement)

// Peer 4 custodies fewer columns than us.
peer4Record, peer4ID, _ := createPeer(t, 4, custodyRequirement-1)

createListener := func() (*discover.UDPv5, error) {
return service.createListener(ipAddr, localPrivateKey)
}

listener, err := newListener(createListener)
require.NoError(t, err)

service.dv5Listener = listener

service.peers.Add(peer1Record, peer1ID, nil, network.DirOutbound)
service.peers.Add(peer2Record, peer2ID, nil, network.DirOutbound)
service.peers.Add(peer3Record, peer3ID, nil, network.DirOutbound)
service.peers.Add(peer4Record, peer4ID, nil, network.DirOutbound)

actual, err := service.DataColumnsAdmissibleCustodyPeers([]peer.ID{peer1ID, peer2ID, peer3ID, peer4ID})
require.NoError(t, err)

expected := []peer.ID{peer1ID, peer2ID}
require.DeepSSZEqual(t, expected, actual)
}

func TestDataColumnsCustodyCountFromRemotePeer(t *testing.T) {
const (
expectedENR uint64 = 7
expectedMetadata uint64 = 8
pid = "test-id"
)

csc := peerdas.Csc(expectedENR)

// Define a nil record.
var nilRecord *enr.Record = nil

// Define an empty record (record with no `csc` entry).
emptyRecord := &enr.Record{}

// Define a nominal record.
nominalRecord := &enr.Record{}
nominalRecord.Set(csc)

// Define a metadata with zero custody.
zeroMetadata := wrapper.WrappedMetadataV2(&pb.MetaDataV2{
CustodySubnetCount: 0,
})

// Define a nominal metadata.
nominalMetadata := wrapper.WrappedMetadataV2(&pb.MetaDataV2{
CustodySubnetCount: expectedMetadata,
})

testCases := []struct {
name string
record *enr.Record
metadata metadata.Metadata
expected uint64
}{
{
name: "No metadata - No ENR",
record: nilRecord,
expected: params.BeaconConfig().CustodyRequirement,
},
{
name: "No metadata - Empty ENR",
record: emptyRecord,
expected: params.BeaconConfig().CustodyRequirement,
},
{
name: "No Metadata - ENR",
record: nominalRecord,
expected: expectedENR,
},
{
name: "Metadata with 0 value - ENR",
record: nominalRecord,
metadata: zeroMetadata,
expected: expectedENR,
},
{
name: "Metadata - ENR",
record: nominalRecord,
metadata: nominalMetadata,
expected: expectedMetadata,
},
}

for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
// Create peers status.
peers := peers.NewStatus(context.Background(), &peers.StatusConfig{
ScorerParams: &scorers.Config{},
})

// Set the metadata.
if tc.metadata != nil {
peers.SetMetadata(pid, tc.metadata)
}

// Add a new peer with the record.
peers.Add(tc.record, pid, nil, network.DirOutbound)

// Create a new service.
service := &Service{
peers: peers,
metaData: tc.metadata,
}

// Retrieve the custody count from the remote peer.
actual := service.DataColumnsCustodyCountFromRemotePeer(pid)

// Verify the result.
require.Equal(t, tc.expected, actual)
})
}
}

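The test cases above pin down a fallback order for the remote custody count: peer metadata wins when present and non-zero, then the peer's ENR, then the configured minimum. A compact sketch of that resolution order; the constant and zero-means-absent convention below are illustrative, not the real config values or API.

    package main

    import "fmt"

    const custodyRequirement uint64 = 4 // illustrative stand-in for the config minimum

    // resolveCustodyCount mirrors the metadata -> ENR -> default chain tested above.
    // Zero models "absent": a missing metadata entry or ENR field.
    func resolveCustodyCount(fromMetadata, fromENR uint64) uint64 {
            if fromMetadata != 0 {
                    return fromMetadata
            }
            if fromENR != 0 {
                    return fromENR
            }
            return custodyRequirement
    }

    func main() {
            fmt.Println(resolveCustodyCount(8, 7)) // 8: metadata takes precedence
            fmt.Println(resolveCustodyCount(0, 7)) // 7: fall back to the ENR
            fmt.Println(resolveCustodyCount(0, 0)) // 4: fall back to the default
    }
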
@@ -15,10 +15,7 @@ import (
ma "github.com/multiformats/go-multiaddr"
"github.com/pkg/errors"
"github.com/prysmaticlabs/go-bitfield"
"github.com/sirupsen/logrus"

"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/v5/config/features"
"github.com/prysmaticlabs/prysm/v5/config/params"
@@ -50,12 +47,10 @@ const (
udp6
)

const quickProtocolEnrKey = "quic"

type quicProtocol uint16

// quicProtocol is the "quic" key, which holds the QUIC port of the node.
func (quicProtocol) ENRKey() string { return quickProtocolEnrKey }
func (quicProtocol) ENRKey() string { return "quic" }

type listenerWrapper struct {
mu sync.RWMutex
@@ -138,169 +133,68 @@ func (l *listenerWrapper) RebootListener() error {
return nil
}

// RefreshPersistentSubnets checks that we are tracking our local persistent subnets for a variety of gossip topics.
// This routine checks for our attestation, sync committee and data column subnets and updates them if they have
// been rotated.
func (s *Service) RefreshPersistentSubnets() {
// Return early if discv5 service isn't running.
// RefreshENR uses an epoch to refresh the enr entry for our node
// with the tracked committee ids for the epoch, allowing our node
// to be dynamically discoverable by others given our tracked committee ids.
func (s *Service) RefreshENR() {
// return early if discv5 isn't running
if s.dv5Listener == nil || !s.isInitialized() {
return
}

// Get the current epoch.
currentSlot := slots.CurrentSlot(uint64(s.genesisTime.Unix()))
currentEpoch := slots.ToEpoch(currentSlot)

// Get our node ID.
nodeID := s.dv5Listener.LocalNode().ID()

// Get our node record.
record := s.dv5Listener.Self().Record()

// Get the version of our metadata.
metadataVersion := s.Metadata().Version()

// Initialize persistent subnets.
if err := initializePersistentSubnets(nodeID, currentEpoch); err != nil {
currEpoch := slots.ToEpoch(slots.CurrentSlot(uint64(s.genesisTime.Unix())))
if err := initializePersistentSubnets(s.dv5Listener.LocalNode().ID(), currEpoch); err != nil {
log.WithError(err).Error("Could not initialize persistent subnets")
return
}

// Initialize persistent column subnets.
if err := initializePersistentColumnSubnets(nodeID); err != nil {
log.WithError(err).Error("Could not initialize persistent column subnets")
return
}

// Get the current attestation subnet bitfield.
bitV := bitfield.NewBitvector64()
attestationCommittees := cache.SubnetIDs.GetAllSubnets()
for _, idx := range attestationCommittees {
committees := cache.SubnetIDs.GetAllSubnets()
for _, idx := range committees {
bitV.SetBitAt(idx, true)
}

// Get the attestation subnet bitfield we store in our record.
inRecordBitV, err := attBitvector(record)
currentBitV, err := attBitvector(s.dv5Listener.Self().Record())
if err != nil {
log.WithError(err).Error("Could not retrieve att bitfield")
return
}

// Get the attestation subnet bitfield in our metadata.
inMetadataBitV := s.Metadata().AttnetsBitfield()

// Is our attestation bitvector record up to date?
isBitVUpToDate := bytes.Equal(bitV, inRecordBitV) && bytes.Equal(bitV, inMetadataBitV)

// Compare current epoch with Altair fork epoch
// Compare current epoch with our fork epochs
altairForkEpoch := params.BeaconConfig().AltairForkEpoch

if currentEpoch < altairForkEpoch {
switch {
case currEpoch < altairForkEpoch:
// Phase 0 behaviour.
if isBitVUpToDate {
// Return early if bitfield hasn't changed.
if bytes.Equal(bitV, currentBitV) {
// return early if bitfield hasn't changed
return
}

// Some data changed. Update the record and the metadata.
s.updateSubnetRecordWithMetadata(bitV)

// Ping all peers.
s.pingPeersAndLogEnr()

return
}

// Get the current sync subnet bitfield.
bitS := bitfield.Bitvector4{byte(0x00)}
syncCommittees := cache.SyncSubnetIDs.GetAllSubnets(currentEpoch)
for _, idx := range syncCommittees {
bitS.SetBitAt(idx, true)
}

// Get the sync subnet bitfield we store in our record.
inRecordBitS, err := syncBitvector(record)
if err != nil {
log.WithError(err).Error("Could not retrieve sync bitfield")
return
}

// Get the sync subnet bitfield in our metadata.
currentBitSInMetadata := s.Metadata().SyncnetsBitfield()

isBitSUpToDate := bytes.Equal(bitS, inRecordBitS) && bytes.Equal(bitS, currentBitSInMetadata)

// Compare current epoch with EIP-7594 fork epoch.
eip7594ForkEpoch := params.BeaconConfig().Eip7594ForkEpoch

if currentEpoch < eip7594ForkEpoch {
// Altair behaviour.
if metadataVersion == version.Altair && isBitVUpToDate && isBitSUpToDate {
// Nothing to do, return early.
default:
// Retrieve sync subnets from application level cache.
bitS := bitfield.Bitvector4{byte(0x00)}
committees = cache.SyncSubnetIDs.GetAllSubnets(currEpoch)
for _, idx := range committees {
bitS.SetBitAt(idx, true)
}
currentBitS, err := syncBitvector(s.dv5Listener.Self().Record())
if err != nil {
log.WithError(err).Error("Could not retrieve sync bitfield")
return
}
if bytes.Equal(bitV, currentBitV) && bytes.Equal(bitS, currentBitS) &&
s.Metadata().Version() == version.Altair {
// return early if bitfields haven't changed
return
}

// Some data have changed, update our record and metadata.
s.updateSubnetRecordWithMetadataV2(bitV, bitS)

// Ping all peers to inform them of new metadata.
s.pingPeersAndLogEnr()

return
}

// Get the current custody subnet count.
custodySubnetCount := peerdas.CustodySubnetCount()

// Get the custody subnet count we store in our record.
inRecordCustodySubnetCount, err := peerdas.CustodyCountFromRecord(record)
if err != nil {
log.WithError(err).Error("Could not retrieve custody subnet count")
return
}

// Get the custody subnet count in our metadata.
inMetadataCustodySubnetCount := s.Metadata().CustodySubnetCount()

isCustodySubnetCountUpToDate := (custodySubnetCount == inRecordCustodySubnetCount && custodySubnetCount == inMetadataCustodySubnetCount)

if isBitVUpToDate && isBitSUpToDate && isCustodySubnetCountUpToDate {
// Nothing to do, return early.
return
}

// Some data changed. Update the record and the metadata.
s.updateSubnetRecordWithMetadataV3(bitV, bitS, custodySubnetCount)

// Ping all peers.
s.pingPeersAndLogEnr()
// ping all peers to inform them of new metadata
s.pingPeers()
}

// listenForNewNodes watches for new nodes in the network and adds them to the peerstore.
func (s *Service) listenForNewNodes() {
const (
minLogInterval = 1 * time.Minute
thresholdLimit = 5
)

peersSummary := func(threshold uint) (uint, uint) {
// Retrieve how many active peers we have.
activePeers := s.Peers().Active()
activePeerCount := uint(len(activePeers))

// Compute how many peers we are missing to reach the threshold.
if activePeerCount >= threshold {
return activePeerCount, 0
}

missingPeerCount := threshold - activePeerCount

return activePeerCount, missingPeerCount
}

var lastLogTime time.Time

iterator := s.dv5Listener.RandomNodes()
iterator := filterNodes(s.ctx, s.dv5Listener.RandomNodes(), s.filterPeer)
defer iterator.Close()
connectivityTicker := time.NewTicker(1 * time.Minute)
thresholdCount := 0
@@ -309,31 +203,25 @@ func (s *Service) listenForNewNodes() {
select {
case <-s.ctx.Done():
return

case <-connectivityTicker.C:
// Skip the connectivity check if not enabled.
if !features.Get().EnableDiscoveryReboot {
continue
}

if !s.isBelowOutboundPeerThreshold() {
// Reset counter if we are beyond the threshold
thresholdCount = 0
continue
}

thresholdCount++

// Reboot listener if connectivity drops
if thresholdCount > thresholdLimit {
outBoundConnectedCount := len(s.peers.OutboundConnected())
log.WithField("outboundConnectionCount", outBoundConnectedCount).Warn("Rebooting discovery listener, reached threshold.")
if thresholdCount > 5 {
log.WithField("outboundConnectionCount", len(s.peers.OutboundConnected())).Warn("Rebooting discovery listener, reached threshold.")
if err := s.dv5Listener.RebootListener(); err != nil {
log.WithError(err).Error("Could not reboot listener")
continue
}

iterator = s.dv5Listener.RandomNodes()
iterator = filterNodes(s.ctx, s.dv5Listener.RandomNodes(), s.filterPeer)
thresholdCount = 0
}
default:
@@ -344,35 +232,17 @@ func (s *Service) listenForNewNodes() {
time.Sleep(pollingPeriod)
continue
}

// Compute the number of new peers we want to dial.
activePeerCount, missingPeerCount := peersSummary(s.cfg.MaxPeers)

fields := logrus.Fields{
"currentPeerCount": activePeerCount,
"targetPeerCount": s.cfg.MaxPeers,
}

if missingPeerCount == 0 {
wantedCount := s.wantedPeerDials()
if wantedCount == 0 {
log.Trace("Not looking for peers, at peer limit")
time.Sleep(pollingPeriod)
continue
}

if time.Since(lastLogTime) > minLogInterval {
lastLogTime = time.Now()
log.WithFields(fields).Debug("Searching for new active peers")
}

// Restrict dials if limit is applied.
if flags.MaxDialIsActive() {
maxConcurrentDials := uint(flags.Get().MaxConcurrentDials)
missingPeerCount = min(missingPeerCount, maxConcurrentDials)
wantedCount = min(wantedCount, flags.Get().MaxConcurrentDials)
}

// Search for new peers.
wantedNodes := searchForPeers(iterator, batchSize, missingPeerCount, s.filterPeer)

wantedNodes := enode.ReadNodes(iterator, wantedCount)
wg := new(sync.WaitGroup)
for i := 0; i < len(wantedNodes); i++ {
node := wantedNodes[i]
@@ -497,11 +367,6 @@ func (s *Service) createLocalNode(
localNode.Set(quicEntry)
}

if params.PeerDASEnabled() {
custodySubnetCount := peerdas.CustodySubnetCount()
localNode.Set(peerdas.Csc(custodySubnetCount))
}

localNode.SetFallbackIP(ipAddr)
localNode.SetFallbackUDP(udpPort)

@@ -587,14 +452,12 @@ func (s *Service) filterPeer(node *enode.Node) bool {
}

// Ignore bad nodes.
if s.peers.IsBad(peerData.ID) != nil {
if s.peers.IsBad(peerData.ID) {
return false
}

// Ignore nodes that are already active.
if s.peers.IsActive(peerData.ID) {
// Constantly update enr for known peers
s.peers.UpdateENR(node.Record(), peerData.ID)
return false
}

@@ -663,6 +526,17 @@ func (s *Service) isBelowOutboundPeerThreshold() bool {
return outBoundCount < outBoundThreshold
}

func (s *Service) wantedPeerDials() int {
maxPeers := int(s.cfg.MaxPeers)

activePeers := len(s.Peers().Active())
wantedCount := 0
if maxPeers > activePeers {
wantedCount = maxPeers - activePeers
}
return wantedCount
}

// PeersFromStringAddrs converts peer raw ENRs into multiaddrs for p2p.
func PeersFromStringAddrs(addrs []string) ([]ma.Multiaddr, error) {
var allAddrs []ma.Multiaddr

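Both versions of the refresh routine above decide whether to re-publish the node record by byte-comparing freshly computed subnet bitvectors against what the ENR and metadata already hold. A small sketch of that comparison using the same go-bitfield library (assuming the module is available on the build path):

    package main

    import (
            "bytes"
            "fmt"

            "github.com/prysmaticlabs/go-bitfield"
    )

    func main() {
            // Bitvector computed from the subnets we currently track.
            current := bitfield.NewBitvector64()
            current.SetBitAt(7, true)

            // Bitvector previously stored in our node record.
            inRecord := bitfield.NewBitvector64()
            inRecord.SetBitAt(7, true)

            // Identical bitvectors mean nothing rotated, so the refresh returns early.
            fmt.Println(bytes.Equal(current, inRecord)) // true

            inRecord.SetBitAt(9, true)
            fmt.Println(bytes.Equal(current, inRecord)) // false: the record must be updated
    }
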
@@ -16,15 +16,12 @@ import (
|
||||
"github.com/ethereum/go-ethereum/p2p/discover"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
"github.com/libp2p/go-libp2p"
|
||||
"github.com/libp2p/go-libp2p/core/crypto"
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
mock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/peerdata"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/scorers"
|
||||
@@ -33,12 +30,13 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/wrapper"
|
||||
leakybucket "github.com/prysmaticlabs/prysm/v5/container/leaky-bucket"
|
||||
ecdsaprysm "github.com/prysmaticlabs/prysm/v5/crypto/ecdsa"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
prysmNetwork "github.com/prysmaticlabs/prysm/v5/network"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
|
||||
@@ -133,10 +131,6 @@ func TestStartDiscV5_DiscoverAllPeers(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestCreateLocalNode(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.Eip7594ForkEpoch = 1
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
testCases := []struct {
|
||||
name string
|
||||
cfg *Config
|
||||
@@ -233,11 +227,6 @@ func TestCreateLocalNode(t *testing.T) {
|
||||
syncSubnets := new([]byte)
|
||||
require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(syncCommsSubnetEnrKey, syncSubnets)))
|
||||
require.DeepSSZEqual(t, []byte{0}, *syncSubnets)
|
||||
|
||||
// Check custody_subnet_count config.
|
||||
custodySubnetCount := new(uint64)
|
||||
require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(peerdas.CustodySubnetCountEnrKey, custodySubnetCount)))
|
||||
require.Equal(t, params.BeaconConfig().CustodyRequirement, *custodySubnetCount)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -389,14 +378,14 @@ func TestInboundPeerLimit(t *testing.T) {
|
||||
}
|
||||
|
||||
for i := 0; i < 30; i++ {
|
||||
_ = addPeer(t, s.peers, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED), false)
|
||||
_ = addPeer(t, s.peers, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED), false)
|
||||
}
|
||||
|
||||
require.Equal(t, true, s.isPeerAtLimit(false), "not at limit for outbound peers")
|
||||
require.Equal(t, false, s.isPeerAtLimit(true), "at limit for inbound peers")
|
||||
|
||||
for i := 0; i < highWatermarkBuffer; i++ {
|
||||
_ = addPeer(t, s.peers, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED), false)
|
||||
_ = addPeer(t, s.peers, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED), false)
|
||||
}
|
||||
|
||||
require.Equal(t, true, s.isPeerAtLimit(true), "not at limit for inbound peers")
|
||||
@@ -415,13 +404,13 @@ func TestOutboundPeerThreshold(t *testing.T) {
|
||||
}
|
||||
|
||||
for i := 0; i < 2; i++ {
|
||||
_ = addPeer(t, s.peers, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED), true)
|
||||
_ = addPeer(t, s.peers, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED), true)
|
||||
}
|
||||
|
||||
require.Equal(t, true, s.isBelowOutboundPeerThreshold(), "not at outbound peer threshold")
|
||||
|
||||
for i := 0; i < 3; i++ {
|
||||
_ = addPeer(t, s.peers, peerdata.ConnectionState(ethpb.ConnectionState_CONNECTED), true)
|
||||
_ = addPeer(t, s.peers, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED), true)
|
||||
}
|
||||
|
||||
require.Equal(t, false, s.isBelowOutboundPeerThreshold(), "still at outbound peer threshold")
|
||||
@@ -488,7 +477,7 @@ func TestCorrectUDPVersion(t *testing.T) {
|
||||
}
|
||||
|
||||
// addPeer is a helper to add a peer with a given connection state.
|
||||
func addPeer(t *testing.T, p *peers.Status, state peerdata.ConnectionState, outbound bool) peer.ID {
|
||||
func addPeer(t *testing.T, p *peers.Status, state peerdata.PeerConnectionState, outbound bool) peer.ID {
|
||||
// Set up some peers with different states
|
||||
mhBytes := []byte{0x11, 0x04}
|
||||
idBytes := make([]byte, 4)
|
||||
@@ -510,317 +499,192 @@ func addPeer(t *testing.T, p *peers.Status, state peerdata.ConnectionState, outb
|
||||
return id
|
||||
}
|
||||
|
||||
func createAndConnectPeer(t *testing.T, p2pService *testp2p.TestP2P, offset int) {
|
||||
// Create the private key.
|
||||
privateKeyBytes := make([]byte, 32)
|
||||
for i := 0; i < 32; i++ {
|
||||
privateKeyBytes[i] = byte(offset + i)
|
||||
}
|
||||
|
||||
privateKey, err := crypto.UnmarshalSecp256k1PrivateKey(privateKeyBytes)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create the peer.
|
||||
peer := testp2p.NewTestP2P(t, libp2p.Identity(privateKey))
|
||||
|
||||
// Add the peer and connect it.
|
||||
p2pService.Peers().Add(&enr.Record{}, peer.PeerID(), nil, network.DirOutbound)
|
||||
p2pService.Peers().SetConnectionState(peer.PeerID(), peers.Connected)
|
||||
p2pService.Connect(peer)
|
||||
}
|
||||
|
||||
// Define the ping count.
|
||||
var actualPingCount int
|
||||
|
||||
type check struct {
|
||||
pingCount int
|
||||
metadataSequenceNumber uint64
|
||||
attestationSubnets []uint64
|
||||
syncSubnets []uint64
|
||||
custodySubnetCount *uint64
|
||||
}
|
||||
|
||||
func checkPingCountCacheMetadataRecord(
|
||||
t *testing.T,
|
||||
service *Service,
|
||||
expected check,
|
||||
) {
|
||||
// Check the ping count.
|
||||
require.Equal(t, expected.pingCount, actualPingCount)
|
||||
|
||||
// Check the attestation subnets in the cache.
|
||||
actualAttestationSubnets := cache.SubnetIDs.GetAllSubnets()
|
||||
require.DeepSSZEqual(t, expected.attestationSubnets, actualAttestationSubnets)
|
||||
|
||||
// Check the metadata sequence number.
|
||||
actualMetadataSequenceNumber := service.metaData.SequenceNumber()
|
||||
require.Equal(t, expected.metadataSequenceNumber, actualMetadataSequenceNumber)
|
||||
|
||||
// Compute expected attestation subnets bits.
|
||||
expectedBitV := bitfield.NewBitvector64()
|
||||
exists := false
|
||||
|
||||
for _, idx := range expected.attestationSubnets {
|
||||
exists = true
|
||||
expectedBitV.SetBitAt(idx, true)
|
||||
}
|
||||
|
||||
// Check attnets in ENR.
|
||||
var actualBitVENR bitfield.Bitvector64
|
||||
err := service.dv5Listener.LocalNode().Node().Record().Load(enr.WithEntry(attSubnetEnrKey, &actualBitVENR))
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, expectedBitV, actualBitVENR)
|
||||
|
||||
// Check attnets in metadata.
|
||||
if !exists {
|
||||
expectedBitV = nil
|
||||
}
|
||||
|
||||
actualBitVMetadata := service.metaData.AttnetsBitfield()
|
||||
require.DeepSSZEqual(t, expectedBitV, actualBitVMetadata)
|
||||
|
||||
if expected.syncSubnets != nil {
|
||||
// Compute expected sync subnets bits.
|
||||
expectedBitS := bitfield.NewBitvector4()
|
||||
exists = false
|
||||
|
||||
for _, idx := range expected.syncSubnets {
|
||||
exists = true
|
||||
expectedBitS.SetBitAt(idx, true)
|
||||
}
|
||||
|
||||
// Check syncnets in ENR.
|
||||
var actualBitSENR bitfield.Bitvector4
|
||||
err := service.dv5Listener.LocalNode().Node().Record().Load(enr.WithEntry(syncCommsSubnetEnrKey, &actualBitSENR))
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, expectedBitS, actualBitSENR)
|
||||
|
||||
// Check syncnets in metadata.
|
||||
if !exists {
|
||||
expectedBitS = nil
|
||||
}
|
||||
|
||||
actualBitSMetadata := service.metaData.SyncnetsBitfield()
|
||||
require.DeepSSZEqual(t, expectedBitS, actualBitSMetadata)
|
||||
}
|
||||
|
||||
if expected.custodySubnetCount != nil {
|
||||
// Check custody subnet count in ENR.
|
||||
var actualCustodySubnetCount uint64
|
||||
err := service.dv5Listener.LocalNode().Node().Record().Load(enr.WithEntry(peerdas.CustodySubnetCountEnrKey, &actualCustodySubnetCount))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, *expected.custodySubnetCount, actualCustodySubnetCount)
|
||||
|
||||
// Check custody subnet count in metadata.
|
||||
actualCustodySubnetCountMetadata := service.metaData.CustodySubnetCount()
|
||||
require.Equal(t, *expected.custodySubnetCount, actualCustodySubnetCountMetadata)
|
||||
}
|
||||
}
|
||||
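The checks above repeatedly read typed values back out of the local node's ENR via enr.WithEntry followed by Record.Load. A minimal sketch of that load pattern, not Prysm code, assuming the go-ethereum enr package is imported and a key name registered by the caller:

// loadCustodyCount shows the ENR load pattern used in the checks above:
// WithEntry pairs a key with a pointer, and Record.Load fills the pointer,
// returning an error if the entry is absent or has an unexpected type.
func loadCustodyCount(rec *enr.Record, key string) (uint64, error) {
	var count uint64
	if err := rec.Load(enr.WithEntry(key, &count)); err != nil {
		return 0, err
	}
	return count, nil
}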
|
||||
func TestRefreshPersistentSubnets(t *testing.T) {
|
||||
func TestRefreshENR_ForkBoundaries(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
|
||||
// Clean up caches after usage.
|
||||
defer cache.SubnetIDs.EmptyAllCaches()
|
||||
defer cache.SyncSubnetIDs.EmptyAllCaches()
|
||||
|
||||
const (
|
||||
altairForkEpoch = 5
|
||||
eip7594ForkEpoch = 10
|
||||
)
|
||||
|
||||
custodySubnetCount := params.BeaconConfig().CustodyRequirement
|
||||
|
||||
// Set up epochs.
|
||||
defaultCfg := params.BeaconConfig()
|
||||
cfg := defaultCfg.Copy()
|
||||
cfg.AltairForkEpoch = altairForkEpoch
|
||||
cfg.Eip7594ForkEpoch = eip7594ForkEpoch
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
// Compute the number of seconds per epoch.
|
||||
secondsPerSlot := params.BeaconConfig().SecondsPerSlot
|
||||
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
|
||||
secondsPerEpoch := secondsPerSlot * uint64(slotsPerEpoch)
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
epochSinceGenesis uint64
|
||||
checks []check
|
||||
tests := []struct {
|
||||
name string
|
||||
svcBuilder func(t *testing.T) *Service
|
||||
postValidation func(t *testing.T, s *Service)
|
||||
}{
|
||||
{
|
||||
name: "Phase0",
|
||||
epochSinceGenesis: 0,
|
||||
checks: []check{
|
||||
{
|
||||
pingCount: 0,
|
||||
metadataSequenceNumber: 0,
|
||||
attestationSubnets: []uint64{},
|
||||
},
|
||||
{
|
||||
pingCount: 1,
|
||||
metadataSequenceNumber: 1,
|
||||
attestationSubnets: []uint64{40, 41},
|
||||
},
|
||||
{
|
||||
pingCount: 1,
|
||||
metadataSequenceNumber: 1,
|
||||
attestationSubnets: []uint64{40, 41},
|
||||
},
|
||||
{
|
||||
pingCount: 1,
|
||||
metadataSequenceNumber: 1,
|
||||
attestationSubnets: []uint64{40, 41},
|
||||
},
|
||||
name: "metadata no change",
|
||||
svcBuilder: func(t *testing.T) *Service {
|
||||
port := 2000
|
||||
ipAddr, pkey := createAddrAndPrivKey(t)
|
||||
s := &Service{
|
||||
genesisTime: time.Now(),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
cfg: &Config{UDPPort: uint(port)},
|
||||
}
|
||||
createListener := func() (*discover.UDPv5, error) {
|
||||
return s.createListener(ipAddr, pkey)
|
||||
}
|
||||
listener, err := newListener(createListener)
|
||||
assert.NoError(t, err)
|
||||
s.dv5Listener = listener
|
||||
s.metaData = wrapper.WrappedMetadataV0(new(ethpb.MetaDataV0))
|
||||
s.updateSubnetRecordWithMetadata([]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00})
|
||||
return s
|
||||
},
|
||||
postValidation: func(t *testing.T, s *Service) {
|
||||
currEpoch := slots.ToEpoch(slots.CurrentSlot(uint64(s.genesisTime.Unix())))
|
||||
subs, err := computeSubscribedSubnets(s.dv5Listener.LocalNode().ID(), currEpoch)
|
||||
assert.NoError(t, err)
|
||||
|
||||
bitV := bitfield.NewBitvector64()
|
||||
for _, idx := range subs {
|
||||
bitV.SetBitAt(idx, true)
|
||||
}
|
||||
assert.DeepEqual(t, bitV, s.metaData.AttnetsBitfield())
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Altair",
|
||||
epochSinceGenesis: altairForkEpoch,
|
||||
checks: []check{
|
||||
{
|
||||
pingCount: 0,
|
||||
metadataSequenceNumber: 0,
|
||||
attestationSubnets: []uint64{},
|
||||
syncSubnets: nil,
|
||||
},
|
||||
{
|
||||
pingCount: 1,
|
||||
metadataSequenceNumber: 1,
|
||||
attestationSubnets: []uint64{40, 41},
|
||||
syncSubnets: nil,
|
||||
},
|
||||
{
|
||||
pingCount: 2,
|
||||
metadataSequenceNumber: 2,
|
||||
attestationSubnets: []uint64{40, 41},
|
||||
syncSubnets: []uint64{1, 2},
|
||||
},
|
||||
{
|
||||
pingCount: 2,
|
||||
metadataSequenceNumber: 2,
|
||||
attestationSubnets: []uint64{40, 41},
|
||||
syncSubnets: []uint64{1, 2},
|
||||
},
|
||||
name: "metadata updated",
|
||||
svcBuilder: func(t *testing.T) *Service {
|
||||
port := 2000
|
||||
ipAddr, pkey := createAddrAndPrivKey(t)
|
||||
s := &Service{
|
||||
genesisTime: time.Now(),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
cfg: &Config{UDPPort: uint(port)},
|
||||
}
|
||||
createListener := func() (*discover.UDPv5, error) {
|
||||
return s.createListener(ipAddr, pkey)
|
||||
}
|
||||
listener, err := newListener(createListener)
|
||||
assert.NoError(t, err)
|
||||
s.dv5Listener = listener
|
||||
s.metaData = wrapper.WrappedMetadataV0(new(ethpb.MetaDataV0))
|
||||
s.updateSubnetRecordWithMetadata([]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01})
|
||||
cache.SubnetIDs.AddPersistentCommittee([]uint64{1, 2, 3, 23}, 0)
|
||||
return s
|
||||
},
|
||||
postValidation: func(t *testing.T, s *Service) {
|
||||
assert.DeepEqual(t, bitfield.Bitvector64{0xe, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0}, s.metaData.AttnetsBitfield())
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "PeerDAS",
|
||||
epochSinceGenesis: eip7594ForkEpoch,
|
||||
checks: []check{
|
||||
{
|
||||
pingCount: 0,
|
||||
metadataSequenceNumber: 0,
|
||||
attestationSubnets: []uint64{},
|
||||
syncSubnets: nil,
|
||||
},
|
||||
{
|
||||
pingCount: 1,
|
||||
metadataSequenceNumber: 1,
|
||||
attestationSubnets: []uint64{40, 41},
|
||||
syncSubnets: nil,
|
||||
custodySubnetCount: &custodySubnetCount,
|
||||
},
|
||||
{
|
||||
pingCount: 2,
|
||||
metadataSequenceNumber: 2,
|
||||
attestationSubnets: []uint64{40, 41},
|
||||
syncSubnets: []uint64{1, 2},
|
||||
custodySubnetCount: &custodySubnetCount,
|
||||
},
|
||||
{
|
||||
pingCount: 2,
|
||||
metadataSequenceNumber: 2,
|
||||
attestationSubnets: []uint64{40, 41},
|
||||
syncSubnets: []uint64{1, 2},
|
||||
custodySubnetCount: &custodySubnetCount,
|
||||
},
|
||||
name: "metadata updated at fork epoch",
|
||||
svcBuilder: func(t *testing.T) *Service {
|
||||
port := 2000
|
||||
ipAddr, pkey := createAddrAndPrivKey(t)
|
||||
s := &Service{
|
||||
genesisTime: time.Now().Add(-5 * oneEpochDuration()),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
cfg: &Config{UDPPort: uint(port)},
|
||||
}
|
||||
createListener := func() (*discover.UDPv5, error) {
|
||||
return s.createListener(ipAddr, pkey)
|
||||
}
|
||||
listener, err := newListener(createListener)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Update params
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.AltairForkEpoch = 5
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
params.BeaconConfig().InitializeForkSchedule()
|
||||
|
||||
s.dv5Listener = listener
|
||||
s.metaData = wrapper.WrappedMetadataV0(new(ethpb.MetaDataV0))
|
||||
s.updateSubnetRecordWithMetadata([]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01})
|
||||
cache.SubnetIDs.AddPersistentCommittee([]uint64{1, 2, 3, 23}, 0)
|
||||
return s
|
||||
},
|
||||
postValidation: func(t *testing.T, s *Service) {
|
||||
assert.Equal(t, version.Altair, s.metaData.Version())
|
||||
assert.DeepEqual(t, bitfield.Bitvector4{0x00}, s.metaData.MetadataObjV1().Syncnets)
|
||||
assert.DeepEqual(t, bitfield.Bitvector64{0xe, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0}, s.metaData.AttnetsBitfield())
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "metadata updated at fork epoch with no bitfield",
|
||||
svcBuilder: func(t *testing.T) *Service {
|
||||
port := 2000
|
||||
ipAddr, pkey := createAddrAndPrivKey(t)
|
||||
s := &Service{
|
||||
genesisTime: time.Now().Add(-5 * oneEpochDuration()),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
cfg: &Config{UDPPort: uint(port)},
|
||||
}
|
||||
createListener := func() (*discover.UDPv5, error) {
|
||||
return s.createListener(ipAddr, pkey)
|
||||
}
|
||||
listener, err := newListener(createListener)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Update params
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.AltairForkEpoch = 5
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
params.BeaconConfig().InitializeForkSchedule()
|
||||
|
||||
s.dv5Listener = listener
|
||||
s.metaData = wrapper.WrappedMetadataV0(new(ethpb.MetaDataV0))
|
||||
s.updateSubnetRecordWithMetadata([]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00})
|
||||
return s
|
||||
},
|
||||
postValidation: func(t *testing.T, s *Service) {
|
||||
assert.Equal(t, version.Altair, s.metaData.Version())
|
||||
assert.DeepEqual(t, bitfield.Bitvector4{0x00}, s.metaData.MetadataObjV1().Syncnets)
|
||||
currEpoch := slots.ToEpoch(slots.CurrentSlot(uint64(s.genesisTime.Unix())))
|
||||
subs, err := computeSubscribedSubnets(s.dv5Listener.LocalNode().ID(), currEpoch)
|
||||
assert.NoError(t, err)
|
||||
|
||||
bitV := bitfield.NewBitvector64()
|
||||
for _, idx := range subs {
|
||||
bitV.SetBitAt(idx, true)
|
||||
}
|
||||
assert.DeepEqual(t, bitV, s.metaData.AttnetsBitfield())
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "metadata updated past fork epoch with bitfields",
|
||||
svcBuilder: func(t *testing.T) *Service {
|
||||
port := 2000
|
||||
ipAddr, pkey := createAddrAndPrivKey(t)
|
||||
s := &Service{
|
||||
genesisTime: time.Now().Add(-6 * oneEpochDuration()),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
cfg: &Config{UDPPort: uint(port)},
|
||||
}
|
||||
createListener := func() (*discover.UDPv5, error) {
|
||||
return s.createListener(ipAddr, pkey)
|
||||
}
|
||||
listener, err := newListener(createListener)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Update params
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.AltairForkEpoch = 5
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
params.BeaconConfig().InitializeForkSchedule()
|
||||
|
||||
s.dv5Listener = listener
|
||||
s.metaData = wrapper.WrappedMetadataV0(new(ethpb.MetaDataV0))
|
||||
s.updateSubnetRecordWithMetadata([]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00})
|
||||
cache.SubnetIDs.AddPersistentCommittee([]uint64{1, 2, 3, 23}, 0)
|
||||
cache.SyncSubnetIDs.AddSyncCommitteeSubnets([]byte{'A'}, 0, []uint64{0, 1}, 0)
|
||||
return s
|
||||
},
|
||||
postValidation: func(t *testing.T, s *Service) {
|
||||
assert.Equal(t, version.Altair, s.metaData.Version())
|
||||
assert.DeepEqual(t, bitfield.Bitvector4{0x03}, s.metaData.MetadataObjV1().Syncnets)
|
||||
assert.DeepEqual(t, bitfield.Bitvector64{0xe, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0}, s.metaData.AttnetsBitfield())
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
const peerOffset = 1
|
||||
|
||||
// Initialize the ping count.
|
||||
actualPingCount = 0
|
||||
|
||||
// Create the private key.
|
||||
privateKeyBytes := make([]byte, 32)
|
||||
for i := 0; i < 32; i++ {
|
||||
privateKeyBytes[i] = byte(i)
|
||||
}
|
||||
|
||||
unmarshalledPrivateKey, err := crypto.UnmarshalSecp256k1PrivateKey(privateKeyBytes)
|
||||
require.NoError(t, err)
|
||||
|
||||
privateKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(unmarshalledPrivateKey)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create a p2p service.
|
||||
p2p := testp2p.NewTestP2P(t)
|
||||
|
||||
// Create and connect a peer.
|
||||
createAndConnectPeer(t, p2p, peerOffset)
|
||||
|
||||
// Create a service.
|
||||
service := &Service{
|
||||
pingMethod: func(_ context.Context, _ peer.ID) error {
|
||||
actualPingCount++
|
||||
return nil
|
||||
},
|
||||
cfg: &Config{UDPPort: 2000},
|
||||
peers: p2p.Peers(),
|
||||
genesisTime: time.Now().Add(-time.Duration(tc.epochSinceGenesis*secondsPerEpoch) * time.Second),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
}
|
||||
|
||||
// Set the listener and the metadata.
|
||||
createListener := func() (*discover.UDPv5, error) {
|
||||
return service.createListener(nil, privateKey)
|
||||
}
|
||||
|
||||
listener, err := newListener(createListener)
|
||||
require.NoError(t, err)
|
||||
|
||||
service.dv5Listener = listener
|
||||
service.metaData = wrapper.WrappedMetadataV0(new(ethpb.MetaDataV0))
|
||||
|
||||
// Run a check.
|
||||
checkPingCountCacheMetadataRecord(t, service, tc.checks[0])
|
||||
|
||||
// Refresh the persistent subnets.
|
||||
service.RefreshPersistentSubnets()
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
|
||||
// Run a check.
|
||||
checkPingCountCacheMetadataRecord(t, service, tc.checks[1])
|
||||
|
||||
// Add a sync committee subnet.
|
||||
cache.SyncSubnetIDs.AddSyncCommitteeSubnets([]byte{'a'}, altairForkEpoch, []uint64{1, 2}, 1*time.Hour)
|
||||
|
||||
// Refresh the persistent subnets.
|
||||
service.RefreshPersistentSubnets()
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
|
||||
// Run a check.
|
||||
checkPingCountCacheMetadataRecord(t, service, tc.checks[2])
|
||||
|
||||
// Refresh the persistent subnets.
|
||||
service.RefreshPersistentSubnets()
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
|
||||
// Run a check.
|
||||
checkPingCountCacheMetadataRecord(t, service, tc.checks[3])
|
||||
|
||||
// Clean the test.
|
||||
service.dv5Listener.Close()
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
s := tt.svcBuilder(t)
|
||||
s.RefreshENR()
|
||||
tt.postValidation(t, s)
|
||||
s.dv5Listener.Close()
|
||||
cache.SubnetIDs.EmptyAllCaches()
|
||||
cache.SyncSubnetIDs.EmptyAllCaches()
|
||||
})
|
||||
}
|
||||
|
||||
// Reset the config.
|
||||
params.OverrideBeaconConfig(defaultCfg)
|
||||
}
|
||||
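The test setup above places the service at a chosen epoch by backdating genesisTime with secondsPerSlot * slotsPerEpoch. A small sketch of that arithmetic as a standalone helper (illustrative name, assumes the params and time imports already present in the test file; with mainnet values this is 12s * 32 slots = 384s per epoch):

// genesisTimeForEpochsAgo backdates genesis so the current wall clock falls
// n epochs after it, mirroring the test setup above.
func genesisTimeForEpochsAgo(n uint64) time.Time {
	secondsPerEpoch := params.BeaconConfig().SecondsPerSlot * uint64(params.BeaconConfig().SlotsPerEpoch)
	return time.Now().Add(-time.Duration(n*secondsPerEpoch) * time.Second)
}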
|
||||
@@ -2,7 +2,6 @@ package p2p

import (
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v5/time/slots"
)

@@ -15,17 +14,11 @@ func (s *Service) forkWatcher() {
	select {
	case currSlot := <-slotTicker.C():
		currEpoch := slots.ToEpoch(currSlot)

		forkEpochs := map[primitives.Epoch]bool{
			params.BeaconConfig().AltairForkEpoch:    true,
			params.BeaconConfig().BellatrixForkEpoch: true,
			params.BeaconConfig().CapellaForkEpoch:   true,
			params.BeaconConfig().DenebForkEpoch:     true,
			params.BeaconConfig().ElectraForkEpoch:   true,
			params.BeaconConfig().Eip7594ForkEpoch:   true,
		}

		if forkEpochs[currEpoch] {
		if currEpoch == params.BeaconConfig().AltairForkEpoch ||
			currEpoch == params.BeaconConfig().BellatrixForkEpoch ||
			currEpoch == params.BeaconConfig().CapellaForkEpoch ||
			currEpoch == params.BeaconConfig().DenebForkEpoch ||
			currEpoch == params.BeaconConfig().ElectraForkEpoch {
			// If we are in the fork epoch, we update our enr with
			// the updated fork digest. This repeats over the
			// epoch, which might be slightly wasteful
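One side of this hunk folds the chained epoch comparisons into a set-membership lookup, which also picks up Eip7594ForkEpoch. A standalone sketch of that check, with a helper name that does not appear in the diff:

// isForkEpoch reports whether the given epoch is one of the scheduled fork
// epochs, using the same map-lookup shape as the hunk above.
func isForkEpoch(epoch primitives.Epoch) bool {
	cfg := params.BeaconConfig()
	forkEpochs := map[primitives.Epoch]bool{
		cfg.AltairForkEpoch:    true,
		cfg.BellatrixForkEpoch: true,
		cfg.CapellaForkEpoch:   true,
		cfg.DenebForkEpoch:     true,
		cfg.ElectraForkEpoch:   true,
		cfg.Eip7594ForkEpoch:   true,
	}
	return forkEpochs[epoch]
}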
@@ -121,7 +121,7 @@ func (s *Service) topicScoreParams(topic string) (*pubsub.TopicScoreParams, erro
|
||||
return defaultAttesterSlashingTopicParams(), nil
|
||||
case strings.Contains(topic, GossipBlsToExecutionChangeMessage):
|
||||
return defaultBlsToExecutionChangeTopicParams(), nil
|
||||
case strings.Contains(topic, GossipBlobSidecarMessage), strings.Contains(topic, GossipDataColumnSidecarMessage):
|
||||
case strings.Contains(topic, GossipBlobSidecarMessage):
|
||||
// TODO(Deneb): Using the default block scoring. But this should be updated.
|
||||
return defaultBlockTopicParams(), nil
|
||||
default:
|
||||
|
||||
@@ -22,7 +22,6 @@ var gossipTopicMappings = map[string]func() proto.Message{
|
||||
SyncCommitteeSubnetTopicFormat: func() proto.Message { return ðpb.SyncCommitteeMessage{} },
|
||||
BlsToExecutionChangeSubnetTopicFormat: func() proto.Message { return ðpb.SignedBLSToExecutionChange{} },
|
||||
BlobSubnetTopicFormat: func() proto.Message { return ðpb.BlobSidecar{} },
|
||||
DataColumnSubnetTopicFormat: func() proto.Message { return ðpb.DataColumnSidecar{} },
|
||||
}
|
||||
|
||||
// GossipTopicMappings is a function to return the assigned data type
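The comment above only names the lookup; as a usage sketch (illustrative wrapper, not Prysm code), callers resolve the concrete wire type for a topic at a given epoch and type-assert on it, the same way the test below does for each fork:

// blockMessageForEpoch resolves the block topic's message type at an epoch;
// past the Electra fork the assertion in the test below expects
// *ethpb.SignedBeaconBlockElectra.
func blockMessageForEpoch(epoch primitives.Epoch) (*ethpb.SignedBeaconBlockElectra, bool) {
	msg := GossipTopicMappings(BlockSubnetTopicFormat, epoch)
	block, ok := msg.(*ethpb.SignedBeaconBlockElectra)
	return block, ok
}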
|
||||
|
||||
@@ -30,20 +30,17 @@ func TestGossipTopicMappings_CorrectType(t *testing.T) {
|
||||
capellaForkEpoch := primitives.Epoch(300)
|
||||
denebForkEpoch := primitives.Epoch(400)
|
||||
electraForkEpoch := primitives.Epoch(500)
|
||||
eip7594ForkEpoch := primitives.Epoch(600)
|
||||
|
||||
bCfg.AltairForkEpoch = altairForkEpoch
|
||||
bCfg.BellatrixForkEpoch = bellatrixForkEpoch
|
||||
bCfg.CapellaForkEpoch = capellaForkEpoch
|
||||
bCfg.DenebForkEpoch = denebForkEpoch
|
||||
bCfg.ElectraForkEpoch = electraForkEpoch
|
||||
bCfg.Eip7594ForkEpoch = eip7594ForkEpoch
|
||||
bCfg.ForkVersionSchedule[bytesutil.ToBytes4(bCfg.AltairForkVersion)] = altairForkEpoch
|
||||
bCfg.ForkVersionSchedule[bytesutil.ToBytes4(bCfg.BellatrixForkVersion)] = bellatrixForkEpoch
|
||||
bCfg.ForkVersionSchedule[bytesutil.ToBytes4(bCfg.CapellaForkVersion)] = capellaForkEpoch
|
||||
bCfg.ForkVersionSchedule[bytesutil.ToBytes4(bCfg.DenebForkVersion)] = denebForkEpoch
|
||||
bCfg.ForkVersionSchedule[bytesutil.ToBytes4(bCfg.ElectraForkVersion)] = electraForkEpoch
|
||||
bCfg.ForkVersionSchedule[bytesutil.ToBytes4(bCfg.Eip7594ForkVersion)] = eip7594ForkEpoch
|
||||
bCfg.ForkVersionSchedule[bytesutil.ToBytes4(bCfg.AltairForkVersion)] = primitives.Epoch(100)
|
||||
bCfg.ForkVersionSchedule[bytesutil.ToBytes4(bCfg.BellatrixForkVersion)] = primitives.Epoch(200)
|
||||
bCfg.ForkVersionSchedule[bytesutil.ToBytes4(bCfg.CapellaForkVersion)] = primitives.Epoch(300)
|
||||
bCfg.ForkVersionSchedule[bytesutil.ToBytes4(bCfg.DenebForkVersion)] = primitives.Epoch(400)
|
||||
bCfg.ForkVersionSchedule[bytesutil.ToBytes4(bCfg.ElectraForkVersion)] = primitives.Epoch(500)
|
||||
params.OverrideBeaconConfig(bCfg)
|
||||
|
||||
// Phase 0
|
||||
@@ -56,15 +53,9 @@ func TestGossipTopicMappings_CorrectType(t *testing.T) {
|
||||
pMessage = GossipTopicMappings(AttesterSlashingSubnetTopicFormat, 0)
|
||||
_, ok = pMessage.(*ethpb.AttesterSlashing)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(ProposerSlashingSubnetTopicFormat, 0)
|
||||
_, ok = pMessage.(*ethpb.ProposerSlashing)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(AggregateAndProofSubnetTopicFormat, 0)
|
||||
_, ok = pMessage.(*ethpb.SignedAggregateAttestationAndProof)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(ExitSubnetTopicFormat, 0)
|
||||
_, ok = pMessage.(*ethpb.SignedVoluntaryExit)
|
||||
assert.Equal(t, true, ok)
|
||||
|
||||
// Altair Fork
|
||||
pMessage = GossipTopicMappings(BlockSubnetTopicFormat, altairForkEpoch)
|
||||
@@ -76,21 +67,9 @@ func TestGossipTopicMappings_CorrectType(t *testing.T) {
|
||||
pMessage = GossipTopicMappings(AttesterSlashingSubnetTopicFormat, altairForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.AttesterSlashing)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(ProposerSlashingSubnetTopicFormat, altairForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.ProposerSlashing)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(AggregateAndProofSubnetTopicFormat, altairForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.SignedAggregateAttestationAndProof)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(ExitSubnetTopicFormat, altairForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.SignedVoluntaryExit)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(SyncCommitteeSubnetTopicFormat, altairForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.SyncCommitteeMessage)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(SyncContributionAndProofSubnetTopicFormat, altairForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.SignedContributionAndProof)
|
||||
assert.Equal(t, true, ok)
|
||||
|
||||
// Bellatrix Fork
|
||||
pMessage = GossipTopicMappings(BlockSubnetTopicFormat, bellatrixForkEpoch)
|
||||
@@ -102,21 +81,9 @@ func TestGossipTopicMappings_CorrectType(t *testing.T) {
|
||||
pMessage = GossipTopicMappings(AttesterSlashingSubnetTopicFormat, bellatrixForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.AttesterSlashing)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(ProposerSlashingSubnetTopicFormat, bellatrixForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.ProposerSlashing)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(AggregateAndProofSubnetTopicFormat, bellatrixForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.SignedAggregateAttestationAndProof)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(ExitSubnetTopicFormat, bellatrixForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.SignedVoluntaryExit)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(SyncCommitteeSubnetTopicFormat, bellatrixForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.SyncCommitteeMessage)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(SyncContributionAndProofSubnetTopicFormat, bellatrixForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.SignedContributionAndProof)
|
||||
assert.Equal(t, true, ok)
|
||||
|
||||
// Capella Fork
|
||||
pMessage = GossipTopicMappings(BlockSubnetTopicFormat, capellaForkEpoch)
|
||||
@@ -128,24 +95,9 @@ func TestGossipTopicMappings_CorrectType(t *testing.T) {
|
||||
pMessage = GossipTopicMappings(AttesterSlashingSubnetTopicFormat, capellaForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.AttesterSlashing)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(ProposerSlashingSubnetTopicFormat, capellaForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.ProposerSlashing)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(AggregateAndProofSubnetTopicFormat, capellaForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.SignedAggregateAttestationAndProof)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(ExitSubnetTopicFormat, capellaForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.SignedVoluntaryExit)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(SyncCommitteeSubnetTopicFormat, capellaForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.SyncCommitteeMessage)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(SyncContributionAndProofSubnetTopicFormat, capellaForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.SignedContributionAndProof)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(BlsToExecutionChangeSubnetTopicFormat, capellaForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.SignedBLSToExecutionChange)
|
||||
assert.Equal(t, true, ok)
|
||||
|
||||
// Deneb Fork
|
||||
pMessage = GossipTopicMappings(BlockSubnetTopicFormat, denebForkEpoch)
|
||||
@@ -157,27 +109,9 @@ func TestGossipTopicMappings_CorrectType(t *testing.T) {
|
||||
pMessage = GossipTopicMappings(AttesterSlashingSubnetTopicFormat, denebForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.AttesterSlashing)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(ProposerSlashingSubnetTopicFormat, denebForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.ProposerSlashing)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(AggregateAndProofSubnetTopicFormat, denebForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.SignedAggregateAttestationAndProof)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(ExitSubnetTopicFormat, denebForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.SignedVoluntaryExit)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(SyncCommitteeSubnetTopicFormat, denebForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.SyncCommitteeMessage)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(SyncContributionAndProofSubnetTopicFormat, denebForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.SignedContributionAndProof)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(BlsToExecutionChangeSubnetTopicFormat, denebForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.SignedBLSToExecutionChange)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(BlobSubnetTopicFormat, denebForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.BlobSidecar)
|
||||
assert.Equal(t, true, ok)
|
||||
|
||||
// Electra Fork
|
||||
pMessage = GossipTopicMappings(BlockSubnetTopicFormat, electraForkEpoch)
|
||||
@@ -189,58 +123,7 @@ func TestGossipTopicMappings_CorrectType(t *testing.T) {
|
||||
pMessage = GossipTopicMappings(AttesterSlashingSubnetTopicFormat, electraForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.AttesterSlashingElectra)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(ProposerSlashingSubnetTopicFormat, electraForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.ProposerSlashing)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(AggregateAndProofSubnetTopicFormat, electraForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.SignedAggregateAttestationAndProofElectra)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(ExitSubnetTopicFormat, electraForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.SignedVoluntaryExit)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(SyncCommitteeSubnetTopicFormat, electraForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.SyncCommitteeMessage)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(SyncContributionAndProofSubnetTopicFormat, electraForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.SignedContributionAndProof)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(BlsToExecutionChangeSubnetTopicFormat, electraForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.SignedBLSToExecutionChange)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(BlobSubnetTopicFormat, electraForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.BlobSidecar)
|
||||
assert.Equal(t, true, ok)
|
||||
|
||||
// EIP-7594 Fork
|
||||
pMessage = GossipTopicMappings(BlockSubnetTopicFormat, eip7594ForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.SignedBeaconBlockElectra)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(AttestationSubnetTopicFormat, eip7594ForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.AttestationElectra)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(AttesterSlashingSubnetTopicFormat, eip7594ForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.AttesterSlashingElectra)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(ProposerSlashingSubnetTopicFormat, eip7594ForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.ProposerSlashing)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(AggregateAndProofSubnetTopicFormat, eip7594ForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.SignedAggregateAttestationAndProofElectra)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(ExitSubnetTopicFormat, eip7594ForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.SignedVoluntaryExit)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(SyncCommitteeSubnetTopicFormat, eip7594ForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.SyncCommitteeMessage)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(SyncContributionAndProofSubnetTopicFormat, eip7594ForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.SignedContributionAndProof)
|
||||
assert.Equal(t, true, ok)
|
||||
pMessage = GossipTopicMappings(BlsToExecutionChangeSubnetTopicFormat, eip7594ForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.SignedBLSToExecutionChange)
|
||||
assert.Equal(t, true, ok)
|
||||
// Note: BlobSidecar removal from EIP-7594 fork.
|
||||
pMessage = GossipTopicMappings(DataColumnSubnetTopicFormat, eip7594ForkEpoch)
|
||||
_, ok = pMessage.(*ethpb.DataColumnSidecar)
|
||||
assert.Equal(t, true, ok)
|
||||
}
|
||||
|
||||
@@ -2,6 +2,7 @@ package p2p
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"sync"
|
||||
@@ -9,7 +10,6 @@ import (
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/peerdata"
|
||||
prysmTime "github.com/prysmaticlabs/prysm/v5/time"
|
||||
@@ -22,87 +22,7 @@ const (
|
||||
)
|
||||
|
||||
func peerMultiaddrString(conn network.Conn) string {
|
||||
remoteMultiaddr := conn.RemoteMultiaddr().String()
|
||||
remotePeerID := conn.RemotePeer().String()
|
||||
return fmt.Sprintf("%s/p2p/%s", remoteMultiaddr, remotePeerID)
|
||||
}
|
||||
|
||||
func (s *Service) connectToPeer(conn network.Conn) {
|
||||
s.peers.SetConnectionState(conn.RemotePeer(), peers.Connected)
|
||||
// Go through the handshake process.
|
||||
log.WithFields(logrus.Fields{
|
||||
"direction": conn.Stat().Direction.String(),
|
||||
"multiAddr": peerMultiaddrString(conn),
|
||||
"activePeers": len(s.peers.Active()),
|
||||
}).Debug("Initiate peer connection")
|
||||
}
|
||||
|
||||
func (s *Service) disconnectFromPeer(
|
||||
conn network.Conn,
|
||||
goodByeFunc func(ctx context.Context, id peer.ID) error,
|
||||
badPeerErr error,
|
||||
) {
|
||||
// Get the remote peer ID.
|
||||
remotePeerID := conn.RemotePeer()
|
||||
|
||||
// Get the direction of the connection.
|
||||
direction := conn.Stat().Direction.String()
|
||||
|
||||
// Get the remote peer multiaddr.
|
||||
remotePeerMultiAddr := peerMultiaddrString(conn)
|
||||
|
||||
// Set the peer to disconnecting state.
|
||||
s.peers.SetConnectionState(remotePeerID, peers.Disconnecting)
|
||||
|
||||
// Only attempt a goodbye if we are still connected to the peer.
|
||||
if s.host.Network().Connectedness(remotePeerID) == network.Connected {
|
||||
if err := goodByeFunc(context.TODO(), remotePeerID); err != nil {
|
||||
log.WithError(err).Error("Unable to disconnect from peer")
|
||||
}
|
||||
}
|
||||
|
||||
// Get the remaining active peers.
|
||||
activePeerCount := len(s.peers.Active())
|
||||
log.
|
||||
WithError(badPeerErr).
|
||||
WithFields(logrus.Fields{
|
||||
"multiaddr": remotePeerMultiAddr,
|
||||
"direction": direction,
|
||||
"remainingActivePeers": activePeerCount,
|
||||
}).
|
||||
Debug("Initiate peer disconnection")
|
||||
|
||||
s.peers.SetConnectionState(remotePeerID, peers.Disconnected)
|
||||
}
|
||||
|
||||
func (s *Service) disconnectFromPeerOnError(
|
||||
conn network.Conn,
|
||||
goodByeFunc func(ctx context.Context, id peer.ID) error,
|
||||
badPeerErr error,
|
||||
) {
|
||||
// Get the remote peer ID.
|
||||
remotePeerID := conn.RemotePeer()
|
||||
|
||||
// Set the peer to disconnecting state.
|
||||
s.peers.SetConnectionState(remotePeerID, peers.Disconnecting)
|
||||
|
||||
// Only attempt a goodbye if we are still connected to the peer.
|
||||
if s.host.Network().Connectedness(remotePeerID) == network.Connected {
|
||||
if err := goodByeFunc(context.TODO(), remotePeerID); err != nil {
|
||||
log.WithError(err).Error("Unable to disconnect from peer")
|
||||
}
|
||||
}
|
||||
|
||||
log.
|
||||
WithError(badPeerErr).
|
||||
WithFields(logrus.Fields{
|
||||
"multiaddr": peerMultiaddrString(conn),
|
||||
"direction": conn.Stat().Direction.String(),
|
||||
"remainingActivePeers": len(s.peers.Active()),
|
||||
}).
|
||||
Debug("Initiate peer disconnection")
|
||||
|
||||
s.peers.SetConnectionState(remotePeerID, peers.Disconnected)
|
||||
return fmt.Sprintf("%s/p2p/%s", conn.RemoteMultiaddr().String(), conn.RemotePeer().String())
|
||||
}
|
||||
|
||||
// AddConnectionHandler adds a callback function which handles the connection with a
|
||||
@@ -137,9 +57,18 @@ func (s *Service) AddConnectionHandler(reqFunc, goodByeFunc func(ctx context.Con
|
||||
}
|
||||
|
||||
s.host.Network().Notify(&network.NotifyBundle{
|
||||
ConnectedF: func(_ network.Network, conn network.Conn) {
|
||||
ConnectedF: func(net network.Network, conn network.Conn) {
|
||||
remotePeer := conn.RemotePeer()
|
||||
|
||||
disconnectFromPeer := func() {
|
||||
s.peers.SetConnectionState(remotePeer, peers.PeerDisconnecting)
|
||||
// Only attempt a goodbye if we are still connected to the peer.
|
||||
if s.host.Network().Connectedness(remotePeer) == network.Connected {
|
||||
if err := goodByeFunc(context.TODO(), remotePeer); err != nil {
|
||||
log.WithError(err).Error("Unable to disconnect from peer")
|
||||
}
|
||||
}
|
||||
s.peers.SetConnectionState(remotePeer, peers.PeerDisconnected)
|
||||
}
|
||||
// Connection handler must be non-blocking as part of libp2p design.
|
||||
go func() {
|
||||
if peerHandshaking(remotePeer) {
|
||||
@@ -148,21 +77,28 @@ func (s *Service) AddConnectionHandler(reqFunc, goodByeFunc func(ctx context.Con
|
||||
return
|
||||
}
|
||||
defer peerFinished(remotePeer)
|
||||
|
||||
// Handle the various pre-existing conditions that will result in us not handshaking.
|
||||
peerConnectionState, err := s.peers.ConnectionState(remotePeer)
|
||||
if err == nil && (peerConnectionState == peers.Connected || peerConnectionState == peers.Connecting) {
|
||||
if err == nil && (peerConnectionState == peers.PeerConnected || peerConnectionState == peers.PeerConnecting) {
|
||||
log.WithField("currentState", peerConnectionState).WithField("reason", "already active").Trace("Ignoring connection request")
|
||||
return
|
||||
}
|
||||
|
||||
s.peers.Add(nil /* ENR */, remotePeer, conn.RemoteMultiaddr(), conn.Stat().Direction)
|
||||
|
||||
// Defensive check in the event we still get a bad peer.
|
||||
if err := s.peers.IsBad(remotePeer); err != nil {
|
||||
s.disconnectFromPeerOnError(conn, goodByeFunc, err)
|
||||
if s.peers.IsBad(remotePeer) {
|
||||
log.WithField("reason", "bad peer").Trace("Ignoring connection request")
|
||||
disconnectFromPeer()
|
||||
return
|
||||
}
|
||||
validPeerConnection := func() {
|
||||
s.peers.SetConnectionState(conn.RemotePeer(), peers.PeerConnected)
|
||||
// Go through the handshake process.
|
||||
log.WithFields(logrus.Fields{
|
||||
"direction": conn.Stat().Direction,
|
||||
"multiAddr": peerMultiaddrString(conn),
|
||||
"activePeers": len(s.peers.Active()),
|
||||
}).Debug("Peer connected")
|
||||
}
|
||||
|
||||
// Do not perform handshake on inbound dials.
|
||||
if conn.Stat().Direction == network.DirInbound {
|
||||
@@ -181,80 +117,63 @@ func (s *Service) AddConnectionHandler(reqFunc, goodByeFunc func(ctx context.Con
|
||||
// If peer hasn't sent a status request, we disconnect with them
|
||||
if _, err := s.peers.ChainState(remotePeer); errors.Is(err, peerdata.ErrPeerUnknown) || errors.Is(err, peerdata.ErrNoPeerStatus) {
|
||||
statusMessageMissing.Inc()
|
||||
s.disconnectFromPeerOnError(conn, goodByeFunc, errors.Wrap(err, "chain state"))
|
||||
disconnectFromPeer()
|
||||
return
|
||||
}
|
||||
|
||||
if peerExists {
|
||||
updated, err := s.peers.ChainStateLastUpdated(remotePeer)
|
||||
if err != nil {
|
||||
s.disconnectFromPeerOnError(conn, goodByeFunc, errors.Wrap(err, "chain state last updated"))
|
||||
disconnectFromPeer()
|
||||
return
|
||||
}
|
||||
|
||||
// Exit if we don't receive any current status messages from peer.
|
||||
if updated.IsZero() {
|
||||
s.disconnectFromPeerOnError(conn, goodByeFunc, errors.New("is zero"))
|
||||
return
|
||||
}
|
||||
|
||||
if updated.Before(currentTime) {
|
||||
s.disconnectFromPeerOnError(conn, goodByeFunc, errors.New("did not update"))
|
||||
// exit if we don't receive any current status messages from
|
||||
// peer.
|
||||
if updated.IsZero() || !updated.After(currentTime) {
|
||||
disconnectFromPeer()
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
s.connectToPeer(conn)
|
||||
validPeerConnection()
|
||||
return
|
||||
}
|
||||
|
||||
s.peers.SetConnectionState(conn.RemotePeer(), peers.Connecting)
|
||||
s.peers.SetConnectionState(conn.RemotePeer(), peers.PeerConnecting)
|
||||
if err := reqFunc(context.TODO(), conn.RemotePeer()); err != nil && !errors.Is(err, io.EOF) {
|
||||
s.disconnectFromPeerOnError(conn, goodByeFunc, err)
|
||||
log.WithError(err).Trace("Handshake failed")
|
||||
disconnectFromPeer()
|
||||
return
|
||||
}
|
||||
|
||||
s.connectToPeer(conn)
|
||||
validPeerConnection()
|
||||
}()
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// AddDisconnectionHandler disconnects from peers. It handles updating the peer status.
|
||||
// AddDisconnectionHandler disconnects from peers. It handles updating the peer status.
|
||||
// This also calls the handler responsible for maintaining other parts of the sync or p2p system.
|
||||
func (s *Service) AddDisconnectionHandler(handler func(ctx context.Context, id peer.ID) error) {
|
||||
s.host.Network().Notify(&network.NotifyBundle{
|
||||
DisconnectedF: func(net network.Network, conn network.Conn) {
|
||||
peerID := conn.RemotePeer()
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"multiAddr": peerMultiaddrString(conn),
|
||||
"direction": conn.Stat().Direction.String(),
|
||||
})
|
||||
log := log.WithField("multiAddr", peerMultiaddrString(conn))
|
||||
// Must be handled in a goroutine as this callback cannot be blocking.
|
||||
go func() {
|
||||
// Exit early if we are still connected to the peer.
|
||||
if net.Connectedness(peerID) == network.Connected {
|
||||
if net.Connectedness(conn.RemotePeer()) == network.Connected {
|
||||
return
|
||||
}
|
||||
|
||||
priorState, err := s.peers.ConnectionState(peerID)
|
||||
priorState, err := s.peers.ConnectionState(conn.RemotePeer())
|
||||
if err != nil {
|
||||
// Can happen if the peer has already disconnected, so...
|
||||
priorState = peers.Disconnected
|
||||
priorState = peers.PeerDisconnected
|
||||
}
|
||||
|
||||
s.peers.SetConnectionState(peerID, peers.Disconnecting)
|
||||
s.peers.SetConnectionState(conn.RemotePeer(), peers.PeerDisconnecting)
|
||||
if err := handler(context.TODO(), conn.RemotePeer()); err != nil {
|
||||
log.WithError(err).Error("Disconnect handler failed")
|
||||
}
|
||||
|
||||
s.peers.SetConnectionState(peerID, peers.Disconnected)
|
||||
|
||||
s.peers.SetConnectionState(conn.RemotePeer(), peers.PeerDisconnected)
|
||||
// Only log disconnections if we were fully connected.
|
||||
if priorState == peers.Connected {
|
||||
activePeersCount := len(s.peers.Active())
|
||||
log.WithField("remainingActivePeers", activePeersCount).Debug("Peer disconnected")
|
||||
if priorState == peers.PeerConnected {
|
||||
log.WithField("activePeers", len(s.peers.Active())).Debug("Peer disconnected")
|
||||
}
|
||||
}()
|
||||
},
|
||||
|
||||
@@ -3,7 +3,6 @@ package p2p
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
"github.com/libp2p/go-libp2p/core/connmgr"
|
||||
@@ -13,7 +12,6 @@ import (
|
||||
"github.com/multiformats/go-multiaddr"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/encoder"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/metadata"
|
||||
"google.golang.org/protobuf/proto"
|
||||
@@ -30,12 +28,6 @@ type P2P interface {
|
||||
ConnectionHandler
|
||||
PeersProvider
|
||||
MetadataProvider
|
||||
DataColumnsHandler
|
||||
}
|
||||
|
||||
type Acceser interface {
|
||||
Broadcaster
|
||||
PeerManager
|
||||
}
|
||||
|
||||
// Broadcaster broadcasts messages to peers over the p2p pubsub protocol.
|
||||
@@ -44,7 +36,6 @@ type Broadcaster interface {
|
||||
BroadcastAttestation(ctx context.Context, subnet uint64, att ethpb.Att) error
|
||||
BroadcastSyncCommitteeMessage(ctx context.Context, subnet uint64, sMsg *ethpb.SyncCommitteeMessage) error
|
||||
BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.BlobSidecar) error
|
||||
BroadcastDataColumn(ctx context.Context, root [fieldparams.RootLength]byte, columnSubnet uint64, dataColumnSidecar *ethpb.DataColumnSidecar) error
|
||||
}
|
||||
|
||||
// SetStreamHandler configures p2p to handle streams of a certain topic ID.
|
||||
@@ -90,9 +81,8 @@ type PeerManager interface {
|
||||
PeerID() peer.ID
|
||||
Host() host.Host
|
||||
ENR() *enr.Record
|
||||
NodeID() enode.ID
|
||||
DiscoveryAddresses() ([]multiaddr.Multiaddr, error)
|
||||
RefreshPersistentSubnets()
|
||||
RefreshENR()
|
||||
FindPeersWithSubnet(ctx context.Context, topic string, subIndex uint64, threshold int) (bool, error)
|
||||
AddPingMethod(reqFunc func(ctx context.Context, id peer.ID) error)
|
||||
}
|
||||
@@ -112,9 +102,3 @@ type MetadataProvider interface {
|
||||
Metadata() metadata.Metadata
|
||||
MetadataSeq() uint64
|
||||
}
|
||||
|
||||
type DataColumnsHandler interface {
|
||||
DataColumnsCustodyCountFromRemotePeer(peer.ID) uint64
|
||||
DataColumnsAdmissibleCustodyPeers([]peer.ID) ([]peer.ID, error)
|
||||
DataColumnsAdmissibleSubnetSamplingPeers([]peer.ID) ([]peer.ID, error)
|
||||
}
|
||||
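One side of this hunk widens the P2P composite interface with DataColumnsHandler. A common way to catch such interface changes at build time is a compile-time assertion; a one-line sketch, assuming *Service is the implementing type as elsewhere in this package:

// Fails to compile if *Service stops satisfying the composite interface.
var _ P2P = (*Service)(nil)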
|
||||
36	beacon-chain/p2p/iterator.go	(new file)
@@ -0,0 +1,36 @@
package p2p

import (
	"context"

	"github.com/ethereum/go-ethereum/p2p/enode"
)

// filterNodes wraps an iterator such that Next only returns nodes for which
// the 'check' function returns true. This custom implementation also
// checks for context deadlines so that in the event the parent context has
// expired, we do exit from the search rather than perform more network
// lookups for additional peers.
func filterNodes(ctx context.Context, it enode.Iterator, check func(*enode.Node) bool) enode.Iterator {
	return &filterIter{ctx, it, check}
}

type filterIter struct {
	context.Context
	enode.Iterator
	check func(*enode.Node) bool
}

// Next looks up for the next valid node according to our
// filter criteria.
func (f *filterIter) Next() bool {
	for f.Iterator.Next() {
		if f.Context.Err() != nil {
			return false
		}
		if f.check(f.Node()) {
			return true
		}
	}
	return false
}
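A usage sketch for the new iterator wrapper, assuming a parent discovery context and an eligibility predicate such as the subnet or peer checks used elsewhere in this package (the helper name below is illustrative):

// collectEligible drains a discv5 iterator, skipping nodes that fail the
// predicate and stopping early once the parent context is cancelled.
func collectEligible(ctx context.Context, it enode.Iterator, eligible func(*enode.Node) bool, max int) []*enode.Node {
	filtered := filterNodes(ctx, it, eligible)
	nodes := make([]*enode.Node, 0, max)
	for len(nodes) < max && filtered.Next() {
		nodes = append(nodes, filtered.Node())
	}
	return nodes
}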
@@ -29,7 +29,7 @@ func MsgID(genesisValidatorsRoot []byte, pmsg *pubsubpb.Message) string {
|
||||
// never be hit.
|
||||
msg := make([]byte, 20)
|
||||
copy(msg, "invalid")
|
||||
return bytesutil.UnsafeCastToString(msg)
|
||||
return string(msg)
|
||||
}
|
||||
digest, err := ExtractGossipDigest(*pmsg.Topic)
|
||||
if err != nil {
|
||||
@@ -37,7 +37,7 @@ func MsgID(genesisValidatorsRoot []byte, pmsg *pubsubpb.Message) string {
|
||||
// never be hit.
|
||||
msg := make([]byte, 20)
|
||||
copy(msg, "invalid")
|
||||
return bytesutil.UnsafeCastToString(msg)
|
||||
return string(msg)
|
||||
}
|
||||
_, fEpoch, err := forks.RetrieveForkDataFromDigest(digest, genesisValidatorsRoot)
|
||||
if err != nil {
|
||||
@@ -45,7 +45,7 @@ func MsgID(genesisValidatorsRoot []byte, pmsg *pubsubpb.Message) string {
|
||||
// never be hit.
|
||||
msg := make([]byte, 20)
|
||||
copy(msg, "invalid")
|
||||
return bytesutil.UnsafeCastToString(msg)
|
||||
return string(msg)
|
||||
}
|
||||
if fEpoch >= params.BeaconConfig().AltairForkEpoch {
|
||||
return postAltairMsgID(pmsg, fEpoch)
|
||||
@@ -54,11 +54,11 @@ func MsgID(genesisValidatorsRoot []byte, pmsg *pubsubpb.Message) string {
|
||||
if err != nil {
|
||||
combinedData := append(params.BeaconConfig().MessageDomainInvalidSnappy[:], pmsg.Data...)
|
||||
h := hash.Hash(combinedData)
|
||||
return bytesutil.UnsafeCastToString(h[:20])
|
||||
return string(h[:20])
|
||||
}
|
||||
combinedData := append(params.BeaconConfig().MessageDomainValidSnappy[:], decodedData...)
|
||||
h := hash.Hash(combinedData)
|
||||
return bytesutil.UnsafeCastToString(h[:20])
|
||||
return string(h[:20])
|
||||
}
|
||||
|
||||
// Spec:
|
||||
@@ -93,13 +93,13 @@ func postAltairMsgID(pmsg *pubsubpb.Message, fEpoch primitives.Epoch) string {
|
||||
// should never happen
|
||||
msg := make([]byte, 20)
|
||||
copy(msg, "invalid")
|
||||
return bytesutil.UnsafeCastToString(msg)
|
||||
return string(msg)
|
||||
}
|
||||
if uint64(totalLength) > gossipPubSubSize {
|
||||
// this should never happen
|
||||
msg := make([]byte, 20)
|
||||
copy(msg, "invalid")
|
||||
return bytesutil.UnsafeCastToString(msg)
|
||||
return string(msg)
|
||||
}
|
||||
combinedData := make([]byte, 0, totalLength)
|
||||
combinedData = append(combinedData, params.BeaconConfig().MessageDomainInvalidSnappy[:]...)
|
||||
@@ -107,7 +107,7 @@ func postAltairMsgID(pmsg *pubsubpb.Message, fEpoch primitives.Epoch) string {
|
||||
combinedData = append(combinedData, topic...)
|
||||
combinedData = append(combinedData, pmsg.Data...)
|
||||
h := hash.Hash(combinedData)
|
||||
return bytesutil.UnsafeCastToString(h[:20])
|
||||
return string(h[:20])
|
||||
}
|
||||
totalLength, err := math.AddInt(
|
||||
len(params.BeaconConfig().MessageDomainValidSnappy),
|
||||
@@ -120,7 +120,7 @@ func postAltairMsgID(pmsg *pubsubpb.Message, fEpoch primitives.Epoch) string {
|
||||
// should never happen
|
||||
msg := make([]byte, 20)
|
||||
copy(msg, "invalid")
|
||||
return bytesutil.UnsafeCastToString(msg)
|
||||
return string(msg)
|
||||
}
|
||||
combinedData := make([]byte, 0, totalLength)
|
||||
combinedData = append(combinedData, params.BeaconConfig().MessageDomainValidSnappy[:]...)
|
||||
@@ -128,5 +128,5 @@ func postAltairMsgID(pmsg *pubsubpb.Message, fEpoch primitives.Epoch) string {
|
||||
combinedData = append(combinedData, topic...)
|
||||
combinedData = append(combinedData, decodedData...)
|
||||
h := hash.Hash(combinedData)
|
||||
return bytesutil.UnsafeCastToString(h[:20])
|
||||
return string(h[:20])
|
||||
}
|
||||
|
||||
@@ -60,25 +60,17 @@ var (
|
||||
"the subnet. The beacon node increments this counter when the broadcast is blocked " +
|
||||
"until a subnet peer can be found.",
|
||||
})
|
||||
blobSidecarBroadcasts = promauto.NewCounter(prometheus.CounterOpts{
|
||||
blobSidecarCommitteeBroadcasts = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "p2p_blob_sidecar_committee_broadcasts",
|
||||
Help: "The number of blob sidecar messages that were broadcast with no peer on.",
|
||||
Help: "The number of blob sidecar committee messages that were broadcast with no peer on.",
|
||||
})
|
||||
syncCommitteeBroadcastAttempts = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "p2p_sync_committee_subnet_attempted_broadcasts",
|
||||
Help: "The number of sync committee that were attempted to be broadcast.",
|
||||
})
|
||||
blobSidecarBroadcastAttempts = promauto.NewCounter(prometheus.CounterOpts{
|
||||
blobSidecarCommitteeBroadcastAttempts = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "p2p_blob_sidecar_committee_attempted_broadcasts",
|
||||
Help: "The number of blob sidecar messages that were attempted to be broadcast.",
|
||||
})
|
||||
dataColumnSidecarBroadcasts = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "p2p_data_column_sidecar_broadcasts",
|
||||
Help: "The number of data column sidecar messages that were broadcasted.",
|
||||
})
|
||||
dataColumnSidecarBroadcastAttempts = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "p2p_data_column_sidecar_attempted_broadcasts",
|
||||
Help: "The number of data column sidecar messages that were attempted to be broadcast.",
|
||||
Help: "The number of blob sidecar committee messages that were attempted to be broadcast.",
|
||||
})
|
||||
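These counters follow the usual promauto pattern: declared once at package scope, then incremented on the hot path. A tiny hedged usage sketch (the publish callback is hypothetical; the counter names are the ones declared above):

// broadcastDataColumnWithMetrics bumps the attempt counter before publishing
// and the success counter only once the publish goes through.
func broadcastDataColumnWithMetrics(publish func() error) error {
	dataColumnSidecarBroadcastAttempts.Inc()
	if err := publish(); err != nil {
		return err
	}
	dataColumnSidecarBroadcasts.Inc()
	return nil
}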
|
||||
// Gossip Tracer Metrics
|
||||
|
||||
@@ -23,8 +23,8 @@ var (
	ErrNoPeerStatus = errors.New("no chain status for peer")
)

// ConnectionState is the state of the connection.
type ConnectionState ethpb.ConnectionState
// PeerConnectionState is the state of the connection.
type PeerConnectionState ethpb.ConnectionState

// StoreConfig holds peer store parameters.
type StoreConfig struct {
@@ -49,7 +49,7 @@ type PeerData struct {
	// Network related data.
	Address       ma.Multiaddr
	Direction     network.Direction
	ConnState     ConnectionState
	ConnState     PeerConnectionState
	Enr           *enr.Record
	NextValidTime time.Time
	// Chain related data.
|
||||
@@ -20,7 +20,6 @@ go_library(
|
||||
"//crypto/rand:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
|
||||
@@ -61,7 +61,7 @@ func (s *BadResponsesScorer) Score(pid peer.ID) float64 {
|
||||
|
||||
// scoreNoLock is a lock-free version of Score.
|
||||
func (s *BadResponsesScorer) scoreNoLock(pid peer.ID) float64 {
|
||||
if s.isBadPeerNoLock(pid) != nil {
|
||||
if s.isBadPeerNoLock(pid) {
|
||||
return BadPeerScore
|
||||
}
|
||||
score := float64(0)
|
||||
@@ -116,25 +116,18 @@ func (s *BadResponsesScorer) Increment(pid peer.ID) {
|
||||
|
||||
// IsBadPeer states if the peer is to be considered bad.
|
||||
// If the peer is unknown this will return `false`, which makes using this function easier than returning an error.
|
||||
func (s *BadResponsesScorer) IsBadPeer(pid peer.ID) error {
|
||||
func (s *BadResponsesScorer) IsBadPeer(pid peer.ID) bool {
|
||||
s.store.RLock()
|
||||
defer s.store.RUnlock()
|
||||
|
||||
return s.isBadPeerNoLock(pid)
|
||||
}
|
||||
|
||||
// isBadPeerNoLock is a lock-free version of IsBadPeer.
|
||||
func (s *BadResponsesScorer) isBadPeerNoLock(pid peer.ID) error {
|
||||
// if peerData, ok := s.store.PeerData(pid); ok {
|
||||
// TODO: Remove this once out of devnet
|
||||
// if peerData.BadResponses >= s.config.Threshold {
|
||||
// return errors.Errorf("peer exceeded bad responses threshold: got %d, threshold %d", peerData.BadResponses, s.config.Threshold)
|
||||
// }
|
||||
|
||||
// return nil
|
||||
// }
|
||||
|
||||
return nil
|
||||
func (s *BadResponsesScorer) isBadPeerNoLock(pid peer.ID) bool {
|
||||
if peerData, ok := s.store.PeerData(pid); ok {
|
||||
return peerData.BadResponses >= s.config.Threshold
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// BadPeers returns the peers that are considered bad.
|
||||
@@ -144,7 +137,7 @@ func (s *BadResponsesScorer) BadPeers() []peer.ID {
|
||||
|
||||
badPeers := make([]peer.ID, 0)
|
||||
for pid := range s.store.Peers() {
|
||||
if s.isBadPeerNoLock(pid) != nil {
|
||||
if s.isBadPeerNoLock(pid) {
|
||||
badPeers = append(badPeers, pid)
|
||||
}
|
||||
}
|
||||
|
@@ -2,6 +2,7 @@ package scorers_test

import (
	"context"
	"sort"
	"testing"

	"github.com/libp2p/go-libp2p/core/network"
@@ -13,41 +14,40 @@ import (
	"github.com/prysmaticlabs/prysm/v5/testing/require"
)

// TODO: Uncomment when out of devnet
// func TestScorers_BadResponses_Score(t *testing.T) {
// const pid = "peer1"
func TestScorers_BadResponses_Score(t *testing.T) {
	const pid = "peer1"

// ctx, cancel := context.WithCancel(context.Background())
// defer cancel()
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

// peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
// PeerLimit: 30,
// ScorerParams: &scorers.Config{
// BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
// Threshold: 4,
// },
// },
// })
// scorer := peerStatuses.Scorers().BadResponsesScorer()
	peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
		PeerLimit: 30,
		ScorerParams: &scorers.Config{
			BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
				Threshold: 4,
			},
		},
	})
	scorer := peerStatuses.Scorers().BadResponsesScorer()

// assert.Equal(t, 0., scorer.Score(pid), "Unexpected score for unregistered peer")
	assert.Equal(t, 0., scorer.Score(pid), "Unexpected score for unregistered peer")

// scorer.Increment(pid)
// assert.NoError(t, scorer.IsBadPeer(pid))
// assert.Equal(t, -2.5, scorer.Score(pid))
	scorer.Increment(pid)
	assert.Equal(t, false, scorer.IsBadPeer(pid))
	assert.Equal(t, -2.5, scorer.Score(pid))

// scorer.Increment(pid)
// assert.NoError(t, scorer.IsBadPeer(pid))
// assert.Equal(t, float64(-5), scorer.Score(pid))
	scorer.Increment(pid)
	assert.Equal(t, false, scorer.IsBadPeer(pid))
	assert.Equal(t, float64(-5), scorer.Score(pid))

// scorer.Increment(pid)
// assert.NoError(t, scorer.IsBadPeer(pid))
// assert.Equal(t, float64(-7.5), scorer.Score(pid))
	scorer.Increment(pid)
	assert.Equal(t, false, scorer.IsBadPeer(pid))
	assert.Equal(t, float64(-7.5), scorer.Score(pid))

// scorer.Increment(pid)
// assert.NotNil(t, scorer.IsBadPeer(pid))
// assert.Equal(t, -100.0, scorer.Score(pid))
// }
	scorer.Increment(pid)
	assert.Equal(t, true, scorer.IsBadPeer(pid))
	assert.Equal(t, -100.0, scorer.Score(pid))
}

func TestScorers_BadResponses_ParamsThreshold(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
@@ -142,60 +142,58 @@ func TestScorers_BadResponses_Decay(t *testing.T) {
	assert.Equal(t, 1, badResponses, "unexpected bad responses for pid3")
}

// TODO: Uncomment when out of devnet
// func TestScorers_BadResponses_IsBadPeer(t *testing.T) {
// ctx, cancel := context.WithCancel(context.Background())
// defer cancel()
func TestScorers_BadResponses_IsBadPeer(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

// peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
// PeerLimit: 30,
// ScorerParams: &scorers.Config{},
// })
// scorer := peerStatuses.Scorers().BadResponsesScorer()
// pid := peer.ID("peer1")
// assert.NoError(t, scorer.IsBadPeer(pid))
	peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
		PeerLimit: 30,
		ScorerParams: &scorers.Config{},
	})
	scorer := peerStatuses.Scorers().BadResponsesScorer()
	pid := peer.ID("peer1")
	assert.Equal(t, false, scorer.IsBadPeer(pid))

// peerStatuses.Add(nil, pid, nil, network.DirUnknown)
// assert.NoError(t, scorer.IsBadPeer(pid))
	peerStatuses.Add(nil, pid, nil, network.DirUnknown)
	assert.Equal(t, false, scorer.IsBadPeer(pid))

// for i := 0; i < scorers.DefaultBadResponsesThreshold; i++ {
// scorer.Increment(pid)
// if i == scorers.DefaultBadResponsesThreshold-1 {
// assert.NotNil(t, scorer.IsBadPeer(pid), "Unexpected peer status")
// } else {
// assert.NoError(t, scorer.IsBadPeer(pid), "Unexpected peer status")
// }
// }
// }
	for i := 0; i < scorers.DefaultBadResponsesThreshold; i++ {
		scorer.Increment(pid)
		if i == scorers.DefaultBadResponsesThreshold-1 {
			assert.Equal(t, true, scorer.IsBadPeer(pid), "Unexpected peer status")
		} else {
			assert.Equal(t, false, scorer.IsBadPeer(pid), "Unexpected peer status")
		}
	}
}

// TODO: Uncomment when out of devnet
// func TestScorers_BadResponses_BadPeers(t *testing.T) {
// ctx, cancel := context.WithCancel(context.Background())
// defer cancel()
func TestScorers_BadResponses_BadPeers(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

// peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
// PeerLimit: 30,
// ScorerParams: &scorers.Config{},
// })
// scorer := peerStatuses.Scorers().BadResponsesScorer()
// pids := []peer.ID{peer.ID("peer1"), peer.ID("peer2"), peer.ID("peer3"), peer.ID("peer4"), peer.ID("peer5")}
// for i := 0; i < len(pids); i++ {
// peerStatuses.Add(nil, pids[i], nil, network.DirUnknown)
// }
// for i := 0; i < scorers.DefaultBadResponsesThreshold; i++ {
// scorer.Increment(pids[1])
// scorer.Increment(pids[2])
// scorer.Increment(pids[4])
// }
// assert.NoError(t, scorer.IsBadPeer(pids[0]), "Invalid peer status")
// assert.NotNil(t, scorer.IsBadPeer(pids[1]), "Invalid peer status")
// assert.NotNil(t, scorer.IsBadPeer(pids[2]), "Invalid peer status")
// assert.NoError(t, scorer.IsBadPeer(pids[3]), "Invalid peer status")
// assert.NotNil(t, scorer.IsBadPeer(pids[4]), "Invalid peer status")
// want := []peer.ID{pids[1], pids[2], pids[4]}
// badPeers := scorer.BadPeers()
// sort.Slice(badPeers, func(i, j int) bool {
// return badPeers[i] < badPeers[j]
// })
// assert.DeepEqual(t, want, badPeers, "Unexpected list of bad peers")
// }
	peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
		PeerLimit: 30,
		ScorerParams: &scorers.Config{},
	})
	scorer := peerStatuses.Scorers().BadResponsesScorer()
	pids := []peer.ID{peer.ID("peer1"), peer.ID("peer2"), peer.ID("peer3"), peer.ID("peer4"), peer.ID("peer5")}
	for i := 0; i < len(pids); i++ {
		peerStatuses.Add(nil, pids[i], nil, network.DirUnknown)
	}
	for i := 0; i < scorers.DefaultBadResponsesThreshold; i++ {
		scorer.Increment(pids[1])
		scorer.Increment(pids[2])
		scorer.Increment(pids[4])
	}
	assert.Equal(t, false, scorer.IsBadPeer(pids[0]), "Invalid peer status")
	assert.Equal(t, true, scorer.IsBadPeer(pids[1]), "Invalid peer status")
	assert.Equal(t, true, scorer.IsBadPeer(pids[2]), "Invalid peer status")
	assert.Equal(t, false, scorer.IsBadPeer(pids[3]), "Invalid peer status")
	assert.Equal(t, true, scorer.IsBadPeer(pids[4]), "Invalid peer status")
	want := []peer.ID{pids[1], pids[2], pids[4]}
	badPeers := scorer.BadPeers()
	sort.Slice(badPeers, func(i, j int) bool {
		return badPeers[i] < badPeers[j]
	})
	assert.DeepEqual(t, want, badPeers, "Unexpected list of bad peers")
}

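The asserted scores in `TestScorers_BadResponses_Score` imply the underlying rule: with `Threshold: 4`, each bad response costs -2.5 points (one quarter of an assumed -10 full-threshold penalty), and once the count reaches the threshold the reported score drops to -100. A standalone sketch reproducing those values; the constants are inferred from the test expectations, not taken from the scorer's source:

```go
package main

import "fmt"

const (
	threshold    = 4      // mirrors Threshold: 4 in the test config
	maxPenalty   = -10.0  // inferred: full penalty accrued just before the peer turns bad
	badPeerScore = -100.0 // inferred: score reported once the peer is considered bad
)

// badResponsesScore reproduces the values asserted in the test:
// 1 -> -2.5, 2 -> -5, 3 -> -7.5, 4 -> -100.
func badResponsesScore(badResponses int) float64 {
	if badResponses >= threshold {
		return badPeerScore
	}
	return float64(badResponses) / float64(threshold) * maxPenalty
}

func main() {
	for n := 0; n <= threshold; n++ {
		fmt.Printf("bad responses: %d, score: %v\n", n, badResponsesScore(n))
	}
}
```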
@@ -177,8 +177,8 @@ func (s *BlockProviderScorer) processedBlocksNoLock(pid peer.ID) uint64 {
// Block provider scorer cannot guarantee that lower score of a peer is indeed a sign of a bad peer.
// Therefore this scorer never marks peers as bad, and relies on scores to probabilistically sort
// out low-scorers (see WeightSorted method).
func (*BlockProviderScorer) IsBadPeer(_ peer.ID) error {
	return nil
func (*BlockProviderScorer) IsBadPeer(_ peer.ID) bool {
	return false
}

// BadPeers returns the peers that are considered bad.

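Since `BlockProviderScorer` never marks a peer bad, the constant `false` (previously a constant `nil` error) is the whole contract. A sketch of that contract with a stand-in type; the `BadPeers` body shown is an assumption consistent with the comment above, since the real method is outside this hunk:

```go
package main

import "fmt"

// blockProviderScorer stands in for the real type; only the
// "never bad" contract matters here.
type blockProviderScorer struct{}

// IsBadPeer always reports false: low block-provider scores are noisy,
// so they are never treated as proof of misbehavior.
func (blockProviderScorer) IsBadPeer(pid string) bool { return false }

// BadPeers is the natural counterpart under that contract
// (the real body is not shown in this diff).
func (blockProviderScorer) BadPeers() []string { return []string{} }

func main() {
	var s blockProviderScorer
	fmt.Println(s.IsBadPeer("peer1"), s.BadPeers()) // false []
}
```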
@@ -119,7 +119,7 @@ func TestScorers_BlockProvider_Score(t *testing.T) {
	}

	for _, tt := range tests {
		t.Run(tt.name, func(*testing.T) {
		t.Run(tt.name, func(t *testing.T) {
			peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
				PeerLimit: 30,
				ScorerParams: &scorers.Config{
@@ -224,7 +224,7 @@ func TestScorers_BlockProvider_Sorted(t *testing.T) {
	}{
		{
			name:   "no peers",
			update: func(*scorers.BlockProviderScorer) {},
			update: func(s *scorers.BlockProviderScorer) {},
			have:   []peer.ID{},
			want:   []peer.ID{},
		},
@@ -451,7 +451,7 @@ func TestScorers_BlockProvider_FormatScorePretty(t *testing.T) {
		})
	}
	for _, tt := range tests {
		t.Run(tt.name, func(*testing.T) {
		t.Run(tt.name, func(t *testing.T) {
			peerStatuses := peerStatusGen()
			scorer := peerStatuses.Scorers().BlockProviderScorer()
			if tt.update != nil {
@@ -481,8 +481,8 @@ func TestScorers_BlockProvider_BadPeerMarking(t *testing.T) {
	})
	scorer := peerStatuses.Scorers().BlockProviderScorer()

	assert.NoError(t, scorer.IsBadPeer("peer1"), "Unexpected status for unregistered peer")
	assert.Equal(t, false, scorer.IsBadPeer("peer1"), "Unexpected status for unregistered peer")
	scorer.IncrementProcessedBlocks("peer1", 64)
	assert.NoError(t, scorer.IsBadPeer("peer1"))
	assert.Equal(t, false, scorer.IsBadPeer("peer1"))
	assert.Equal(t, 0, len(scorer.BadPeers()))
}

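The `func(*testing.T)` to `func(t *testing.T)` edits are not cosmetic: without a named parameter, any `t` used inside the subtest closure resolves to the outer test's `t`, so failures get attributed to the parent test rather than the named subtest. A small illustration (a hypothetical test, not one from this diff):

```go
package demo

import "testing"

func TestSubtestParameter(t *testing.T) {
	t.Run("named", func(t *testing.T) {
		// This t is the subtest's *testing.T: failures are reported
		// against TestSubtestParameter/named.
		t.Log("runs with the subtest context")
	})

	t.Run("unnamed", func(*testing.T) {
		// With no parameter name, this t is the outer test's t captured
		// by the closure, so failures would be attributed to the parent.
		t.Log("runs with the parent test's context")
	})
}
```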
@@ -2,7 +2,6 @@ package scorers

import (
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/peerdata"
	pbrpc "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)
@@ -52,24 +51,19 @@ func (s *GossipScorer) scoreNoLock(pid peer.ID) float64 {
}

// IsBadPeer states if the peer is to be considered bad.
func (s *GossipScorer) IsBadPeer(pid peer.ID) error {
func (s *GossipScorer) IsBadPeer(pid peer.ID) bool {
	s.store.RLock()
	defer s.store.RUnlock()
	return s.isBadPeerNoLock(pid)
}

// isBadPeerNoLock is lock-free version of IsBadPeer.
func (s *GossipScorer) isBadPeerNoLock(pid peer.ID) error {
func (s *GossipScorer) isBadPeerNoLock(pid peer.ID) bool {
	peerData, ok := s.store.PeerData(pid)
	if !ok {
		return nil
		return false
	}

	if peerData.GossipScore < gossipThreshold {
		return errors.Errorf("gossip score below threshold: got %f - threshold %f", peerData.GossipScore, gossipThreshold)
	}

	return nil
	return peerData.GossipScore < gossipThreshold
}

// BadPeers returns the peers that are considered bad.
@@ -79,7 +73,7 @@ func (s *GossipScorer) BadPeers() []peer.ID {

	badPeers := make([]peer.ID, 0)
	for pid := range s.store.Peers() {
		if s.isBadPeerNoLock(pid) != nil {
		if s.isBadPeerNoLock(pid) {
			badPeers = append(badPeers, pid)
		}
	}

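With this commit every scorer answers `IsBadPeer` with a `bool`, which makes composing them straightforward: an aggregate check is just an OR over the individual scorers. A self-contained sketch of that composition; the `service` wrapper and stub scorers are hypothetical, and peer IDs are reduced to strings to keep it runnable:

```go
package main

import "fmt"

// scorer is the shared shape the scorers in this diff now satisfy.
type scorer interface {
	IsBadPeer(pid string) bool
}

// service aggregates scorers the way a hypothetical scoring service might.
type service struct {
	scorers []scorer
}

// IsBadPeer reports true if any underlying scorer flags the peer.
func (s *service) IsBadPeer(pid string) bool {
	for _, sc := range s.scorers {
		if sc.IsBadPeer(pid) {
			return true
		}
	}
	return false
}

type alwaysGood struct{}

func (alwaysGood) IsBadPeer(string) bool { return false }

type flagged struct{ bad string }

func (f flagged) IsBadPeer(pid string) bool { return pid == f.bad }

func main() {
	svc := &service{scorers: []scorer{alwaysGood{}, flagged{bad: "peer2"}}}
	fmt.Println(svc.IsBadPeer("peer1"), svc.IsBadPeer("peer2")) // false true
}
```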
Some files were not shown because too many files have changed in this diff.