Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-09 13:28:01 -05:00)

Compare commits: c6c9414d8b...testRebase (51 commits)
| SHA1 |
|---|
| fb53a3817e |
| 577838ebfb |
| 8efd97d527 |
| d682840291 |
| 59726e8ea7 |
| 279a0a1cb7 |
| 8af287765b |
| ccc49e0c36 |
| 2ce5b45752 |
| f6ef7d51c1 |
| 3f43586628 |
| d82d1c33ac |
| 594a8fffff |
| f6f9ce74a0 |
| e5c39be32c |
| c942a79093 |
| 915041c485 |
| 1b48083776 |
| b3c54ec2e6 |
| 970908723f |
| faa26117de |
| 9d3472237f |
| 6f0b0ff6b4 |
| f7a4636c40 |
| e99ef55928 |
| ecf41951fb |
| 4ab8721f78 |
| 83e1c5b9df |
| 085fd6283b |
| 663ac056ab |
| 89ada9f8a6 |
| c3cdb36481 |
| 0e9f683562 |
| f3e5e7f495 |
| 264547bb4a |
| f03726f7b3 |
| a274bdfd3c |
| 4a83ab6d69 |
| 38d5a2bb13 |
| c7def8ae77 |
| 2083c70ccf |
| 3301b5107a |
| ec3164474a |
| 777f430ae8 |
| b28fc2965b |
| 4392b80154 |
| 0b5f5f58d5 |
| 9a9323d13a |
| 3ad99f64b6 |
| fd95f27356 |
| 36411c4f2a |
.bazelrc (1 change)
@@ -22,6 +22,7 @@ coverage --define=coverage_enabled=1
build --workspace_status_command=./hack/workspace_status.sh

build --define blst_disabled=false
build --compilation_mode=opt
run --define blst_disabled=false

build:blst_disabled --define blst_disabled=true
@@ -26,6 +26,7 @@ go_library(
|
||||
"receive_attestation.go",
|
||||
"receive_blob.go",
|
||||
"receive_block.go",
|
||||
"receive_data_column.go",
|
||||
"service.go",
|
||||
"tracked_proposer.go",
|
||||
"weak_subjectivity_checks.go",
|
||||
@@ -48,6 +49,7 @@ go_library(
|
||||
"//beacon-chain/core/feed:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
@@ -159,6 +161,7 @@ go_test(
|
||||
"//beacon-chain/operations/slashings:go_default_library",
|
||||
"//beacon-chain/operations/voluntaryexits:go_default_library",
|
||||
"//beacon-chain/p2p:go_default_library",
|
||||
"//beacon-chain/p2p/testing:go_default_library",
|
||||
"//beacon-chain/startup:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
|
||||
@@ -33,6 +33,7 @@ var (
)

var errMaxBlobsExceeded = errors.New("Expected commitments in block exceeds MAX_BLOBS_PER_BLOCK")
var errMaxDataColumnsExceeded = errors.New("Expected data columns for node exceeds NUMBER_OF_COLUMNS")

// An invalid block is the block that fails state transition based on the core protocol rules.
// The beacon node shall not be accepting nor building blocks that branch off from an invalid block.
@@ -3,6 +3,7 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"kzg.go",
|
||||
"trusted_setup.go",
|
||||
"validation.go",
|
||||
],
|
||||
@@ -12,6 +13,9 @@ go_library(
|
||||
deps = [
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"@com_github_crate_crypto_go_kzg_4844//:go_default_library",
|
||||
"@com_github_ethereum_c_kzg_4844//bindings/go:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//crypto/kzg4844:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
beacon-chain/blockchain/kzg/kzg.go (new file, 109 lines)
@@ -0,0 +1,109 @@
package kzg

import (
    "errors"

    ckzg4844 "github.com/ethereum/c-kzg-4844/bindings/go"
    "github.com/ethereum/go-ethereum/crypto/kzg4844"
)

// BytesPerBlob is the number of bytes in a single blob.
const BytesPerBlob = ckzg4844.BytesPerBlob

// Blob represents a serialized chunk of data.
type Blob [BytesPerBlob]byte

// BytesPerCell is the number of bytes in a single cell.
const BytesPerCell = ckzg4844.BytesPerCell

// Cell represents a chunk of an encoded Blob.
type Cell [BytesPerCell]byte

// Commitment represents a KZG commitment to a Blob.
type Commitment [48]byte

// Proof represents a KZG proof that attests to the validity of a Blob or parts of it.
type Proof [48]byte

// Bytes48 is a 48-byte array.
type Bytes48 = ckzg4844.Bytes48

// Bytes32 is a 32-byte array.
type Bytes32 = ckzg4844.Bytes32

// CellsAndProofs represents the Cells and Proofs corresponding to
// a single blob.
type CellsAndProofs struct {
    Cells  []Cell
    Proofs []Proof
}

func BlobToKZGCommitment(blob *Blob) (Commitment, error) {
    comm, err := kzg4844.BlobToCommitment(kzg4844.Blob(*blob))
    if err != nil {
        return Commitment{}, err
    }
    return Commitment(comm), nil
}

func ComputeBlobKZGProof(blob *Blob, commitment Commitment) (Proof, error) {
    proof, err := kzg4844.ComputeBlobProof(kzg4844.Blob(*blob), kzg4844.Commitment(commitment))
    if err != nil {
        return [48]byte{}, err
    }
    return Proof(proof), nil
}

func ComputeCellsAndKZGProofs(blob *Blob) (CellsAndProofs, error) {
    ckzgBlob := (*ckzg4844.Blob)(blob)
    ckzgCells, ckzgProofs, err := ckzg4844.ComputeCellsAndKZGProofs(ckzgBlob)
    if err != nil {
        return CellsAndProofs{}, err
    }

    return makeCellsAndProofs(ckzgCells[:], ckzgProofs[:])
}

func VerifyCellKZGProofBatch(commitmentsBytes []Bytes48, cellIndices []uint64, cells []Cell, proofsBytes []Bytes48) (bool, error) {
    // Convert `Cell` type to `ckzg4844.Cell`
    ckzgCells := make([]ckzg4844.Cell, len(cells))
    for i := range cells {
        ckzgCells[i] = ckzg4844.Cell(cells[i])
    }

    return ckzg4844.VerifyCellKZGProofBatch(commitmentsBytes, cellIndices, ckzgCells, proofsBytes)
}

func RecoverCellsAndKZGProofs(cellIndices []uint64, partialCells []Cell) (CellsAndProofs, error) {
    // Convert `Cell` type to `ckzg4844.Cell`
    ckzgPartialCells := make([]ckzg4844.Cell, len(partialCells))
    for i := range partialCells {
        ckzgPartialCells[i] = ckzg4844.Cell(partialCells[i])
    }

    ckzgCells, ckzgProofs, err := ckzg4844.RecoverCellsAndKZGProofs(cellIndices, ckzgPartialCells)
    if err != nil {
        return CellsAndProofs{}, err
    }

    return makeCellsAndProofs(ckzgCells[:], ckzgProofs[:])
}

// Convert cells/proofs to the CellsAndProofs type defined in this package.
func makeCellsAndProofs(ckzgCells []ckzg4844.Cell, ckzgProofs []ckzg4844.KZGProof) (CellsAndProofs, error) {
    if len(ckzgCells) != len(ckzgProofs) {
        return CellsAndProofs{}, errors.New("different number of cells/proofs")
    }

    var cells []Cell
    var proofs []Proof
    for i := range ckzgCells {
        cells = append(cells, Cell(ckzgCells[i]))
        proofs = append(proofs, Proof(ckzgProofs[i]))
    }

    return CellsAndProofs{
        Cells:  cells,
        Proofs: proofs,
    }, nil
}
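A minimal usage sketch of the wrappers above (a hypothetical caller, not part of this change; it assumes kzg.Start() from trusted_setup.go has been called so the backing libraries are initialized, and uses only the functions shown in the new file):

```go
package main

import (
    "fmt"

    "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
)

func main() {
    // Load the embedded trusted setup (defined in trusted_setup.go of this package).
    if err := kzg.Start(); err != nil {
        panic(err)
    }

    var blob kzg.Blob // the all-zero blob is a valid blob: every field element is zero

    commitment, err := kzg.BlobToKZGCommitment(&blob)
    if err != nil {
        panic(err)
    }

    // Extend the blob into cells and one proof per cell.
    cp, err := kzg.ComputeCellsAndKZGProofs(&blob)
    if err != nil {
        panic(err)
    }

    // Verify one (commitment, index, cell, proof) tuple per cell of the extended blob.
    var (
        commitments []kzg.Bytes48
        indices     []uint64
        proofs      []kzg.Bytes48
    )
    for i := range cp.Cells {
        commitments = append(commitments, kzg.Bytes48(commitment))
        indices = append(indices, uint64(i))
        proofs = append(proofs, kzg.Bytes48(cp.Proofs[i]))
    }
    ok, err := kzg.VerifyCellKZGProofBatch(commitments, indices, cp.Cells, proofs)
    fmt.Println(ok, err)
}
```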
@@ -5,6 +5,8 @@ import (
|
||||
"encoding/json"
|
||||
|
||||
GoKZG "github.com/crate-crypto/go-kzg-4844"
|
||||
CKZG "github.com/ethereum/c-kzg-4844/bindings/go"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
@@ -12,17 +14,53 @@ var (
|
||||
//go:embed trusted_setup.json
|
||||
embeddedTrustedSetup []byte // 1.2Mb
|
||||
kzgContext *GoKZG.Context
|
||||
kzgLoaded bool
|
||||
)
|
||||
|
||||
type TrustedSetup struct {
|
||||
G1Monomial [GoKZG.ScalarsPerBlob]GoKZG.G1CompressedHexStr `json:"g1_monomial"`
|
||||
G1Lagrange [GoKZG.ScalarsPerBlob]GoKZG.G1CompressedHexStr `json:"g1_lagrange"`
|
||||
G2Monomial [65]GoKZG.G2CompressedHexStr `json:"g2_monomial"`
|
||||
}
|
||||
|
||||
func Start() error {
|
||||
parsedSetup := GoKZG.JSONTrustedSetup{}
|
||||
err := json.Unmarshal(embeddedTrustedSetup, &parsedSetup)
|
||||
trustedSetup := &TrustedSetup{}
|
||||
err := json.Unmarshal(embeddedTrustedSetup, trustedSetup)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not parse trusted setup JSON")
|
||||
}
|
||||
kzgContext, err = GoKZG.NewContext4096(&parsedSetup)
|
||||
kzgContext, err = GoKZG.NewContext4096(&GoKZG.JSONTrustedSetup{
|
||||
SetupG2: trustedSetup.G2Monomial[:],
|
||||
SetupG1Lagrange: trustedSetup.G1Lagrange})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not initialize go-kzg context")
|
||||
}
|
||||
|
||||
// Length of a G1 point, converted from hex to binary.
|
||||
g1MonomialBytes := make([]byte, len(trustedSetup.G1Monomial)*(len(trustedSetup.G1Monomial[0])-2)/2)
|
||||
for i, g1 := range &trustedSetup.G1Monomial {
|
||||
copy(g1MonomialBytes[i*(len(g1)-2)/2:], hexutil.MustDecode(g1))
|
||||
}
|
||||
// Length of a G1 point, converted from hex to binary.
|
||||
g1LagrangeBytes := make([]byte, len(trustedSetup.G1Lagrange)*(len(trustedSetup.G1Lagrange[0])-2)/2)
|
||||
for i, g1 := range &trustedSetup.G1Lagrange {
|
||||
copy(g1LagrangeBytes[i*(len(g1)-2)/2:], hexutil.MustDecode(g1))
|
||||
}
|
||||
// Length of a G2 point, converted from hex to binary.
|
||||
g2MonomialBytes := make([]byte, len(trustedSetup.G2Monomial)*(len(trustedSetup.G2Monomial[0])-2)/2)
|
||||
for i, g2 := range &trustedSetup.G2Monomial {
|
||||
copy(g2MonomialBytes[i*(len(g2)-2)/2:], hexutil.MustDecode(g2))
|
||||
}
|
||||
if !kzgLoaded {
|
||||
// TODO: Provide a configuration option for this.
|
||||
var precompute uint = 0
|
||||
|
||||
// Free the current trusted setup before running this method. CKZG
|
||||
// panics if the same setup is run multiple times.
|
||||
if err = CKZG.LoadTrustedSetup(g1MonomialBytes, g1LagrangeBytes, g2MonomialBytes, precompute); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
kzgLoaded = true
|
||||
return nil
|
||||
}
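A note on the length arithmetic above: each setup point is a 0x-prefixed hex string, so (len(s)-2)/2 recovers its byte length. Assuming the standard compressed BLS12-381 encodings (48-byte G1 points, 96-byte G2 points), a G1 entry is 2 + 96 = 98 characters, giving (98-2)/2 = 48 bytes, and a G2 entry is 2 + 192 = 194 characters, giving (194-2)/2 = 96 bytes.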
|
||||
|
||||
(File diff suppressed because it is too large.)
@@ -118,9 +118,9 @@ func WithBLSToExecPool(p blstoexec.PoolManager) Option {
}

// WithP2PBroadcaster to broadcast messages after appropriate processing.
-func WithP2PBroadcaster(p p2p.Broadcaster) Option {
+func WithP2PBroadcaster(p p2p.Acceser) Option {
	return func(s *Service) error {
-		s.cfg.P2p = p
+		s.cfg.P2P = p
		return nil
	}
}
@@ -6,10 +6,14 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
|
||||
coreTime "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/das"
|
||||
@@ -499,7 +503,7 @@ func missingIndices(bs *filesystem.BlobStorage, root [32]byte, expected [][]byte
|
||||
}
|
||||
indices, err := bs.Indices(root)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, errors.Wrap(err, "indices")
|
||||
}
|
||||
missing := make(map[uint64]struct{}, len(expected))
|
||||
for i := range expected {
|
||||
@@ -513,12 +517,35 @@ func missingIndices(bs *filesystem.BlobStorage, root [32]byte, expected [][]byte
	return missing, nil
}

func missingDataColumns(bs *filesystem.BlobStorage, root [32]byte, expected map[uint64]bool) (map[uint64]bool, error) {
	if len(expected) == 0 {
		return nil, nil
	}
	if len(expected) > int(params.BeaconConfig().NumberOfColumns) {
		return nil, errMaxDataColumnsExceeded
	}
	indices, err := bs.ColumnIndices(root)
	if err != nil {
		return nil, err
	}
	missing := make(map[uint64]bool, len(expected))
	for col := range expected {
		if !indices[col] {
			missing[col] = true
		}
	}
	return missing, nil
}
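For orientation, a hedged sketch of how this helper is meant to be composed with the custody helpers (the wrapper name is hypothetical; it mirrors what isDataAvailableDataColumns below does inline):

```go
// missingCustodiedColumns is a hypothetical convenience wrapper, not part of this diff.
func (s *Service) missingCustodiedColumns(root [32]byte) (map[uint64]bool, error) {
	custody, err := peerdas.CustodyColumns(s.cfg.P2P.NodeID(), peerdas.CustodySubnetCount())
	if err != nil {
		return nil, err
	}
	// Only the custodied column indices that are not yet on disk for this root remain.
	return missingDataColumns(s.blobStorage, root, custody)
}
```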
|
||||
|
||||
// isDataAvailable blocks until all BlobSidecars committed to in the block are available,
|
||||
// or an error or context cancellation occurs. A nil result means that the data availability check is successful.
|
||||
// The function will first check the database to see if all sidecars have been persisted. If any
|
||||
// sidecars are missing, it will then read from the blobNotifier channel for the given root until the channel is
|
||||
// closed, the context hits cancellation/timeout, or notifications have been received for all the missing sidecars.
|
||||
func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed interfaces.ReadOnlySignedBeaconBlock) error {
|
||||
if coreTime.PeerDASIsActive(signed.Block().Slot()) {
|
||||
return s.isDataAvailableDataColumns(ctx, root, signed)
|
||||
}
|
||||
if signed.Version() < version.Deneb {
|
||||
return nil
|
||||
}
|
||||
@@ -548,7 +575,7 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
|
||||
// get a map of BlobSidecar indices that are not currently available.
|
||||
missing, err := missingIndices(s.blobStorage, root, kzgCommitments)
|
||||
if err != nil {
|
||||
return err
|
||||
return errors.Wrap(err, "missing indices")
|
||||
}
|
||||
// If there are no missing indices, all BlobSidecars are available.
|
||||
if len(missing) == 0 {
|
||||
@@ -567,8 +594,13 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
|
||||
if len(missing) == 0 {
|
||||
return
|
||||
}
|
||||
log.WithFields(daCheckLogFields(root, signed.Block().Slot(), expected, len(missing))).
|
||||
Error("Still waiting for DA check at slot end.")
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": signed.Block().Slot(),
|
||||
"root": fmt.Sprintf("%#x", root),
|
||||
"blobsExpected": expected,
|
||||
"blobsWaiting": len(missing),
|
||||
}).Error("Still waiting for blobs DA check at slot end.")
|
||||
})
|
||||
defer nst.Stop()
|
||||
}
|
||||
@@ -590,12 +622,100 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
|
||||
}
|
||||
}
|
||||
|
||||
func daCheckLogFields(root [32]byte, slot primitives.Slot, expected, missing int) logrus.Fields {
|
||||
return logrus.Fields{
|
||||
"slot": slot,
|
||||
"root": fmt.Sprintf("%#x", root),
|
||||
"blobsExpected": expected,
|
||||
"blobsWaiting": missing,
|
||||
func (s *Service) isDataAvailableDataColumns(ctx context.Context, root [32]byte, signed interfaces.ReadOnlySignedBeaconBlock) error {
|
||||
if signed.Version() < version.Deneb {
|
||||
return nil
|
||||
}
|
||||
|
||||
block := signed.Block()
|
||||
if block == nil {
|
||||
return errors.New("invalid nil beacon block")
|
||||
}
|
||||
// We are only required to check within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
|
||||
if !params.WithinDAPeriod(slots.ToEpoch(block.Slot()), slots.ToEpoch(s.CurrentSlot())) {
|
||||
return nil
|
||||
}
|
||||
body := block.Body()
|
||||
if body == nil {
|
||||
return errors.New("invalid nil beacon block body")
|
||||
}
|
||||
kzgCommitments, err := body.BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get KZG commitments")
|
||||
}
|
||||
// If the block has no commitments, there is nothing to wait for.
|
||||
if len(kzgCommitments) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
colMap, err := peerdas.CustodyColumns(s.cfg.P2P.NodeID(), peerdas.CustodySubnetCount())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Expected is the number of custodied data columns a node is expected to have.
|
||||
expected := len(colMap)
|
||||
if expected == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Subscribe to newly stored data columns in the database.
|
||||
rootIndexChan := make(chan filesystem.RootIndexPair)
|
||||
subscription := s.blobStorage.DataColumnFeed.Subscribe(rootIndexChan)
|
||||
defer subscription.Unsubscribe()
|
||||
|
||||
// Get a map of data column indices that are not currently available.
|
||||
missing, err := missingDataColumns(s.blobStorage, root, colMap)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// If there are no missing indices, all data column sidecars are available.
|
||||
// This is the happy path.
|
||||
if len(missing) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Log for DA checks that cross over into the next slot; helpful for debugging.
|
||||
nextSlot := slots.BeginsAt(signed.Block().Slot()+1, s.genesisTime)
|
||||
// Avoid logging if DA check is called after next slot start.
|
||||
if nextSlot.After(time.Now()) {
|
||||
nst := time.AfterFunc(time.Until(nextSlot), func() {
|
||||
if len(missing) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": signed.Block().Slot(),
|
||||
"root": fmt.Sprintf("%#x", root),
|
||||
"columnsExpected": expected,
|
||||
"columnsWaiting": len(missing),
|
||||
}).Error("Still waiting for data columns DA check at slot end.")
|
||||
})
|
||||
defer nst.Stop()
|
||||
}
|
||||
for {
|
||||
select {
|
||||
case rootIndex := <-rootIndexChan:
|
||||
if rootIndex.Root != root {
|
||||
// This is not the root we are looking for.
|
||||
continue
|
||||
}
|
||||
|
||||
// Remove the index from the missing map.
|
||||
delete(missing, rootIndex.Index)
|
||||
|
||||
// Exit if there is no more missing data columns.
|
||||
if len(missing) == 0 {
|
||||
return nil
|
||||
}
|
||||
case <-ctx.Done():
|
||||
missingIndexes := make([]uint64, 0, len(missing))
|
||||
for val := range missing {
|
||||
copiedVal := val
|
||||
missingIndexes = append(missingIndexes, copiedVal)
|
||||
}
|
||||
return errors.Wrapf(ctx.Err(), "context deadline waiting for data column sidecars slot: %d, BlockRoot: %#x, missing %v", block.Slot(), root, missingIndexes)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -51,6 +51,12 @@ type BlobReceiver interface {
|
||||
ReceiveBlob(context.Context, blocks.VerifiedROBlob) error
|
||||
}
|
||||
|
||||
// DataColumnReceiver interface defines the methods of chain service for receiving new
|
||||
// data columns
|
||||
type DataColumnReceiver interface {
|
||||
ReceiveDataColumn(context.Context, blocks.VerifiedRODataColumn) error
|
||||
}
|
||||
|
||||
// SlashingReceiver interface defines the methods of chain service for receiving validated slashing over the wire.
|
||||
type SlashingReceiver interface {
|
||||
ReceiveAttesterSlashing(ctx context.Context, slashing ethpb.AttSlashing)
|
||||
|
||||
beacon-chain/blockchain/receive_data_column.go (new file, 16 lines)
@@ -0,0 +1,16 @@
package blockchain

import (
	"context"

	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
)

func (s *Service) ReceiveDataColumn(ctx context.Context, ds blocks.VerifiedRODataColumn) error {
	if err := s.blobStorage.SaveDataColumn(ds); err != nil {
		return errors.Wrap(err, "save data column")
	}

	return nil
}
@@ -81,7 +81,7 @@ type config struct {
|
||||
ExitPool voluntaryexits.PoolManager
|
||||
SlashingPool slashings.PoolManager
|
||||
BLSToExecPool blstoexec.PoolManager
|
||||
P2p p2p.Broadcaster
|
||||
P2P p2p.Acceser
|
||||
MaxRoutines int
|
||||
StateNotifier statefeed.Notifier
|
||||
ForkChoiceStore f.ForkChoicer
|
||||
@@ -106,15 +106,17 @@ var ErrMissingClockSetter = errors.New("blockchain Service initialized without a
|
||||
type blobNotifierMap struct {
|
||||
sync.RWMutex
|
||||
notifiers map[[32]byte]chan uint64
|
||||
seenIndex map[[32]byte][fieldparams.MaxBlobsPerBlock]bool
|
||||
seenIndex map[[32]byte][fieldparams.NumberOfColumns]bool
|
||||
}
|
||||
|
||||
// notifyIndex notifies a blob by its index for a given root.
|
||||
// It uses internal maps to keep track of seen indices and notifier channels.
|
||||
func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64) {
|
||||
if idx >= fieldparams.MaxBlobsPerBlock {
|
||||
return
|
||||
}
|
||||
// TODO: Separate Data Columns from blobs
|
||||
/*
|
||||
if idx >= fieldparams.MaxBlobsPerBlock {
|
||||
return
|
||||
}*/
|
||||
|
||||
bn.Lock()
|
||||
seen := bn.seenIndex[root]
|
||||
@@ -128,7 +130,7 @@ func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64) {
|
||||
// Retrieve or create the notifier channel for the given root.
|
||||
c, ok := bn.notifiers[root]
|
||||
if !ok {
|
||||
c = make(chan uint64, fieldparams.MaxBlobsPerBlock)
|
||||
c = make(chan uint64, fieldparams.NumberOfColumns)
|
||||
bn.notifiers[root] = c
|
||||
}
|
||||
|
||||
@@ -142,7 +144,7 @@ func (bn *blobNotifierMap) forRoot(root [32]byte) chan uint64 {
|
||||
defer bn.Unlock()
|
||||
c, ok := bn.notifiers[root]
|
||||
if !ok {
|
||||
c = make(chan uint64, fieldparams.MaxBlobsPerBlock)
|
||||
c = make(chan uint64, fieldparams.NumberOfColumns)
|
||||
bn.notifiers[root] = c
|
||||
}
|
||||
return c
|
||||
@@ -168,7 +170,7 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
bn := &blobNotifierMap{
|
||||
notifiers: make(map[[32]byte]chan uint64),
|
||||
seenIndex: make(map[[32]byte][fieldparams.MaxBlobsPerBlock]bool),
|
||||
seenIndex: make(map[[32]byte][fieldparams.NumberOfColumns]bool),
|
||||
}
|
||||
srv := &Service{
|
||||
ctx: ctx,
|
||||
|
||||
@@ -97,7 +97,7 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
|
||||
WithAttestationPool(attestations.NewPool()),
|
||||
WithSlashingPool(slashings.NewPool()),
|
||||
WithExitPool(voluntaryexits.NewPool()),
|
||||
WithP2PBroadcaster(&mockBroadcaster{}),
|
||||
WithP2PBroadcaster(&mockAccesser{}),
|
||||
WithStateNotifier(&mockBeaconNode{}),
|
||||
WithForkChoiceStore(fc),
|
||||
WithAttestationService(attService),
|
||||
@@ -580,7 +580,7 @@ func (s *MockClockSetter) SetClock(g *startup.Clock) error {
|
||||
func TestNotifyIndex(t *testing.T) {
|
||||
// Initialize a blobNotifierMap
|
||||
bn := &blobNotifierMap{
|
||||
seenIndex: make(map[[32]byte][fieldparams.MaxBlobsPerBlock]bool),
|
||||
seenIndex: make(map[[32]byte][fieldparams.NumberOfColumns]bool),
|
||||
notifiers: make(map[[32]byte]chan uint64),
|
||||
}
|
||||
|
||||
|
||||
@@ -19,6 +19,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/attestations"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/blstoexec"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
|
||||
p2pTesting "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/testing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state/stategen"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
@@ -45,6 +46,11 @@ type mockBroadcaster struct {
|
||||
broadcastCalled bool
|
||||
}
|
||||
|
||||
type mockAccesser struct {
|
||||
mockBroadcaster
|
||||
p2pTesting.MockPeerManager
|
||||
}
|
||||
|
||||
func (mb *mockBroadcaster) Broadcast(_ context.Context, _ proto.Message) error {
|
||||
mb.broadcastCalled = true
|
||||
return nil
|
||||
@@ -65,6 +71,11 @@ func (mb *mockBroadcaster) BroadcastBlob(_ context.Context, _ uint64, _ *ethpb.B
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mb *mockBroadcaster) BroadcastDataColumn(_ context.Context, _ uint64, _ *ethpb.DataColumnSidecar) error {
|
||||
mb.broadcastCalled = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mb *mockBroadcaster) BroadcastBLSChanges(_ context.Context, _ []*ethpb.SignedBLSToExecutionChange) {
|
||||
}
|
||||
|
||||
|
||||
@@ -628,6 +628,11 @@ func (c *ChainService) ReceiveBlob(_ context.Context, b blocks.VerifiedROBlob) e
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReceiveDataColumn implements the same method in chain service
|
||||
func (c *ChainService) ReceiveDataColumn(_ context.Context, _ blocks.VerifiedRODataColumn) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// TargetRootForEpoch mocks the same method in the chain service
|
||||
func (c *ChainService) TargetRootForEpoch(_ [32]byte, _ primitives.Epoch) ([32]byte, error) {
|
||||
return c.TargetRoot, nil
|
||||
|
||||
beacon-chain/cache/BUILD.bazel (vendored, 1 change)
@@ -8,6 +8,7 @@ go_library(
|
||||
"attestation_data.go",
|
||||
"balance_cache_key.go",
|
||||
"checkpoint_state.go",
|
||||
"column_subnet_ids.go",
|
||||
"committee.go",
|
||||
"committee_disabled.go", # keep
|
||||
"committees.go",
|
||||
|
||||
beacon-chain/cache/column_subnet_ids.go (vendored, new file, 65 lines)
@@ -0,0 +1,65 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/patrickmn/go-cache"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
)
|
||||
|
||||
type columnSubnetIDs struct {
|
||||
colSubCache *cache.Cache
|
||||
colSubLock sync.RWMutex
|
||||
}
|
||||
|
||||
// ColumnSubnetIDs for column subnet participants
|
||||
var ColumnSubnetIDs = newColumnSubnetIDs()
|
||||
|
||||
const columnKey = "columns"
|
||||
|
||||
func newColumnSubnetIDs() *columnSubnetIDs {
|
||||
epochDuration := time.Duration(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
|
||||
// Set the default duration of a column subnet subscription as the column expiry period.
|
||||
subLength := epochDuration * time.Duration(params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest)
|
||||
persistentCache := cache.New(subLength*time.Second, epochDuration*time.Second)
|
||||
return &columnSubnetIDs{colSubCache: persistentCache}
|
||||
}
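On the duration arithmetic above (a reading of the code, using illustrative mainnet-style values of 32 slots per epoch and 12 seconds per slot): epochDuration is the bare product 32 * 12 = 384 wrapped in a time.Duration, and it only becomes wall-clock time when multiplied by time.Second in the cache.New call, so the cleanup interval is 384 s (one epoch) and the default entry expiry is 384 s * MinEpochsForDataColumnSidecarsRequest.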
|
||||
|
||||
// GetColumnSubnets retrieves the data column subnets.
|
||||
func (s *columnSubnetIDs) GetColumnSubnets() ([]uint64, bool, time.Time) {
|
||||
s.colSubLock.RLock()
|
||||
defer s.colSubLock.RUnlock()
|
||||
|
||||
id, duration, ok := s.colSubCache.GetWithExpiration(columnKey)
|
||||
if !ok {
|
||||
return nil, false, time.Time{}
|
||||
}
|
||||
// Retrieve indices from the cache.
|
||||
idxs, ok := id.([]uint64)
|
||||
if !ok {
|
||||
return nil, false, time.Time{}
|
||||
}
|
||||
|
||||
return idxs, ok, duration
|
||||
}
|
||||
|
||||
// AddColumnSubnets adds the relevant data column subnets.
|
||||
func (s *columnSubnetIDs) AddColumnSubnets(colIdx []uint64) {
|
||||
s.colSubLock.Lock()
|
||||
defer s.colSubLock.Unlock()
|
||||
|
||||
s.colSubCache.Set(columnKey, colIdx, 0)
|
||||
}
|
||||
|
||||
// EmptyAllCaches empties out all the related caches and flushes any stored
|
||||
// entries on them. This should only ever be used for testing, in normal
|
||||
// production, handling of the relevant subnets for each role is done
|
||||
// separately.
|
||||
func (s *columnSubnetIDs) EmptyAllCaches() {
|
||||
// Clear the cache.
|
||||
s.colSubLock.Lock()
|
||||
defer s.colSubLock.Unlock()
|
||||
|
||||
s.colSubCache.Flush()
|
||||
}
|
||||
@@ -96,6 +96,24 @@ func VerifyBlockHeaderSignature(beaconState state.BeaconState, header *ethpb.Sig
|
||||
return signing.VerifyBlockHeaderSigningRoot(header.Header, proposerPubKey, header.Signature, domain)
|
||||
}
|
||||
|
||||
func VerifyBlockHeaderSignatureUsingCurrentFork(beaconState state.BeaconState, header *ethpb.SignedBeaconBlockHeader) error {
|
||||
currentEpoch := slots.ToEpoch(header.Header.Slot)
|
||||
fork, err := forks.Fork(currentEpoch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
domain, err := signing.Domain(fork, currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorsRoot())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
proposer, err := beaconState.ValidatorAtIndex(header.Header.ProposerIndex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
proposerPubKey := proposer.PublicKey
|
||||
return signing.VerifyBlockHeaderSigningRoot(header.Header, proposerPubKey, header.Signature, domain)
|
||||
}
|
||||
|
||||
// VerifyBlockSignatureUsingCurrentFork verifies the proposer signature of a beacon block. This differs
|
||||
// from the above method by not using fork data from the state and instead retrieving it
|
||||
// via the respective epoch.
|
||||
|
||||
@@ -32,6 +32,9 @@ const (
|
||||
|
||||
// AttesterSlashingReceived is sent after an attester slashing is received from gossip or rpc
|
||||
AttesterSlashingReceived = 8
|
||||
|
||||
// DataColumnSidecarReceived is sent after a data column sidecar is received from gossip or rpc.
|
||||
DataColumnSidecarReceived = 9
|
||||
)
|
||||
|
||||
// UnAggregatedAttReceivedData is the data sent with UnaggregatedAttReceived events.
|
||||
@@ -77,3 +80,7 @@ type ProposerSlashingReceivedData struct {
|
||||
type AttesterSlashingReceivedData struct {
|
||||
AttesterSlashing ethpb.AttSlashing
|
||||
}
|
||||
|
||||
type DataColumnSidecarReceivedData struct {
|
||||
DataColumn *blocks.VerifiedRODataColumn
|
||||
}
|
||||
|
||||
beacon-chain/core/peerdas/BUILD.bazel (new file, 38 lines)
@@ -0,0 +1,38 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["helpers.go"],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//crypto/hash:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
|
||||
"@com_github_holiman_uint256//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["helpers_test.go"],
|
||||
deps = [
|
||||
":go_default_library",
|
||||
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"@com_github_consensys_gnark_crypto//ecc/bls12-381/fr:go_default_library",
|
||||
"@com_github_crate_crypto_go_kzg_4844//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
)
|
||||
beacon-chain/core/peerdas/helpers.go (new file, 359 lines)
@@ -0,0 +1,359 @@
|
||||
package peerdas
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"math"
|
||||
"math/big"
|
||||
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
"github.com/holiman/uint256"
|
||||
errors "github.com/pkg/errors"
|
||||
|
||||
kzg "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/hash"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
const (
|
||||
CustodySubnetCountEnrKey = "csc"
|
||||
)
|
||||
|
||||
// https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/p2p-interface.md#the-discovery-domain-discv5
|
||||
type Csc uint64
|
||||
|
||||
func (Csc) ENRKey() string { return CustodySubnetCountEnrKey }
|
||||
|
||||
var (
|
||||
// Custom errors
|
||||
errCustodySubnetCountTooLarge = errors.New("custody subnet count larger than data column sidecar subnet count")
|
||||
errIndexTooLarge = errors.New("column index is larger than the specified columns count")
|
||||
errMismatchLength = errors.New("mismatch in the length of the commitments and proofs")
|
||||
errRecordNil = errors.New("record is nil")
|
||||
errCannotLoadCustodySubnetCount = errors.New("cannot load the custody subnet count from peer")
|
||||
|
||||
// maxUint256 is the maximum value of a uint256.
|
||||
maxUint256 = &uint256.Int{math.MaxUint64, math.MaxUint64, math.MaxUint64, math.MaxUint64}
|
||||
)
|
||||
|
||||
// CustodyColumnSubnets computes the subnets the node should participate in for custody.
|
||||
func CustodyColumnSubnets(nodeId enode.ID, custodySubnetCount uint64) (map[uint64]bool, error) {
|
||||
dataColumnSidecarSubnetCount := params.BeaconConfig().DataColumnSidecarSubnetCount
|
||||
|
||||
// Check if the custody subnet count is larger than the data column sidecar subnet count.
|
||||
if custodySubnetCount > dataColumnSidecarSubnetCount {
|
||||
return nil, errCustodySubnetCountTooLarge
|
||||
}
|
||||
|
||||
// First, compute the subnet IDs that the node should participate in.
|
||||
subnetIds := make(map[uint64]bool, custodySubnetCount)
|
||||
|
||||
one := uint256.NewInt(1)
|
||||
|
||||
for currentId := new(uint256.Int).SetBytes(nodeId.Bytes()); uint64(len(subnetIds)) < custodySubnetCount; currentId.Add(currentId, one) {
|
||||
// Convert to big endian bytes.
|
||||
currentIdBytesBigEndian := currentId.Bytes32()
|
||||
|
||||
// Convert to little endian.
|
||||
currentIdBytesLittleEndian := bytesutil.ReverseByteOrder(currentIdBytesBigEndian[:])
|
||||
|
||||
// Hash the result.
|
||||
hashedCurrentId := hash.Hash(currentIdBytesLittleEndian)
|
||||
|
||||
// Get the subnet ID.
|
||||
subnetId := binary.LittleEndian.Uint64(hashedCurrentId[:8]) % dataColumnSidecarSubnetCount
|
||||
|
||||
// Add the subnet to the map.
|
||||
subnetIds[subnetId] = true
|
||||
|
||||
// Overflow prevention.
|
||||
if currentId.Cmp(maxUint256) == 0 {
|
||||
currentId = uint256.NewInt(0)
|
||||
}
|
||||
}
|
||||
|
||||
return subnetIds, nil
|
||||
}
|
||||
|
||||
// CustodyColumns computes the columns the node should custody.
|
||||
// https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/das-core.md#helper-functions
|
||||
func CustodyColumns(nodeId enode.ID, custodySubnetCount uint64) (map[uint64]bool, error) {
|
||||
dataColumnSidecarSubnetCount := params.BeaconConfig().DataColumnSidecarSubnetCount
|
||||
|
||||
// Compute the custodied subnets.
|
||||
subnetIds, err := CustodyColumnSubnets(nodeId, custodySubnetCount)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "custody subnets")
|
||||
}
|
||||
|
||||
columnsPerSubnet := fieldparams.NumberOfColumns / dataColumnSidecarSubnetCount
|
||||
|
||||
// Knowing the subnet ID and the number of columns per subnet, select all the columns the node should custody.
|
||||
// Columns belonging to the same subnet are contiguous.
|
||||
columnIndices := make(map[uint64]bool, custodySubnetCount*columnsPerSubnet)
|
||||
for i := uint64(0); i < columnsPerSubnet; i++ {
|
||||
for subnetId := range subnetIds {
|
||||
columnIndex := dataColumnSidecarSubnetCount*i + subnetId
|
||||
columnIndices[columnIndex] = true
|
||||
}
|
||||
}
|
||||
|
||||
return columnIndices, nil
|
||||
}
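A worked example of the mapping (illustrative numbers only: 128 columns, as exercised by the tests below, and an assumed 32 data column sidecar subnets): columnsPerSubnet = 128/32 = 4, and custodying subnet s yields column indices s, s+32, s+64, s+96, i.e. the indices for one subnet are spaced dataColumnSidecarSubnetCount apart. With custodySubnetCount = 1 a node custodies 4 columns; subscribing to all 32 subnets covers every column.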
|
||||
|
||||
// DataColumnSidecars computes the data column sidecars from the signed block and blobs.
|
||||
// https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/das-core.md#recover_matrix
|
||||
func DataColumnSidecars(signedBlock interfaces.ReadOnlySignedBeaconBlock, blobs []kzg.Blob) ([]*ethpb.DataColumnSidecar, error) {
|
||||
blobsCount := len(blobs)
|
||||
if blobsCount == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Get the signed block header.
|
||||
signedBlockHeader, err := signedBlock.Header()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "signed block header")
|
||||
}
|
||||
|
||||
// Get the block body.
|
||||
block := signedBlock.Block()
|
||||
blockBody := block.Body()
|
||||
|
||||
// Get the blob KZG commitments.
|
||||
blobKzgCommitments, err := blockBody.BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "blob KZG commitments")
|
||||
}
|
||||
|
||||
// Compute the KZG commitments inclusion proof.
|
||||
kzgCommitmentsInclusionProof, err := blocks.MerkleProofKZGCommitments(blockBody)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "merkle proof ZKG commitments")
|
||||
}
|
||||
|
||||
// Compute cells and proofs.
|
||||
cellsAndProofs := make([]kzg.CellsAndProofs, 0, blobsCount)
|
||||
|
||||
for i := range blobs {
|
||||
blob := &blobs[i]
|
||||
blobCellsAndProofs, err := kzg.ComputeCellsAndKZGProofs(blob)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "compute cells and KZG proofs")
|
||||
}
|
||||
|
||||
cellsAndProofs = append(cellsAndProofs, blobCellsAndProofs)
|
||||
}
|
||||
|
||||
// Get the column sidecars.
|
||||
sidecars := make([]*ethpb.DataColumnSidecar, 0, fieldparams.NumberOfColumns)
|
||||
for columnIndex := uint64(0); columnIndex < fieldparams.NumberOfColumns; columnIndex++ {
|
||||
column := make([]kzg.Cell, 0, blobsCount)
|
||||
kzgProofOfColumn := make([]kzg.Proof, 0, blobsCount)
|
||||
|
||||
for rowIndex := 0; rowIndex < blobsCount; rowIndex++ {
|
||||
cellsForRow := cellsAndProofs[rowIndex].Cells
|
||||
proofsForRow := cellsAndProofs[rowIndex].Proofs
|
||||
|
||||
cell := cellsForRow[columnIndex]
|
||||
column = append(column, cell)
|
||||
|
||||
kzgProof := proofsForRow[columnIndex]
|
||||
kzgProofOfColumn = append(kzgProofOfColumn, kzgProof)
|
||||
}
|
||||
|
||||
columnBytes := make([][]byte, 0, blobsCount)
|
||||
for i := range column {
|
||||
columnBytes = append(columnBytes, column[i][:])
|
||||
}
|
||||
|
||||
kzgProofOfColumnBytes := make([][]byte, 0, blobsCount)
|
||||
for _, kzgProof := range kzgProofOfColumn {
|
||||
copiedProof := kzgProof
|
||||
kzgProofOfColumnBytes = append(kzgProofOfColumnBytes, copiedProof[:])
|
||||
}
|
||||
|
||||
sidecar := ðpb.DataColumnSidecar{
|
||||
ColumnIndex: columnIndex,
|
||||
DataColumn: columnBytes,
|
||||
KzgCommitments: blobKzgCommitments,
|
||||
KzgProof: kzgProofOfColumnBytes,
|
||||
SignedBlockHeader: signedBlockHeader,
|
||||
KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
|
||||
}
|
||||
|
||||
sidecars = append(sidecars, sidecar)
|
||||
}
|
||||
|
||||
return sidecars, nil
|
||||
}
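The nested loops above effectively transpose a blobsCount x NumberOfColumns matrix of cells: sidecar j carries cell j and proof j of every blob, plus the block's full commitment list and inclusion proof. For two blobs, the shape is roughly:

    blob 0 cells:  c00 c01 c02 ...
    blob 1 cells:  c10 c11 c12 ...
    sidecar j:     DataColumn = [c0j, c1j], KzgProof = [p0j, p1j]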
|
||||
|
||||
// DataColumnSidecarsForReconstruct is a TEMPORARY function until there is an official specification for it.
|
||||
// It is scheduled for deletion.
|
||||
func DataColumnSidecarsForReconstruct(
|
||||
blobKzgCommitments [][]byte,
|
||||
signedBlockHeader *ethpb.SignedBeaconBlockHeader,
|
||||
kzgCommitmentsInclusionProof [][]byte,
|
||||
cellsAndProofs []kzg.CellsAndProofs,
|
||||
) ([]*ethpb.DataColumnSidecar, error) {
|
||||
// Each CellsAndProofs corresponds to a Blob
|
||||
// So we can get the BlobCount by checking the length of CellsAndProofs
|
||||
blobsCount := len(cellsAndProofs)
|
||||
if blobsCount == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Get the column sidecars.
|
||||
sidecars := make([]*ethpb.DataColumnSidecar, 0, fieldparams.NumberOfColumns)
|
||||
for columnIndex := uint64(0); columnIndex < fieldparams.NumberOfColumns; columnIndex++ {
|
||||
column := make([]kzg.Cell, 0, blobsCount)
|
||||
kzgProofOfColumn := make([]kzg.Proof, 0, blobsCount)
|
||||
|
||||
for rowIndex := 0; rowIndex < blobsCount; rowIndex++ {
|
||||
cellsForRow := cellsAndProofs[rowIndex].Cells
|
||||
proofsForRow := cellsAndProofs[rowIndex].Proofs
|
||||
|
||||
cell := cellsForRow[columnIndex]
|
||||
column = append(column, cell)
|
||||
|
||||
kzgProof := proofsForRow[columnIndex]
|
||||
kzgProofOfColumn = append(kzgProofOfColumn, kzgProof)
|
||||
}
|
||||
|
||||
columnBytes := make([][]byte, 0, blobsCount)
|
||||
for i := range column {
|
||||
columnBytes = append(columnBytes, column[i][:])
|
||||
}
|
||||
|
||||
kzgProofOfColumnBytes := make([][]byte, 0, blobsCount)
|
||||
for _, kzgProof := range kzgProofOfColumn {
|
||||
copiedProof := kzgProof
|
||||
kzgProofOfColumnBytes = append(kzgProofOfColumnBytes, copiedProof[:])
|
||||
}
|
||||
|
||||
sidecar := ðpb.DataColumnSidecar{
|
||||
ColumnIndex: columnIndex,
|
||||
DataColumn: columnBytes,
|
||||
KzgCommitments: blobKzgCommitments,
|
||||
KzgProof: kzgProofOfColumnBytes,
|
||||
SignedBlockHeader: signedBlockHeader,
|
||||
KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
|
||||
}
|
||||
|
||||
sidecars = append(sidecars, sidecar)
|
||||
}
|
||||
|
||||
return sidecars, nil
|
||||
}
|
||||
|
||||
// VerifyDataColumnSidecarKZGProofs verifies the provided KZG Proofs for the particular
|
||||
// data column.
|
||||
func VerifyDataColumnSidecarKZGProofs(sc *ethpb.DataColumnSidecar) (bool, error) {
|
||||
if sc.ColumnIndex >= params.BeaconConfig().NumberOfColumns {
|
||||
return false, errIndexTooLarge
|
||||
}
|
||||
if len(sc.DataColumn) != len(sc.KzgCommitments) || len(sc.KzgCommitments) != len(sc.KzgProof) {
|
||||
return false, errMismatchLength
|
||||
}
|
||||
|
||||
var commitments []kzg.Bytes48
|
||||
var indices []uint64
|
||||
var cells []kzg.Cell
|
||||
var proofs []kzg.Bytes48
|
||||
for i := range sc.DataColumn {
|
||||
commitments = append(commitments, kzg.Bytes48(sc.KzgCommitments[i]))
|
||||
indices = append(indices, sc.ColumnIndex)
|
||||
cells = append(cells, kzg.Cell(sc.DataColumn[i]))
|
||||
proofs = append(proofs, kzg.Bytes48(sc.KzgProof[i]))
|
||||
}
|
||||
|
||||
return kzg.VerifyCellKZGProofBatch(commitments, indices, cells, proofs)
|
||||
}
|
||||
|
||||
// CustodySubnetCount returns the number of subnets the node should participate in for custody.
|
||||
func CustodySubnetCount() uint64 {
|
||||
count := params.BeaconConfig().CustodyRequirement
|
||||
if flags.Get().SubscribeToAllSubnets {
|
||||
count = params.BeaconConfig().DataColumnSidecarSubnetCount
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
// HypergeomCDF computes the hypergeometric cumulative distribution function.
|
||||
// https://en.wikipedia.org/wiki/Hypergeometric_distribution
|
||||
func HypergeomCDF(k, M, n, N uint64) float64 {
|
||||
denominatorInt := new(big.Int).Binomial(int64(M), int64(N)) // lint:ignore uintcast
|
||||
denominator := new(big.Float).SetInt(denominatorInt)
|
||||
|
||||
rBig := big.NewFloat(0)
|
||||
|
||||
for i := uint64(0); i < k+1; i++ {
|
||||
a := new(big.Int).Binomial(int64(n), int64(i)) // lint:ignore uintcast
|
||||
b := new(big.Int).Binomial(int64(M-n), int64(N-i))
|
||||
numeratorInt := new(big.Int).Mul(a, b)
|
||||
numerator := new(big.Float).SetInt(numeratorInt)
|
||||
item := new(big.Float).Quo(numerator, denominator)
|
||||
rBig.Add(rBig, item)
|
||||
}
|
||||
|
||||
r, _ := rBig.Float64()
|
||||
|
||||
return r
|
||||
}
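In formula form, the loop above evaluates the hypergeometric CDF

    P(X <= k) = sum over i = 0..k of C(n, i) * C(M - n, N - i) / C(M, N)

where M is the population size, n the number of successes in the population, and N the sample size.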
|
||||
|
||||
// ExtendedSampleCount computes, for a given number of samples per slot and allowed failures the
|
||||
// number of samples we should actually query from peers.
|
||||
// TODO: Add link to the specification once it is available.
|
||||
func ExtendedSampleCount(samplesPerSlot, allowedFailures uint64) uint64 {
|
||||
// Retrieve the columns count
|
||||
columnsCount := params.BeaconConfig().NumberOfColumns
|
||||
|
||||
// If half of the columns are missing, we are able to reconstruct the data.
|
||||
// If half of the columns + 1 are missing, we are not able to reconstruct the data.
|
||||
// This is the smallest worst case.
|
||||
worstCaseMissing := columnsCount/2 + 1
|
||||
|
||||
// Compute the false positive threshold.
|
||||
falsePositiveThreshold := HypergeomCDF(0, columnsCount, worstCaseMissing, samplesPerSlot)
|
||||
|
||||
var sampleCount uint64
|
||||
|
||||
// Finally, compute the extended sample count.
|
||||
for sampleCount = samplesPerSlot; sampleCount < columnsCount+1; sampleCount++ {
|
||||
if HypergeomCDF(allowedFailures, columnsCount, worstCaseMissing, sampleCount) <= falsePositiveThreshold {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return sampleCount
|
||||
}
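To read the loop in numbers (using the 128-column value exercised by the tests below): worstCaseMissing = 128/2 + 1 = 65, the threshold is HypergeomCDF(0, 128, 65, samplesPerSlot) — the probability that a query of samplesPerSlot columns sees no missing column even though 65 are missing — and sampleCount grows until tolerating allowedFailures misses keeps the false-positive probability at or below that threshold. That is why the table in TestExtendedSampleCount maps allowedFailures 0 to 16 and 5 to 32 for samplesPerSlot = 16.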
|
||||
|
||||
func CustodyCountFromRecord(record *enr.Record) (uint64, error) {
|
||||
// By default, we assume the peer custodies the minimum number of subnets.
|
||||
if record == nil {
|
||||
return 0, errRecordNil
|
||||
}
|
||||
|
||||
// Load the `custody_subnet_count`
|
||||
var csc Csc
|
||||
if err := record.Load(&csc); err != nil {
|
||||
return 0, errCannotLoadCustodySubnetCount
|
||||
}
|
||||
|
||||
return uint64(csc), nil
|
||||
}
|
||||
|
||||
func CanSelfReconstruct(numCol uint64) bool {
|
||||
total := params.BeaconConfig().NumberOfColumns
|
||||
// if total is odd, then we need total / 2 + 1 columns to reconstruct
|
||||
// if total is even, then we need total / 2 columns to reconstruct
|
||||
columnsNeeded := total/2 + total%2
|
||||
return numCol >= columnsNeeded
|
||||
}
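Here columnsNeeded = total/2 + total%2 is simply ceil(total/2): with 128 columns a node can self-reconstruct once it holds at least 64 of them, and with a hypothetical odd total of 129 it would need 65.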
|
||||
beacon-chain/core/peerdas/helpers_test.go (new file, 144 lines)
@@ -0,0 +1,144 @@
|
||||
package peerdas_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha256"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/consensys/gnark-crypto/ecc/bls12-381/fr"
|
||||
GoKZG "github.com/crate-crypto/go-kzg-4844"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func deterministicRandomness(seed int64) [32]byte {
|
||||
// Converts an int64 to a byte slice
|
||||
buf := new(bytes.Buffer)
|
||||
err := binary.Write(buf, binary.BigEndian, seed)
|
||||
if err != nil {
|
||||
logrus.WithError(err).Error("Failed to write int64 to bytes buffer")
|
||||
return [32]byte{}
|
||||
}
|
||||
bytes := buf.Bytes()
|
||||
|
||||
return sha256.Sum256(bytes)
|
||||
}
|
||||
|
||||
// Returns a serialized random field element in big-endian
|
||||
func GetRandFieldElement(seed int64) [32]byte {
|
||||
bytes := deterministicRandomness(seed)
|
||||
var r fr.Element
|
||||
r.SetBytes(bytes[:])
|
||||
|
||||
return GoKZG.SerializeScalar(r)
|
||||
}
|
||||
|
||||
// Returns a random blob using the passed seed as entropy
|
||||
func GetRandBlob(seed int64) kzg.Blob {
|
||||
var blob kzg.Blob
|
||||
bytesPerBlob := GoKZG.ScalarsPerBlob * GoKZG.SerializedScalarSize
|
||||
for i := 0; i < bytesPerBlob; i += GoKZG.SerializedScalarSize {
|
||||
fieldElementBytes := GetRandFieldElement(seed + int64(i))
|
||||
copy(blob[i:i+GoKZG.SerializedScalarSize], fieldElementBytes[:])
|
||||
}
|
||||
return blob
|
||||
}
|
||||
|
||||
func GenerateCommitmentAndProof(blob kzg.Blob) (*kzg.Commitment, *kzg.Proof, error) {
|
||||
commitment, err := kzg.BlobToKZGCommitment(&blob)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
proof, err := kzg.ComputeBlobKZGProof(&blob, commitment)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return &commitment, &proof, err
|
||||
}
|
||||
|
||||
func TestVerifyDataColumnSidecarKZGProofs(t *testing.T) {
|
||||
dbBlock := util.NewBeaconBlockDeneb()
|
||||
require.NoError(t, kzg.Start())
|
||||
|
||||
var (
|
||||
comms [][]byte
|
||||
blobs []kzg.Blob
|
||||
)
|
||||
for i := int64(0); i < 6; i++ {
|
||||
blob := GetRandBlob(i)
|
||||
commitment, _, err := GenerateCommitmentAndProof(blob)
|
||||
require.NoError(t, err)
|
||||
comms = append(comms, commitment[:])
|
||||
blobs = append(blobs, blob)
|
||||
}
|
||||
|
||||
dbBlock.Block.Body.BlobKzgCommitments = comms
|
||||
sBlock, err := blocks.NewSignedBeaconBlock(dbBlock)
|
||||
require.NoError(t, err)
|
||||
sCars, err := peerdas.DataColumnSidecars(sBlock, blobs)
|
||||
require.NoError(t, err)
|
||||
|
||||
for i, sidecar := range sCars {
|
||||
verified, err := peerdas.VerifyDataColumnSidecarKZGProofs(sidecar)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, verified, fmt.Sprintf("sidecar %d failed", i))
|
||||
}
|
||||
}
|
||||
|
||||
func TestHypergeomCDF(t *testing.T) {
|
||||
// Test case for the hypergeometric distribution: https://en.wikipedia.org/wiki/Hypergeometric_distribution
// Parameters used below: k=5, population size M=128, successes in population n=65, sample size N=16.
// Expected result: ~0.0797.
|
||||
const (
|
||||
expected = 0.0796665913283742
|
||||
margin = 0.000001
|
||||
)
|
||||
|
||||
actual := peerdas.HypergeomCDF(5, 128, 65, 16)
|
||||
require.Equal(t, true, expected-margin <= actual && actual <= expected+margin)
|
||||
}
|
||||
|
||||
func TestExtendedSampleCount(t *testing.T) {
|
||||
const samplesPerSlot = 16
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
allowedMissings uint64
|
||||
extendedSampleCount uint64
|
||||
}{
|
||||
{name: "allowedMissings=0", allowedMissings: 0, extendedSampleCount: 16},
|
||||
{name: "allowedMissings=1", allowedMissings: 1, extendedSampleCount: 20},
|
||||
{name: "allowedMissings=2", allowedMissings: 2, extendedSampleCount: 24},
|
||||
{name: "allowedMissings=3", allowedMissings: 3, extendedSampleCount: 27},
|
||||
{name: "allowedMissings=4", allowedMissings: 4, extendedSampleCount: 29},
|
||||
{name: "allowedMissings=5", allowedMissings: 5, extendedSampleCount: 32},
|
||||
{name: "allowedMissings=6", allowedMissings: 6, extendedSampleCount: 35},
|
||||
{name: "allowedMissings=7", allowedMissings: 7, extendedSampleCount: 37},
|
||||
{name: "allowedMissings=8", allowedMissings: 8, extendedSampleCount: 40},
|
||||
{name: "allowedMissings=9", allowedMissings: 9, extendedSampleCount: 42},
|
||||
{name: "allowedMissings=10", allowedMissings: 10, extendedSampleCount: 44},
|
||||
{name: "allowedMissings=11", allowedMissings: 11, extendedSampleCount: 47},
|
||||
{name: "allowedMissings=12", allowedMissings: 12, extendedSampleCount: 49},
|
||||
{name: "allowedMissings=13", allowedMissings: 13, extendedSampleCount: 51},
|
||||
{name: "allowedMissings=14", allowedMissings: 14, extendedSampleCount: 53},
|
||||
{name: "allowedMissings=15", allowedMissings: 15, extendedSampleCount: 55},
|
||||
{name: "allowedMissings=16", allowedMissings: 16, extendedSampleCount: 57},
|
||||
{name: "allowedMissings=17", allowedMissings: 17, extendedSampleCount: 59},
|
||||
{name: "allowedMissings=18", allowedMissings: 18, extendedSampleCount: 61},
|
||||
{name: "allowedMissings=19", allowedMissings: 19, extendedSampleCount: 63},
|
||||
{name: "allowedMissings=20", allowedMissings: 20, extendedSampleCount: 65},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
result := peerdas.ExtendedSampleCount(samplesPerSlot, tc.allowedMissings)
|
||||
require.Equal(t, tc.extendedSampleCount, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -53,6 +53,11 @@ func HigherEqualThanAltairVersionAndEpoch(s state.BeaconState, e primitives.Epoc
|
||||
return s.Version() >= version.Altair && e >= params.BeaconConfig().AltairForkEpoch
|
||||
}
|
||||
|
||||
// PeerDASIsActive checks whether peerDAS is active at the provided slot.
|
||||
func PeerDASIsActive(slot primitives.Slot) bool {
|
||||
return params.PeerDASEnabled() && slots.ToEpoch(slot) >= params.BeaconConfig().Eip7594ForkEpoch
|
||||
}
|
||||
|
||||
// CanUpgradeToAltair returns true if the input `slot` can upgrade to Altair.
|
||||
// Spec code:
|
||||
// If state.slot % SLOTS_PER_EPOCH == 0 and compute_epoch_at_slot(state.slot) == ALTAIR_FORK_EPOCH
|
||||
|
||||
@@ -4,6 +4,7 @@ go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"availability.go",
|
||||
"availability_columns.go",
|
||||
"cache.go",
|
||||
"iface.go",
|
||||
"mock.go",
|
||||
@@ -20,6 +21,7 @@ go_library(
|
||||
"//runtime/logging:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
|
||||
beacon-chain/das/availability_columns.go (new file, 151 lines)
@@ -0,0 +1,151 @@
|
||||
package das
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
errors "github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// LazilyPersistentStoreColumn is an implementation of AvailabilityStore to be used when batch syncing data columns.
// This implementation will hold any data columns passed to PersistColumns until IsDataAvailable is called for their
// block, at which time they will undergo full verification and be saved to the disk.
|
||||
type LazilyPersistentStoreColumn struct {
|
||||
store *filesystem.BlobStorage
|
||||
cache *cache
|
||||
verifier ColumnBatchVerifier
|
||||
nodeID enode.ID
|
||||
}
|
||||
|
||||
type ColumnBatchVerifier interface {
|
||||
VerifiedRODataColumns(ctx context.Context, blk blocks.ROBlock, sc []blocks.RODataColumn) ([]blocks.VerifiedRODataColumn, error)
|
||||
}
|
||||
|
||||
func NewLazilyPersistentStoreColumn(store *filesystem.BlobStorage, verifier ColumnBatchVerifier, id enode.ID) *LazilyPersistentStoreColumn {
|
||||
return &LazilyPersistentStoreColumn{
|
||||
store: store,
|
||||
cache: newCache(),
|
||||
verifier: verifier,
|
||||
nodeID: id,
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: Very Ugly, change interface to allow for columns and blobs
|
||||
func (s *LazilyPersistentStoreColumn) Persist(current primitives.Slot, sc ...blocks.ROBlob) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// PersistColumns adds columns to the working column cache. Columns stored in this cache will be persisted
// for at least as long as the node is running. Once IsDataAvailable succeeds, all columns referenced
// by the given block are guaranteed to be persisted for the remainder of the retention period.
|
||||
func (s *LazilyPersistentStoreColumn) PersistColumns(current primitives.Slot, sc ...blocks.RODataColumn) error {
|
||||
if len(sc) == 0 {
|
||||
return nil
|
||||
}
|
||||
if len(sc) > 1 {
|
||||
first := sc[0].BlockRoot()
|
||||
for i := 1; i < len(sc); i++ {
|
||||
if first != sc[i].BlockRoot() {
|
||||
return errMixedRoots
|
||||
}
|
||||
}
|
||||
}
|
||||
if !params.WithinDAPeriod(slots.ToEpoch(sc[0].Slot()), slots.ToEpoch(current)) {
|
||||
return nil
|
||||
}
|
||||
key := keyFromColumn(sc[0])
|
||||
entry := s.cache.ensure(key)
|
||||
for i := range sc {
|
||||
if err := entry.stashColumns(&sc[i]); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsDataAvailable returns nil if all the commitments in the given block are persisted to the db and have been verified.
|
||||
// BlobSidecars already in the db are assumed to have been previously verified against the block.
|
||||
func (s *LazilyPersistentStoreColumn) IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error {
|
||||
blockCommitments, err := fullCommitmentsToCheck(b, current)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "could check data availability for block %#x", b.Root())
|
||||
}
|
||||
// Return early for blocks that are pre-deneb or which do not have any commitments.
|
||||
if blockCommitments.count() == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
key := keyFromBlock(b)
|
||||
entry := s.cache.ensure(key)
|
||||
defer s.cache.delete(key)
|
||||
root := b.Root()
|
||||
sumz, err := s.store.WaitForSummarizer(ctx)
|
||||
if err != nil {
|
||||
log.WithField("root", fmt.Sprintf("%#x", b.Root())).
|
||||
WithError(err).
|
||||
Debug("Failed to receive BlobStorageSummarizer within IsDataAvailable")
|
||||
} else {
|
||||
entry.setDiskSummary(sumz.Summary(root))
|
||||
}
|
||||
|
||||
// Verify we have all the expected sidecars, and fail fast if any are missing or inconsistent.
|
||||
// We don't try to salvage problematic batches because this indicates a misbehaving peer and we'd rather
|
||||
// ignore their response and decrease their peer score.
|
||||
sidecars, err := entry.filterColumns(root, blockCommitments)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "incomplete BlobSidecar batch")
|
||||
}
|
||||
// Do thorough verifications of each DataColumnSidecar for the block.
// Same as above, we don't save DataColumnSidecars if there are any problems with the batch.
|
||||
vscs, err := s.verifier.VerifiedRODataColumns(ctx, b, sidecars)
|
||||
if err != nil {
|
||||
var me verification.VerificationMultiError
|
||||
ok := errors.As(err, &me)
|
||||
if ok {
|
||||
fails := me.Failures()
|
||||
lf := make(log.Fields, len(fails))
|
||||
for i := range fails {
|
||||
lf[fmt.Sprintf("fail_%d", i)] = fails[i].Error()
|
||||
}
|
||||
log.WithFields(lf).
|
||||
Debug("invalid ColumnSidecars received")
|
||||
}
|
||||
return errors.Wrapf(err, "invalid ColumnSidecars received for block %#x", root)
|
||||
}
|
||||
// Ensure that each column sidecar is written to disk.
|
||||
for i := range vscs {
|
||||
if err := s.store.SaveDataColumn(vscs[i]); err != nil {
|
||||
return errors.Wrapf(err, "failed to save ColumnSidecar index %d for block %#x", vscs[i].ColumnIndex, root)
|
||||
}
|
||||
}
|
||||
// All ColumnSidecars are persisted - the DA check succeeds.
|
||||
return nil
|
||||
}
|
||||
|
||||
func fullCommitmentsToCheck(b blocks.ROBlock, current primitives.Slot) (safeCommitmentsArray, error) {
|
||||
var ar safeCommitmentsArray
|
||||
if b.Version() < version.Deneb {
|
||||
return ar, nil
|
||||
}
|
||||
// We are only required to check within MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS
|
||||
if !params.WithinDAPeriod(slots.ToEpoch(b.Block().Slot()), slots.ToEpoch(current)) {
|
||||
return ar, nil
|
||||
}
|
||||
kc, err := b.Block().Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return ar, err
|
||||
}
|
||||
// Allocate each slot before copying; copying into a nil slice would copy nothing.
for i := range ar {
ar[i] = make([][]byte, len(kc))
copy(ar[i], kc)
}
|
||||
return ar, nil
|
||||
}
|
||||
@@ -2,6 +2,7 @@ package das
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"reflect"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
|
||||
@@ -38,6 +39,10 @@ func keyFromSidecar(sc blocks.ROBlob) cacheKey {
|
||||
return cacheKey{slot: sc.Slot(), root: sc.BlockRoot()}
|
||||
}
|
||||
|
||||
func keyFromColumn(sc blocks.RODataColumn) cacheKey {
|
||||
return cacheKey{slot: sc.Slot(), root: sc.BlockRoot()}
|
||||
}
|
||||
|
||||
// keyFromBlock is a convenience method for constructing a cacheKey from a ROBlock value.
|
||||
func keyFromBlock(b blocks.ROBlock) cacheKey {
|
||||
return cacheKey{slot: b.Block().Slot(), root: b.Root()}
|
||||
@@ -61,6 +66,7 @@ func (c *cache) delete(key cacheKey) {
|
||||
// cacheEntry holds a fixed-length cache of BlobSidecars.
|
||||
type cacheEntry struct {
|
||||
scs [fieldparams.MaxBlobsPerBlock]*blocks.ROBlob
|
||||
colScs [fieldparams.NumberOfColumns]*blocks.RODataColumn
|
||||
diskSummary filesystem.BlobStorageSummary
|
||||
}
|
||||
|
||||
@@ -82,6 +88,17 @@ func (e *cacheEntry) stash(sc *blocks.ROBlob) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *cacheEntry) stashColumns(sc *blocks.RODataColumn) error {
|
||||
if sc.ColumnIndex >= fieldparams.NumberOfColumns {
|
||||
return errors.Wrapf(errIndexOutOfBounds, "index=%d", sc.ColumnIndex)
|
||||
}
|
||||
if e.colScs[sc.ColumnIndex] != nil {
|
||||
return errors.Wrapf(ErrDuplicateSidecar, "root=%#x, index=%d, commitment=%#x", sc.BlockRoot(), sc.ColumnIndex, sc.KzgCommitments)
|
||||
}
|
||||
e.colScs[sc.ColumnIndex] = sc
|
||||
return nil
|
||||
}
|
||||
|
||||
// filter evicts sidecars that are not committed to by the block and returns custom
|
||||
// errors if the cache is missing any of the commitments, or if the commitments in
|
||||
// the cache do not match those found in the block. If err is nil, then all expected
|
||||
@@ -117,6 +134,35 @@ func (e *cacheEntry) filter(root [32]byte, kc safeCommitmentArray) ([]blocks.ROB
|
||||
return scs, nil
|
||||
}
|
||||
|
||||
func (e *cacheEntry) filterColumns(root [32]byte, kc safeCommitmentsArray) ([]blocks.RODataColumn, error) {
|
||||
if e.diskSummary.AllAvailable(kc.count()) {
|
||||
return nil, nil
|
||||
}
|
||||
scs := make([]blocks.RODataColumn, 0, kc.count())
|
||||
for i := uint64(0); i < fieldparams.NumberOfColumns; i++ {
|
||||
// We already have this column; we don't need to write it or validate it.
|
||||
if e.diskSummary.HasIndex(i) {
|
||||
continue
|
||||
}
|
||||
if kc[i] == nil {
|
||||
if e.colScs[i] != nil {
|
||||
return nil, errors.Wrapf(errCommitmentMismatch, "root=%#x, index=%#x, commitment=%#x, no block commitment", root, i, e.scs[i].KzgCommitment)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if e.colScs[i] == nil {
|
||||
return nil, errors.Wrapf(errMissingSidecar, "root=%#x, index=%#x", root, i)
|
||||
}
|
||||
if !reflect.DeepEqual(kc[i], e.colScs[i].KzgCommitments) {
|
||||
return nil, errors.Wrapf(errCommitmentMismatch, "root=%#x, index=%#x, commitment=%#x, block commitment=%#x", root, i, e.colScs[i].KzgCommitments, kc[i])
|
||||
}
|
||||
scs = append(scs, *e.colScs[i])
|
||||
}
|
||||
|
||||
return scs, nil
|
||||
}
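A hedged sketch of how a caller could distinguish the failure modes above (the sentinel errors are the package-level values used in this file, and errors.Is works through the pkg/errors wrapping; the helper itself is illustrative):

func classifyFilterError(entry *cacheEntry, root [32]byte, kc safeCommitmentsArray) ([]blocks.RODataColumn, error) {
cols, err := entry.filterColumns(root, kc)
switch {
case err == nil:
// Every committed-to column is either already on disk or present in cols, ready for verification.
case errors.Is(err, errMissingSidecar):
// A committed-to column was never received; the whole batch is rejected.
case errors.Is(err, errCommitmentMismatch):
// A cached column disagrees with the block's commitments; the batch is rejected.
}
return cols, err
}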
|
||||
|
||||
// safeCommitmentArray is a fixed size array of commitment byte slices. This is helpful for avoiding
|
||||
// gratuitous bounds checks.
|
||||
type safeCommitmentArray [fieldparams.MaxBlobsPerBlock][]byte
|
||||
@@ -129,3 +175,14 @@ func (s safeCommitmentArray) count() int {
|
||||
}
|
||||
return fieldparams.MaxBlobsPerBlock
|
||||
}
|
||||
|
||||
type safeCommitmentsArray [fieldparams.NumberOfColumns][][]byte
|
||||
|
||||
func (s safeCommitmentsArray) count() int {
|
||||
for i := range s {
|
||||
if s[i] == nil {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return fieldparams.NumberOfColumns
|
||||
}
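To make the count semantics concrete, a small illustrative helper (assuming fmt is imported; values are arbitrary): count reports the index of the first nil slot, so an array filled by fullCommitmentsToCheck reports either 0 or NumberOfColumns.

func demoCommitmentsCount() {
var s safeCommitmentsArray
fmt.Println(s.count()) // 0: slot 0 is nil, nothing to check
s[0] = [][]byte{{0xaa}}
fmt.Println(s.count()) // 1: slot 1 is now the first nil slot
}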
|
||||
|
||||
@@ -13,6 +13,7 @@ go_library(
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//async/event:go_default_library",
|
||||
"//beacon-chain/verification:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/async/event"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
@@ -39,8 +40,15 @@ const (
|
||||
directoryPermissions = 0700
|
||||
)
|
||||
|
||||
// BlobStorageOption is a functional option for configuring a BlobStorage.
|
||||
type BlobStorageOption func(*BlobStorage) error
|
||||
type (
|
||||
// BlobStorageOption is a functional option for configuring a BlobStorage.
|
||||
BlobStorageOption func(*BlobStorage) error
|
||||
|
||||
RootIndexPair struct {
|
||||
Root [fieldparams.RootLength]byte
|
||||
Index uint64
|
||||
}
|
||||
)
|
||||
|
||||
// WithBasePath is a required option that sets the base path of blob storage.
|
||||
func WithBasePath(base string) BlobStorageOption {
|
||||
@@ -70,7 +78,10 @@ func WithSaveFsync(fsync bool) BlobStorageOption {
|
||||
// attempt to hold a file lock to guarantee exclusive control of the blob storage directory, so this should only be
|
||||
// initialized once per beacon node.
|
||||
func NewBlobStorage(opts ...BlobStorageOption) (*BlobStorage, error) {
|
||||
b := &BlobStorage{}
|
||||
b := &BlobStorage{
|
||||
DataColumnFeed: new(event.Feed),
|
||||
}
|
||||
|
||||
for _, o := range opts {
|
||||
if err := o(b); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to create blob storage")
|
||||
@@ -99,6 +110,7 @@ type BlobStorage struct {
|
||||
fsync bool
|
||||
fs afero.Fs
|
||||
pruner *blobPruner
|
||||
DataColumnFeed *event.Feed
|
||||
}
|
||||
|
||||
// WarmCache runs the prune routine with an expiration of slot of 0, so nothing will be pruned, but the pruner's cache
|
||||
@@ -221,6 +233,110 @@ func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// SaveDataColumn saves a data column to our local filesystem.
|
||||
func (bs *BlobStorage) SaveDataColumn(column blocks.VerifiedRODataColumn) error {
|
||||
startTime := time.Now()
|
||||
fname := namerForDataColumn(column)
|
||||
sszPath := fname.path()
|
||||
exists, err := afero.Exists(bs.fs, sszPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if exists {
|
||||
log.Trace("Ignoring a duplicate data column sidecar save attempt")
|
||||
return nil
|
||||
}
|
||||
|
||||
if bs.pruner != nil {
|
||||
hRoot, err := column.SignedBlockHeader.Header.HashTreeRoot()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := bs.pruner.notify(hRoot, column.SignedBlockHeader.Header.Slot, column.ColumnIndex); err != nil {
|
||||
return errors.Wrapf(err, "problem maintaining pruning cache/metrics for sidecar with root=%#x", hRoot)
|
||||
}
|
||||
}
|
||||
|
||||
// Serialize the ethpb.DataColumnSidecar to binary data using SSZ.
|
||||
sidecarData, err := column.MarshalSSZ()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to serialize sidecar data")
|
||||
} else if len(sidecarData) == 0 {
|
||||
return errSidecarEmptySSZData
|
||||
}
|
||||
|
||||
if err := bs.fs.MkdirAll(fname.dir(), directoryPermissions); err != nil {
|
||||
return err
|
||||
}
|
||||
partPath := fname.partPath(fmt.Sprintf("%p", sidecarData))
|
||||
|
||||
partialMoved := false
|
||||
// Ensure the partial file is deleted.
|
||||
defer func() {
|
||||
if partialMoved {
|
||||
return
|
||||
}
|
||||
// Removing the partial file is expected to fail when the save succeeded, because it has already been renamed.
|
||||
err = bs.fs.Remove(partPath)
|
||||
if err == nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"partPath": partPath,
|
||||
}).Debugf("Removed partial file")
|
||||
}
|
||||
}()
|
||||
|
||||
// Create a partial file and write the serialized data to it.
|
||||
partialFile, err := bs.fs.Create(partPath)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to create partial file")
|
||||
}
|
||||
|
||||
n, err := partialFile.Write(sidecarData)
|
||||
if err != nil {
|
||||
closeErr := partialFile.Close()
|
||||
if closeErr != nil {
|
||||
return closeErr
|
||||
}
|
||||
return errors.Wrap(err, "failed to write to partial file")
|
||||
}
|
||||
if bs.fsync {
|
||||
if err := partialFile.Sync(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err := partialFile.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if n != len(sidecarData) {
|
||||
return fmt.Errorf("failed to write the full bytes of sidecarData, wrote only %d of %d bytes", n, len(sidecarData))
|
||||
}
|
||||
|
||||
if n == 0 {
|
||||
return errEmptyBlobWritten
|
||||
}
|
||||
|
||||
// Atomically rename the partial file to its final name.
|
||||
err = bs.fs.Rename(partPath, sszPath)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to rename partial file to final name")
|
||||
}
|
||||
partialMoved = true
|
||||
|
||||
// Notify the data column notifier that a new data column has been saved.
|
||||
bs.DataColumnFeed.Send(RootIndexPair{
|
||||
Root: column.BlockRoot(),
|
||||
Index: column.ColumnIndex,
|
||||
})
|
||||
|
||||
// TODO: Use new metrics for data columns
|
||||
blobsWrittenCounter.Inc()
|
||||
blobSaveLatency.Observe(float64(time.Since(startTime).Milliseconds()))
|
||||
return nil
|
||||
}
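The save path above follows a write-to-partial-file-then-rename pattern so readers never observe a half-written sidecar. A generic, self-contained sketch of the same pattern using the standard library (not the Prysm implementation; package and file names are illustrative):

package diskutil

import (
"os"
"path/filepath"
)

// atomicWrite writes data to dir/name by first writing dir/name.part,
// optionally fsyncing, and then renaming the partial file into place.
func atomicWrite(dir, name string, data []byte, fsync bool) error {
if err := os.MkdirAll(dir, 0o700); err != nil {
return err
}
part := filepath.Join(dir, name+".part")
f, err := os.Create(part)
if err != nil {
return err
}
if _, err := f.Write(data); err != nil {
f.Close()
os.Remove(part)
return err
}
if fsync {
if err := f.Sync(); err != nil {
f.Close()
os.Remove(part)
return err
}
}
if err := f.Close(); err != nil {
os.Remove(part)
return err
}
return os.Rename(part, filepath.Join(dir, name))
}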
|
||||
|
||||
// Get retrieves a single BlobSidecar by its root and index.
|
||||
// Since BlobStorage only writes blobs that have undergone full verification, the return
|
||||
// value is always a VerifiedROBlob.
|
||||
@@ -246,6 +362,20 @@ func (bs *BlobStorage) Get(root [32]byte, idx uint64) (blocks.VerifiedROBlob, er
|
||||
return verification.BlobSidecarNoop(ro)
|
||||
}
|
||||
|
||||
// GetColumn retrieves a single DataColumnSidecar by its root and index.
|
||||
func (bs *BlobStorage) GetColumn(root [32]byte, idx uint64) (*ethpb.DataColumnSidecar, error) {
|
||||
expected := blobNamer{root: root, index: idx}
|
||||
encoded, err := afero.ReadFile(bs.fs, expected.path())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s := ðpb.DataColumnSidecar{}
|
||||
if err := s.UnmarshalSSZ(encoded); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return s, nil
|
||||
}
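A hedged round-trip sketch (same package, illustrative names): a verified column is saved and then read back by block root and column index.

func saveAndReload(bs *BlobStorage, col blocks.VerifiedRODataColumn) (*ethpb.DataColumnSidecar, error) {
if err := bs.SaveDataColumn(col); err != nil {
return nil, err
}
return bs.GetColumn(col.BlockRoot(), col.ColumnIndex)
}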
|
||||
|
||||
// Remove removes all blobs for a given root.
|
||||
func (bs *BlobStorage) Remove(root [32]byte) error {
|
||||
rootDir := blobNamer{root: root}.dir()
|
||||
@@ -289,6 +419,61 @@ func (bs *BlobStorage) Indices(root [32]byte) ([fieldparams.MaxBlobsPerBlock]boo
|
||||
return mask, nil
|
||||
}
|
||||
|
||||
// ColumnIndices retrieves the stored column indices from our filesystem.
|
||||
func (bs *BlobStorage) ColumnIndices(root [32]byte) (map[uint64]bool, error) {
|
||||
custody := make(map[uint64]bool, fieldparams.NumberOfColumns)
|
||||
|
||||
// Get all the files in the directory.
|
||||
rootDir := blobNamer{root: root}.dir()
|
||||
entries, err := afero.ReadDir(bs.fs, rootDir)
|
||||
if err != nil {
|
||||
// If the directory does not exist, we do not custody any columns.
|
||||
if os.IsNotExist(err) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return nil, errors.Wrap(err, "read directory")
|
||||
}
|
||||
|
||||
// Iterate over all the entries in the directory.
|
||||
for _, entry := range entries {
|
||||
// If the entry is a directory, skip it.
|
||||
if entry.IsDir() {
|
||||
continue
|
||||
}
|
||||
|
||||
// If the entry does not have the correct extension, skip it.
|
||||
name := entry.Name()
|
||||
if !strings.HasSuffix(name, sszExt) {
|
||||
continue
|
||||
}
|
||||
|
||||
// The file should be in the `<index>.<extension>` format.
|
||||
// Skip the file if it does not match the format.
|
||||
parts := strings.Split(name, ".")
|
||||
if len(parts) != 2 {
|
||||
continue
|
||||
}
|
||||
|
||||
// Get the column index from the file name.
|
||||
columnIndexStr := parts[0]
|
||||
columnIndex, err := strconv.ParseUint(columnIndexStr, 10, 64)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "unexpected directory entry breaks listing, %s", parts[0])
|
||||
}
|
||||
|
||||
// If the column index is out of bounds, return an error.
|
||||
if columnIndex >= fieldparams.NumberOfColumns {
|
||||
return nil, errors.Wrapf(errIndexOutOfBounds, "invalid index %d", columnIndex)
|
||||
}
|
||||
|
||||
// Mark the column index as in custody.
|
||||
custody[columnIndex] = true
|
||||
}
|
||||
|
||||
return custody, nil
|
||||
}
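The directory listing above relies on sidecar files being named in the `<index>.<extension>` format. A small sketch of that parsing step in isolation (sszExt is the package constant referenced above; the helper itself is illustrative):

func parseColumnIndexFromName(name string) (uint64, bool) {
if !strings.HasSuffix(name, sszExt) {
return 0, false
}
parts := strings.Split(name, ".")
if len(parts) != 2 {
return 0, false
}
idx, err := strconv.ParseUint(parts[0], 10, 64)
if err != nil || idx >= fieldparams.NumberOfColumns {
return 0, false
}
return idx, true
}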
|
||||
|
||||
// Clear deletes all files on the filesystem.
|
||||
func (bs *BlobStorage) Clear() error {
|
||||
dirs, err := listDir(bs.fs, ".")
|
||||
@@ -321,6 +506,10 @@ func namerForSidecar(sc blocks.VerifiedROBlob) blobNamer {
|
||||
return blobNamer{root: sc.BlockRoot(), index: sc.Index}
|
||||
}
|
||||
|
||||
func namerForDataColumn(col blocks.VerifiedRODataColumn) blobNamer {
|
||||
return blobNamer{root: col.BlockRoot(), index: col.ColumnIndex}
|
||||
}
|
||||
|
||||
func (p blobNamer) dir() string {
|
||||
return rootString(p.root)
|
||||
}
|
||||
|
||||
@@ -9,7 +9,7 @@ import (
|
||||
)
|
||||
|
||||
// blobIndexMask is a bitmask representing the set of blob indices that are currently set.
|
||||
type blobIndexMask [fieldparams.MaxBlobsPerBlock]bool
|
||||
type blobIndexMask [fieldparams.NumberOfColumns]bool
|
||||
|
||||
// BlobStorageSummary represents cached information about the BlobSidecars on disk for each root the cache knows about.
|
||||
type BlobStorageSummary struct {
|
||||
@@ -68,9 +68,12 @@ func (s *blobStorageCache) Summary(root [32]byte) BlobStorageSummary {
|
||||
}
|
||||
|
||||
func (s *blobStorageCache) ensure(key [32]byte, slot primitives.Slot, idx uint64) error {
|
||||
if idx >= fieldparams.MaxBlobsPerBlock {
|
||||
return errIndexOutOfBounds
|
||||
}
|
||||
// TODO: Separate blob index checks from data column index checks
|
||||
/*
|
||||
if idx >= fieldparams.MaxBlobsPerBlock {
|
||||
return errIndexOutOfBounds
|
||||
}
|
||||
*/
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
v := s.cache[key]
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
)
|
||||
|
||||
func TestSlotByRoot_Summary(t *testing.T) {
|
||||
t.Skip("Use new test for data columns")
|
||||
var noneSet, allSet, firstSet, lastSet, oneSet blobIndexMask
|
||||
firstSet[0] = true
|
||||
lastSet[len(lastSet)-1] = true
|
||||
|
||||
@@ -23,10 +23,10 @@ import (
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// used to represent errors for inconsistent slot ranges.
|
||||
// Used to represent errors for inconsistent slot ranges.
|
||||
var errInvalidSlotRange = errors.New("invalid end slot and start slot provided")
|
||||
|
||||
// Block retrieval by root.
|
||||
// Block retrieval by root. Return nil if block is not found.
|
||||
func (s *Store) Block(ctx context.Context, blockRoot [32]byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.Block")
|
||||
defer span.End()
|
||||
|
||||
@@ -988,6 +988,7 @@ func (b *BeaconNode) registerRPCService(router *mux.Router) error {
|
||||
FinalizationFetcher: chainService,
|
||||
BlockReceiver: chainService,
|
||||
BlobReceiver: chainService,
|
||||
DataColumnReceiver: chainService,
|
||||
AttestationReceiver: chainService,
|
||||
GenesisTimeFetcher: chainService,
|
||||
GenesisFetcher: chainService,
|
||||
|
||||
@@ -7,6 +7,7 @@ go_library(
|
||||
"broadcaster.go",
|
||||
"config.go",
|
||||
"connection_gater.go",
|
||||
"custody.go",
|
||||
"dial_relay_node.go",
|
||||
"discovery.go",
|
||||
"doc.go",
|
||||
@@ -46,6 +47,7 @@ go_library(
|
||||
"//beacon-chain/core/altair:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/p2p/encoder:go_default_library",
|
||||
@@ -56,6 +58,7 @@ go_library(
|
||||
"//beacon-chain/startup:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//consensus-types/wrapper:go_default_library",
|
||||
@@ -74,6 +77,8 @@ go_library(
|
||||
"//runtime/version:go_default_library",
|
||||
"//time:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_btcsuite_btcd_btcec_v2//:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//crypto:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/discover:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
|
||||
@@ -115,6 +120,7 @@ go_test(
|
||||
"addr_factory_test.go",
|
||||
"broadcaster_test.go",
|
||||
"connection_gater_test.go",
|
||||
"custody_test.go",
|
||||
"dial_relay_node_test.go",
|
||||
"discovery_test.go",
|
||||
"fork_test.go",
|
||||
@@ -136,9 +142,11 @@ go_test(
|
||||
flaky = True,
|
||||
tags = ["requires-network"],
|
||||
deps = [
|
||||
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||
"//beacon-chain/blockchain/testing:go_default_library",
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/db/testing:go_default_library",
|
||||
"//beacon-chain/p2p/encoder:go_default_library",
|
||||
@@ -151,6 +159,7 @@ go_test(
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//consensus-types/wrapper:go_default_library",
|
||||
"//container/leaky-bucket:go_default_library",
|
||||
|
||||
@@ -9,16 +9,18 @@ import (
|
||||
|
||||
"github.com/pkg/errors"
|
||||
ssz "github.com/prysmaticlabs/fastssz"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
"google.golang.org/protobuf/proto"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/altair"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/hash"
|
||||
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
// ErrMessageNotMapped occurs on a Broadcast attempt when a message has not been defined in the
|
||||
@@ -96,7 +98,12 @@ func (s *Service) BroadcastSyncCommitteeMessage(ctx context.Context, subnet uint
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) internalBroadcastAttestation(ctx context.Context, subnet uint64, att ethpb.Att, forkDigest [4]byte) {
|
||||
func (s *Service) internalBroadcastAttestation(
|
||||
ctx context.Context,
|
||||
subnet uint64,
|
||||
att ethpb.Att,
|
||||
forkDigest [fieldparams.VersionLength]byte,
|
||||
) {
|
||||
_, span := trace.StartSpan(ctx, "p2p.internalBroadcastAttestation")
|
||||
defer span.End()
|
||||
ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.
|
||||
@@ -152,7 +159,7 @@ func (s *Service) internalBroadcastAttestation(ctx context.Context, subnet uint6
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) broadcastSyncCommittee(ctx context.Context, subnet uint64, sMsg *ethpb.SyncCommitteeMessage, forkDigest [4]byte) {
|
||||
func (s *Service) broadcastSyncCommittee(ctx context.Context, subnet uint64, sMsg *ethpb.SyncCommitteeMessage, forkDigest [fieldparams.VersionLength]byte) {
|
||||
_, span := trace.StartSpan(ctx, "p2p.broadcastSyncCommittee")
|
||||
defer span.End()
|
||||
ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.
|
||||
@@ -228,7 +235,12 @@ func (s *Service) BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) internalBroadcastBlob(ctx context.Context, subnet uint64, blobSidecar *ethpb.BlobSidecar, forkDigest [4]byte) {
|
||||
func (s *Service) internalBroadcastBlob(
|
||||
ctx context.Context,
|
||||
subnet uint64,
|
||||
blobSidecar *ethpb.BlobSidecar,
|
||||
forkDigest [fieldparams.VersionLength]byte,
|
||||
) {
|
||||
_, span := trace.StartSpan(ctx, "p2p.internalBroadcastBlob")
|
||||
defer span.End()
|
||||
ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.
|
||||
@@ -243,7 +255,7 @@ func (s *Service) internalBroadcastBlob(ctx context.Context, subnet uint64, blob
|
||||
s.subnetLocker(wrappedSubIdx).RUnlock()
|
||||
|
||||
if !hasPeer {
|
||||
blobSidecarCommitteeBroadcastAttempts.Inc()
|
||||
blobSidecarBroadcastAttempts.Inc()
|
||||
if err := func() error {
|
||||
s.subnetLocker(wrappedSubIdx).Lock()
|
||||
defer s.subnetLocker(wrappedSubIdx).Unlock()
|
||||
@@ -252,7 +264,7 @@ func (s *Service) internalBroadcastBlob(ctx context.Context, subnet uint64, blob
|
||||
return err
|
||||
}
|
||||
if ok {
|
||||
blobSidecarCommitteeBroadcasts.Inc()
|
||||
blobSidecarBroadcasts.Inc()
|
||||
return nil
|
||||
}
|
||||
return errors.New("failed to find peers for subnet")
|
||||
@@ -268,6 +280,99 @@ func (s *Service) internalBroadcastBlob(ctx context.Context, subnet uint64, blob
|
||||
}
|
||||
}
|
||||
|
||||
// BroadcastDataColumn broadcasts a data column sidecar to the p2p network. The message is broadcast
// on the current fork digest and to the given column subnet.
// TODO: Add tests
|
||||
func (s *Service) BroadcastDataColumn(ctx context.Context, columnSubnet uint64, dataColumnSidecar *ethpb.DataColumnSidecar) error {
|
||||
// Add tracing to the function.
|
||||
ctx, span := trace.StartSpan(ctx, "p2p.BroadcastBlob")
|
||||
defer span.End()
|
||||
|
||||
// Ensure the data column sidecar is not nil.
|
||||
if dataColumnSidecar == nil {
|
||||
return errors.Errorf("attempted to broadcast nil data column sidecar at subnet %d", columnSubnet)
|
||||
}
|
||||
|
||||
// Retrieve the current fork digest.
|
||||
forkDigest, err := s.currentForkDigest()
|
||||
if err != nil {
|
||||
err := errors.Wrap(err, "current fork digest")
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
|
||||
// Non-blocking broadcast, with attempts to discover a column subnet peer if none available.
|
||||
go s.internalBroadcastDataColumn(ctx, columnSubnet, dataColumnSidecar, forkDigest)
|
||||
|
||||
return nil
|
||||
}
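A hedged usage sketch from a caller's perspective: the subnet for a sidecar is derived from its column index modulo the configured subnet count (mirroring compute_subnet_for_data_column_sidecar in the PeerDAS spec), and the sidecar is then handed to the broadcaster. The helper below is illustrative and not part of this change.

func broadcastColumnSidecar(ctx context.Context, broadcaster Broadcaster, sidecar *ethpb.DataColumnSidecar) error {
// Map the column index onto its gossip subnet, then hand off to the non-blocking broadcast above.
subnet := sidecar.ColumnIndex % params.BeaconConfig().DataColumnSidecarSubnetCount
return broadcaster.BroadcastDataColumn(ctx, subnet, sidecar)
}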
|
||||
|
||||
func (s *Service) internalBroadcastDataColumn(
|
||||
ctx context.Context,
|
||||
columnSubnet uint64,
|
||||
dataColumnSidecar *ethpb.DataColumnSidecar,
|
||||
forkDigest [fieldparams.VersionLength]byte,
|
||||
) {
|
||||
// Add tracing to the function.
|
||||
_, span := trace.StartSpan(ctx, "p2p.internalBroadcastDataColumn")
|
||||
defer span.End()
|
||||
|
||||
// Increase the number of broadcast attempts.
|
||||
dataColumnSidecarBroadcastAttempts.Inc()
|
||||
|
||||
// Clear parent context / deadline.
|
||||
ctx = trace.NewContext(context.Background(), span)
|
||||
|
||||
// Define a one-slot length context timeout.
|
||||
oneSlot := time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second
|
||||
ctx, cancel := context.WithTimeout(ctx, oneSlot)
|
||||
defer cancel()
|
||||
|
||||
// Build the topic corresponding to this column subnet and this fork digest.
|
||||
topic := dataColumnSubnetToTopic(columnSubnet, forkDigest)
|
||||
|
||||
// Compute the wrapped subnet index.
|
||||
wrappedSubIdx := columnSubnet + dataColumnSubnetVal
|
||||
|
||||
// Check if we have peers with this subnet.
|
||||
hasPeer := func() bool {
|
||||
s.subnetLocker(wrappedSubIdx).RLock()
|
||||
defer s.subnetLocker(wrappedSubIdx).RUnlock()
|
||||
|
||||
return s.hasPeerWithSubnet(topic)
|
||||
}()
|
||||
|
||||
// If no peers are found, attempt to find peers with this subnet.
|
||||
if !hasPeer {
|
||||
if err := func() error {
|
||||
s.subnetLocker(wrappedSubIdx).Lock()
|
||||
defer s.subnetLocker(wrappedSubIdx).Unlock()
|
||||
|
||||
ok, err := s.FindPeersWithSubnet(ctx, topic, columnSubnet, 1 /*threshold*/)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "find peers for subnet")
|
||||
}
|
||||
|
||||
if ok {
|
||||
return nil
|
||||
}
|
||||
return errors.New("failed to find peers for subnet")
|
||||
}(); err != nil {
|
||||
log.WithError(err).Error("Failed to find peers")
|
||||
tracing.AnnotateError(span, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Broadcast the data column sidecar to the network.
|
||||
if err := s.broadcastObject(ctx, dataColumnSidecar, topic); err != nil {
|
||||
log.WithError(err).Error("Failed to broadcast data column sidecar")
|
||||
tracing.AnnotateError(span, err)
|
||||
}
|
||||
|
||||
// Increase the number of successful broadcasts.
|
||||
dataColumnSidecarBroadcasts.Inc()
|
||||
}
|
||||
|
||||
// method to broadcast messages to other peers in our gossip mesh.
|
||||
func (s *Service) broadcastObject(ctx context.Context, obj ssz.Marshaler, topic string) error {
|
||||
ctx, span := trace.StartSpan(ctx, "p2p.broadcastObject")
|
||||
@@ -297,14 +402,18 @@ func (s *Service) broadcastObject(ctx context.Context, obj ssz.Marshaler, topic
|
||||
return nil
|
||||
}
|
||||
|
||||
func attestationToTopic(subnet uint64, forkDigest [4]byte) string {
|
||||
func attestationToTopic(subnet uint64, forkDigest [fieldparams.VersionLength]byte) string {
|
||||
return fmt.Sprintf(AttestationSubnetTopicFormat, forkDigest, subnet)
|
||||
}
|
||||
|
||||
func syncCommitteeToTopic(subnet uint64, forkDigest [4]byte) string {
|
||||
func syncCommitteeToTopic(subnet uint64, forkDigest [fieldparams.VersionLength]byte) string {
|
||||
return fmt.Sprintf(SyncCommitteeSubnetTopicFormat, forkDigest, subnet)
|
||||
}
|
||||
|
||||
func blobSubnetToTopic(subnet uint64, forkDigest [4]byte) string {
|
||||
func blobSubnetToTopic(subnet uint64, forkDigest [fieldparams.VersionLength]byte) string {
|
||||
return fmt.Sprintf(BlobSubnetTopicFormat, forkDigest, subnet)
|
||||
}
|
||||
|
||||
func dataColumnSubnetToTopic(subnet uint64, forkDigest [fieldparams.VersionLength]byte) string {
|
||||
return fmt.Sprintf(DataColumnSubnetTopicFormat, forkDigest, subnet)
|
||||
}
|
||||
|
||||
@@ -13,11 +13,16 @@ import (
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"google.golang.org/protobuf/proto"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/scorers"
|
||||
p2ptest "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/testing"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
@@ -25,7 +30,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
func TestService_Broadcast(t *testing.T) {
|
||||
@@ -520,3 +524,70 @@ func TestService_BroadcastBlob(t *testing.T) {
|
||||
require.NoError(t, p.BroadcastBlob(ctx, subnet, blobSidecar))
|
||||
require.Equal(t, false, util.WaitTimeout(&wg, 1*time.Second), "Failed to receive pubsub within 1s")
|
||||
}
|
||||
|
||||
func TestService_BroadcastDataColumn(t *testing.T) {
|
||||
require.NoError(t, kzg.Start())
|
||||
p1 := p2ptest.NewTestP2P(t)
|
||||
p2 := p2ptest.NewTestP2P(t)
|
||||
p1.Connect(p2)
|
||||
require.NotEqual(t, 0, len(p1.BHost.Network().Peers()), "No peers")
|
||||
|
||||
p := &Service{
|
||||
host: p1.BHost,
|
||||
pubsub: p1.PubSub(),
|
||||
joinedTopics: map[string]*pubsub.Topic{},
|
||||
cfg: &Config{},
|
||||
genesisTime: time.Now(),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
subnetsLock: make(map[uint64]*sync.RWMutex),
|
||||
subnetsLockLock: sync.Mutex{},
|
||||
peers: peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
ScorerParams: &scorers.Config{},
|
||||
}),
|
||||
}
|
||||
|
||||
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlockElectra())
|
||||
require.NoError(t, err)
|
||||
blobs := make([]kzg.Blob, fieldparams.MaxBlobsPerBlock)
|
||||
sidecars, err := peerdas.DataColumnSidecars(b, blobs)
|
||||
require.NoError(t, err)
|
||||
|
||||
sidecar := sidecars[0]
|
||||
subnet := uint64(0)
|
||||
topic := DataColumnSubnetTopicFormat
|
||||
GossipTypeMapping[reflect.TypeOf(sidecar)] = topic
|
||||
digest, err := p.currentForkDigest()
|
||||
require.NoError(t, err)
|
||||
topic = fmt.Sprintf(topic, digest, subnet)
|
||||
|
||||
// External peer subscribes to the topic.
|
||||
topic += p.Encoding().ProtocolSuffix()
|
||||
sub, err := p2.SubscribeToTopic(topic)
|
||||
require.NoError(t, err)
|
||||
|
||||
time.Sleep(50 * time.Millisecond) // libp2p fails without this delay...
|
||||
|
||||
// Async listen for the pubsub, must be before the broadcast.
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
go func(tt *testing.T) {
|
||||
defer wg.Done()
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
|
||||
defer cancel()
|
||||
|
||||
msg, err := sub.Next(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
result := ðpb.DataColumnSidecar{}
|
||||
require.NoError(t, p.Encoding().DecodeGossip(msg.Data, result))
|
||||
require.DeepEqual(t, result, sidecar)
|
||||
}(t)
|
||||
|
||||
// Attempting to broadcast a nil object should fail.
|
||||
ctx := context.Background()
|
||||
require.ErrorContains(t, "attempted to broadcast nil", p.BroadcastDataColumn(ctx, subnet, nil))
|
||||
|
||||
// Broadcast to peers and wait.
|
||||
require.NoError(t, p.BroadcastDataColumn(ctx, subnet, sidecar))
|
||||
require.Equal(t, false, util.WaitTimeout(&wg, 1*time.Second), "Failed to receive pubsub within 1s")
|
||||
}
|
||||
|
||||
beacon-chain/p2p/custody.go (new file, 103 lines)
@@ -0,0 +1,103 @@
|
||||
package p2p
|
||||
|
||||
import (
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
)
|
||||
|
||||
// GetValidCustodyPeers returns a list of peers that custody a superset of the local node's custody columns.
|
||||
func (s *Service) GetValidCustodyPeers(peers []peer.ID) ([]peer.ID, error) {
|
||||
// Get the total number of columns.
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
|
||||
localCustodySubnetCount := peerdas.CustodySubnetCount()
|
||||
localCustodyColumns, err := peerdas.CustodyColumns(s.NodeID(), localCustodySubnetCount)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "custody columns for local node")
|
||||
}
|
||||
|
||||
localCustodyColumnsCount := uint64(len(localCustodyColumns))
|
||||
|
||||
// Find the valid peers.
|
||||
validPeers := make([]peer.ID, 0, len(peers))
|
||||
|
||||
loop:
|
||||
for _, pid := range peers {
|
||||
// Get the custody subnets count of the remote peer.
|
||||
remoteCustodySubnetCount := s.CustodyCountFromRemotePeer(pid)
|
||||
|
||||
// Get the remote node ID from the peer ID.
|
||||
remoteNodeID, err := ConvertPeerIDToNodeID(pid)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "convert peer ID to node ID")
|
||||
}
|
||||
|
||||
// Get the custody columns of the remote peer.
|
||||
remoteCustodyColumns, err := peerdas.CustodyColumns(remoteNodeID, remoteCustodySubnetCount)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "custody columns")
|
||||
}
|
||||
|
||||
remoteCustodyColumnsCount := uint64(len(remoteCustodyColumns))
|
||||
|
||||
// If the remote peer custodies fewer columns than the local node, skip it.
if remoteCustodyColumnsCount < localCustodyColumnsCount {
|
||||
continue
|
||||
}
|
||||
|
||||
// If the remote peer custodies all possible columns, add it to the list.
|
||||
if remoteCustodyColumnsCount == numberOfColumns {
|
||||
copiedId := pid
|
||||
validPeers = append(validPeers, copiedId)
|
||||
continue
|
||||
}
|
||||
|
||||
// Filter out invalid peers.
|
||||
for c := range localCustodyColumns {
|
||||
if !remoteCustodyColumns[c] {
|
||||
continue loop
|
||||
}
|
||||
}
|
||||
|
||||
copiedId := pid
|
||||
|
||||
// Add valid peer to list
|
||||
validPeers = append(validPeers, copiedId)
|
||||
}
|
||||
|
||||
return validPeers, nil
|
||||
}
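The loop above implements a superset test: a peer is kept only when its custody columns include every column the local node custodies. The same check, isolated into a small illustrative helper:

func custodiesSuperset(local, remote map[uint64]bool) bool {
for column := range local {
if !remote[column] {
return false
}
}
return true
}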
|
||||
|
||||
// CustodyCountFromRemotePeer retrieves the custody count from a remote peer.
|
||||
func (s *Service) CustodyCountFromRemotePeer(pid peer.ID) uint64 {
|
||||
// By default, we assume the peer custodies the minimum number of subnets.
|
||||
custodyRequirement := params.BeaconConfig().CustodyRequirement
|
||||
|
||||
// Retrieve the ENR of the peer.
|
||||
record, err := s.peers.ENR(pid)
|
||||
if err != nil {
|
||||
log.WithError(err).WithFields(logrus.Fields{
|
||||
"peerID": pid,
|
||||
"defaultValue": custodyRequirement,
|
||||
}).Error("Failed to retrieve ENR for peer, defaulting to the default value")
|
||||
|
||||
return custodyRequirement
|
||||
}
|
||||
|
||||
// Retrieve the custody subnets count from the ENR.
|
||||
custodyCount, err := peerdas.CustodyCountFromRecord(record)
|
||||
if err != nil {
|
||||
log.WithError(err).WithFields(logrus.Fields{
|
||||
"peerID": pid,
|
||||
"defaultValue": custodyRequirement,
|
||||
}).Debug("Failed to retrieve custody count from ENR for peer, defaulting to the default value")
|
||||
|
||||
return custodyRequirement
|
||||
}
|
||||
|
||||
return custodyCount
|
||||
}
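For context, the local counterpart of this lookup is the node advertising its own count in its ENR (as done in createLocalNode further down in this diff). A minimal sketch, assuming an enode.LocalNode is at hand (the helper itself is illustrative):

func advertiseCustody(localNode *enode.LocalNode) {
// "csc" carries the custody subnet count; remote peers read it back via
// peerdas.CustodyCountFromRecord, as in CustodyCountFromRemotePeer above.
localNode.Set(peerdas.Csc(peerdas.CustodySubnetCount()))
}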
|
||||
beacon-chain/p2p/custody_test.go (new file, 163 lines)
@@ -0,0 +1,163 @@
|
||||
package p2p
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/ecdsa"
|
||||
"net"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
"github.com/libp2p/go-libp2p/core/crypto"
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/scorers"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
ecdsaprysm "github.com/prysmaticlabs/prysm/v5/crypto/ecdsa"
|
||||
prysmNetwork "github.com/prysmaticlabs/prysm/v5/network"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
)
|
||||
|
||||
func createPeer(t *testing.T, privateKeyOffset int, custodyCount uint64) (*enr.Record, peer.ID, *ecdsa.PrivateKey) {
|
||||
privateKeyBytes := make([]byte, 32)
|
||||
for i := 0; i < 32; i++ {
|
||||
privateKeyBytes[i] = byte(privateKeyOffset + i)
|
||||
}
|
||||
|
||||
unmarshalledPrivateKey, err := crypto.UnmarshalSecp256k1PrivateKey(privateKeyBytes)
|
||||
require.NoError(t, err)
|
||||
|
||||
privateKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(unmarshalledPrivateKey)
|
||||
require.NoError(t, err)
|
||||
|
||||
peerID, err := peer.IDFromPrivateKey(unmarshalledPrivateKey)
|
||||
require.NoError(t, err)
|
||||
|
||||
record := &enr.Record{}
|
||||
record.Set(peerdas.Csc(custodyCount))
|
||||
record.Set(enode.Secp256k1(privateKey.PublicKey))
|
||||
|
||||
return record, peerID, privateKey
|
||||
}
|
||||
|
||||
func TestGetValidCustodyPeers(t *testing.T) {
|
||||
genesisValidatorRoot := make([]byte, 32)
|
||||
|
||||
for i := 0; i < 32; i++ {
|
||||
genesisValidatorRoot[i] = byte(i)
|
||||
}
|
||||
|
||||
service := &Service{
|
||||
cfg: &Config{},
|
||||
genesisTime: time.Now(),
|
||||
genesisValidatorsRoot: genesisValidatorRoot,
|
||||
peers: peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
ScorerParams: &scorers.Config{},
|
||||
}),
|
||||
}
|
||||
|
||||
ipAddrString, err := prysmNetwork.ExternalIPv4()
|
||||
require.NoError(t, err)
|
||||
ipAddr := net.ParseIP(ipAddrString)
|
||||
|
||||
custodyRequirement := params.BeaconConfig().CustodyRequirement
|
||||
dataColumnSidecarSubnetCount := params.BeaconConfig().DataColumnSidecarSubnetCount
|
||||
|
||||
// Peer 1 custodies exactly the same columns as we do.
// (We use the same key pair as ours for simplicity.)
|
||||
peer1Record, peer1ID, localPrivateKey := createPeer(t, 1, custodyRequirement)
|
||||
|
||||
// Peer 2 custodies all the columns.
|
||||
peer2Record, peer2ID, _ := createPeer(t, 2, dataColumnSidecarSubnetCount)
|
||||
|
||||
// Peer 3 custodies different columns from us (but the same count).
// (We use the same public key as peer 2 for simplicity.)
|
||||
peer3Record, peer3ID, _ := createPeer(t, 3, custodyRequirement)
|
||||
|
||||
// Peer 4 custodies fewer columns than we do.
|
||||
peer4Record, peer4ID, _ := createPeer(t, 4, custodyRequirement-1)
|
||||
|
||||
listener, err := service.createListener(ipAddr, localPrivateKey)
|
||||
require.NoError(t, err)
|
||||
|
||||
service.dv5Listener = listener
|
||||
|
||||
service.peers.Add(peer1Record, peer1ID, nil, network.DirOutbound)
|
||||
service.peers.Add(peer2Record, peer2ID, nil, network.DirOutbound)
|
||||
service.peers.Add(peer3Record, peer3ID, nil, network.DirOutbound)
|
||||
service.peers.Add(peer4Record, peer4ID, nil, network.DirOutbound)
|
||||
|
||||
actual, err := service.GetValidCustodyPeers([]peer.ID{peer1ID, peer2ID, peer3ID, peer4ID})
|
||||
require.NoError(t, err)
|
||||
|
||||
expected := []peer.ID{peer1ID, peer2ID}
|
||||
require.DeepSSZEqual(t, expected, actual)
|
||||
}
|
||||
|
||||
func TestCustodyCountFromRemotePeer(t *testing.T) {
|
||||
const (
|
||||
expected uint64 = 7
|
||||
pid = "test-id"
|
||||
)
|
||||
|
||||
csc := peerdas.Csc(expected)
|
||||
|
||||
// Define a nil record
|
||||
var nilRecord *enr.Record = nil
|
||||
|
||||
// Define an empty record (record with no `csc` entry)
|
||||
emptyRecord := &enr.Record{}
|
||||
|
||||
// Define a nominal record
|
||||
nominalRecord := &enr.Record{}
|
||||
nominalRecord.Set(csc)
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
record *enr.Record
|
||||
expected uint64
|
||||
}{
|
||||
{
|
||||
name: "nominal",
|
||||
record: nominalRecord,
|
||||
expected: expected,
|
||||
},
|
||||
{
|
||||
name: "nil",
|
||||
record: nilRecord,
|
||||
expected: params.BeaconConfig().CustodyRequirement,
|
||||
},
|
||||
{
|
||||
name: "empty",
|
||||
record: emptyRecord,
|
||||
expected: params.BeaconConfig().CustodyRequirement,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
// Create peers status.
|
||||
peers := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
ScorerParams: &scorers.Config{},
|
||||
})
|
||||
|
||||
// Add a new peer with the record.
|
||||
peers.Add(tc.record, pid, nil, network.DirOutbound)
|
||||
|
||||
// Create a new service.
|
||||
service := &Service{
|
||||
peers: peers,
|
||||
}
|
||||
|
||||
// Retrieve the custody count from the remote peer.
|
||||
actual := service.CustodyCountFromRemotePeer(pid)
|
||||
|
||||
// Verify the result.
|
||||
require.Equal(t, tc.expected, actual)
|
||||
})
|
||||
}
|
||||
|
||||
}
|
||||
@@ -15,7 +15,9 @@ import (
|
||||
ma "github.com/multiformats/go-multiaddr"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
|
||||
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/features"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
@@ -42,15 +44,17 @@ const (
|
||||
udp6
|
||||
)
|
||||
|
||||
const quickProtocolEnrKey = "quic"
|
||||
|
||||
type quicProtocol uint16
|
||||
|
||||
// quicProtocol is the "quic" key, which holds the QUIC port of the node.
|
||||
func (quicProtocol) ENRKey() string { return "quic" }
|
||||
func (quicProtocol) ENRKey() string { return quicProtocolEnrKey }
|
||||
|
||||
// RefreshENR uses an epoch to refresh the enr entry for our node
|
||||
// with the tracked committee ids for the epoch, allowing our node
|
||||
// to be dynamically discoverable by others given our tracked committee ids.
|
||||
func (s *Service) RefreshENR() {
|
||||
// RefreshPersistentSubnets checks that we are tracking our local persistent subnets for a variety of gossip topics.
|
||||
// This routine checks for our attestation, sync committee and data column subnets and updates them if they have
|
||||
// been rotated.
|
||||
func (s *Service) RefreshPersistentSubnets() {
|
||||
// return early if discv5 isn't running
|
||||
if s.dv5Listener == nil || !s.isInitialized() {
|
||||
return
|
||||
@@ -60,6 +64,10 @@ func (s *Service) RefreshENR() {
|
||||
log.WithError(err).Error("Could not initialize persistent subnets")
|
||||
return
|
||||
}
|
||||
if err := initializePersistentColumnSubnets(s.dv5Listener.LocalNode().ID()); err != nil {
|
||||
log.WithError(err).Error("Could not initialize persistent column subnets")
|
||||
return
|
||||
}
|
||||
|
||||
bitV := bitfield.NewBitvector64()
|
||||
committees := cache.SubnetIDs.GetAllSubnets()
|
||||
@@ -258,6 +266,10 @@ func (s *Service) createLocalNode(
|
||||
localNode.Set(quicEntry)
|
||||
}
|
||||
|
||||
if params.PeerDASEnabled() {
|
||||
localNode.Set(peerdas.Csc(peerdas.CustodySubnetCount()))
|
||||
}
|
||||
|
||||
localNode.SetFallbackIP(ipAddr)
|
||||
localNode.SetFallbackUDP(udpPort)
|
||||
|
||||
@@ -346,6 +358,8 @@ func (s *Service) filterPeer(node *enode.Node) bool {
|
||||
|
||||
// Ignore nodes that are already active.
|
||||
if s.peers.IsActive(peerData.ID) {
|
||||
// Constantly update enr for known peers
|
||||
s.peers.UpdateENR(node.Record(), peerData.ID)
|
||||
return false
|
||||
}
|
||||
|
||||
|
||||
@@ -20,8 +20,11 @@ import (
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
|
||||
mock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/peerdata"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/scorers"
|
||||
@@ -37,7 +40,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
|
||||
var discoveryWaitTime = 1 * time.Second
|
||||
@@ -131,6 +133,10 @@ func TestStartDiscV5_DiscoverAllPeers(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestCreateLocalNode(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.Eip7594ForkEpoch = 1
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
testCases := []struct {
|
||||
name string
|
||||
cfg *Config
|
||||
@@ -227,6 +233,11 @@ func TestCreateLocalNode(t *testing.T) {
|
||||
syncSubnets := new([]byte)
|
||||
require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(syncCommsSubnetEnrKey, syncSubnets)))
|
||||
require.DeepSSZEqual(t, []byte{0}, *syncSubnets)
|
||||
|
||||
// Check custody_subnet_count config.
|
||||
custodySubnetCount := new(uint64)
|
||||
require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(peerdas.CustodySubnetCountEnrKey, custodySubnetCount)))
|
||||
require.Equal(t, uint64(1), *custodySubnetCount)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -601,7 +612,7 @@ func TestRefreshENR_ForkBoundaries(t *testing.T) {
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
s := tt.svcBuilder(t)
|
||||
s.RefreshENR()
|
||||
s.RefreshPersistentSubnets()
|
||||
tt.postValidation(t, s)
|
||||
s.dv5Listener.Close()
|
||||
cache.SubnetIDs.EmptyAllCaches()
|
||||
|
||||
@@ -121,7 +121,7 @@ func (s *Service) topicScoreParams(topic string) (*pubsub.TopicScoreParams, erro
|
||||
return defaultAttesterSlashingTopicParams(), nil
|
||||
case strings.Contains(topic, GossipBlsToExecutionChangeMessage):
|
||||
return defaultBlsToExecutionChangeTopicParams(), nil
|
||||
case strings.Contains(topic, GossipBlobSidecarMessage):
|
||||
case strings.Contains(topic, GossipBlobSidecarMessage), strings.Contains(topic, GossipDataColumnSidecarMessage):
|
||||
// TODO(Deneb): Using the default block scoring. But this should be updated.
|
||||
return defaultBlockTopicParams(), nil
|
||||
default:
|
||||
|
||||
@@ -22,6 +22,7 @@ var gossipTopicMappings = map[string]func() proto.Message{
|
||||
SyncCommitteeSubnetTopicFormat: func() proto.Message { return ðpb.SyncCommitteeMessage{} },
|
||||
BlsToExecutionChangeSubnetTopicFormat: func() proto.Message { return ðpb.SignedBLSToExecutionChange{} },
|
||||
BlobSubnetTopicFormat: func() proto.Message { return ðpb.BlobSidecar{} },
|
||||
DataColumnSubnetTopicFormat: func() proto.Message { return ðpb.DataColumnSidecar{} },
|
||||
}
|
||||
|
||||
// GossipTopicMappings is a function to return the assigned data type
|
||||
|
||||
@@ -3,6 +3,7 @@ package p2p
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
"github.com/libp2p/go-libp2p/core/connmgr"
|
||||
@@ -28,6 +29,12 @@ type P2P interface {
|
||||
ConnectionHandler
|
||||
PeersProvider
|
||||
MetadataProvider
|
||||
CustodyHandler
|
||||
}
|
||||
|
||||
type Acceser interface {
|
||||
Broadcaster
|
||||
PeerManager
|
||||
}
|
||||
|
||||
// Broadcaster broadcasts messages to peers over the p2p pubsub protocol.
|
||||
@@ -36,6 +43,7 @@ type Broadcaster interface {
|
||||
BroadcastAttestation(ctx context.Context, subnet uint64, att ethpb.Att) error
|
||||
BroadcastSyncCommitteeMessage(ctx context.Context, subnet uint64, sMsg *ethpb.SyncCommitteeMessage) error
|
||||
BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.BlobSidecar) error
|
||||
BroadcastDataColumn(ctx context.Context, columnSubnet uint64, dataColumnSidecar *ethpb.DataColumnSidecar) error
|
||||
}
|
||||
|
||||
// SetStreamHandler configures p2p to handle streams of a certain topic ID.
|
||||
@@ -81,8 +89,9 @@ type PeerManager interface {
|
||||
PeerID() peer.ID
|
||||
Host() host.Host
|
||||
ENR() *enr.Record
|
||||
NodeID() enode.ID
|
||||
DiscoveryAddresses() ([]multiaddr.Multiaddr, error)
|
||||
RefreshENR()
|
||||
RefreshPersistentSubnets()
|
||||
FindPeersWithSubnet(ctx context.Context, topic string, subIndex uint64, threshold int) (bool, error)
|
||||
AddPingMethod(reqFunc func(ctx context.Context, id peer.ID) error)
|
||||
}
|
||||
@@ -102,3 +111,8 @@ type MetadataProvider interface {
|
||||
Metadata() metadata.Metadata
|
||||
MetadataSeq() uint64
|
||||
}
|
||||
|
||||
type CustodyHandler interface {
|
||||
CustodyCountFromRemotePeer(peer.ID) uint64
|
||||
GetValidCustodyPeers([]peer.ID) ([]peer.ID, error)
|
||||
}
|
||||
|
||||
@@ -60,17 +60,25 @@ var (
|
||||
"the subnet. The beacon node increments this counter when the broadcast is blocked " +
|
||||
"until a subnet peer can be found.",
|
||||
})
|
||||
blobSidecarCommitteeBroadcasts = promauto.NewCounter(prometheus.CounterOpts{
|
||||
blobSidecarBroadcasts = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "p2p_blob_sidecar_committee_broadcasts",
|
||||
Help: "The number of blob sidecar committee messages that were broadcast with no peer on.",
|
||||
Help: "The number of blob sidecar messages that were broadcast with no peer on.",
|
||||
})
|
||||
syncCommitteeBroadcastAttempts = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "p2p_sync_committee_subnet_attempted_broadcasts",
|
||||
Help: "The number of sync committee that were attempted to be broadcast.",
|
||||
})
|
||||
blobSidecarCommitteeBroadcastAttempts = promauto.NewCounter(prometheus.CounterOpts{
|
||||
blobSidecarBroadcastAttempts = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "p2p_blob_sidecar_committee_attempted_broadcasts",
|
||||
Help: "The number of blob sidecar committee messages that were attempted to be broadcast.",
|
||||
Help: "The number of blob sidecar messages that were attempted to be broadcast.",
|
||||
})
|
||||
dataColumnSidecarBroadcasts = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "p2p_data_column_sidecar_broadcasts",
|
||||
Help: "The number of data column sidecar messages that were broadcasted.",
|
||||
})
|
||||
dataColumnSidecarBroadcastAttempts = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "p2p_data_column_sidecar_attempted_broadcasts",
|
||||
Help: "The number of data column sidecar messages that were attempted to be broadcast.",
|
||||
})
|
||||
|
||||
// Gossip Tracer Metrics
|
||||
|
||||
@@ -159,6 +159,14 @@ func (p *Status) Add(record *enr.Record, pid peer.ID, address ma.Multiaddr, dire
|
||||
p.addIpToTracker(pid)
|
||||
}
|
||||
|
||||
func (p *Status) UpdateENR(record *enr.Record, pid peer.ID) {
|
||||
p.store.Lock()
|
||||
defer p.store.Unlock()
|
||||
if peerData, ok := p.store.PeerData(pid); ok {
|
||||
peerData.Enr = record
|
||||
}
|
||||
}
|
||||
|
||||
// Address returns the multiaddress of the given remote peer.
|
||||
// This will error if the peer does not exist.
|
||||
func (p *Status) Address(pid peer.ID) (ma.Multiaddr, error) {
|
||||
|
||||
@@ -90,7 +90,7 @@ func TestService_CanSubscribe(t *testing.T) {
|
||||
formatting := []interface{}{digest}
|
||||
|
||||
// Special case for attestation subnets which have a second formatting placeholder.
|
||||
if topic == AttestationSubnetTopicFormat || topic == SyncCommitteeSubnetTopicFormat || topic == BlobSubnetTopicFormat {
|
||||
if topic == AttestationSubnetTopicFormat || topic == SyncCommitteeSubnetTopicFormat || topic == BlobSubnetTopicFormat || topic == DataColumnSubnetTopicFormat {
|
||||
formatting = append(formatting, 0 /* some subnet ID */)
|
||||
}
|
||||
|
||||
|
||||
@@ -43,6 +43,12 @@ const BlobSidecarsByRangeName = "/blob_sidecars_by_range"
|
||||
// BlobSidecarsByRootName is the name for the BlobSidecarsByRoot v1 message topic.
|
||||
const BlobSidecarsByRootName = "/blob_sidecars_by_root"
|
||||
|
||||
// DataColumnSidecarsByRootName is the name for the DataColumnSidecarsByRoot v1 message topic.
|
||||
const DataColumnSidecarsByRootName = "/data_column_sidecars_by_root"
|
||||
|
||||
// DataColumnSidecarsByRangeName is the name for the DataColumnSidecarsByRange v1 message topic.
|
||||
const DataColumnSidecarsByRangeName = "/data_column_sidecars_by_range"
|
||||
|
||||
const (
|
||||
// V1 RPC Topics
|
||||
// RPCStatusTopicV1 defines the v1 topic for the status rpc method.
|
||||
@@ -65,6 +71,12 @@ const (
|
||||
// RPCBlobSidecarsByRootTopicV1 is a topic for requesting blob sidecars by their block root. New in deneb.
|
||||
// /eth2/beacon_chain/req/blob_sidecars_by_root/1/
|
||||
RPCBlobSidecarsByRootTopicV1 = protocolPrefix + BlobSidecarsByRootName + SchemaVersionV1
|
||||
// RPCDataColumnSidecarsByRootTopicV1 is a topic for requesting data column sidecars by their block root. New in PeerDAS.
|
||||
// /eth2/beacon_chain/req/data_column_sidecars_by_root/1
|
||||
RPCDataColumnSidecarsByRootTopicV1 = protocolPrefix + DataColumnSidecarsByRootName + SchemaVersionV1
|
||||
// RPCDataColumnSidecarsByRangeTopicV1 is a topic for requesting data column sidecars by their slot. New in PeerDAS.
|
||||
// /eth2/beacon_chain/req/data_column_sidecars_by_range/1
|
||||
RPCDataColumnSidecarsByRangeTopicV1 = protocolPrefix + DataColumnSidecarsByRangeName + SchemaVersionV1
|
||||
|
||||
// V2 RPC Topics
|
||||
// RPCBlocksByRangeTopicV2 defines v2 the topic for the blocks by range rpc method.
|
||||
@@ -101,6 +113,10 @@ var RPCTopicMappings = map[string]interface{}{
|
||||
RPCBlobSidecarsByRangeTopicV1: new(pb.BlobSidecarsByRangeRequest),
|
||||
// BlobSidecarsByRoot v1 Message
|
||||
RPCBlobSidecarsByRootTopicV1: new(p2ptypes.BlobSidecarsByRootReq),
|
||||
// DataColumnSidecarsByRange v1 Message
|
||||
RPCDataColumnSidecarsByRangeTopicV1: new(pb.DataColumnSidecarsByRangeRequest),
|
||||
// DataColumnSidecarsByRoot v1 Message
|
||||
RPCDataColumnSidecarsByRootTopicV1: new(p2ptypes.DataColumnSidecarsByRootReq),
|
||||
}
|
||||
|
||||
// Maps all registered protocol prefixes.
|
||||
@@ -119,6 +135,8 @@ var messageMapping = map[string]bool{
|
||||
MetadataMessageName: true,
|
||||
BlobSidecarsByRangeName: true,
|
||||
BlobSidecarsByRootName: true,
|
||||
DataColumnSidecarsByRootName: true,
|
||||
DataColumnSidecarsByRangeName: true,
|
||||
}
|
||||
|
||||
// Maps all the RPC messages which are to updated in altair.
|
||||
|
||||
@@ -226,7 +226,7 @@ func (s *Service) Start() {
|
||||
}
|
||||
// Initialize metadata according to the
|
||||
// current epoch.
|
||||
s.RefreshENR()
|
||||
s.RefreshPersistentSubnets()
|
||||
|
||||
// Periodic functions.
|
||||
async.RunEvery(s.ctx, params.BeaconConfig().TtfbTimeoutDuration(), func() {
|
||||
@@ -234,7 +234,7 @@ func (s *Service) Start() {
|
||||
})
|
||||
async.RunEvery(s.ctx, 30*time.Minute, s.Peers().Prune)
|
||||
async.RunEvery(s.ctx, time.Duration(params.BeaconConfig().RespTimeout)*time.Second, s.updateMetrics)
|
||||
async.RunEvery(s.ctx, refreshRate, s.RefreshENR)
|
||||
async.RunEvery(s.ctx, refreshRate, s.RefreshPersistentSubnets)
|
||||
async.RunEvery(s.ctx, 1*time.Minute, func() {
|
||||
inboundQUICCount := len(s.peers.InboundConnectedWithProtocol(peers.QUIC))
|
||||
inboundTCPCount := len(s.peers.InboundConnectedWithProtocol(peers.TCP))
|
||||
@@ -358,6 +358,15 @@ func (s *Service) ENR() *enr.Record {
|
||||
return s.dv5Listener.Self().Record()
|
||||
}
|
||||
|
||||
// NodeID returns the local node's node ID
|
||||
// for discovery.
|
||||
func (s *Service) NodeID() enode.ID {
|
||||
if s.dv5Listener == nil {
|
||||
return [32]byte{}
|
||||
}
|
||||
return s.dv5Listener.Self().ID()
|
||||
}
|
||||
|
||||
// DiscoveryAddresses represents our enr addresses as multiaddresses.
|
||||
func (s *Service) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
|
||||
if s.dv5Listener == nil {
|
||||
|
||||
@@ -11,8 +11,11 @@ import (
|
||||
"github.com/holiman/uint256"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"go.opencensus.io/trace"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
|
||||
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
@@ -33,8 +36,8 @@ var syncCommsSubnetEnrKey = params.BeaconNetworkConfig().SyncCommsSubnetKey
// The value used with the subnet, in order
// to create an appropriate key to retrieve
// the relevant lock. This is used to differentiate
// sync subnets from attestation subnets. This is deliberately
// chosen as more than 64(attestation subnet count).
// sync subnets from others. This is deliberately
// chosen as more than 64 (attestation subnet count).
const syncLockerVal = 100

// The value used with the blob sidecar subnet, in order
@@ -44,6 +47,13 @@ const syncLockerVal = 100
// chosen more than sync and attestation subnet combined.
const blobSubnetLockerVal = 110

// The value used with the data column sidecar subnet, in order
// to create an appropriate key to retrieve
// the relevant lock. This is used to differentiate
// data column subnets from others. This is deliberately
// chosen more than sync, attestation and blob subnet (6) combined.
const dataColumnSubnetVal = 150
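These offsets exist so that different subnet families never collide in the shared subnet locker map used further down in this file. A hedged sketch of how a lock key can be derived per family, using the constant values from this hunk (the helper name lockerKey is illustrative, not from the source):

package main

import "fmt"

const (
	syncLockerVal       = 100 // sync subnets: subnet + 100
	blobSubnetLockerVal = 110 // blob subnets: subnet + 110
	dataColumnSubnetVal = 150 // data column subnets: subnet + 150
)

// lockerKey returns the key used to look up the shared subnet mutex for a
// given family of subnets, matching the offsets defined above.
func lockerKey(family string, subnet uint64) uint64 {
	switch family {
	case "sync":
		return subnet + syncLockerVal
	case "blob":
		return subnet + blobSubnetLockerVal
	case "data_column":
		return subnet + dataColumnSubnetVal
	default: // attestation subnets use the raw subnet index
		return subnet
	}
}

func main() {
	fmt.Println(lockerKey("data_column", 3)) // 153
}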
|
||||
|
||||
// FindPeersWithSubnet performs a network search for peers
|
||||
// subscribed to a particular subnet. Then it tries to connect
|
||||
// with those peers. This method will block until either:
|
||||
@@ -72,8 +82,10 @@ func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string,
|
||||
iterator = filterNodes(ctx, iterator, s.filterPeerForAttSubnet(index))
|
||||
case strings.Contains(topic, GossipSyncCommitteeMessage):
|
||||
iterator = filterNodes(ctx, iterator, s.filterPeerForSyncSubnet(index))
|
||||
case strings.Contains(topic, GossipDataColumnSidecarMessage):
|
||||
iterator = filterNodes(ctx, iterator, s.filterPeerForDataColumnsSubnet(index))
|
||||
default:
|
||||
return false, errors.New("no subnet exists for provided topic")
|
||||
return false, errors.Errorf("no subnet exists for provided topic: %s", topic)
|
||||
}
|
||||
|
||||
wg := new(sync.WaitGroup)
|
||||
@@ -153,6 +165,22 @@ func (s *Service) filterPeerForSyncSubnet(index uint64) func(node *enode.Node) b
}
}

// returns a method that filters peers specifically for a particular data column subnet.
func (s *Service) filterPeerForDataColumnsSubnet(index uint64) func(node *enode.Node) bool {
return func(node *enode.Node) bool {
if !s.filterPeer(node) {
return false
}

subnets, err := dataColumnSubnets(node.ID(), node.Record())
if err != nil {
return false
}

return subnets[index]
}
}
|
||||
|
||||
// lower threshold to broadcast object compared to searching
|
||||
// for a subnet. So that even in the event of poor peer
|
||||
// connectivity, we can still broadcast an attestation.
|
||||
@@ -206,6 +234,25 @@ func initializePersistentSubnets(id enode.ID, epoch primitives.Epoch) error {
return nil
}

func initializePersistentColumnSubnets(id enode.ID) error {
_, ok, expTime := cache.ColumnSubnetIDs.GetColumnSubnets()
if ok && expTime.After(time.Now()) {
return nil
}
subsMap, err := peerdas.CustodyColumnSubnets(id, peerdas.CustodySubnetCount())
if err != nil {
return err
}

subs := make([]uint64, 0, len(subsMap))
for sub := range subsMap {
subs = append(subs, sub)
}

cache.ColumnSubnetIDs.AddColumnSubnets(subs)
return nil
}
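initializePersistentColumnSubnets is a check-then-refresh cache: if the cached column subnets have not expired they are reused, otherwise they are recomputed from the node ID and stored again. A rough sketch of that pattern with a hypothetical in-memory cache standing in for cache.ColumnSubnetIDs:

package main

import (
	"fmt"
	"time"
)

// subnetCache loosely mirrors the shape of cache.ColumnSubnetIDs: a set of
// subnet IDs with an expiry, refreshed only when stale.
type subnetCache struct {
	subnets []uint64
	expires time.Time
}

func (c *subnetCache) refresh(compute func() map[uint64]bool, ttl time.Duration) {
	if c.subnets != nil && c.expires.After(time.Now()) {
		return // cached value is still valid, mirroring the early return above
	}
	subsMap := compute()
	subs := make([]uint64, 0, len(subsMap))
	for sub := range subsMap {
		subs = append(subs, sub)
	}
	c.subnets, c.expires = subs, time.Now().Add(ttl)
}

func main() {
	c := &subnetCache{}
	c.refresh(func() map[uint64]bool { return map[uint64]bool{1: true, 5: true} }, time.Minute)
	fmt.Println(len(c.subnets)) // 2
}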
|
||||
|
||||
// Spec pseudocode definition:
|
||||
//
|
||||
// def compute_subscribed_subnets(node_id: NodeID, epoch: Epoch) -> Sequence[SubnetID]:
|
||||
@@ -329,6 +376,25 @@ func syncSubnets(record *enr.Record) ([]uint64, error) {
return committeeIdxs, nil
}

func dataColumnSubnets(nodeID enode.ID, record *enr.Record) (map[uint64]bool, error) {
custodyRequirement := params.BeaconConfig().CustodyRequirement

// Retrieve the custody count from the ENR.
custodyCount, err := peerdas.CustodyCountFromRecord(record)
if err != nil {
// If we fail to retrieve the custody count, we default to the custody requirement.
custodyCount = custodyRequirement
}

// Retrieve the custody subnets from the remote peer
custodyColumnsSubnets, err := peerdas.CustodyColumnSubnets(nodeID, custodyCount)
if err != nil {
return nil, errors.Wrap(err, "custody column subnets")
}

return custodyColumnsSubnets, nil
}
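Note the fallback: if a peer's ENR carries no custody count, dataColumnSubnets assumes the minimum CustodyRequirement so discovery can still evaluate that peer. A sketch of the same fallback-then-lookup flow with the peerdas helpers stubbed out (only their rough shape is assumed here, and the custody derivation is a placeholder):

package main

import "fmt"

// custodyCountFromRecord is a stand-in for peerdas.CustodyCountFromRecord;
// it fails when the ENR has no custody entry.
func custodyCountFromRecord(hasEntry bool, value uint64) (uint64, error) {
	if !hasEntry {
		return 0, fmt.Errorf("no custody count in record")
	}
	return value, nil
}

// custodySubnets mimics the shape of peerdas.CustodyColumnSubnets: a set of
// subnet indices derived from the node and its custody count.
func custodySubnets(custodyCount uint64) map[uint64]bool {
	subnets := make(map[uint64]bool, custodyCount)
	for i := uint64(0); i < custodyCount; i++ {
		subnets[i] = true // placeholder derivation, not the real mapping
	}
	return subnets
}

func main() {
	const custodyRequirement = 4 // assumed default, mirroring CustodyRequirement

	custodyCount, err := custodyCountFromRecord(false, 0)
	if err != nil {
		// Same fallback as dataColumnSubnets: assume the minimum requirement.
		custodyCount = custodyRequirement
	}

	subnets := custodySubnets(custodyCount)
	fmt.Println(subnets[2]) // true: peer is assumed to custody this subnet
}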
|
||||
|
||||
// Parses the attestation subnets ENR entry in a node and extracts its value
|
||||
// as a bitvector for further manipulation.
|
||||
func attBitvector(record *enr.Record) (bitfield.Bitvector64, error) {
|
||||
@@ -355,10 +421,11 @@ func syncBitvector(record *enr.Record) (bitfield.Bitvector4, error) {

// The subnet locker is a map which keeps track of all
// mutexes stored per subnet. This locker is re-used
// between both the attestation and sync subnets. In
// order to differentiate between attestation and sync
// subnets. Sync subnets are stored by (subnet+syncLockerVal). This
// is to prevent conflicts while allowing both subnets
// between the attestation, sync, blob and data column subnets.
// Sync subnets are stored by (subnet+syncLockerVal).
// Blob subnets are stored by (subnet+blobSubnetLockerVal).
// Data column subnets are stored by (subnet+dataColumnSubnetVal).
// This is to prevent conflicts while allowing subnets
// to use a single locker.
func (s *Service) subnetLocker(i uint64) *sync.RWMutex {
s.subnetsLockLock.Lock()
|
||||
|
||||
@@ -17,9 +17,11 @@ go_library(
|
||||
"//beacon-chain:__subpackages__",
|
||||
],
|
||||
deps = [
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/p2p/encoder:go_default_library",
|
||||
"//beacon-chain/p2p/peers:go_default_library",
|
||||
"//beacon-chain/p2p/peers/scorers:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/metadata:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//crypto:go_default_library",
|
||||
|
||||
@@ -3,6 +3,7 @@ package testing
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
"github.com/libp2p/go-libp2p/core/control"
|
||||
@@ -27,148 +28,166 @@ func NewFuzzTestP2P() *FakeP2P {
|
||||
}
|
||||
|
||||
// Encoding -- fake.
|
||||
func (_ *FakeP2P) Encoding() encoder.NetworkEncoding {
|
||||
func (*FakeP2P) Encoding() encoder.NetworkEncoding {
|
||||
return &encoder.SszNetworkEncoder{}
|
||||
}
|
||||
|
||||
// AddConnectionHandler -- fake.
|
||||
func (_ *FakeP2P) AddConnectionHandler(_, _ func(ctx context.Context, id peer.ID) error) {
|
||||
func (*FakeP2P) AddConnectionHandler(_, _ func(ctx context.Context, id peer.ID) error) {
|
||||
|
||||
}
|
||||
|
||||
// AddDisconnectionHandler -- fake.
|
||||
func (_ *FakeP2P) AddDisconnectionHandler(_ func(ctx context.Context, id peer.ID) error) {
|
||||
func (*FakeP2P) AddDisconnectionHandler(_ func(ctx context.Context, id peer.ID) error) {
|
||||
}
|
||||
|
||||
// AddPingMethod -- fake.
|
||||
func (_ *FakeP2P) AddPingMethod(_ func(ctx context.Context, id peer.ID) error) {
|
||||
func (*FakeP2P) AddPingMethod(_ func(ctx context.Context, id peer.ID) error) {
|
||||
|
||||
}
|
||||
|
||||
// PeerID -- fake.
|
||||
func (_ *FakeP2P) PeerID() peer.ID {
|
||||
func (*FakeP2P) PeerID() peer.ID {
|
||||
return "fake"
|
||||
}
|
||||
|
||||
// ENR returns the enr of the local peer.
|
||||
func (_ *FakeP2P) ENR() *enr.Record {
|
||||
func (*FakeP2P) ENR() *enr.Record {
|
||||
return new(enr.Record)
|
||||
}
|
||||
|
||||
// NodeID returns the node id of the local peer.
|
||||
func (*FakeP2P) NodeID() enode.ID {
|
||||
return [32]byte{}
|
||||
}
|
||||
|
||||
// DiscoveryAddresses -- fake
|
||||
func (_ *FakeP2P) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
|
||||
func (*FakeP2P) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// FindPeersWithSubnet mocks the p2p func.
|
||||
func (_ *FakeP2P) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ int) (bool, error) {
|
||||
func (*FakeP2P) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ int) (bool, error) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// RefreshENR mocks the p2p func.
|
||||
func (_ *FakeP2P) RefreshENR() {}
|
||||
func (*FakeP2P) RefreshPersistentSubnets() {}
|
||||
|
||||
// LeaveTopic -- fake.
|
||||
func (_ *FakeP2P) LeaveTopic(_ string) error {
|
||||
func (*FakeP2P) LeaveTopic(_ string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Metadata -- fake.
|
||||
func (_ *FakeP2P) Metadata() metadata.Metadata {
|
||||
func (*FakeP2P) Metadata() metadata.Metadata {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Peers -- fake.
|
||||
func (_ *FakeP2P) Peers() *peers.Status {
|
||||
func (*FakeP2P) Peers() *peers.Status {
|
||||
return nil
|
||||
}
|
||||
|
||||
// PublishToTopic -- fake.
|
||||
func (_ *FakeP2P) PublishToTopic(_ context.Context, _ string, _ []byte, _ ...pubsub.PubOpt) error {
|
||||
func (*FakeP2P) PublishToTopic(_ context.Context, _ string, _ []byte, _ ...pubsub.PubOpt) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Send -- fake.
|
||||
func (_ *FakeP2P) Send(_ context.Context, _ interface{}, _ string, _ peer.ID) (network.Stream, error) {
|
||||
func (*FakeP2P) Send(_ context.Context, _ interface{}, _ string, _ peer.ID) (network.Stream, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// PubSub -- fake.
|
||||
func (_ *FakeP2P) PubSub() *pubsub.PubSub {
|
||||
func (*FakeP2P) PubSub() *pubsub.PubSub {
|
||||
return nil
|
||||
}
|
||||
|
||||
// MetadataSeq -- fake.
|
||||
func (_ *FakeP2P) MetadataSeq() uint64 {
|
||||
func (*FakeP2P) MetadataSeq() uint64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
// SetStreamHandler -- fake.
|
||||
func (_ *FakeP2P) SetStreamHandler(_ string, _ network.StreamHandler) {
|
||||
func (*FakeP2P) SetStreamHandler(_ string, _ network.StreamHandler) {
|
||||
|
||||
}
|
||||
|
||||
// SubscribeToTopic -- fake.
|
||||
func (_ *FakeP2P) SubscribeToTopic(_ string, _ ...pubsub.SubOpt) (*pubsub.Subscription, error) {
|
||||
func (*FakeP2P) SubscribeToTopic(_ string, _ ...pubsub.SubOpt) (*pubsub.Subscription, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// JoinTopic -- fake.
|
||||
func (_ *FakeP2P) JoinTopic(_ string, _ ...pubsub.TopicOpt) (*pubsub.Topic, error) {
|
||||
func (*FakeP2P) JoinTopic(_ string, _ ...pubsub.TopicOpt) (*pubsub.Topic, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Host -- fake.
|
||||
func (_ *FakeP2P) Host() host.Host {
|
||||
func (*FakeP2P) Host() host.Host {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Disconnect -- fake.
|
||||
func (_ *FakeP2P) Disconnect(_ peer.ID) error {
|
||||
func (*FakeP2P) Disconnect(_ peer.ID) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Broadcast -- fake.
|
||||
func (_ *FakeP2P) Broadcast(_ context.Context, _ proto.Message) error {
|
||||
func (*FakeP2P) Broadcast(_ context.Context, _ proto.Message) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// BroadcastAttestation -- fake.
|
||||
func (_ *FakeP2P) BroadcastAttestation(_ context.Context, _ uint64, _ ethpb.Att) error {
|
||||
func (*FakeP2P) BroadcastAttestation(_ context.Context, _ uint64, _ ethpb.Att) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// BroadcastSyncCommitteeMessage -- fake.
|
||||
func (_ *FakeP2P) BroadcastSyncCommitteeMessage(_ context.Context, _ uint64, _ *ethpb.SyncCommitteeMessage) error {
|
||||
func (*FakeP2P) BroadcastSyncCommitteeMessage(_ context.Context, _ uint64, _ *ethpb.SyncCommitteeMessage) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// BroadcastBlob -- fake.
|
||||
func (_ *FakeP2P) BroadcastBlob(_ context.Context, _ uint64, _ *ethpb.BlobSidecar) error {
|
||||
func (*FakeP2P) BroadcastBlob(_ context.Context, _ uint64, _ *ethpb.BlobSidecar) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// BroadcastDataColumn -- fake.
|
||||
func (*FakeP2P) BroadcastDataColumn(_ context.Context, _ uint64, _ *ethpb.DataColumnSidecar) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// InterceptPeerDial -- fake.
|
||||
func (_ *FakeP2P) InterceptPeerDial(peer.ID) (allow bool) {
|
||||
func (*FakeP2P) InterceptPeerDial(peer.ID) (allow bool) {
|
||||
return true
|
||||
}
|
||||
|
||||
// InterceptAddrDial -- fake.
|
||||
func (_ *FakeP2P) InterceptAddrDial(peer.ID, multiaddr.Multiaddr) (allow bool) {
|
||||
func (*FakeP2P) InterceptAddrDial(peer.ID, multiaddr.Multiaddr) (allow bool) {
|
||||
return true
|
||||
}
|
||||
|
||||
// InterceptAccept -- fake.
|
||||
func (_ *FakeP2P) InterceptAccept(_ network.ConnMultiaddrs) (allow bool) {
|
||||
func (*FakeP2P) InterceptAccept(_ network.ConnMultiaddrs) (allow bool) {
|
||||
return true
|
||||
}
|
||||
|
||||
// InterceptSecured -- fake.
|
||||
func (_ *FakeP2P) InterceptSecured(network.Direction, peer.ID, network.ConnMultiaddrs) (allow bool) {
|
||||
func (*FakeP2P) InterceptSecured(network.Direction, peer.ID, network.ConnMultiaddrs) (allow bool) {
|
||||
return true
|
||||
}
|
||||
|
||||
// InterceptUpgraded -- fake.
|
||||
func (_ *FakeP2P) InterceptUpgraded(network.Conn) (allow bool, reason control.DisconnectReason) {
|
||||
func (*FakeP2P) InterceptUpgraded(network.Conn) (allow bool, reason control.DisconnectReason) {
|
||||
return true, 0
|
||||
}
|
||||
|
||||
func (*FakeP2P) CustodyCountFromRemotePeer(peer.ID) uint64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (*FakeP2P) GetValidCustodyPeers(peers []peer.ID) ([]peer.ID, error) {
|
||||
return peers, nil
|
||||
}
|
||||
|
||||
@@ -48,6 +48,12 @@ func (m *MockBroadcaster) BroadcastBlob(context.Context, uint64, *ethpb.BlobSide
|
||||
return nil
|
||||
}
|
||||
|
||||
// BroadcastDataColumn broadcasts a data column for mock.
|
||||
func (m *MockBroadcaster) BroadcastDataColumn(context.Context, uint64, *ethpb.DataColumnSidecar) error {
|
||||
m.BroadcastCalled.Store(true)
|
||||
return nil
|
||||
}
|
||||
|
||||
// NumMessages returns the number of messages broadcasted.
|
||||
func (m *MockBroadcaster) NumMessages() int {
|
||||
m.msgLock.Lock()
|
||||
|
||||
@@ -18,12 +18,12 @@ type MockHost struct {
|
||||
}
|
||||
|
||||
// ID --
|
||||
func (_ *MockHost) ID() peer.ID {
|
||||
func (*MockHost) ID() peer.ID {
|
||||
return ""
|
||||
}
|
||||
|
||||
// Peerstore --
|
||||
func (_ *MockHost) Peerstore() peerstore.Peerstore {
|
||||
func (*MockHost) Peerstore() peerstore.Peerstore {
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -33,46 +33,46 @@ func (m *MockHost) Addrs() []ma.Multiaddr {
|
||||
}
|
||||
|
||||
// Network --
|
||||
func (_ *MockHost) Network() network.Network {
|
||||
func (*MockHost) Network() network.Network {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Mux --
|
||||
func (_ *MockHost) Mux() protocol.Switch {
|
||||
func (*MockHost) Mux() protocol.Switch {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Connect --
|
||||
func (_ *MockHost) Connect(_ context.Context, _ peer.AddrInfo) error {
|
||||
func (*MockHost) Connect(_ context.Context, _ peer.AddrInfo) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetStreamHandler --
|
||||
func (_ *MockHost) SetStreamHandler(_ protocol.ID, _ network.StreamHandler) {}
|
||||
func (*MockHost) SetStreamHandler(_ protocol.ID, _ network.StreamHandler) {}
|
||||
|
||||
// SetStreamHandlerMatch --
|
||||
func (_ *MockHost) SetStreamHandlerMatch(protocol.ID, func(id protocol.ID) bool, network.StreamHandler) {
|
||||
func (*MockHost) SetStreamHandlerMatch(protocol.ID, func(id protocol.ID) bool, network.StreamHandler) {
|
||||
}
|
||||
|
||||
// RemoveStreamHandler --
|
||||
func (_ *MockHost) RemoveStreamHandler(_ protocol.ID) {}
|
||||
func (*MockHost) RemoveStreamHandler(_ protocol.ID) {}
|
||||
|
||||
// NewStream --
|
||||
func (_ *MockHost) NewStream(_ context.Context, _ peer.ID, _ ...protocol.ID) (network.Stream, error) {
|
||||
func (*MockHost) NewStream(_ context.Context, _ peer.ID, _ ...protocol.ID) (network.Stream, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Close --
|
||||
func (_ *MockHost) Close() error {
|
||||
func (*MockHost) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// ConnManager --
|
||||
func (_ *MockHost) ConnManager() connmgr.ConnManager {
|
||||
func (*MockHost) ConnManager() connmgr.ConnManager {
|
||||
return nil
|
||||
}
|
||||
|
||||
// EventBus --
|
||||
func (_ *MockHost) EventBus() event.Bus {
|
||||
func (*MockHost) EventBus() event.Bus {
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"errors"
|
||||
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
@@ -20,7 +21,7 @@ type MockPeerManager struct {
|
||||
}
|
||||
|
||||
// Disconnect .
|
||||
func (_ *MockPeerManager) Disconnect(peer.ID) error {
|
||||
func (*MockPeerManager) Disconnect(peer.ID) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -39,6 +40,11 @@ func (m MockPeerManager) ENR() *enr.Record {
|
||||
return m.Enr
|
||||
}
|
||||
|
||||
// NodeID .
|
||||
func (m MockPeerManager) NodeID() enode.ID {
|
||||
return [32]byte{}
|
||||
}
|
||||
|
||||
// DiscoveryAddresses .
|
||||
func (m MockPeerManager) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
|
||||
if m.FailDiscoveryAddr {
|
||||
@@ -48,12 +54,12 @@ func (m MockPeerManager) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
|
||||
}
|
||||
|
||||
// RefreshENR .
|
||||
func (_ MockPeerManager) RefreshENR() {}
|
||||
func (MockPeerManager) RefreshPersistentSubnets() {}
|
||||
|
||||
// FindPeersWithSubnet .
|
||||
func (_ MockPeerManager) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ int) (bool, error) {
|
||||
func (MockPeerManager) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ int) (bool, error) {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// AddPingMethod .
|
||||
func (_ MockPeerManager) AddPingMethod(_ func(ctx context.Context, id peer.ID) error) {}
|
||||
func (MockPeerManager) AddPingMethod(_ func(ctx context.Context, id peer.ID) error) {}
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
core "github.com/libp2p/go-libp2p/core"
|
||||
@@ -22,9 +23,11 @@ import (
|
||||
swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing"
|
||||
"github.com/multiformats/go-multiaddr"
|
||||
ssz "github.com/prysmaticlabs/fastssz"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/encoder"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/scorers"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/metadata"
|
||||
"github.com/sirupsen/logrus"
|
||||
@@ -50,9 +53,10 @@ type TestP2P struct {
|
||||
}
|
||||
|
||||
// NewTestP2P initializes a new p2p test service.
|
||||
func NewTestP2P(t *testing.T) *TestP2P {
|
||||
func NewTestP2P(t *testing.T, opts ...swarmt.Option) *TestP2P {
|
||||
opts = append(opts, swarmt.OptDisableQUIC)
|
||||
ctx := context.Background()
|
||||
h := bhost.NewBlankHost(swarmt.GenSwarm(t, swarmt.OptDisableQUIC))
|
||||
h := bhost.NewBlankHost(swarmt.GenSwarm(t, opts...))
|
||||
ps, err := pubsub.NewFloodSub(ctx, h,
|
||||
pubsub.WithMessageSigning(false),
|
||||
pubsub.WithStrictSignatureVerification(false),
|
||||
@@ -183,6 +187,12 @@ func (p *TestP2P) BroadcastBlob(context.Context, uint64, *ethpb.BlobSidecar) err
|
||||
return nil
|
||||
}
|
||||
|
||||
// BroadcastDataColumn broadcasts a data column for mock.
|
||||
func (p *TestP2P) BroadcastDataColumn(context.Context, uint64, *ethpb.DataColumnSidecar) error {
|
||||
p.BroadcastCalled.Store(true)
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetStreamHandler for RPC.
|
||||
func (p *TestP2P) SetStreamHandler(topic string, handler network.StreamHandler) {
|
||||
p.BHost.SetStreamHandler(protocol.ID(topic), handler)
|
||||
@@ -232,7 +242,7 @@ func (p *TestP2P) LeaveTopic(topic string) error {
|
||||
}
|
||||
|
||||
// Encoding returns ssz encoding.
|
||||
func (_ *TestP2P) Encoding() encoder.NetworkEncoding {
|
||||
func (*TestP2P) Encoding() encoder.NetworkEncoding {
|
||||
return &encoder.SszNetworkEncoder{}
|
||||
}
|
||||
|
||||
@@ -259,12 +269,17 @@ func (p *TestP2P) Host() host.Host {
|
||||
}
|
||||
|
||||
// ENR returns the enr of the local peer.
|
||||
func (_ *TestP2P) ENR() *enr.Record {
|
||||
func (*TestP2P) ENR() *enr.Record {
|
||||
return new(enr.Record)
|
||||
}
|
||||
|
||||
// NodeID returns the node id of the local peer.
|
||||
func (*TestP2P) NodeID() enode.ID {
|
||||
return [32]byte{}
|
||||
}
|
||||
|
||||
// DiscoveryAddresses --
|
||||
func (_ *TestP2P) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
|
||||
func (*TestP2P) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
@@ -346,7 +361,7 @@ func (p *TestP2P) Send(ctx context.Context, msg interface{}, topic string, pid p
|
||||
}
|
||||
|
||||
// Started always returns true.
|
||||
func (_ *TestP2P) Started() bool {
|
||||
func (*TestP2P) Started() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
@@ -356,12 +371,12 @@ func (p *TestP2P) Peers() *peers.Status {
|
||||
}
|
||||
|
||||
// FindPeersWithSubnet mocks the p2p func.
|
||||
func (_ *TestP2P) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ int) (bool, error) {
|
||||
func (*TestP2P) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ int) (bool, error) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// RefreshENR mocks the p2p func.
|
||||
func (_ *TestP2P) RefreshENR() {}
|
||||
func (*TestP2P) RefreshPersistentSubnets() {}
|
||||
|
||||
// ForkDigest mocks the p2p func.
|
||||
func (p *TestP2P) ForkDigest() ([4]byte, error) {
|
||||
@@ -379,31 +394,54 @@ func (p *TestP2P) MetadataSeq() uint64 {
|
||||
}
|
||||
|
||||
// AddPingMethod mocks the p2p func.
|
||||
func (_ *TestP2P) AddPingMethod(_ func(ctx context.Context, id peer.ID) error) {
|
||||
func (*TestP2P) AddPingMethod(_ func(ctx context.Context, id peer.ID) error) {
|
||||
// no-op
|
||||
}
|
||||
|
||||
// InterceptPeerDial .
|
||||
func (_ *TestP2P) InterceptPeerDial(peer.ID) (allow bool) {
|
||||
func (*TestP2P) InterceptPeerDial(peer.ID) (allow bool) {
|
||||
return true
|
||||
}
|
||||
|
||||
// InterceptAddrDial .
|
||||
func (_ *TestP2P) InterceptAddrDial(peer.ID, multiaddr.Multiaddr) (allow bool) {
|
||||
func (*TestP2P) InterceptAddrDial(peer.ID, multiaddr.Multiaddr) (allow bool) {
|
||||
return true
|
||||
}
|
||||
|
||||
// InterceptAccept .
|
||||
func (_ *TestP2P) InterceptAccept(_ network.ConnMultiaddrs) (allow bool) {
|
||||
func (*TestP2P) InterceptAccept(_ network.ConnMultiaddrs) (allow bool) {
|
||||
return true
|
||||
}
|
||||
|
||||
// InterceptSecured .
|
||||
func (_ *TestP2P) InterceptSecured(network.Direction, peer.ID, network.ConnMultiaddrs) (allow bool) {
|
||||
func (*TestP2P) InterceptSecured(network.Direction, peer.ID, network.ConnMultiaddrs) (allow bool) {
|
||||
return true
|
||||
}
|
||||
|
||||
// InterceptUpgraded .
|
||||
func (_ *TestP2P) InterceptUpgraded(network.Conn) (allow bool, reason control.DisconnectReason) {
|
||||
func (*TestP2P) InterceptUpgraded(network.Conn) (allow bool, reason control.DisconnectReason) {
|
||||
return true, 0
|
||||
}
|
||||
|
||||
func (s *TestP2P) CustodyCountFromRemotePeer(pid peer.ID) uint64 {
|
||||
// By default, we assume the peer custodies the minimum number of subnets.
|
||||
custodyRequirement := params.BeaconConfig().CustodyRequirement
|
||||
|
||||
// Retrieve the ENR of the peer.
|
||||
record, err := s.peers.ENR(pid)
|
||||
if err != nil {
|
||||
return custodyRequirement
|
||||
}
|
||||
|
||||
// Retrieve the custody subnets count from the ENR.
|
||||
custodyCount, err := peerdas.CustodyCountFromRecord(record)
|
||||
if err != nil {
|
||||
return custodyRequirement
|
||||
}
|
||||
|
||||
return custodyCount
|
||||
}
|
||||
|
||||
func (*TestP2P) GetValidCustodyPeers(peers []peer.ID) ([]peer.ID, error) {
|
||||
return peers, nil
|
||||
}
|
||||
|
||||
@@ -30,6 +30,9 @@ const (
GossipBlsToExecutionChangeMessage = "bls_to_execution_change"
// GossipBlobSidecarMessage is the name for the blob sidecar message type.
GossipBlobSidecarMessage = "blob_sidecar"
// GossipDataColumnSidecarMessage is the name for the data column sidecar message type.
GossipDataColumnSidecarMessage = "data_column_sidecar"

// Topic Formats
//
// AttestationSubnetTopicFormat is the topic format for the attestation subnet.
@@ -52,4 +55,6 @@ const (
BlsToExecutionChangeSubnetTopicFormat = GossipProtocolAndDigest + GossipBlsToExecutionChangeMessage
// BlobSubnetTopicFormat is the topic format for the blob subnet.
BlobSubnetTopicFormat = GossipProtocolAndDigest + GossipBlobSidecarMessage + "_%d"
// DataColumnSubnetTopicFormat is the topic format for the data column subnet.
DataColumnSubnetTopicFormat = GossipProtocolAndDigest + GossipDataColumnSidecarMessage + "_%d"
)
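The _%d suffix is filled with the subnet index, and GossipProtocolAndDigest carries a fork-digest placeholder, which is why the CanSubscribe test earlier appends an extra formatting argument for DataColumnSubnetTopicFormat. A small sketch of expanding the topic, assuming the usual /eth2/%x/ prefix literal:

package main

import "fmt"

const (
	// gossipProtocolAndDigest is assumed to be "/eth2/" plus a fork-digest placeholder.
	gossipProtocolAndDigest        = "/eth2/%x/"
	gossipDataColumnSidecarMessage = "data_column_sidecar"
	dataColumnSubnetTopicFormat    = gossipProtocolAndDigest + gossipDataColumnSidecarMessage + "_%d"
)

func main() {
	forkDigest := [4]byte{0x01, 0x02, 0x03, 0x04}
	subnet := uint64(7)
	topic := fmt.Sprintf(dataColumnSubnetTopicFormat, forkDigest, subnet)
	fmt.Println(topic) // /eth2/01020304/data_column_sidecar_7
}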
|
||||
|
||||
@@ -9,10 +9,15 @@ var (
ErrInvalidSequenceNum = errors.New("invalid sequence number provided")
ErrGeneric = errors.New("internal service error")

ErrRateLimited = errors.New("rate limited")
ErrIODeadline = errors.New("i/o deadline exceeded")
ErrInvalidRequest = errors.New("invalid range, step or count")
ErrBlobLTMinRequest = errors.New("blob slot < minimum_request_epoch")
ErrMaxBlobReqExceeded = errors.New("requested more than MAX_REQUEST_BLOB_SIDECARS")
ErrRateLimited = errors.New("rate limited")
ErrIODeadline = errors.New("i/o deadline exceeded")
ErrInvalidRequest = errors.New("invalid range, step or count")
ErrBlobLTMinRequest = errors.New("blob epoch < minimum_request_epoch")

ErrDataColumnLTMinRequest = errors.New("data column epoch < minimum_request_epoch")
ErrMaxBlobReqExceeded = errors.New("requested more than MAX_REQUEST_BLOB_SIDECARS")
ErrMaxDataColumnReqExceeded = errors.New("requested more than MAX_REQUEST_DATA_COLUMN_SIDECARS")

ErrResourceUnavailable = errors.New("resource requested unavailable")
ErrInvalidColumnIndex = errors.New("invalid column index requested")
)
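The new sentinel errors give the data column by-range and by-root handlers distinct failure modes. A hedged sketch of how a handler might use them; the request shape, the minimum-epoch slot, and the 16384 cap are illustrative assumptions rather than the real config values:

package main

import (
	"errors"
	"fmt"
)

var (
	errInvalidRequest           = errors.New("invalid range, step or count")
	errDataColumnLTMinRequest   = errors.New("data column epoch < minimum_request_epoch")
	errMaxDataColumnReqExceeded = errors.New("requested more than MAX_REQUEST_DATA_COLUMN_SIDECARS")
)

type dataColumnsByRangeRequest struct {
	StartSlot uint64
	Count     uint64
	Columns   []uint64
}

// validate mirrors the kind of checks a by-range handler would run before
// serving sidecars; minEpochStartSlot and maxSidecars are assumed values.
func validate(req dataColumnsByRangeRequest, minEpochStartSlot, maxSidecars uint64) error {
	if req.Count == 0 {
		return errInvalidRequest
	}
	if req.StartSlot < minEpochStartSlot {
		return errDataColumnLTMinRequest
	}
	if req.Count*uint64(len(req.Columns)) > maxSidecars {
		return errMaxDataColumnReqExceeded
	}
	return nil
}

func main() {
	err := validate(dataColumnsByRangeRequest{StartSlot: 10, Count: 2048, Columns: make([]uint64, 16)}, 0, 16384)
	fmt.Println(errors.Is(err, errMaxDataColumnReqExceeded)) // true: 2048 * 16 > 16384
}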
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
|
||||
"github.com/pkg/errors"
|
||||
ssz "github.com/prysmaticlabs/fastssz"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
)
|
||||
@@ -207,7 +208,94 @@ func (s BlobSidecarsByRootReq) Len() int {
|
||||
return len(s)
|
||||
}
|
||||
|
||||
func init() {
|
||||
sizer := &eth.BlobIdentifier{}
|
||||
blobIdSize = sizer.SizeSSZ()
|
||||
// =====================================
|
||||
// DataColumnSidecarsByRootReq section
|
||||
// =====================================
|
||||
var _ ssz.Marshaler = (*DataColumnSidecarsByRootReq)(nil)
|
||||
var _ ssz.Unmarshaler = (*DataColumnSidecarsByRootReq)(nil)
|
||||
var _ sort.Interface = (*DataColumnSidecarsByRootReq)(nil)
|
||||
|
||||
// DataColumnSidecarsByRootReq is used to specify a list of data column targets (root+index) in a DataColumnSidecarsByRoot RPC request.
|
||||
type DataColumnSidecarsByRootReq []*eth.DataColumnIdentifier
|
||||
|
||||
// DataColumnIdentifier is a fixed size value, so we can compute its fixed size at start time (see init below)
|
||||
var dataColumnIdSize int
|
||||
|
||||
// UnmarshalSSZ implements ssz.Unmarshaler. It unmarshals the provided bytes buffer into the DataColumnSidecarsByRootReq value.
|
||||
func (d *DataColumnSidecarsByRootReq) UnmarshalSSZ(buf []byte) error {
|
||||
bufLen := len(buf)
|
||||
maxLen := int(params.BeaconConfig().MaxRequestDataColumnSidecars) * dataColumnIdSize
|
||||
if bufLen > maxLen {
|
||||
return errors.Errorf("expected buffer with length of up to %d but received length %d", maxLen, bufLen)
|
||||
}
|
||||
if bufLen%dataColumnIdSize != 0 {
|
||||
return errors.Wrapf(ssz.ErrIncorrectByteSize, "size=%d", bufLen)
|
||||
}
|
||||
count := bufLen / dataColumnIdSize
|
||||
*d = make([]*eth.DataColumnIdentifier, count)
|
||||
for i := 0; i < count; i++ {
|
||||
id := &eth.DataColumnIdentifier{}
|
||||
err := id.UnmarshalSSZ(buf[i*dataColumnIdSize : (i+1)*dataColumnIdSize])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
(*d)[i] = id
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalSSZ implements ssz.Marshaler. It serializes the DataColumnSidecarsByRootReq value to a byte slice.
|
||||
func (d *DataColumnSidecarsByRootReq) MarshalSSZ() ([]byte, error) {
|
||||
buf := make([]byte, d.SizeSSZ())
|
||||
for i, id := range *d {
|
||||
bytes, err := id.MarshalSSZ()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
copy(buf[i*dataColumnIdSize:(i+1)*dataColumnIdSize], bytes)
|
||||
}
|
||||
|
||||
return buf, nil
|
||||
}
|
||||
|
||||
// MarshalSSZTo implements ssz.Marshaler. It appends the serialized DataColumnSidecarsByRootReq value to the provided byte slice.
|
||||
func (d *DataColumnSidecarsByRootReq) MarshalSSZTo(dst []byte) ([]byte, error) {
|
||||
mobj, err := d.MarshalSSZ()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return append(dst, mobj...), nil
|
||||
}
|
||||
|
||||
// SizeSSZ implements ssz.Marshaler. It returns the size of the serialized representation.
|
||||
func (d *DataColumnSidecarsByRootReq) SizeSSZ() int {
|
||||
return len(*d) * dataColumnIdSize
|
||||
}
|
||||
|
||||
// Len implements sort.Interface. It returns the number of elements in the collection.
func (d DataColumnSidecarsByRootReq) Len() int {
return len(d)
}

// Less implements sort.Interface. It reports whether the element with index i must sort before the element with index j.
func (d DataColumnSidecarsByRootReq) Less(i int, j int) bool {
rootCmp := bytes.Compare(d[i].BlockRoot, d[j].BlockRoot)
if rootCmp != 0 {
return rootCmp < 0
}

return d[i].ColumnIndex < d[j].ColumnIndex
}

// Swap implements sort.Interface. It swaps the elements with indexes i and j.
func (d DataColumnSidecarsByRootReq) Swap(i int, j int) {
d[i], d[j] = d[j], d[i]
}
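With Len, Less and Swap in place, a request can be put into canonical order with the standard library before being served, and the SSZ round trip is a plain marshal/unmarshal pair. A usage sketch, assuming it lives alongside these types and that the eth and bytesutil import paths match the surrounding files:

package types

import (
	"sort"

	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
	eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)

// sortAndRoundTrip shows the intended usage of DataColumnSidecarsByRootReq:
// canonical ordering via sort.Sort, then an SSZ encode/decode round trip.
func sortAndRoundTrip() (DataColumnSidecarsByRootReq, error) {
	req := DataColumnSidecarsByRootReq{
		&eth.DataColumnIdentifier{BlockRoot: bytesutil.PadTo([]byte{2}, 32), ColumnIndex: 1},
		&eth.DataColumnIdentifier{BlockRoot: bytesutil.PadTo([]byte{1}, 32), ColumnIndex: 5},
		&eth.DataColumnIdentifier{BlockRoot: bytesutil.PadTo([]byte{1}, 32), ColumnIndex: 2},
	}

	// Orders by block root first, then by column index (see Less above).
	sort.Sort(req)

	// SSZ round trip: a flat list of fixed-size DataColumnIdentifier values.
	encoded, err := req.MarshalSSZ()
	if err != nil {
		return nil, err
	}
	var decoded DataColumnSidecarsByRootReq
	if err := decoded.UnmarshalSSZ(encoded); err != nil {
		return nil, err
	}
	return decoded, nil
}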

func init() {
blobSizer := &eth.BlobIdentifier{}
blobIdSize = blobSizer.SizeSSZ()

dataColumnSizer := &eth.DataColumnIdentifier{}
dataColumnIdSize = dataColumnSizer.SizeSSZ()
}
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"testing"
|
||||
|
||||
ssz "github.com/prysmaticlabs/fastssz"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
@@ -194,3 +195,136 @@ func hexDecodeOrDie(t *testing.T, str string) []byte {
|
||||
require.NoError(t, err)
|
||||
return decoded
|
||||
}
|
||||
|
||||
// =====================================
|
||||
// DataColumnSidecarsByRootReq section
|
||||
// =====================================
|
||||
func generateDataColumnIdentifiers(n int) []*eth.DataColumnIdentifier {
|
||||
r := make([]*eth.DataColumnIdentifier, n)
|
||||
for i := 0; i < n; i++ {
|
||||
r[i] = &eth.DataColumnIdentifier{
|
||||
BlockRoot: bytesutil.PadTo([]byte{byte(i)}, 32),
|
||||
ColumnIndex: uint64(i),
|
||||
}
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
func TestDataColumnSidecarsByRootReq_MarshalUnmarshal(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
ids []*eth.DataColumnIdentifier
|
||||
marshalErr error
|
||||
unmarshalErr string
|
||||
unmarshalMod func([]byte) []byte
|
||||
}{
|
||||
{
|
||||
name: "empty list",
|
||||
},
|
||||
{
|
||||
name: "single item list",
|
||||
ids: generateDataColumnIdentifiers(1),
|
||||
},
|
||||
{
|
||||
name: "10 item list",
|
||||
ids: generateDataColumnIdentifiers(10),
|
||||
},
|
||||
{
|
||||
name: "wonky unmarshal size",
|
||||
ids: generateDataColumnIdentifiers(10),
|
||||
unmarshalMod: func(in []byte) []byte {
|
||||
in = append(in, byte(0))
|
||||
return in
|
||||
},
|
||||
unmarshalErr: ssz.ErrIncorrectByteSize.Error(),
|
||||
},
|
||||
{
|
||||
name: "size too big",
|
||||
ids: generateDataColumnIdentifiers(1),
|
||||
unmarshalMod: func(in []byte) []byte {
|
||||
maxLen := params.BeaconConfig().MaxRequestDataColumnSidecars * uint64(dataColumnIdSize)
|
||||
add := make([]byte, maxLen)
|
||||
in = append(in, add...)
|
||||
return in
|
||||
},
|
||||
unmarshalErr: "expected buffer with length of up to",
|
||||
},
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
req := DataColumnSidecarsByRootReq(c.ids)
|
||||
bytes, err := req.MarshalSSZ()
|
||||
if c.marshalErr != nil {
|
||||
require.ErrorIs(t, err, c.marshalErr)
|
||||
return
|
||||
}
|
||||
require.NoError(t, err)
|
||||
if c.unmarshalMod != nil {
|
||||
bytes = c.unmarshalMod(bytes)
|
||||
}
|
||||
got := &DataColumnSidecarsByRootReq{}
|
||||
err = got.UnmarshalSSZ(bytes)
|
||||
if c.unmarshalErr != "" {
|
||||
require.ErrorContains(t, c.unmarshalErr, err)
|
||||
return
|
||||
}
|
||||
require.NoError(t, err)
|
||||
for i, id := range *got {
|
||||
require.DeepEqual(t, c.ids[i], id)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// Test MarshalSSZTo
|
||||
req := DataColumnSidecarsByRootReq(generateDataColumnIdentifiers(10))
|
||||
buf := make([]byte, 0)
|
||||
buf, err := req.MarshalSSZTo(buf)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, len(buf), int(req.SizeSSZ()))
|
||||
|
||||
var unmarshalled DataColumnSidecarsByRootReq
|
||||
err = unmarshalled.UnmarshalSSZ(buf)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, req, unmarshalled)
|
||||
}
|
||||
|
||||
func TestDataColumnSidecarsByRootReq_Sort(t *testing.T) {
|
||||
ids := []*eth.DataColumnIdentifier{
|
||||
{
|
||||
BlockRoot: bytesutil.PadTo([]byte{3}, 32),
|
||||
ColumnIndex: 0,
|
||||
},
|
||||
{
|
||||
BlockRoot: bytesutil.PadTo([]byte{2}, 32),
|
||||
ColumnIndex: 2,
|
||||
},
|
||||
{
|
||||
BlockRoot: bytesutil.PadTo([]byte{2}, 32),
|
||||
ColumnIndex: 1,
|
||||
},
|
||||
{
|
||||
BlockRoot: bytesutil.PadTo([]byte{1}, 32),
|
||||
ColumnIndex: 2,
|
||||
},
|
||||
{
|
||||
BlockRoot: bytesutil.PadTo([]byte{0}, 32),
|
||||
ColumnIndex: 3,
|
||||
},
|
||||
}
|
||||
req := DataColumnSidecarsByRootReq(ids)
|
||||
require.Equal(t, true, req.Less(4, 3))
|
||||
require.Equal(t, true, req.Less(3, 2))
|
||||
require.Equal(t, true, req.Less(2, 1))
|
||||
require.Equal(t, true, req.Less(1, 0))
|
||||
require.Equal(t, 5, req.Len())
|
||||
|
||||
ids = []*eth.DataColumnIdentifier{
|
||||
{
|
||||
BlockRoot: bytesutil.PadTo([]byte{0}, 32),
|
||||
ColumnIndex: 3,
|
||||
},
|
||||
}
|
||||
req = DataColumnSidecarsByRootReq(ids)
|
||||
require.Equal(t, 1, req.Len())
|
||||
}
|
||||
|
||||
@@ -12,10 +12,15 @@ import (
|
||||
"path"
|
||||
"time"
|
||||
|
||||
"github.com/btcsuite/btcd/btcec/v2"
|
||||
gCrypto "github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
"github.com/libp2p/go-libp2p/core/crypto"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/wrapper"
|
||||
ecdsaprysm "github.com/prysmaticlabs/prysm/v5/crypto/ecdsa"
|
||||
"github.com/prysmaticlabs/prysm/v5/io/file"
|
||||
@@ -62,6 +67,7 @@ func privKey(cfg *Config) (*ecdsa.PrivateKey, error) {
|
||||
}
|
||||
|
||||
if defaultKeysExist {
|
||||
log.WithField("filePath", defaultKeyPath).Info("Reading static P2P private key from a file. To generate a new random private key at every start, please remove this file.")
|
||||
return privKeyFromFile(defaultKeyPath)
|
||||
}
|
||||
|
||||
@@ -71,8 +77,8 @@ func privKey(cfg *Config) (*ecdsa.PrivateKey, error) {
return nil, err
}

// If the StaticPeerID flag is not set, return the private key.
if !cfg.StaticPeerID {
// If the StaticPeerID flag is not set and if peerDAS is not enabled, return the private key.
if !(cfg.StaticPeerID || params.PeerDASEnabled()) {
return ecdsaprysm.ConvertFromInterfacePrivKey(priv)
}
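The extra params.PeerDASEnabled() condition persists the network key even without the static-peer-id flag because, under PeerDAS, the columns a node custodies are derived from its node ID, which is itself derived from that key. A toy illustration of why a regenerated key would reshuffle custody (this is not the real peerdas derivation, just a stand-in):

package main

import "fmt"

// custodyColumnsForNode is a toy stand-in for peerdas custody assignment:
// the columns a node must keep are a deterministic function of its node ID.
func custodyColumnsForNode(nodeID [32]byte, custodyCount, numberOfColumns uint64) []uint64 {
	cols := make([]uint64, 0, custodyCount)
	seed := uint64(nodeID[0])<<8 | uint64(nodeID[1]) // toy derivation only
	for i := uint64(0); i < custodyCount; i++ {
		cols = append(cols, (seed+i)%numberOfColumns)
	}
	return cols
}

func main() {
	idA := [32]byte{1, 2}
	idB := [32]byte{9, 9} // a regenerated key gives a different node ID...
	fmt.Println(custodyColumnsForNode(idA, 4, 128))
	fmt.Println(custodyColumnsForNode(idB, 4, 128)) // ...and therefore different custody columns
}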
|
||||
|
||||
@@ -89,7 +95,7 @@ func privKey(cfg *Config) (*ecdsa.PrivateKey, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.Info("Wrote network key to file")
|
||||
log.WithField("path", defaultKeyPath).Info("Wrote network key to file")
|
||||
// Read the key from the defaultKeyPath file just written
|
||||
// for the strongest guarantee that the next start will be the same as this one.
|
||||
return privKeyFromFile(defaultKeyPath)
|
||||
@@ -173,3 +179,23 @@ func verifyConnectivity(addr string, port uint, protocol string) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func ConvertPeerIDToNodeID(pid peer.ID) (enode.ID, error) {
|
||||
// Retrieve the public key object of the peer under "crypto" form.
|
||||
pubkeyObjCrypto, err := pid.ExtractPublicKey()
|
||||
if err != nil {
|
||||
return [32]byte{}, errors.Wrap(err, "extract public key")
|
||||
}
|
||||
// Extract the bytes representation of the public key.
|
||||
compressedPubKeyBytes, err := pubkeyObjCrypto.Raw()
|
||||
if err != nil {
|
||||
return [32]byte{}, errors.Wrap(err, "public key raw")
|
||||
}
|
||||
// Retrieve the public key object of the peer under "SECP256K1" form.
|
||||
pubKeyObjSecp256k1, err := btcec.ParsePubKey(compressedPubKeyBytes)
|
||||
if err != nil {
|
||||
return [32]byte{}, errors.Wrap(err, "parse public key")
|
||||
}
|
||||
newPubkey := &ecdsa.PublicKey{Curve: gCrypto.S256(), X: pubKeyObjSecp256k1.X(), Y: pubKeyObjSecp256k1.Y()}
|
||||
return enode.PubkeyToIDV4(newPubkey), nil
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
@@ -64,3 +65,19 @@ func TestSerializeENR(t *testing.T) {
|
||||
assert.ErrorContains(t, "could not serialize nil record", err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestConvertPeerIDToNodeID(t *testing.T) {
|
||||
const (
|
||||
peerIDStr = "16Uiu2HAmRrhnqEfybLYimCiAYer2AtZKDGamQrL1VwRCyeh2YiFc"
|
||||
expectedNodeIDStr = "eed26c5d2425ab95f57246a5dca87317c41cacee4bcafe8bbe57e5965527c290"
|
||||
)
|
||||
|
||||
peerID, err := peer.Decode(peerIDStr)
|
||||
require.NoError(t, err)
|
||||
|
||||
actualNodeID, err := ConvertPeerIDToNodeID(peerID)
|
||||
require.NoError(t, err)
|
||||
|
||||
actualNodeIDStr := actualNodeID.String()
|
||||
require.Equal(t, expectedNodeIDStr, actualNodeIDStr)
|
||||
}
|
||||
|
||||
@@ -79,6 +79,7 @@ func TestGetSpec(t *testing.T) {
|
||||
config.DenebForkEpoch = 105
|
||||
config.ElectraForkVersion = []byte("ElectraForkVersion")
|
||||
config.ElectraForkEpoch = 107
|
||||
config.Eip7594ForkEpoch = 109
|
||||
config.BLSWithdrawalPrefixByte = byte('b')
|
||||
config.ETH1AddressWithdrawalPrefixByte = byte('c')
|
||||
config.GenesisDelay = 24
|
||||
@@ -192,7 +193,7 @@ func TestGetSpec(t *testing.T) {
|
||||
data, ok := resp.Data.(map[string]interface{})
|
||||
require.Equal(t, true, ok)
|
||||
|
||||
assert.Equal(t, 155, len(data))
|
||||
assert.Equal(t, 156, len(data))
|
||||
for k, v := range data {
|
||||
t.Run(k, func(t *testing.T) {
|
||||
switch k {
|
||||
@@ -270,6 +271,8 @@ func TestGetSpec(t *testing.T) {
|
||||
assert.Equal(t, "0x"+hex.EncodeToString([]byte("ElectraForkVersion")), v)
|
||||
case "ELECTRA_FORK_EPOCH":
|
||||
assert.Equal(t, "107", v)
|
||||
case "EIP7594_FORK_EPOCH":
|
||||
assert.Equal(t, "109", v)
|
||||
case "MIN_ANCHOR_POW_BLOCK_DIFFICULTY":
|
||||
assert.Equal(t, "1000", v)
|
||||
case "BLS_WITHDRAWAL_PREFIX":
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
# gazelle:ignore
|
||||
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
@@ -35,6 +37,7 @@ go_library(
|
||||
"//api/client/builder:go_default_library",
|
||||
"//async/event:go_default_library",
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||
"//beacon-chain/builder:go_default_library",
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/cache/depositsnapshot:go_default_library",
|
||||
@@ -44,6 +47,7 @@ go_library(
|
||||
"//beacon-chain/core/feed/operation:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
@@ -174,7 +178,6 @@ common_deps = [
|
||||
"@org_golang_google_protobuf//types/known/emptypb:go_default_library",
|
||||
]
|
||||
|
||||
# gazelle:ignore
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
timeout = "moderate",
|
||||
|
||||
@@ -13,15 +13,20 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
builderapi "github.com/prysmaticlabs/prysm/v5/api/client/builder"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/builder"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed"
|
||||
blockfeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/block"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/operation"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
|
||||
coreTime "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/kv"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/features"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
@@ -261,7 +266,13 @@ func (vs *Server) BuildBlockParallel(ctx context.Context, sBlk interfaces.Signed
|
||||
}
|
||||
|
||||
// ProposeBeaconBlock handles the proposal of beacon blocks.
|
||||
// TODO: Add tests
|
||||
func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSignedBeaconBlock) (*ethpb.ProposeResponse, error) {
|
||||
var (
|
||||
blobSidecars []*ethpb.BlobSidecar
|
||||
dataColumnSideCars []*ethpb.DataColumnSidecar
|
||||
)
|
||||
|
||||
ctx, span := trace.StartSpan(ctx, "ProposerServer.ProposeBeaconBlock")
|
||||
defer span.End()
|
||||
|
||||
@@ -273,15 +284,18 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.InvalidArgument, "%s: %v", "decode block failed", err)
|
||||
}
|
||||
isPeerDASEnabled := coreTime.PeerDASIsActive(block.Block().Slot())
|
||||
|
||||
var sidecars []*ethpb.BlobSidecar
|
||||
if block.IsBlinded() {
|
||||
block, sidecars, err = vs.handleBlindedBlock(ctx, block)
|
||||
block, blobSidecars, dataColumnSideCars, err = vs.handleBlindedBlock(ctx, block, isPeerDASEnabled)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "%s: %v", "handle blinded block", err)
|
||||
}
|
||||
} else {
|
||||
sidecars, err = vs.handleUnblindedBlock(block, req)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "%s: %v", "handle block failed", err)
|
||||
blobSidecars, dataColumnSideCars, err = handleUnblindedBlock(block, req, isPeerDASEnabled)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "%s: %v", "handle unblinded block", err)
|
||||
}
|
||||
}
|
||||
|
||||
root, err := block.Block().HashTreeRoot()
|
||||
@@ -302,8 +316,14 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
|
||||
errChan <- nil
|
||||
}()
|
||||
|
||||
if err := vs.broadcastAndReceiveBlobs(ctx, sidecars, root); err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not broadcast/receive blobs: %v", err)
|
||||
if isPeerDASEnabled {
|
||||
if err := vs.broadcastAndReceiveDataColumns(ctx, dataColumnSideCars, root); err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not broadcast/receive data columns: %v", err)
|
||||
}
|
||||
} else {
|
||||
if err := vs.broadcastAndReceiveBlobs(ctx, blobSidecars, root); err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not broadcast/receive blobs: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
@@ -315,47 +335,83 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
|
||||
}
|
||||
|
||||
// handleBlindedBlock processes blinded beacon blocks.
|
||||
func (vs *Server) handleBlindedBlock(ctx context.Context, block interfaces.SignedBeaconBlock) (interfaces.SignedBeaconBlock, []*ethpb.BlobSidecar, error) {
|
||||
func (vs *Server) handleBlindedBlock(ctx context.Context, block interfaces.SignedBeaconBlock, isPeerDASEnabled bool) (interfaces.SignedBeaconBlock, []*ethpb.BlobSidecar, []*ethpb.DataColumnSidecar, error) {
|
||||
if block.Version() < version.Bellatrix {
|
||||
return nil, nil, errors.New("pre-Bellatrix blinded block")
|
||||
return nil, nil, nil, errors.New("pre-Bellatrix blinded block")
|
||||
}
|
||||
|
||||
if vs.BlockBuilder == nil || !vs.BlockBuilder.Configured() {
|
||||
return nil, nil, errors.New("unconfigured block builder")
|
||||
return nil, nil, nil, errors.New("unconfigured block builder")
|
||||
}
|
||||
|
||||
copiedBlock, err := block.Copy()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
return nil, nil, nil, errors.Wrap(err, "block copy")
|
||||
}
|
||||
|
||||
payload, bundle, err := vs.BlockBuilder.SubmitBlindedBlock(ctx, block)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "submit blinded block failed")
|
||||
return nil, nil, nil, errors.Wrap(err, "submit blinded block")
|
||||
}
|
||||
|
||||
if err := copiedBlock.Unblind(payload); err != nil {
|
||||
return nil, nil, errors.Wrap(err, "unblind failed")
|
||||
return nil, nil, nil, errors.Wrap(err, "unblind")
|
||||
}
|
||||
|
||||
sidecars, err := unblindBlobsSidecars(copiedBlock, bundle)
|
||||
if isPeerDASEnabled {
|
||||
dataColumnSideCars, err := unblindDataColumnsSidecars(copiedBlock, bundle)
|
||||
if err != nil {
|
||||
return nil, nil, nil, errors.Wrap(err, "unblind data columns sidecars")
|
||||
}
|
||||
|
||||
return copiedBlock, nil, dataColumnSideCars, nil
|
||||
}
|
||||
|
||||
blobSidecars, err := unblindBlobsSidecars(copiedBlock, bundle)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "unblind sidecars failed")
|
||||
return nil, nil, nil, errors.Wrap(err, "unblind blobs sidecars")
|
||||
}
|
||||
|
||||
return copiedBlock, sidecars, nil
|
||||
return copiedBlock, blobSidecars, nil, nil
|
||||
}
|
||||
|
||||
// handleUnblindedBlock processes unblinded beacon blocks.
|
||||
func (vs *Server) handleUnblindedBlock(block interfaces.SignedBeaconBlock, req *ethpb.GenericSignedBeaconBlock) ([]*ethpb.BlobSidecar, error) {
|
||||
func handleUnblindedBlock(block interfaces.SignedBeaconBlock, req *ethpb.GenericSignedBeaconBlock, isPeerDASEnabled bool) ([]*ethpb.BlobSidecar, []*ethpb.DataColumnSidecar, error) {
|
||||
dbBlockContents := req.GetDeneb()
|
||||
|
||||
if dbBlockContents == nil {
|
||||
return nil, nil
|
||||
return nil, nil, nil
|
||||
}
|
||||
return BuildBlobSidecars(block, dbBlockContents.Blobs, dbBlockContents.KzgProofs)
|
||||
|
||||
if isPeerDASEnabled {
|
||||
// Convert blobs from slices to array.
|
||||
blobs := make([]kzg.Blob, 0, len(dbBlockContents.Blobs))
|
||||
for _, blob := range dbBlockContents.Blobs {
|
||||
if len(blob) != kzg.BytesPerBlob {
|
||||
return nil, nil, errors.Errorf("invalid blob size. expected %d bytes, got %d bytes", kzg.BytesPerBlob, len(blob))
|
||||
}
|
||||
|
||||
blobs = append(blobs, kzg.Blob(blob))
|
||||
}
|
||||
|
||||
dataColumnSideCars, err := peerdas.DataColumnSidecars(block, blobs)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "data column sidecars")
|
||||
}
|
||||
|
||||
return nil, dataColumnSideCars, nil
|
||||
}
|
||||
|
||||
blobSidecars, err := BuildBlobSidecars(block, dbBlockContents.Blobs, dbBlockContents.KzgProofs)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "build blob sidecars")
|
||||
}
|
||||
|
||||
return blobSidecars, nil, nil
|
||||
}
|
||||
|
||||
// broadcastReceiveBlock broadcasts a block and handles its reception.
|
||||
func (vs *Server) broadcastReceiveBlock(ctx context.Context, block interfaces.SignedBeaconBlock, root [32]byte) error {
|
||||
func (vs *Server) broadcastReceiveBlock(ctx context.Context, block interfaces.SignedBeaconBlock, root [fieldparams.RootLength]byte) error {
|
||||
protoBlock, err := block.Proto()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "protobuf conversion failed")
|
||||
@@ -371,7 +427,7 @@ func (vs *Server) broadcastReceiveBlock(ctx context.Context, block interfaces.Si
|
||||
}
|
||||
|
||||
// broadcastAndReceiveBlobs handles the broadcasting and reception of blob sidecars.
|
||||
func (vs *Server) broadcastAndReceiveBlobs(ctx context.Context, sidecars []*ethpb.BlobSidecar, root [32]byte) error {
|
||||
func (vs *Server) broadcastAndReceiveBlobs(ctx context.Context, sidecars []*ethpb.BlobSidecar, root [fieldparams.RootLength]byte) error {
|
||||
eg, eCtx := errgroup.WithContext(ctx)
|
||||
for i, sc := range sidecars {
|
||||
// Copy the iteration instance to a local variable to give each go-routine its own copy to play with.
|
||||
@@ -400,6 +456,53 @@ func (vs *Server) broadcastAndReceiveBlobs(ctx context.Context, sidecars []*ethp
|
||||
return eg.Wait()
|
||||
}
|
||||
|
||||
// broadcastAndReceiveDataColumns handles the broadcasting and reception of data column sidecars.
func (vs *Server) broadcastAndReceiveDataColumns(ctx context.Context, sidecars []*ethpb.DataColumnSidecar, root [fieldparams.RootLength]byte) error {
eg, _ := errgroup.WithContext(ctx)

dataColumnsWithholdCount := features.Get().DataColumnsWithholdCount

for i, sd := range sidecars {
// Copy the iteration instance to a local variable to give each go-routine its own copy to play with.
// See https://golang.org/doc/faq#closures_and_goroutines for more details.
colIdx, sidecar := i, sd

eg.Go(func() error {
// Compute the subnet index based on the column index.
subnet := uint64(colIdx) % params.BeaconConfig().DataColumnSidecarSubnetCount

if colIdx < dataColumnsWithholdCount {
log.WithFields(logrus.Fields{
"root": fmt.Sprintf("%#x", root),
"subnet": subnet,
"dataColumnIndex": colIdx,
}).Warning("Withholding data column")
} else {
if err := vs.P2P.BroadcastDataColumn(ctx, subnet, sidecar); err != nil {
return errors.Wrap(err, "broadcast data column")
}
}

roDataColumn, err := blocks.NewRODataColumnWithRoot(sidecar, root)
if err != nil {
return errors.Wrap(err, "new read-only data column with root")
}

verifiedRODataColumn := blocks.NewVerifiedRODataColumn(roDataColumn)
if err := vs.DataColumnReceiver.ReceiveDataColumn(ctx, verifiedRODataColumn); err != nil {
return errors.Wrap(err, "receive data column")
}

vs.OperationNotifier.OperationFeed().Send(&feed.Event{
Type: operation.DataColumnSidecarReceived,
Data: &operation.DataColumnSidecarReceivedData{DataColumn: &verifiedRODataColumn},
})
return nil
})
}
return eg.Wait()
}
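Each column index is mapped to a gossip subnet by a modulo over DataColumnSidecarSubnetCount, and the DataColumnsWithholdCount feature flag skips broadcasting the first N columns (handy for testing sampling and reconstruction) while still importing them locally. A minimal sketch of that routing decision with an assumed subnet count:

package main

import "fmt"

const dataColumnSidecarSubnetCount = 128 // assumed value for illustration only

// routeColumn mirrors the broadcast decision above: compute the subnet from
// the column index and report whether the column would be withheld.
func routeColumn(colIdx int, withholdCount int) (subnet uint64, withheld bool) {
	subnet = uint64(colIdx) % dataColumnSidecarSubnetCount
	withheld = colIdx < withholdCount
	return subnet, withheld
}

func main() {
	for _, idx := range []int{0, 1, 130} {
		subnet, withheld := routeColumn(idx, 2 /* DataColumnsWithholdCount */)
		fmt.Printf("column %d -> subnet %d, withheld=%t\n", idx, subnet, withheld)
	}
	// column 0 -> subnet 0, withheld=true
	// column 1 -> subnet 1, withheld=true
	// column 130 -> subnet 2, withheld=false
}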
|
||||
|
||||
// PrepareBeaconProposer caches and updates the fee recipient for the given proposer.
|
||||
func (vs *Server) PrepareBeaconProposer(
|
||||
_ context.Context, request *ethpb.PrepareBeaconProposerRequest,
|
||||
|
||||
@@ -72,7 +72,6 @@ func (vs *Server) getLocalPayload(ctx context.Context, blk interfaces.ReadOnlyBe
|
||||
}
|
||||
setFeeRecipientIfBurnAddress(&val)
|
||||
|
||||
var err error
|
||||
if ok && payloadId != [8]byte{} {
|
||||
// Payload ID is cache hit. Return the cached payload ID.
|
||||
var pid primitives.PayloadID
|
||||
|
||||
@@ -941,7 +941,7 @@ func TestProposer_ProposeBlock_OK(t *testing.T) {
|
||||
return &ethpb.GenericSignedBeaconBlock{Block: blk}
|
||||
},
|
||||
useBuilder: true,
|
||||
err: "unblind sidecars failed: commitment value doesn't match block",
|
||||
err: "unblind blobs sidecars: commitment value doesn't match block",
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
@@ -67,6 +67,7 @@ type Server struct {
|
||||
SyncCommitteePool synccommittee.Pool
|
||||
BlockReceiver blockchain.BlockReceiver
|
||||
BlobReceiver blockchain.BlobReceiver
|
||||
DataColumnReceiver blockchain.DataColumnReceiver
|
||||
MockEth1Votes bool
|
||||
Eth1BlockFetcher execution.POWBlockFetcher
|
||||
PendingDepositsFetcher depositsnapshot.PendingDepositsFetcher
|
||||
|
||||
@@ -4,6 +4,8 @@ import (
|
||||
"bytes"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
|
||||
consensusblocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
@@ -68,3 +70,29 @@ func unblindBlobsSidecars(block interfaces.SignedBeaconBlock, bundle *enginev1.B
|
||||
}
|
||||
return sidecars, nil
|
||||
}
|
||||
|
||||
// TODO: Add tests
func unblindDataColumnsSidecars(block interfaces.SignedBeaconBlock, bundle *enginev1.BlobsBundle) ([]*ethpb.DataColumnSidecar, error) {
	// Check if the block is at least a Deneb block.
	if block.Version() < version.Deneb {
		return nil, nil
	}

	// Convert blobs from slices to arrays.
	blobs := make([]kzg.Blob, 0, len(bundle.Blobs))
	for _, blob := range bundle.Blobs {
		if len(blob) != kzg.BytesPerBlob {
			return nil, errors.Errorf("invalid blob size. expected %d bytes, got %d bytes", kzg.BytesPerBlob, len(blob))
		}

		blobs = append(blobs, kzg.Blob(blob))
	}

	// Retrieve data columns from blobs.
	dataColumnSidecars, err := peerdas.DataColumnSidecars(block, blobs)
	if err != nil {
		return nil, errors.Wrap(err, "data column sidecars")
	}

	return dataColumnSidecars, nil
}
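// Illustrative sketch of how the unblinded column sidecars could be wired into
// the broadcast helper shown earlier; vs, blk, bundle and root are assumed
// placeholders rather than names taken from this diff's call sites.
//
//	columnSidecars, err := unblindDataColumnsSidecars(blk, bundle)
//	if err != nil {
//		return errors.Wrap(err, "unblind data columns sidecars")
//	}
//	if err := vs.broadcastAndReceiveDataColumns(ctx, columnSidecars, root); err != nil {
//		return errors.Wrap(err, "broadcast and receive data columns")
//	}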
|
||||
|
||||
@@ -107,6 +107,7 @@ type Config struct {
|
||||
AttestationReceiver blockchain.AttestationReceiver
|
||||
BlockReceiver blockchain.BlockReceiver
|
||||
BlobReceiver blockchain.BlobReceiver
|
||||
DataColumnReceiver blockchain.DataColumnReceiver
|
||||
ExecutionChainService execution.Chain
|
||||
ChainStartFetcher execution.ChainStartFetcher
|
||||
ExecutionChainInfoFetcher execution.ChainInfoFetcher
|
||||
@@ -249,6 +250,7 @@ func NewService(ctx context.Context, cfg *Config) *Service {
|
||||
P2P: s.cfg.Broadcaster,
|
||||
BlockReceiver: s.cfg.BlockReceiver,
|
||||
BlobReceiver: s.cfg.BlobReceiver,
|
||||
DataColumnReceiver: s.cfg.DataColumnReceiver,
|
||||
MockEth1Votes: s.cfg.MockEth1Votes,
|
||||
Eth1BlockFetcher: s.cfg.ExecutionChainService,
|
||||
PendingDepositsFetcher: s.cfg.PendingDepositFetcher,
|
||||
|
||||
@@ -7,6 +7,8 @@ go_library(
|
||||
"block_batcher.go",
|
||||
"broadcast_bls_changes.go",
|
||||
"context.go",
|
||||
"data_columns_reconstruct.go",
|
||||
"data_columns_sampling.go",
|
||||
"deadlines.go",
|
||||
"decode_pubsub.go",
|
||||
"doc.go",
|
||||
@@ -25,6 +27,8 @@ go_library(
|
||||
"rpc_blob_sidecars_by_range.go",
|
||||
"rpc_blob_sidecars_by_root.go",
|
||||
"rpc_chunked_response.go",
|
||||
"rpc_data_column_sidecars_by_range.go",
|
||||
"rpc_data_column_sidecars_by_root.go",
|
||||
"rpc_goodbye.go",
|
||||
"rpc_metadata.go",
|
||||
"rpc_ping.go",
|
||||
@@ -37,6 +41,7 @@ go_library(
|
||||
"subscriber_beacon_blocks.go",
|
||||
"subscriber_blob_sidecar.go",
|
||||
"subscriber_bls_to_execution_change.go",
|
||||
"subscriber_data_column_sidecar.go",
|
||||
"subscriber_handlers.go",
|
||||
"subscriber_sync_committee_message.go",
|
||||
"subscriber_sync_contribution_proof.go",
|
||||
@@ -48,6 +53,7 @@ go_library(
|
||||
"validate_beacon_blocks.go",
|
||||
"validate_blob.go",
|
||||
"validate_bls_to_execution_change.go",
|
||||
"validate_data_column.go",
|
||||
"validate_proposer_slashing.go",
|
||||
"validate_sync_committee_message.go",
|
||||
"validate_sync_contribution_proof.go",
|
||||
@@ -64,6 +70,7 @@ go_library(
|
||||
"//async/abool:go_default_library",
|
||||
"//async/event:go_default_library",
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/core/altair:go_default_library",
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
@@ -72,7 +79,9 @@ go_library(
|
||||
"//beacon-chain/core/feed/operation:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
"//beacon-chain/core/transition/interop:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
@@ -152,6 +161,7 @@ go_test(
|
||||
"block_batcher_test.go",
|
||||
"broadcast_bls_changes_test.go",
|
||||
"context_test.go",
|
||||
"data_columns_sampling_test.go",
|
||||
"decode_pubsub_test.go",
|
||||
"error_test.go",
|
||||
"fork_watcher_test.go",
|
||||
@@ -193,12 +203,14 @@ go_test(
|
||||
deps = [
|
||||
"//async/abool:go_default_library",
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||
"//beacon-chain/blockchain/testing:go_default_library",
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/core/altair:go_default_library",
|
||||
"//beacon-chain/core/feed:go_default_library",
|
||||
"//beacon-chain/core/feed/operation:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
@@ -243,20 +255,25 @@ go_test(
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/attestation:go_default_library",
|
||||
"//proto/prysm/v1alpha1/metadata:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"//time:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_consensys_gnark_crypto//ecc/bls12-381/fr:go_default_library",
|
||||
"@com_github_crate_crypto_go_kzg_4844//:go_default_library",
|
||||
"@com_github_d4l3k_messagediff//:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
|
||||
"@com_github_golang_snappy//:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/crypto:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/network:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/protocol:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//p2p/net/swarm/testing:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p_pubsub//:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p_pubsub//pb:go_default_library",
|
||||
"@com_github_patrickmn_go_cache//:go_default_library",
|
||||
|
||||
318  beacon-chain/sync/data_columns_reconstruct.go  Normal file
@@ -0,0 +1,318 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
|
||||
kzg "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
)
|
||||
|
||||
const broadCastMissingDataColumnsTimeIntoSlot = 3 * time.Second
|
||||
|
||||
// recoverCellsAndProofs recovers the cells and proofs from the data column sidecars.
|
||||
func recoverCellsAndProofs(
|
||||
dataColumnSideCars []*ethpb.DataColumnSidecar,
|
||||
columnsCount int,
|
||||
blockRoot [fieldparams.RootLength]byte,
|
||||
) ([]kzg.CellsAndProofs, error) {
|
||||
if len(dataColumnSideCars) == 0 {
|
||||
return nil, errors.New("no data column sidecars")
|
||||
}
|
||||
|
||||
// Check if all columns have the same length.
|
||||
blobCount := len(dataColumnSideCars[0].DataColumn)
|
||||
for _, sidecar := range dataColumnSideCars {
|
||||
length := len(sidecar.DataColumn)
|
||||
|
||||
if length != blobCount {
|
||||
return nil, errors.New("columns do not have the same length")
|
||||
}
|
||||
}
|
||||
|
||||
// Recover cells and compute proofs.
|
||||
recoveredCellsAndProofs := make([]kzg.CellsAndProofs, 0, blobCount)
|
||||
|
||||
for blobIndex := 0; blobIndex < blobCount; blobIndex++ {
|
||||
start := time.Now()
|
||||
|
||||
cellsIndices := make([]uint64, 0, columnsCount)
|
||||
cells := make([]kzg.Cell, 0, columnsCount)
|
||||
|
||||
for _, sidecar := range dataColumnSideCars {
|
||||
// Build the cell indices.
|
||||
cellsIndices = append(cellsIndices, sidecar.ColumnIndex)
|
||||
|
||||
// Get the cell.
|
||||
column := sidecar.DataColumn
|
||||
cell := column[blobIndex]
|
||||
|
||||
cells = append(cells, kzg.Cell(cell))
|
||||
}
|
||||
|
||||
// Recover the cells and proofs for the corresponding blob
|
||||
cellsAndProofs, err := kzg.RecoverCellsAndKZGProofs(cellsIndices, cells)
|
||||
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "recover cells and KZG proofs for blob %d", blobIndex)
|
||||
}
|
||||
|
||||
recoveredCellsAndProofs = append(recoveredCellsAndProofs, cellsAndProofs)
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"elapsed": time.Since(start),
|
||||
"index": blobIndex,
|
||||
"root": fmt.Sprintf("%x", blockRoot),
|
||||
}).Debug("Recovered cells and proofs")
|
||||
}
|
||||
|
||||
return recoveredCellsAndProofs, nil
|
||||
}
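// Minimal sketch of the precondition relied on by callers: with the extended
// (erasure-coded) columns, recovery needs at least half of NUMBER_OF_COLUMNS
// cells per blob. The helper name is illustrative only.
func exampleCanRecover(storedColumnsCount int) bool {
	return storedColumnsCount >= fieldparams.NumberOfColumns/2
}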
|
||||
|
||||
func (s *Service) reconstructDataColumns(ctx context.Context, verifiedRODataColumn blocks.VerifiedRODataColumn) error {
|
||||
// Lock to prevent concurrent reconstruction.
|
||||
s.dataColumsnReconstructionLock.Lock()
|
||||
defer s.dataColumsnReconstructionLock.Unlock()
|
||||
|
||||
// Get the block root.
|
||||
blockRoot := verifiedRODataColumn.BlockRoot()
|
||||
|
||||
// Get the columns we store.
|
||||
storedDataColumns, err := s.cfg.blobStorage.ColumnIndices(blockRoot)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "columns indices")
|
||||
}
|
||||
|
||||
storedColumnsCount := len(storedDataColumns)
|
||||
numberOfColumns := fieldparams.NumberOfColumns
|
||||
|
||||
// If less than half of the columns are stored, reconstruction is not possible.
|
||||
// If all columns are stored, no need to reconstruct.
|
||||
if storedColumnsCount < numberOfColumns/2 || storedColumnsCount == numberOfColumns {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Retrieve the custodied columns.
|
||||
custodiedColumns, err := peerdas.CustodyColumns(s.cfg.p2p.NodeID(), peerdas.CustodySubnetCount())
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "custodied columns")
|
||||
}
|
||||
|
||||
// Load the data columns sidecars.
|
||||
dataColumnSideCars := make([]*ethpb.DataColumnSidecar, 0, storedColumnsCount)
|
||||
for index := range storedDataColumns {
|
||||
dataColumnSidecar, err := s.cfg.blobStorage.GetColumn(blockRoot, index)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "get column")
|
||||
}
|
||||
|
||||
dataColumnSideCars = append(dataColumnSideCars, dataColumnSidecar)
|
||||
}
|
||||
|
||||
// Recover cells and proofs
|
||||
recoveredCellsAndProofs, err := recoverCellsAndProofs(dataColumnSideCars, storedColumnsCount, blockRoot)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "recover cells and proofs")
|
||||
}
|
||||
|
||||
// Reconstruct the data columns sidecars.
|
||||
dataColumnSidecars, err := peerdas.DataColumnSidecarsForReconstruct(
|
||||
verifiedRODataColumn.KzgCommitments,
|
||||
verifiedRODataColumn.SignedBlockHeader,
|
||||
verifiedRODataColumn.KzgCommitmentsInclusionProof,
|
||||
recoveredCellsAndProofs,
|
||||
)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "data column sidecars")
|
||||
}
|
||||
|
||||
// Save the data columns sidecars in the database.
|
||||
for _, dataColumnSidecar := range dataColumnSidecars {
|
||||
shouldSave := custodiedColumns[dataColumnSidecar.ColumnIndex]
|
||||
if !shouldSave {
|
||||
// We do not custody this column, so we do not need to save it.
|
||||
continue
|
||||
}
|
||||
|
||||
roDataColumn, err := blocks.NewRODataColumnWithRoot(dataColumnSidecar, blockRoot)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "new read-only data column with root")
|
||||
}
|
||||
|
||||
verifiedRoDataColumn := blocks.NewVerifiedRODataColumn(roDataColumn)
|
||||
if err := s.cfg.blobStorage.SaveDataColumn(verifiedRoDataColumn); err != nil {
|
||||
return errors.Wrap(err, "save column")
|
||||
}
|
||||
}
|
||||
|
||||
log.WithField("root", fmt.Sprintf("%x", blockRoot)).Debug("Data columns reconstructed and saved successfully")
|
||||
|
||||
// Schedule the broadcast.
|
||||
if err := s.scheduleReconstructedDataColumnsBroadcast(ctx, blockRoot, verifiedRODataColumn); err != nil {
|
||||
return errors.Wrap(err, "schedule reconstructed data columns broadcast")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) scheduleReconstructedDataColumnsBroadcast(
|
||||
ctx context.Context,
|
||||
blockRoot [fieldparams.RootLength]byte,
|
||||
dataColumn blocks.VerifiedRODataColumn,
|
||||
) error {
|
||||
// Retrieve the slot of the block.
|
||||
slot := dataColumn.Slot()
|
||||
|
||||
// Get the time corresponding to the start of the slot.
|
||||
slotStart, err := slots.ToTime(uint64(s.cfg.chain.GenesisTime().Unix()), slot)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "to time")
|
||||
}
|
||||
|
||||
// Compute when to broadcast the missing data columns.
|
||||
broadcastTime := slotStart.Add(broadCastMissingDataColumnsTimeIntoSlot)
|
||||
|
||||
// Compute the waiting time. This could be negative. In such a case, broadcast immediately.
|
||||
waitingTime := time.Until(broadcastTime)
|
||||
|
||||
time.AfterFunc(waitingTime, func() {
|
||||
s.dataColumsnReconstructionLock.Lock()
|
||||
defer s.deleteReceivedDataColumns(blockRoot)
|
||||
defer s.dataColumsnReconstructionLock.Unlock()
|
||||
|
||||
// Get the received by gossip data columns.
|
||||
receivedDataColumns := s.receivedDataColumns(blockRoot)
|
||||
if receivedDataColumns == nil {
|
||||
log.WithField("root", fmt.Sprintf("%x", blockRoot)).Error("No received data columns")
|
||||
}
|
||||
|
||||
// Get the data columns we should store.
|
||||
custodiedDataColumns, err := peerdas.CustodyColumns(s.cfg.p2p.NodeID(), peerdas.CustodySubnetCount())
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Custody columns")
|
||||
}
|
||||
|
||||
// Get the data columns we actually store.
|
||||
storedDataColumns, err := s.cfg.blobStorage.ColumnIndices(blockRoot)
|
||||
if err != nil {
|
||||
log.WithField("root", fmt.Sprintf("%x", blockRoot)).WithError(err).Error("Columns indices")
|
||||
return
|
||||
}
|
||||
|
||||
// Compute the missing data columns (data columns we should custody but have not received via gossip).
|
||||
missingColumns := make(map[uint64]bool, len(custodiedDataColumns))
|
||||
for column := range custodiedDataColumns {
|
||||
if ok := receivedDataColumns[column]; !ok {
|
||||
missingColumns[column] = true
|
||||
}
|
||||
}
|
||||
|
||||
// Exit early if there are no missing data columns.
|
||||
// This is the happy path.
|
||||
if len(missingColumns) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
for column := range missingColumns {
|
||||
if ok := storedDataColumns[column]; !ok {
|
||||
// This column was neither received nor reconstructed. This should not happen.
|
||||
log.WithFields(logrus.Fields{
|
||||
"root": fmt.Sprintf("%x", blockRoot),
|
||||
"slot": slot,
|
||||
"column": column,
|
||||
}).Error("Data column not received nor reconstructed.")
|
||||
continue
|
||||
}
|
||||
|
||||
// Get the non received but reconstructed data column.
|
||||
dataColumnSidecar, err := s.cfg.blobStorage.GetColumn(blockRoot, column)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Get column")
|
||||
continue
|
||||
}
|
||||
|
||||
// Compute the subnet for this column.
|
||||
subnet := column % params.BeaconConfig().DataColumnSidecarSubnetCount
|
||||
|
||||
// Broadcast the missing data column.
|
||||
if err := s.cfg.p2p.BroadcastDataColumn(ctx, subnet, dataColumnSidecar); err != nil {
|
||||
log.WithError(err).Error("Broadcast data column")
|
||||
}
|
||||
}
|
||||
|
||||
// Collect the missing data columns into a list so they can be sorted.
|
||||
missingColumnsList := make([]uint64, 0, len(missingColumns))
|
||||
for column := range missingColumns {
|
||||
missingColumnsList = append(missingColumnsList, column)
|
||||
}
|
||||
|
||||
// Sort the missing data columns.
|
||||
sort.Slice(missingColumnsList, func(i, j int) bool {
|
||||
return missingColumnsList[i] < missingColumnsList[j]
|
||||
})
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"root": fmt.Sprintf("%x", blockRoot),
|
||||
"slot": slot,
|
||||
"timeIntoSlot": broadCastMissingDataColumnsTimeIntoSlot,
|
||||
"columns": missingColumnsList,
|
||||
}).Debug("Broadcasting not seen via gossip but reconstructed data columns.")
|
||||
})
|
||||
|
||||
return nil
|
||||
}
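// Illustrative sketch of the scheduling above: the wait is measured from the
// start of the sidecar's slot plus a fixed offset, and a negative duration makes
// time.AfterFunc fire immediately. genesisTime and broadcastMissing are assumed
// placeholders.
//
//	slotStart, _ := slots.ToTime(uint64(genesisTime.Unix()), slot)
//	waitingTime := time.Until(slotStart.Add(broadCastMissingDataColumnsTimeIntoSlot))
//	time.AfterFunc(waitingTime, broadcastMissing)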
|
||||
|
||||
// setReceivedDataColumn marks the data column for a given root as received.
|
||||
func (s *Service) setReceivedDataColumn(root [fieldparams.RootLength]byte, columnIndex uint64) {
|
||||
s.receivedDataColumnsFromRootLock.Lock()
|
||||
defer s.receivedDataColumnsFromRootLock.Unlock()
|
||||
|
||||
// Get all the received data columns for this root.
|
||||
receivedDataColumns, ok := s.receivedDataColumnsFromRoot[root]
|
||||
if !ok {
|
||||
// Create the map for this block root if needed.
|
||||
receivedDataColumns = make(map[uint64]bool, params.BeaconConfig().NumberOfColumns)
|
||||
s.receivedDataColumnsFromRoot[root] = receivedDataColumns
|
||||
}
|
||||
|
||||
// Mark the data column as received.
|
||||
receivedDataColumns[columnIndex] = true
|
||||
}
|
||||
|
||||
// receivedDataColumns returns the received data columns for a given root.
|
||||
func (s *Service) receivedDataColumns(root [fieldparams.RootLength]byte) map[uint64]bool {
|
||||
s.receivedDataColumnsFromRootLock.RLock()
|
||||
defer s.receivedDataColumnsFromRootLock.RUnlock()
|
||||
|
||||
// Get all the received data columns for this root.
|
||||
receivedDataColumns, ok := s.receivedDataColumnsFromRoot[root]
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Copy the received data columns.
|
||||
copied := make(map[uint64]bool, len(receivedDataColumns))
|
||||
for column, received := range receivedDataColumns {
|
||||
copied[column] = received
|
||||
}
|
||||
|
||||
return copied
|
||||
}
|
||||
|
||||
// deleteReceivedDataColumns deletes the received data columns for a given root.
|
||||
func (s *Service) deleteReceivedDataColumns(root [fieldparams.RootLength]byte) {
|
||||
s.receivedDataColumnsFromRootLock.Lock()
|
||||
defer s.receivedDataColumnsFromRootLock.Unlock()
|
||||
|
||||
delete(s.receivedDataColumnsFromRoot, root)
|
||||
}
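// Illustrative lifecycle of the per-root tracking above; the call sites are
// assumptions based on how these helpers are used in this file.
//
//	s.setReceivedDataColumn(root, columnIndex) // on gossip receipt
//	received := s.receivedDataColumns(root)    // snapshot at broadcast time
//	s.deleteReceivedDataColumns(root)          // cleanup once the broadcast ran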
|
||||
558  beacon-chain/sync/data_columns_sampling.go  Normal file
@@ -0,0 +1,558 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/async"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
|
||||
coreTime "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/types"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/rand"
|
||||
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
)
|
||||
|
||||
const PeerRefreshInterval = 1 * time.Minute
|
||||
|
||||
type roundSummary struct {
|
||||
RequestedColumns []uint64
|
||||
MissingColumns map[uint64]bool
|
||||
}
|
||||
|
||||
// DataColumnSampler defines the interface for sampling data columns from peers for requested block root and samples count.
|
||||
type DataColumnSampler interface {
|
||||
// Run starts the data column sampling service.
|
||||
Run(ctx context.Context)
|
||||
}
|
||||
|
||||
var _ DataColumnSampler = (*dataColumnSampler1D)(nil)
|
||||
|
||||
// dataColumnSampler1D implements the DataColumnSampler interface for PeerDAS 1D.
|
||||
type dataColumnSampler1D struct {
|
||||
sync.RWMutex
|
||||
|
||||
p2p p2p.P2P
|
||||
clock *startup.Clock
|
||||
ctxMap ContextByteVersions
|
||||
stateNotifier statefeed.Notifier
|
||||
|
||||
// nonCustodyColumns is a set of columns that are not custodied by the node.
|
||||
nonCustodyColumns map[uint64]bool
|
||||
// columnFromPeer maps a peer to the columns it is responsible for custody.
|
||||
columnFromPeer map[peer.ID]map[uint64]bool
|
||||
// peerFromColumn maps a column to the peers responsible for its custody.
|
||||
peerFromColumn map[uint64]map[peer.ID]bool
|
||||
}
|
||||
|
||||
// newDataColumnSampler1D creates a new 1D data column sampler.
|
||||
func newDataColumnSampler1D(
|
||||
p2p p2p.P2P,
|
||||
clock *startup.Clock,
|
||||
ctxMap ContextByteVersions,
|
||||
stateNotifier statefeed.Notifier,
|
||||
) *dataColumnSampler1D {
|
||||
numColumns := params.BeaconConfig().NumberOfColumns
|
||||
peerFromColumn := make(map[uint64]map[peer.ID]bool, numColumns)
|
||||
for i := uint64(0); i < numColumns; i++ {
|
||||
peerFromColumn[i] = make(map[peer.ID]bool)
|
||||
}
|
||||
|
||||
return &dataColumnSampler1D{
|
||||
p2p: p2p,
|
||||
clock: clock,
|
||||
ctxMap: ctxMap,
|
||||
stateNotifier: stateNotifier,
|
||||
columnFromPeer: make(map[peer.ID]map[uint64]bool),
|
||||
peerFromColumn: peerFromColumn,
|
||||
}
|
||||
}
|
||||
|
||||
// Run implements DataColumnSampler.
|
||||
func (d *dataColumnSampler1D) Run(ctx context.Context) {
|
||||
// Verify whether we need to run sampling at all; if not, return directly.
|
||||
csc := peerdas.CustodySubnetCount()
|
||||
columns, err := peerdas.CustodyColumns(d.p2p.NodeID(), csc)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to determine local custody columns")
|
||||
return
|
||||
}
|
||||
|
||||
custodyColumnsCount := uint64(len(columns))
|
||||
if peerdas.CanSelfReconstruct(custodyColumnsCount) {
|
||||
log.WithFields(logrus.Fields{
|
||||
"custodyColumnsCount": custodyColumnsCount,
|
||||
"totalColumns": params.BeaconConfig().NumberOfColumns,
|
||||
}).Debug("The node custodies at least the half the data columns, no need to sample")
|
||||
return
|
||||
}
|
||||
|
||||
// initialize non custody columns.
|
||||
d.nonCustodyColumns = make(map[uint64]bool)
|
||||
for i := uint64(0); i < params.BeaconConfig().NumberOfColumns; i++ {
|
||||
if exists := columns[i]; !exists {
|
||||
d.nonCustodyColumns[i] = true
|
||||
}
|
||||
}
|
||||
|
||||
// initialize peer info first.
|
||||
d.refreshPeerInfo()
|
||||
|
||||
// periodically refresh peer info to keep peer <-> column mapping up to date.
|
||||
async.RunEvery(ctx, PeerRefreshInterval, d.refreshPeerInfo)
|
||||
|
||||
// start the sampling loop.
|
||||
d.samplingRoutine(ctx)
|
||||
}
|
||||
|
||||
func (d *dataColumnSampler1D) samplingRoutine(ctx context.Context) {
|
||||
stateCh := make(chan *feed.Event, 1)
|
||||
stateSub := d.stateNotifier.StateFeed().Subscribe(stateCh)
|
||||
defer stateSub.Unsubscribe()
|
||||
|
||||
for {
|
||||
select {
|
||||
case evt := <-stateCh:
|
||||
d.handleStateNotification(ctx, evt)
|
||||
case err := <-stateSub.Err():
|
||||
log.WithError(err).Error("DataColumnSampler1D subscription to state feed failed")
|
||||
case <-ctx.Done():
|
||||
log.Debug("Context canceled, exiting data column sampling loop.")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Refresh peer information.
|
||||
func (d *dataColumnSampler1D) refreshPeerInfo() {
|
||||
d.Lock()
|
||||
defer d.Unlock()
|
||||
|
||||
activePeers := d.p2p.Peers().Active()
|
||||
d.prunePeerInfo(activePeers)
|
||||
|
||||
for _, pid := range activePeers {
|
||||
if _, ok := d.columnFromPeer[pid]; ok {
|
||||
// TODO: need to update peer info here after validator custody.
|
||||
continue
|
||||
}
|
||||
|
||||
csc := d.p2p.CustodyCountFromRemotePeer(pid)
|
||||
nid, err := p2p.ConvertPeerIDToNodeID(pid)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("peerID", pid).Error("Failed to convert peer ID to node ID")
|
||||
continue
|
||||
}
|
||||
|
||||
columns, err := peerdas.CustodyColumns(nid, csc)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("peerID", pid).Error("Failed to determine peer custody columns")
|
||||
continue
|
||||
}
|
||||
|
||||
d.columnFromPeer[pid] = columns
|
||||
for column := range columns {
|
||||
d.peerFromColumn[column][pid] = true
|
||||
}
|
||||
}
|
||||
|
||||
log.WithField("columnFromPeer", d.columnFromPeer).Debug("Peer info refreshed")
|
||||
|
||||
columnWithNoPeers := make([]uint64, 0)
|
||||
for column, peers := range d.peerFromColumn {
|
||||
if len(peers) == 0 {
|
||||
columnWithNoPeers = append(columnWithNoPeers, column)
|
||||
}
|
||||
}
|
||||
if len(columnWithNoPeers) > 0 {
|
||||
log.WithField("columnWithNoPeers", columnWithNoPeers).Warn("Some columns have no peers responsible for custody")
|
||||
}
|
||||
}
|
||||
|
||||
// prunePeerInfo prunes inactive peers from peerFromColumn and columnFromPeer.
|
||||
// This should not be called outside of refreshPeerInfo unless the lock is held.
|
||||
func (d *dataColumnSampler1D) prunePeerInfo(activePeers []peer.ID) {
|
||||
active := make(map[peer.ID]bool)
|
||||
for _, pid := range activePeers {
|
||||
active[pid] = true
|
||||
}
|
||||
|
||||
for pid := range d.columnFromPeer {
|
||||
if !active[pid] {
|
||||
d.prunePeer(pid)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// prunePeer removes a peer from stored peer info map, it should be called with lock held.
|
||||
func (d *dataColumnSampler1D) prunePeer(pid peer.ID) {
|
||||
delete(d.columnFromPeer, pid)
|
||||
for _, peers := range d.peerFromColumn {
|
||||
delete(peers, pid)
|
||||
}
|
||||
}
|
||||
|
||||
func (d *dataColumnSampler1D) handleStateNotification(ctx context.Context, event *feed.Event) {
|
||||
if event.Type != statefeed.BlockProcessed {
|
||||
return
|
||||
}
|
||||
|
||||
data, ok := event.Data.(*statefeed.BlockProcessedData)
|
||||
if !ok {
|
||||
log.Error("Event feed data is not of type *statefeed.BlockProcessedData")
|
||||
return
|
||||
}
|
||||
|
||||
if !data.Verified {
|
||||
// We only process blocks that have been verified
|
||||
log.Error("Data is not verified")
|
||||
return
|
||||
}
|
||||
|
||||
if data.SignedBlock.Version() < version.Deneb {
|
||||
log.Debug("Pre Deneb block, skipping data column sampling")
|
||||
return
|
||||
}
|
||||
|
||||
if !coreTime.PeerDASIsActive(data.Slot) {
|
||||
// We do not trigger sampling if peerDAS is not active yet.
|
||||
return
|
||||
}
|
||||
|
||||
// Get the commitments for this block.
|
||||
commitments, err := data.SignedBlock.Block().Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to get blob KZG commitments")
|
||||
return
|
||||
}
|
||||
|
||||
// Skip if there are no commitments.
|
||||
if len(commitments) == 0 {
|
||||
log.Debug("No commitments in block, skipping data column sampling")
|
||||
return
|
||||
}
|
||||
|
||||
// Randomize columns for sample selection.
|
||||
randomizedColumns := randomizeColumns(d.nonCustodyColumns)
|
||||
samplesCount := min(params.BeaconConfig().SamplesPerSlot, uint64(len(d.nonCustodyColumns))-params.BeaconConfig().NumberOfColumns/2)
|
||||
ok, _, err = d.incrementalDAS(ctx, data.BlockRoot, randomizedColumns, samplesCount)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to run incremental DAS")
|
||||
}
|
||||
|
||||
if ok {
|
||||
log.WithFields(logrus.Fields{
|
||||
"root": fmt.Sprintf("%#x", data.BlockRoot),
|
||||
"columns": randomizedColumns,
|
||||
}).Debug("Data column sampling successful")
|
||||
} else {
|
||||
log.WithFields(logrus.Fields{
|
||||
"root": fmt.Sprintf("%#x", data.BlockRoot),
|
||||
"columns": randomizedColumns,
|
||||
}).Warning("Data column sampling failed")
|
||||
}
|
||||
}
|
||||
|
||||
// incrementalDAS samples data columns from active peers using incremental DAS.
|
||||
// https://ethresear.ch/t/lossydas-lossy-incremental-and-diagonal-sampling-for-data-availability/18963#incrementaldas-dynamically-increase-the-sample-size-10
|
||||
// According to https://github.com/ethereum/consensus-specs/issues/3825, we're going to select query samples exclusively from the non custody columns.
|
||||
func (d *dataColumnSampler1D) incrementalDAS(
|
||||
ctx context.Context,
|
||||
root [fieldparams.RootLength]byte,
|
||||
columns []uint64,
|
||||
sampleCount uint64,
|
||||
) (bool, []roundSummary, error) {
|
||||
allowedFailures := uint64(0)
|
||||
firstColumnToSample, extendedSampleCount := uint64(0), peerdas.ExtendedSampleCount(sampleCount, allowedFailures)
|
||||
roundSummaries := make([]roundSummary, 0, 1) // We optimistically allocate only one round summary.
|
||||
|
||||
for round := 1; ; /*No exit condition */ round++ {
|
||||
if extendedSampleCount > uint64(len(columns)) {
|
||||
// We already tried to sample all possible columns, this is the unhappy path.
|
||||
log.WithField("root", fmt.Sprintf("%#x", root)).Warning("Some columns are still missing after sampling all possible columns")
|
||||
return false, roundSummaries, nil
|
||||
}
|
||||
|
||||
// Get the columns to sample for this round.
|
||||
columnsToSample := columns[firstColumnToSample:extendedSampleCount]
|
||||
columnsToSampleCount := extendedSampleCount - firstColumnToSample
|
||||
|
||||
// Sample data columns from peers in parallel.
|
||||
retrievedSamples := d.sampleDataColumns(ctx, root, columnsToSample)
|
||||
|
||||
missingSamples := make(map[uint64]bool)
|
||||
for _, column := range columnsToSample {
|
||||
if !retrievedSamples[column] {
|
||||
missingSamples[column] = true
|
||||
}
|
||||
}
|
||||
|
||||
roundSummaries = append(roundSummaries, roundSummary{
|
||||
RequestedColumns: columnsToSample,
|
||||
MissingColumns: missingSamples,
|
||||
})
|
||||
|
||||
retrievedSampleCount := uint64(len(retrievedSamples))
|
||||
if retrievedSampleCount == columnsToSampleCount {
|
||||
// All columns were correctly sampled, this is the happy path.
|
||||
log.WithFields(logrus.Fields{
|
||||
"root": fmt.Sprintf("%#x", root),
|
||||
"roundsNeeded": round,
|
||||
}).Debug("All columns were successfully sampled")
|
||||
return true, roundSummaries, nil
|
||||
}
|
||||
|
||||
if retrievedSampleCount > columnsToSampleCount {
|
||||
// This should never happen.
|
||||
return false, nil, errors.New("retrieved more columns than requested")
|
||||
}
|
||||
|
||||
// missing columns, extend the samples.
|
||||
allowedFailures += columnsToSampleCount - retrievedSampleCount
|
||||
oldExtendedSampleCount := extendedSampleCount
|
||||
firstColumnToSample = extendedSampleCount
|
||||
extendedSampleCount = peerdas.ExtendedSampleCount(sampleCount, allowedFailures)
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"root": fmt.Sprintf("%#x", root),
|
||||
"round": round,
|
||||
"missingColumnsCount": allowedFailures,
|
||||
"currentSampleIndex": oldExtendedSampleCount,
|
||||
"nextSampleIndex": extendedSampleCount,
|
||||
}).Debug("Some columns are still missing after sampling this round.")
|
||||
}
|
||||
}
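// Illustrative sketch of how the request window grows between rounds: every
// column that fails to arrive increases allowedFailures, which widens the slice
// of candidate columns for the next round. missingThisRound is an assumed
// placeholder.
//
//	allowedFailures := uint64(0)
//	next := peerdas.ExtendedSampleCount(sampleCount, allowedFailures) // round 1 window
//	allowedFailures += missingThisRound
//	next = peerdas.ExtendedSampleCount(sampleCount, allowedFailures) // round 2 window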
|
||||
|
||||
func (d *dataColumnSampler1D) sampleDataColumns(
|
||||
ctx context.Context,
|
||||
root [fieldparams.RootLength]byte,
|
||||
columns []uint64,
|
||||
) map[uint64]bool {
|
||||
// distribute samples to peer
|
||||
peerToColumns := d.distributeSamplesToPeer(columns)
|
||||
|
||||
var (
|
||||
mu sync.Mutex
|
||||
wg sync.WaitGroup
|
||||
)
|
||||
res := make(map[uint64]bool)
|
||||
sampleFromPeer := func(pid peer.ID, cols map[uint64]bool) {
|
||||
defer wg.Done()
|
||||
retrieved := d.sampleDataColumnsFromPeer(ctx, pid, root, cols)
|
||||
|
||||
mu.Lock()
|
||||
for col := range retrieved {
|
||||
res[col] = true
|
||||
}
|
||||
mu.Unlock()
|
||||
}
|
||||
|
||||
// sample from peers in parallel
|
||||
for pid, cols := range peerToColumns {
|
||||
wg.Add(1)
|
||||
go sampleFromPeer(pid, cols)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
return res
|
||||
}
|
||||
|
||||
// distributeSamplesToPeer distributes samples to peers based on the columns they are responsible for.
|
||||
// Currently it randomizes peer selection for a column and does not take the overall peer load balance into account. It could be improved if needed.
|
||||
func (d *dataColumnSampler1D) distributeSamplesToPeer(
|
||||
columns []uint64,
|
||||
) map[peer.ID]map[uint64]bool {
|
||||
dist := make(map[peer.ID]map[uint64]bool)
|
||||
|
||||
for _, col := range columns {
|
||||
peers := d.peerFromColumn[col]
|
||||
if len(peers) == 0 {
|
||||
log.WithField("column", col).Warn("No peers responsible for custody of column")
|
||||
continue
|
||||
}
|
||||
|
||||
pid := selectRandomPeer(peers)
|
||||
if _, ok := dist[pid]; !ok {
|
||||
dist[pid] = make(map[uint64]bool)
|
||||
}
|
||||
dist[pid][col] = true
|
||||
}
|
||||
|
||||
return dist
|
||||
}
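// Illustrative usage of the distribution above; the peer names are placeholders.
// Each requested column is assigned to one randomly chosen custodian, so the
// result maps peer IDs to disjoint column sets.
//
//	assignment := d.distributeSamplesToPeer([]uint64{3, 6, 12})
//	// e.g. {peerA: {3: true}, peerB: {6: true}, peerC: {12: true}}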
|
||||
|
||||
func (d *dataColumnSampler1D) sampleDataColumnsFromPeer(
|
||||
ctx context.Context,
|
||||
pid peer.ID,
|
||||
root [fieldparams.RootLength]byte,
|
||||
requestedColumns map[uint64]bool,
|
||||
) map[uint64]bool {
|
||||
retrievedColumns := make(map[uint64]bool)
|
||||
|
||||
req := make(types.DataColumnSidecarsByRootReq, 0)
|
||||
for col := range requestedColumns {
|
||||
req = append(req, ð.DataColumnIdentifier{
|
||||
BlockRoot: root[:],
|
||||
ColumnIndex: col,
|
||||
})
|
||||
}
|
||||
|
||||
// Send the request to the peer.
|
||||
roDataColumns, err := SendDataColumnSidecarByRoot(ctx, d.clock, d.p2p, pid, d.ctxMap, &req)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to send data column sidecar by root")
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, roDataColumn := range roDataColumns {
|
||||
if verifyColumn(roDataColumn, root, pid, requestedColumns) {
|
||||
retrievedColumns[roDataColumn.ColumnIndex] = true
|
||||
}
|
||||
}
|
||||
|
||||
if len(retrievedColumns) == len(requestedColumns) {
|
||||
log.WithFields(logrus.Fields{
|
||||
"peerID": pid,
|
||||
"root": fmt.Sprintf("%#x", root),
|
||||
"requestedColumns": sortedSliceFromMap(requestedColumns),
|
||||
}).Debug("All requested columns were successfully sampled from peer")
|
||||
} else {
|
||||
log.WithFields(logrus.Fields{
|
||||
"peerID": pid,
|
||||
"root": fmt.Sprintf("%#x", root),
|
||||
"requestedColumns": sortedSliceFromMap(requestedColumns),
|
||||
"retrievedColumns": sortedSliceFromMap(retrievedColumns),
|
||||
}).Debug("Some requested columns were not sampled from peer")
|
||||
}
|
||||
|
||||
return retrievedColumns
|
||||
}
|
||||
|
||||
// randomizeColumns returns a slice containing the keys of the given column set in random order.
|
||||
func randomizeColumns(columns map[uint64]bool) []uint64 {
|
||||
// Create a slice from columns.
|
||||
randomized := make([]uint64, 0, len(columns))
|
||||
for column := range columns {
|
||||
randomized = append(randomized, column)
|
||||
}
|
||||
|
||||
// Shuffle the slice.
|
||||
rand.NewGenerator().Shuffle(len(randomized), func(i, j int) {
|
||||
randomized[i], randomized[j] = randomized[j], randomized[i]
|
||||
})
|
||||
|
||||
return randomized
|
||||
}
|
||||
|
||||
// sortedSliceFromMap returns a sorted list of keys from a map.
|
||||
func sortedSliceFromMap(m map[uint64]bool) []uint64 {
|
||||
result := make([]uint64, 0, len(m))
|
||||
for k := range m {
|
||||
result = append(result, k)
|
||||
}
|
||||
|
||||
sort.Slice(result, func(i, j int) bool {
|
||||
return result[i] < result[j]
|
||||
})
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// selectRandomPeer returns a random peer from the given set of peers.
|
||||
func selectRandomPeer(peers map[peer.ID]bool) peer.ID {
|
||||
pick := rand.NewGenerator().Uint64() % uint64(len(peers))
|
||||
for k := range peers {
|
||||
if pick == 0 {
|
||||
return k
|
||||
}
|
||||
pick--
|
||||
}
|
||||
|
||||
// This should never be reached.
|
||||
return peer.ID("")
|
||||
}
|
||||
|
||||
// verifyColumn verifies the retrieved column against the root, the index,
|
||||
// the KZG inclusion proof, and the KZG proof.
|
||||
func verifyColumn(
|
||||
roDataColumn blocks.RODataColumn,
|
||||
root [32]byte,
|
||||
pid peer.ID,
|
||||
requestedColumns map[uint64]bool,
|
||||
) bool {
|
||||
retrievedColumn := roDataColumn.ColumnIndex
|
||||
|
||||
// Filter out columns with incorrect root.
|
||||
actualRoot := roDataColumn.BlockRoot()
|
||||
if actualRoot != root {
|
||||
log.WithFields(logrus.Fields{
|
||||
"peerID": pid,
|
||||
"requestedRoot": fmt.Sprintf("%#x", root),
|
||||
"actualRoot": fmt.Sprintf("%#x", actualRoot),
|
||||
}).Debug("Retrieved root does not match requested root")
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// Filter out columns that were not requested.
|
||||
if !requestedColumns[retrievedColumn] {
|
||||
columnsToSampleList := sortedSliceFromMap(requestedColumns)
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"peerID": pid,
|
||||
"requestedColumns": columnsToSampleList,
|
||||
"retrievedColumn": retrievedColumn,
|
||||
}).Debug("Retrieved column was not requested")
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// Filter out columns which did not pass the KZG inclusion proof verification.
|
||||
if err := blocks.VerifyKZGInclusionProofColumn(roDataColumn.DataColumnSidecar); err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"peerID": pid,
|
||||
"root": fmt.Sprintf("%#x", root),
|
||||
"index": retrievedColumn,
|
||||
}).Debug("Failed to verify KZG inclusion proof for retrieved column")
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// Filter out columns which did not pass the KZG proof verification.
|
||||
verified, err := peerdas.VerifyDataColumnSidecarKZGProofs(roDataColumn.DataColumnSidecar)
|
||||
if err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"peerID": pid,
|
||||
"root": fmt.Sprintf("%#x", root),
|
||||
"index": retrievedColumn,
|
||||
}).Debug("Error when verifying KZG proof for retrieved column")
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
if !verified {
|
||||
log.WithFields(logrus.Fields{
|
||||
"peerID": pid,
|
||||
"root": fmt.Sprintf("%#x", root),
|
||||
"index": retrievedColumn,
|
||||
}).Debug("Failed to verify KZG proof for retrieved column")
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
507  beacon-chain/sync/data_columns_sampling_test.go  Normal file
@@ -0,0 +1,507 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"encoding/binary"
|
||||
"testing"
|
||||
|
||||
"github.com/consensys/gnark-crypto/ecc/bls12-381/fr"
|
||||
GoKZG "github.com/crate-crypto/go-kzg-4844"
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
"github.com/libp2p/go-libp2p/core/crypto"
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing"
|
||||
"github.com/sirupsen/logrus"
|
||||
|
||||
kzg "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
|
||||
mock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers"
|
||||
p2ptest "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/testing"
|
||||
p2pTypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/types"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
)
|
||||
|
||||
func TestRandomizeColumns(t *testing.T) {
|
||||
const count uint64 = 128
|
||||
|
||||
// Generate columns.
|
||||
columns := make(map[uint64]bool, count)
|
||||
for i := uint64(0); i < count; i++ {
|
||||
columns[i] = true
|
||||
}
|
||||
|
||||
// Randomize columns.
|
||||
randomizedColumns := randomizeColumns(columns)
|
||||
|
||||
// Convert back to a map.
|
||||
randomizedColumnsMap := make(map[uint64]bool, count)
|
||||
for _, column := range randomizedColumns {
|
||||
randomizedColumnsMap[column] = true
|
||||
}
|
||||
|
||||
// Check duplicates and missing columns.
|
||||
require.Equal(t, len(columns), len(randomizedColumnsMap))
|
||||
|
||||
// Check the values.
|
||||
for column := range randomizedColumnsMap {
|
||||
require.Equal(t, true, column < count)
|
||||
}
|
||||
}
|
||||
|
||||
// createAndConnectPeer creates a peer whose private key is deterministically derived from `offset`.
// The peer is added and connected to `p2pService`.
|
||||
func createAndConnectPeer(
|
||||
t *testing.T,
|
||||
p2pService *p2ptest.TestP2P,
|
||||
chainService *mock.ChainService,
|
||||
dataColumnSidecars []*ethpb.DataColumnSidecar,
|
||||
custodySubnetCount uint64,
|
||||
columnsNotToRespond map[uint64]bool,
|
||||
offset int,
|
||||
) *p2ptest.TestP2P {
|
||||
// Create the private key, depending on the offset.
|
||||
privateKeyBytes := make([]byte, 32)
|
||||
for i := 0; i < 32; i++ {
|
||||
privateKeyBytes[i] = byte(offset + i)
|
||||
}
|
||||
|
||||
privateKey, err := crypto.UnmarshalSecp256k1PrivateKey(privateKeyBytes)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create the peer.
|
||||
peer := p2ptest.NewTestP2P(t, swarmt.OptPeerPrivateKey(privateKey))
|
||||
|
||||
// TODO: Do not hardcode the topic.
|
||||
peer.SetStreamHandler("/eth2/beacon_chain/req/data_column_sidecars_by_root/1/ssz_snappy", func(stream network.Stream) {
|
||||
// Decode the request.
|
||||
req := new(p2pTypes.DataColumnSidecarsByRootReq)
|
||||
err := peer.Encoding().DecodeWithMaxLength(stream, req)
|
||||
require.NoError(t, err)
|
||||
|
||||
for _, identifier := range *req {
|
||||
// Filter out the columns not to respond.
|
||||
if columnsNotToRespond[identifier.ColumnIndex] {
|
||||
continue
|
||||
}
|
||||
|
||||
// Create the response.
|
||||
resp := dataColumnSidecars[identifier.ColumnIndex]
|
||||
|
||||
// Send the response.
|
||||
err := WriteDataColumnSidecarChunk(stream, chainService, p2pService.Encoding(), resp)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// Close the stream.
|
||||
closeStream(stream, log)
|
||||
})
|
||||
|
||||
// Create the record and set the custody count.
|
||||
enr := &enr.Record{}
|
||||
enr.Set(peerdas.Csc(custodySubnetCount))
|
||||
|
||||
// Add the peer and connect it.
|
||||
p2pService.Peers().Add(enr, peer.PeerID(), nil, network.DirOutbound)
|
||||
p2pService.Peers().SetConnectionState(peer.PeerID(), peers.PeerConnected)
|
||||
p2pService.Connect(peer)
|
||||
|
||||
return peer
|
||||
}
|
||||
|
||||
type dataSamplerTest struct {
|
||||
ctx context.Context
|
||||
p2pSvc *p2ptest.TestP2P
|
||||
peers []*p2ptest.TestP2P
|
||||
ctxMap map[[4]byte]int
|
||||
chainSvc *mock.ChainService
|
||||
blockRoot [32]byte
|
||||
blobs []kzg.Blob
|
||||
kzgCommitments [][]byte
|
||||
kzgProofs [][]byte
|
||||
dataColumnSidecars []*ethpb.DataColumnSidecar
|
||||
}
|
||||
|
||||
func setupDefaultDataColumnSamplerTest(t *testing.T) (*dataSamplerTest, *dataColumnSampler1D) {
|
||||
const (
|
||||
blobCount uint64 = 3
|
||||
custodyRequirement uint64 = 1
|
||||
)
|
||||
|
||||
test, sampler := setupDataColumnSamplerTest(t, blobCount)
|
||||
// Custody columns: [6, 38, 70, 102]
|
||||
p1 := createAndConnectPeer(t, test.p2pSvc, test.chainSvc, test.dataColumnSidecars, custodyRequirement, map[uint64]bool{}, 1)
|
||||
// Custody columns: [3, 35, 67, 99]
|
||||
p2 := createAndConnectPeer(t, test.p2pSvc, test.chainSvc, test.dataColumnSidecars, custodyRequirement, map[uint64]bool{}, 2)
|
||||
// Custody columns: [12, 44, 76, 108]
|
||||
p3 := createAndConnectPeer(t, test.p2pSvc, test.chainSvc, test.dataColumnSidecars, custodyRequirement, map[uint64]bool{}, 3)
|
||||
test.peers = []*p2ptest.TestP2P{p1, p2, p3}
|
||||
|
||||
return test, sampler
|
||||
}
|
||||
|
||||
func setupDataColumnSamplerTest(t *testing.T, blobCount uint64) (*dataSamplerTest, *dataColumnSampler1D) {
|
||||
require.NoError(t, kzg.Start())
|
||||
|
||||
// Generate random blobs, commitments and inclusion proofs.
|
||||
blobs := make([]kzg.Blob, blobCount)
|
||||
kzgCommitments := make([][]byte, blobCount)
|
||||
kzgProofs := make([][]byte, blobCount)
|
||||
|
||||
for i := uint64(0); i < blobCount; i++ {
|
||||
blob := getRandBlob(int64(i))
|
||||
|
||||
kzgCommitment, kzgProof, err := generateCommitmentAndProof(&blob)
|
||||
require.NoError(t, err)
|
||||
|
||||
blobs[i] = blob
|
||||
kzgCommitments[i] = kzgCommitment[:]
|
||||
kzgProofs[i] = kzgProof[:]
|
||||
}
|
||||
|
||||
dbBlock := util.NewBeaconBlockDeneb()
|
||||
dbBlock.Block.Body.BlobKzgCommitments = kzgCommitments
|
||||
sBlock, err := blocks.NewSignedBeaconBlock(dbBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
dataColumnSidecars, err := peerdas.DataColumnSidecars(sBlock, blobs)
|
||||
require.NoError(t, err)
|
||||
|
||||
blockRoot, err := dataColumnSidecars[0].GetSignedBlockHeader().Header.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
p2pSvc := p2ptest.NewTestP2P(t)
|
||||
chainSvc, clock := defaultMockChain(t)
|
||||
|
||||
test := &dataSamplerTest{
|
||||
ctx: context.Background(),
|
||||
p2pSvc: p2pSvc,
|
||||
peers: []*p2ptest.TestP2P{},
|
||||
ctxMap: map[[4]byte]int{{245, 165, 253, 66}: version.Deneb},
|
||||
chainSvc: chainSvc,
|
||||
blockRoot: blockRoot,
|
||||
blobs: blobs,
|
||||
kzgCommitments: kzgCommitments,
|
||||
kzgProofs: kzgProofs,
|
||||
dataColumnSidecars: dataColumnSidecars,
|
||||
}
|
||||
sampler := newDataColumnSampler1D(p2pSvc, clock, test.ctxMap, nil)
|
||||
|
||||
return test, sampler
|
||||
}
|
||||
|
||||
func TestDataColumnSampler1D_PeerManagement(t *testing.T) {
|
||||
testCases := []struct {
|
||||
numPeers int
|
||||
custodyRequirement uint64
|
||||
expectedColumns [][]uint64
|
||||
prunePeers map[int]bool // Peers to prune.
|
||||
}{
|
||||
{
|
||||
numPeers: 3,
|
||||
custodyRequirement: 1,
|
||||
expectedColumns: [][]uint64{
|
||||
{6, 38, 70, 102},
|
||||
{3, 35, 67, 99},
|
||||
{12, 44, 76, 108},
|
||||
},
|
||||
prunePeers: map[int]bool{
|
||||
0: true,
|
||||
},
|
||||
},
|
||||
{
|
||||
numPeers: 3,
|
||||
custodyRequirement: 2,
|
||||
expectedColumns: [][]uint64{
|
||||
{6, 16, 38, 48, 70, 80, 102, 112},
|
||||
{3, 13, 35, 45, 67, 77, 99, 109},
|
||||
{12, 31, 44, 63, 76, 95, 108, 127},
|
||||
},
|
||||
prunePeers: map[int]bool{
|
||||
0: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
test, sampler := setupDataColumnSamplerTest(t, uint64(tc.numPeers))
|
||||
for i := 0; i < tc.numPeers; i++ {
|
||||
p := createAndConnectPeer(t, test.p2pSvc, test.chainSvc, test.dataColumnSidecars, tc.custodyRequirement, nil, i+1)
|
||||
test.peers = append(test.peers, p)
|
||||
}
|
||||
|
||||
// confirm everything works
|
||||
sampler.refreshPeerInfo()
|
||||
require.Equal(t, params.BeaconConfig().NumberOfColumns, uint64(len(sampler.peerFromColumn)))
|
||||
|
||||
require.Equal(t, tc.numPeers, len(sampler.columnFromPeer))
|
||||
for i, peer := range test.peers {
|
||||
// confirm peer has the expected columns
|
||||
require.Equal(t, len(tc.expectedColumns[i]), len(sampler.columnFromPeer[peer.PeerID()]))
|
||||
for _, column := range tc.expectedColumns[i] {
|
||||
require.Equal(t, true, sampler.columnFromPeer[peer.PeerID()][column])
|
||||
}
|
||||
|
||||
// confirm column to peer mapping are correct
|
||||
for _, column := range tc.expectedColumns[i] {
|
||||
require.Equal(t, true, sampler.peerFromColumn[column][peer.PeerID()])
|
||||
}
|
||||
}
|
||||
|
||||
// prune peers
|
||||
for peer := range tc.prunePeers {
|
||||
err := test.p2pSvc.Disconnect(test.peers[peer].PeerID())
|
||||
test.p2pSvc.Peers().SetConnectionState(test.peers[peer].PeerID(), peers.PeerDisconnected)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
sampler.refreshPeerInfo()
|
||||
|
||||
require.Equal(t, tc.numPeers-len(tc.prunePeers), len(sampler.columnFromPeer))
|
||||
for i, peer := range test.peers {
|
||||
for _, column := range tc.expectedColumns[i] {
|
||||
expected := true
|
||||
if tc.prunePeers[i] {
|
||||
expected = false
|
||||
}
|
||||
require.Equal(t, expected, sampler.peerFromColumn[column][peer.PeerID()])
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDataColumnSampler1D_SampleDistribution(t *testing.T) {
|
||||
testCases := []struct {
|
||||
numPeers int
|
||||
custodyRequirement uint64
|
||||
columnsToDistribute [][]uint64
|
||||
expectedDistribution []map[int][]uint64
|
||||
}{
|
||||
{
|
||||
numPeers: 3,
|
||||
custodyRequirement: 1,
|
||||
// peer custody maps
|
||||
// p0: {6, 38, 70, 102},
|
||||
// p1: {3, 35, 67, 99},
|
||||
// p2: {12, 44, 76, 108},
|
||||
columnsToDistribute: [][]uint64{
|
||||
{3, 6, 12},
|
||||
{6, 3, 12, 38, 35, 44},
|
||||
{6, 38, 70},
|
||||
{11},
|
||||
},
|
||||
expectedDistribution: []map[int][]uint64{
|
||||
{
|
||||
0: {6}, // p1
|
||||
1: {3}, // p2
|
||||
2: {12}, // p3
|
||||
},
|
||||
{
|
||||
0: {6, 38}, // p1
|
||||
1: {3, 35}, // p2
|
||||
2: {12, 44}, // p3
|
||||
},
|
||||
{
|
||||
0: {6, 38, 70}, // p1
|
||||
},
|
||||
{},
|
||||
},
|
||||
},
|
||||
{
|
||||
numPeers: 3,
|
||||
custodyRequirement: 2,
|
||||
// peer custody maps
|
||||
// p0: {6, 16, 38, 48, 70, 80, 102, 112},
|
||||
// p1: {3, 13, 35, 45, 67, 77, 99, 109},
|
||||
// p2: {12, 31, 44, 63, 76, 95, 108, 127},
|
||||
columnsToDistribute: [][]uint64{
|
||||
{3, 6, 12, 109, 112, 127}, // all covered by peers
|
||||
{13, 16, 31, 32}, // 32 not covered by peers
|
||||
},
|
||||
expectedDistribution: []map[int][]uint64{
|
||||
{
|
||||
0: {6, 112}, // p1
|
||||
1: {3, 109}, // p2
|
||||
2: {12, 127}, // p3
|
||||
},
|
||||
{
|
||||
0: {16}, // p1
|
||||
1: {13}, // p2
|
||||
2: {31}, // p3
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
test, sampler := setupDataColumnSamplerTest(t, uint64(tc.numPeers))
|
||||
for i := 0; i < tc.numPeers; i++ {
|
||||
p := createAndConnectPeer(t, test.p2pSvc, test.chainSvc, test.dataColumnSidecars, tc.custodyRequirement, nil, i+1)
|
||||
test.peers = append(test.peers, p)
|
||||
}
|
||||
sampler.refreshPeerInfo()
|
||||
|
||||
for idx, columns := range tc.columnsToDistribute {
|
||||
result := sampler.distributeSamplesToPeer(columns)
|
||||
require.Equal(t, len(tc.expectedDistribution[idx]), len(result))
|
||||
|
||||
for peerIdx, dist := range tc.expectedDistribution[idx] {
|
||||
for _, column := range dist {
|
||||
peerID := test.peers[peerIdx].PeerID()
|
||||
require.Equal(t, true, result[peerID][column])
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDataColumnSampler1D_SampleDataColumns(t *testing.T) {
|
||||
test, sampler := setupDefaultDataColumnSamplerTest(t)
|
||||
sampler.refreshPeerInfo()
|
||||
|
||||
// Sample all columns.
|
||||
sampleColumns := []uint64{6, 3, 12, 38, 35, 44, 70, 67, 76, 102, 99, 108}
|
||||
retrieved := sampler.sampleDataColumns(test.ctx, test.blockRoot, sampleColumns)
|
||||
require.Equal(t, 12, len(retrieved))
|
||||
for _, column := range sampleColumns {
|
||||
require.Equal(t, true, retrieved[column])
|
||||
}
|
||||
|
||||
// Sample a subset of columns.
|
||||
sampleColumns = []uint64{6, 3, 12, 38, 35, 44}
|
||||
retrieved = sampler.sampleDataColumns(test.ctx, test.blockRoot, sampleColumns)
|
||||
require.Equal(t, 6, len(retrieved))
|
||||
for _, column := range sampleColumns {
|
||||
require.Equal(t, true, retrieved[column])
|
||||
}
|
||||
|
||||
// Sample a subset of columns with missing columns.
|
||||
sampleColumns = []uint64{6, 3, 12, 127}
|
||||
retrieved = sampler.sampleDataColumns(test.ctx, test.blockRoot, sampleColumns)
|
||||
require.Equal(t, 3, len(retrieved))
|
||||
require.DeepEqual(t, map[uint64]bool{6: true, 3: true, 12: true}, retrieved)
|
||||
}
|
||||
|
||||
func TestDataColumnSampler1D_IncrementalDAS(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
samplesCount uint64
|
||||
possibleColumnsToRequest []uint64
|
||||
columnsNotToRespond map[uint64]bool
|
||||
expectedSuccess bool
|
||||
expectedRoundSummaries []roundSummary
|
||||
}{
|
||||
{
|
||||
name: "All columns are correctly sampled in a single round",
|
||||
samplesCount: 5,
|
||||
possibleColumnsToRequest: []uint64{70, 35, 99, 6, 38, 3, 67, 102, 12, 44, 76, 108},
|
||||
columnsNotToRespond: map[uint64]bool{},
|
||||
expectedSuccess: true,
|
||||
expectedRoundSummaries: []roundSummary{
|
||||
{
|
||||
RequestedColumns: []uint64{70, 35, 99, 6, 38},
|
||||
MissingColumns: map[uint64]bool{},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Two missing columns in the first round, ok in the second round",
|
||||
samplesCount: 5,
|
||||
possibleColumnsToRequest: []uint64{70, 35, 99, 6, 38, 3, 67, 102, 12, 44, 76, 108},
|
||||
columnsNotToRespond: map[uint64]bool{6: true, 70: true},
|
||||
expectedSuccess: true,
|
||||
expectedRoundSummaries: []roundSummary{
|
||||
{
|
||||
RequestedColumns: []uint64{70, 35, 99, 6, 38},
|
||||
MissingColumns: map[uint64]bool{70: true, 6: true},
|
||||
},
|
||||
{
|
||||
RequestedColumns: []uint64{3, 67, 102, 12, 44, 76},
|
||||
MissingColumns: map[uint64]bool{},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Two missing columns in the first round, one missing in the second round. Fail to sample.",
|
||||
samplesCount: 5,
|
||||
possibleColumnsToRequest: []uint64{70, 35, 99, 6, 38, 3, 67, 102, 12, 44, 76, 108},
|
||||
columnsNotToRespond: map[uint64]bool{6: true, 70: true, 3: true},
|
||||
expectedSuccess: false,
|
||||
expectedRoundSummaries: []roundSummary{
|
||||
{
|
||||
RequestedColumns: []uint64{70, 35, 99, 6, 38},
|
||||
MissingColumns: map[uint64]bool{70: true, 6: true},
|
||||
},
|
||||
{
|
||||
RequestedColumns: []uint64{3, 67, 102, 12, 44, 76},
|
||||
MissingColumns: map[uint64]bool{3: true},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
test, sampler := setupDataColumnSamplerTest(t, 3)
|
||||
p1 := createAndConnectPeer(t, test.p2pSvc, test.chainSvc, test.dataColumnSidecars, 1, tc.columnsNotToRespond, 1)
|
||||
p2 := createAndConnectPeer(t, test.p2pSvc, test.chainSvc, test.dataColumnSidecars, 1, tc.columnsNotToRespond, 2)
|
||||
p3 := createAndConnectPeer(t, test.p2pSvc, test.chainSvc, test.dataColumnSidecars, 1, tc.columnsNotToRespond, 3)
|
||||
test.peers = []*p2ptest.TestP2P{p1, p2, p3}
|
||||
|
||||
sampler.refreshPeerInfo()
|
||||
|
||||
success, summaries, err := sampler.incrementalDAS(test.ctx, test.blockRoot, tc.possibleColumnsToRequest, tc.samplesCount)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tc.expectedSuccess, success)
|
||||
require.DeepEqual(t, tc.expectedRoundSummaries, summaries)
|
||||
}
|
||||
}
|
||||
|
||||
func deterministicRandomness(seed int64) [32]byte {
	// Converts an int64 to a byte slice
	buf := new(bytes.Buffer)
	err := binary.Write(buf, binary.BigEndian, seed)
	if err != nil {
		logrus.WithError(err).Error("Failed to write int64 to bytes buffer")
		return [32]byte{}
	}
	bytes := buf.Bytes()

	return sha256.Sum256(bytes)
}

// Returns a serialized random field element in big-endian
func getRandFieldElement(seed int64) [32]byte {
	bytes := deterministicRandomness(seed)
	var r fr.Element
	r.SetBytes(bytes[:])

	return GoKZG.SerializeScalar(r)
}

// Returns a random blob using the passed seed as entropy
func getRandBlob(seed int64) kzg.Blob {
	var blob kzg.Blob
	for i := 0; i < len(blob); i += 32 {
		fieldElementBytes := getRandFieldElement(seed + int64(i))
		copy(blob[i:i+32], fieldElementBytes[:])
	}
	return blob
}

func generateCommitmentAndProof(blob *kzg.Blob) (*kzg.Commitment, *kzg.Proof, error) {
	commitment, err := kzg.BlobToKZGCommitment(blob)
	if err != nil {
		return nil, nil, err
	}
	proof, err := kzg.ComputeBlobKZGProof(blob, commitment)
	if err != nil {
		return nil, nil, err
	}
	return &commitment, &proof, err
}
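Note: the helpers above are meant to build deterministic KZG test fixtures. A minimal sketch of how they combine (the helper name buildTestBlobFixture is hypothetical, and it assumes the kzg trusted setup has already been loaded, for example in TestMain):

func buildTestBlobFixture(seed int64) (kzg.Blob, *kzg.Commitment, *kzg.Proof, error) {
	// Same seed, same blob: getRandBlob derives every field element from the seed.
	blob := getRandBlob(seed)
	commitment, proof, err := generateCommitmentAndProof(&blob)
	if err != nil {
		return kzg.Blob{}, nil, nil, err
	}
	return blob, commitment, proof, nil
}

Sidecars built from such fixtures are reproducible across runs, which keeps the sampler tests above deterministic.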
|
||||
@@ -45,6 +45,8 @@ func (s *Service) decodePubsubMessage(msg *pubsub.Message) (ssz.Unmarshaler, err
|
||||
topic = p2p.GossipTypeMapping[reflect.TypeOf(ðpb.SyncCommitteeMessage{})]
|
||||
case strings.Contains(topic, p2p.GossipBlobSidecarMessage):
|
||||
topic = p2p.GossipTypeMapping[reflect.TypeOf(ðpb.BlobSidecar{})]
|
||||
case strings.Contains(topic, p2p.GossipDataColumnSidecarMessage):
|
||||
topic = p2p.GossipTypeMapping[reflect.TypeOf(ðpb.DataColumnSidecar{})]
|
||||
}
|
||||
|
||||
base := p2p.GossipTopicMappings(topic, 0)
|
||||
|
||||
@@ -67,6 +67,11 @@ func (s *Service) registerForUpcomingFork(currEpoch primitives.Epoch) error {
|
||||
s.registerRPCHandlersDeneb()
|
||||
}
|
||||
}
|
||||
// Handle PeerDAS separately.
|
||||
if params.PeerDASEnabled() && currEpoch+1 == params.BeaconConfig().Eip7594ForkEpoch {
|
||||
s.registerRPCHandlersPeerDAS()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
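Note: the check above registers the PeerDAS RPC handlers one epoch before Eip7594ForkEpoch, mirroring how the Deneb handlers are registered ahead of their fork. A standalone sketch of the timing predicate (plain uint64 epochs and the function name are illustrative only):

// Handlers for an upcoming fork are registered one epoch early so peers can
// negotiate the new protocols before the fork activates.
func shouldRegisterPeerDAS(currEpoch, eip7594ForkEpoch uint64, peerDASEnabled bool) bool {
	return peerDASEnabled && currEpoch+1 == eip7594ForkEpoch
}

For example, with a hypothetical fork epoch of 10, registration happens while processing epoch 9.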
|
||||
|
||||
@@ -121,5 +126,9 @@ func (s *Service) deregisterFromPastFork(currEpoch primitives.Epoch) error {
|
||||
}
|
||||
}
|
||||
}
|
||||
// Handle PeerDAS separately, as it's a special case.
|
||||
if params.PeerDASEnabled() && currEpoch > 0 && (currEpoch-1) == params.BeaconConfig().Eip7594ForkEpoch {
|
||||
s.unregisterBlobHandlers()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -20,6 +20,8 @@ go_library(
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/core/feed/block:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
"//beacon-chain/das:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
|
||||
@@ -10,6 +10,11 @@ import (
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
|
||||
coreTime "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
|
||||
@@ -29,8 +34,6 @@ import (
|
||||
p2ppb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -316,11 +319,19 @@ func (f *blocksFetcher) handleRequest(ctx context.Context, start primitives.Slot
|
||||
|
||||
response.bwb, response.pid, response.err = f.fetchBlocksFromPeer(ctx, start, count, peers)
|
||||
if response.err == nil {
|
||||
bwb, err := f.fetchBlobsFromPeer(ctx, response.bwb, response.pid, peers)
|
||||
if err != nil {
|
||||
response.err = err
|
||||
if coreTime.PeerDASIsActive(start) {
|
||||
bwb, err := f.fetchColumnsFromPeer(ctx, response.bwb, response.pid, peers)
|
||||
if err != nil {
|
||||
response.err = err
|
||||
}
|
||||
response.bwb = bwb
|
||||
} else {
|
||||
bwb, err := f.fetchBlobsFromPeer(ctx, response.bwb, response.pid, peers)
|
||||
if err != nil {
|
||||
response.err = err
|
||||
}
|
||||
response.bwb = bwb
|
||||
}
|
||||
response.bwb = bwb
|
||||
}
|
||||
return response
|
||||
}
|
||||
@@ -465,6 +476,16 @@ func (r *blobRange) Request() *p2ppb.BlobSidecarsByRangeRequest {
|
||||
}
|
||||
}
|
||||
|
||||
func (r *blobRange) RequestDataColumns() *p2ppb.DataColumnSidecarsByRangeRequest {
|
||||
if r == nil {
|
||||
return nil
|
||||
}
|
||||
return &p2ppb.DataColumnSidecarsByRangeRequest{
|
||||
StartSlot: r.low,
|
||||
Count: uint64(r.high.SubSlot(r.low)) + 1,
|
||||
}
|
||||
}
|
||||
|
||||
var errBlobVerification = errors.New("peer unable to serve aligned BlobSidecarsByRange and BeaconBlockSidecarsByRange responses")
|
||||
var errMissingBlobsForBlockCommitments = errors.Wrap(errBlobVerification, "blobs unavailable for processing block with kzg commitments")
|
||||
|
||||
@@ -490,6 +511,28 @@ func verifyAndPopulateBlobs(bwb []blocks2.BlockWithROBlobs, blobs []blocks.ROBlo
|
||||
return bwb, nil
|
||||
}
|
||||
|
||||
func verifyAndPopulateColumns(bwb []blocks2.BlockWithROBlobs, columns []blocks.RODataColumn, req *p2ppb.DataColumnSidecarsByRangeRequest, bss filesystem.BlobStorageSummarizer) ([]blocks2.BlockWithROBlobs, error) {
|
||||
columnsByRoot := make(map[[32]byte][]blocks.RODataColumn)
|
||||
for i := range columns {
|
||||
if columns[i].Slot() < req.StartSlot {
|
||||
continue
|
||||
}
|
||||
br := columns[i].BlockRoot()
|
||||
columnsByRoot[br] = append(columnsByRoot[br], columns[i])
|
||||
}
|
||||
for i := range bwb {
|
||||
bwi, err := populateBlockWithColumns(bwb[i], columnsByRoot[bwb[i].Block.Root()], req, bss)
|
||||
if err != nil {
|
||||
if errors.Is(err, errDidntPopulate) {
|
||||
continue
|
||||
}
|
||||
return bwb, err
|
||||
}
|
||||
bwb[i] = bwi
|
||||
}
|
||||
return bwb, nil
|
||||
}
|
||||
|
||||
var errDidntPopulate = errors.New("skipping population of block")
|
||||
|
||||
func populateBlock(bw blocks2.BlockWithROBlobs, blobs []blocks.ROBlob, req *p2ppb.BlobSidecarsByRangeRequest, bss filesystem.BlobStorageSummarizer) (blocks2.BlockWithROBlobs, error) {
|
||||
@@ -520,6 +563,32 @@ func populateBlock(bw blocks2.BlockWithROBlobs, blobs []blocks.ROBlob, req *p2pp
|
||||
return bw, nil
|
||||
}
|
||||
|
||||
func populateBlockWithColumns(bw blocks2.BlockWithROBlobs, columns []blocks.RODataColumn, req *p2ppb.DataColumnSidecarsByRangeRequest, bss filesystem.BlobStorageSummarizer) (blocks2.BlockWithROBlobs, error) {
|
||||
blk := bw.Block
|
||||
if blk.Version() < version.Deneb || blk.Block().Slot() < req.StartSlot {
|
||||
return bw, errDidntPopulate
|
||||
}
|
||||
commits, err := blk.Block().Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return bw, errDidntPopulate
|
||||
}
|
||||
if len(commits) == 0 {
|
||||
return bw, errDidntPopulate
|
||||
}
|
||||
colsPersub := params.BeaconConfig().NumberOfColumns / params.BeaconConfig().DataColumnSidecarSubnetCount
|
||||
subnetCount := peerdas.CustodySubnetCount()
|
||||
if len(columns) != int(subnetCount*colsPersub) {
|
||||
return bw, errors.Errorf("unequal custodied columns provided, got %d instead of %d", len(columns), subnetCount*colsPersub)
|
||||
}
|
||||
for ci := range columns {
|
||||
if err := verify.ColumnAlignsWithBlock(columns[ci], blk); err != nil {
|
||||
return bw, err
|
||||
}
|
||||
}
|
||||
bw.Columns = columns
|
||||
return bw, nil
|
||||
}
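Note: the length check in populateBlockWithColumns expects custodySubnetCount * (NumberOfColumns / DataColumnSidecarSubnetCount) columns. A worked sketch; the concrete numbers below are illustrative assumptions, the real values come from the beacon config:

// Expected number of custodied columns in a by-range response for one block.
func expectedCustodyColumns(numberOfColumns, subnetCount, custodySubnets uint64) uint64 {
	colsPerSubnet := numberOfColumns / subnetCount
	return custodySubnets * colsPerSubnet
}

// Example: expectedCustodyColumns(128, 32, 1) == 4, i.e. a node custodying one
// subnet expects 4 columns per block.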
|
||||
|
||||
func missingCommitError(root [32]byte, slot primitives.Slot, missing [][]byte) error {
|
||||
missStr := make([]string, 0, len(missing))
|
||||
for k := range missing {
|
||||
@@ -571,6 +640,72 @@ func (f *blocksFetcher) fetchBlobsFromPeer(ctx context.Context, bwb []blocks2.Bl
|
||||
return nil, errNoPeersAvailable
|
||||
}
|
||||
|
||||
// fetchColumnsFromPeer fetches data columns for the given blocks from a single randomly selected peer.
|
||||
func (f *blocksFetcher) fetchColumnsFromPeer(ctx context.Context, bwb []blocks2.BlockWithROBlobs, pid peer.ID, peers []peer.ID) ([]blocks2.BlockWithROBlobs, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "initialsync.fetchColumnsFromPeer")
|
||||
defer span.End()
|
||||
if slots.ToEpoch(f.clock.CurrentSlot()) < params.BeaconConfig().DenebForkEpoch {
|
||||
return bwb, nil
|
||||
}
|
||||
columnWindowStart, err := prysmsync.DataColumnsRPCMinValidSlot(f.clock.CurrentSlot())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Construct request message based on observed interval of blocks in need of columns.
|
||||
req := countCommitments(bwb, columnWindowStart).blobRange(f.bs).RequestDataColumns()
|
||||
if req == nil {
|
||||
return bwb, nil
|
||||
}
|
||||
// Construct request message based on required custodied columns.
|
||||
custodyCols, err := peerdas.CustodyColumns(f.p2p.NodeID(), peerdas.CustodySubnetCount())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
colIdxs := make([]uint64, 0, len(custodyCols))
|
||||
for c := range custodyCols {
|
||||
colIdxs = append(colIdxs, c)
|
||||
}
|
||||
req.Columns = colIdxs
|
||||
peers = f.filterPeers(ctx, peers, peersPercentagePerRequest)
|
||||
// We dial the initial peer first to ensure that we get the desired set of columns.
|
||||
wantedPeers := append([]peer.ID{pid}, peers...)
|
||||
bestPeers := f.hasSufficientBandwidth(wantedPeers, req.Count)
|
||||
// We append the best peers to the front so that higher capacity
|
||||
// peers are dialed first. If all of them fail, we fallback to the
|
||||
// initial peer we wanted to request blobs from.
|
||||
peers = append(bestPeers, pid)
|
||||
for i := 0; i < len(peers); i++ {
|
||||
p := peers[i]
|
||||
nid, err := p2p.ConvertPeerIDToNodeID(pid)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
remoteCustody, err := peerdas.CustodyColumns(nid, f.p2p.CustodyCountFromRemotePeer(p))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !remotePeerHasCustody(req.Columns, remoteCustody) {
|
||||
// TODO: For easier interop we do not skip for now
|
||||
log.Warnf("Remote peer %s does not have wanted columns", p.String())
|
||||
}
|
||||
columns, err := f.requestColumns(ctx, req, p)
|
||||
if err != nil {
|
||||
log.WithField("peer", p).WithError(err).Debug("Could not request data columns by range from peer")
|
||||
continue
|
||||
}
|
||||
f.p2p.Peers().Scorers().BlockProviderScorer().Touch(p)
|
||||
robs, err := verifyAndPopulateColumns(bwb, columns, req, f.bs)
|
||||
if err != nil {
|
||||
log.WithField("peer", p).WithError(err).Debug("Invalid DataColumnByRange response")
|
||||
continue
|
||||
}
|
||||
return robs, err
|
||||
}
|
||||
return nil, errNoPeersAvailable
|
||||
}
|
||||
|
||||
// requestBlocks is a wrapper for handling BeaconBlocksByRangeRequest requests/streams.
|
||||
func (f *blocksFetcher) requestBlocks(
|
||||
ctx context.Context,
|
||||
@@ -625,9 +760,38 @@ func (f *blocksFetcher) requestBlobs(ctx context.Context, req *p2ppb.BlobSidecar
|
||||
}
|
||||
f.rateLimiter.Add(pid.String(), int64(req.Count))
|
||||
l.Unlock()
|
||||
|
||||
return prysmsync.SendBlobsByRangeRequest(ctx, f.clock, f.p2p, pid, f.ctxMap, req)
|
||||
}
|
||||
|
||||
func (f *blocksFetcher) requestColumns(ctx context.Context, req *p2ppb.DataColumnSidecarsByRangeRequest, pid peer.ID) ([]blocks.RODataColumn, error) {
|
||||
if ctx.Err() != nil {
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
l := f.peerLock(pid)
|
||||
l.Lock()
|
||||
log.WithFields(logrus.Fields{
|
||||
"peer": pid,
|
||||
"start": req.StartSlot,
|
||||
"count": req.Count,
|
||||
"capacity": f.rateLimiter.Remaining(pid.String()),
|
||||
"score": f.p2p.Peers().Scorers().BlockProviderScorer().FormatScorePretty(pid),
|
||||
}).Debug("Requesting Columns")
|
||||
// We're intentionally abusing the block rate limit here, treating data column requests as if they were block requests.
|
||||
// Since column requests take more bandwidth than blocks, we should improve how we account for the different kinds
|
||||
// of requests, more in proportion to the cost of serving them.
|
||||
if f.rateLimiter.Remaining(pid.String()) < int64(req.Count) {
|
||||
if err := f.waitForBandwidth(pid, req.Count); err != nil {
|
||||
l.Unlock()
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
f.rateLimiter.Add(pid.String(), int64(req.Count))
|
||||
l.Unlock()
|
||||
|
||||
return prysmsync.SendDataColumnsByRangeRequest(ctx, f.clock, f.p2p, pid, f.ctxMap, req)
|
||||
}
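Note: as the comment in requestColumns states, column requests are charged against the block rate limit at a flat cost of req.Count. One possible refinement, sketched here purely as an illustration of that TODO and not as anything this diff implements, is to scale the charge by the fraction of columns requested:

// Illustrative cost function: charge a by-range columns request in proportion
// to how many of the NUMBER_OF_COLUMNS it asks for.
func columnRequestCost(slotCount, requestedColumns, numberOfColumns uint64) uint64 {
	if numberOfColumns == 0 {
		return slotCount
	}
	cost := slotCount * requestedColumns / numberOfColumns
	if cost == 0 {
		cost = 1 // always charge something per request
	}
	return cost
}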
|
||||
|
||||
// requestBlocksByRoot is a wrapper for handling BeaconBlockByRootsReq requests/streams.
|
||||
func (f *blocksFetcher) requestBlocksByRoot(
|
||||
ctx context.Context,
|
||||
@@ -728,3 +892,12 @@ func dedupPeers(peers []peer.ID) []peer.ID {
|
||||
}
|
||||
return newPeerList
|
||||
}
|
||||
|
||||
func remotePeerHasCustody(wantedIdxs []uint64, remoteCustMap map[uint64]bool) bool {
|
||||
for _, wIdx := range wantedIdxs {
|
||||
if !remoteCustMap[wIdx] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
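Note: remotePeerHasCustody is a strict subset check; one wanted column missing from the remote custody map rules the peer out. An illustrative example (column indices chosen arbitrarily):

func exampleRemoteCustodyCheck() (bool, bool) {
	// The remote peer custodies columns 6 and 70 only.
	remote := map[uint64]bool{6: true, 70: true}
	full := remotePeerHasCustody([]uint64{6, 70}, remote)         // true
	partial := remotePeerHasCustody([]uint64{6, 70, 102}, remote) // false: 102 not custodied
	return full, partial
}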
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/pkg/errors"
|
||||
coreTime "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
|
||||
p2pTypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/types"
|
||||
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
@@ -236,18 +237,18 @@ func (f *blocksFetcher) findForkWithPeer(ctx context.Context, pid peer.ID, slot
|
||||
Count: reqCount,
|
||||
Step: 1,
|
||||
}
|
||||
blocks, err := f.requestBlocks(ctx, req, pid)
|
||||
reqBlocks, err := f.requestBlocks(ctx, req, pid)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot fetch blocks: %w", err)
|
||||
}
|
||||
if len(blocks) == 0 {
|
||||
if len(reqBlocks) == 0 {
|
||||
return nil, errNoAlternateBlocks
|
||||
}
|
||||
|
||||
// If the first block is not connected to the current canonical chain, we'll stop processing this batch.
|
||||
// Instead, we'll work backwards from the first block until we find a common ancestor,
|
||||
// and then begin processing from there.
|
||||
first := blocks[0]
|
||||
first := reqBlocks[0]
|
||||
if !f.chain.HasBlock(ctx, first.Block().ParentRoot()) {
|
||||
// Backtrack on a root, to find a common ancestor from which we can resume syncing.
|
||||
fork, err := f.findAncestor(ctx, pid, first)
|
||||
@@ -260,8 +261,8 @@ func (f *blocksFetcher) findForkWithPeer(ctx context.Context, pid peer.ID, slot
|
||||
// Traverse blocks, and if we've got one that doesn't have parent in DB, backtrack on it.
|
||||
// Note that we start from the second element in the array, because we know that the first element is in the db,
|
||||
// otherwise we would have gone into the findAncestor early return path above.
|
||||
for i := 1; i < len(blocks); i++ {
|
||||
block := blocks[i]
|
||||
for i := 1; i < len(reqBlocks); i++ {
|
||||
block := reqBlocks[i]
|
||||
parentRoot := block.Block().ParentRoot()
|
||||
// Step through blocks until we find one that is not in the chain. The goal is to find the point where the
|
||||
// chain observed in the peer diverges from the locally known chain, and then collect up the remainder of the
|
||||
@@ -274,16 +275,25 @@ func (f *blocksFetcher) findForkWithPeer(ctx context.Context, pid peer.ID, slot
|
||||
"slot": block.Block().Slot(),
|
||||
"root": fmt.Sprintf("%#x", parentRoot),
|
||||
}).Debug("Block with unknown parent root has been found")
|
||||
altBlocks, err := sortedBlockWithVerifiedBlobSlice(blocks[i-1:])
|
||||
altBlocks, err := sortedBlockWithVerifiedBlobSlice(reqBlocks[i-1:])
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "invalid blocks received in findForkWithPeer")
|
||||
}
|
||||
var bwb []blocks.BlockWithROBlobs
|
||||
if coreTime.PeerDASIsActive(block.Block().Slot()) {
|
||||
bwb, err = f.fetchColumnsFromPeer(ctx, altBlocks, pid, []peer.ID{pid})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to retrieve blobs for blocks found in findForkWithPeer")
|
||||
}
|
||||
} else {
|
||||
bwb, err = f.fetchBlobsFromPeer(ctx, altBlocks, pid, []peer.ID{pid})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to retrieve blobs for blocks found in findForkWithPeer")
|
||||
}
|
||||
}
|
||||
// We need to fetch the blobs for the given alt-chain if any exist, so that we can try to verify and import
|
||||
// the blocks.
|
||||
bwb, err := f.fetchBlobsFromPeer(ctx, altBlocks, pid, []peer.ID{pid})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to retrieve blobs for blocks found in findForkWithPeer")
|
||||
}
|
||||
|
||||
// The caller will use the BlocksWith VerifiedBlobs in bwb as the starting point for
|
||||
// round-robin syncing the alternate chain.
|
||||
return &forkData{peer: pid, bwb: bwb}, nil
|
||||
@@ -302,9 +312,16 @@ func (f *blocksFetcher) findAncestor(ctx context.Context, pid peer.ID, b interfa
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "received invalid blocks in findAncestor")
|
||||
}
|
||||
bwb, err = f.fetchBlobsFromPeer(ctx, bwb, pid, []peer.ID{pid})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to retrieve blobs for blocks found in findAncestor")
|
||||
if coreTime.PeerDASIsActive(b.Block().Slot()) {
|
||||
bwb, err = f.fetchColumnsFromPeer(ctx, bwb, pid, []peer.ID{pid})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to retrieve columns for blocks found in findAncestor")
|
||||
}
|
||||
} else {
|
||||
bwb, err = f.fetchBlobsFromPeer(ctx, bwb, pid, []peer.ID{pid})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to retrieve blobs for blocks found in findAncestor")
|
||||
}
|
||||
}
|
||||
return &forkData{
|
||||
peer: pid,
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/paulbellamy/ratecounter"
|
||||
"github.com/pkg/errors"
|
||||
coreTime "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/das"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
|
||||
@@ -172,27 +173,52 @@ func (s *Service) processFetchedDataRegSync(
|
||||
if len(bwb) == 0 {
|
||||
return
|
||||
}
|
||||
bv := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.InitsyncSidecarRequirements)
|
||||
avs := das.NewLazilyPersistentStore(s.cfg.BlobStorage, bv)
|
||||
batchFields := logrus.Fields{
|
||||
"firstSlot": data.bwb[0].Block.Block().Slot(),
|
||||
"firstUnprocessed": bwb[0].Block.Block().Slot(),
|
||||
}
|
||||
for _, b := range bwb {
|
||||
if err := avs.Persist(s.clock.CurrentSlot(), b.Blobs...); err != nil {
|
||||
log.WithError(err).WithFields(batchFields).WithFields(syncFields(b.Block)).Warn("Batch failure due to BlobSidecar issues")
|
||||
return
|
||||
if coreTime.PeerDASIsActive(startSlot) {
|
||||
avs := das.NewLazilyPersistentStoreColumn(s.cfg.BlobStorage, emptyVerifier{}, s.cfg.P2P.NodeID())
|
||||
batchFields := logrus.Fields{
|
||||
"firstSlot": data.bwb[0].Block.Block().Slot(),
|
||||
"firstUnprocessed": bwb[0].Block.Block().Slot(),
|
||||
}
|
||||
if err := s.processBlock(ctx, genesis, b, s.cfg.Chain.ReceiveBlock, avs); err != nil {
|
||||
switch {
|
||||
case errors.Is(err, errParentDoesNotExist):
|
||||
log.WithFields(batchFields).WithField("missingParent", fmt.Sprintf("%#x", b.Block.Block().ParentRoot())).
|
||||
WithFields(syncFields(b.Block)).Debug("Could not process batch blocks due to missing parent")
|
||||
for _, b := range bwb {
|
||||
if err := avs.PersistColumns(s.clock.CurrentSlot(), b.Columns...); err != nil {
|
||||
log.WithError(err).WithFields(batchFields).WithFields(syncFields(b.Block)).Warn("Batch failure due to DataColumnSidecar issues")
|
||||
return
|
||||
default:
|
||||
log.WithError(err).WithFields(batchFields).WithFields(syncFields(b.Block)).Warn("Block processing failure")
|
||||
}
|
||||
if err := s.processBlock(ctx, genesis, b, s.cfg.Chain.ReceiveBlock, avs); err != nil {
|
||||
switch {
|
||||
case errors.Is(err, errParentDoesNotExist):
|
||||
log.WithFields(batchFields).WithField("missingParent", fmt.Sprintf("%#x", b.Block.Block().ParentRoot())).
|
||||
WithFields(syncFields(b.Block)).Debug("Could not process batch blocks due to missing parent")
|
||||
return
|
||||
default:
|
||||
log.WithError(err).WithFields(batchFields).WithFields(syncFields(b.Block)).Warn("Block processing failure")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
bv := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.InitsyncSidecarRequirements)
|
||||
avs := das.NewLazilyPersistentStore(s.cfg.BlobStorage, bv)
|
||||
batchFields := logrus.Fields{
|
||||
"firstSlot": data.bwb[0].Block.Block().Slot(),
|
||||
"firstUnprocessed": bwb[0].Block.Block().Slot(),
|
||||
}
|
||||
for _, b := range bwb {
|
||||
if err := avs.Persist(s.clock.CurrentSlot(), b.Blobs...); err != nil {
|
||||
log.WithError(err).WithFields(batchFields).WithFields(syncFields(b.Block)).Warn("Batch failure due to BlobSidecar issues")
|
||||
return
|
||||
}
|
||||
if err := s.processBlock(ctx, genesis, b, s.cfg.Chain.ReceiveBlock, avs); err != nil {
|
||||
switch {
|
||||
case errors.Is(err, errParentDoesNotExist):
|
||||
log.WithFields(batchFields).WithField("missingParent", fmt.Sprintf("%#x", b.Block.Block().ParentRoot())).
|
||||
WithFields(syncFields(b.Block)).Debug("Could not process batch blocks due to missing parent")
|
||||
return
|
||||
default:
|
||||
log.WithError(err).WithFields(batchFields).WithFields(syncFields(b.Block)).Warn("Block processing failure")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -330,20 +356,34 @@ func (s *Service) processBatchedBlocks(ctx context.Context, genesis time.Time,
|
||||
return fmt.Errorf("%w: %#x (in processBatchedBlocks, slot=%d)",
|
||||
errParentDoesNotExist, first.Block().ParentRoot(), first.Block().Slot())
|
||||
}
|
||||
|
||||
bv := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.InitsyncSidecarRequirements)
|
||||
avs := das.NewLazilyPersistentStore(s.cfg.BlobStorage, bv)
|
||||
s.logBatchSyncStatus(genesis, first, len(bwb))
|
||||
for _, bb := range bwb {
|
||||
if len(bb.Blobs) == 0 {
|
||||
continue
|
||||
var aStore das.AvailabilityStore
|
||||
if coreTime.PeerDASIsActive(first.Block().Slot()) {
|
||||
avs := das.NewLazilyPersistentStoreColumn(s.cfg.BlobStorage, emptyVerifier{}, s.cfg.P2P.NodeID())
|
||||
s.logBatchSyncStatus(genesis, first, len(bwb))
|
||||
for _, bb := range bwb {
|
||||
if len(bb.Columns) == 0 {
|
||||
continue
|
||||
}
|
||||
if err := avs.PersistColumns(s.clock.CurrentSlot(), bb.Columns...); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := avs.Persist(s.clock.CurrentSlot(), bb.Blobs...); err != nil {
|
||||
return err
|
||||
aStore = avs
|
||||
} else {
|
||||
bv := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.InitsyncSidecarRequirements)
|
||||
avs := das.NewLazilyPersistentStore(s.cfg.BlobStorage, bv)
|
||||
s.logBatchSyncStatus(genesis, first, len(bwb))
|
||||
for _, bb := range bwb {
|
||||
if len(bb.Blobs) == 0 {
|
||||
continue
|
||||
}
|
||||
if err := avs.Persist(s.clock.CurrentSlot(), bb.Blobs...); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
aStore = avs
|
||||
}
|
||||
|
||||
return bFunc(ctx, blocks.BlockWithROBlobsSlice(bwb).ROBlocks(), avs)
|
||||
return bFunc(ctx, blocks.BlockWithROBlobsSlice(bwb).ROBlocks(), aStore)
|
||||
}
|
||||
|
||||
// updatePeerScorerStats adjusts monitored metrics for a peer.
|
||||
@@ -380,3 +420,15 @@ func (s *Service) isProcessedBlock(ctx context.Context, blk blocks.ROBlock) bool
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
type emptyVerifier struct {
|
||||
}
|
||||
|
||||
func (_ emptyVerifier) VerifiedRODataColumns(_ context.Context, _ blocks.ROBlock, cols []blocks.RODataColumn) ([]blocks.VerifiedRODataColumn, error) {
|
||||
var verCols []blocks.VerifiedRODataColumn
|
||||
for _, col := range cols {
|
||||
vCol := blocks.NewVerifiedRODataColumn(col)
|
||||
verCols = append(verCols, vCol)
|
||||
}
|
||||
return verCols, nil
|
||||
}
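Note: emptyVerifier stamps every column as verified without any KZG check, consistent with the interop-oriented TODOs elsewhere in this diff. A stricter variant could look like the sketch below; the helper peerdas.VerifyDataColumnSidecarKZGProofs and its signature are assumptions for illustration, not something this diff introduces:

type kzgCheckingVerifier struct{}

func (kzgCheckingVerifier) VerifiedRODataColumns(_ context.Context, _ blocks.ROBlock, cols []blocks.RODataColumn) ([]blocks.VerifiedRODataColumn, error) {
	verCols := make([]blocks.VerifiedRODataColumn, 0, len(cols))
	for _, col := range cols {
		// Assumed helper: checks the column's cells against its KZG commitments.
		ok, err := peerdas.VerifyDataColumnSidecarKZGProofs(col)
		if err != nil {
			return nil, err
		}
		if !ok {
			return nil, errors.New("data column KZG proof verification failed")
		}
		verCols = append(verCols, blocks.NewVerifiedRODataColumn(col))
	}
	return verCols, nil
}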
|
||||
|
||||
@@ -11,10 +11,14 @@ import (
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/paulbellamy/ratecounter"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/async/abool"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
|
||||
blockfeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/block"
|
||||
statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
|
||||
coreTime "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/das"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
|
||||
@@ -32,7 +36,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
prysmTime "github.com/prysmaticlabs/prysm/v5/time"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var _ runtime.Service = (*Service)(nil)
|
||||
@@ -184,9 +187,16 @@ func (s *Service) Start() {
|
||||
log.WithError(err).Error("Error waiting for minimum number of peers")
|
||||
return
|
||||
}
|
||||
if err := s.fetchOriginBlobs(peers); err != nil {
|
||||
log.WithError(err).Error("Failed to fetch missing blobs for checkpoint origin")
|
||||
return
|
||||
if coreTime.PeerDASIsActive(s.cfg.Chain.HeadSlot()) {
|
||||
if err := s.fetchOriginColumns(peers); err != nil {
|
||||
log.WithError(err).Error("Failed to fetch missing columns for checkpoint origin")
|
||||
return
|
||||
}
|
||||
} else {
|
||||
if err := s.fetchOriginBlobs(peers); err != nil {
|
||||
log.WithError(err).Error("Failed to fetch missing blobs for checkpoint origin")
|
||||
return
|
||||
}
|
||||
}
|
||||
if err := s.roundRobinSync(gt); err != nil {
|
||||
if errors.Is(s.ctx.Err(), context.Canceled) {
|
||||
@@ -306,6 +316,56 @@ func missingBlobRequest(blk blocks.ROBlock, store *filesystem.BlobStorage) (p2pt
|
||||
return req, nil
|
||||
}
|
||||
|
||||
func (s *Service) missingColumnRequest(roBlock blocks.ROBlock, store *filesystem.BlobStorage) (p2ptypes.DataColumnSidecarsByRootReq, error) {
|
||||
// No columns for pre-Deneb blocks.
|
||||
if roBlock.Version() < version.Deneb {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Get the block root.
|
||||
blockRoot := roBlock.Root()
|
||||
|
||||
// Get the commitments from the block.
|
||||
commitments, err := roBlock.Block().Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to get blob KZG commitments")
|
||||
}
|
||||
|
||||
// Return early if there are no commitments.
|
||||
if len(commitments) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Check which columns are already on disk.
|
||||
storedColumns, err := store.ColumnIndices(blockRoot)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error checking existing blobs for checkpoint sync block root %#x", blockRoot)
|
||||
}
|
||||
|
||||
// Get our node ID.
|
||||
nodeID := s.cfg.P2P.NodeID()
|
||||
|
||||
// Get the custodied columns.
|
||||
custodiedColumns, err := peerdas.CustodyColumns(nodeID, peerdas.CustodySubnetCount())
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "custody columns")
|
||||
}
|
||||
|
||||
// Build blob sidecars by root requests based on missing columns.
|
||||
req := make(p2ptypes.DataColumnSidecarsByRootReq, 0, len(commitments))
|
||||
for columnIndex := range custodiedColumns {
|
||||
isColumnAvailable := storedColumns[columnIndex]
|
||||
if !isColumnAvailable {
|
||||
req = append(req, ð.DataColumnIdentifier{
|
||||
BlockRoot: blockRoot[:],
|
||||
ColumnIndex: columnIndex,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return req, nil
|
||||
}
|
||||
|
||||
func (s *Service) fetchOriginBlobs(pids []peer.ID) error {
|
||||
r, err := s.cfg.DB.OriginCheckpointBlockRoot(s.ctx)
|
||||
if errors.Is(err, db.ErrNotFoundOriginBlockRoot) {
|
||||
@@ -356,6 +416,59 @@ func (s *Service) fetchOriginBlobs(pids []peer.ID) error {
|
||||
return fmt.Errorf("no connected peer able to provide blobs for checkpoint sync block %#x", r)
|
||||
}
|
||||
|
||||
func (s *Service) fetchOriginColumns(pids []peer.ID) error {
|
||||
r, err := s.cfg.DB.OriginCheckpointBlockRoot(s.ctx)
|
||||
if errors.Is(err, db.ErrNotFoundOriginBlockRoot) {
|
||||
return nil
|
||||
}
|
||||
blk, err := s.cfg.DB.Block(s.ctx, r)
|
||||
if err != nil {
|
||||
log.WithField("root", fmt.Sprintf("%#x", r)).Error("Block for checkpoint sync origin root not found in db")
|
||||
return err
|
||||
}
|
||||
if !params.WithinDAPeriod(slots.ToEpoch(blk.Block().Slot()), slots.ToEpoch(s.clock.CurrentSlot())) {
|
||||
return nil
|
||||
}
|
||||
rob, err := blocks.NewROBlockWithRoot(blk, r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req, err := s.missingColumnRequest(rob, s.cfg.BlobStorage)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(req) == 0 {
|
||||
log.WithField("root", fmt.Sprintf("%#x", r)).Debug("All columns for checkpoint block are present")
|
||||
return nil
|
||||
}
|
||||
shufflePeers(pids)
|
||||
pids, err = s.cfg.P2P.GetValidCustodyPeers(pids)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for i := range pids {
|
||||
sidecars, err := sync.SendDataColumnSidecarByRoot(s.ctx, s.clock, s.cfg.P2P, pids[i], s.ctxMap, &req)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if len(sidecars) != len(req) {
|
||||
continue
|
||||
}
|
||||
avs := das.NewLazilyPersistentStoreColumn(s.cfg.BlobStorage, emptyVerifier{}, s.cfg.P2P.NodeID())
|
||||
current := s.clock.CurrentSlot()
|
||||
if err := avs.PersistColumns(current, sidecars...); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := avs.IsDataAvailable(s.ctx, current, rob); err != nil {
|
||||
log.WithField("root", fmt.Sprintf("%#x", r)).WithField("peerID", pids[i]).Warn("Columns from peer for origin block were unusable")
|
||||
continue
|
||||
}
|
||||
log.WithField("nColumns", len(sidecars)).WithField("root", fmt.Sprintf("%#x", r)).Info("Successfully downloaded blobs for checkpoint sync block")
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("no connected peer able to provide columns for checkpoint sync block %#x", r)
|
||||
}
|
||||
|
||||
func shufflePeers(pids []peer.ID) {
|
||||
rg := rand.NewGenerator()
|
||||
rg.Shuffle(len(pids), func(i, j int) {
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/async"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
|
||||
coreTime "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
|
||||
p2ptypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/types"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
@@ -204,19 +205,40 @@ func (s *Service) processAndBroadcastBlock(ctx context.Context, b interfaces.Rea
|
||||
}
|
||||
}
|
||||
|
||||
request, err := s.pendingBlobsRequestForBlock(blkRoot, b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(request) > 0 {
|
||||
peers := s.getBestPeers()
|
||||
peerCount := len(peers)
|
||||
if peerCount == 0 {
|
||||
return errors.Wrapf(errNoPeersForPending, "block root=%#x", blkRoot)
|
||||
}
|
||||
if err := s.sendAndSaveBlobSidecars(ctx, request, peers[rand.NewGenerator().Int()%peerCount], b); err != nil {
|
||||
if coreTime.PeerDASIsActive(b.Block().Slot()) {
|
||||
request, err := s.pendingDataColumnRequestForBlock(blkRoot, b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(request) > 0 {
|
||||
peers := s.getBestPeers()
|
||||
peers, err = s.cfg.p2p.GetValidCustodyPeers(peers)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
peerCount := len(peers)
|
||||
if peerCount == 0 {
|
||||
return errors.Wrapf(errNoPeersForPending, "block root=%#x", blkRoot)
|
||||
}
|
||||
if err := s.sendAndSaveDataColumnSidecars(ctx, request, peers[rand.NewGenerator().Int()%peerCount], b); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
request, err := s.pendingBlobsRequestForBlock(blkRoot, b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(request) > 0 {
|
||||
peers := s.getBestPeers()
|
||||
peerCount := len(peers)
|
||||
if peerCount == 0 {
|
||||
return errors.Wrapf(errNoPeersForPending, "block root=%#x", blkRoot)
|
||||
}
|
||||
if err := s.sendAndSaveBlobSidecars(ctx, request, peers[rand.NewGenerator().Int()%peerCount], b); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := s.cfg.chain.ReceiveBlock(ctx, b, blkRoot, nil); err != nil {
|
||||
|
||||
@@ -7,12 +7,14 @@ import (
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/trailofbits/go-mutexasserts"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
|
||||
p2ptypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/types"
|
||||
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
leakybucket "github.com/prysmaticlabs/prysm/v5/container/leaky-bucket"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/trailofbits/go-mutexasserts"
|
||||
)
|
||||
|
||||
const defaultBurstLimit = 5
|
||||
@@ -46,6 +48,10 @@ func newRateLimiter(p2pProvider p2p.P2P) *limiter {
|
||||
allowedBlobsPerSecond := float64(flags.Get().BlobBatchLimit)
|
||||
allowedBlobsBurst := int64(flags.Get().BlobBatchLimitBurstFactor * flags.Get().BlobBatchLimit)
|
||||
|
||||
// Initialize data column limits.
|
||||
allowedDataColumnsPerSecond := float64(flags.Get().DataColumnBatchLimit * int(params.BeaconConfig().CustodyRequirement))
|
||||
allowedDataColumnsBurst := int64(flags.Get().DataColumnBatchLimitBurstFactor * flags.Get().DataColumnBatchLimit * int(params.BeaconConfig().CustodyRequirement))
|
||||
|
||||
// Set topic map for all rpc topics.
|
||||
topicMap := make(map[string]*leakybucket.Collector, len(p2p.RPCTopicMappings))
|
||||
// Goodbye Message
|
||||
@@ -66,6 +72,9 @@ func newRateLimiter(p2pProvider p2p.P2P) *limiter {
|
||||
// for BlobSidecarsByRoot and BlobSidecarsByRange
|
||||
blobCollector := leakybucket.NewCollector(allowedBlobsPerSecond, allowedBlobsBurst, blockBucketPeriod, false)
|
||||
|
||||
// for DataColumnSidecarsByRoot and DataColumnSidecarsByRange
|
||||
columnCollector := leakybucket.NewCollector(allowedDataColumnsPerSecond, allowedDataColumnsBurst, blockBucketPeriod, false)
|
||||
|
||||
// BlocksByRoots requests
|
||||
topicMap[addEncoding(p2p.RPCBlocksByRootTopicV1)] = blockCollector
|
||||
topicMap[addEncoding(p2p.RPCBlocksByRootTopicV2)] = blockCollectorV2
|
||||
@@ -79,6 +88,11 @@ func newRateLimiter(p2pProvider p2p.P2P) *limiter {
|
||||
// BlobSidecarsByRangeV1
|
||||
topicMap[addEncoding(p2p.RPCBlobSidecarsByRangeTopicV1)] = blobCollector
|
||||
|
||||
// DataColumnSidecarsByRootV1
|
||||
topicMap[addEncoding(p2p.RPCDataColumnSidecarsByRootTopicV1)] = columnCollector
|
||||
// DataColumnSidecarsByRangeV1
|
||||
topicMap[addEncoding(p2p.RPCDataColumnSidecarsByRangeTopicV1)] = columnCollector
|
||||
|
||||
// General topic for all rpc requests.
|
||||
topicMap[rpcLimiterTopic] = leakybucket.NewCollector(5, defaultBurstLimit*2, leakyBucketPeriod, false /* deleteEmptyBuckets */)
|
||||
|
||||
|
||||
@@ -18,7 +18,7 @@ import (
|
||||
|
||||
func TestNewRateLimiter(t *testing.T) {
|
||||
rlimiter := newRateLimiter(mockp2p.NewTestP2P(t))
|
||||
assert.Equal(t, len(rlimiter.limiterMap), 12, "correct number of topics not registered")
|
||||
assert.Equal(t, 14, len(rlimiter.limiterMap), "correct number of topics not registered")
|
||||
}
|
||||
|
||||
func TestNewRateLimiter_FreeCorrectly(t *testing.T) {
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
"github.com/pkg/errors"
|
||||
ssz "github.com/prysmaticlabs/fastssz"
|
||||
coreTime "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
|
||||
p2ptypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/types"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
@@ -51,7 +52,9 @@ func (s *Service) registerRPCHandlers() {
|
||||
s.pingHandler,
|
||||
)
|
||||
s.registerRPCHandlersAltair()
|
||||
if currEpoch >= params.BeaconConfig().DenebForkEpoch {
|
||||
if coreTime.PeerDASIsActive(slots.UnsafeEpochStart(currEpoch)) {
|
||||
s.registerRPCHandlersPeerDAS()
|
||||
} else if currEpoch >= params.BeaconConfig().DenebForkEpoch {
|
||||
s.registerRPCHandlersDeneb()
|
||||
}
|
||||
return
|
||||
@@ -109,6 +112,17 @@ func (s *Service) registerRPCHandlersDeneb() {
|
||||
)
|
||||
}
|
||||
|
||||
func (s *Service) registerRPCHandlersPeerDAS() {
|
||||
s.registerRPC(
|
||||
p2p.RPCDataColumnSidecarsByRootTopicV1,
|
||||
s.dataColumnSidecarByRootRPCHandler,
|
||||
)
|
||||
s.registerRPC(
|
||||
p2p.RPCDataColumnSidecarsByRangeTopicV1,
|
||||
s.dataColumnSidecarsByRangeRPCHandler,
|
||||
)
|
||||
}
|
||||
|
||||
// Remove all v1 Stream handlers that are no longer supported
|
||||
// from altair onwards.
|
||||
func (s *Service) unregisterPhase0Handlers() {
|
||||
@@ -121,6 +135,14 @@ func (s *Service) unregisterPhase0Handlers() {
|
||||
s.cfg.p2p.Host().RemoveStreamHandler(protocol.ID(fullMetadataTopic))
|
||||
}
|
||||
|
||||
func (s *Service) unregisterBlobHandlers() {
|
||||
fullBlobRangeTopic := p2p.RPCBlobSidecarsByRangeTopicV1 + s.cfg.p2p.Encoding().ProtocolSuffix()
|
||||
fullBlobRootTopic := p2p.RPCBlobSidecarsByRootTopicV1 + s.cfg.p2p.Encoding().ProtocolSuffix()
|
||||
|
||||
s.cfg.p2p.Host().RemoveStreamHandler(protocol.ID(fullBlobRangeTopic))
|
||||
s.cfg.p2p.Host().RemoveStreamHandler(protocol.ID(fullBlobRootTopic))
|
||||
}
|
||||
|
||||
// registerRPC for a given topic with an expected protobuf message type.
|
||||
func (s *Service) registerRPC(baseTopic string, handle rpcHandler) {
|
||||
topic := baseTopic + s.cfg.p2p.Encoding().ProtocolSuffix()
|
||||
|
||||
@@ -7,6 +7,9 @@ import (
|
||||
libp2pcore "github.com/libp2p/go-libp2p/core"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
|
||||
coreTime "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/execution"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/types"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/sync/verify"
|
||||
@@ -55,15 +58,28 @@ func (s *Service) sendRecentBeaconBlocksRequest(ctx context.Context, requests *t
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
request, err := s.pendingBlobsRequestForBlock(blkRoot, blk)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(request) == 0 {
|
||||
continue
|
||||
}
|
||||
if err := s.sendAndSaveBlobSidecars(ctx, request, id, blk); err != nil {
|
||||
return err
|
||||
if coreTime.PeerDASIsActive(blk.Block().Slot()) {
|
||||
request, err := s.pendingDataColumnRequestForBlock(blkRoot, blk)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "pending data column request for block")
|
||||
}
|
||||
if len(request) == 0 {
|
||||
continue
|
||||
}
|
||||
if err := s.sendAndSaveDataColumnSidecars(ctx, request, id, blk); err != nil {
|
||||
return errors.Wrap(err, "send and save data column sidecars")
|
||||
}
|
||||
} else {
|
||||
request, err := s.pendingBlobsRequestForBlock(blkRoot, blk)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "pending blobs request for block")
|
||||
}
|
||||
if len(request) == 0 {
|
||||
continue
|
||||
}
|
||||
if err := s.sendAndSaveBlobSidecars(ctx, request, id, blk); err != nil {
|
||||
return errors.Wrap(err, "send and save blob sidecars")
|
||||
}
|
||||
}
|
||||
}
|
||||
return err
|
||||
@@ -170,6 +186,36 @@ func (s *Service) sendAndSaveBlobSidecars(ctx context.Context, request types.Blo
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) sendAndSaveDataColumnSidecars(ctx context.Context, request types.DataColumnSidecarsByRootReq, peerID peer.ID, block interfaces.ReadOnlySignedBeaconBlock) error {
|
||||
if len(request) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
sidecars, err := SendDataColumnSidecarByRoot(ctx, s.cfg.clock, s.cfg.p2p, peerID, s.ctxMap, &request)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
RoBlock, err := blocks.NewROBlock(block)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, sidecar := range sidecars {
|
||||
if err := verify.ColumnAlignsWithBlock(sidecar, RoBlock); err != nil {
|
||||
return err
|
||||
}
|
||||
log.WithFields(columnFields(sidecar)).Debug("Received data column sidecar RPC")
|
||||
}
|
||||
|
||||
for i := range sidecars {
|
||||
verifiedCol := blocks.NewVerifiedRODataColumn(sidecars[i])
if err := s.cfg.blobStorage.SaveDataColumn(verifiedCol); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) pendingBlobsRequestForBlock(root [32]byte, b interfaces.ReadOnlySignedBeaconBlock) (types.BlobSidecarsByRootReq, error) {
|
||||
if b.Version() < version.Deneb {
|
||||
return nil, nil // Block before deneb has no blob.
|
||||
@@ -181,7 +227,27 @@ func (s *Service) pendingBlobsRequestForBlock(root [32]byte, b interfaces.ReadOn
|
||||
if len(cc) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
return s.constructPendingBlobsRequest(root, len(cc))
|
||||
|
||||
blobIdentifiers, err := s.constructPendingBlobsRequest(root, len(cc))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "construct pending blobs request")
|
||||
}
|
||||
|
||||
return blobIdentifiers, nil
|
||||
}
|
||||
|
||||
func (s *Service) pendingDataColumnRequestForBlock(root [32]byte, b interfaces.ReadOnlySignedBeaconBlock) (types.DataColumnSidecarsByRootReq, error) {
|
||||
if b.Version() < version.Deneb {
|
||||
return nil, nil // Block before deneb has no blob.
|
||||
}
|
||||
cc, err := b.Block().Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(cc) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
return s.constructPendingColumnRequest(root)
|
||||
}
|
||||
|
||||
// constructPendingBlobsRequest creates a request for BlobSidecars by root, considering blobs already in DB.
|
||||
@@ -191,12 +257,40 @@ func (s *Service) constructPendingBlobsRequest(root [32]byte, commitments int) (
|
||||
}
|
||||
stored, err := s.cfg.blobStorage.Indices(root)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, errors.Wrap(err, "indices")
|
||||
}
|
||||
|
||||
return requestsForMissingIndices(stored, commitments, root), nil
|
||||
}
|
||||
|
||||
func (s *Service) constructPendingColumnRequest(root [32]byte) (types.DataColumnSidecarsByRootReq, error) {
|
||||
// Retrieve the storedColumns columns for the current root.
|
||||
storedColumns, err := s.cfg.blobStorage.ColumnIndices(root)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "column indices")
|
||||
}
|
||||
|
||||
// Retrieve the columns we should custody.
|
||||
custodiedColumns, err := peerdas.CustodyColumns(s.cfg.p2p.NodeID(), peerdas.CustodySubnetCount())
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "custody columns")
|
||||
}
|
||||
|
||||
// Build the request for the missing columns.
|
||||
req := make(types.DataColumnSidecarsByRootReq, 0, len(custodiedColumns))
|
||||
for column := range custodiedColumns {
|
||||
isColumnStored := storedColumns[column]
|
||||
if !isColumnStored {
|
||||
req = append(req, ð.DataColumnIdentifier{
|
||||
BlockRoot: root[:],
|
||||
ColumnIndex: column,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return req, nil
|
||||
}
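Note: constructPendingColumnRequest only requests custodied columns that are not yet on disk. A worked sketch with illustrative indices (the helper name is hypothetical):

// With custody of columns {6, 38, 70, 102} and column 6 already stored, the
// request contains identifiers for 38, 70 and 102 under the same block root.
func exampleMissingColumnIdents(root [32]byte) []*eth.DataColumnIdentifier {
	custodied := []uint64{6, 38, 70, 102} // illustrative custody set
	stored := map[uint64]bool{6: true}    // illustrative on-disk state
	idents := make([]*eth.DataColumnIdentifier, 0, len(custodied))
	for _, column := range custodied {
		if stored[column] {
			continue
		}
		idents = append(idents, &eth.DataColumnIdentifier{BlockRoot: root[:], ColumnIndex: column})
	}
	return idents
}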
|
||||
|
||||
// requestsForMissingIndices constructs a slice of BlobIdentifiers that are missing from
|
||||
// local storage, based on a mapping that represents which indices are locally stored,
|
||||
// and the highest expected index.
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/network/forks"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
)
|
||||
@@ -155,3 +156,22 @@ func WriteBlobSidecarChunk(stream libp2pcore.Stream, tor blockchain.TemporalOrac
|
||||
_, err = encoding.EncodeWithMaxLength(stream, sidecar)
|
||||
return err
|
||||
}
|
||||
|
||||
// WriteDataColumnSidecarChunk writes data column chunk object to stream.
|
||||
// response_chunk ::= <result> | <context-bytes> | <encoding-dependent-header> | <encoded-payload>
|
||||
func WriteDataColumnSidecarChunk(stream libp2pcore.Stream, tor blockchain.TemporalOracle, encoding encoder.NetworkEncoding, sidecar *ethpb.DataColumnSidecar) error {
|
||||
if _, err := stream.Write([]byte{responseCodeSuccess}); err != nil {
|
||||
return err
|
||||
}
|
||||
valRoot := tor.GenesisValidatorsRoot()
|
||||
ctxBytes, err := forks.ForkDigestFromEpoch(slots.ToEpoch(sidecar.SignedBlockHeader.Header.Slot), valRoot[:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := writeContextToStream(ctxBytes[:], stream); err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = encoding.EncodeWithMaxLength(stream, sidecar)
|
||||
return err
|
||||
}
|
||||
|
||||
beacon-chain/sync/rpc_data_column_sidecars_by_range.go (new file, 196 lines)
@@ -0,0 +1,196 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
libp2pcore "github.com/libp2p/go-libp2p/core"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
|
||||
p2ptypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/types"
|
||||
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
|
||||
pb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
func (s *Service) streamDataColumnBatch(ctx context.Context, batch blockBatch, wQuota uint64, wantedIndexes map[uint64]bool, stream libp2pcore.Stream) (uint64, error) {
|
||||
// Defensive check to guard against underflow.
|
||||
if wQuota == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
_, span := trace.StartSpan(ctx, "sync.streamDataColumnBatch")
|
||||
defer span.End()
|
||||
for _, b := range batch.canonical() {
|
||||
root := b.Root()
|
||||
idxs, err := s.cfg.blobStorage.ColumnIndices(b.Root())
|
||||
if err != nil {
|
||||
s.writeErrorResponseToStream(responseCodeServerError, p2ptypes.ErrGeneric.Error(), stream)
|
||||
return wQuota, errors.Wrapf(err, "could not retrieve sidecars for block root %#x", root)
|
||||
}
|
||||
for i, l := uint64(0), uint64(len(idxs)); i < l; i++ {
|
||||
// index not available or unwanted, skip
|
||||
if !idxs[i] || !wantedIndexes[i] {
|
||||
continue
|
||||
}
|
||||
// We won't check for file not found since the .ColumnIndices method should normally prevent that from happening.
|
||||
sc, err := s.cfg.blobStorage.GetColumn(b.Root(), i)
|
||||
if err != nil {
|
||||
s.writeErrorResponseToStream(responseCodeServerError, p2ptypes.ErrGeneric.Error(), stream)
|
||||
return wQuota, errors.Wrapf(err, "could not retrieve data column sidecar: index %d, block root %#x", i, root)
|
||||
}
|
||||
SetStreamWriteDeadline(stream, defaultWriteDuration)
|
||||
if chunkErr := WriteDataColumnSidecarChunk(stream, s.cfg.chain, s.cfg.p2p.Encoding(), sc); chunkErr != nil {
|
||||
log.WithError(chunkErr).Debug("Could not send a chunked response")
|
||||
s.writeErrorResponseToStream(responseCodeServerError, p2ptypes.ErrGeneric.Error(), stream)
|
||||
tracing.AnnotateError(span, chunkErr)
|
||||
return wQuota, chunkErr
|
||||
}
|
||||
s.rateLimiter.add(stream, 1)
|
||||
wQuota -= 1
|
||||
// Stop streaming results once the quota of writes for the request is consumed.
|
||||
if wQuota == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return wQuota, nil
|
||||
}
|
||||
|
||||
// dataColumnSidecarsByRangeRPCHandler looks up the request data columns from the database from a given start slot index
|
||||
func (s *Service) dataColumnSidecarsByRangeRPCHandler(ctx context.Context, msg interface{}, stream libp2pcore.Stream) error {
|
||||
var err error
|
||||
ctx, span := trace.StartSpan(ctx, "sync.DataColumnSidecarsByRangeHandler")
|
||||
defer span.End()
|
||||
ctx, cancel := context.WithTimeout(ctx, respTimeout)
|
||||
defer cancel()
|
||||
SetRPCStreamDeadlines(stream)
|
||||
log := log.WithField("handler", p2p.DataColumnSidecarsByRangeName[1:]) // slice the leading slash off the name var
|
||||
|
||||
r, ok := msg.(*pb.DataColumnSidecarsByRangeRequest)
|
||||
if !ok {
|
||||
return errors.New("message is not type *pb.DataColumnSidecarsByRangeRequest")
|
||||
}
|
||||
if err := s.rateLimiter.validateRequest(stream, 1); err != nil {
|
||||
return err
|
||||
}
|
||||
rp, err := validateDataColumnsByRange(r, s.cfg.chain.CurrentSlot())
|
||||
if err != nil {
|
||||
s.writeErrorResponseToStream(responseCodeInvalidRequest, err.Error(), stream)
|
||||
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
|
||||
// Ticker to stagger out large requests.
|
||||
ticker := time.NewTicker(time.Second)
|
||||
defer ticker.Stop()
|
||||
batcher, err := newBlockRangeBatcher(rp, s.cfg.beaconDB, s.rateLimiter, s.cfg.chain.IsCanonical, ticker)
|
||||
if err != nil {
|
||||
log.WithError(err).Info("error in DataColumnSidecarsByRange batch")
|
||||
s.writeErrorResponseToStream(responseCodeServerError, p2ptypes.ErrGeneric.Error(), stream)
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
// Derive the wanted columns for the request.
|
||||
wantedColumns := map[uint64]bool{}
|
||||
for _, c := range r.Columns {
|
||||
wantedColumns[c] = true
|
||||
}
|
||||
|
||||
var batch blockBatch
|
||||
wQuota := params.BeaconConfig().MaxRequestDataColumnSidecars
|
||||
for batch, ok = batcher.next(ctx, stream); ok; batch, ok = batcher.next(ctx, stream) {
|
||||
batchStart := time.Now()
|
||||
wQuota, err = s.streamDataColumnBatch(ctx, batch, wQuota, wantedColumns, stream)
|
||||
rpcBlobsByRangeResponseLatency.Observe(float64(time.Since(batchStart).Milliseconds()))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// once we have written MAX_REQUEST_DATA_COLUMN_SIDECARS, we're done serving the request
|
||||
if wQuota == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if err := batch.error(); err != nil {
|
||||
log.WithError(err).Debug("error in DataColumnSidecarsByRange batch")
|
||||
s.writeErrorResponseToStream(responseCodeServerError, p2ptypes.ErrGeneric.Error(), stream)
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
|
||||
closeStream(stream, log)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Set the count limit for data column requests, mirroring the blob batch limit.
|
||||
func columnBatchLimit() uint64 {
|
||||
return uint64(flags.Get().BlockBatchLimit) / fieldparams.MaxBlobsPerBlock
|
||||
}
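Note: columnBatchLimit reuses the blob batch arithmetic. With the usual defaults, assumed here to be BlockBatchLimit = 64 and MaxBlobsPerBlock = 6, the limit works out to 10 batches:

// Integer division of assumed defaults: 64 / 6 == 10.
func exampleColumnBatchLimit() uint64 {
	blockBatchLimit := uint64(64) // assumed default of the block batch limit flag
	maxBlobsPerBlock := uint64(6) // Deneb MAX_BLOBS_PER_BLOCK
	return blockBatchLimit / maxBlobsPerBlock
}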
|
||||
|
||||
// TODO: Generalize between data columns and blobs, while the validation parameters used are different they
|
||||
// are the same value in the config. Can this be safely abstracted?
|
||||
func validateDataColumnsByRange(r *pb.DataColumnSidecarsByRangeRequest, current primitives.Slot) (rangeParams, error) {
|
||||
if r.Count == 0 {
|
||||
return rangeParams{}, errors.Wrap(p2ptypes.ErrInvalidRequest, "invalid request Count parameter")
|
||||
}
|
||||
rp := rangeParams{
|
||||
start: r.StartSlot,
|
||||
size: r.Count,
|
||||
}
|
||||
// Peers may overshoot the current slot when in initial sync, so we don't want to penalize them by treating the
|
||||
// request as an error. So instead we return a set of params that acts as a noop.
|
||||
if rp.start > current {
|
||||
return rangeParams{start: current, end: current, size: 0}, nil
|
||||
}
|
||||
|
||||
var err error
|
||||
rp.end, err = rp.start.SafeAdd(rp.size - 1)
|
||||
if err != nil {
|
||||
return rangeParams{}, errors.Wrap(p2ptypes.ErrInvalidRequest, "overflow start + count -1")
|
||||
}
|
||||
|
||||
maxRequest := params.MaxRequestBlock(slots.ToEpoch(current))
|
||||
// Allow some wiggle room, up to double the MaxRequestBlocks past the current slot,
|
||||
// to give nodes syncing close to the head of the chain some margin for error.
|
||||
maxStart, err := current.SafeAdd(maxRequest * 2)
|
||||
if err != nil {
|
||||
return rangeParams{}, errors.Wrap(p2ptypes.ErrInvalidRequest, "current + maxRequest * 2 > max uint")
|
||||
}
|
||||
|
||||
// Clients MUST keep a record of signed data column sidecars seen on the epoch range
|
||||
// [max(current_epoch - MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS, DENEB_FORK_EPOCH), current_epoch]
|
||||
// where current_epoch is defined by the current wall-clock time,
|
||||
// and clients MUST support serving requests of data columns on this range.
|
||||
minStartSlot, err := DataColumnsRPCMinValidSlot(current)
|
||||
if err != nil {
|
||||
return rangeParams{}, errors.Wrap(p2ptypes.ErrInvalidRequest, "DataColumnsRPCMinValidSlot error")
|
||||
}
|
||||
if rp.start > maxStart {
|
||||
return rangeParams{}, errors.Wrap(p2ptypes.ErrInvalidRequest, "start > maxStart")
|
||||
}
|
||||
if rp.start < minStartSlot {
|
||||
rp.start = minStartSlot
|
||||
}
|
||||
|
||||
if rp.end > current {
|
||||
rp.end = current
|
||||
}
|
||||
if rp.end < rp.start {
|
||||
rp.end = rp.start
|
||||
}
|
||||
|
||||
limit := columnBatchLimit()
|
||||
if limit > maxRequest {
|
||||
limit = maxRequest
|
||||
}
|
||||
if rp.size > limit {
|
||||
rp.size = limit
|
||||
}
|
||||
|
||||
return rp, nil
|
||||
}
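Note: validateDataColumnsByRange clamps requests into [minStartSlot, current] and caps the batch size. A simplified sketch of the clamping with plain integers (error paths and the size cap omitted; all numbers illustrative):

func exampleClampRange(start, count, current, minStart uint64) (uint64, uint64) {
	end := start + count - 1
	if start < minStart {
		start = minStart
	}
	if end > current {
		end = current
	}
	if end < start {
		end = start
	}
	// e.g. start=90, count=40, current=100, minStart=64 -> serve slots [90, 100]
	return start, end
}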
|
||||
beacon-chain/sync/rpc_data_column_sidecars_by_root.go (new file, 223 lines)
@@ -0,0 +1,223 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
libp2pcore "github.com/libp2p/go-libp2p/core"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
|
||||
coreTime "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/types"
|
||||
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
)
|
||||
|
||||
func (s *Service) dataColumnSidecarByRootRPCHandler(ctx context.Context, msg interface{}, stream libp2pcore.Stream) error {
|
||||
ctx, span := trace.StartSpan(ctx, "sync.dataColumnSidecarByRootRPCHandler")
|
||||
defer span.End()
|
||||
|
||||
ctx, cancel := context.WithTimeout(ctx, ttfbTimeout)
|
||||
defer cancel()
|
||||
|
||||
SetRPCStreamDeadlines(stream)
|
||||
log := log.WithField("handler", p2p.DataColumnSidecarsByRootName[1:]) // slice the leading slash off the name var
|
||||
|
||||
// We use the same type as for blobs as they are the same data structure.
|
||||
// TODO: Make the type naming more generic to be extensible to data columns
|
||||
ref, ok := msg.(*types.DataColumnSidecarsByRootReq)
|
||||
if !ok {
|
||||
return errors.New("message is not type DataColumnSidecarsByRootReq")
|
||||
}
|
||||
|
||||
requestedColumnIdents := *ref
|
||||
if err := validateDataColumnsByRootRequest(requestedColumnIdents); err != nil {
|
||||
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
|
||||
s.writeErrorResponseToStream(responseCodeInvalidRequest, err.Error(), stream)
|
||||
return errors.Wrap(err, "validate data columns by root request")
|
||||
}
|
||||
|
||||
// Sort the identifiers so that requests for the same blob root will be adjacent, minimizing db lookups.
|
||||
sort.Sort(requestedColumnIdents)
|
||||
|
||||
requestedColumnsList := make([]uint64, 0, len(requestedColumnIdents))
|
||||
for _, ident := range requestedColumnIdents {
|
||||
requestedColumnsList = append(requestedColumnsList, ident.ColumnIndex)
|
||||
}
|
||||
|
||||
batchSize := flags.Get().DataColumnBatchLimit
|
||||
var ticker *time.Ticker
|
||||
if len(requestedColumnIdents) > batchSize {
|
||||
ticker = time.NewTicker(time.Second)
|
||||
}
|
||||
|
||||
// Compute the oldest slot we'll allow a peer to request, based on the current slot.
|
||||
cs := s.cfg.clock.CurrentSlot()
|
||||
minReqSlot, err := DataColumnsRPCMinValidSlot(cs)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "unexpected error computing min valid blob request slot, current_slot=%d", cs)
|
||||
}
|
||||
|
||||
// Compute all custodied columns.
|
||||
custodiedColumns, err := peerdas.CustodyColumns(s.cfg.p2p.NodeID(), peerdas.CustodySubnetCount())
|
||||
if err != nil {
|
||||
log.WithError(err).Error("unexpected error computing the custodied columns")
|
||||
s.writeErrorResponseToStream(responseCodeServerError, types.ErrGeneric.Error(), stream)
|
||||
return errors.Wrap(err, "custody columns")
|
||||
}
|
||||
|
||||
custodiedColumnsList := make([]uint64, 0, len(custodiedColumns))
|
||||
for column := range custodiedColumns {
|
||||
custodiedColumnsList = append(custodiedColumnsList, column)
|
||||
}
|
||||
|
||||
// Sort the custodied columns by index.
|
||||
sort.Slice(custodiedColumnsList, func(i, j int) bool {
|
||||
return custodiedColumnsList[i] < custodiedColumnsList[j]
|
||||
})
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"custodied": custodiedColumnsList,
|
||||
"requested": requestedColumnsList,
|
||||
"custodiedCount": len(custodiedColumnsList),
|
||||
"requestedCount": len(requestedColumnsList),
|
||||
}).Debug("Received data column sidecar by root request")
|
||||
|
||||
// Subscribe to the data column feed.
|
||||
rootIndexChan := make(chan filesystem.RootIndexPair)
|
||||
subscription := s.cfg.blobStorage.DataColumnFeed.Subscribe(rootIndexChan)
|
||||
defer subscription.Unsubscribe()
|
||||
|
||||
for i := range requestedColumnIdents {
|
||||
if err := ctx.Err(); err != nil {
|
||||
closeStream(stream, log)
|
||||
return errors.Wrap(err, "context error")
|
||||
}
|
||||
|
||||
// Throttle request processing to no more than batchSize/sec.
if ticker != nil && i != 0 && i%batchSize == 0 {
select {
case <-ticker.C:
log.Debug("Throttling data column sidecar request")
case <-ctx.Done():
log.Debug("Context closed, exiting routine")
return nil
}
}
|
||||
|
||||
s.rateLimiter.add(stream, 1)
|
||||
requestedRoot, requestedIndex := bytesutil.ToBytes32(requestedColumnIdents[i].BlockRoot), requestedColumnIdents[i].ColumnIndex
|
||||
|
||||
// Decrease the peer's score if it requests a column that is not custodied.
|
||||
isCustodied := custodiedColumns[requestedIndex]
|
||||
if !isCustodied {
|
||||
s.cfg.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
|
||||
s.writeErrorResponseToStream(responseCodeInvalidRequest, types.ErrInvalidColumnIndex.Error(), stream)
|
||||
return types.ErrInvalidColumnIndex
|
||||
}
|
||||
|
||||
// TODO: Differentiate between blobs and columns for our storage engine
|
||||
// If the data column is nil, it means it is not yet available in the db.
|
||||
// We wait for it to be available.
|
||||
|
||||
// Retrieve the data column from the database.
|
||||
dataColumnSidecar, err := s.cfg.blobStorage.GetColumn(requestedRoot, requestedIndex)
|
||||
|
||||
if err != nil && !db.IsNotFound(err) {
|
||||
s.writeErrorResponseToStream(responseCodeServerError, types.ErrGeneric.Error(), stream)
|
||||
return errors.Wrap(err, "get column")
|
||||
}
|
||||
|
||||
if err != nil && db.IsNotFound(err) {
|
||||
fields := logrus.Fields{
|
||||
"root": fmt.Sprintf("%#x", requestedRoot),
|
||||
"index": requestedIndex,
|
||||
}
|
||||
|
||||
log.WithFields(fields).Debug("Peer requested data column sidecar by root not found in db, waiting for it to be available")
|
||||
|
||||
loop:
|
||||
for {
|
||||
select {
|
||||
case receivedRootIndex := <-rootIndexChan:
|
||||
if receivedRootIndex.Root == requestedRoot && receivedRootIndex.Index == requestedIndex {
|
||||
// This is the data column we are looking for.
|
||||
log.WithFields(fields).Debug("Data column sidecar by root is now available in the db")
|
||||
|
||||
break loop
|
||||
}
|
||||
|
||||
case <-ctx.Done():
|
||||
closeStream(stream, log)
|
||||
return errors.Errorf("context closed while waiting for data column with root %#x and index %d", requestedRoot, requestedIndex)
|
||||
}
|
||||
}
|
||||
|
||||
// Retrieve the data column from the db.
|
||||
dataColumnSidecar, err = s.cfg.blobStorage.GetColumn(requestedRoot, requestedIndex)
|
||||
if err != nil {
|
||||
// This time, no error (even not found error) should be returned.
|
||||
s.writeErrorResponseToStream(responseCodeServerError, types.ErrGeneric.Error(), stream)
|
||||
return errors.Wrap(err, "get column")
|
||||
}
|
||||
}
|
||||
|
||||
// If any root in the request content references a block earlier than minimum_request_epoch,
|
||||
// peers MAY respond with error code 3: ResourceUnavailable or not include the data column in the response.
|
||||
// note: we are deviating from the spec to allow requests for data columns that are before minimum_request_epoch,
|
||||
// up to the beginning of the retention period.
|
||||
if dataColumnSidecar.SignedBlockHeader.Header.Slot < minReqSlot {
|
||||
s.writeErrorResponseToStream(responseCodeResourceUnavailable, types.ErrDataColumnLTMinRequest.Error(), stream)
|
||||
log.WithError(types.ErrDataColumnLTMinRequest).
|
||||
Debugf("requested data column for block %#x before minimum_request_epoch", requestedColumnIdents[i].BlockRoot)
|
||||
return types.ErrDataColumnLTMinRequest
|
||||
}
|
||||
|
||||
SetStreamWriteDeadline(stream, defaultWriteDuration)
|
||||
if chunkErr := WriteDataColumnSidecarChunk(stream, s.cfg.chain, s.cfg.p2p.Encoding(), dataColumnSidecar); chunkErr != nil {
|
||||
log.WithError(chunkErr).Debug("Could not send a chunked response")
|
||||
s.writeErrorResponseToStream(responseCodeServerError, types.ErrGeneric.Error(), stream)
|
||||
tracing.AnnotateError(span, chunkErr)
|
||||
return chunkErr
|
||||
}
|
||||
}
|
||||
|
||||
closeStream(stream, log)
|
||||
return nil
|
||||
}
|
||||
|
||||
func validateDataColumnsByRootRequest(colIdents types.DataColumnSidecarsByRootReq) error {
|
||||
if uint64(len(colIdents)) > params.BeaconConfig().MaxRequestDataColumnSidecars {
|
||||
return types.ErrMaxDataColumnReqExceeded
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func DataColumnsRPCMinValidSlot(current primitives.Slot) (primitives.Slot, error) {
|
||||
// Avoid overflow if we're running on a config where deneb is set to far future epoch.
|
||||
if params.BeaconConfig().DenebForkEpoch == math.MaxUint64 || !coreTime.PeerDASIsActive(current) {
|
||||
return primitives.Slot(math.MaxUint64), nil
|
||||
}
|
||||
minReqEpochs := params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest
|
||||
currEpoch := slots.ToEpoch(current)
|
||||
minStart := params.BeaconConfig().Eip7594ForkEpoch
|
||||
if currEpoch > minReqEpochs && currEpoch-minReqEpochs > minStart {
|
||||
minStart = currEpoch - minReqEpochs
|
||||
}
|
||||
return slots.EpochStart(minStart)
|
||||
}
|
||||
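To make the retention-window arithmetic in DataColumnsRPCMinValidSlot concrete, here is a small standalone sketch. The epoch constants are hypothetical stand-ins for the real config values, and the PeerDAS-not-active early return is omitted:

package main

import "fmt"

// Assumed, illustrative values; the real ones come from params.BeaconConfig().
const (
    slotsPerEpoch                  = 32
    eip7594ForkEpoch               = 100
    minEpochsForDataColumnRequests = 4096
)

// minValidEpoch mirrors the clamp above: serve nothing older than the
// retention window, and nothing from before the PeerDAS fork.
func minValidEpoch(currentEpoch uint64) uint64 {
    minStart := uint64(eip7594ForkEpoch)
    if currentEpoch > minEpochsForDataColumnRequests && currentEpoch-minEpochsForDataColumnRequests > minStart {
        minStart = currentEpoch - minEpochsForDataColumnRequests
    }
    return minStart
}

func main() {
    for _, e := range []uint64{200, 5000, 10000} {
        fmt.Printf("current epoch %d -> min valid slot %d\n", e, minValidEpoch(e)*slotsPerEpoch)
    }
}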
@@ -8,6 +8,8 @@ import (
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/encoder"
|
||||
@@ -18,11 +20,9 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
pb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var errBlobChunkedReadFailure = errors.New("failed to read stream of chunk-encoded blobs")
|
||||
@@ -38,6 +38,7 @@ var (
|
||||
errChunkResponseIndexNotAsc = errors.Wrap(ErrInvalidFetchedData, "blob indices for a block must start at 0 and increase by 1")
|
||||
errUnrequested = errors.Wrap(ErrInvalidFetchedData, "received BlobSidecar in response that was not requested")
|
||||
errBlobResponseOutOfBounds = errors.Wrap(ErrInvalidFetchedData, "received BlobSidecar with slot outside BlobSidecarsByRangeRequest bounds")
|
||||
errDataColumnResponseOutOfBounds = errors.Wrap(ErrInvalidFetchedData, "received DataColumnSidecar with slot outside DataColumnSidecarsByRangeRequest bounds")
|
||||
errChunkResponseBlockMismatch = errors.Wrap(ErrInvalidFetchedData, "blob block details do not match")
|
||||
errChunkResponseParentMismatch = errors.Wrap(ErrInvalidFetchedData, "parent root for response element doesn't match previous element root")
|
||||
)
|
||||
@@ -208,10 +209,173 @@ func SendBlobSidecarByRoot(
|
||||
return readChunkEncodedBlobs(stream, p2pApi.Encoding(), ctxMap, blobValidatorFromRootReq(req), max)
|
||||
}
|
||||
|
||||
func SendDataColumnSidecarByRoot(
|
||||
ctx context.Context,
|
||||
tor blockchain.TemporalOracle,
|
||||
p2pApi p2p.P2P,
|
||||
pid peer.ID,
|
||||
ctxMap ContextByteVersions,
|
||||
req *p2ptypes.DataColumnSidecarsByRootReq,
|
||||
) ([]blocks.RODataColumn, error) {
|
||||
reqCount := uint64(len(*req))
|
||||
maxRequestDataColumnSideCar := params.BeaconConfig().MaxRequestDataColumnSidecars
|
||||
|
||||
// Verify that the request count is within the maximum allowed.
|
||||
if reqCount > maxRequestDataColumnSideCar {
|
||||
return nil, errors.Wrapf(p2ptypes.ErrMaxDataColumnReqExceeded, "current: %d, max: %d", reqCount, maxRequestDataColumnSideCar)
|
||||
}
|
||||
|
||||
// Get the topic for the request.
|
||||
topic, err := p2p.TopicFromMessage(p2p.DataColumnSidecarsByRootName, slots.ToEpoch(tor.CurrentSlot()))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "topic from message")
|
||||
}
|
||||
|
||||
// Send the request to the peer.
|
||||
log.WithField("topic", topic).Debug("Sending data column sidecar request")
|
||||
stream, err := p2pApi.Send(ctx, req, topic, pid)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "send")
|
||||
}
|
||||
|
||||
// Close the stream when done.
|
||||
defer closeStream(stream, log)
|
||||
|
||||
// Read the data column sidecars from the stream.
|
||||
roDataColumns := make([]blocks.RODataColumn, 0, reqCount)
|
||||
|
||||
for i := uint64(0); ; /* no stop condition */ i++ {
|
||||
roDataColumn, err := readChunkedDataColumnSideCar(stream, p2pApi, ctxMap, []DataColumnResponseValidation{dataColumnValidatorFromRootReq(req)})
|
||||
if errors.Is(err, io.EOF) {
|
||||
// End of stream.
|
||||
break
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "read chunked data column sidecar")
|
||||
}
|
||||
|
||||
if i >= reqCount {
|
||||
// The response MUST contain no more than `reqCount` data column sidecars.
|
||||
// (`reqCount` is already capped by `maxRequestDataColumnSideCar`.)
|
||||
return nil, errors.Wrap(ErrInvalidFetchedData, "response contains more data column sidecars than requested")
|
||||
}
|
||||
|
||||
roDataColumns = append(roDataColumns, *roDataColumn)
|
||||
}
|
||||
|
||||
return roDataColumns, nil
|
||||
}
|
||||
|
||||
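The read loop above (and the by-range variant below) follows the same pattern: consume chunks until EOF and reject a peer that sends more than were requested. A generic sketch of that pattern, with next standing in for readChunkedDataColumnSideCar:

package sketch

import (
    "errors"
    "io"
)

// readCapped drains next() until io.EOF, failing if the peer returns
// more than max items.
func readCapped[T any](next func() (T, error), max uint64) ([]T, error) {
    out := make([]T, 0, max)
    for i := uint64(0); ; i++ {
        item, err := next()
        if errors.Is(err, io.EOF) {
            return out, nil // end of stream
        }
        if err != nil {
            return nil, err
        }
        if i >= max {
            return nil, errors.New("response contains more items than requested")
        }
        out = append(out, item)
    }
}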
func SendDataColumnsByRangeRequest(ctx context.Context, tor blockchain.TemporalOracle, p2pApi p2p.P2P, pid peer.ID, ctxMap ContextByteVersions, req *pb.DataColumnSidecarsByRangeRequest) ([]blocks.RODataColumn, error) {
|
||||
topic, err := p2p.TopicFromMessage(p2p.DataColumnSidecarsByRangeName, slots.ToEpoch(tor.CurrentSlot()))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
log.WithFields(logrus.Fields{
|
||||
"topic": topic,
|
||||
"startSlot": req.StartSlot,
|
||||
"count": req.Count,
|
||||
}).Debug("Sending data column by range request")
|
||||
stream, err := p2pApi.Send(ctx, req, topic, pid)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer closeStream(stream, log)
|
||||
|
||||
max := params.BeaconConfig().MaxRequestDataColumnSidecars
|
||||
|
||||
if max > req.Count*fieldparams.NumberOfColumns {
|
||||
max = req.Count * fieldparams.NumberOfColumns
|
||||
}
|
||||
vfuncs := []DataColumnResponseValidation{dataColumnValidatorFromRangeReq(req), dataColumnIndexValidatorFromRangeReq(req)}
|
||||
|
||||
// Read the data column sidecars from the stream.
|
||||
roDataColumns := make([]blocks.RODataColumn, 0, max)
|
||||
|
||||
for i := uint64(0); ; /* no stop condition */ i++ {
|
||||
roDataColumn, err := readChunkedDataColumnSideCar(stream, p2pApi, ctxMap, vfuncs)
|
||||
if errors.Is(err, io.EOF) {
|
||||
// End of stream.
|
||||
break
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "read chunked data column sidecar")
|
||||
}
|
||||
|
||||
if i >= max {
|
||||
// The response MUST contain no more than `max` data column sidecars
// (`max` is already capped by MaxRequestDataColumnSidecars and the requested window).
|
||||
return nil, errors.Wrap(ErrInvalidFetchedData, "response contains more data column sidecars than maximum")
|
||||
}
|
||||
|
||||
roDataColumns = append(roDataColumns, *roDataColumn)
|
||||
}
|
||||
|
||||
return roDataColumns, nil
|
||||
}
|
||||
|
||||
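The `max` cap computed above bounds the expected number of chunks by both the protocol-wide maximum and the theoretical maximum for the requested window (count * NUMBER_OF_COLUMNS). The same rule as a standalone sketch, with the config values passed in explicitly:

package sketch

// maxExpectedColumns mirrors the cap above: a by-range response can never
// contain more than maxRequest sidecars, nor more than
// count * numberOfColumns for the requested slot window.
func maxExpectedColumns(count, numberOfColumns, maxRequest uint64) uint64 {
    if m := count * numberOfColumns; m < maxRequest {
        return m
    }
    return maxRequest
}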
func readChunkedDataColumnSideCar(
|
||||
stream network.Stream,
|
||||
p2pApi p2p.P2P,
|
||||
ctxMap ContextByteVersions,
|
||||
validation []DataColumnResponseValidation,
|
||||
) (*blocks.RODataColumn, error) {
|
||||
// Read the status code from the stream.
|
||||
statusCode, errMessage, err := ReadStatusCode(stream, p2pApi.Encoding())
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "read status code")
|
||||
}
|
||||
|
||||
if statusCode != 0 {
|
||||
return nil, errors.Wrap(errBlobChunkedReadFailure, errMessage)
|
||||
}
|
||||
// Retrieve the fork digest.
|
||||
ctxBytes, err := readContextFromStream(stream)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "read context from stream")
|
||||
}
|
||||
|
||||
// Check if the fork digest is recognized.
|
||||
v, ok := ctxMap[bytesutil.ToBytes4(ctxBytes)]
|
||||
if !ok {
|
||||
return nil, errors.Errorf("unrecognized fork digest %#x", ctxBytes)
|
||||
}
|
||||
|
||||
// Check if we are on deneb.
|
||||
// Only deneb is supported at this time, because we lack a fork-spanning interface/union type for blobs.
|
||||
if v != version.Deneb {
|
||||
return nil, errors.Errorf("unexpected context bytes for deneb DataColumnSidecar, ctx=%#x, v=%v", ctxBytes, v)
|
||||
}
|
||||
|
||||
// Decode the data column sidecar from the stream.
|
||||
dataColumnSidecar := new(pb.DataColumnSidecar)
|
||||
if err := p2pApi.Encoding().DecodeWithMaxLength(stream, dataColumnSidecar); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to decode the protobuf-encoded BlobSidecar message from RPC chunk stream")
|
||||
}
|
||||
|
||||
// Create a read-only data column from the data column sidecar.
|
||||
roDataColumn, err := blocks.NewRODataColumn(dataColumnSidecar)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "new read only data column")
|
||||
}
|
||||
for _, val := range validation {
|
||||
if err := val(roDataColumn); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return &roDataColumn, nil
|
||||
}
|
||||
|
||||
// BlobResponseValidation represents a function that can validate aspects of a single unmarshaled blob
|
||||
// that was received from a peer in response to an rpc request.
|
||||
type BlobResponseValidation func(blocks.ROBlob) error
|
||||
|
||||
// DataColumnResponseValidation represents a function that can validate aspects of a single unmarshaled data column
|
||||
// that was received from a peer in response to an rpc request.
|
||||
type DataColumnResponseValidation func(column blocks.RODataColumn) error
|
||||
|
||||
func composeBlobValidations(vf ...BlobResponseValidation) BlobResponseValidation {
|
||||
return func(blob blocks.ROBlob) error {
|
||||
for i := range vf {
|
||||
@@ -308,6 +472,52 @@ func blobValidatorFromRangeReq(req *pb.BlobSidecarsByRangeRequest) BlobResponseV
|
||||
}
|
||||
}
|
||||
|
||||
func dataColumnValidatorFromRootReq(req *p2ptypes.DataColumnSidecarsByRootReq) DataColumnResponseValidation {
|
||||
columnIds := make(map[[32]byte]map[uint64]bool)
|
||||
for _, sc := range *req {
|
||||
blockRoot := bytesutil.ToBytes32(sc.BlockRoot)
|
||||
if columnIds[blockRoot] == nil {
|
||||
columnIds[blockRoot] = make(map[uint64]bool)
|
||||
}
|
||||
columnIds[blockRoot][sc.ColumnIndex] = true
|
||||
}
|
||||
return func(sc blocks.RODataColumn) error {
|
||||
columnIndices := columnIds[sc.BlockRoot()]
|
||||
if columnIndices == nil {
|
||||
return errors.Wrapf(errUnrequested, "root=%#x", sc.BlockRoot())
|
||||
}
|
||||
requested := columnIndices[sc.ColumnIndex]
|
||||
if !requested {
|
||||
return errors.Wrapf(errUnrequested, "root=%#x index=%d", sc.BlockRoot(), sc.ColumnIndex)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func dataColumnIndexValidatorFromRangeReq(req *pb.DataColumnSidecarsByRangeRequest) DataColumnResponseValidation {
|
||||
columnIds := make(map[uint64]bool)
|
||||
for _, col := range req.Columns {
|
||||
columnIds[col] = true
|
||||
}
|
||||
return func(sc blocks.RODataColumn) error {
|
||||
requested := columnIds[sc.ColumnIndex]
|
||||
if !requested {
|
||||
return errors.Wrapf(errUnrequested, "root=%#x index=%d", sc.BlockRoot(), sc.ColumnIndex)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func dataColumnValidatorFromRangeReq(req *pb.DataColumnSidecarsByRangeRequest) DataColumnResponseValidation {
|
||||
end := req.StartSlot + primitives.Slot(req.Count)
|
||||
return func(sc blocks.RODataColumn) error {
|
||||
if sc.Slot() < req.StartSlot || sc.Slot() >= end {
|
||||
return errors.Wrapf(errDataColumnResponseOutOfBounds, "req start,end:%d,%d, resp:%d", req.StartSlot, end, sc.Slot())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
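The validators above are closures over the request and compose naturally; a data-column counterpart to composeBlobValidations is not shown in this diff, but a generic sketch of the composition readChunkedDataColumnSideCar performs when walking its validation slice would be:

package sketch

// Validation is a stand-in for DataColumnResponseValidation.
type Validation[T any] func(T) error

// composeValidations runs every validator in order and returns the
// first error encountered.
func composeValidations[T any](vfs ...Validation[T]) Validation[T] {
    return func(v T) error {
        for _, vf := range vfs {
            if err := vf(v); err != nil {
                return err
            }
        }
        return nil
    }
}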
func readChunkEncodedBlobs(stream network.Stream, encoding encoder.NetworkEncoding, ctxMap ContextByteVersions, vf BlobResponseValidation, max uint64) ([]blocks.ROBlob, error) {
|
||||
sidecars := make([]blocks.ROBlob, 0)
|
||||
// Attempt an extra read beyond max to check if the peer is violating the spec by
|
||||
@@ -334,7 +544,7 @@ func readChunkEncodedBlobs(stream network.Stream, encoding encoder.NetworkEncodi
|
||||
|
||||
func readChunkedBlobSidecar(stream network.Stream, encoding encoder.NetworkEncoding, ctxMap ContextByteVersions, vf BlobResponseValidation) (blocks.ROBlob, error) {
|
||||
var b blocks.ROBlob
|
||||
pb := ðpb.BlobSidecar{}
|
||||
pb := &pb.BlobSidecar{}
|
||||
decode := encoding.DecodeWithMaxLength
|
||||
var (
|
||||
code uint8
|
||||
|
||||
@@ -15,6 +15,8 @@ import (
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
gcache "github.com/patrickmn/go-cache"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/trailofbits/go-mutexasserts"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/async"
|
||||
"github.com/prysmaticlabs/prysm/v5/async/abool"
|
||||
"github.com/prysmaticlabs/prysm/v5/async/event"
|
||||
@@ -36,6 +38,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/sync/backfill/coverage"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
|
||||
lruwrpr "github.com/prysmaticlabs/prysm/v5/cache/lru"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
@@ -44,22 +47,24 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime"
|
||||
prysmTime "github.com/prysmaticlabs/prysm/v5/time"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
"github.com/trailofbits/go-mutexasserts"
|
||||
)
|
||||
|
||||
var _ runtime.Service = (*Service)(nil)
|
||||
|
||||
const rangeLimit uint64 = 1024
|
||||
const seenBlockSize = 1000
|
||||
const seenBlobSize = seenBlockSize * 4 // Each block can have max 4 blobs. Worst case 164kB for cache.
|
||||
const seenUnaggregatedAttSize = 20000
|
||||
const seenAggregatedAttSize = 16384
|
||||
const seenSyncMsgSize = 1000 // Maximum of 512 sync committee members, 1000 is a safe amount.
|
||||
const seenSyncContributionSize = 512 // Maximum of SYNC_COMMITTEE_SIZE as specified by the spec.
|
||||
const seenExitSize = 100
|
||||
const seenProposerSlashingSize = 100
|
||||
const badBlockSize = 1000
|
||||
const syncMetricsInterval = 10 * time.Second
|
||||
const (
|
||||
rangeLimit uint64 = 1024
|
||||
seenBlockSize = 1000
|
||||
seenBlobSize = seenBlockSize * 6 // Each block can have max 6 blobs.
|
||||
seenDataColumnSize = seenBlockSize * 128 // Each block can have max 128 data columns.
|
||||
seenUnaggregatedAttSize = 20000
|
||||
seenAggregatedAttSize = 16384
|
||||
seenSyncMsgSize = 1000 // Maximum of 512 sync committee members, 1000 is a safe amount.
|
||||
seenSyncContributionSize = 512 // Maximum of SYNC_COMMITTEE_SIZE as specified by the spec.
|
||||
seenExitSize = 100
|
||||
seenProposerSlashingSize = 100
|
||||
badBlockSize = 1000
|
||||
syncMetricsInterval = 10 * time.Second
|
||||
)
|
||||
|
||||
var (
|
||||
// Seconds in one epoch.
|
||||
@@ -102,6 +107,7 @@ type config struct {
|
||||
type blockchainService interface {
|
||||
blockchain.BlockReceiver
|
||||
blockchain.BlobReceiver
|
||||
blockchain.DataColumnReceiver
|
||||
blockchain.HeadFetcher
|
||||
blockchain.FinalizationFetcher
|
||||
blockchain.ForkFetcher
|
||||
@@ -133,6 +139,8 @@ type Service struct {
|
||||
seenBlockCache *lru.Cache
|
||||
seenBlobLock sync.RWMutex
|
||||
seenBlobCache *lru.Cache
|
||||
seenDataColumnLock sync.RWMutex
|
||||
seenDataColumnCache *lru.Cache
|
||||
seenAggregatedAttestationLock sync.RWMutex
|
||||
seenAggregatedAttestationCache *lru.Cache
|
||||
seenUnAggregatedAttestationLock sync.RWMutex
|
||||
@@ -156,8 +164,13 @@ type Service struct {
|
||||
initialSyncComplete chan struct{}
|
||||
verifierWaiter *verification.InitializerWaiter
|
||||
newBlobVerifier verification.NewBlobVerifier
|
||||
newColumnProposerVerifier verification.NewColumnVerifier
|
||||
availableBlocker coverage.AvailableBlocker
|
||||
dataColumsnReconstructionLock sync.Mutex
|
||||
receivedDataColumnsFromRoot map[[fieldparams.RootLength]byte]map[uint64]bool
|
||||
receivedDataColumnsFromRootLock sync.RWMutex
|
||||
ctxMap ContextByteVersions
|
||||
sampler DataColumnSampler
|
||||
}
|
||||
|
||||
// NewService initializes new regular sync service.
|
||||
@@ -165,14 +178,15 @@ func NewService(ctx context.Context, opts ...Option) *Service {
|
||||
c := gcache.New(pendingBlockExpTime /* exp time */, 0 /* disable janitor */)
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
r := &Service{
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
chainStarted: abool.New(),
|
||||
cfg: &config{clock: startup.NewClock(time.Unix(0, 0), [32]byte{})},
|
||||
slotToPendingBlocks: c,
|
||||
seenPendingBlocks: make(map[[32]byte]bool),
|
||||
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
|
||||
signatureChan: make(chan *signatureVerifier, verifierLimit),
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
chainStarted: abool.New(),
|
||||
cfg: &config{clock: startup.NewClock(time.Unix(0, 0), [32]byte{})},
|
||||
slotToPendingBlocks: c,
|
||||
seenPendingBlocks: make(map[[32]byte]bool),
|
||||
blkRootToPendingAtts: make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
|
||||
signatureChan: make(chan *signatureVerifier, verifierLimit),
|
||||
receivedDataColumnsFromRoot: make(map[[32]byte]map[uint64]bool),
|
||||
}
|
||||
for _, opt := range opts {
|
||||
if err := opt(r); err != nil {
|
||||
@@ -222,6 +236,7 @@ func (s *Service) Start() {
|
||||
return
|
||||
}
|
||||
s.newBlobVerifier = newBlobVerifierFromInitializer(v)
|
||||
s.newColumnProposerVerifier = v.VerifyProposer
|
||||
|
||||
go s.verifierRoutine()
|
||||
go s.registerHandlers()
|
||||
@@ -239,6 +254,12 @@ func (s *Service) Start() {
|
||||
|
||||
// Update sync metrics.
|
||||
async.RunEvery(s.ctx, syncMetricsInterval, s.updateMetrics)
|
||||
|
||||
// Run data column sampling
|
||||
if params.PeerDASEnabled() {
|
||||
s.sampler = newDataColumnSampler1D(s.cfg.p2p, s.cfg.clock, s.ctxMap, s.cfg.stateNotifier)
|
||||
go s.sampler.Run(s.ctx)
|
||||
}
|
||||
}
|
||||
|
||||
// Stop the regular sync service.
|
||||
@@ -276,6 +297,7 @@ func (s *Service) Status() error {
|
||||
func (s *Service) initCaches() {
|
||||
s.seenBlockCache = lruwrpr.New(seenBlockSize)
|
||||
s.seenBlobCache = lruwrpr.New(seenBlobSize)
|
||||
s.seenDataColumnCache = lruwrpr.New(seenDataColumnSize)
|
||||
s.seenAggregatedAttestationCache = lruwrpr.New(seenAggregatedAttSize)
|
||||
s.seenUnAggregatedAttestationCache = lruwrpr.New(seenUnaggregatedAttSize)
|
||||
s.seenSyncMessageCache = lruwrpr.New(seenSyncMsgSize)
|
||||
|
||||
@@ -16,6 +16,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/altair"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
coreTime "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers"
|
||||
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
|
||||
@@ -137,13 +138,32 @@ func (s *Service) registerSubscribers(epoch primitives.Epoch, digest [4]byte) {
|
||||
|
||||
// New Gossip Topic in Deneb
|
||||
if epoch >= params.BeaconConfig().DenebForkEpoch {
|
||||
s.subscribeStaticWithSubnets(
|
||||
p2p.BlobSubnetTopicFormat,
|
||||
s.validateBlob, /* validator */
|
||||
s.blobSubscriber, /* message handler */
|
||||
digest,
|
||||
params.BeaconConfig().BlobsidecarSubnetCount,
|
||||
)
|
||||
if coreTime.PeerDASIsActive(slots.UnsafeEpochStart(epoch)) {
|
||||
if flags.Get().SubscribeToAllSubnets {
|
||||
s.subscribeStaticWithSubnets(
|
||||
p2p.DataColumnSubnetTopicFormat,
|
||||
s.validateDataColumn, /* validator */
|
||||
s.dataColumnSubscriber, /* message handler */
|
||||
digest,
|
||||
params.BeaconConfig().DataColumnSidecarSubnetCount,
|
||||
)
|
||||
} else {
|
||||
s.subscribeDynamicWithColumnSubnets(
|
||||
p2p.DataColumnSubnetTopicFormat,
|
||||
s.validateDataColumn, /* validator */
|
||||
s.dataColumnSubscriber, /* message handler */
|
||||
digest,
|
||||
)
|
||||
}
|
||||
} else {
|
||||
s.subscribeStaticWithSubnets(
|
||||
p2p.BlobSubnetTopicFormat,
|
||||
s.validateBlob, /* validator */
|
||||
s.blobSubscriber, /* message handler */
|
||||
digest,
|
||||
params.BeaconConfig().BlobsidecarSubnetCount,
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
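The branch above chooses between joining every data column subnet (when --subscribe-all-subnets is set) and only the subnets the node actually needs. A simplified sketch of that decision, with both inputs treated as plain values rather than config and cache lookups:

package sketch

// pickColumnSubnets mirrors the branch above: with subscribeAll set,
// every data column subnet is joined; otherwise only the subnets that
// back the node's custodied columns.
func pickColumnSubnets(subscribeAll bool, subnetCount uint64, custodySubnets []uint64) []uint64 {
    if subscribeAll {
        all := make([]uint64, 0, subnetCount)
        for i := uint64(0); i < subnetCount; i++ {
            all = append(all, i)
        }
        return all
    }
    return custodySubnets
}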
@@ -646,6 +666,87 @@ func (s *Service) subscribeDynamicWithSyncSubnets(
|
||||
}()
|
||||
}
|
||||
|
||||
// subscribe missing subnets for our persistent columns.
|
||||
func (s *Service) subscribeColumnSubnet(
|
||||
subscriptions map[uint64]*pubsub.Subscription,
|
||||
idx uint64,
|
||||
digest [4]byte,
|
||||
validate wrappedVal,
|
||||
handle subHandler,
|
||||
) {
|
||||
// Subscribe to the subnet if we have not already, then make sure we
// have peers in it, searching the network for them if necessary.
|
||||
topic := p2p.GossipTypeMapping[reflect.TypeOf(ðpb.DataColumnSidecar{})]
|
||||
subnetTopic := fmt.Sprintf(topic, digest, idx)
|
||||
// check if subscription exists and if not subscribe the relevant subnet.
|
||||
if _, exists := subscriptions[idx]; !exists {
|
||||
subscriptions[idx] = s.subscribeWithBase(subnetTopic, validate, handle)
|
||||
}
|
||||
if !s.validPeersExist(subnetTopic) {
|
||||
log.Debugf("No peers found subscribed to column gossip subnet with "+
|
||||
"column index %d. Searching network for peers subscribed to the subnet.", idx)
|
||||
_, err := s.cfg.p2p.FindPeersWithSubnet(s.ctx, subnetTopic, idx, flags.Get().MinimumPeersPerSubnet)
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Could not search for peers")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
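For reference, the subnet topic string assembled above comes from a printf-style format plus the fork digest and the subnet index. A hedged sketch (the exact format string is an assumption for illustration, not taken from this diff):

package sketch

import "fmt"

// columnSubnetTopic shows how a gossip topic for a column subnet could be
// assembled from a format string, a fork digest and a subnet index. The
// format below is a hypothetical stand-in for p2p.DataColumnSubnetTopicFormat
// plus the encoding suffix.
func columnSubnetTopic(digest [4]byte, subnet uint64) string {
    const format = "/eth2/%x/data_column_sidecar_%d/ssz_snappy" // assumed layout
    return fmt.Sprintf(format, digest, subnet)
}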
func (s *Service) subscribeDynamicWithColumnSubnets(
|
||||
topicFormat string,
|
||||
validate wrappedVal,
|
||||
handle subHandler,
|
||||
digest [4]byte,
|
||||
) {
|
||||
genRoot := s.cfg.clock.GenesisValidatorsRoot()
|
||||
_, e, err := forks.RetrieveForkDataFromDigest(digest, genRoot[:])
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
base := p2p.GossipTopicMappings(topicFormat, e)
|
||||
if base == nil {
|
||||
panic(fmt.Sprintf("%s is not mapped to any message in GossipTopicMappings", topicFormat))
|
||||
}
|
||||
subscriptions := make(map[uint64]*pubsub.Subscription, params.BeaconConfig().DataColumnSidecarSubnetCount)
|
||||
genesis := s.cfg.clock.GenesisTime()
|
||||
ticker := slots.NewSlotTicker(genesis, params.BeaconConfig().SecondsPerSlot)
|
||||
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-s.ctx.Done():
|
||||
ticker.Done()
|
||||
return
|
||||
case <-ticker.C():
|
||||
if s.chainStarted.IsSet() && s.cfg.initialSync.Syncing() {
|
||||
continue
|
||||
}
|
||||
valid, err := isDigestValid(digest, genesis, genRoot)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
continue
|
||||
}
|
||||
if !valid {
|
||||
log.Warnf("Column subnets with digest %#x are no longer valid, unsubscribing from all of them.", digest)
|
||||
// Unsubscribes from all our current subnets.
|
||||
s.reValidateSubscriptions(subscriptions, []uint64{}, topicFormat, digest)
|
||||
ticker.Done()
|
||||
return
|
||||
}
|
||||
|
||||
wantedSubs := s.retrieveActiveColumnSubnets()
|
||||
// Resize as appropriate.
|
||||
s.reValidateSubscriptions(subscriptions, wantedSubs, topicFormat, digest)
|
||||
|
||||
// subscribe desired column subnets.
|
||||
for _, idx := range wantedSubs {
|
||||
s.subscribeColumnSubnet(subscriptions, idx, digest, validate, handle)
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
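Stripped of the digest-validity and initial-sync checks, the goroutine above is a per-slot reconcile loop. A minimal sketch of that shape, with wanted and reconcile standing in for retrieveActiveColumnSubnets and the re-validate/subscribe pair:

package sketch

import (
    "context"
    "time"
)

// resubscribeEachSlot recomputes the wanted subnets once per slot and
// hands them to a reconcile callback until the context is cancelled.
func resubscribeEachSlot(ctx context.Context, slotDuration time.Duration, wanted func() []uint64, reconcile func([]uint64)) {
    ticker := time.NewTicker(slotDuration)
    defer ticker.Stop()
    for {
        select {
        case <-ctx.Done():
            return
        case <-ticker.C:
            reconcile(wanted())
        }
    }
}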
// lookup peers for attester specific subnets.
|
||||
func (s *Service) lookupAttesterSubnets(digest [4]byte, idx uint64) {
|
||||
topic := p2p.GossipTypeMapping[reflect.TypeOf(ðpb.Attestation{})]
|
||||
@@ -697,6 +798,14 @@ func (*Service) retrieveActiveSyncSubnets(currEpoch primitives.Epoch) []uint64 {
|
||||
return slice.SetUint64(subs)
|
||||
}
|
||||
|
||||
func (*Service) retrieveActiveColumnSubnets() []uint64 {
|
||||
subs, ok, _ := cache.ColumnSubnetIDs.GetColumnSubnets()
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
return subs
|
||||
}
|
||||
|
||||
// filters out required peers for the node to function, not
|
||||
// pruning peers who are in our attestation subnets.
|
||||
func (s *Service) filterNeededPeers(pids []peer.ID) []peer.ID {
|
||||
|
||||
@@ -13,7 +13,7 @@ import (
|
||||
func (s *Service) blobSubscriber(ctx context.Context, msg proto.Message) error {
|
||||
b, ok := msg.(blocks.VerifiedROBlob)
|
||||
if !ok {
|
||||
return fmt.Errorf("message was not type blocks.ROBlob, type=%T", msg)
|
||||
return fmt.Errorf("message was not type blocks.VerifiedROBlob, type=%T", msg)
|
||||
}
|
||||
|
||||
s.setSeenBlobIndex(b.Slot(), b.ProposerIndex(), b.Index)
|
||||
|
||||
beacon-chain/sync/subscriber_data_column_sidecar.go (new file, 40 lines)
@@ -0,0 +1,40 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed"
|
||||
opfeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/operation"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
func (s *Service) dataColumnSubscriber(ctx context.Context, msg proto.Message) error {
|
||||
dc, ok := msg.(blocks.VerifiedRODataColumn)
|
||||
if !ok {
|
||||
return fmt.Errorf("message was not type blocks.VerifiedRODataColumn, type=%T", msg)
|
||||
}
|
||||
|
||||
s.setSeenDataColumnIndex(dc.SignedBlockHeader.Header.Slot, dc.SignedBlockHeader.Header.ProposerIndex, dc.ColumnIndex)
|
||||
s.setReceivedDataColumn(dc.BlockRoot(), dc.ColumnIndex)
|
||||
|
||||
if err := s.cfg.chain.ReceiveDataColumn(ctx, dc); err != nil {
|
||||
return errors.Wrap(err, "receive data column")
|
||||
}
|
||||
|
||||
s.cfg.operationNotifier.OperationFeed().Send(&feed.Event{
|
||||
Type: opfeed.DataColumnSidecarReceived,
|
||||
Data: &opfeed.DataColumnSidecarReceivedData{
|
||||
DataColumn: &dc,
|
||||
},
|
||||
})
|
||||
|
||||
// Reconstruct the data columns if needed.
|
||||
if err := s.reconstructDataColumns(ctx, dc); err != nil {
|
||||
return errors.Wrap(err, "reconstruct data columns")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -169,6 +169,16 @@ func blobFields(b blocks.ROBlob) logrus.Fields {
|
||||
}
|
||||
}
|
||||
|
||||
func columnFields(b blocks.RODataColumn) logrus.Fields {
|
||||
return logrus.Fields{
|
||||
"slot": b.Slot(),
|
||||
"proposerIndex": b.ProposerIndex(),
|
||||
"blockRoot": fmt.Sprintf("%#x", b.BlockRoot()),
|
||||
"kzgCommitments": fmt.Sprintf("%#x", b.KzgCommitments),
|
||||
"columnIndex": b.ColumnIndex,
|
||||
}
|
||||
}
|
||||
|
||||
func computeSubnetForBlobSidecar(index uint64) uint64 {
|
||||
return index % params.BeaconConfig().BlobsidecarSubnetCount
|
||||
}
|
||||
|
||||
beacon-chain/sync/validate_data_column.go (new file, 190 lines)
@@ -0,0 +1,190 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
|
||||
coreBlocks "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
prysmTime "github.com/prysmaticlabs/prysm/v5/time"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/p2p-interface.md#the-gossip-domain-gossipsub
|
||||
func (s *Service) validateDataColumn(ctx context.Context, pid peer.ID, msg *pubsub.Message) (pubsub.ValidationResult, error) {
|
||||
receivedTime := prysmTime.Now()
|
||||
|
||||
// Always accept our own messages.
|
||||
if pid == s.cfg.p2p.PeerID() {
|
||||
return pubsub.ValidationAccept, nil
|
||||
}
|
||||
|
||||
// Ignore messages during initial sync.
|
||||
if s.cfg.initialSync.Syncing() {
|
||||
return pubsub.ValidationIgnore, nil
|
||||
}
|
||||
|
||||
// Ignore message with a nil topic.
|
||||
if msg.Topic == nil {
|
||||
return pubsub.ValidationReject, errInvalidTopic
|
||||
}
|
||||
|
||||
// Decode the message.
|
||||
m, err := s.decodePubsubMessage(msg)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to decode message")
|
||||
return pubsub.ValidationReject, err
|
||||
}
|
||||
|
||||
// Ignore messages that are not of the expected type.
|
||||
ds, ok := m.(*eth.DataColumnSidecar)
|
||||
if !ok {
|
||||
log.WithField("message", m).Error("Message is not of type *eth.DataColumnSidecar")
|
||||
return pubsub.ValidationReject, errWrongMessage
|
||||
}
|
||||
|
||||
// [REJECT] The sidecar's index is consistent with NUMBER_OF_COLUMNS -- i.e. sidecar.index < NUMBER_OF_COLUMNS.
|
||||
if ds.ColumnIndex >= params.BeaconConfig().NumberOfColumns {
|
||||
return pubsub.ValidationReject, errors.Errorf("invalid column index provided, got %d", ds.ColumnIndex)
|
||||
}
|
||||
|
||||
// [REJECT] The sidecar is for the correct subnet -- i.e. compute_subnet_for_data_column_sidecar(sidecar.index) == subnet_id.
|
||||
want := fmt.Sprintf("data_column_sidecar_%d", computeSubnetForColumnSidecar(ds.ColumnIndex))
|
||||
if !strings.Contains(*msg.Topic, want) {
|
||||
log.Debug("Column Sidecar index does not match topic")
|
||||
return pubsub.ValidationReject, fmt.Errorf("wrong topic name: %s", *msg.Topic)
|
||||
}
|
||||
|
||||
// [IGNORE] The sidecar is not from a future slot (with a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance) -- i.e. validate that block_header.slot <= current_slot (a client MAY queue future sidecars for processing at the appropriate slot).
|
||||
if err := slots.VerifyTime(uint64(s.cfg.clock.GenesisTime().Unix()), ds.SignedBlockHeader.Header.Slot, params.BeaconConfig().MaximumGossipClockDisparityDuration()); err != nil {
|
||||
log.WithError(err).Debug("Ignored sidecar: could not verify slot time")
|
||||
return pubsub.ValidationIgnore, nil
|
||||
}
|
||||
|
||||
// [IGNORE] The sidecar is from a slot greater than the latest finalized slot -- i.e. validate that block_header.slot > compute_start_slot_at_epoch(state.finalized_checkpoint.epoch)
|
||||
cp := s.cfg.chain.FinalizedCheckpt()
|
||||
startSlot, err := slots.EpochStart(cp.Epoch)
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Ignored column sidecar: could not calculate epoch start slot")
|
||||
return pubsub.ValidationIgnore, nil
|
||||
}
|
||||
|
||||
if startSlot >= ds.SignedBlockHeader.Header.Slot {
|
||||
err := fmt.Errorf("finalized slot %d greater or equal to block slot %d", startSlot, ds.SignedBlockHeader.Header.Slot)
|
||||
log.Debug(err)
|
||||
return pubsub.ValidationIgnore, err
|
||||
}
|
||||
|
||||
// [IGNORE] The sidecar's block's parent (defined by block_header.parent_root) has been seen (via both gossip and non-gossip sources) (a client MAY queue sidecars for processing once the parent block is retrieved).
|
||||
if !s.cfg.chain.HasBlock(ctx, [32]byte(ds.SignedBlockHeader.Header.ParentRoot)) {
|
||||
err := errors.Errorf("unknown parent for data column sidecar with slot %d and parent root %#x", ds.SignedBlockHeader.Header.Slot, ds.SignedBlockHeader.Header.ParentRoot)
|
||||
log.WithError(err).Debug("Could not identify parent for data column sidecar")
|
||||
return pubsub.ValidationIgnore, err
|
||||
}
|
||||
|
||||
// [REJECT] The sidecar's block's parent (defined by block_header.parent_root) passes validation.
|
||||
if s.hasBadBlock([32]byte(ds.SignedBlockHeader.Header.ParentRoot)) {
|
||||
bRoot, err := ds.SignedBlockHeader.Header.HashTreeRoot()
|
||||
if err != nil {
|
||||
return pubsub.ValidationIgnore, err
|
||||
}
|
||||
|
||||
// If parent is bad, we set the block as bad.
|
||||
s.setBadBlock(ctx, bRoot)
|
||||
return pubsub.ValidationReject, errors.Errorf("column sidecar with bad parent provided")
|
||||
}
|
||||
|
||||
// [REJECT] The sidecar is from a higher slot than the sidecar's block's parent (defined by block_header.parent_root).
|
||||
parentSlot, err := s.cfg.chain.RecentBlockSlot([32]byte(ds.SignedBlockHeader.Header.ParentRoot))
|
||||
if err != nil {
|
||||
return pubsub.ValidationIgnore, err
|
||||
}
|
||||
|
||||
if ds.SignedBlockHeader.Header.Slot <= parentSlot {
|
||||
return pubsub.ValidationReject, errors.Errorf("invalid column sidecar slot: %d", ds.SignedBlockHeader.Header.Slot)
|
||||
}
|
||||
|
||||
// [REJECT] The current finalized_checkpoint is an ancestor of the sidecar's block -- i.e. get_checkpoint_block(store, block_header.parent_root, store.finalized_checkpoint.epoch) == store.finalized_checkpoint.root.
|
||||
if !s.cfg.chain.InForkchoice([32]byte(ds.SignedBlockHeader.Header.ParentRoot)) {
|
||||
return pubsub.ValidationReject, blockchain.ErrNotDescendantOfFinalized
|
||||
}
|
||||
|
||||
// [REJECT] The sidecar's kzg_commitments field inclusion proof is valid as verified by verify_data_column_sidecar_inclusion_proof(sidecar).
|
||||
if err := blocks.VerifyKZGInclusionProofColumn(ds); err != nil {
|
||||
return pubsub.ValidationReject, err
|
||||
}
|
||||
|
||||
// [REJECT] The sidecar's column data is valid as verified by verify_data_column_sidecar_kzg_proofs(sidecar).
|
||||
verified, err := peerdas.VerifyDataColumnSidecarKZGProofs(ds)
|
||||
if err != nil {
|
||||
return pubsub.ValidationReject, err
|
||||
}
|
||||
|
||||
if !verified {
|
||||
return pubsub.ValidationReject, errors.New("failed to verify kzg proof of column")
|
||||
}
|
||||
|
||||
// [REJECT] The proposer signature of sidecar.signed_block_header, is valid with respect to the block_header.proposer_index pubkey.
|
||||
parentState, err := s.cfg.stateGen.StateByRoot(ctx, [32]byte(ds.SignedBlockHeader.Header.ParentRoot))
|
||||
if err != nil {
|
||||
return pubsub.ValidationIgnore, err
|
||||
}
|
||||
|
||||
if err := coreBlocks.VerifyBlockHeaderSignatureUsingCurrentFork(parentState, ds.SignedBlockHeader); err != nil {
|
||||
return pubsub.ValidationReject, err
|
||||
}
|
||||
roDataColumn, err := blocks.NewRODataColumn(ds)
|
||||
if err != nil {
|
||||
return pubsub.ValidationReject, errors.Wrap(err, "new RO data columns")
|
||||
}
|
||||
|
||||
if err := s.newColumnProposerVerifier(ctx, roDataColumn); err != nil {
|
||||
return pubsub.ValidationReject, errors.Wrap(err, "could not verify proposer")
|
||||
}
|
||||
|
||||
// Get the time at slot start.
|
||||
startTime, err := slots.ToTime(uint64(s.cfg.chain.GenesisTime().Unix()), ds.SignedBlockHeader.Header.Slot)
|
||||
|
||||
// Add specific debug log.
|
||||
if err == nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"sinceSlotStartTime": receivedTime.Sub(startTime),
|
||||
"validationTime": s.cfg.clock.Now().Sub(receivedTime),
|
||||
"columnIndex": ds.ColumnIndex,
|
||||
}).Debug("Received data column sidecar")
|
||||
} else {
|
||||
log.WithError(err).Error("Failed to calculate slot time")
|
||||
}
|
||||
|
||||
// TODO: Transform this whole function so it looks like `validateBlob`
|
||||
// with the tiny verifiers inside.
|
||||
verifiedRODataColumn := blocks.NewVerifiedRODataColumn(roDataColumn)
|
||||
|
||||
msg.ValidatorData = verifiedRODataColumn
|
||||
return pubsub.ValidationAccept, nil
|
||||
}
|
||||
|
||||
// Sets the data column with the same slot, proposer index, and data column index as seen.
|
||||
func (s *Service) setSeenDataColumnIndex(slot primitives.Slot, proposerIndex primitives.ValidatorIndex, index uint64) {
|
||||
s.seenDataColumnLock.Lock()
|
||||
defer s.seenDataColumnLock.Unlock()
|
||||
|
||||
b := append(bytesutil.Bytes32(uint64(slot)), bytesutil.Bytes32(uint64(proposerIndex))...)
|
||||
b = append(b, bytesutil.Bytes32(index)...)
|
||||
s.seenDataColumnCache.Add(string(b), true)
|
||||
}
|
||||
|
||||
func computeSubnetForColumnSidecar(colIdx uint64) uint64 {
|
||||
return colIdx % params.BeaconConfig().DataColumnSidecarSubnetCount
|
||||
}
|
||||
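setSeenDataColumnIndex keys its LRU cache on the (slot, proposer, column) triple so the same sidecar is only processed once. A standalone sketch of that key construction (fixed-width little-endian packing here for brevity, rather than the 32-byte encoding bytesutil.Bytes32 produces):

package sketch

import "encoding/binary"

// seenColumnKey packs slot, proposer index and column index into one
// cache key, mirroring the concatenation in setSeenDataColumnIndex.
func seenColumnKey(slot, proposer, column uint64) string {
    var b [24]byte
    binary.LittleEndian.PutUint64(b[0:8], slot)
    binary.LittleEndian.PutUint64(b[8:16], proposer)
    binary.LittleEndian.PutUint64(b[16:24], column)
    return string(b[:])
}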
@@ -1,6 +1,8 @@
|
||||
package verify
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
@@ -9,10 +11,17 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
errBlobVerification = errors.New("unable to verify blobs")
|
||||
ErrIncorrectBlobIndex = errors.New("incorrect blob index")
|
||||
ErrBlobBlockMisaligned = errors.Wrap(errBlobVerification, "root of block header in blob sidecar does not match block root")
|
||||
ErrMismatchedBlobCommitments = errors.Wrap(errBlobVerification, "commitments at given slot, root and index do not match")
|
||||
errBlobVerification = errors.New("unable to verify blobs")
|
||||
errColumnVerification = errors.New("unable to verify column")
|
||||
|
||||
ErrIncorrectBlobIndex = errors.New("incorrect blob index")
|
||||
ErrIncorrectColumnIndex = errors.New("incorrect column index")
|
||||
|
||||
ErrBlobBlockMisaligned = errors.Wrap(errBlobVerification, "root of block header in blob sidecar does not match block root")
|
||||
ErrColumnBlockMisaligned = errors.Wrap(errColumnVerification, "root of block header in column sidecar does not match block root")
|
||||
|
||||
ErrMismatchedBlobCommitments = errors.Wrap(errBlobVerification, "commitments at given slot, root and index do not match")
|
||||
ErrMismatchedColumnCommitments = errors.Wrap(errColumnVerification, "commitments at given slot, root and index do not match")
|
||||
)
|
||||
|
||||
// BlobAlignsWithBlock verifies if the blob aligns with the block.
|
||||
@@ -41,3 +50,26 @@ func BlobAlignsWithBlock(blob blocks.ROBlob, block blocks.ROBlock) error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func ColumnAlignsWithBlock(col blocks.RODataColumn, block blocks.ROBlock) error {
|
||||
if block.Version() < version.Deneb {
|
||||
return nil
|
||||
}
|
||||
if col.ColumnIndex >= fieldparams.NumberOfColumns {
|
||||
return errors.Wrapf(ErrIncorrectColumnIndex, "index %d exceeds NUMBER_OF_COLUMNS %d", col.ColumnIndex, fieldparams.NumberOfColumns)
|
||||
}
|
||||
|
||||
if col.BlockRoot() != block.Root() {
|
||||
return ErrColumnBlockMisaligned
|
||||
}
|
||||
|
||||
// Verify commitment byte values match
|
||||
commits, err := block.Block().Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !reflect.DeepEqual(commits, col.KzgCommitments) {
|
||||
return errors.Wrapf(ErrMismatchedColumnCommitments, "commitment %#v != block commitment %#v for block root %#x at slot %d ", col.KzgCommitments, commits, block.Root(), col.Slot())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
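ColumnAlignsWithBlock compares the commitment lists with reflect.DeepEqual, which works because they are plain [][]byte values. An explicit element-wise equivalent for that specific comparison, shown only as a sketch of what the check does:

package sketch

import "bytes"

// commitmentsEqual compares two lists of KZG commitments element by
// element, matching what reflect.DeepEqual does for [][]byte here.
func commitmentsEqual(a, b [][]byte) bool {
    if len(a) != len(b) {
        return false
    }
    for i := range a {
        if !bytes.Equal(a[i], b[i]) {
            return false
        }
    }
    return true
}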
@@ -12,6 +12,8 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/network/forks"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// Forkchoicer represents the forkchoice methods that the verifiers need.
|
||||
@@ -57,6 +59,37 @@ func (ini *Initializer) NewBlobVerifier(b blocks.ROBlob, reqs []Requirement) *RO
|
||||
}
|
||||
}
|
||||
|
||||
func (ini *Initializer) VerifyProposer(ctx context.Context, dc blocks.RODataColumn) error {
|
||||
e := slots.ToEpoch(dc.Slot())
|
||||
if e > 0 {
|
||||
e = e - 1
|
||||
}
|
||||
r, err := ini.shared.fc.TargetRootForEpoch(dc.ParentRoot(), e)
|
||||
if err != nil {
|
||||
return ErrSidecarUnexpectedProposer
|
||||
}
|
||||
c := &forkchoicetypes.Checkpoint{Root: r, Epoch: e}
|
||||
idx, cached := ini.shared.pc.Proposer(c, dc.Slot())
|
||||
if !cached {
|
||||
pst, err := ini.shared.sr.StateByRoot(ctx, dc.ParentRoot())
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("state replay to parent_root failed")
|
||||
return ErrSidecarUnexpectedProposer
|
||||
}
|
||||
idx, err = ini.shared.pc.ComputeProposer(ctx, dc.ParentRoot(), dc.Slot(), pst)
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("error computing proposer index from parent state")
|
||||
return ErrSidecarUnexpectedProposer
|
||||
}
|
||||
}
|
||||
if idx != dc.ProposerIndex() {
|
||||
log.WithError(ErrSidecarUnexpectedProposer).WithField("expectedProposer", idx).
|
||||
Debug("unexpected blob proposer")
|
||||
return ErrSidecarUnexpectedProposer
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
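VerifyProposer follows a cache-then-compute pattern: consult the proposer cache for the target checkpoint and only replay state on a miss. Reduced to its shape, with the callbacks standing in for the proposer cache and the state replay:

package sketch

import "context"

// lookupOrCompute tries the cached value first and falls back to the
// expensive computation only on a cache miss, as VerifyProposer does.
func lookupOrCompute(ctx context.Context, cached func() (uint64, bool), compute func(context.Context) (uint64, error)) (uint64, error) {
    if idx, ok := cached(); ok {
        return idx, nil
    }
    return compute(ctx)
}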
// InitializerWaiter provides an Initializer once all dependent resources are ready
|
||||
// via the WaitForInitializer method.
|
||||
type InitializerWaiter struct {
|
||||
|
||||
@@ -29,3 +29,7 @@ type BlobVerifier interface {
|
||||
// NewBlobVerifier is a function signature that can be used by code that needs to be
|
||||
// able to mock Initializer.NewBlobVerifier without complex setup.
|
||||
type NewBlobVerifier func(b blocks.ROBlob, reqs []Requirement) BlobVerifier
|
||||
|
||||
// NewColumnVerifier is a function signature that can be used to mock a setup where a
|
||||
// column verifier can be easily initialized.
|
||||
type NewColumnVerifier func(ctx context.Context, dc blocks.RODataColumn) error
|
||||
|
||||
@@ -3,9 +3,10 @@
|
||||
package flags
|
||||
|
||||
import (
|
||||
"github.com/urfave/cli/v2"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/cmd"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -174,6 +175,19 @@ var (
|
||||
Usage: "The factor by which blob batch limit may increase on burst.",
|
||||
Value: 2,
|
||||
}
|
||||
// DataColumnBatchLimit specifies the requested data column batch size.
|
||||
DataColumnBatchLimit = &cli.IntFlag{
|
||||
Name: "data-column-batch-limit",
|
||||
Usage: "The amount of data columns the local peer is bounded to request and respond to in a batch.",
|
||||
// TODO: determine a good default value for this flag.
|
||||
Value: 4096,
|
||||
}
|
||||
// DataColumnBatchLimitBurstFactor specifies the factor by which data column batch size may increase.
|
||||
DataColumnBatchLimitBurstFactor = &cli.IntFlag{
|
||||
Name: "data-column-batch-limit-burst-factor",
|
||||
Usage: "The factor by which data column batch limit may increase on burst.",
|
||||
Value: 2,
|
||||
}
|
||||
// DisableDebugRPCEndpoints disables the debug Beacon API namespace.
|
||||
DisableDebugRPCEndpoints = &cli.BoolFlag{
|
||||
Name: "disable-debug-rpc-endpoints",
|
||||
|
||||
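These flags mirror the existing blob batch flags. The columnBatchLimit() helper referenced earlier in this diff is not shown here, but limit/burst pairs are conventionally combined as below; treat this as an assumption about the wiring, not the confirmed implementation:

package sketch

// limiterParams sketches the usual leaky-bucket combination of a batch
// limit and its burst factor: the steady rate is the limit per second,
// and the bucket capacity is limit * burstFactor.
func limiterParams(batchLimit, burstFactor int) (ratePerSecond, capacity int) {
    return batchLimit, batchLimit * burstFactor
}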
@@ -1,21 +1,24 @@
|
||||
package flags
|
||||
|
||||
import (
|
||||
"github.com/prysmaticlabs/prysm/v5/cmd"
|
||||
"github.com/urfave/cli/v2"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/cmd"
|
||||
)
|
||||
|
||||
// GlobalFlags specifies all the global flags for the
|
||||
// beacon node.
|
||||
type GlobalFlags struct {
|
||||
SubscribeToAllSubnets bool
|
||||
MinimumSyncPeers int
|
||||
MinimumPeersPerSubnet int
|
||||
MaxConcurrentDials int
|
||||
BlockBatchLimit int
|
||||
BlockBatchLimitBurstFactor int
|
||||
BlobBatchLimit int
|
||||
BlobBatchLimitBurstFactor int
|
||||
SubscribeToAllSubnets bool
|
||||
MinimumSyncPeers int
|
||||
MinimumPeersPerSubnet int
|
||||
MaxConcurrentDials int
|
||||
BlockBatchLimit int
|
||||
BlockBatchLimitBurstFactor int
|
||||
BlobBatchLimit int
|
||||
BlobBatchLimitBurstFactor int
|
||||
DataColumnBatchLimit int
|
||||
DataColumnBatchLimitBurstFactor int
|
||||
}
|
||||
|
||||
var globalConfig *GlobalFlags
|
||||
@@ -45,6 +48,8 @@ func ConfigureGlobalFlags(ctx *cli.Context) {
|
||||
cfg.BlockBatchLimitBurstFactor = ctx.Int(BlockBatchLimitBurstFactor.Name)
|
||||
cfg.BlobBatchLimit = ctx.Int(BlobBatchLimit.Name)
|
||||
cfg.BlobBatchLimitBurstFactor = ctx.Int(BlobBatchLimitBurstFactor.Name)
|
||||
cfg.DataColumnBatchLimit = ctx.Int(DataColumnBatchLimit.Name)
|
||||
cfg.DataColumnBatchLimitBurstFactor = ctx.Int(DataColumnBatchLimitBurstFactor.Name)
|
||||
cfg.MinimumPeersPerSubnet = ctx.Int(MinPeersPerSubnet.Name)
|
||||
cfg.MaxConcurrentDials = ctx.Int(MaxConcurrentDials.Name)
|
||||
configureMinimumPeers(ctx, cfg)
|
||||
|
||||
@@ -12,6 +12,9 @@ import (
|
||||
golog "github.com/ipfs/go-log/v2"
|
||||
joonix "github.com/joonix/log"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli/v2"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/builder"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/node"
|
||||
"github.com/prysmaticlabs/prysm/v5/cmd"
|
||||
@@ -35,8 +38,6 @@ import (
|
||||
_ "github.com/prysmaticlabs/prysm/v5/runtime/maxprocs"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/tos"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
var appFlags = []cli.Flag{
|
||||
@@ -60,6 +61,8 @@ var appFlags = []cli.Flag{
|
||||
flags.BlockBatchLimitBurstFactor,
|
||||
flags.BlobBatchLimit,
|
||||
flags.BlobBatchLimitBurstFactor,
|
||||
flags.DataColumnBatchLimit,
|
||||
flags.DataColumnBatchLimitBurstFactor,
|
||||
flags.InteropMockEth1DataVotesFlag,
|
||||
flags.InteropNumValidatorsFlag,
|
||||
flags.InteropGenesisTimeFlag,
|
||||