Mirror of https://github.com/OffchainLabs/prysm.git, synced 2026-01-08.
PeerDAS: Implement syncing in a disjoint network (also known as a "perfect PeerDAS" network). (#15644)
* `computeIndicesByRootByPeer`: Add one slack epoch to the peer head slot.
* `FetchDataColumnSidecars`: Switch mode. Before this commit, this function returned an error as soon as at least ONE requested sidecar could not be retrieved. Now it retrieves what it can (best-effort mode) and returns an additional value: the map of sidecars still missing after the call. It is now the caller's responsibility to check this extra return value and decide what to do when some requested sidecars are still missing.
* `fetchOriginDataColumnSidecars`: Optimize. Before this commit, all the missing sidecars had to be retrieved in a single shot for the sidecars to be considered available. The issue was that if, for example, `sync.FetchDataColumnSidecars` returned all but one sidecar, the returned sidecars were NOT saved, and on the next iteration all the previously fetched sidecars had to be requested from peers again. After this commit, we greedily save all fetched sidecars, solving this issue.
* Initial sync: Do not fetch data column sidecars from before the retention period.
* Implement perfect PeerDAS syncing.
* Add changelog.
* Address review comments from James and Potuz.

Co-authored-by: Potuz <potuz@prysmaticlabs.com>
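To make the new contract concrete, here is a hypothetical caller sketch (identifiers such as `fetchParams`, `storage.Save`, and the logging fields are assumed from the description above, not copied verbatim from the Prysm codebase): the fetch no longer fails outright when some sidecars cannot be obtained; the caller persists what arrived and inspects the returned missing-sidecar map.

// Hypothetical caller sketch: handle the new best-effort contract.
sidecarsByRoot, missingByRoot, err := sync.FetchDataColumnSidecars(fetchParams, roBlocks, requestedIndices)
if err != nil {
	return errors.Wrap(err, "fetch data column sidecars")
}

// Greedily persist whatever was retrieved...
for root, sidecars := range sidecarsByRoot {
	if err := storage.Save(sidecars); err != nil {
		return errors.Wrapf(err, "save data column sidecars for root %#x", root)
	}
}

// ...then decide what to do about sidecars that are still missing
// (for example, retry only those on the next iteration instead of re-fetching everything).
for root, missingIndices := range missingByRoot {
	log.WithFields(logrus.Fields{
		"root":    fmt.Sprintf("%#x", root),
		"missing": len(missingIndices),
	}).Debug("Some requested data column sidecars are still missing")
}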
@@ -28,19 +28,19 @@ func MinimumColumnCountToReconstruct() uint64 {
// ReconstructDataColumnSidecars reconstructs all the data column sidecars from the given input data column sidecars.
// All input sidecars must be committed to the same block.
// `inVerifiedRoSidecars` should contain enough (unique) sidecars to reconstruct the missing columns.
func ReconstructDataColumnSidecars(inVerifiedRoSidecars []blocks.VerifiedRODataColumn) ([]blocks.VerifiedRODataColumn, error) {
func ReconstructDataColumnSidecars(verifiedRoSidecars []blocks.VerifiedRODataColumn) ([]blocks.VerifiedRODataColumn, error) {
// Check if there is at least one input sidecar.
if len(inVerifiedRoSidecars) == 0 {
if len(verifiedRoSidecars) == 0 {
return nil, ErrNotEnoughDataColumnSidecars
}

// Safely retrieve the first sidecar as a reference.
referenceSidecar := inVerifiedRoSidecars[0]
referenceSidecar := verifiedRoSidecars[0]

// Check if all columns have the same length and are committed to the same block.
blobCount := len(referenceSidecar.Column)
blockRoot := referenceSidecar.BlockRoot()
for _, sidecar := range inVerifiedRoSidecars[1:] {
for _, sidecar := range verifiedRoSidecars[1:] {
if len(sidecar.Column) != blobCount {
return nil, ErrColumnLengthsDiffer
}
@@ -51,8 +51,8 @@ func ReconstructDataColumnSidecars(inVerifiedRoSidecars []blocks.VerifiedRODataC
}

// Deduplicate sidecars.
sidecarByIndex := make(map[uint64]blocks.VerifiedRODataColumn, len(inVerifiedRoSidecars))
for _, inVerifiedRoSidecar := range inVerifiedRoSidecars {
sidecarByIndex := make(map[uint64]blocks.VerifiedRODataColumn, len(verifiedRoSidecars))
for _, inVerifiedRoSidecar := range verifiedRoSidecars {
sidecarByIndex[inVerifiedRoSidecar.Index] = inVerifiedRoSidecar
}

@@ -100,25 +100,25 @@ func ReconstructDataColumnSidecars(inVerifiedRoSidecars []blocks.VerifiedRODataC
return nil, errors.Wrap(err, "wait for RecoverCellsAndKZGProofs")
}

outSidecars, err := dataColumnsSidecars(signedBlockHeader, kzgCommitments, kzgCommitmentsInclusionProof, cellsAndProofs)
reconstructedSidecars, err := dataColumnsSidecars(signedBlockHeader, kzgCommitments, kzgCommitmentsInclusionProof, cellsAndProofs)
if err != nil {
return nil, errors.Wrap(err, "data column sidecars from items")
}

// Input sidecars are verified, and we reconstructed the missing sidecars ourselves.
// As a consequence, reconstructed sidecars are also verified.
outVerifiedRoSidecars := make([]blocks.VerifiedRODataColumn, 0, len(outSidecars))
for _, sidecar := range outSidecars {
reconstructedVerifiedRoSidecars := make([]blocks.VerifiedRODataColumn, 0, len(reconstructedSidecars))
for _, sidecar := range reconstructedSidecars {
roSidecar, err := blocks.NewRODataColumnWithRoot(sidecar, blockRoot)
if err != nil {
return nil, errors.Wrap(err, "new RO data column with root")
}

verifiedRoSidecar := blocks.NewVerifiedRODataColumn(roSidecar)
outVerifiedRoSidecars = append(outVerifiedRoSidecars, verifiedRoSidecar)
reconstructedVerifiedRoSidecars = append(reconstructedVerifiedRoSidecars, verifiedRoSidecar)
}

return outVerifiedRoSidecars, nil
return reconstructedVerifiedRoSidecars, nil
}

// ConstructDataColumnSidecars constructs data column sidecars from a block, (un-extended) blobs and

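A minimal usage sketch of the reconstruction path shown above (it relies on the peerdas and storage APIs as they appear in this diff; the sentinel error is hypothetical). Reconstruction only becomes possible once at least `MinimumColumnCountToReconstruct()` distinct columns, typically half of `NumberOfColumns`, are available.

// Minimal sketch, not production code: rebuild every column for one block root.
stored, err := storage.Get(blockRoot, nil) // nil requests all stored columns
if err != nil {
	return errors.Wrapf(err, "storage get for block root %#x", blockRoot)
}
if uint64(len(stored)) < peerdas.MinimumColumnCountToReconstruct() {
	return errNotEnoughColumnsYet // hypothetical sentinel: wait for more columns
}
allColumns, err := peerdas.ReconstructDataColumnSidecars(stored)
if err != nil {
	return errors.Wrapf(err, "reconstruct data column sidecars for block root %#x", blockRoot)
}
// allColumns now holds one verified sidecar per column index (0 .. NumberOfColumns-1).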
@@ -498,8 +498,39 @@ func (s *TestP2P) UpdateCustodyInfo(earliestAvailableSlot primitives.Slot, custo
return s.earliestAvailableSlot, s.custodyGroupCount, nil
}

// CustodyGroupCountFromPeer .
// CustodyGroupCountFromPeer retrieves custody group count from a peer.
// It first tries to get the custody group count from the peer's metadata,
// then falls back to the ENR value if the metadata is not available, then
// falls back to the minimum number of custody groups an honest node should custody
// and serve samples from if ENR is not available.
func (s *TestP2P) CustodyGroupCountFromPeer(pid peer.ID) uint64 {
// Try to get the custody group count from the peer's metadata.
metadata, err := s.peers.Metadata(pid)
if err != nil {
// On error, default to the ENR value.
return s.custodyGroupCountFromPeerENR(pid)
}

// If the metadata is nil, default to the ENR value.
if metadata == nil {
return s.custodyGroupCountFromPeerENR(pid)
}

// Get the custody subnets count from the metadata.
custodyCount := metadata.CustodyGroupCount()

// If the custody count is null, default to the ENR value.
if custodyCount == 0 {
return s.custodyGroupCountFromPeerENR(pid)
}

return custodyCount
}

// custodyGroupCountFromPeerENR retrieves the custody count from the peer's ENR.
// If the ENR is not available, it defaults to the minimum number of custody groups
// an honest node custodies and serves samples from.
func (s *TestP2P) custodyGroupCountFromPeerENR(pid peer.ID) uint64 {
// By default, we assume the peer custodies the minimum number of groups.
custodyRequirement := params.BeaconConfig().CustodyRequirement

@@ -509,7 +540,7 @@ func (s *TestP2P) CustodyGroupCountFromPeer(pid peer.ID) uint64 {
return custodyRequirement
}

// Retrieve the custody subnets count from the ENR.
// Retrieve the custody group count from the ENR.
custodyGroupCount, err := peerdas.CustodyGroupCountFromRecord(record)
if err != nil {
return custodyRequirement

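The fallback order documented above, restated as a tiny self-contained helper (plain uint64 parameters, illustrative only; the real code goes through the peers store, the metadata object, and the ENR record):

// custodyGroupCount returns the metadata value when it is set, otherwise the
// value advertised in the ENR, otherwise the spec minimum (CustodyRequirement).
func custodyGroupCount(metadataCount, enrCount, custodyRequirement uint64) uint64 {
	if metadataCount != 0 {
		return metadataCount
	}
	if enrCount != 0 {
		return enrCount
	}
	return custodyRequirement
}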
@@ -39,232 +39,328 @@ type DataColumnSidecarsParams struct {
DownscorePeerOnRPCFault bool // Downscore a peer if it commits an RPC fault. Not responding with any sidecars at all is considered a fault.
}

// FetchDataColumnSidecars retrieves data column sidecars from storage and peers for the given
// blocks and requested data column indices. It employs a multi-step strategy:
// FetchDataColumnSidecars retrieves data column sidecars for the given blocks and indices
// using a series of fallback strategies.
//
// 1. Direct retrieval: If all requested columns are available in storage, they are
// retrieved directly without reconstruction.
// 2. Reconstruction-based retrieval: If some requested columns are missing but sufficient
// stored columns exist (at least the minimum required for reconstruction), the function
// reconstructs all columns and extracts the requested indices.
// 3. Peer retrieval: If storage and reconstruction fail, missing columns are requested
// from connected peers that are expected to custody the required data.
// For each block in `roBlocks` that has commitments, the function attempts to obtain
// all sidecars corresponding to the indices listed in `requestedIndices`.
//
// The function returns a map of block roots to their corresponding verified read-only data
// columns. It returns an error if data column storage is unavailable, if storage/reconstruction
// operations fail unexpectedly, or if not all requested columns could be retrieved from peers.
// The function returns:
// - A map from block root to the sidecars successfully retrieved.
// - A set of block roots for which not all requested sidecars could be retrieved.
//
// Retrieval strategy (proceeds to the next step only if not all requested sidecars
// were successfully obtained at the current step):
// 1. Attempt to load the requested sidecars from storage, reconstructing them from
// other available sidecars in storage if necessary.
// 2. Request any missing sidecars from peers. If some are still missing, attempt to
// reconstruct them using both stored sidecars and those retrieved from peers.
// 3. Request all remaining possible sidecars from peers that are not already in storage
// or retrieved in step 2. Stop once either all requested sidecars are retrieved,
// or enough sidecars are available (from storage, step 2, and step 3) to reconstruct
// the requested ones.
func FetchDataColumnSidecars(
params DataColumnSidecarsParams,
roBlocks []blocks.ROBlock,
indicesMap map[uint64]bool,
) (map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn, error) {
if len(roBlocks) == 0 || len(indicesMap) == 0 {
return nil, nil
requestedIndices map[uint64]bool,
) (map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn, map[[fieldparams.RootLength]byte]map[uint64]bool, error) {
if len(roBlocks) == 0 || len(requestedIndices) == 0 {
return nil, nil, nil
}

indices := sortedSliceFromMap(indicesMap)
|
||||
slotsWithCommitments := make(map[primitives.Slot]bool)
|
||||
missingIndicesByRoot := make(map[[fieldparams.RootLength]byte]map[uint64]bool)
|
||||
indicesByRootStored := make(map[[fieldparams.RootLength]byte]map[uint64]bool)
|
||||
result := make(map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn)
|
||||
blockCount := len(roBlocks)
|
||||
|
||||
// We first consider all requested roots as incomplete, and remove roots from this
|
||||
// set as we retrieve them.
|
||||
incompleteRoots := make(map[[fieldparams.RootLength]byte]bool, blockCount)
|
||||
slotsWithCommitments := make(map[primitives.Slot]bool, blockCount)
|
||||
slotByRoot := make(map[[fieldparams.RootLength]byte]primitives.Slot, blockCount)
|
||||
storedIndicesByRoot := make(map[[fieldparams.RootLength]byte]map[uint64]bool, blockCount)
|
||||
|
||||
for _, roBlock := range roBlocks {
|
||||
// Filter out blocks without commitments.
|
||||
block := roBlock.Block()
|
||||
|
||||
commitments, err := block.Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "get blob kzg commitments for block root %#x", roBlock.Root())
|
||||
return nil, nil, errors.Wrapf(err, "get blob kzg commitments for block root %#x", roBlock.Root())
|
||||
}
|
||||
|
||||
if len(commitments) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
slotsWithCommitments[block.Slot()] = true
|
||||
root := roBlock.Root()
|
||||
slot := block.Slot()
|
||||
|
||||
// Step 1: Get the requested sidecars for this root if available in storage
|
||||
requestedColumns, err := tryGetStoredColumns(params.Storage, root, indices)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "try get direct columns for root %#x", root)
|
||||
}
|
||||
if requestedColumns != nil {
|
||||
result[root] = requestedColumns
|
||||
continue
|
||||
}
|
||||
incompleteRoots[root] = true
|
||||
slotByRoot[root] = slot
|
||||
slotsWithCommitments[slot] = true
|
||||
|
||||
// Step 2: If step 1 failed, reconstruct the requested sidecars from what is available in storage
|
||||
requestedColumns, err = tryGetReconstructedColumns(params.Storage, root, indices)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "try get reconstructed columns for root %#x", root)
|
||||
}
|
||||
if requestedColumns != nil {
|
||||
result[root] = requestedColumns
|
||||
continue
|
||||
}
|
||||
|
||||
// Step 3a: If steps 1 and 2 failed, keep track of the sidecars that need to be queried from peers
|
||||
// and those that are already stored.
|
||||
indicesToQueryMap, indicesStoredMap := categorizeIndices(params.Storage, root, indices)
|
||||
|
||||
if len(indicesToQueryMap) > 0 {
|
||||
missingIndicesByRoot[root] = indicesToQueryMap
|
||||
}
|
||||
if len(indicesStoredMap) > 0 {
|
||||
indicesByRootStored[root] = indicesStoredMap
|
||||
storedIndices := params.Storage.Summary(root).Stored()
|
||||
if len(storedIndices) > 0 {
|
||||
storedIndicesByRoot[root] = storedIndices
|
||||
}
|
||||
}
|
||||
|
||||
// Early return if no sidecars need to be queried from peers.
|
||||
if len(missingIndicesByRoot) == 0 {
|
||||
return result, nil
|
||||
}
|
||||
initialMissingRootCount := len(incompleteRoots)
|
||||
|
||||
// Step 3b: Request missing sidecars from peers.
|
||||
start, count := time.Now(), computeTotalCount(missingIndicesByRoot)
|
||||
fromPeersResult, err := tryRequestingColumnsFromPeers(params, roBlocks, slotsWithCommitments, missingIndicesByRoot)
|
||||
// Request sidecars from storage (by reconstructing them from other available sidecars if needed).
|
||||
result, err := requestSidecarsFromStorage(params.Storage, storedIndicesByRoot, requestedIndices, incompleteRoots)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "request from peers")
|
||||
return nil, nil, errors.Wrap(err, "request sidecars from storage")
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{"duration": time.Since(start), "count": count}).Debug("Requested data column sidecars from peers")
|
||||
log := log.WithField("initialMissingRootCount", initialMissingRootCount)
|
||||
|
||||
// Step 3c: If needed, try to reconstruct missing sidecars from storage and fetched data.
|
||||
fromReconstructionResult, err := tryReconstructFromStorageAndPeers(params.Storage, fromPeersResult, indicesMap, missingIndicesByRoot)
|
||||
if len(incompleteRoots) == 0 {
|
||||
log.WithField("finalMissingRootCount", 0).Debug("Fetched data column sidecars from storage")
|
||||
return result, nil, nil
|
||||
}
|
||||
|
||||
// Request direct sidecars from peers.
|
||||
directSidecarsByRoot, err := requestDirectSidecarsFromPeers(params, slotByRoot, requestedIndices, slotsWithCommitments, storedIndicesByRoot, incompleteRoots)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "reconstruct from storage and peers")
|
||||
return nil, nil, errors.Wrap(err, "request direct sidecars from peers")
|
||||
}
|
||||
|
||||
for root, verifiedSidecars := range fromReconstructionResult {
|
||||
result[root] = verifiedSidecars
|
||||
// Merge sidecars in storage and those received from peers. Reconstruct if needed.
|
||||
mergedSidecarsByRoot, err := mergeAvailableSidecars(params.Storage, requestedIndices, storedIndicesByRoot, incompleteRoots, directSidecarsByRoot)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "try merge storage and mandatory inputs")
|
||||
}
|
||||
|
||||
for root := range fromPeersResult {
|
||||
if _, ok := fromReconstructionResult[root]; ok {
|
||||
// We already have what we need from peers + reconstruction
|
||||
for root, sidecars := range mergedSidecarsByRoot {
|
||||
result[root] = sidecars
|
||||
}
|
||||
|
||||
if len(incompleteRoots) == 0 {
|
||||
log.WithField("finalMissingRootCount", 0).Debug("Fetched data column sidecars from storage and peers")
|
||||
return result, nil, nil
|
||||
}
|
||||
|
||||
// Request all possible indirect sidecars from peers which are neither stored nor in `directSidecarsByRoot`
|
||||
indirectSidecarsByRoot, err := requestIndirectSidecarsFromPeers(params, slotByRoot, slotsWithCommitments, storedIndicesByRoot, directSidecarsByRoot, requestedIndices, incompleteRoots)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "request all sidecars from peers")
|
||||
}
|
||||
|
||||
// Merge sidecars in storage and those received from peers. Reconstruct if needed.
|
||||
mergedSidecarsByRoot, err = mergeAvailableSidecars(params.Storage, requestedIndices, storedIndicesByRoot, incompleteRoots, indirectSidecarsByRoot)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "try merge storage and all inputs")
|
||||
}
|
||||
|
||||
for root, sidecars := range mergedSidecarsByRoot {
|
||||
result[root] = sidecars
|
||||
}
|
||||
|
||||
if len(incompleteRoots) == 0 {
|
||||
log.WithField("finalMissingRootCount", 0).Debug("Fetched data column sidecars from storage and peers using rescue mode")
|
||||
return result, nil, nil
|
||||
}
|
||||
|
||||
// For remaining incomplete roots, assemble what is available.
|
||||
incompleteSidecarsByRoot, missingByRoot, err := assembleAvailableSidecars(params.Storage, requestedIndices, incompleteRoots, directSidecarsByRoot)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "assemble available sidecars for incomplete roots")
|
||||
}
|
||||
|
||||
for root, sidecars := range incompleteSidecarsByRoot {
|
||||
result[root] = sidecars
|
||||
}
|
||||
|
||||
log.WithField("finalMissingRootCount", len(incompleteRoots)).Debug("Failed to fetch data column sidecars from storage and peers using rescue mode")
|
||||
return result, missingByRoot, nil
|
||||
}
|
||||
|
||||
// requestSidecarsFromStorage attempts to retrieve data column sidecars for each block root in `roots`
// and for all indices specified in `requestedIndices`.
//
// If not all requested sidecars can be obtained for a given root, that root is excluded from the result.
// It returns a map from each root to its successfully retrieved sidecars.
//
// WARNING: This function mutates `roots` by removing entries for which all requested sidecars
// were successfully retrieved.
func requestSidecarsFromStorage(
storage filesystem.DataColumnStorageReader,
storedIndicesByRoot map[[fieldparams.RootLength]byte]map[uint64]bool,
requestedIndicesMap map[uint64]bool,
roots map[[fieldparams.RootLength]byte]bool,
) (map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn, error) {
requestedIndices := sortedSliceFromMap(requestedIndicesMap)
|
||||
|
||||
result := make(map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn, len(roots))
|
||||
|
||||
for root := range roots {
|
||||
storedIndices := storedIndicesByRoot[root]
|
||||
|
||||
// Check if all requested indices are stored.
|
||||
allAvailable := true
|
||||
for index := range requestedIndicesMap {
|
||||
if !storedIndices[index] {
|
||||
allAvailable = false
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Skip if not all requested indices are stored.
|
||||
if !allAvailable {
|
||||
continue
|
||||
}
|
||||
|
||||
result[root] = append(result[root], fromPeersResult[root]...)
|
||||
|
||||
storedIndices := indicesByRootStored[root]
|
||||
if len(storedIndices) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
storedColumns, err := tryGetStoredColumns(params.Storage, root, sortedSliceFromMap(storedIndices))
|
||||
// All requested indices are stored, retrieve them.
|
||||
verifiedRoSidecars, err := storage.Get(root, requestedIndices)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "try get direct columns for root %#x", root)
|
||||
return nil, errors.Wrapf(err, "storage get for block root %#x", root)
|
||||
}
|
||||
|
||||
result[root] = append(result[root], storedColumns...)
|
||||
result[root] = verifiedRoSidecars
|
||||
delete(roots, root)
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// tryGetStoredColumns attempts to retrieve all requested data column sidecars directly from storage
|
||||
// if they are all available. Returns the sidecars if successful, and nil if at least one
|
||||
// requested sidecar is not available in the storage.
|
||||
func tryGetStoredColumns(storage filesystem.DataColumnStorageReader, blockRoot [fieldparams.RootLength]byte, indices []uint64) ([]blocks.VerifiedRODataColumn, error) {
|
||||
// Check if all requested indices are present in cache
|
||||
storedIndices := storage.Summary(blockRoot).Stored()
|
||||
allRequestedPresent := true
|
||||
for _, requestedIndex := range indices {
|
||||
if !storedIndices[requestedIndex] {
|
||||
allRequestedPresent = false
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !allRequestedPresent {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// All requested data is present, retrieve directly from DB
|
||||
requestedColumns, err := storage.Get(blockRoot, indices)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to get data columns for block root %#x", blockRoot)
|
||||
}
|
||||
|
||||
return requestedColumns, nil
|
||||
}
|
||||
|
||||
// tryGetReconstructedColumns attempts to retrieve columns using reconstruction
|
||||
// if sufficient columns are available. Returns the columns if successful, nil and nil if insufficient columns,
|
||||
// or nil and error if an error occurs.
|
||||
func tryGetReconstructedColumns(storage filesystem.DataColumnStorageReader, blockRoot [fieldparams.RootLength]byte, indices []uint64) ([]blocks.VerifiedRODataColumn, error) {
|
||||
// Check if we have enough columns for reconstruction
|
||||
summary := storage.Summary(blockRoot)
|
||||
if summary.Count() < peerdas.MinimumColumnCountToReconstruct() {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Retrieve all stored columns for reconstruction
|
||||
allStoredColumns, err := storage.Get(blockRoot, nil)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to get all stored columns for reconstruction for block root %#x", blockRoot)
|
||||
}
|
||||
|
||||
// Attempt reconstruction
|
||||
reconstructedColumns, err := peerdas.ReconstructDataColumnSidecars(allStoredColumns)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to reconstruct data columns for block root %#x", blockRoot)
|
||||
}
|
||||
|
||||
// Health check: ensure we have the expected number of columns
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
if uint64(len(reconstructedColumns)) != numberOfColumns {
|
||||
return nil, errors.Errorf("reconstructed %d columns but expected %d for block root %#x", len(reconstructedColumns), numberOfColumns, blockRoot)
|
||||
}
|
||||
|
||||
// Extract only the requested indices from reconstructed data using direct indexing
|
||||
requestedColumns := make([]blocks.VerifiedRODataColumn, 0, len(indices))
|
||||
for _, requestedIndex := range indices {
|
||||
if requestedIndex >= numberOfColumns {
|
||||
return nil, errors.Errorf("requested column index %d exceeds maximum %d for block root %#x", requestedIndex, numberOfColumns-1, blockRoot)
|
||||
}
|
||||
requestedColumns = append(requestedColumns, reconstructedColumns[requestedIndex])
|
||||
}
|
||||
|
||||
return requestedColumns, nil
|
||||
}
|
||||
|
||||
// categorizeIndices separates indices into those that need to be queried from peers
|
||||
// and those that are already stored.
|
||||
func categorizeIndices(storage filesystem.DataColumnStorageReader, blockRoot [fieldparams.RootLength]byte, indices []uint64) (map[uint64]bool, map[uint64]bool) {
|
||||
indicesToQuery := make(map[uint64]bool, len(indices))
|
||||
indicesStored := make(map[uint64]bool, len(indices))
|
||||
|
||||
allStoredIndices := storage.Summary(blockRoot).Stored()
|
||||
for _, index := range indices {
|
||||
if allStoredIndices[index] {
|
||||
indicesStored[index] = true
|
||||
continue
|
||||
}
|
||||
indicesToQuery[index] = true
|
||||
}
|
||||
|
||||
return indicesToQuery, indicesStored
|
||||
}
|
||||
|
||||
// tryRequestingColumnsFromPeers attempts to request missing data column sidecars from connected peers.
// It explores the connected peers to find those that are expected to custody the requested columns
// and returns only once every requested column has either been retrieved or been attempted from
// all possible peers.
// WARNING: This function alters `missingIndicesByRoot` by removing successfully retrieved columns.
// After running this function, the caller can check the content of the (modified) `missingIndicesByRoot` map
// to see whether some sidecars are still missing.
func tryRequestingColumnsFromPeers(
p DataColumnSidecarsParams,
roBlocks []blocks.ROBlock,
// requestDirectSidecarsFromPeers tries to fetch missing data column sidecars from connected peers.
// It searches through the available peers to identify those responsible for the requested columns,
// and returns only after all columns have either been successfully retrieved or all candidate peers
// have been exhausted.
//
// It returns a map from each root to its successfully retrieved sidecars.
func requestDirectSidecarsFromPeers(
params DataColumnSidecarsParams,
slotByRoot map[[fieldparams.RootLength]byte]primitives.Slot,
requestedIndices map[uint64]bool,
slotsWithCommitments map[primitives.Slot]bool,
missingIndicesByRoot map[[fieldparams.RootLength]byte]map[uint64]bool,
storedIndicesByRoot map[[fieldparams.RootLength]byte]map[uint64]bool,
incompleteRoots map[[fieldparams.RootLength]byte]bool,
) (map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn, error) {
start := time.Now()
|
||||
|
||||
// Create a new random source for peer selection.
|
||||
randomSource := rand.NewGenerator()
|
||||
|
||||
// Compute slots by block root.
|
||||
slotByRoot := computeSlotByBlockRoot(roBlocks)
|
||||
// Determine all the sidecars each peer is expected to custody.
|
||||
connectedPeersSlice := params.P2P.Peers().Connected()
|
||||
connectedPeers := make(map[goPeer.ID]bool, len(connectedPeersSlice))
|
||||
for _, peer := range connectedPeersSlice {
|
||||
connectedPeers[peer] = true
|
||||
}
|
||||
|
||||
// Compute missing indices by root, excluding those already in storage.
|
||||
missingIndicesByRoot := make(map[[fieldparams.RootLength]byte]map[uint64]bool, len(incompleteRoots))
|
||||
for root := range incompleteRoots {
|
||||
storedIndices := storedIndicesByRoot[root]
|
||||
|
||||
missingIndices := make(map[uint64]bool, len(requestedIndices))
|
||||
for index := range requestedIndices {
|
||||
if !storedIndices[index] {
|
||||
missingIndices[index] = true
|
||||
}
|
||||
}
|
||||
|
||||
if len(missingIndices) > 0 {
|
||||
missingIndicesByRoot[root] = missingIndices
|
||||
}
|
||||
}
|
||||
|
||||
initialMissingCount := computeTotalCount(missingIndicesByRoot)
|
||||
|
||||
indicesByRootByPeer, err := computeIndicesByRootByPeer(params.P2P, slotByRoot, missingIndicesByRoot, connectedPeers)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "explore peers")
|
||||
}
|
||||
|
||||
verifiedColumnsByRoot := make(map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn)
|
||||
for len(missingIndicesByRoot) > 0 && len(indicesByRootByPeer) > 0 {
|
||||
// Select peers to query the missing sidecars from.
|
||||
indicesByRootByPeerToQuery, err := selectPeers(params, randomSource, len(missingIndicesByRoot), indicesByRootByPeer)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "select peers")
|
||||
}
|
||||
|
||||
// Remove selected peers from the maps.
|
||||
for peer := range indicesByRootByPeerToQuery {
|
||||
delete(connectedPeers, peer)
|
||||
}
|
||||
|
||||
// Fetch the sidecars from the chosen peers.
|
||||
roDataColumnsByPeer := fetchDataColumnSidecarsFromPeers(params, slotByRoot, slotsWithCommitments, indicesByRootByPeerToQuery)
|
||||
|
||||
// Verify the received data column sidecars.
|
||||
verifiedRoDataColumnSidecars, err := verifyDataColumnSidecarsByPeer(params.P2P, params.NewVerifier, roDataColumnsByPeer)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "verify data columns sidecars by peer")
|
||||
}
|
||||
|
||||
// Remove the verified sidecars from the missing indices map and compute the new verified columns by root.
|
||||
localVerifiedColumnsByRoot := updateResults(verifiedRoDataColumnSidecars, missingIndicesByRoot)
|
||||
for root, verifiedRoDataColumns := range localVerifiedColumnsByRoot {
|
||||
verifiedColumnsByRoot[root] = append(verifiedColumnsByRoot[root], verifiedRoDataColumns...)
|
||||
}
|
||||
|
||||
// Compute indices by root by peers with the updated missing indices and connected peers.
|
||||
indicesByRootByPeer, err = computeIndicesByRootByPeer(params.P2P, slotByRoot, missingIndicesByRoot, connectedPeers)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "explore peers")
|
||||
}
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"duration": time.Since(start),
|
||||
"initialMissingCount": initialMissingCount,
|
||||
"finalMissingCount": computeTotalCount(missingIndicesByRoot),
|
||||
}).Debug("Requested direct data column sidecars from peers")
|
||||
|
||||
return verifiedColumnsByRoot, nil
|
||||
}
|
||||
|
||||
// requestIndirectSidecarsFromPeers requests, for every root in `roots` and from all possible peers,
// any sidecars not already available in storage or in `alreadyAvailableByRoot`, until, for each root:
// - all indices in `requestedIndices` are available, or
// - enough sidecars are available to trigger a reconstruction, or
// - all peers are exhausted.
func requestIndirectSidecarsFromPeers(
p DataColumnSidecarsParams,
slotByRoot map[[fieldparams.RootLength]byte]primitives.Slot,
slotsWithCommitments map[primitives.Slot]bool,
storedIndicesByRoot map[[fieldparams.RootLength]byte]map[uint64]bool,
alreadyAvailableByRoot map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn,
requestedIndices map[uint64]bool,
roots map[[fieldparams.RootLength]byte]bool,
) (map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn, error) {
start := time.Now()
|
||||
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
minimumColumnCountToReconstruct := peerdas.MinimumColumnCountToReconstruct()
|
||||
|
||||
// Create a new random source for peer selection.
|
||||
randomSource := rand.NewGenerator()
|
||||
|
||||
// For each root compute all possible data column sidecar indices excluding
|
||||
// those already stored or already available.
|
||||
indicesToRetrieveByRoot := make(map[[fieldparams.RootLength]byte]map[uint64]bool)
|
||||
for root := range roots {
|
||||
alreadyAvailableIndices := make(map[uint64]bool, len(alreadyAvailableByRoot[root]))
|
||||
for _, sidecar := range alreadyAvailableByRoot[root] {
|
||||
alreadyAvailableIndices[sidecar.Index] = true
|
||||
}
|
||||
|
||||
storedIndices := storedIndicesByRoot[root]
|
||||
indicesToRetrieve := make(map[uint64]bool, numberOfColumns)
|
||||
for index := range numberOfColumns {
|
||||
if !(storedIndices[index] || alreadyAvailableIndices[index]) {
|
||||
indicesToRetrieve[index] = true
|
||||
}
|
||||
}
|
||||
|
||||
if len(indicesToRetrieve) > 0 {
|
||||
indicesToRetrieveByRoot[root] = indicesToRetrieve
|
||||
}
|
||||
}
|
||||
|
||||
initialToRetrieveRootCount := len(indicesToRetrieveByRoot)
|
||||
|
||||
// Determine all the sidecars each peer is expected to custody.
|
||||
connectedPeersSlice := p.P2P.Peers().Connected()
|
||||
@@ -273,15 +369,22 @@ func tryRequestingColumnsFromPeers(
|
||||
connectedPeers[peer] = true
|
||||
}
|
||||
|
||||
indicesByRootByPeer, err := computeIndicesByRootByPeer(p.P2P, slotByRoot, missingIndicesByRoot, connectedPeers)
|
||||
// Compute which peers have which of the missing indices.
|
||||
indicesByRootByPeer, err := computeIndicesByRootByPeer(p.P2P, slotByRoot, indicesToRetrieveByRoot, connectedPeers)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "explore peers")
|
||||
}
|
||||
|
||||
verifiedColumnsByRoot := make(map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn)
|
||||
for len(missingIndicesByRoot) > 0 && len(indicesByRootByPeer) > 0 {
|
||||
// Already add into results all sidecars present in `alreadyAvailableByRoot`.
|
||||
result := make(map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn)
|
||||
for root := range roots {
|
||||
alreadyAvailable := alreadyAvailableByRoot[root]
|
||||
result[root] = append(result[root], alreadyAvailable...)
|
||||
}
|
||||
|
||||
for len(indicesToRetrieveByRoot) > 0 && len(indicesByRootByPeer) > 0 {
|
||||
// Select peers to query the missing sidecars from.
|
||||
indicesByRootByPeerToQuery, err := selectPeers(p, randomSource, len(missingIndicesByRoot), indicesByRootByPeer)
|
||||
indicesByRootByPeerToQuery, err := selectPeers(p, randomSource, len(indicesToRetrieveByRoot), indicesByRootByPeer)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "select peers")
|
||||
}
|
||||
@@ -300,82 +403,216 @@ func tryRequestingColumnsFromPeers(
|
||||
return nil, errors.Wrap(err, "verify data columns sidecars by peer")
|
||||
}
|
||||
|
||||
// Remove the verified sidecars from the missing indices map and compute the new verified columns by root.
|
||||
localVerifiedColumnsByRoot := updateResults(verifiedRoDataColumnSidecars, missingIndicesByRoot)
|
||||
// Add to results all verified sidecars.
|
||||
localVerifiedColumnsByRoot := updateResults(verifiedRoDataColumnSidecars, indicesToRetrieveByRoot)
|
||||
for root, verifiedRoDataColumns := range localVerifiedColumnsByRoot {
|
||||
verifiedColumnsByRoot[root] = append(verifiedColumnsByRoot[root], verifiedRoDataColumns...)
|
||||
result[root] = append(result[root], verifiedRoDataColumns...)
|
||||
}
|
||||
|
||||
// Unlabel a root as to retrieve if enough sidecars are retrieved to enable a reconstruction,
|
||||
// or if all requested sidecars are now available for this root.
|
||||
for root, indicesToRetrieve := range indicesToRetrieveByRoot {
|
||||
storedIndices := storedIndicesByRoot[root]
|
||||
storedCount := uint64(len(storedIndices))
|
||||
resultCount := uint64(len(result[root]))
|
||||
|
||||
if storedCount+resultCount >= minimumColumnCountToReconstruct {
|
||||
delete(indicesToRetrieveByRoot, root)
|
||||
continue
|
||||
}
|
||||
|
||||
allRequestedIndicesAvailable := true
|
||||
for index := range requestedIndices {
|
||||
if indicesToRetrieve[index] {
|
||||
// Still need this index.
|
||||
allRequestedIndicesAvailable = false
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if allRequestedIndicesAvailable {
|
||||
delete(indicesToRetrieveByRoot, root)
|
||||
}
|
||||
}
|
||||
|
||||
// Compute indices by root by peers with the updated missing indices and connected peers.
|
||||
indicesByRootByPeer, err = computeIndicesByRootByPeer(p.P2P, slotByRoot, missingIndicesByRoot, connectedPeers)
|
||||
indicesByRootByPeer, err = computeIndicesByRootByPeer(p.P2P, slotByRoot, indicesToRetrieveByRoot, connectedPeers)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "explore peers")
|
||||
}
|
||||
}
|
||||
|
||||
return verifiedColumnsByRoot, nil
|
||||
}
|
||||
|
||||
// tryReconstructFromStorageAndPeers attempts to reconstruct missing data column sidecars
// using the data available in the storage and the data fetched from peers.
// If, for at least one root, the reconstruction is not possible, an error is returned.
func tryReconstructFromStorageAndPeers(
storage filesystem.DataColumnStorageReader,
fromPeersByRoot map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn,
indices map[uint64]bool,
missingIndicesByRoot map[[fieldparams.RootLength]byte]map[uint64]bool,
) (map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn, error) {
if len(missingIndicesByRoot) == 0 {
|
||||
// Nothing to do, return early.
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
minimumColumnsCountToReconstruct := peerdas.MinimumColumnCountToReconstruct()
|
||||
|
||||
start := time.Now()
|
||||
result := make(map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn, len(missingIndicesByRoot))
|
||||
for root := range missingIndicesByRoot {
|
||||
// Check if a reconstruction is possible based on what we have from the store and fetched from peers.
|
||||
summary := storage.Summary(root)
|
||||
storedCount := summary.Count()
|
||||
fetchedCount := uint64(len(fromPeersByRoot[root]))
|
||||
|
||||
if storedCount+fetchedCount < minimumColumnsCountToReconstruct {
|
||||
return nil, errors.Errorf("cannot reconstruct all needed columns for root %#x. stored: %d, fetched: %d, minimum: %d", root, storedCount, fetchedCount, minimumColumnsCountToReconstruct)
|
||||
}
|
||||
|
||||
// Load all we have in the store.
|
||||
storedSidecars, err := storage.Get(root, nil)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to get stored sidecars for root %#x", root)
|
||||
}
|
||||
|
||||
sidecars := make([]blocks.VerifiedRODataColumn, 0, storedCount+fetchedCount)
|
||||
sidecars = append(sidecars, storedSidecars...)
|
||||
sidecars = append(sidecars, fromPeersByRoot[root]...)
|
||||
|
||||
// Attempt reconstruction.
|
||||
reconstructedSidecars, err := peerdas.ReconstructDataColumnSidecars(sidecars)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to reconstruct data columns for root %#x", root)
|
||||
}
|
||||
|
||||
// Select only sidecars we need.
|
||||
for _, sidecar := range reconstructedSidecars {
|
||||
if indices[sidecar.Index] {
|
||||
result[root] = append(result[root], sidecar)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"rootCount": len(missingIndicesByRoot),
|
||||
"elapsed": time.Since(start),
|
||||
}).Debug("Reconstructed from storage and peers")
|
||||
"duration": time.Since(start),
|
||||
"initialToRetrieveRootCount": initialToRetrieveRootCount,
|
||||
"finalToRetrieveRootCount": len(indicesToRetrieveByRoot),
|
||||
}).Debug("Requested all data column sidecars from peers")
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// mergeAvailableSidecars retrieves missing data column sidecars by combining
// what is available in storage with the sidecars provided in `alreadyAvailableByRoot`,
// reconstructing them when necessary.
//
// The function works in two modes depending on sidecar availability:
// - If all requested sidecars are already available (no reconstruction needed),
// it simply returns them directly from storage and inputs.
// - If storage + inputs together provide enough sidecars to reconstruct all requested ones,
// it reconstructs and returns the requested sidecars.
//
// If a root cannot yield all requested sidecars, that root is omitted from the result.
// (A standalone sketch of this decision rule follows the function below.)
//
// Note: It is assumed that no sidecar in `alreadyAvailableByRoot` is already present in storage.
//
// WARNING: This function mutates `roots`, removing any block roots
// for which all requested sidecars were successfully retrieved.
func mergeAvailableSidecars(
storage filesystem.DataColumnStorageReader,
requestedIndices map[uint64]bool,
storedIndicesByRoot map[[fieldparams.RootLength]byte]map[uint64]bool,
roots map[[fieldparams.RootLength]byte]bool,
alreadyAvailableByRoot map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn,
) (map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn, error) {
minimumColumnsCountToReconstruct := peerdas.MinimumColumnCountToReconstruct()
|
||||
|
||||
result := make(map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn, len(roots))
|
||||
for root := range roots {
|
||||
storedIndices := storedIndicesByRoot[root]
|
||||
alreadyAvailable := alreadyAvailableByRoot[root]
|
||||
|
||||
// Compute already available indices.
|
||||
alreadyAvailableIndices := make(map[uint64]bool, len(alreadyAvailable))
|
||||
for _, sidecar := range alreadyAvailable {
|
||||
alreadyAvailableIndices[sidecar.Index] = true
|
||||
}
|
||||
|
||||
// Check if reconstruction is needed.
|
||||
isReconstructionNeeded := false
|
||||
for index := range requestedIndices {
|
||||
if !(storedIndices[index] || alreadyAvailableIndices[index]) {
|
||||
isReconstructionNeeded = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Check if reconstruction is possible.
|
||||
storedCount := uint64(len(storedIndices))
|
||||
alreadyAvailableCount := uint64(len(alreadyAvailableIndices))
|
||||
isReconstructionPossible := storedCount+alreadyAvailableCount >= minimumColumnsCountToReconstruct
|
||||
|
||||
// Skip if the reconstruction is needed and not possible.
|
||||
if isReconstructionNeeded && !isReconstructionPossible {
|
||||
continue
|
||||
}
|
||||
|
||||
// Reconstruct if reconstruction is needed and possible.
|
||||
if isReconstructionNeeded && isReconstructionPossible {
|
||||
// Load all we have in the store.
|
||||
stored, err := storage.Get(root, nil)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "storage get for root %#x", root)
|
||||
}
|
||||
|
||||
allAvailable := make([]blocks.VerifiedRODataColumn, 0, storedCount+alreadyAvailableCount)
|
||||
allAvailable = append(allAvailable, stored...)
|
||||
allAvailable = append(allAvailable, alreadyAvailable...)
|
||||
|
||||
// Attempt reconstruction.
|
||||
reconstructedSidecars, err := peerdas.ReconstructDataColumnSidecars(allAvailable)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "reconstruct data column sidecars for root %#x", root)
|
||||
}
|
||||
|
||||
// Select only sidecars we need.
|
||||
for _, sidecar := range reconstructedSidecars {
|
||||
if requestedIndices[sidecar.Index] {
|
||||
result[root] = append(result[root], sidecar)
|
||||
}
|
||||
}
|
||||
|
||||
delete(roots, root)
|
||||
continue
|
||||
}
|
||||
|
||||
// Reconstruction is not needed, simply assemble what is available in storage and already available.
|
||||
allAvailable, err := assembleAvailableSidecarsForRoot(storage, alreadyAvailableByRoot, root, requestedIndices)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "assemble available sidecars")
|
||||
}
|
||||
|
||||
result[root] = allAvailable
|
||||
delete(roots, root)
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
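The per-root decision described in the doc comment above can be restated as a small, self-contained program (plain Go types and made-up values, illustrative only; this is not the Prysm implementation):

package main

import "fmt"

// decide restates the mergeAvailableSidecars rule: serve directly when nothing
// is missing, reconstruct when the combined inputs reach the threshold,
// otherwise leave the root incomplete.
func decide(requested, stored, available map[uint64]bool, reconstructionThreshold int) string {
	missing := false
	for idx := range requested {
		if !stored[idx] && !available[idx] {
			missing = true
			break
		}
	}
	switch {
	case !missing:
		return "assemble directly from storage and inputs"
	case len(stored)+len(available) >= reconstructionThreshold:
		return "reconstruct, then keep only the requested indices"
	default:
		return "skip: root stays incomplete"
	}
}

func main() {
	requested := map[uint64]bool{31: true, 81: true, 106: true}
	stored := map[uint64]bool{106: true}
	available := map[uint64]bool{31: true}
	// With a threshold of 64 (half of 128 columns), 2 sidecars are not enough to reconstruct.
	fmt.Println(decide(requested, stored, available, 64)) // skip: root stays incomplete
}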
|
||||
|
||||
// assembleAvailableSidecars assembles all sidecars available in storage
// and in `alreadyAvailableByRoot` corresponding to `roots`.
// It also returns all missing indices by root.
func assembleAvailableSidecars(
storage filesystem.DataColumnStorageReader,
requestedIndices map[uint64]bool,
roots map[[fieldparams.RootLength]byte]bool,
alreadyAvailableByRoot map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn,
) (map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn, map[[fieldparams.RootLength]byte]map[uint64]bool, error) {
// Assemble results.
|
||||
result := make(map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn, len(roots))
|
||||
for root := range roots {
|
||||
allAvailable, err := assembleAvailableSidecarsForRoot(storage, alreadyAvailableByRoot, root, requestedIndices)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "assemble sidecars for root")
|
||||
}
|
||||
|
||||
if len(allAvailable) > 0 {
|
||||
result[root] = allAvailable
|
||||
}
|
||||
}
|
||||
|
||||
// Compute still missing sidecars.
|
||||
missingByRoot := make(map[[fieldparams.RootLength]byte]map[uint64]bool, len(roots))
|
||||
for root := range roots {
|
||||
missing := make(map[uint64]bool, len(requestedIndices))
|
||||
for index := range requestedIndices {
|
||||
missing[index] = true
|
||||
}
|
||||
|
||||
allAvailable := result[root]
|
||||
for _, sidecar := range allAvailable {
|
||||
delete(missing, sidecar.Index)
|
||||
}
|
||||
|
||||
if len(missing) > 0 {
|
||||
missingByRoot[root] = missing
|
||||
}
|
||||
}
|
||||
|
||||
return result, missingByRoot, nil
|
||||
}
|
||||
|
||||
// assembleAvailableSidecarsForRoot assembles all sidecars available in storage
// and in `alreadyAvailableByRoot` corresponding to `root` and `indices`.
func assembleAvailableSidecarsForRoot(
storage filesystem.DataColumnStorageReader,
alreadyAvailableByRoot map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn,
root [fieldparams.RootLength]byte,
indices map[uint64]bool,
) ([]blocks.VerifiedRODataColumn, error) {
stored, err := storage.Get(root, sortedSliceFromMap(indices))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "storage get for root %#x", root)
|
||||
}
|
||||
|
||||
alreadyAvailable := alreadyAvailableByRoot[root]
|
||||
|
||||
allAvailable := make([]blocks.VerifiedRODataColumn, 0, len(stored)+len(alreadyAvailable))
|
||||
allAvailable = append(allAvailable, stored...)
|
||||
allAvailable = append(allAvailable, alreadyAvailable...)
|
||||
|
||||
return allAvailable, nil
|
||||
}
|
||||
|
||||
// selectPeers selects the peers to query the sidecars from.
// It begins by randomly selecting a peer in `origIndicesByRootByPeer` that has enough bandwidth,
// and assigns to it all its available sidecars. Then, it randomly selects another peer, until
@@ -450,7 +687,6 @@ func updateResults(
|
||||
verifiedSidecars []blocks.VerifiedRODataColumn,
|
||||
missingIndicesByRoot map[[fieldparams.RootLength]byte]map[uint64]bool,
|
||||
) map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn {
|
||||
// Copy the original map to avoid modifying it directly.
|
||||
verifiedSidecarsByRoot := make(map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn)
|
||||
for _, verifiedSidecar := range verifiedSidecars {
|
||||
blockRoot := verifiedSidecar.BlockRoot()
|
||||
@@ -566,11 +802,23 @@ func sendDataColumnSidecarsRequest(
|
||||
roDataColumns = append(roDataColumns, localRoDataColumns...)
|
||||
}
|
||||
|
||||
prettyByRangeRequests := make([]map[string]any, 0, len(byRangeRequests))
|
||||
for _, request := range byRangeRequests {
|
||||
prettyRequest := map[string]any{
|
||||
"startSlot": request.StartSlot,
|
||||
"count": request.Count,
|
||||
"columns": request.Columns,
|
||||
}
|
||||
|
||||
prettyByRangeRequests = append(prettyByRangeRequests, prettyRequest)
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"respondedSidecars": len(roDataColumns),
|
||||
"requests": len(byRangeRequests),
|
||||
"requestCount": len(byRangeRequests),
|
||||
"type": "byRange",
|
||||
"duration": time.Since(start),
|
||||
"requests": prettyByRangeRequests,
|
||||
}).Debug("Received data column sidecars")
|
||||
|
||||
return roDataColumns, nil
|
||||
@@ -766,6 +1014,8 @@ func computeIndicesByRootByPeer(
indicesByBlockRoot map[[fieldparams.RootLength]byte]map[uint64]bool,
peers map[goPeer.ID]bool,
) (map[goPeer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool, error) {
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch

// First, compute custody columns for all peers
peersByIndex := make(map[uint64]map[goPeer.ID]bool)
headSlotByPeer := make(map[goPeer.ID]primitives.Slot)
@@ -800,7 +1050,10 @@ func computeIndicesByRootByPeer(
return nil, errors.Errorf("chain state is nil for peer %s", peer)
}

headSlotByPeer[peer] = peerChainState.HeadSlot
// Our view of a peer's head slot is not updated in real time.
// We add one epoch of slack to account for the fact that the peer's real
// head slot may be higher than our view of it.
headSlotByPeer[peer] = peerChainState.HeadSlot + slotsPerEpoch
}
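A rough worked example of the one-epoch slack added above (illustrative numbers; mainnet SlotsPerEpoch is 32):

// A peer whose last reported Status had head slot 1000 is treated as if its
// head were slot 1032, so recently produced blocks are still requested from it.
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch // 32 on mainnet
reportedHead := primitives.Slot(1000)
assumedHead := reportedHead + slotsPerEpoch // 1032
_ = assumedHead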
|
||||
// For each block root and its indices, find suitable peers
|
||||
@@ -931,15 +1184,6 @@ func sortedSliceFromMap(m map[uint64]bool) []uint64 {
|
||||
return result
|
||||
}
|
||||
|
||||
// computeSlotByBlockRoot maps each block root to its corresponding slot.
|
||||
func computeSlotByBlockRoot(roBlocks []blocks.ROBlock) map[[fieldparams.RootLength]byte]primitives.Slot {
|
||||
slotByBlockRoot := make(map[[fieldparams.RootLength]byte]primitives.Slot, len(roBlocks))
|
||||
for _, roBlock := range roBlocks {
|
||||
slotByBlockRoot[roBlock.Root()] = roBlock.Block().Slot()
|
||||
}
|
||||
return slotByBlockRoot
|
||||
}
|
||||
|
||||
// computeTotalCount calculates the total count of indices across all roots.
|
||||
func computeTotalCount(input map[[fieldparams.RootLength]byte]map[uint64]bool) int {
|
||||
totalCount := 0
|
||||
|
||||
@@ -20,8 +20,10 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/wrapper"
|
||||
leakybucket "github.com/OffchainLabs/prysm/v6/container/leaky-bucket"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
pb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/util"
|
||||
@@ -33,11 +35,13 @@ import (

func TestFetchDataColumnSidecars(t *testing.T) {
numberOfColumns := params.BeaconConfig().NumberOfColumns
// Slot 1: All needed sidecars are available in storage
// Slot 2: No commitment
// Slot 3: All sidecars are saved except the needed ones
// Slot 4: Some sidecars are in storage, others have to be retrieved from peers.
// Slot 5: Some sidecars are in storage, others have to be retrieved from peers, but peers do not deliver all requested sidecars.
// Slot 1: All needed sidecars are available in storage ==> Retrieval from storage only.
// Slot 2: No commitment ==> Nothing to do.
// Slot 3: Some sidecars are in storage, others have to be retrieved from peers ==> Retrieval from storage and peers.
// Slot 4: Some sidecars are in storage, others have to be retrieved from peers, but peers do not deliver all requested sidecars ==> Retrieval from storage and peers, then reconstruction.
// Slot 5: Some sidecars are in storage, others have to be retrieved from peers; peers do not return everything needed on the first attempt but return the needed sidecars on the second attempt ==> Retrieval from storage and peers.
// Slot 6: Some sidecars are in storage, others have to be retrieved from peers; peers do not return everything needed on the first attempt and return sidecars that are not needed on the second attempt ==> Retrieval from storage and peers, then reconstruction.
// Slot 7: Some sidecars are in storage, others have to be retrieved from peers, but peers do not send anything ==> Still missing.

params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
@@ -75,67 +79,74 @@ func TestFetchDataColumnSidecars(t *testing.T) {
|
||||
// Block 3
|
||||
block3, _, verifiedSidecars3 := util.GenerateTestFuluBlockWithSidecars(t, blobCount, util.WithSlot(3))
|
||||
root3 := block3.Root()
|
||||
|
||||
toStore3 := make([]blocks.VerifiedRODataColumn, 0, numberOfColumns-uint64(len(indices)))
|
||||
for i := range numberOfColumns {
|
||||
if !indices[i] {
|
||||
sidecar := verifiedSidecars3[i]
|
||||
toStore3 = append(toStore3, sidecar)
|
||||
}
|
||||
}
|
||||
toStore3 := []blocks.VerifiedRODataColumn{verifiedSidecars3[106]}
|
||||
|
||||
err = storage.Save(toStore3)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Block 4
|
||||
minimumColumnsCountToReconstruct := peerdas.MinimumColumnCountToReconstruct()
|
||||
block4, _, verifiedSidecars4 := util.GenerateTestFuluBlockWithSidecars(t, blobCount, util.WithSlot(4))
|
||||
root4 := block4.Root()
|
||||
toStore4 := []blocks.VerifiedRODataColumn{verifiedSidecars4[106]}
|
||||
|
||||
toStoreCount := minimumColumnsCountToReconstruct - 1
|
||||
toStore4 := make([]blocks.VerifiedRODataColumn, 0, toStoreCount)
|
||||
|
||||
for i := uint64(0); uint64(len(toStore4)) < toStoreCount; i++ {
|
||||
sidecar := verifiedSidecars4[minimumColumnsCountToReconstruct+i]
|
||||
if sidecar.Index == 81 {
|
||||
continue
|
||||
}
|
||||
|
||||
toStore4 = append(toStore4, sidecar)
|
||||
}
|
||||
|
||||
err = storage.Save(toStore4)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Block 5
|
||||
minimumColumnsCountToReconstruct := peerdas.MinimumColumnCountToReconstruct()
|
||||
block5, _, verifiedSidecars5 := util.GenerateTestFuluBlockWithSidecars(t, blobCount, util.WithSlot(5))
|
||||
root5 := block5.Root()
|
||||
|
||||
toStoreCount := minimumColumnsCountToReconstruct - 1
|
||||
toStore5 := make([]blocks.VerifiedRODataColumn, 0, toStoreCount)
|
||||
|
||||
for i := uint64(0); uint64(len(toStore5)) < toStoreCount; i++ {
|
||||
sidecar := verifiedSidecars5[minimumColumnsCountToReconstruct+i]
|
||||
if sidecar.Index == 81 {
|
||||
continue
|
||||
}
|
||||
|
||||
toStore5 = append(toStore5, sidecar)
|
||||
}
|
||||
toStore5 := []blocks.VerifiedRODataColumn{verifiedSidecars5[106]}
|
||||
|
||||
err = storage.Save(toStore5)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Custody columns with this private key and 4-cgc: 31, 81, 97, 105
|
||||
privateKeyBytes := [32]byte{1}
|
||||
privateKey, err := crypto.UnmarshalSecp256k1PrivateKey(privateKeyBytes[:])
|
||||
// Block 6
|
||||
block6, _, verifiedSidecars6 := util.GenerateTestFuluBlockWithSidecars(t, blobCount, util.WithSlot(6))
|
||||
root6 := block6.Root()
|
||||
toStore6 := []blocks.VerifiedRODataColumn{verifiedSidecars6[106]}
|
||||
|
||||
err = storage.Save(toStore6)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Block 7
|
||||
block7, _, verifiedSidecars7 := util.GenerateTestFuluBlockWithSidecars(t, blobCount, util.WithSlot(7))
|
||||
root7 := block7.Root()
|
||||
toStore7 := []blocks.VerifiedRODataColumn{verifiedSidecars7[106]}
|
||||
|
||||
err = storage.Save(toStore7)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Peers
|
||||
protocol := fmt.Sprintf("%s/ssz_snappy", p2p.RPCDataColumnSidecarsByRangeTopicV1)
|
||||
byRangeProtocol := fmt.Sprintf("%s/ssz_snappy", p2p.RPCDataColumnSidecarsByRangeTopicV1)
|
||||
byRootProtocol := fmt.Sprintf("%s/ssz_snappy", p2p.RPCDataColumnSidecarsByRootTopicV1)
|
||||
|
||||
privateKeyBytes := [32]byte{1}
|
||||
privateKey, err := crypto.UnmarshalSecp256k1PrivateKey(privateKeyBytes[:])
|
||||
require.NoError(t, err)
|
||||
|
||||
p2p, other := testp2p.NewTestP2P(t), testp2p.NewTestP2P(t, libp2p.Identity(privateKey))
|
||||
p2p.Peers().SetConnectionState(other.PeerID(), peers.Connected)
|
||||
p2p.Connect(other)
|
||||
|
||||
p2p.Peers().SetChainState(other.PeerID(), ðpb.StatusV2{
|
||||
HeadSlot: 5,
|
||||
HeadSlot: 8,
|
||||
})
|
||||
|
||||
expectedRequest := ðpb.DataColumnSidecarsByRangeRequest{
|
||||
StartSlot: 4,
|
||||
Count: 2,
|
||||
Columns: []uint64{31, 81},
|
||||
}
|
||||
p2p.Peers().SetMetadata(other.PeerID(), wrapper.WrappedMetadataV2(&pb.MetaDataV2{
|
||||
CustodyGroupCount: 128,
|
||||
}))
|
||||
|
||||
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
|
||||
|
||||
@@ -149,21 +160,82 @@ func TestFetchDataColumnSidecars(t *testing.T) {

newDataColumnsVerifier := newDataColumnsVerifierFromInitializer(initializer)

other.SetStreamHandler(protocol, func(stream network.Stream) {
other.SetStreamHandler(byRangeProtocol, func(stream network.Stream) {
expectedRequest := &ethpb.DataColumnSidecarsByRangeRequest{
StartSlot: 3,
Count: 5,
Columns: []uint64{31, 81},
}

actualRequest := new(ethpb.DataColumnSidecarsByRangeRequest)
err := other.Encoding().DecodeWithMaxLength(stream, actualRequest)
assert.NoError(t, err)
assert.DeepEqual(t, expectedRequest, actualRequest)

err = WriteDataColumnSidecarChunk(stream, clock, other.Encoding(), verifiedSidecars4[31].DataColumnSidecar)
err = WriteDataColumnSidecarChunk(stream, clock, other.Encoding(), verifiedSidecars3[31].DataColumnSidecar)
assert.NoError(t, err)

err = WriteDataColumnSidecarChunk(stream, clock, other.Encoding(), verifiedSidecars3[81].DataColumnSidecar)
assert.NoError(t, err)

err = WriteDataColumnSidecarChunk(stream, clock, other.Encoding(), verifiedSidecars4[81].DataColumnSidecar)
assert.NoError(t, err)

err = WriteDataColumnSidecarChunk(stream, clock, other.Encoding(), verifiedSidecars5[31].DataColumnSidecar)
assert.NoError(t, err)

err = WriteDataColumnSidecarChunk(stream, clock, other.Encoding(), verifiedSidecars6[31].DataColumnSidecar)
assert.NoError(t, err)

err = stream.CloseWrite()
assert.NoError(t, err)
})

other.SetStreamHandler(byRootProtocol, func(stream network.Stream) {
allBut31And81And106 := make([]uint64, 0, numberOfColumns-3)
allBut31And106 := make([]uint64, 0, numberOfColumns-2)
allBut106 := make([]uint64, 0, numberOfColumns-1)
for i := range numberOfColumns {
if !map[uint64]bool{31: true, 81: true, 106: true}[i] {
allBut31And81And106 = append(allBut31And81And106, i)
}
if !map[uint64]bool{31: true, 106: true}[i] {
allBut31And106 = append(allBut31And106, i)
}

if i != 106 {
allBut106 = append(allBut106, i)
}
}

expectedRequest := &p2ptypes.DataColumnsByRootIdentifiers{
{
BlockRoot: root7[:],
Columns: allBut106,
},
{
BlockRoot: root5[:],
Columns: allBut31And106,
},
{
BlockRoot: root6[:],
Columns: allBut31And106,
},
}

actualRequest := new(p2ptypes.DataColumnsByRootIdentifiers)
err := other.Encoding().DecodeWithMaxLength(stream, actualRequest)
assert.NoError(t, err)
assert.DeepEqual(t, expectedRequest, actualRequest)

err = WriteDataColumnSidecarChunk(stream, clock, other.Encoding(), verifiedSidecars5[81].DataColumnSidecar)
assert.NoError(t, err)

for _, index := range allBut31And81And106 {
err = WriteDataColumnSidecarChunk(stream, clock, other.Encoding(), verifiedSidecars6[index].DataColumnSidecar)
assert.NoError(t, err)
}

err = stream.CloseWrite()
assert.NoError(t, err)
})
@@ -178,51 +250,39 @@ func TestFetchDataColumnSidecars(t *testing.T) {
NewVerifier: newDataColumnsVerifier,
}

expected := map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn{
expectedResult := map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn{
root1: {verifiedSidecars1[31], verifiedSidecars1[81], verifiedSidecars1[106]},
// no root2 (no commitments in this block)
root3: {verifiedSidecars3[31], verifiedSidecars3[81], verifiedSidecars3[106]},
root3: {verifiedSidecars3[106], verifiedSidecars3[31], verifiedSidecars3[81]},
root4: {verifiedSidecars4[31], verifiedSidecars4[81], verifiedSidecars4[106]},
root5: {verifiedSidecars5[31], verifiedSidecars5[81], verifiedSidecars5[106]},
root5: {verifiedSidecars5[106], verifiedSidecars5[31], verifiedSidecars5[81]},
root6: {verifiedSidecars6[31], verifiedSidecars6[81], verifiedSidecars6[106]},
root7: {verifiedSidecars7[106]},
}

blocks := []blocks.ROBlock{block1, block2, block3, block4, block5}
actual, err := FetchDataColumnSidecars(params, blocks, indices)
expectedMissingIndicesBYRoots := map[[fieldparams.RootLength]byte]map[uint64]bool{
root7: {31: true, 81: true},
}

blocks := []blocks.ROBlock{block1, block2, block3, block4, block5, block6, block7}
actualResult, actualMissingRoots, err := FetchDataColumnSidecars(params, blocks, indices)
require.NoError(t, err)

require.Equal(t, len(expected), len(actual))
for root := range expected {
require.Equal(t, len(expected[root]), len(actual[root]))
for i := range expected[root] {
require.DeepSSZEqual(t, expected[root][i], actual[root][i])
require.Equal(t, len(expectedResult), len(actualResult))
for root := range expectedResult {
require.Equal(t, len(expectedResult[root]), len(actualResult[root]))
for i := range expectedResult[root] {
require.DeepSSZEqual(t, expectedResult[root][i], actualResult[root][i])
}
}
}

func TestCategorizeIndices(t *testing.T) {
storage := filesystem.NewEphemeralDataColumnStorage(t)

_, verifiedRoSidecars := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{
{Slot: 1, Index: 12, Column: [][]byte{{1}, {2}, {3}}},
{Slot: 1, Index: 14, Column: [][]byte{{1}, {2}, {3}}},
})

err := storage.Save(verifiedRoSidecars)
require.NoError(t, err)

expectedToQuery := map[uint64]bool{13: true}
expectedStored := map[uint64]bool{12: true, 14: true}

actualToQuery, actualStored := categorizeIndices(storage, verifiedRoSidecars[0].BlockRoot(), []uint64{12, 13, 14})

require.Equal(t, len(expectedToQuery), len(actualToQuery))
require.Equal(t, len(expectedStored), len(actualStored))

for index := range expectedToQuery {
require.Equal(t, true, actualToQuery[index])
}
for index := range expectedStored {
require.Equal(t, true, actualStored[index])
require.Equal(t, len(expectedMissingIndicesBYRoots), len(actualMissingRoots))
for root, expectedMissingIndices := range expectedMissingIndicesBYRoots {
actualMissingIndices := actualMissingRoots[root]
require.Equal(t, len(expectedMissingIndices), len(actualMissingIndices))
for index := range expectedMissingIndices {
require.Equal(t, true, actualMissingIndices[index])
}
}
}

@@ -841,6 +901,7 @@ func TestComputeIndicesByRootByPeer(t *testing.T) {
[fieldparams.RootLength]byte{3}: {38: true},
},
peerIDs[3]: {
[fieldparams.RootLength]byte{2}: {10: true},
[fieldparams.RootLength]byte{3}: {10: true},
},
}
@@ -953,39 +1014,6 @@ func TestSlortedSliceFromMap(t *testing.T) {
require.DeepEqual(t, expected, actual)
}

func TestComputeSlotByBlockRoot(t *testing.T) {
const (
count = 3
multiplier = 10
)

roBlocks := make([]blocks.ROBlock, 0, count)
for i := range count {
signedBlock := util.NewBeaconBlock()
signedBlock.Block.Slot = primitives.Slot(i).Mul(multiplier)
roSignedBlock, err := blocks.NewSignedBeaconBlock(signedBlock)
require.NoError(t, err)

roBlock, err := blocks.NewROBlockWithRoot(roSignedBlock, [fieldparams.RootLength]byte{byte(i)})
require.NoError(t, err)

roBlocks = append(roBlocks, roBlock)
}

expected := map[[fieldparams.RootLength]byte]primitives.Slot{
[fieldparams.RootLength]byte{0}: primitives.Slot(0),
[fieldparams.RootLength]byte{1}: primitives.Slot(10),
[fieldparams.RootLength]byte{2}: primitives.Slot(20),
}

actual := computeSlotByBlockRoot(roBlocks)

require.Equal(t, len(expected), len(actual))
for k, v := range expected {
require.Equal(t, v, actual[k])
}
}

func TestComputeTotalCount(t *testing.T) {
input := map[[fieldparams.RootLength]byte]map[uint64]bool{
[fieldparams.RootLength]byte{1}: {1: true, 3: true},

@@ -34,7 +34,6 @@ go_library(
"//beacon-chain/verification:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",

@@ -395,6 +395,25 @@ func (f *blocksFetcher) fetchSidecars(ctx context.Context, pid peer.ID, peers []
return blobsPid, errors.Wrap(err, "custody info")
}

currentSlot := f.clock.CurrentSlot()
currentEpoch := slots.ToEpoch(currentSlot)

roBlocks := make([]blocks.ROBlock, 0, len(postFulu))
for _, blockWithSidecars := range postFulu {
blockSlot := blockWithSidecars.Block.Block().Slot()
blockEpoch := slots.ToEpoch(blockSlot)

if params.WithinDAPeriod(blockEpoch, currentEpoch) {
roBlocks = append(roBlocks, blockWithSidecars.Block)
}
}

// Return early if there are no blocks that need data column sidecars.
if len(roBlocks) == 0 {
return blobsPid, nil
}

// Some blocks need data column sidecars, fetch them.
params := prysmsync.DataColumnSidecarsParams{
Ctx: ctx,
Tor: f.clock,
@@ -405,16 +424,19 @@ func (f *blocksFetcher) fetchSidecars(ctx context.Context, pid peer.ID, peers []
NewVerifier: f.cv,
}

roBlocks := make([]blocks.ROBlock, 0, len(postFulu))
for _, block := range postFulu {
roBlocks = append(roBlocks, block.Block)
}

verifiedRoDataColumnsByRoot, err := prysmsync.FetchDataColumnSidecars(params, roBlocks, info.CustodyColumns)
verifiedRoDataColumnsByRoot, missingIndicesByRoot, err := prysmsync.FetchDataColumnSidecars(params, roBlocks, info.CustodyColumns)
if err != nil {
return "", errors.Wrap(err, "fetch data column sidecars")
}

if len(missingIndicesByRoot) > 0 {
prettyMissingIndicesByRoot := make(map[string][]uint64, len(missingIndicesByRoot))
for root, indices := range missingIndicesByRoot {
prettyMissingIndicesByRoot[fmt.Sprintf("%#x", root)] = sortedSliceFromMap(indices)
}
return "", errors.Errorf("some sidecars are still missing after fetch: %v", prettyMissingIndicesByRoot)
}

// Populate the response.
for i := range bwScs {
bwSc := &bwScs[i]

@@ -1385,6 +1385,11 @@ func TestFetchSidecars(t *testing.T) {
nower := func() time.Time { return now }
clock := startup.NewClock(genesisTime, genesisValidatorRoot, startup.WithNower(nower))

// Create a data columns storage.
dir := t.TempDir()
dataColumnStorage, err := filesystem.NewDataColumnStorage(ctx, filesystem.WithDataColumnBasePath(dir))
require.NoError(t, err)

// Define a Deneb block with blobs out of retention period.
denebBlock := util.NewBeaconBlockDeneb()
denebBlock.Block.Slot = 0 // Genesis slot, out of retention period.
@@ -1393,33 +1398,52 @@ func TestFetchSidecars(t *testing.T) {
roDebebBlock, err := blocks.NewROBlock(signedDenebBlock)
require.NoError(t, err)

// Define a Fulu block with blobs in the retention period.
fuluBlock := util.NewBeaconBlockFulu()
fuluBlock.Block.Slot = slotsPerEpoch // Within retention period.
fuluBlock.Block.Body.BlobKzgCommitments = [][]byte{make([]byte, fieldparams.KzgCommitmentSize)} // Dummy commitment.
signedFuluBlock, err := blocks.NewSignedBeaconBlock(fuluBlock)
// Define a Fulu block with blobs before the retention period.
fuluBlock1 := util.NewBeaconBlockFulu()
fuluBlock1.Block.Slot = slotsPerEpoch.Sub(1) // Before the retention period.
fuluBlock1.Block.Body.BlobKzgCommitments = [][]byte{make([]byte, fieldparams.KzgCommitmentSize)} // Dummy commitment.
signedFuluBlock1, err := blocks.NewSignedBeaconBlock(fuluBlock1)
require.NoError(t, err)
roFuluBlock, err := blocks.NewROBlock(signedFuluBlock)
roFuluBlock1, err := blocks.NewROBlock(signedFuluBlock1)
require.NoError(t, err)

bodyRoot, err := fuluBlock.Block.Body.HashTreeRoot()
bodyRootFulu1, err := fuluBlock1.Block.Body.HashTreeRoot()
require.NoError(t, err)

// Create and save data column sidecars for this fulu block in the database.
params := make([]util.DataColumnParam, 0, numberOfColumns)
// Create and save data column sidecars for fulu block 1 in the database.
paramsFulu1 := make([]util.DataColumnParam, 0, numberOfColumns)
for i := range numberOfColumns {
param := util.DataColumnParam{Index: i, Slot: slotsPerEpoch, BodyRoot: bodyRoot[:]}
params = append(params, param)
param := util.DataColumnParam{Index: i, Slot: slotsPerEpoch, BodyRoot: bodyRootFulu1[:]}
paramsFulu1 = append(paramsFulu1, param)
}
_, verifiedRoDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(t, params)
_, verifiedRoDataColumnSidecarsFulu1 := util.CreateTestVerifiedRoDataColumnSidecars(t, paramsFulu1)

// Create a data columns storage.
dir := t.TempDir()
dataColumnStorage, err := filesystem.NewDataColumnStorage(ctx, filesystem.WithDataColumnBasePath(dir))
// Save the data column sidecars for block fulu 1 to the storage.
err = dataColumnStorage.Save(verifiedRoDataColumnSidecarsFulu1)
require.NoError(t, err)

// Save the data column sidecars to the storage.
err = dataColumnStorage.Save(verifiedRoDataColumnSidecars)
// Define a Fulu block with blobs in the retention period.
fuluBlock2 := util.NewBeaconBlockFulu()
fuluBlock2.Block.Slot = slotsPerEpoch // Within retention period.
fuluBlock2.Block.Body.BlobKzgCommitments = [][]byte{make([]byte, fieldparams.KzgCommitmentSize)} // Dummy commitment.
signedFuluBlock2, err := blocks.NewSignedBeaconBlock(fuluBlock2)
require.NoError(t, err)
roFuluBlock2, err := blocks.NewROBlock(signedFuluBlock2)
require.NoError(t, err)

bodyRootFulu2, err := fuluBlock2.Block.Body.HashTreeRoot()
require.NoError(t, err)

// Create and save data column sidecars for fulu block 2 in the database.
paramsFulu2 := make([]util.DataColumnParam, 0, numberOfColumns)
for i := range numberOfColumns {
param := util.DataColumnParam{Index: i, Slot: slotsPerEpoch, BodyRoot: bodyRootFulu2[:]}
paramsFulu2 = append(paramsFulu2, param)
}
_, verifiedRoDataColumnSidecarsFulu2 := util.CreateTestVerifiedRoDataColumnSidecars(t, paramsFulu2)

// Save the data column sidecars for block fulu 2 to the storage.
err = dataColumnStorage.Save(verifiedRoDataColumnSidecarsFulu2)
require.NoError(t, err)

// Create a blocks fetcher.
@@ -1432,7 +1456,8 @@ func TestFetchSidecars(t *testing.T) {
// Fetch sidecars.
blocksWithSidecars := []blocks.BlockWithROSidecars{
{Block: roDebebBlock},
{Block: roFuluBlock},
{Block: roFuluBlock1},
{Block: roFuluBlock2},
}
pid, err := fetcher.fetchSidecars(ctx, "", nil, blocksWithSidecars)
require.NoError(t, err)
@@ -1442,12 +1467,15 @@ func TestFetchSidecars(t *testing.T) {
require.Equal(t, 0, len(blocksWithSidecars[0].Blobs))
require.Equal(t, 0, len(blocksWithSidecars[0].Columns))
require.Equal(t, 0, len(blocksWithSidecars[1].Blobs))
require.Equal(t, 0, len(blocksWithSidecars[1].Columns))
require.Equal(t, 0, len(blocksWithSidecars[2].Blobs))

// We don't check the content of the columns here. The extensive test is done
// in TestFetchDataColumnSidecars.
require.Equal(t, samplesPerSlot, uint64(len(blocksWithSidecars[1].Columns)))
require.Equal(t, samplesPerSlot, uint64(len(blocksWithSidecars[2].Columns)))
})
}

func TestFirstFuluIndex(t *testing.T) {
bellatrix := util.NewBeaconBlockBellatrix()
signedBellatrix, err := blocks.NewSignedBeaconBlock(bellatrix)

@@ -22,7 +22,6 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/sync"
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/crypto/rand"
@@ -237,14 +236,14 @@ func (s *Service) fetchOriginSidecars(peers []peer.ID) error {
blockVersion := roBlock.Version()

if blockVersion >= version.Fulu {
if err := s.fetchOriginColumns(roBlock, delay); err != nil {
if err := s.fetchOriginDataColumnSidecars(roBlock, delay); err != nil {
return errors.Wrap(err, "fetch origin columns")
}
return nil
}

if blockVersion >= version.Deneb {
if err := s.fetchOriginBlobs(peers, roBlock); err != nil {
if err := s.fetchOriginBlobSidecars(peers, roBlock); err != nil {
return errors.Wrap(err, "fetch origin blobs")
}
}
@@ -356,7 +355,7 @@ func missingBlobRequest(blk blocks.ROBlock, store *filesystem.BlobStorage) (p2pt
return req, nil
}

func (s *Service) fetchOriginBlobs(pids []peer.ID, rob blocks.ROBlock) error {
func (s *Service) fetchOriginBlobSidecars(pids []peer.ID, rob blocks.ROBlock) error {
r := rob.Root()

req, err := missingBlobRequest(rob, s.cfg.BlobStorage)
@@ -394,11 +393,12 @@ func (s *Service) fetchOriginBlobs(pids []peer.ID, rob blocks.ROBlock) error {
return fmt.Errorf("no connected peer able to provide blobs for checkpoint sync block %#x", r)
}

func (s *Service) fetchOriginColumns(roBlock blocks.ROBlock, delay time.Duration) error {
func (s *Service) fetchOriginDataColumnSidecars(roBlock blocks.ROBlock, delay time.Duration) error {
const (
errorMessage = "Failed to fetch origin data column sidecars"
warningIteration = 10
)

samplesPerSlot := params.BeaconConfig().SamplesPerSlot

// Return early if the origin block has no blob commitments.
@@ -411,7 +411,7 @@ func (s *Service) fetchOriginColumns(roBlock blocks.ROBlock, delay time.Duration
return nil
}

// Compute the columns to request.
// Compute the indices we need to custody.
custodyGroupCount, err := s.cfg.P2P.CustodyGroupCount()
if err != nil {
return errors.Wrap(err, "custody group count")
@@ -423,9 +423,30 @@ func (s *Service) fetchOriginColumns(roBlock blocks.ROBlock, delay time.Duration
return errors.Wrap(err, "fetch peer info")
}

// Fetch origin data column sidecars.
root := roBlock.Root()

log := log.WithFields(logrus.Fields{
"blockRoot": fmt.Sprintf("%#x", roBlock.Root()),
"blobCount": len(commitments),
"dataColumnCount": len(info.CustodyColumns),
})

// Check if some needed data column sidecars are missing.
stored := s.cfg.DataColumnStorage.Summary(root).Stored()
missing := make(map[uint64]bool, len(info.CustodyColumns))
for column := range info.CustodyColumns {
if !stored[column] {
missing[column] = true
}
}

if len(missing) == 0 {
// All needed data column sidecars are present, exit early.
log.Info("All needed origin data column sidecars are already present")

return nil
}

params := sync.DataColumnSidecarsParams{
Ctx: s.ctx,
Tor: s.clock,
@@ -436,46 +457,39 @@ func (s *Service) fetchOriginColumns(roBlock blocks.ROBlock, delay time.Duration
DownscorePeerOnRPCFault: true,
}

var verifiedRoDataColumnsByRoot map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn
for attempt := uint64(0); ; attempt++ {
verifiedRoDataColumnsByRoot, err = sync.FetchDataColumnSidecars(params, []blocks.ROBlock{roBlock}, info.CustodyColumns)
if err == nil {
break
// Retrieve missing data column sidecars.
verifiedRoSidecarsByRoot, missingIndicesByRoot, err := sync.FetchDataColumnSidecars(params, []blocks.ROBlock{roBlock}, missing)
if err != nil {
return errors.Wrap(err, "fetch data column sidecars")
}

log := log.WithError(err).WithFields(logrus.Fields{
"attempt": attempt,
"delay": delay,
// Save retrieved data column sidecars.
if err := s.cfg.DataColumnStorage.Save(verifiedRoSidecarsByRoot[root]); err != nil {
return errors.Wrap(err, "save data column sidecars")
}

// Check if some needed data column sidecars are missing.
if len(missingIndicesByRoot) == 0 {
log.Info("Retrieved all needed origin data column sidecars")

return nil
}

// Some sidecars are still missing.
log := log.WithFields(logrus.Fields{
"attempt": attempt,
"missingIndices": sortedSliceFromMap(missingIndicesByRoot[root]),
"delay": delay,
})

if attempt%warningIteration == 0 && attempt > 0 {
log.Warning(errorMessage)
time.Sleep(delay)

continue
logFunc := log.Debug
if attempt > 0 && attempt%warningIteration == 0 {
logFunc = log.Warning
}

log.Debug(errorMessage)
time.Sleep(delay)
logFunc("Failed to fetch some origin data column sidecars, retrying later")
}

// Save origin data columns to disk.
verifiedRoDataColumnsSidecars, ok := verifiedRoDataColumnsByRoot[root]
if !ok {
return fmt.Errorf("cannot extract origins data column sidecars for block root %#x - should never happen", root)
}

if err := s.cfg.DataColumnStorage.Save(verifiedRoDataColumnsSidecars); err != nil {
return errors.Wrap(err, "save data column sidecars")
}

log.WithFields(logrus.Fields{
"blockRoot": fmt.Sprintf("%#x", roBlock.Root()),
"blobCount": len(commitments),
"columnCount": len(verifiedRoDataColumnsSidecars),
}).Info("Successfully downloaded data column sidecars for checkpoint sync block")

return nil
}

func shufflePeers(pids []peer.ID) {

@@ -699,7 +699,7 @@ func TestFetchOriginColumns(t *testing.T) {
roBlock, err := blocks.NewROBlock(signedBlock)
require.NoError(t, err)

err = service.fetchOriginColumns(roBlock, delay)
err = service.fetchOriginDataColumnSidecars(roBlock, delay)
require.NoError(t, err)
})

@@ -721,7 +721,7 @@ func TestFetchOriginColumns(t *testing.T) {
err := storage.Save(verifiedSidecars)
require.NoError(t, err)

err = service.fetchOriginColumns(roBlock, delay)
err = service.fetchOriginDataColumnSidecars(roBlock, delay)
require.NoError(t, err)
})

@@ -747,10 +747,35 @@ func TestFetchOriginColumns(t *testing.T) {
other.ENR().Set(peerdas.Cgc(numberOfCustodyGroups))
p2p.Peers().UpdateENR(other.ENR(), other.PeerID())

expectedRequest := &ethpb.DataColumnSidecarsByRangeRequest{
StartSlot: 0,
Count: 1,
Columns: []uint64{1, 17, 19, 42, 75, 87, 102, 117},
allBut42 := make([]uint64, 0, numberOfCustodyGroups-1)
for i := range numberOfCustodyGroups {
if i != 42 {
allBut42 = append(allBut42, i)
}
}

expectedRequests := []*ethpb.DataColumnSidecarsByRangeRequest{
{
StartSlot: 0,
Count: 1,
Columns: []uint64{1, 17, 19, 42, 75, 87, 102, 117},
},
{
StartSlot: 0,
Count: 1,
Columns: allBut42,
},
{
StartSlot: 0,
Count: 1,
Columns: []uint64{1, 17, 19, 75, 87, 102, 117},
},
}

toRespondByAttempt := [][]uint64{
{42},
{},
{1, 17, 19, 75, 87, 102, 117},
}

clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
@@ -783,36 +808,33 @@ func TestFetchOriginColumns(t *testing.T) {
}

// Do not respond any sidecar on the first attempt, and respond everything requested on the second one.
firstAttempt := true
attempt := 0
other.SetStreamHandler(protocol, func(stream network.Stream) {
actualRequest := new(ethpb.DataColumnSidecarsByRangeRequest)
err := other.Encoding().DecodeWithMaxLength(stream, actualRequest)
assert.NoError(t, err)
assert.DeepEqual(t, expectedRequest, actualRequest)
assert.DeepEqual(t, expectedRequests[attempt], actualRequest)

if firstAttempt {
firstAttempt = false
err = stream.CloseWrite()
assert.NoError(t, err)
return
}

for _, column := range actualRequest.Columns {
for _, column := range toRespondByAttempt[attempt] {
err = prysmSync.WriteDataColumnSidecarChunk(stream, clock, other.Encoding(), verifiedRoSidecars[column].DataColumnSidecar)
assert.NoError(t, err)
}

err = stream.CloseWrite()
assert.NoError(t, err)

attempt++
})

err = service.fetchOriginColumns(roBlock, delay)
err = service.fetchOriginDataColumnSidecars(roBlock, delay)
require.NoError(t, err)

// Check all corresponding sidecars are saved in the store.
summary := storage.Summary(roBlock.Root())
for _, index := range expectedRequest.Columns {
require.Equal(t, true, summary.HasIndex(index))
for _, indices := range toRespondByAttempt {
for _, index := range indices {
require.Equal(t, true, summary.HasIndex(index))
}
}
})
}

@@ -115,11 +115,19 @@ func (s *Service) requestAndSaveMissingDataColumnSidecars(blks []blocks.ROBlock)
NewVerifier: s.newColumnsVerifier,
}

sidecarsByRoot, err := FetchDataColumnSidecars(params, blks, info.CustodyColumns)
sidecarsByRoot, missingIndicesByRoot, err := FetchDataColumnSidecars(params, blks, info.CustodyColumns)
if err != nil {
return errors.Wrap(err, "fetch data column sidecars")
}

if len(missingIndicesByRoot) > 0 {
prettyMissingIndicesByRoot := make(map[string][]uint64, len(missingIndicesByRoot))
for root, indices := range missingIndicesByRoot {
prettyMissingIndicesByRoot[fmt.Sprintf("%#x", root)] = sortedSliceFromMap(indices)
}
return errors.Errorf("some sidecars are still missing after fetch: %v", prettyMissingIndicesByRoot)
}

// Save the sidecars to the storage.
count := 0
for _, sidecars := range sidecarsByRoot {

changelog/manu-peerdas-syncing-disjoint-network.md (new file, 10 lines)
@@ -0,0 +1,10 @@
### Changed
- Filtering peers for data column subnets: Added a one-epoch slack to the peer’s head slot view.
- Fetching data column sidecars: If not all requested sidecars are available for a given root, return the successfully retrieved ones along with a map indicating which could not be fetched.
- Fetching origin data column sidecars: If only some sidecars are fetched, save the retrieved ones and retry fetching the missing ones on the next attempt.

### Added
- Implemented syncing in a disjoint network with respect to the data column sidecar subnets peers subscribe to.

### Fixed
- Initial sync: Do not request data column sidecars for blocks before the retention period.
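
For reference, a minimal Go sketch of the caller pattern these entries describe, based on the call sites changed in this commit. The helper name `fetchAndStoreColumns`, the `storage` parameter type, and the exact error handling are illustrative assumptions, not code from this commit; only the three-value `FetchDataColumnSidecars` signature and the greedy-save behavior come from the diff above.

```go
// Sketch only: FetchDataColumnSidecars now works best effort, returning both the
// sidecars it could retrieve and, per block root, the indices that are still missing.
func fetchAndStoreColumns(params sync.DataColumnSidecarsParams, roBlocks []blocks.ROBlock, indices map[uint64]bool, storage *filesystem.DataColumnStorage) error {
	sidecarsByRoot, missingIndicesByRoot, err := sync.FetchDataColumnSidecars(params, roBlocks, indices)
	if err != nil {
		return errors.Wrap(err, "fetch data column sidecars")
	}

	// Greedily persist everything that was retrieved, even if some sidecars are missing,
	// so that a later attempt only has to request the leftovers from peers.
	for _, sidecars := range sidecarsByRoot {
		if err := storage.Save(sidecars); err != nil {
			return errors.Wrap(err, "save data column sidecars")
		}
	}

	// The caller decides what to do with the leftovers: initial sync fails the batch,
	// while the origin sidecar fetch logs, sleeps, and retries on the next attempt.
	if len(missingIndicesByRoot) > 0 {
		return errors.Errorf("still missing sidecars for %d block roots", len(missingIndicesByRoot))
	}
	return nil
}
```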