PeerDAS: Implement syncing in a disjoint network (also known as a "perfect PeerDAS" network). (#15644)

* `computeIndicesByRootByPeer`: Allow one epoch of slack when comparing a requested slot against a peer's advertised head slot.
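
A minimal sketch of the slack-epoch idea, with hypothetical helper names (not Prysm's actual identifiers): a peer whose advertised head slot trails the requested slot by up to one epoch is still considered able to serve it, which tolerates slightly stale peer status messages.

```go
package main

import "fmt"

const slotsPerEpoch = 32

// peerCanServe reports whether a peer advertising peerHeadSlot is treated
// as able to serve data at targetSlot, allowing one extra epoch of slack
// so that a slightly stale status message does not disqualify the peer.
// Hypothetical helper; not Prysm's actual identifier.
func peerCanServe(peerHeadSlot, targetSlot uint64) bool {
	return peerHeadSlot+slotsPerEpoch >= targetSlot
}

func main() {
	fmt.Println(peerCanServe(980, 1000)) // true: 20 slots behind, within one epoch of slack
	fmt.Println(peerCanServe(900, 1000)) // false: more than one epoch behind
}
```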

* `FetchDataColumnSidecars`: Switch to best-effort mode.

Before this commit, this function returned an error as soon as at least ONE requested sidecar could not be retrieved.

Now, this function retrieves what it can (best-effort mode) and returns an additional value: the map of sidecars still missing after the function has run.

It is now the caller's responsibility to inspect this extra return value and decide what to do when some requested sidecars are still missing, as sketched below.
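
A minimal sketch of the new best-effort contract, using simplified stand-in types rather than Prysm's real ones:

```go
package main

import "fmt"

// Simplified, hypothetical stand-ins for Prysm's block-root and sidecar types.
type (
	blockRoot [32]byte
	sidecar   struct{ Index uint64 }
)

// fetchDataColumnSidecars sketches the new contract: instead of failing as
// soon as one sidecar cannot be retrieved, it returns everything it managed
// to fetch plus a map of the sidecars still missing per block root.
func fetchDataColumnSidecars(wanted map[blockRoot][]uint64) ([]sidecar, map[blockRoot][]uint64) {
	var fetched []sidecar
	missing := make(map[blockRoot][]uint64)
	for root, indices := range wanted {
		for _, index := range indices {
			if index%2 == 0 { // pretend only even columns are currently retrievable
				fetched = append(fetched, sidecar{Index: index})
			} else {
				missing[root] = append(missing[root], index)
			}
		}
	}
	return fetched, missing
}

func main() {
	fetched, missing := fetchDataColumnSidecars(map[blockRoot][]uint64{{}: {1, 2, 3, 4}})
	// The caller inspects the missing map and decides whether to
	// retry, back off, or give up.
	fmt.Println(len(fetched), missing)
}
```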

* `fetchOriginDataColumnSidecars`: Optimize.

Before this commit, when running `fetchOriginDataColumnSidecars`, all the missing sidecars had to be retrieved in a single shot for the sidecars to be considered available. The issue was that if, for example, `sync.FetchDataColumnSidecars` returned all but one sidecar, the returned sidecars were NOT saved, and on the next iteration all the previously fetched sidecars had to be requested again from peers.

After this commit, we greedily save all fetched sidecars, solving this issue.
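
A hedged sketch of the resulting retry loop, with hypothetical helper names standing in for the real logic in `fetchOriginDataColumnSidecars`: every iteration persists whatever was fetched, so the next attempt only requests what is still missing.

```go
package main

import "fmt"

// fetchUntilComplete greedily saves partial progress: after each fetch
// attempt, whatever was retrieved is persisted, and only the remainder
// is requested on the next attempt. Hypothetical names; a sketch of the
// idea, not Prysm's implementation.
func fetchUntilComplete(wanted []uint64, fetch func([]uint64) []uint64, save func([]uint64), maxAttempts int) []uint64 {
	missing := wanted
	for attempt := 0; attempt < maxAttempts && len(missing) > 0; attempt++ {
		got := fetch(missing)
		save(got) // greedily persist partial progress
		missing = subtract(missing, got)
	}
	return missing
}

// subtract returns the elements of a that are not in b.
func subtract(a, b []uint64) []uint64 {
	seen := make(map[uint64]bool, len(b))
	for _, x := range b {
		seen[x] = true
	}
	var out []uint64
	for _, x := range a {
		if !seen[x] {
			out = append(out, x)
		}
	}
	return out
}

func main() {
	// First attempt returns only column 42, the second nothing,
	// the third the rest (mirroring the test scenario below).
	responses := [][]uint64{{42}, {}, {1, 17, 19}}
	i := 0
	fetch := func(_ []uint64) []uint64 { r := responses[i]; i++; return r }
	save := func(got []uint64) { fmt.Println("saved:", got) }
	fmt.Println("still missing:", fetchUntilComplete([]uint64{1, 17, 19, 42}, fetch, save, 3))
}
```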

* Initial sync: Do not fetch data column sidecars older than the retention period.
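
A rough illustration of such a retention guard, assuming a window shaped like the spec's MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS constant (the exact wiring in Prysm may differ):

```go
package main

import "fmt"

const slotsPerEpoch = 32

// withinRetention sketches the guard: a slot older than the sidecar
// retention window is simply not requested from peers during initial
// sync. The parameter naming is an assumption for illustration.
func withinRetention(slot, currentSlot, minEpochsForRequests uint64) bool {
	retentionSlots := minEpochsForRequests * slotsPerEpoch
	if currentSlot < retentionSlots {
		return true // the chain is younger than the retention window
	}
	return slot >= currentSlot-retentionSlots
}

func main() {
	fmt.Println(withinRetention(100, 500_000, 4096))     // false: before the window
	fmt.Println(withinRetention(499_999, 500_000, 4096)) // true
}
```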

* Implement perfect PeerDAS syncing.
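
The core difficulty in a disjoint network is that no single peer custodies every column, so a request has to be split across peers according to their custody sets. A hypothetical sketch of that assignment (not Prysm's actual code):

```go
package main

import "fmt"

// assignColumnsToPeers splits the wanted column indices across peers
// according to what each peer custodies. In a disjoint ("perfect
// PeerDAS") network, custody sets do not overlap, so every column maps
// to exactly one peer. Hypothetical sketch for illustration.
func assignColumnsToPeers(wanted []uint64, custodyByPeer map[string]map[uint64]bool) map[string][]uint64 {
	assignment := make(map[string][]uint64)
	for _, column := range wanted {
		for peer, custody := range custodyByPeer {
			if custody[column] {
				assignment[peer] = append(assignment[peer], column)
				break // one peer per column is enough
			}
		}
	}
	return assignment
}

func main() {
	custodyByPeer := map[string]map[uint64]bool{
		"peerA": {1: true, 17: true, 19: true, 42: true},
		"peerB": {75: true, 87: true, 102: true, 117: true},
	}
	fmt.Println(assignColumnsToPeers([]uint64{1, 42, 75, 117}, custodyByPeer))
}
```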

* Add changelog.

* Fix James' comments.

* Update beacon-chain/sync/data_column_sidecars.go

Co-authored-by: Potuz <potuz@prysmaticlabs.com>

* Update after Potuz's comment.

* Fix Potuz's commit.

* Fix James' comment.

---------

Co-authored-by: Potuz <potuz@prysmaticlabs.com>
Author: Manu NALEPA
Date: 2025-09-15 21:21:49 +02:00
Committed by: GitHub
Parent: 76bc30e8ba
Commit: 2292d955a3
11 changed files with 866 additions and 460 deletions


@@ -699,7 +699,7 @@ func TestFetchOriginColumns(t *testing.T) {
 		roBlock, err := blocks.NewROBlock(signedBlock)
 		require.NoError(t, err)
-		err = service.fetchOriginColumns(roBlock, delay)
+		err = service.fetchOriginDataColumnSidecars(roBlock, delay)
 		require.NoError(t, err)
 	})
@@ -721,7 +721,7 @@ func TestFetchOriginColumns(t *testing.T) {
 		err := storage.Save(verifiedSidecars)
 		require.NoError(t, err)
-		err = service.fetchOriginColumns(roBlock, delay)
+		err = service.fetchOriginDataColumnSidecars(roBlock, delay)
 		require.NoError(t, err)
 	})
@@ -747,10 +747,35 @@ func TestFetchOriginColumns(t *testing.T) {
 		other.ENR().Set(peerdas.Cgc(numberOfCustodyGroups))
 		p2p.Peers().UpdateENR(other.ENR(), other.PeerID())
-		expectedRequest := &ethpb.DataColumnSidecarsByRangeRequest{
-			StartSlot: 0,
-			Count:     1,
-			Columns:   []uint64{1, 17, 19, 42, 75, 87, 102, 117},
-		}
+		allBut42 := make([]uint64, 0, numberOfCustodyGroups-1)
+		for i := range numberOfCustodyGroups {
+			if i != 42 {
+				allBut42 = append(allBut42, i)
+			}
+		}
+		expectedRequests := []*ethpb.DataColumnSidecarsByRangeRequest{
+			{
+				StartSlot: 0,
+				Count:     1,
+				Columns:   []uint64{1, 17, 19, 42, 75, 87, 102, 117},
+			},
+			{
+				StartSlot: 0,
+				Count:     1,
+				Columns:   allBut42,
+			},
+			{
+				StartSlot: 0,
+				Count:     1,
+				Columns:   []uint64{1, 17, 19, 75, 87, 102, 117},
+			},
+		}
+		toRespondByAttempt := [][]uint64{
+			{42},
+			{},
+			{1, 17, 19, 75, 87, 102, 117},
+		}
 		clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
@@ -783,36 +808,33 @@
 		}
 		// Do not respond any sidecar on the first attempt, and respond everything requested on the second one.
-		firstAttempt := true
+		attempt := 0
 		other.SetStreamHandler(protocol, func(stream network.Stream) {
 			actualRequest := new(ethpb.DataColumnSidecarsByRangeRequest)
 			err := other.Encoding().DecodeWithMaxLength(stream, actualRequest)
 			assert.NoError(t, err)
-			assert.DeepEqual(t, expectedRequest, actualRequest)
+			assert.DeepEqual(t, expectedRequests[attempt], actualRequest)
-			if firstAttempt {
-				firstAttempt = false
-				err = stream.CloseWrite()
-				assert.NoError(t, err)
-				return
-			}
-			for _, column := range actualRequest.Columns {
+			for _, column := range toRespondByAttempt[attempt] {
 				err = prysmSync.WriteDataColumnSidecarChunk(stream, clock, other.Encoding(), verifiedRoSidecars[column].DataColumnSidecar)
 				assert.NoError(t, err)
 			}
 			err = stream.CloseWrite()
 			assert.NoError(t, err)
+			attempt++
 		})
-		err = service.fetchOriginColumns(roBlock, delay)
+		err = service.fetchOriginDataColumnSidecars(roBlock, delay)
 		require.NoError(t, err)
 		// Check all corresponding sidecars are saved in the store.
 		summary := storage.Summary(roBlock.Root())
-		for _, index := range expectedRequest.Columns {
-			require.Equal(t, true, summary.HasIndex(index))
+		for _, indices := range toRespondByAttempt {
+			for _, index := range indices {
+				require.Equal(t, true, summary.HasIndex(index))
+			}
 		}
 	})
 }