Mirror of https://github.com/OffchainLabs/prysm.git, synced 2026-01-08 23:18:15 -05:00
sync blobs in initial-sync (#12522)
@@ -3,6 +3,7 @@ package initialsync
 import (
 	"context"
 	"fmt"
+	"math/rand"
 	"sort"
 	"sync"
 	"testing"
@@ -23,6 +24,7 @@ import (
 	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
 	leakybucket "github.com/prysmaticlabs/prysm/v4/container/leaky-bucket"
 	"github.com/prysmaticlabs/prysm/v4/container/slice"
+	"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
 	ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
 	"github.com/prysmaticlabs/prysm/v4/testing/assert"
 	"github.com/prysmaticlabs/prysm/v4/testing/require"
@@ -273,6 +275,9 @@ func TestBlocksFetcher_RoundRobin(t *testing.T) {
 			st, err := util.NewBeaconState()
 			require.NoError(t, err)
 
+			gt := time.Now()
+			vr := [32]byte{}
+			clock := startup.NewClock(gt, vr)
 			mc := &mock.ChainService{
 				State: st,
 				Root: genesisRoot[:],
@@ -288,6 +293,7 @@ func TestBlocksFetcher_RoundRobin(t *testing.T) {
 			fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{
 				chain: mc,
 				p2p: p,
+				clock: clock,
 			})
 			require.NoError(t, fetcher.start())
 
@@ -299,9 +305,9 @@ func TestBlocksFetcher_RoundRobin(t *testing.T) {
 				fetcher.stop()
 			}()
 
-			processFetchedBlocks := func() ([]interfaces.ReadOnlySignedBeaconBlock, error) {
+			processFetchedBlocks := func() ([]blocks.BlockWithVerifiedBlobs, error) {
 				defer cancel()
-				var unionRespBlocks []interfaces.ReadOnlySignedBeaconBlock
+				var unionRespBlocks []blocks.BlockWithVerifiedBlobs
 
 				for {
 					select {
@@ -313,8 +319,8 @@ func TestBlocksFetcher_RoundRobin(t *testing.T) {
 						if resp.err != nil {
 							log.WithError(resp.err).Debug("Block fetcher returned error")
 						} else {
-							unionRespBlocks = append(unionRespBlocks, resp.blocks...)
-							if len(resp.blocks) == 0 {
+							unionRespBlocks = append(unionRespBlocks, resp.bwb...)
+							if len(resp.bwb) == 0 {
 								log.WithFields(logrus.Fields{
 									"start": resp.start,
 									"count": resp.count,
@@ -337,30 +343,27 @@ func TestBlocksFetcher_RoundRobin(t *testing.T) {
 				maxExpectedBlocks += requestParams.count
 			}
 
-			blocks, err := processFetchedBlocks()
+			bwb, err := processFetchedBlocks()
 			assert.NoError(t, err)
 
-			sort.Slice(blocks, func(i, j int) bool {
-				return blocks[i].Block().Slot() < blocks[j].Block().Slot()
-			})
-
-			ss := make([]primitives.Slot, len(blocks))
-			for i, block := range blocks {
-				ss[i] = block.Block().Slot()
+			sort.Sort(blocks.BlockWithVerifiedBlobsSlice(bwb))
+			ss := make([]primitives.Slot, len(bwb))
+			for i, b := range bwb {
+				ss[i] = b.Block.Block().Slot()
 			}
 
 			log.WithFields(logrus.Fields{
-				"blocksLen": len(blocks),
+				"blocksLen": len(bwb),
 				"slots": ss,
 			}).Debug("Finished block fetching")
 
-			if len(blocks) > int(maxExpectedBlocks) {
-				t.Errorf("Too many blocks returned. Wanted %d got %d", maxExpectedBlocks, len(blocks))
+			if len(bwb) > int(maxExpectedBlocks) {
+				t.Errorf("Too many blocks returned. Wanted %d got %d", maxExpectedBlocks, len(bwb))
 			}
-			assert.Equal(t, len(tt.expectedBlockSlots), len(blocks), "Processes wrong number of blocks")
+			assert.Equal(t, len(tt.expectedBlockSlots), len(bwb), "Processes wrong number of blocks")
 			var receivedBlockSlots []primitives.Slot
-			for _, blk := range blocks {
-				receivedBlockSlots = append(receivedBlockSlots, blk.Block().Slot())
+			for _, b := range bwb {
+				receivedBlockSlots = append(receivedBlockSlots, b.Block.Block().Slot())
 			}
 			missing := slice.NotSlot(slice.IntersectionSlot(tt.expectedBlockSlots, receivedBlockSlots), tt.expectedBlockSlots)
 			if len(missing) > 0 {
@@ -417,11 +420,13 @@ func TestBlocksFetcher_handleRequest(t *testing.T) {
 	mc, p2p, _ := initializeTestServices(t, chainConfig.expectedBlockSlots, chainConfig.peers)
 	mc.ValidatorsRoot = [32]byte{}
 	mc.Genesis = time.Now()
+
 	t.Run("context cancellation", func(t *testing.T) {
 		ctx, cancel := context.WithCancel(context.Background())
 		fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{
 			chain: mc,
 			p2p: p2p,
+			clock: startup.NewClock(mc.Genesis, mc.ValidatorsRoot),
 		})
 
 		cancel()
@@ -435,6 +440,7 @@ func TestBlocksFetcher_handleRequest(t *testing.T) {
 		fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{
 			chain: mc,
 			p2p: p2p,
+			clock: startup.NewClock(mc.Genesis, mc.ValidatorsRoot),
 		})
 
 		requestCtx, reqCancel := context.WithTimeout(context.Background(), 2*time.Second)
@@ -447,7 +453,7 @@ func TestBlocksFetcher_handleRequest(t *testing.T) {
 			}
 		}()
 
-		var blocks []interfaces.ReadOnlySignedBeaconBlock
+		var bwb []blocks.BlockWithVerifiedBlobs
 		select {
 		case <-ctx.Done():
 			t.Error(ctx.Err())
@@ -455,16 +461,16 @@ func TestBlocksFetcher_handleRequest(t *testing.T) {
 			if resp.err != nil {
 				t.Error(resp.err)
 			} else {
-				blocks = resp.blocks
+				bwb = resp.bwb
 			}
 		}
-		if uint64(len(blocks)) != uint64(blockBatchLimit) {
-			t.Errorf("incorrect number of blocks returned, expected: %v, got: %v", blockBatchLimit, len(blocks))
+		if uint64(len(bwb)) != uint64(blockBatchLimit) {
+			t.Errorf("incorrect number of blocks returned, expected: %v, got: %v", blockBatchLimit, len(bwb))
 		}
 
 		var receivedBlockSlots []primitives.Slot
-		for _, blk := range blocks {
-			receivedBlockSlots = append(receivedBlockSlots, blk.Block().Slot())
+		for _, b := range bwb {
+			receivedBlockSlots = append(receivedBlockSlots, b.Block.Block().Slot())
 		}
 		missing := slice.NotSlot(slice.IntersectionSlot(chainConfig.expectedBlockSlots, receivedBlockSlots), chainConfig.expectedBlockSlots)
 		if len(missing) > 0 {
@@ -952,3 +958,171 @@ func TestTimeToWait(t *testing.T) {
 		})
 	}
 }
+
+func TestSortBlobs(t *testing.T) {
+	_, blobs := util.ExtendBlocksPlusBlobs(t, []blocks.ROBlock{}, 10)
+	shuffled := make([]*ethpb.BlobSidecar, len(blobs))
+	for i := range blobs {
+		shuffled[i] = blobs[i]
+	}
+	rand.Shuffle(len(shuffled), func(i, j int) {
+		shuffled[i], shuffled[j] = shuffled[j], shuffled[i]
+	})
+	sorted := sortBlobs(shuffled)
+	require.Equal(t, len(sorted), len(shuffled))
+	for i := range blobs {
+		expect := blobs[i]
+		actual := sorted[i]
+		require.Equal(t, expect.Slot, actual.Slot)
+		require.Equal(t, expect.Index, actual.Index)
+		require.Equal(t, bytesutil.ToBytes48(expect.KzgCommitment), bytesutil.ToBytes48(actual.KzgCommitment))
+		require.Equal(t, bytesutil.ToBytes32(expect.BlockRoot), bytesutil.ToBytes32(actual.BlockRoot))
+	}
+}
+
+func TestLowestSlotNeedsBlob(t *testing.T) {
+	blks, _ := util.ExtendBlocksPlusBlobs(t, []blocks.ROBlock{}, 10)
+	sbbs := make([]interfaces.ReadOnlySignedBeaconBlock, len(blks))
+	for i := range blks {
+		sbbs[i] = blks[i]
+	}
+	retentionStart := primitives.Slot(5)
+	bwb, err := sortedBlockWithVerifiedBlobSlice(sbbs)
+	require.NoError(t, err)
+	lowest := lowestSlotNeedsBlob(retentionStart, bwb)
+	require.Equal(t, retentionStart, *lowest)
+	higher := primitives.Slot(len(blks) + 1)
+	lowest = lowestSlotNeedsBlob(higher, bwb)
+	var nilSlot *primitives.Slot
+	require.Equal(t, nilSlot, lowest)
+
+	blks, _ = util.ExtendBlocksPlusBlobs(t, []blocks.ROBlock{}, 10)
+	sbbs = make([]interfaces.ReadOnlySignedBeaconBlock, len(blks))
+	for i := range blks {
+		sbbs[i] = blks[i]
+	}
+	bwb, err = sortedBlockWithVerifiedBlobSlice(sbbs)
+	require.NoError(t, err)
+	retentionStart = bwb[5].Block.Block().Slot()
+	next := bwb[6].Block.Block().Slot()
+	skip := bwb[5].Block.Block()
+	bwb[5].Block, _ = util.GenerateTestDenebBlockWithSidecar(t, skip.ParentRoot(), skip.Slot(), 0)
+	lowest = lowestSlotNeedsBlob(retentionStart, bwb)
+	require.Equal(t, next, *lowest)
+}
+
+func TestBlobRequest(t *testing.T) {
+	var nilReq *ethpb.BlobSidecarsByRangeRequest
+	// no blocks
+	req := blobRequest([]blocks.BlockWithVerifiedBlobs{}, 0)
+	require.Equal(t, nilReq, req)
+	blks, _ := util.ExtendBlocksPlusBlobs(t, []blocks.ROBlock{}, 10)
+	sbbs := make([]interfaces.ReadOnlySignedBeaconBlock, len(blks))
+	for i := range blks {
+		sbbs[i] = blks[i]
+	}
+	bwb, err := sortedBlockWithVerifiedBlobSlice(sbbs)
+	require.NoError(t, err)
+	maxBlkSlot := primitives.Slot(len(blks) - 1)
+
+	tooHigh := primitives.Slot(len(blks) + 1)
+	req = blobRequest(bwb, tooHigh)
+	require.Equal(t, nilReq, req)
+
+	req = blobRequest(bwb, maxBlkSlot)
+	require.Equal(t, uint64(1), req.Count)
+	require.Equal(t, maxBlkSlot, req.StartSlot)
+
+	halfway := primitives.Slot(5)
+	req = blobRequest(bwb, halfway)
+	require.Equal(t, halfway, req.StartSlot)
+	// adding 1 to include the halfway slot itself
+	require.Equal(t, uint64(1+maxBlkSlot-halfway), req.Count)
+
+	before := bwb[0].Block.Block().Slot()
+	allAfter := bwb[1:]
+	req = blobRequest(allAfter, before)
+	require.Equal(t, allAfter[0].Block.Block().Slot(), req.StartSlot)
+	require.Equal(t, len(allAfter), int(req.Count))
+}
+
+func testSequenceBlockWithBlob(t *testing.T, nblocks int) ([]blocks.BlockWithVerifiedBlobs, []*ethpb.BlobSidecar) {
+	blks, blobs := util.ExtendBlocksPlusBlobs(t, []blocks.ROBlock{}, nblocks)
+	sbbs := make([]interfaces.ReadOnlySignedBeaconBlock, len(blks))
+	for i := range blks {
+		sbbs[i] = blks[i]
+	}
+	bwb, err := sortedBlockWithVerifiedBlobSlice(sbbs)
+	require.NoError(t, err)
+	return bwb, blobs
+}
+
+func TestVerifyAndPopulateBlobs(t *testing.T) {
+	bwb, blobs := testSequenceBlockWithBlob(t, 10)
+	lastBlobIdx := len(blobs) - 1
+	// Blocks are all before the retention window, blobs argument is ignored.
+	windowAfter := bwb[len(bwb)-1].Block.Block().Slot() + 1
+	_, err := verifyAndPopulateBlobs(bwb, nil, windowAfter)
+	require.NoError(t, err)
+
+	firstBlockSlot := bwb[0].Block.Block().Slot()
+	// slice off blobs for the last block so we hit the out of bounds / blob exhaustion check.
+	_, err = verifyAndPopulateBlobs(bwb, blobs[0:len(blobs)-6], firstBlockSlot)
+	require.ErrorIs(t, err, errMissingBlobsForBlockCommitments)
+
+	bwb, blobs = testSequenceBlockWithBlob(t, 10)
+	// Misalign the slots of the blobs for the first block to simulate them being missing from the response.
+	offByOne := blobs[0].Slot
+	for i := range blobs {
+		if blobs[i].Slot == offByOne {
+			blobs[i].Slot = offByOne + 1
+		}
+	}
+	_, err = verifyAndPopulateBlobs(bwb, blobs, firstBlockSlot)
+	require.ErrorIs(t, err, errMissingBlobsForBlockCommitments)
+
+	bwb, blobs = testSequenceBlockWithBlob(t, 10)
+	blobs[lastBlobIdx].BlockRoot = blobs[0].BlockRoot
+	_, err = verifyAndPopulateBlobs(bwb, blobs, firstBlockSlot)
+	require.ErrorIs(t, err, errMismatchedBlobBlockRoot)
+
+	bwb, blobs = testSequenceBlockWithBlob(t, 10)
+	blobs[lastBlobIdx].Index = 100
+	_, err = verifyAndPopulateBlobs(bwb, blobs, firstBlockSlot)
+	require.ErrorIs(t, err, errMissingBlobIndex)
+
+	var emptyKzg [48]byte
+	bwb, blobs = testSequenceBlockWithBlob(t, 10)
+	blobs[lastBlobIdx].KzgCommitment = emptyKzg[:]
+	_, err = verifyAndPopulateBlobs(bwb, blobs, firstBlockSlot)
+	require.ErrorIs(t, err, errMismatchedBlobCommitments)
+
+	// happy path
+	bwb, blobs = testSequenceBlockWithBlob(t, 10)
+
+	expectedCommits := make(map[[48]byte]bool)
+	for _, bl := range blobs {
+		expectedCommits[bytesutil.ToBytes48(bl.KzgCommitment)] = true
+	}
+	// The assertions using this map expect all commitments to be unique, so make sure that stays true.
+	require.Equal(t, len(blobs), len(expectedCommits))
+
+	bwb, err = verifyAndPopulateBlobs(bwb, blobs, firstBlockSlot)
+	require.NoError(t, err)
+	for _, bw := range bwb {
+		commits, err := bw.Block.Block().Body().BlobKzgCommitments()
+		require.NoError(t, err)
+		require.Equal(t, len(commits), len(bw.Blobs))
+		for i := range commits {
+			bc := bytesutil.ToBytes48(commits[i])
+			require.Equal(t, bc, bytesutil.ToBytes48(bw.Blobs[i].KzgCommitment))
+			// Since we delete entries we've seen, duplicates will cause an error here.
+			_, ok := expectedCommits[bc]
+			// Make sure this was an expected delete, then delete it from the map so we can make sure we saw all of them.
+			require.Equal(t, true, ok)
+			delete(expectedCommits, bc)
+		}
+	}
+	// We delete each entry we've seen, so if we see all expected commits, the map should be empty at the end.
+	require.Equal(t, 0, len(expectedCommits))
+}
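
For orientation only, here is a minimal sketch of what a helper like lowestSlotNeedsBlob could look like, inferred purely from how TestLowestSlotNeedsBlob above exercises it. The body, the exact signature, and the import paths (consensus-types/blocks and consensus-types/primitives) are assumptions, not the implementation this commit actually adds.

// Hypothetical sketch, inferred from the tests above; not the code introduced by this commit.
// Assumed imports:
//
//	"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
//	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
//
// lowestSlotNeedsBlob walks the slot-sorted slice and returns the slot of the first block at or
// above retentionStart that commits to blobs, or nil when no block still needs blobs fetched.
func lowestSlotNeedsBlob(retentionStart primitives.Slot, bwb []blocks.BlockWithVerifiedBlobs) *primitives.Slot {
	for _, b := range bwb {
		slot := b.Block.Block().Slot()
		if slot < retentionStart {
			continue // before the blob retention window, no blobs required
		}
		commits, err := b.Block.Block().Body().BlobKzgCommitments()
		if err != nil || len(commits) == 0 {
			continue // pre-Deneb block, or a block without blob commitments
		}
		return &slot
	}
	return nil
}

This matches the test's expectations: a block with zero commitments at the retention start is skipped in favor of the next block's slot, and a retention start above every block's slot yields nil.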