Use NetworkSchedule config to determine max blobs at epoch (#15714)
Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
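This change replaces hard-coded blob limits in tests (e.g. `MaxBlobsPerBlock(0)`) with epoch-aware lookups such as `params.BeaconConfig().MaxBlobsPerBlock(slot)`, resolved from the network schedule. As a rough illustration of the idea — not the actual Prysm API — the sketch below resolves a max-blobs value from a schedule of (epoch, value) entries; all types here are simplified stand-ins:

```go
package main

import "fmt"

// Epoch is a simplified stand-in for the consensus Epoch primitive.
type Epoch uint64

// BlobScheduleEntry mirrors the shape used in the tests below:
// a new MaxBlobsPerBlock value activates at Epoch.
type BlobScheduleEntry struct {
	Epoch            Epoch
	MaxBlobsPerBlock int
}

// maxBlobsAtEpoch scans a schedule sorted by ascending epoch and
// returns the value of the last entry at or before e.
func maxBlobsAtEpoch(schedule []BlobScheduleEntry, e Epoch) int {
	limit := 0
	for _, entry := range schedule {
		if entry.Epoch > e {
			break
		}
		limit = entry.MaxBlobsPerBlock
	}
	return limit
}

func main() {
	// The same progressive schedule used by the BPO test at the end of this diff.
	schedule := []BlobScheduleEntry{
		{Epoch: 0, MaxBlobsPerBlock: 3},
		{Epoch: 10, MaxBlobsPerBlock: 5},
		{Epoch: 20, MaxBlobsPerBlock: 7},
		{Epoch: 30, MaxBlobsPerBlock: 9},
	}
	fmt.Println(maxBlobsAtEpoch(schedule, 0))  // 3
	fmt.Println(maxBlobsAtEpoch(schedule, 15)) // 5
	fmt.Println(maxBlobsAtEpoch(schedule, 99)) // 9
}
```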
@@ -59,6 +59,7 @@ go_test(
"//runtime/version:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",

@@ -12,6 +12,7 @@ import (

"github.com/OffchainLabs/prysm/v6/api"
"github.com/OffchainLabs/prysm/v6/api/server/structs"
"github.com/OffchainLabs/prysm/v6/testing/util"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
@@ -170,8 +171,11 @@ func TestClient_RegisterValidator(t *testing.T) {

func TestClient_GetHeader(t *testing.T) {
ctx := t.Context()
expectedPath := "/eth/v1/builder/header/23/0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2/0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"
var slot primitives.Slot = 23
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
es := util.SlotAtEpoch(t, params.BeaconConfig().ElectraForkEpoch)
expectedPath := "/eth/v1/builder/header/%d/0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2/0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"
expectedPath = fmt.Sprintf(expectedPath, ds)
var slot primitives.Slot = ds
parentHash := ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
pubkey := ezDecode(t, "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a")
t.Run("server error", func(t *testing.T) {
@@ -533,7 +537,7 @@ func TestClient_GetHeader(t *testing.T) {
require.Equal(t, expectedPath, r.URL.Path)
epr := &ExecHeaderResponseElectra{}
require.NoError(t, json.Unmarshal([]byte(testExampleHeaderResponseElectra), epr))
pro, err := epr.ToProto(100)
pro, err := epr.ToProto(es)
require.NoError(t, err)
ssz, err := pro.MarshalSSZ()
require.NoError(t, err)

@@ -2413,6 +2413,8 @@ func driftGenesisTime(s *Service, slot primitives.Slot, delay time.Duration) {
}

func TestMissingBlobIndices(t *testing.T) {
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
maxBlobs := params.BeaconConfig().MaxBlobsPerBlock(ds)
cases := []struct {
name string
expected [][]byte
@@ -2426,23 +2428,23 @@ func TestMissingBlobIndices(t *testing.T) {
},
{
name: "expected exceeds max",
expected: fakeCommitments(params.BeaconConfig().MaxBlobsPerBlock(0) + 1),
expected: fakeCommitments(maxBlobs + 1),
err: errMaxBlobsExceeded,
},
{
name: "first missing",
expected: fakeCommitments(params.BeaconConfig().MaxBlobsPerBlock(0)),
expected: fakeCommitments(maxBlobs),
present: []uint64{1, 2, 3, 4, 5},
result: fakeResult([]uint64{0}),
},
{
name: "all missing",
expected: fakeCommitments(params.BeaconConfig().MaxBlobsPerBlock(0)),
expected: fakeCommitments(maxBlobs),
result: fakeResult([]uint64{0, 1, 2, 3, 4, 5}),
},
{
name: "none missing",
expected: fakeCommitments(params.BeaconConfig().MaxBlobsPerBlock(0)),
expected: fakeCommitments(maxBlobs),
present: []uint64{0, 1, 2, 3, 4, 5},
result: fakeResult([]uint64{}),
},
@@ -2475,8 +2477,8 @@ func TestMissingBlobIndices(t *testing.T) {
for _, c := range cases {
bm, bs := filesystem.NewEphemeralBlobStorageWithMocker(t)
t.Run(c.name, func(t *testing.T) {
require.NoError(t, bm.CreateFakeIndices(c.root, 0, c.present...))
missing, err := missingBlobIndices(bs, c.root, c.expected, 0)
require.NoError(t, bm.CreateFakeIndices(c.root, ds, c.present...))
missing, err := missingBlobIndices(bs, c.root, c.expected, ds)
if c.err != nil {
require.ErrorIs(t, err, c.err)
return
@@ -2904,22 +2906,21 @@ type testIsAvailableParams struct {
columnsToSave []uint64
}

func testIsAvailableSetup(t *testing.T, params testIsAvailableParams) (context.Context, context.CancelFunc, *Service, [fieldparams.RootLength]byte, interfaces.SignedBeaconBlock) {
func testIsAvailableSetup(t *testing.T, p testIsAvailableParams) (context.Context, context.CancelFunc, *Service, [fieldparams.RootLength]byte, interfaces.SignedBeaconBlock) {
ctx, cancel := context.WithCancel(t.Context())
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)

options := append(params.options, WithDataColumnStorage(dataColumnStorage))
options := append(p.options, WithDataColumnStorage(dataColumnStorage))
service, _ := minimalTestService(t, options...)
fs := util.SlotAtEpoch(t, params.BeaconConfig().FuluForkEpoch)

genesisState, secretKeys := util.DeterministicGenesisStateElectra(t, 32 /*validator count*/)

err := service.saveGenesisData(ctx, genesisState)
require.NoError(t, err)
genesisState, secretKeys := util.DeterministicGenesisStateElectra(t, 32, util.WithElectraStateSlot(fs))
require.NoError(t, service.saveGenesisData(ctx, genesisState))

conf := util.DefaultBlockGenConfig()
conf.NumBlobKzgCommitments = params.blobKzgCommitmentsCount
conf.NumBlobKzgCommitments = p.blobKzgCommitmentsCount

signedBeaconBlock, err := util.GenerateFullBlockFulu(genesisState, secretKeys, conf, 10 /*block slot*/)
signedBeaconBlock, err := util.GenerateFullBlockFulu(genesisState, secretKeys, conf, fs+1)
require.NoError(t, err)

block := signedBeaconBlock.Block
@@ -2929,8 +2930,8 @@ func testIsAvailableSetup(t *testing.T, params testIsAvailableParams) (context.C
root, err := block.HashTreeRoot()
require.NoError(t, err)

dataColumnsParams := make([]util.DataColumnParam, 0, len(params.columnsToSave))
for _, i := range params.columnsToSave {
dataColumnsParams := make([]util.DataColumnParam, 0, len(p.columnsToSave))
for _, i := range p.columnsToSave {
dataColumnParam := util.DataColumnParam{
Index: i,
Slot: block.Slot,
@@ -2954,8 +2955,12 @@ func testIsAvailableSetup(t *testing.T, params testIsAvailableParams) (context.C
}

func TestIsDataAvailable(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.AltairForkEpoch, cfg.BellatrixForkEpoch, cfg.CapellaForkEpoch, cfg.DenebForkEpoch, cfg.ElectraForkEpoch, cfg.FuluForkEpoch = 0, 0, 0, 0, 0, 0
params.OverrideBeaconConfig(cfg)
t.Run("Fulu - out of retention window", func(t *testing.T) {
params := testIsAvailableParams{options: []Option{WithGenesisTime(time.Unix(0, 0))}}
params := testIsAvailableParams{}
ctx, _, service, root, signed := testIsAvailableSetup(t, params)

roBlock, err := consensusblocks.NewROBlockWithRoot(signed, root)
@@ -2972,7 +2977,6 @@ func TestIsDataAvailable(t *testing.T) {
err = service.isDataAvailable(ctx, roBlock)
require.NoError(t, err)
})

t.Run("Fulu - more than half of the columns in custody", func(t *testing.T) {
minimumColumnsCountToReconstruct := peerdas.MinimumColumnCountToReconstruct()
indices := make([]uint64, 0, minimumColumnsCountToReconstruct)

@@ -562,8 +562,9 @@ func TestNotifyIndex(t *testing.T) {
var root [32]byte
copy(root[:], "exampleRoot")

ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
// Test notifying a new index
bn.notifyIndex(root, 1, 1)
bn.notifyIndex(root, 1, ds)
if !bn.seenIndex[root][1] {
t.Errorf("Index was not marked as seen")
}
@@ -580,7 +581,7 @@
}

// Test notifying a new index again
bn.notifyIndex(root, 2, 1)
bn.notifyIndex(root, 2, ds)
if !bn.seenIndex[root][2] {
t.Errorf("Index was not marked as seen")
}

@@ -125,11 +125,12 @@ func TestReconstructDataColumnSidecars(t *testing.T) {
}

func TestReconstructBlobs(t *testing.T) {
// Start the trusted setup.
err := kzg.Start()
require.NoError(t, err)
params.SetupTestConfigCleanup(t)
params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096*2

require.NoError(t, kzg.Start())
var emptyBlock blocks.ROBlock
fs := util.SlotAtEpoch(t, params.BeaconConfig().FuluForkEpoch)

t.Run("no index", func(t *testing.T) {
actual, err := peerdas.ReconstructBlobs(emptyBlock, nil, nil)
@@ -190,10 +191,10 @@
})

t.Run("not committed to the same block", func(t *testing.T) {
_, _, verifiedRoSidecars := util.GenerateTestFuluBlockWithSidecars(t, 3, util.WithParentRoot([fieldparams.RootLength]byte{1}))
roBlock, _, _ := util.GenerateTestFuluBlockWithSidecars(t, 3, util.WithParentRoot([fieldparams.RootLength]byte{2}))
_, _, verifiedRoSidecars := util.GenerateTestFuluBlockWithSidecars(t, 3, util.WithParentRoot([fieldparams.RootLength]byte{1}), util.WithSlot(fs))
roBlock, _, _ := util.GenerateTestFuluBlockWithSidecars(t, 3, util.WithParentRoot([fieldparams.RootLength]byte{2}), util.WithSlot(fs))

_, err = peerdas.ReconstructBlobs(roBlock, verifiedRoSidecars, []int{0})
_, err := peerdas.ReconstructBlobs(roBlock, verifiedRoSidecars, []int{0})
require.ErrorContains(t, peerdas.ErrRootMismatch.Error(), err)
})


@@ -16,61 +16,60 @@ func TestDataColumnsAlignWithBlock(t *testing.T) {
err := kzg.Start()
require.NoError(t, err)

params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096*2
fs := util.SlotAtEpoch(t, params.BeaconConfig().ElectraForkEpoch)
require.NoError(t, err)
fuluMax := params.BeaconConfig().MaxBlobsPerBlock(fs)
t.Run("pre fulu", func(t *testing.T) {
block, _ := util.GenerateTestElectraBlockWithSidecar(t, [fieldparams.RootLength]byte{}, 0, 0)
block, _ := util.GenerateTestElectraBlockWithSidecar(t, [fieldparams.RootLength]byte{}, fs, 0)
err := peerdas.DataColumnsAlignWithBlock(block, nil)
require.NoError(t, err)
})

t.Run("too many commitmnets", func(t *testing.T) {
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
config.BlobSchedule = []params.BlobScheduleEntry{{}}
params.OverrideBeaconConfig(config)

block, _, _ := util.GenerateTestFuluBlockWithSidecars(t, 3)
t.Run("too many commitments", func(t *testing.T) {
block, _, _ := util.GenerateTestFuluBlockWithSidecars(t, fuluMax+1, util.WithSlot(fs))
err := peerdas.DataColumnsAlignWithBlock(block, nil)
require.ErrorIs(t, err, peerdas.ErrTooManyCommitments)
})

t.Run("root mismatch", func(t *testing.T) {
_, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2)
block, _, _ := util.GenerateTestFuluBlockWithSidecars(t, 0)
_, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2, util.WithSlot(fs))
block, _, _ := util.GenerateTestFuluBlockWithSidecars(t, 0, util.WithSlot(fs))
err := peerdas.DataColumnsAlignWithBlock(block, sidecars)
require.ErrorIs(t, err, peerdas.ErrRootMismatch)
})

t.Run("column size mismatch", func(t *testing.T) {
block, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2)
block, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2, util.WithSlot(fs))
sidecars[0].Column = [][]byte{}
err := peerdas.DataColumnsAlignWithBlock(block, sidecars)
require.ErrorIs(t, err, peerdas.ErrBlockColumnSizeMismatch)
})

t.Run("KZG commitments size mismatch", func(t *testing.T) {
block, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2)
block, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2, util.WithSlot(fs))
sidecars[0].KzgCommitments = [][]byte{}
err := peerdas.DataColumnsAlignWithBlock(block, sidecars)
require.ErrorIs(t, err, peerdas.ErrBlockColumnSizeMismatch)
})

t.Run("KZG proofs mismatch", func(t *testing.T) {
block, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2)
block, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2, util.WithSlot(fs))
sidecars[0].KzgProofs = [][]byte{}
err := peerdas.DataColumnsAlignWithBlock(block, sidecars)
require.ErrorIs(t, err, peerdas.ErrBlockColumnSizeMismatch)
})

t.Run("commitment mismatch", func(t *testing.T) {
block, _, _ := util.GenerateTestFuluBlockWithSidecars(t, 2)
_, alteredSidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2)
block, _, _ := util.GenerateTestFuluBlockWithSidecars(t, 2, util.WithSlot(fs))
_, alteredSidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2, util.WithSlot(fs))
alteredSidecars[1].KzgCommitments[0][0]++ // Overflow is OK
err := peerdas.DataColumnsAlignWithBlock(block, alteredSidecars)
require.ErrorIs(t, err, peerdas.ErrCommitmentMismatch)
})

t.Run("nominal", func(t *testing.T) {
block, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2)
block, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2, util.WithSlot(fs))
err := peerdas.DataColumnsAlignWithBlock(block, sidecars)
require.NoError(t, err)
})

@@ -18,13 +18,16 @@ import (
)

func Test_commitmentsToCheck(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096*2
fulu := primitives.Slot(params.BeaconConfig().FuluForkEpoch) * params.BeaconConfig().SlotsPerEpoch
windowSlots, err := slots.EpochEnd(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
require.NoError(t, err)
commits := [][]byte{
bytesutil.PadTo([]byte("a"), 48),
bytesutil.PadTo([]byte("b"), 48),
bytesutil.PadTo([]byte("c"), 48),
bytesutil.PadTo([]byte("d"), 48),
windowSlots = windowSlots + primitives.Slot(params.BeaconConfig().FuluForkEpoch)
maxBlobs := params.LastNetworkScheduleEntry().MaxBlobsPerBlock
commits := make([][]byte, maxBlobs+1)
for i := 0; i < len(commits); i++ {
commits[i] = bytesutil.PadTo([]byte{byte(i)}, 48)
}
cases := []struct {
name string
@@ -47,41 +50,40 @@ func Test_commitmentsToCheck(t *testing.T) {
{
name: "commitments within da",
block: func(t *testing.T) blocks.ROBlock {
d := util.NewBeaconBlockDeneb()
d.Block.Body.BlobKzgCommitments = commits
d.Block.Slot = 100
d := util.NewBeaconBlockFulu()
d.Block.Body.BlobKzgCommitments = commits[:maxBlobs]
d.Block.Slot = fulu + 100
sb, err := blocks.NewSignedBeaconBlock(d)
require.NoError(t, err)
rb, err := blocks.NewROBlock(sb)
require.NoError(t, err)
return rb
},
commits: commits,
slot: 100,
commits: commits[:maxBlobs],
slot: fulu + 100,
},
{
name: "commitments outside da",
block: func(t *testing.T) blocks.ROBlock {
d := util.NewBeaconBlockDeneb()
d := util.NewBeaconBlockFulu()
d.Block.Slot = fulu
// block is from slot 0, "current slot" is window size +1 (so outside the window)
d.Block.Body.BlobKzgCommitments = commits
d.Block.Body.BlobKzgCommitments = commits[:maxBlobs]
sb, err := blocks.NewSignedBeaconBlock(d)
require.NoError(t, err)
rb, err := blocks.NewROBlock(sb)
require.NoError(t, err)
return rb
},
slot: windowSlots + 1,
slot: fulu + windowSlots + 1,
},
{
name: "excessive commitments",
block: func(t *testing.T) blocks.ROBlock {
d := util.NewBeaconBlockDeneb()
d.Block.Slot = 100
d := util.NewBeaconBlockFulu()
d.Block.Slot = fulu + 100
// block is from slot 0, "current slot" is window size +1 (so outside the window)
d.Block.Body.BlobKzgCommitments = commits
// Double the number of commitments, assert that this is over the limit
d.Block.Body.BlobKzgCommitments = append(commits, d.Block.Body.BlobKzgCommitments...)
sb, err := blocks.NewSignedBeaconBlock(d)
require.NoError(t, err)
rb, err := blocks.NewROBlock(sb)
@@ -115,67 +117,69 @@ func Test_commitmentsToCheck(t *testing.T) {
func TestLazilyPersistent_Missing(t *testing.T) {
ctx := t.Context()
store := filesystem.NewEphemeralBlobStorage(t)
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)

blk, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 3)
blk, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, ds, 3)

mbv := &mockBlobBatchVerifier{t: t, scs: blobSidecars}
as := NewLazilyPersistentStore(store, mbv)

// Only one commitment persisted, should return error with other indices
require.NoError(t, as.Persist(1, blobSidecars[2]))
err := as.IsDataAvailable(ctx, 1, blk)
require.NoError(t, as.Persist(ds, blobSidecars[2]))
err := as.IsDataAvailable(ctx, ds, blk)
require.ErrorIs(t, err, errMissingSidecar)

// All but one persisted, return missing idx
require.NoError(t, as.Persist(1, blobSidecars[0]))
err = as.IsDataAvailable(ctx, 1, blk)
require.NoError(t, as.Persist(ds, blobSidecars[0]))
err = as.IsDataAvailable(ctx, ds, blk)
require.ErrorIs(t, err, errMissingSidecar)

// All persisted, return nil
require.NoError(t, as.Persist(1, blobSidecars...))
require.NoError(t, as.Persist(ds, blobSidecars...))

require.NoError(t, as.IsDataAvailable(ctx, 1, blk))
require.NoError(t, as.IsDataAvailable(ctx, ds, blk))
}

func TestLazilyPersistent_Mismatch(t *testing.T) {
ctx := t.Context()
store := filesystem.NewEphemeralBlobStorage(t)
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)

blk, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 3)
blk, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, ds, 3)

mbv := &mockBlobBatchVerifier{t: t, err: errors.New("kzg check should not run")}
blobSidecars[0].KzgCommitment = bytesutil.PadTo([]byte("nope"), 48)
as := NewLazilyPersistentStore(store, mbv)

// Only one commitment persisted, should return error with other indices
require.NoError(t, as.Persist(1, blobSidecars[0]))
err := as.IsDataAvailable(ctx, 1, blk)
require.NoError(t, as.Persist(ds, blobSidecars[0]))
err := as.IsDataAvailable(ctx, ds, blk)
require.NotNil(t, err)
require.ErrorIs(t, err, errCommitmentMismatch)
}

func TestLazyPersistOnceCommitted(t *testing.T) {
_, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 6)
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
_, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, ds, 6)

as := NewLazilyPersistentStore(filesystem.NewEphemeralBlobStorage(t), &mockBlobBatchVerifier{})
// stashes as expected
require.NoError(t, as.Persist(1, blobSidecars...))
require.NoError(t, as.Persist(ds, blobSidecars...))
// ignores duplicates
require.ErrorIs(t, as.Persist(1, blobSidecars...), ErrDuplicateSidecar)
require.ErrorIs(t, as.Persist(ds, blobSidecars...), ErrDuplicateSidecar)

// ignores index out of bound
blobSidecars[0].Index = 6
require.ErrorIs(t, as.Persist(1, blobSidecars[0]), errIndexOutOfBounds)

_, moreBlobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 4)
require.ErrorIs(t, as.Persist(ds, blobSidecars[0]), errIndexOutOfBounds)
_, moreBlobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, ds, 4)

// ignores sidecars before the retention period
slotOOB, err := slots.EpochStart(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
require.NoError(t, err)
require.NoError(t, as.Persist(32+slotOOB, moreBlobSidecars[0]))
slotOOB := util.SlotAtEpoch(t, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
slotOOB += ds + 32
require.NoError(t, as.Persist(slotOOB, moreBlobSidecars[0]))

// doesn't ignore new sidecars with a different block root
require.NoError(t, as.Persist(1, moreBlobSidecars...))
require.NoError(t, as.Persist(ds, moreBlobSidecars...))
}

type mockBlobBatchVerifier struct {

@@ -39,7 +39,7 @@ func filterTestCaseSetup(slot primitives.Slot, nBlobs int, onDisk []int, numExpe
entry := &blobCacheEntry{}
if len(onDisk) > 0 {
od := map[[32]byte][]int{blk.Root(): onDisk}
sumz := filesystem.NewMockBlobStorageSummarizer(t, od)
sumz := filesystem.NewMockBlobStorageSummarizer(t, slots.ToEpoch(slot), od)
sum := sumz.Summary(blk.Root())
entry.setDiskSummary(sum)
}

@@ -21,7 +21,8 @@ import (
)

func TestBlobStorage_SaveBlobData(t *testing.T) {
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, params.BeaconConfig().MaxBlobsPerBlock(1))
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, ds, params.BeaconConfig().MaxBlobsPerBlock(ds))
testSidecars := verification.FakeVerifySliceForTest(t, sidecars)

t.Run("no error for duplicate", func(t *testing.T) {
@@ -127,21 +128,22 @@ func TestBlobStorage_SaveBlobData(t *testing.T) {
}

func TestBlobIndicesBounds(t *testing.T) {
es := util.SlotAtEpoch(t, params.BeaconConfig().ElectraForkEpoch)
fs := afero.NewMemMapFs()
root := [32]byte{}

okIdx := uint64(params.BeaconConfig().MaxBlobsPerBlock(0)) - 1
writeFakeSSZ(t, fs, root, 0, okIdx)
okIdx := uint64(params.BeaconConfig().MaxBlobsPerBlock(es)) - 1
writeFakeSSZ(t, fs, root, es, okIdx)
bs := NewWarmedEphemeralBlobStorageUsingFs(t, fs, WithLayout(LayoutNameByEpoch))
indices := bs.Summary(root).mask
expected := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(0))
expected := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(es))
expected[okIdx] = true
for i := range expected {
require.Equal(t, expected[i], indices[i])
}

oobIdx := uint64(params.BeaconConfig().MaxBlobsPerBlock(0))
writeFakeSSZ(t, fs, root, 0, oobIdx)
oobIdx := uint64(params.BeaconConfig().MaxBlobsPerBlock(es))
writeFakeSSZ(t, fs, root, es, oobIdx)
// This now fails at cache warmup time.
require.ErrorIs(t, warmCache(bs.layout, bs.cache), errIndexOutOfBounds)
}

@@ -6,14 +6,17 @@ import (
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
)

func TestSlotByRoot_Summary(t *testing.T) {
noneSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(0))
allSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(0))
firstSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(0))
lastSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(0))
oneSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(0))
ee := params.BeaconConfig().ElectraForkEpoch
es := util.SlotAtEpoch(t, ee)
noneSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(es))
allSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(es))
firstSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(es))
lastSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(es))
oneSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(es))
firstSet[0] = true
lastSet[len(lastSet)-1] = true
oneSet[1] = true
@@ -53,7 +56,7 @@ func TestSlotByRoot_Summary(t *testing.T) {
for _, c := range cases {
if c.expected != nil {
key := bytesutil.ToBytes32([]byte(c.name))
sc.cache[key] = BlobStorageSummary{epoch: 0, mask: c.expected}
sc.cache[key] = BlobStorageSummary{epoch: ee, mask: c.expected}
}
}
for _, c := range cases {
@@ -73,6 +76,7 @@ func TestSlotByRoot_Summary(t *testing.T) {
}

func TestAllAvailable(t *testing.T) {
es := util.SlotAtEpoch(t, params.BeaconConfig().ElectraForkEpoch)
idxUpTo := func(u int) []int {
r := make([]int, u)
for i := range r {
@@ -125,13 +129,13 @@ func TestAllAvailable(t *testing.T) {
},
{
name: "out of bound is safe",
count: params.BeaconConfig().MaxBlobsPerBlock(0) + 1,
count: params.BeaconConfig().MaxBlobsPerBlock(es) + 1,
aa: false,
},
{
name: "max present",
count: params.BeaconConfig().MaxBlobsPerBlock(0),
idxSet: idxUpTo(params.BeaconConfig().MaxBlobsPerBlock(0)),
count: params.BeaconConfig().MaxBlobsPerBlock(es),
idxSet: idxUpTo(params.BeaconConfig().MaxBlobsPerBlock(es)),
aa: true,
},
{
@@ -143,7 +147,7 @@ func TestAllAvailable(t *testing.T) {
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
mask := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(0))
mask := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(es))
for _, idx := range c.idxSet {
mask[idx] = true
}

@@ -11,6 +11,7 @@ import (
"testing"

"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
@@ -60,12 +61,13 @@ func TestRootFromDir(t *testing.T) {
}

func TestSlotFromFile(t *testing.T) {
es := util.SlotAtEpoch(t, params.BeaconConfig().ElectraForkEpoch)
cases := []struct {
slot primitives.Slot
}{
{slot: 0},
{slot: 2},
{slot: 1123581321},
{slot: es + 0},
{slot: es + 2},
{slot: es + 1123581321},
{slot: math.MaxUint64},
}
for _, c := range cases {
@@ -243,39 +245,40 @@ func TestSlotFromBlob(t *testing.T) {
}

func TestIterationComplete(t *testing.T) {
de := params.BeaconConfig().DenebForkEpoch
targets := []migrationTestTarget{
{
ident: ezIdent(t, "0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b", 1234, 0),
path: "by-epoch/0/1234/0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b/0.ssz",
ident: ezIdent(t, "0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b", de+1234, 0),
path: "by-epoch/%d/%d/0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b/0.ssz",
},
{
ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", 5330, 0),
ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", de+5330, 0),
slotOffset: 31,
path: "by-epoch/1/5330/0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86/0.ssz",
path: "by-epoch/%d/%d/0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86/0.ssz",
},
{
ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", 5330, 1),
ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", de+5330, 1),
slotOffset: 31,
path: "by-epoch/1/5330/0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86/1.ssz",
path: "by-epoch/%d/%d/0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86/1.ssz",
},
{
ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", 16777216, 0),
ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", -1+math.MaxUint64/32, 0),
slotOffset: 16,
path: "by-epoch/4096/16777216/0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c/0.ssz",
path: "by-epoch/%d/%d/0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c/0.ssz",
},
{
ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", 16777216, 1),
ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", -1+math.MaxUint64/32, 1),
slotOffset: 16,
path: "by-epoch/4096/16777216/0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c/1.ssz",
path: "by-epoch/%d/%d/0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c/1.ssz",
},
{
ident: ezIdent(t, "0x42eabe3d2c125410cd226de6f2825fb7575ab896c3f52e43de1fa29e4c809aba", 16777217, 0),
ident: ezIdent(t, "0x42eabe3d2c125410cd226de6f2825fb7575ab896c3f52e43de1fa29e4c809aba", -1+math.MaxUint64/32, 0),
slotOffset: 16,
path: "by-epoch/4096/16777217/0x42eabe3d2c125410cd226de6f2825fb7575ab896c3f52e43de1fa29e4c809aba/0.ssz",
path: "by-epoch/%d/%d/0x42eabe3d2c125410cd226de6f2825fb7575ab896c3f52e43de1fa29e4c809aba/0.ssz",
},
{
ident: ezIdent(t, "0x666cea5034e22bd3b849cb33914cad59afd88ee08e4d5bc0e997411c945fbc1d", 11235, 1),
path: "by-epoch/2/11235/0x666cea5034e22bd3b849cb33914cad59afd88ee08e4d5bc0e997411c945fbc1d/1.ssz",
ident: ezIdent(t, "0x666cea5034e22bd3b849cb33914cad59afd88ee08e4d5bc0e997411c945fbc1d", de+11235, 1),
path: "by-epoch/%d/%d/0x666cea5034e22bd3b849cb33914cad59afd88ee08e4d5bc0e997411c945fbc1d/1.ssz",
},
}
fs := afero.NewMemMapFs()
@@ -299,6 +302,7 @@ func TestIterationComplete(t *testing.T) {
require.Equal(t, true, ok)
require.Equal(t, tar.ident.epoch, entry.epoch)
require.Equal(t, true, entry.HasIndex(tar.ident.index))
require.Equal(t, tar.path, byEpoch.sszPath(tar.ident))
path := fmt.Sprintf(tar.path, periodForEpoch(tar.ident.epoch), tar.ident.epoch)
require.Equal(t, path, byEpoch.sszPath(tar.ident))
}
}
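The `by-epoch/%d/%d/...` paths above are filled in with `periodForEpoch(tar.ident.epoch)` and the epoch itself. Judging from the original literal paths (`by-epoch/1/5330/...`, `by-epoch/4096/16777216/...`), the period appears to be the epoch divided by 4096. A hedged sketch of that layout, with hypothetical helper names and assuming exactly that division:

```go
package main

import "fmt"

// epochsPerPeriod is inferred from the literal paths above (5330 -> 1,
// 16777216 -> 4096); it is an assumption, not a confirmed constant.
const epochsPerPeriod = 4096

// periodForEpoch buckets epochs into fixed-size directories so no single
// directory accumulates an unbounded number of epoch subdirectories.
func periodForEpoch(epoch uint64) uint64 {
	return epoch / epochsPerPeriod
}

// sszPathSketch is a hypothetical stand-in for the layout's sszPath helper.
func sszPathSketch(epoch uint64, root string, index uint64) string {
	return fmt.Sprintf("by-epoch/%d/%d/%s/%d.ssz", periodForEpoch(epoch), epoch, root, index)
}

func main() {
	// Reproduces "by-epoch/1/5330/0x0127.../0.ssz" from the original test data.
	fmt.Println(sszPathSketch(5330, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", 0))
}
```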

@@ -4,10 +4,10 @@ import (
"os"
"testing"

"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/spf13/afero"
)

@@ -18,9 +18,7 @@ func ezIdent(t *testing.T, rootStr string, epoch primitives.Epoch, index uint64)
}

func setupTestBlobFile(t *testing.T, ident blobIdent, offset primitives.Slot, fs afero.Fs, l fsLayout) {
slot, err := slots.EpochStart(ident.epoch)
require.NoError(t, err)
slot += offset
slot := util.SlotAtEpoch(t, ident.epoch) + offset
_, sc := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, 1)
scb, err := sc[0].MarshalSSZ()
require.NoError(t, err)
@@ -53,6 +51,7 @@ func testAssertFsMigrated(t *testing.T, fs afero.Fs, ident blobIdent, before, af
}

func TestMigrations(t *testing.T) {
de := params.BeaconConfig().DenebForkEpoch
cases := []struct {
name string
forwardLayout string
@@ -65,18 +64,18 @@ func TestMigrations(t *testing.T) {
forwardLayout: LayoutNameByEpoch,
targets: []migrationTestTarget{
{
ident: ezIdent(t, "0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b", 1234, 0),
ident: ezIdent(t, "0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b", de+1234, 0),
},
{
ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", 5330, 0),
ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", de+5330, 0),
slotOffset: 31,
},
{
ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", 5330, 1),
ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", de+5330, 1),
slotOffset: 31,
},
{
ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", 16777216, 0),
ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", de+16777216, 0),
slotOffset: 16,
},
},
@@ -87,33 +86,33 @@ func TestMigrations(t *testing.T) {
forwardLayout: LayoutNameByEpoch,
targets: []migrationTestTarget{
{
ident: ezIdent(t, "0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b", 1234, 0),
ident: ezIdent(t, "0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b", de+1234, 0),
},
{
ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", 5330, 0),
ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", de+5330, 0),
slotOffset: 31,
},
{
ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", 5330, 1),
ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", de+5330, 1),
slotOffset: 31,
},
{
ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", 16777216, 0),
ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", de+16777216, 0),
slotOffset: 16,
migrated: true,
},
{
ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", 16777216, 1),
ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", de+16777216, 1),
slotOffset: 16,
migrated: true,
},
{
ident: ezIdent(t, "0x42eabe3d2c125410cd226de6f2825fb7575ab896c3f52e43de1fa29e4c809aba", 16777217, 0),
ident: ezIdent(t, "0x42eabe3d2c125410cd226de6f2825fb7575ab896c3f52e43de1fa29e4c809aba", de+16777217, 0),
slotOffset: 16,
migrated: true,
},
{
ident: ezIdent(t, "0x666cea5034e22bd3b849cb33914cad59afd88ee08e4d5bc0e997411c945fbc1d", 11235, 1),
ident: ezIdent(t, "0x666cea5034e22bd3b849cb33914cad59afd88ee08e4d5bc0e997411c945fbc1d", de+11235, 1),
migrated: true,
},
},

@@ -88,11 +88,11 @@ func NewEphemeralBlobStorageWithMocker(t testing.TB) (*BlobMocker, *BlobStorage)
return &BlobMocker{fs: fs, bs: bs}, bs
}

func NewMockBlobStorageSummarizer(t *testing.T, set map[[32]byte][]int) BlobStorageSummarizer {
func NewMockBlobStorageSummarizer(t *testing.T, epoch primitives.Epoch, set map[[32]byte][]int) BlobStorageSummarizer {
c := newBlobStorageCache()
for k, v := range set {
for i := range v {
if err := c.ensure(blobIdent{root: k, epoch: 0, index: uint64(v[i])}); err != nil {
if err := c.ensure(blobIdent{root: k, epoch: epoch, index: uint64(v[i])}); err != nil {
t.Fatal(err)
}
}

@@ -142,6 +142,7 @@ func testRoots(n int) [][32]byte {
}

func TestLayoutPruneBefore(t *testing.T) {
electra := params.BeaconConfig().ElectraForkEpoch
roots := testRoots(10)
cases := []struct {
name string
@@ -153,27 +154,27 @@ func TestLayoutPruneBefore(t *testing.T) {
}{
{
name: "none pruned",
pruneBefore: 1,
pruneBefore: electra + 1,
pruned: []testIdent{},
remain: []testIdent{
{offset: 1, blobIdent: blobIdent{root: roots[0], epoch: 1, index: 0}},
{offset: 1, blobIdent: blobIdent{root: roots[1], epoch: 1, index: 0}},
{offset: 1, blobIdent: blobIdent{root: roots[0], epoch: electra + 1, index: 0}},
{offset: 1, blobIdent: blobIdent{root: roots[1], epoch: electra + 1, index: 0}},
},
},
{
name: "expected pruned before epoch",
pruneBefore: 3,
pruneBefore: electra + 3,
pruned: []testIdent{
{offset: 0, blobIdent: blobIdent{root: roots[0], epoch: 1, index: 0}},
{offset: 31, blobIdent: blobIdent{root: roots[1], epoch: 1, index: 5}},
{offset: 0, blobIdent: blobIdent{root: roots[2], epoch: 2, index: 0}},
{offset: 31, blobIdent: blobIdent{root: roots[3], epoch: 2, index: 3}},
{offset: 0, blobIdent: blobIdent{root: roots[0], epoch: electra + 1, index: 0}},
{offset: 31, blobIdent: blobIdent{root: roots[1], epoch: electra + 1, index: 5}},
{offset: 0, blobIdent: blobIdent{root: roots[2], epoch: electra + 2, index: 0}},
{offset: 31, blobIdent: blobIdent{root: roots[3], epoch: electra + 2, index: 3}},
},
remain: []testIdent{
{offset: 0, blobIdent: blobIdent{root: roots[4], epoch: 3, index: 2}}, // boundary
{offset: 31, blobIdent: blobIdent{root: roots[5], epoch: 3, index: 0}}, // boundary
{offset: 0, blobIdent: blobIdent{root: roots[6], epoch: 4, index: 1}},
{offset: 31, blobIdent: blobIdent{root: roots[7], epoch: 4, index: 5}},
{offset: 0, blobIdent: blobIdent{root: roots[4], epoch: electra + 3, index: 2}}, // boundary
{offset: 31, blobIdent: blobIdent{root: roots[5], epoch: electra + 3, index: 0}}, // boundary
{offset: 0, blobIdent: blobIdent{root: roots[6], epoch: electra + 4, index: 1}},
{offset: 31, blobIdent: blobIdent{root: roots[7], epoch: electra + 4, index: 5}},
},
sum: pruneSummary{blobsPruned: 4},
},

@@ -139,6 +139,7 @@ go_test(
"sender_test.go",
"service_test.go",
"subnets_test.go",
"topics_test.go",
"utils_test.go",
],
embed = [":go_default_library"],

@@ -134,13 +134,15 @@ func (s *Service) peerInspector(peerMap map[peer.ID]*pubsub.PeerScoreSnapshot) {

// pubsubOptions creates a list of options to configure our router with.
func (s *Service) pubsubOptions() []pubsub.Option {
filt := pubsub.NewAllowlistSubscriptionFilter(s.allTopicStrings()...)
filt = pubsub.WrapLimitSubscriptionFilter(filt, pubsubSubscriptionRequestLimit)
psOpts := []pubsub.Option{
pubsub.WithMessageSignaturePolicy(pubsub.StrictNoSign),
pubsub.WithNoAuthor(),
pubsub.WithMessageIdFn(func(pmsg *pubsubpb.Message) string {
return MsgID(s.genesisValidatorsRoot, pmsg)
}),
pubsub.WithSubscriptionFilter(s),
pubsub.WithSubscriptionFilter(filt),
pubsub.WithPeerOutboundQueueSize(int(s.cfg.QueueSize)),
pubsub.WithMaxMessageSize(int(MaxMessageSize())), // lint:ignore uintcast -- Max Message Size is a config value and is naturally bounded by networking limitations.
pubsub.WithValidateQueueSize(int(s.cfg.QueueSize)),

@@ -1,5 +1,15 @@
package p2p

import (
"encoding/hex"
"slices"
"strconv"

"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/encoder"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
)

const (
// GossipProtocolAndDigest represents the protocol and fork digest prefix in a gossip topic.
GossipProtocolAndDigest = "/eth2/%x/"
@@ -66,3 +76,129 @@
// DataColumnSubnetTopicFormat is the topic format for the data column subnet.
DataColumnSubnetTopicFormat = GossipProtocolAndDigest + GossipDataColumnSidecarMessage + "_%d"
)

// topic is a struct representing a single gossipsub topic.
// It can also be used to represent a set of subnet topics: see appendSubnetsBelow().
// topic is intended to be used as an immutable value - it is hashable so it can be used as a map key
// and it uses strings in order to leverage Go's string interning for memory efficiency.
type topic struct {
full string
digest string
message string
start primitives.Epoch
end primitives.Epoch
suffix string
subnet uint64
}

func (t topic) String() string {
return t.full
}

// sszEnc is used to get the protocol suffix for topics. This value has been effectively hardcoded
// since phase0.
var sszEnc = &encoder.SszNetworkEncoder{}

// newTopic constructs a topic value for an ordinary topic structure (without subnets).
func newTopic(start, end primitives.Epoch, digest [4]byte, message string) topic {
suffix := sszEnc.ProtocolSuffix()
t := topic{digest: hex.EncodeToString(digest[:]), message: message, start: start, end: end, suffix: suffix}
t.full = "/" + "eth2" + "/" + t.digest + "/" + t.message + t.suffix
return t
}

// newSubnetTopic constructs a topic value for a topic with a subnet structure.
func newSubnetTopic(start, end primitives.Epoch, digest [4]byte, message string, subnet uint64) topic {
t := newTopic(start, end, digest, message)
t.subnet = subnet
t.full = "/" + "eth2" + "/" + t.digest + "/" + t.message + "_" + strconv.Itoa(int(t.subnet)) + t.suffix
return t
}

// allTopicStrings returns the full topic string for all topics
// that could be derived from the current fork schedule.
func (s *Service) allTopicStrings() []string {
topics := s.allTopics()
topicStrs := make([]string, 0, len(topics))
for _, t := range topics {
topicStrs = append(topicStrs, t.String())
}
return topicStrs
}

// appendSubnetsBelow uses the value of top.subnet as the subnet count
// and creates a topic value for each subnet less than the subnet count, appending them all
// to appendTo.
func appendSubnetsBelow(top topic, digest [4]byte, appendTo []topic) []topic {
for i := range top.subnet {
appendTo = append(appendTo, newSubnetTopic(top.start, top.end, digest, top.message, i))
}
return appendTo
}

// allTopics returns all topics that could be derived from the current fork schedule.
func (s *Service) allTopics() []topic {
cfg := params.BeaconConfig()
// bellatrix: no special topics; electra: blobs topics handled all together
genesis, altair, capella := cfg.GenesisEpoch, cfg.AltairForkEpoch, cfg.CapellaForkEpoch
deneb, fulu, future := cfg.DenebForkEpoch, cfg.FuluForkEpoch, cfg.FarFutureEpoch
// Templates are starter topics - they have a placeholder digest and the subnet is set to the maximum value
// for the subnet (see how this is used in appendSubnetsBelow). These are not directly returned by the method,
// they are copied and modified for each digest where they apply based on the start and end epochs.
empty := [4]byte{0, 0, 0, 0} // empty digest for templates, replaced by real digests in per-fork copies.
templates := []topic{
newTopic(genesis, future, empty, GossipBlockMessage),
newTopic(genesis, future, empty, GossipAggregateAndProofMessage),
newTopic(genesis, future, empty, GossipExitMessage),
newTopic(genesis, future, empty, GossipProposerSlashingMessage),
newTopic(genesis, future, empty, GossipAttesterSlashingMessage),
newSubnetTopic(genesis, future, empty, GossipAttestationMessage, cfg.AttestationSubnetCount),
newSubnetTopic(altair, future, empty, GossipSyncCommitteeMessage, cfg.SyncCommitteeSubnetCount),
newTopic(altair, future, empty, GossipContributionAndProofMessage),
newTopic(altair, future, empty, GossipLightClientOptimisticUpdateMessage),
newTopic(altair, future, empty, GossipLightClientFinalityUpdateMessage),
newTopic(capella, future, empty, GossipBlsToExecutionChangeMessage),
}
last := params.GetNetworkScheduleEntry(genesis)
schedule := []params.NetworkScheduleEntry{last}
for next := params.NextNetworkScheduleEntry(last.Epoch); next.ForkDigest != last.ForkDigest; next = params.NextNetworkScheduleEntry(next.Epoch) {
schedule = append(schedule, next)
last = next
}
slices.Reverse(schedule) // reverse the fork schedule because it simplifies dealing with BPOs
fullTopics := make([]topic, 0, len(templates))
for _, top := range templates {
for _, entry := range schedule {
if top.start <= entry.Epoch && entry.Epoch < top.end {
if top.subnet > 0 { // subnet topics in the list above should set this value to the max subnet count: see appendSubnetsBelow
fullTopics = appendSubnetsBelow(top, entry.ForkDigest, fullTopics)
} else {
fullTopics = append(fullTopics, newTopic(top.start, top.end, entry.ForkDigest, top.message))
}
}
}
}
end := future
// We're iterating from high to low per the slices.Reverse above.
// So we'll update end = n.Epoch as we go down, and use that as the end for the next entry.
// This loop either adds blob or data column sidecar topics depending on the fork.
for _, entry := range schedule {
if entry.Epoch < deneb {
break
// note: there is a special case where deneb is the genesis fork, in which case
// we'll generate blob sidecar topics for the earlier schedule, but
// this only happens in devnets where it doesn't really matter.
}
message := GossipDataColumnSidecarMessage
subnets := cfg.DataColumnSidecarSubnetCount
if entry.Epoch < fulu {
message = GossipBlobSidecarMessage
subnets = uint64(cfg.MaxBlobsPerBlockAtEpoch(entry.Epoch))
}
// Set subnet to max value, appendSubnetsBelow will iterate every index up to that value.
top := newSubnetTopic(entry.Epoch, end, entry.ForkDigest, message, subnets)
fullTopics = appendSubnetsBelow(top, entry.ForkDigest, fullTopics)
end = entry.Epoch // These topics / subnet structures are mutually exclusive, so set each end to the next highest entry.
}
return fullTopics
}
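For reference, the topic strings assembled by `newTopic` and `newSubnetTopic` above follow the gossipsub naming `/eth2/<fork digest>/<message>[_<subnet>]` plus the encoder suffix (`/ssz_snappy` for the SSZ-snappy encoder, per the expected strings in the test below). A minimal self-contained sketch of that string layout:

```go
package main

import (
	"encoding/hex"
	"fmt"
)

// topicString mirrors the layout built by newTopic and newSubnetTopic:
// "/eth2/" + hex fork digest + "/" + message (+ "_" + subnet) + "/ssz_snappy".
// A negative subnet means "no subnet suffix".
func topicString(digest [4]byte, message string, subnet int) string {
	s := "/eth2/" + hex.EncodeToString(digest[:]) + "/" + message
	if subnet >= 0 {
		s = fmt.Sprintf("%s_%d", s, subnet)
	}
	return s + "/ssz_snappy"
}

func main() {
	fmt.Println(topicString([4]byte{0xad, 0x53, 0x2c, 0xeb}, "beacon_block", -1))
	// /eth2/ad532ceb/beacon_block/ssz_snappy
	fmt.Println(topicString([4]byte{0xcc, 0x2c, 0x5c, 0xdb}, "data_column_sidecar", 0))
	// /eth2/cc2c5cdb/data_column_sidecar_0/ssz_snappy
}
```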
beacon-chain/p2p/topics_test.go (new file, 70 lines)
@@ -0,0 +1,70 @@
package p2p

import (
"encoding/hex"
"testing"

"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/testing/require"
)

func TestAllTopics(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.MainnetConfig()
cfg.FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096*2
params.OverrideBeaconConfig(cfg)
s := &Service{}
all := s.allTopicStrings()
tops := map[string]struct{}{}
for _, t := range all {
tops[t] = struct{}{}
}
require.Equal(t, len(tops), len(all), "duplicate topics found")
expected := []string{
"/eth2/ad532ceb/sync_committee_contribution_and_proof/ssz_snappy",
"/eth2/ad532ceb/beacon_aggregate_and_proof/ssz_snappy",
"/eth2/ad532ceb/beacon_block/ssz_snappy",
"/eth2/ad532ceb/bls_to_execution_change/ssz_snappy",
"/eth2/afcaaba0/beacon_attestation_19/ssz_snappy",
"/eth2/cc2c5cdb/data_column_sidecar_0/ssz_snappy",
"/eth2/cc2c5cdb/data_column_sidecar_127/ssz_snappy",
}
forks := []primitives.Epoch{cfg.GenesisEpoch, cfg.AltairForkEpoch,
cfg.BellatrixForkEpoch, cfg.CapellaForkEpoch, cfg.DenebForkEpoch,
cfg.ElectraForkEpoch, cfg.FuluForkEpoch}
// sanity check: we should always have a block topic.
// construct it by hand in case there are bugs in newTopic.
for _, f := range forks {
digest := params.ForkDigest(f)
expected = append(expected, "/eth2/"+hex.EncodeToString(digest[:])+"/beacon_block/ssz_snappy")
}
for _, e := range expected {
_, ok := tops[e]
require.Equal(t, true, ok)
}
// we should have no data column subnets before fulu
electraColumn := newSubnetTopic(cfg.ElectraForkEpoch, cfg.FuluForkEpoch,
params.ForkDigest(params.BeaconConfig().ElectraForkEpoch),
GossipDataColumnSidecarMessage,
cfg.DataColumnSidecarSubnetCount-1)
// we should have no blob sidecars before deneb or after electra
blobBeforeDeneb := newSubnetTopic(cfg.DenebForkEpoch-1, cfg.DenebForkEpoch,
params.ForkDigest(cfg.DenebForkEpoch-1),
GossipBlobSidecarMessage,
uint64(cfg.MaxBlobsPerBlockAtEpoch(cfg.DenebForkEpoch-1))-1)
blobAfterElectra := newSubnetTopic(cfg.FuluForkEpoch, cfg.FarFutureEpoch,
params.ForkDigest(cfg.FuluForkEpoch),
GossipBlobSidecarMessage,
uint64(cfg.MaxBlobsPerBlockAtEpoch(cfg.FuluForkEpoch))-1)
unexpected := []string{
"/eth2/cc2c5cdb/data_column_sidecar_128/ssz_snappy",
electraColumn.String(),
blobBeforeDeneb.String(),
blobAfterElectra.String(),
}
for _, e := range unexpected {
_, ok := tops[e]
require.Equal(t, false, ok)
}
}
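The descending-schedule loop in `allTopics` above assigns each blob/data-column topic a half-open `[entry.Epoch, end)` epoch range by carrying `end` downward as it iterates. A minimal sketch of that bookkeeping, with simplified stand-in types:

```go
package main

import "fmt"

// Epoch is a simplified stand-in for the consensus Epoch primitive.
type Epoch uint64

func main() {
	const farFuture = Epoch(^uint64(0))
	// Descending fork/BPO epochs, as produced by slices.Reverse in allTopics.
	schedule := []Epoch{300, 200, 100, 0}
	end := farFuture
	for _, start := range schedule {
		fmt.Printf("sidecar topics active for epochs [%d, %d)\n", start, end)
		end = start // the next (lower) entry ends where this one starts
	}
}
```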
|
||||
@@ -4876,8 +4876,16 @@ func TestServer_broadcastBlobSidecars(t *testing.T) {
|
||||
}
|
||||
|
||||
func Test_validateBlobs(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096*2
|
||||
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
|
||||
es := util.SlotAtEpoch(t, params.BeaconConfig().ElectraForkEpoch)
|
||||
fe := params.BeaconConfig().FuluForkEpoch
|
||||
fs := util.SlotAtEpoch(t, fe)
|
||||
|
||||
require.NoError(t, kzg.Start())
|
||||
|
||||
denebMax := params.BeaconConfig().MaxBlobsPerBlock(ds)
|
||||
blob := util.GetRandBlob(123)
|
||||
// Generate proper commitment and proof for the blob
|
||||
var kzgBlob kzg.Blob
|
||||
@@ -4887,6 +4895,7 @@ func Test_validateBlobs(t *testing.T) {
|
||||
proof, err := kzg.ComputeBlobKZGProof(&kzgBlob, commitment)
|
||||
require.NoError(t, err)
|
||||
blk := util.NewBeaconBlockDeneb()
|
||||
blk.Block.Slot = ds
|
||||
blk.Block.Body.BlobKzgCommitments = [][]byte{commitment[:]}
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
@@ -4902,10 +4911,11 @@ func Test_validateBlobs(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.ErrorContains(t, "could not verify blob proofs", s.validateBlobs(b, [][]byte{blob[:]}, [][]byte{proof[:]}))
|
||||
|
||||
electraMax := params.BeaconConfig().MaxBlobsPerBlock(es)
|
||||
blobs := [][]byte{}
|
||||
commitments := [][]byte{}
|
||||
proofs := [][]byte{}
|
||||
for i := 0; i < 10; i++ {
|
||||
for i := 0; i < electraMax+1; i++ {
|
||||
blobs = append(blobs, blob[:])
|
||||
commitments = append(commitments, commitment[:])
|
||||
proofs = append(proofs, proof[:])
|
||||
@@ -4923,6 +4933,7 @@ func Test_validateBlobs(t *testing.T) {
|
||||
|
||||
t.Run("Deneb block with valid single blob", func(t *testing.T) {
|
||||
blk := util.NewBeaconBlockDeneb()
|
||||
blk.Block.Slot = ds
|
||||
blk.Block.Body.BlobKzgCommitments = [][]byte{commitment[:]}
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
@@ -4931,107 +4942,54 @@ func Test_validateBlobs(t *testing.T) {
})

t.Run("Deneb block with max blobs (6)", func(t *testing.T) {
cfg := params.BeaconConfig().Copy()
defer params.OverrideBeaconConfig(cfg)

testCfg := params.BeaconConfig().Copy()
testCfg.DenebForkEpoch = 0
testCfg.ElectraForkEpoch = 100
testCfg.DeprecatedMaxBlobsPerBlock = 6
params.OverrideBeaconConfig(testCfg)

blk := util.NewBeaconBlockDeneb()
blk.Block.Slot = 10 // Deneb slot
blk.Block.Slot = ds
blk.Block.Body.BlobKzgCommitments = commitments[:6]
b, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
s := &Server{}
// Should pass with exactly 6 blobs
require.NoError(t, s.validateBlobs(b, blobs[:6], proofs[:6]))
require.NoError(t, s.validateBlobs(b, blobs[:denebMax], proofs[:denebMax]))
})

t.Run("Deneb block exceeding max blobs", func(t *testing.T) {
cfg := params.BeaconConfig().Copy()
defer params.OverrideBeaconConfig(cfg)

testCfg := params.BeaconConfig().Copy()
testCfg.DenebForkEpoch = 0
testCfg.ElectraForkEpoch = 100
testCfg.DeprecatedMaxBlobsPerBlock = 6
params.OverrideBeaconConfig(testCfg)

blk := util.NewBeaconBlockDeneb()
blk.Block.Slot = 10 // Deneb slot
blk.Block.Slot = ds
blk.Block.Body.BlobKzgCommitments = commitments[:7]
b, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
s := &Server{}
// Should fail with 7 blobs when max is 6
err = s.validateBlobs(b, blobs[:7], proofs[:7])
require.ErrorContains(t, "number of blobs over max, 7 > 6", err)
err = s.validateBlobs(b, blobs[:denebMax+1], proofs[:denebMax+1])
require.ErrorContains(t, "number of blobs over max", err)
})

t.Run("Electra block with valid blobs", func(t *testing.T) {
cfg := params.BeaconConfig().Copy()
defer params.OverrideBeaconConfig(cfg)

// Set up Electra config with max 9 blobs
testCfg := params.BeaconConfig().Copy()
testCfg.DenebForkEpoch = 0
testCfg.ElectraForkEpoch = 5
testCfg.DeprecatedMaxBlobsPerBlock = 6
testCfg.DeprecatedMaxBlobsPerBlockElectra = 9
params.OverrideBeaconConfig(testCfg)

blk := util.NewBeaconBlockElectra()
blk.Block.Slot = 160 // Electra slot (epoch 5+)
blk.Block.Slot = es
blk.Block.Body.BlobKzgCommitments = commitments[:9]
b, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
s := &Server{}
// Should pass with 9 blobs in Electra
require.NoError(t, s.validateBlobs(b, blobs[:9], proofs[:9]))
require.NoError(t, s.validateBlobs(b, blobs[:electraMax], proofs[:electraMax]))
})

t.Run("Electra block exceeding max blobs", func(t *testing.T) {
cfg := params.BeaconConfig().Copy()
defer params.OverrideBeaconConfig(cfg)

// Set up Electra config with max 9 blobs
testCfg := params.BeaconConfig().Copy()
testCfg.DenebForkEpoch = 0
testCfg.ElectraForkEpoch = 5
testCfg.DeprecatedMaxBlobsPerBlock = 6
testCfg.DeprecatedMaxBlobsPerBlockElectra = 9
params.OverrideBeaconConfig(testCfg)

blk := util.NewBeaconBlockElectra()
blk.Block.Slot = 160 // Electra slot
blk.Block.Body.BlobKzgCommitments = commitments[:10]
blk.Block.Slot = es
blk.Block.Body.BlobKzgCommitments = commitments[:electraMax+1]
b, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
s := &Server{}
// Should fail with 10 blobs when max is 9
err = s.validateBlobs(b, blobs[:10], proofs[:10])
require.ErrorContains(t, "number of blobs over max, 10 > 9", err)
err = s.validateBlobs(b, blobs[:electraMax+1], proofs[:electraMax+1])
require.ErrorContains(t, "number of blobs over max", err)
})
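
Editor's note: the `ds`, `es`, and `fs` slots threaded through these tests come from the new `util.SlotAtEpoch` helper, whose body is not shown in this diff. A minimal sketch, assuming it only wraps `slots.EpochStart` and fails the test on error:

// Hedged sketch of the assumed helper; the real implementation may differ.
func SlotAtEpoch(t *testing.T, e primitives.Epoch) primitives.Slot {
	s, err := slots.EpochStart(e)
	require.NoError(t, err)
	return s
}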

t.Run("Fulu block with valid cell proofs", func(t *testing.T) {
cfg := params.BeaconConfig().Copy()
defer params.OverrideBeaconConfig(cfg)

testCfg := params.BeaconConfig().Copy()
testCfg.DenebForkEpoch = 0
testCfg.ElectraForkEpoch = 5
testCfg.FuluForkEpoch = 10
testCfg.DeprecatedMaxBlobsPerBlock = 6
testCfg.DeprecatedMaxBlobsPerBlockElectra = 9
testCfg.NumberOfColumns = 128 // Standard PeerDAS configuration
params.OverrideBeaconConfig(testCfg)

// Create Fulu block with proper cell proofs
blk := util.NewBeaconBlockFulu()
blk.Block.Slot = 320 // Epoch 10 (Fulu fork)
blk.Block.Slot = fs

// Generate valid commitments and cell proofs for testing
blobCount := 2
@@ -5075,18 +5033,8 @@ func Test_validateBlobs(t *testing.T) {
})

t.Run("Fulu block with invalid cell proof count", func(t *testing.T) {
cfg := params.BeaconConfig().Copy()
defer params.OverrideBeaconConfig(cfg)

testCfg := params.BeaconConfig().Copy()
testCfg.DenebForkEpoch = 0
testCfg.ElectraForkEpoch = 5
testCfg.FuluForkEpoch = 10
testCfg.NumberOfColumns = 128
params.OverrideBeaconConfig(testCfg)

blk := util.NewBeaconBlockFulu()
blk.Block.Slot = 320 // Epoch 10 (Fulu fork)
blk.Block.Slot = fs

// Create valid commitments but wrong number of cell proofs
blobCount := 2
@@ -5123,6 +5071,7 @@ func Test_validateBlobs(t *testing.T) {
require.NoError(t, err)

blk := util.NewBeaconBlockDeneb()
blk.Block.Slot = ds
blk.Block.Body.BlobKzgCommitments = [][]byte{sk.PublicKey().Marshal()}
b, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
@@ -5134,6 +5083,7 @@ func Test_validateBlobs(t *testing.T) {

t.Run("empty blobs and proofs should pass", func(t *testing.T) {
blk := util.NewBeaconBlockDeneb()
blk.Block.Slot = ds
blk.Block.Body.BlobKzgCommitments = [][]byte{}
b, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
@@ -5148,53 +5098,48 @@ func Test_validateBlobs(t *testing.T) {

// Set up config with BlobSchedule (BPO - Blob Production Optimization)
testCfg := params.BeaconConfig().Copy()
testCfg.DenebForkEpoch = 0
testCfg.ElectraForkEpoch = 100
testCfg.FuluForkEpoch = 200
testCfg.DeprecatedMaxBlobsPerBlock = 6
testCfg.DeprecatedMaxBlobsPerBlockElectra = 9
// Define blob schedule with progressive increases
testCfg.BlobSchedule = []params.BlobScheduleEntry{
{Epoch: 0, MaxBlobsPerBlock: 3}, // Start with 3 blobs
{Epoch: 10, MaxBlobsPerBlock: 5}, // Increase to 5 at epoch 10
{Epoch: 20, MaxBlobsPerBlock: 7}, // Increase to 7 at epoch 20
{Epoch: 30, MaxBlobsPerBlock: 9}, // Increase to 9 at epoch 30
{Epoch: fe + 1, MaxBlobsPerBlock: 3}, // Start with 3 blobs
{Epoch: fe + 10, MaxBlobsPerBlock: 5}, // Increase to 5 at epoch 10
{Epoch: fe + 20, MaxBlobsPerBlock: 7}, // Increase to 7 at epoch 20
{Epoch: fe + 30, MaxBlobsPerBlock: 9}, // Increase to 9 at epoch 30
}
params.OverrideBeaconConfig(testCfg)

s := &Server{}

// Test epoch 0-9: max 3 blobs
t.Run("epoch 0-9: max 3 blobs", func(t *testing.T) {
t.Run("deneb under and over max", func(t *testing.T) {
blk := util.NewBeaconBlockDeneb()
blk.Block.Slot = 5 // Epoch 0
blk.Block.Body.BlobKzgCommitments = commitments[:3]
blk.Block.Slot = ds
blk.Block.Body.BlobKzgCommitments = commitments[:denebMax]
b, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, s.validateBlobs(b, blobs[:3], proofs[:3]))
require.NoError(t, s.validateBlobs(b, blobs[:denebMax], proofs[:denebMax]))

// Should fail with 4 blobs
blk.Block.Body.BlobKzgCommitments = commitments[:4]
b, err = blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
err = s.validateBlobs(b, blobs[:4], proofs[:4])
require.ErrorContains(t, "number of blobs over max, 4 > 3", err)
err = s.validateBlobs(b, blobs[:denebMax+1], proofs[:denebMax+1])
require.ErrorContains(t, "number of blobs over max", err)
})

// Test epoch 30+: max 9 blobs
t.Run("epoch 30+: max 9 blobs", func(t *testing.T) {
blk := util.NewBeaconBlockDeneb()
blk.Block.Slot = 960 // Epoch 30
blk.Block.Body.BlobKzgCommitments = commitments[:9]
t.Run("different max in electra", func(t *testing.T) {
blk := util.NewBeaconBlockElectra()
blk.Block.Slot = es
blk.Block.Body.BlobKzgCommitments = commitments[:electraMax]
b, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, s.validateBlobs(b, blobs[:9], proofs[:9]))
require.NoError(t, s.validateBlobs(b, blobs[:electraMax], proofs[:electraMax]))

// Should fail with 10 blobs
blk.Block.Body.BlobKzgCommitments = commitments[:10]
// exceed the electra max
blk.Block.Body.BlobKzgCommitments = commitments[:electraMax+1]
b, err = blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
err = s.validateBlobs(b, blobs[:10], proofs[:10])
err = s.validateBlobs(b, blobs[:electraMax+1], proofs[:electraMax+1])
require.ErrorContains(t, "number of blobs over max, 10 > 9", err)
})
})
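
Editor's note: the schedule above is now expressed relative to `fe` (the Fulu fork epoch), since blob-schedule (BPO) entries only take effect from Fulu onward. The resolution logic is not part of this diff; a hedged sketch of how a per-epoch lookup over such a sorted schedule plausibly works, using local types so nothing about `params.BlobScheduleEntry` is assumed:

// maxBlobsAt picks the last entry whose activation epoch is <= e,
// falling back to the pre-schedule default when no entry applies.
type blobScheduleEntry struct {
	epoch    primitives.Epoch
	maxBlobs int
}

func maxBlobsAt(schedule []blobScheduleEntry, e primitives.Epoch, fallback int) int {
	max := fallback
	for _, entry := range schedule {
		if entry.epoch > e {
			break // entries sorted ascending by activation epoch
		}
		max = entry.maxBlobs
	}
	return max
}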

@@ -51,7 +51,6 @@ go_test(
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
],
)

@@ -30,18 +30,19 @@ import (
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/ethereum/go-ethereum/common/hexutil"
)

func TestBlobs(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.DenebForkEpoch = 1
cfg.FuluForkEpoch = cfg.ElectraForkEpoch + 4096*2
params.OverrideBeaconConfig(cfg)
es := util.SlotAtEpoch(t, cfg.ElectraForkEpoch)
ds := util.SlotAtEpoch(t, cfg.DenebForkEpoch)

db := testDB.SetupDB(t)
denebBlock, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 123, 4)
denebBlock, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, es, 4)
require.NoError(t, db.SaveBlock(t.Context(), denebBlock))
bs := filesystem.NewEphemeralBlobStorage(t)
testSidecars := verification.FakeVerifySliceForTest(t, blobs)
@@ -171,7 +172,7 @@ func TestBlobs(t *testing.T) {
require.Equal(t, false, resp.Finalized)
})
t.Run("slot", func(t *testing.T) {
u := "http://foo.example/123"
u := fmt.Sprintf("http://foo.example/%d", es)
request := httptest.NewRequest("GET", u, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
@@ -195,7 +196,7 @@ func TestBlobs(t *testing.T) {
require.Equal(t, false, resp.Finalized)
})
t.Run("slot not found", func(t *testing.T) {
u := "http://foo.example/122"
u := fmt.Sprintf("http://foo.example/%d", es-1)
request := httptest.NewRequest("GET", u, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
@@ -212,7 +213,7 @@ func TestBlobs(t *testing.T) {
assert.Equal(t, http.StatusNotFound, writer.Code)
})
t.Run("one blob only", func(t *testing.T) {
u := "http://foo.example/123?indices=2"
u := fmt.Sprintf("http://foo.example/%d?indices=2", es)
request := httptest.NewRequest("GET", u, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
@@ -243,7 +244,7 @@ func TestBlobs(t *testing.T) {
require.Equal(t, false, resp.Finalized)
})
t.Run("no blobs returns an empty array", func(t *testing.T) {
u := "http://foo.example/123"
u := fmt.Sprintf("http://foo.example/%d", es)
request := httptest.NewRequest("GET", u, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
@@ -267,10 +268,8 @@ func TestBlobs(t *testing.T) {
require.Equal(t, false, resp.Finalized)
})
t.Run("blob index over max", func(t *testing.T) {
forkslot, err := slots.EpochStart(params.BeaconConfig().DenebForkEpoch)
require.NoError(t, err)
overLimit := params.BeaconConfig().MaxBlobsPerBlock(forkslot)
u := fmt.Sprintf("http://foo.example/123?indices=%d", overLimit)
overLimit := params.BeaconConfig().MaxBlobsPerBlock(ds)
u := fmt.Sprintf("http://foo.example/%d?indices=%d", es, overLimit)
request := httptest.NewRequest("GET", u, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
@@ -284,7 +283,7 @@ func TestBlobs(t *testing.T) {
assert.Equal(t, true, strings.Contains(e.Message, fmt.Sprintf("requested blob indices [%d] are invalid", overLimit)))
})
t.Run("outside retention period returns 200 with what we have", func(t *testing.T) {
u := "http://foo.example/123"
u := fmt.Sprintf("http://foo.example/%d", es)
request := httptest.NewRequest("GET", u, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
@@ -308,13 +307,13 @@ func TestBlobs(t *testing.T) {
require.Equal(t, false, resp.Finalized)
})
t.Run("block without commitments returns 200 w/empty list ", func(t *testing.T) {
denebBlock, _ := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 333, 0)
denebBlock, _ := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, es+128, 0)
commitments, err := denebBlock.Block().Body().BlobKzgCommitments()
require.NoError(t, err)
require.Equal(t, len(commitments), 0)
require.NoError(t, db.SaveBlock(t.Context(), denebBlock))

u := "http://foo.example/333"
u := fmt.Sprintf("http://foo.example/%d", es+128)
request := httptest.NewRequest("GET", u, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
@@ -426,19 +425,17 @@ func TestBlobs(t *testing.T) {
func TestBlobs_Electra(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.DenebForkEpoch = 0
cfg.ElectraForkEpoch = 1
cfg.FuluForkEpoch = cfg.ElectraForkEpoch + 4096*2
cfg.BlobSchedule = []params.BlobScheduleEntry{
{Epoch: 0, MaxBlobsPerBlock: 6},
{Epoch: 1, MaxBlobsPerBlock: 9},
{Epoch: cfg.FuluForkEpoch + 4096, MaxBlobsPerBlock: 6},
{Epoch: cfg.FuluForkEpoch + 4096 + 128, MaxBlobsPerBlock: 9},
}
params.OverrideBeaconConfig(cfg)

es := util.SlotAtEpoch(t, cfg.ElectraForkEpoch)
db := testDB.SetupDB(t)
forkslot, err := slots.EpochStart(params.BeaconConfig().ElectraForkEpoch)
require.NoError(t, err)
overLimit := params.BeaconConfig().MaxBlobsPerBlock(forkslot)
electraBlock, blobs := util.GenerateTestElectraBlockWithSidecar(t, [32]byte{}, 123, overLimit)
overLimit := params.BeaconConfig().MaxBlobsPerBlock(es)
electraBlock, blobs := util.GenerateTestElectraBlockWithSidecar(t, [32]byte{}, es, overLimit)
require.NoError(t, db.SaveBlock(t.Context(), electraBlock))
bs := filesystem.NewEphemeralBlobStorage(t)
testSidecars := verification.FakeVerifySliceForTest(t, blobs)
@@ -456,7 +453,7 @@ func TestBlobs_Electra(t *testing.T) {
TimeFetcher: mockChainService,
}
t.Run("max blobs for electra", func(t *testing.T) {
u := "http://foo.example/123"
u := fmt.Sprintf("http://foo.example/%d", es)
request := httptest.NewRequest("GET", u, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
@@ -487,8 +484,8 @@ func TestBlobs_Electra(t *testing.T) {
require.Equal(t, false, resp.Finalized)
})
t.Run("requested blob index at max", func(t *testing.T) {
limit := overLimit - 1
u := fmt.Sprintf("http://foo.example/123?indices=%d", limit)
limit := params.BeaconConfig().MaxBlobsPerBlock(es) - 1
u := fmt.Sprintf("http://foo.example/%d?indices=%d", es, limit)
request := httptest.NewRequest("GET", u, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
@@ -519,7 +516,8 @@ func TestBlobs_Electra(t *testing.T) {
require.Equal(t, false, resp.Finalized)
})
t.Run("blob index over max", func(t *testing.T) {
u := fmt.Sprintf("http://foo.example/123?indices=%d", overLimit)
overLimit := params.BeaconConfig().MaxBlobsPerBlock(es)
u := fmt.Sprintf("http://foo.example/%d?indices=%d", es, overLimit)
request := httptest.NewRequest("GET", u, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
@@ -535,6 +533,7 @@ func TestBlobs_Electra(t *testing.T) {
}

func Test_parseIndices(t *testing.T) {
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
tests := []struct {
name string
query string
@@ -564,7 +563,7 @@ func Test_parseIndices(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := parseIndices(&url.URL{RawQuery: tt.query}, 0)
got, err := parseIndices(&url.URL{RawQuery: tt.query}, ds)
if err != nil && tt.wantErr != "" {
require.StringContains(t, tt.wantErr, err.Error())
return
@@ -593,6 +592,7 @@ func TestGetBlobs(t *testing.T) {
{Epoch: 20, MaxBlobsPerBlock: 12}, // Fulu
}
params.OverrideBeaconConfig(cfg)
es := util.SlotAtEpoch(t, cfg.ElectraForkEpoch)

db := testDB.SetupDB(t)
denebBlock, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 123, 4)
@@ -1014,9 +1014,7 @@ func TestGetBlobs(t *testing.T) {

// Test for Electra fork
t.Run("electra max blobs", func(t *testing.T) {
forkslot, err := slots.EpochStart(params.BeaconConfig().ElectraForkEpoch)
require.NoError(t, err)
overLimit := params.BeaconConfig().MaxBlobsPerBlock(forkslot)
overLimit := params.BeaconConfig().MaxBlobsPerBlock(es)
electraBlock, electraBlobs := util.GenerateTestElectraBlockWithSidecar(t, [32]byte{}, 323, overLimit)
require.NoError(t, db.SaveBlock(t.Context(), electraBlock))
electraBs := filesystem.NewEphemeralBlobStorage(t)

@@ -3,7 +3,6 @@ package lookup
import (
"context"
"fmt"
"math"
"strconv"

"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain"
@@ -284,14 +283,9 @@ func (p *BeaconDbBlocker) Blobs(ctx context.Context, id string, opts ...options.
return make([]*blocks.VerifiedROBlob, 0), nil
}

// Compute the first Fulu slot.
fuluForkEpoch := params.BeaconConfig().FuluForkEpoch
fuluForkSlot := primitives.Slot(math.MaxUint64)
if fuluForkEpoch != primitives.Epoch(math.MaxUint64) {
fuluForkSlot, err = slots.EpochStart(fuluForkEpoch)
if err != nil {
return nil, &core.RpcError{Err: errors.Wrap(err, "could not calculate Fulu start slot"), Reason: core.Internal}
}
fuluForkSlot, err := slots.EpochStart(params.BeaconConfig().FuluForkEpoch)
if err != nil {
return nil, &core.RpcError{Err: errors.Wrap(err, "could not calculate Fulu start slot"), Reason: core.Internal}
}

// Convert versioned hashes to indices if provided

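Editor's note: the hunk above drops the explicit far-future sentinel check around the Fulu fork epoch. The premise of that simplification is not shown in this diff; a hedged sketch of the assumed behavior:

// Assumption (not confirmed by this hunk): slots.EpochStart reports an
// error when epoch*SlotsPerEpoch overflows a Slot, so an unset
// (math.MaxUint64) FuluForkEpoch surfaces through the RpcError branch
// above rather than needing a sentinel slot value.
if _, err := slots.EpochStart(primitives.Epoch(math.MaxUint64)); err == nil {
	// under that assumption this branch is unreachable
}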
@@ -190,7 +190,7 @@ func TestBlobsErrorHandling(t *testing.T) {

t.Run("non-existent block by slot returns 404", func(t *testing.T) {
blocker := &BeaconDbBlocker{
BeaconDB: db,
BeaconDB: db,
ChainInfoFetcher: &mockChain.ChainService{},
}

@@ -275,39 +275,19 @@ func TestBlobsErrorHandling(t *testing.T) {
}

func TestGetBlob(t *testing.T) {
const (
slot = 123
blobCount = 4
denebForEpoch = 1
fuluForkEpoch = 2
)

setupDeneb := func(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.DenebForkEpoch = denebForEpoch
params.OverrideBeaconConfig(cfg)
}

setupFulu := func(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.DenebForkEpoch = denebForEpoch
cfg.FuluForkEpoch = fuluForkEpoch
params.OverrideBeaconConfig(cfg)
}

const blobCount = 4
ctx := t.Context()
db := testDB.SetupDB(t)
params.SetupTestConfigCleanup(t)
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().DenebForkEpoch + 4096*2

// Start the trusted setup.
err := kzg.Start()
require.NoError(t, err)
db := testDB.SetupDB(t)
require.NoError(t, kzg.Start())

// Create and save Deneb block and blob sidecars.
_, blobStorage := filesystem.NewEphemeralBlobStorageAndFs(t)

denebBlock, storedBlobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [fieldparams.RootLength]byte{}, slot, blobCount)
denebBlock, storedBlobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [fieldparams.RootLength]byte{}, ds, blobCount, util.WithDenebSlot(ds))
denebBlockRoot := denebBlock.Root()

verifiedStoredSidecars := verification.FakeVerifySliceForTest(t, storedBlobSidecars)
@@ -316,13 +296,14 @@ func TestGetBlob(t *testing.T) {
require.NoError(t, err)
}

err = db.SaveBlock(t.Context(), denebBlock)
err := db.SaveBlock(t.Context(), denebBlock)
require.NoError(t, err)

// Create Electra block and blob sidecars. (Electra block = Fulu block),
// save the block, convert blob sidecars to data column sidecars and save the block.
fuluForkSlot := fuluForkEpoch * params.BeaconConfig().SlotsPerEpoch
fuluBlock, fuluBlobSidecars := util.GenerateTestElectraBlockWithSidecar(t, [fieldparams.RootLength]byte{}, fuluForkSlot, blobCount)
fs := util.SlotAtEpoch(t, params.BeaconConfig().FuluForkEpoch)
dsStr := fmt.Sprintf("%d", ds)
fuluBlock, fuluBlobSidecars := util.GenerateTestElectraBlockWithSidecar(t, [fieldparams.RootLength]byte{}, fs, blobCount)
fuluBlockRoot := fuluBlock.Root()

cellsAndProofsList := make([]kzg.CellsAndProofs, 0, len(fuluBlobSidecars))
@@ -347,8 +328,6 @@ func TestGetBlob(t *testing.T) {
require.NoError(t, err)

t.Run("genesis", func(t *testing.T) {
setupDeneb(t)

blocker := &BeaconDbBlocker{}
_, rpcErr := blocker.Blobs(ctx, "genesis")
require.Equal(t, http.StatusBadRequest, core.ErrorReasonToHTTP(rpcErr.Reason))
@@ -356,8 +335,6 @@ func TestGetBlob(t *testing.T) {
})

t.Run("head", func(t *testing.T) {
setupDeneb(t)

blocker := &BeaconDbBlocker{
ChainInfoFetcher: &mockChain.ChainService{
Root: denebBlockRoot[:],
@@ -388,8 +365,6 @@ func TestGetBlob(t *testing.T) {
})

t.Run("finalized", func(t *testing.T) {
setupDeneb(t)

blocker := &BeaconDbBlocker{
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: denebBlockRoot[:]}},
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
@@ -405,8 +380,6 @@ func TestGetBlob(t *testing.T) {
})

t.Run("justified", func(t *testing.T) {
setupDeneb(t)

blocker := &BeaconDbBlocker{
ChainInfoFetcher: &mockChain.ChainService{CurrentJustifiedCheckPoint: &ethpb.Checkpoint{Root: denebBlockRoot[:]}},
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
@@ -422,8 +395,6 @@ func TestGetBlob(t *testing.T) {
})

t.Run("root", func(t *testing.T) {
setupDeneb(t)

blocker := &BeaconDbBlocker{
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
Genesis: time.Now(),
@@ -438,8 +409,6 @@ func TestGetBlob(t *testing.T) {
})

t.Run("slot", func(t *testing.T) {
setupDeneb(t)

blocker := &BeaconDbBlocker{
ChainInfoFetcher: &mockChain.ChainService{},
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
@@ -449,7 +418,7 @@ func TestGetBlob(t *testing.T) {
BlobStorage: blobStorage,
}

verifiedBlobs, rpcErr := blocker.Blobs(ctx, "123")
verifiedBlobs, rpcErr := blocker.Blobs(ctx, dsStr)
require.IsNil(t, rpcErr)
require.Equal(t, blobCount, len(verifiedBlobs))
})
@@ -457,8 +426,6 @@ func TestGetBlob(t *testing.T) {
t.Run("one blob only", func(t *testing.T) {
const index = 2

setupDeneb(t)

blocker := &BeaconDbBlocker{
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: denebBlockRoot[:]}},
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
@@ -468,7 +435,7 @@ func TestGetBlob(t *testing.T) {
BlobStorage: blobStorage,
}

retrievedVerifiedSidecars, rpcErr := blocker.Blobs(ctx, "123", options.WithIndices([]int{index}))
retrievedVerifiedSidecars, rpcErr := blocker.Blobs(ctx, dsStr, options.WithIndices([]int{index}))
require.IsNil(t, rpcErr)
require.Equal(t, 1, len(retrievedVerifiedSidecars))

@@ -483,8 +450,6 @@ func TestGetBlob(t *testing.T) {
})

t.Run("no blobs returns an empty array", func(t *testing.T) {
setupDeneb(t)

blocker := &BeaconDbBlocker{
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: denebBlockRoot[:]}},
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
@@ -494,14 +459,12 @@ func TestGetBlob(t *testing.T) {
BlobStorage: filesystem.NewEphemeralBlobStorage(t),
}

verifiedBlobs, rpcErr := blocker.Blobs(ctx, "123")
verifiedBlobs, rpcErr := blocker.Blobs(ctx, dsStr)
require.IsNil(t, rpcErr)
require.Equal(t, 0, len(verifiedBlobs))
})

t.Run("no blob at index", func(t *testing.T) {
setupDeneb(t)

blocker := &BeaconDbBlocker{
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: denebBlockRoot[:]}},
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
@@ -512,14 +475,12 @@ func TestGetBlob(t *testing.T) {
}

noBlobIndex := len(storedBlobSidecars) + 1
_, rpcErr := blocker.Blobs(ctx, "123", options.WithIndices([]int{0, noBlobIndex}))
_, rpcErr := blocker.Blobs(ctx, dsStr, options.WithIndices([]int{0, noBlobIndex}))
require.NotNil(t, rpcErr)
require.Equal(t, core.ErrorReason(core.NotFound), rpcErr.Reason)
})

t.Run("index too big", func(t *testing.T) {
setupDeneb(t)

blocker := &BeaconDbBlocker{
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: denebBlockRoot[:]}},
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
@@ -528,14 +489,12 @@ func TestGetBlob(t *testing.T) {
BeaconDB: db,
BlobStorage: blobStorage,
}
_, rpcErr := blocker.Blobs(ctx, "123", options.WithIndices([]int{0, math.MaxInt}))
_, rpcErr := blocker.Blobs(ctx, dsStr, options.WithIndices([]int{0, math.MaxInt}))
require.NotNil(t, rpcErr)
require.Equal(t, core.ErrorReason(core.BadRequest), rpcErr.Reason)
})

t.Run("not enough stored data column sidecars", func(t *testing.T) {
setupFulu(t)

_, dataColumnStorage := filesystem.NewEphemeralDataColumnStorageAndFs(t)
err = dataColumnStorage.Save(verifiedRoDataColumnSidecars[:fieldparams.CellsPerBlob-1])
require.NoError(t, err)
@@ -555,8 +514,6 @@ func TestGetBlob(t *testing.T) {
})

t.Run("reconstruction needed", func(t *testing.T) {
setupFulu(t)

_, dataColumnStorage := filesystem.NewEphemeralDataColumnStorageAndFs(t)
err = dataColumnStorage.Save(verifiedRoDataColumnSidecars[1 : peerdas.MinimumColumnCountToReconstruct()+1])
require.NoError(t, err)
@@ -582,8 +539,6 @@ func TestGetBlob(t *testing.T) {
})

t.Run("no reconstruction needed", func(t *testing.T) {
setupFulu(t)

_, dataColumnStorage := filesystem.NewEphemeralDataColumnStorageAndFs(t)
err = dataColumnStorage.Save(verifiedRoDataColumnSidecars)
require.NoError(t, err)

@@ -74,6 +74,18 @@ func WithTimeAsNow(t time.Time) ClockOpt {
}
}

func WithSlotAsNow(s types.Slot) ClockOpt {
return func(g *Clock) {
g.now = func() time.Time {
t, err := slots.StartTime(g.t, s)
if err != nil {
panic(err) // lint:nopanic -- This is a programming error if genesis/slot are invalid.
}
return t
}
}
}

// NewClock constructs a Clock value from a genesis timestamp (t) and a Genesis Validator Root (vr).
// The WithNower ClockOpt can be used in tests to specify an alternate `time.Now` implementation,
// for instance to return a value for `Now` spanning a certain number of slots from genesis time, to control the current slot.
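
Editor's note: a usage sketch for the new `WithSlotAsNow` option, mirroring how the sync tests later in this diff wire it into `blobsTestCase` (the `genesis.Time()`/`genesis.ValidatorsRoot()` calls also appear in those tests; the slot choice is illustrative):

// Pin the clock so the current slot is the Deneb fork slot.
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
clock := startup.NewClock(genesis.Time(), genesis.ValidatorsRoot(), startup.WithSlotAsNow(ds))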

@@ -5,6 +5,7 @@ import (

"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
@@ -24,8 +25,9 @@ func testBlobGen(t *testing.T, start primitives.Slot, n int) ([]blocks.ROBlock,
}

func TestValidateNext_happy(t *testing.T) {
current := primitives.Slot(128)
blks, blobs := testBlobGen(t, 63, 4)
startSlot := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
current := startSlot + 65
blks, blobs := testBlobGen(t, startSlot, 4)
cfg := &blobSyncConfig{
retentionStart: 0,
nbv: testNewBlobVerifier(),
@@ -74,8 +76,9 @@ func TestValidateNext_sigMatch(t *testing.T) {
}

func TestValidateNext_errorsFromVerifier(t *testing.T) {
current := primitives.Slot(128)
blks, blobs := testBlobGen(t, 63, 1)
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
current := primitives.Slot(ds + 96)
blks, blobs := testBlobGen(t, ds+31, 1)
cases := []struct {
name string
err error

@@ -2,6 +2,7 @@ package sync

import (
"encoding/binary"
"io"
"math"
"math/big"
"testing"
@@ -18,9 +19,11 @@ import (
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
types "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
leakybucket "github.com/OffchainLabs/prysm/v6/container/leaky-bucket"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/genesis"
enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/require"
@@ -165,23 +168,12 @@ func (r *expectedBlobChunk) requireExpected(t *testing.T, s *Service, stream net
require.Equal(t, rob.Index, r.sidecar.Index)
}

func (c *blobsTestCase) setup(t *testing.T) (*Service, []blocks.ROBlob, func()) {
cfg := params.BeaconConfig()
copiedCfg := cfg.Copy()
repositionFutureEpochs(copiedCfg)
copiedCfg.InitializeForkSchedule()
params.OverrideBeaconConfig(copiedCfg)
cleanup := func() {
params.OverrideBeaconConfig(cfg)
}
maxBlobs := int(params.BeaconConfig().MaxBlobsPerBlock(0))
chain, clock := defaultMockChain(t)
func (c *blobsTestCase) setup(t *testing.T) (*Service, []blocks.ROBlob) {
maxBlobs := int(params.BeaconConfig().MaxBlobsPerBlockAtEpoch(params.BeaconConfig().DenebForkEpoch))
chain := defaultMockChain(t, c.clock.CurrentEpoch())
if c.chain == nil {
c.chain = chain
}
if c.clock == nil {
c.clock = clock
}
d := db.SetupDB(t)

sidecars := make([]blocks.ROBlob, 0)
@@ -208,16 +200,16 @@ func (c *blobsTestCase) setup(t *testing.T) (*Service, []blocks.ROBlob, func())

client := p2ptest.NewTestP2P(t)
s := &Service{
cfg: &config{p2p: client, chain: c.chain, clock: clock, beaconDB: d, blobStorage: filesystem.NewEphemeralBlobStorage(t)},
cfg: &config{p2p: client, chain: c.chain, clock: c.clock, beaconDB: d, blobStorage: filesystem.NewEphemeralBlobStorage(t)},
rateLimiter: newRateLimiter(client),
}

byRootRate := params.BeaconConfig().MaxRequestBlobSidecars * uint64(params.BeaconConfig().MaxBlobsPerBlock(0))
byRangeRate := params.BeaconConfig().MaxRequestBlobSidecars * uint64(params.BeaconConfig().MaxBlobsPerBlock(0))
byRootRate := params.BeaconConfig().MaxRequestBlobSidecars * uint64(maxBlobs)
byRangeRate := params.BeaconConfig().MaxRequestBlobSidecars * uint64(maxBlobs)
s.setRateCollector(p2p.RPCBlobSidecarsByRootTopicV1, leakybucket.NewCollector(0.000001, int64(byRootRate), time.Second, false))
s.setRateCollector(p2p.RPCBlobSidecarsByRangeTopicV1, leakybucket.NewCollector(0.000001, int64(byRangeRate), time.Second, false))

return s, sidecars, cleanup
return s, sidecars
}

func defaultExpectedRequirer(t *testing.T, s *Service, expect []*expectedBlobChunk) func(network.Stream) {
@@ -225,12 +217,16 @@ func defaultExpectedRequirer(t *testing.T, s *Service, expect []*expectedBlobChu
for _, ex := range expect {
ex.requireExpected(t, s, stream)
}

encoding := s.cfg.p2p.Encoding()
_, _, err := ReadStatusCode(stream, encoding)
require.ErrorIs(t, err, io.EOF)
}
}

func (c *blobsTestCase) run(t *testing.T) {
s, sidecars, cleanup := c.setup(t)
defer cleanup()
blobRpcThrottleInterval = time.Microsecond * 1
s, sidecars := c.setup(t)
req := c.requestFromSidecars(sidecars)
expect := c.defineExpected(t, sidecars, req)
m := map[types.Slot][]blocks.ROBlob{}
@@ -266,41 +262,32 @@ func (c *blobsTestCase) run(t *testing.T) {
// so it is helpful in tests to temporarily reposition the epochs to give room for some math.
func repositionFutureEpochs(cfg *params.BeaconChainConfig) {
if cfg.FuluForkEpoch == math.MaxUint64 {
cfg.FuluForkEpoch = cfg.ElectraForkEpoch + 100
cfg.FuluForkEpoch = cfg.ElectraForkEpoch + 4096*2
}
}

func defaultMockChain(t *testing.T) (*mock.ChainService, *startup.Clock) {
de := params.BeaconConfig().DenebForkEpoch
df, err := params.Fork(de)
func defaultMockChain(t *testing.T, current primitives.Epoch) *mock.ChainService {
fe := current - 2
df, err := params.Fork(current)
require.NoError(t, err)
denebBuffer := params.BeaconConfig().MinEpochsForBlobsSidecarsRequest + 1000
ce := de + denebBuffer
fe := ce - 2
cs, err := slots.EpochStart(ce)
require.NoError(t, err)
genesis := time.Now()
mockNow := startup.MockNower{}
clock := startup.NewClock(genesis, params.BeaconConfig().GenesisValidatorsRoot, startup.WithNower(mockNow.Now))
mockNow.SetSlot(t, clock, cs)
chain := &mock.ChainService{
FinalizedCheckPoint: &ethpb.Checkpoint{Epoch: fe},
Fork: df,
}

return chain, clock
return chain
}
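
Editor's note: under the new signature, callers of `defaultMockChain` derive the "current" epoch themselves and pass it in, as the by-root tests later in this diff do:

// Derive a current epoch comfortably past the Deneb retention window.
de := params.BeaconConfig().DenebForkEpoch
ce := de + params.BeaconConfig().MinEpochsForBlobsSidecarsRequest + 1000
chain := defaultMockChain(t, ce)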

func TestTestcaseSetup_BlocksAndBlobs(t *testing.T) {
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
ctx := t.Context()
nblocks := 10
c := &blobsTestCase{nblocks: nblocks}
c := &blobsTestCase{nblocks: nblocks, clock: startup.NewClock(genesis.Time(), genesis.ValidatorsRoot(), startup.WithSlotAsNow(ds))}
c.oldestSlot = c.defaultOldestSlotByRoot
s, sidecars, cleanup := c.setup(t)
s, sidecars := c.setup(t)
req := blobRootRequestFromSidecars(sidecars)
expect := c.filterExpectedByRoot(t, sidecars, req)
defer cleanup()
maxed := nblocks * params.BeaconConfig().MaxBlobsPerBlock(0)
maxed := nblocks * params.BeaconConfig().MaxBlobsPerBlockAtEpoch(params.BeaconConfig().DenebForkEpoch)
require.Equal(t, maxed, len(sidecars))
require.Equal(t, maxed, len(expect))
for _, sc := range sidecars {

@@ -1017,13 +1017,13 @@ func TestBlobRangeForBlocks(t *testing.T) {
for i := range blks {
sbbs[i] = blks[i]
}
retentionStart := primitives.Slot(5)
retentionStart := blks[len(blks)/2].Block().Slot()
bwb, err := sortedBlockWithVerifiedBlobSlice(sbbs)
require.NoError(t, err)
bounds := countCommitments(bwb, retentionStart).blobRange(nil)
require.Equal(t, retentionStart, bounds.low)
higher := primitives.Slot(len(blks) + 1)
bounds = countCommitments(bwb, higher).blobRange(nil)
maxBlkSlot := blks[len(blks)-1].Block().Slot()
bounds = countCommitments(bwb, maxBlkSlot+1).blobRange(nil)
var nilBounds *blobRange
require.Equal(t, nilBounds, bounds)

@@ -1054,17 +1054,17 @@ func TestBlobRequest(t *testing.T) {
}
bwb, err := sortedBlockWithVerifiedBlobSlice(sbbs)
require.NoError(t, err)
maxBlkSlot := primitives.Slot(len(blks) - 1)

tooHigh := primitives.Slot(len(blks) + 1)
maxBlkSlot := blks[len(blks)-1].Block().Slot()
tooHigh := maxBlkSlot + 1
req = countCommitments(bwb, tooHigh).blobRange(nil).Request()
require.Equal(t, nilReq, req)

req = countCommitments(bwb, maxBlkSlot).blobRange(nil).Request()
require.Equal(t, uint64(1), req.Count)
require.Equal(t, maxBlkSlot, req.StartSlot)
require.Equal(t, uint64(1), req.Count)

halfway := primitives.Slot(5)
halfway := blks[len(blks)/2].Block().Slot()
req = countCommitments(bwb, halfway).blobRange(nil).Request()
require.Equal(t, halfway, req.StartSlot)
// adding 1 to include the halfway slot itself
@@ -1103,6 +1103,12 @@ func TestCountCommitments(t *testing.T) {
}

func TestCommitmentCountList(t *testing.T) {
de := params.BeaconConfig().DenebForkEpoch
ds := util.SlotAtEpoch(t, de)
denebRel := func(s primitives.Slot) primitives.Slot {
return ds + s
}
maxBlobs := params.BeaconConfig().MaxBlobsPerBlock(ds)
cases := []struct {
name string
cc commitmentCountList
@@ -1119,20 +1125,20 @@ func TestCommitmentCountList(t *testing.T) {
{
name: "nil bss, single slot",
cc: []commitmentCount{
{slot: 11235, count: 1},
{slot: denebRel(11235), count: 1},
},
expected: &blobRange{low: 11235, high: 11235},
request: &ethpb.BlobSidecarsByRangeRequest{StartSlot: 11235, Count: 1},
expected: &blobRange{low: denebRel(11235), high: denebRel(11235)},
request: &ethpb.BlobSidecarsByRangeRequest{StartSlot: denebRel(11235), Count: 1},
},
{
name: "nil bss, sparse slots",
cc: []commitmentCount{
{slot: 11235, count: 1},
{slot: 11240, count: params.BeaconConfig().MaxBlobsPerBlock(0)},
{slot: 11250, count: 3},
{slot: denebRel(11235), count: 1},
{slot: denebRel(11240), count: maxBlobs},
{slot: denebRel(11250), count: 3},
},
expected: &blobRange{low: 11235, high: 11250},
request: &ethpb.BlobSidecarsByRangeRequest{StartSlot: 11235, Count: 16},
expected: &blobRange{low: denebRel(11235), high: denebRel(11250)},
request: &ethpb.BlobSidecarsByRangeRequest{StartSlot: denebRel(11235), Count: 16},
},
{
name: "AllAvailable in middle, some avail low, none high",
@@ -1141,15 +1147,15 @@ func TestCommitmentCountList(t *testing.T) {
bytesutil.ToBytes32([]byte("0")): {0, 1},
bytesutil.ToBytes32([]byte("1")): {0, 1, 2, 3, 4, 5},
}
return filesystem.NewMockBlobStorageSummarizer(t, onDisk)
return filesystem.NewMockBlobStorageSummarizer(t, de, onDisk)
},
cc: []commitmentCount{
{slot: 0, count: 3, root: bytesutil.ToBytes32([]byte("0"))},
{slot: 5, count: params.BeaconConfig().MaxBlobsPerBlock(0), root: bytesutil.ToBytes32([]byte("1"))},
{slot: 15, count: 3},
{slot: denebRel(0), count: 3, root: bytesutil.ToBytes32([]byte("0"))},
{slot: denebRel(5), count: maxBlobs, root: bytesutil.ToBytes32([]byte("1"))},
{slot: denebRel(15), count: 3},
},
expected: &blobRange{low: 0, high: 15},
request: &ethpb.BlobSidecarsByRangeRequest{StartSlot: 0, Count: 16},
expected: &blobRange{low: denebRel(0), high: denebRel(15)},
request: &ethpb.BlobSidecarsByRangeRequest{StartSlot: denebRel(0), Count: 16},
},
{
name: "AllAvailable at high and low",
@@ -1158,15 +1164,15 @@ func TestCommitmentCountList(t *testing.T) {
bytesutil.ToBytes32([]byte("0")): {0, 1},
bytesutil.ToBytes32([]byte("2")): {0, 1, 2, 3, 4, 5},
}
return filesystem.NewMockBlobStorageSummarizer(t, onDisk)
return filesystem.NewMockBlobStorageSummarizer(t, de, onDisk)
},
cc: []commitmentCount{
{slot: 0, count: 2, root: bytesutil.ToBytes32([]byte("0"))},
{slot: 5, count: 3},
{slot: 15, count: params.BeaconConfig().MaxBlobsPerBlock(0), root: bytesutil.ToBytes32([]byte("2"))},
{slot: denebRel(0), count: 2, root: bytesutil.ToBytes32([]byte("0"))},
{slot: denebRel(5), count: 3},
{slot: denebRel(15), count: maxBlobs, root: bytesutil.ToBytes32([]byte("2"))},
},
expected: &blobRange{low: 5, high: 5},
request: &ethpb.BlobSidecarsByRangeRequest{StartSlot: 5, Count: 1},
expected: &blobRange{low: denebRel(5), high: denebRel(5)},
request: &ethpb.BlobSidecarsByRangeRequest{StartSlot: denebRel(5), Count: 1},
},
{
name: "AllAvailable at high and low, adjacent range in middle",
@@ -1175,16 +1181,16 @@ func TestCommitmentCountList(t *testing.T) {
bytesutil.ToBytes32([]byte("0")): {0, 1},
bytesutil.ToBytes32([]byte("2")): {0, 1, 2, 3, 4, 5},
}
return filesystem.NewMockBlobStorageSummarizer(t, onDisk)
return filesystem.NewMockBlobStorageSummarizer(t, de, onDisk)
},
cc: []commitmentCount{
{slot: 0, count: 2, root: bytesutil.ToBytes32([]byte("0"))},
{slot: 5, count: 3},
{slot: 6, count: 3},
{slot: 15, count: params.BeaconConfig().MaxBlobsPerBlock(0), root: bytesutil.ToBytes32([]byte("2"))},
{slot: denebRel(0), count: 2, root: bytesutil.ToBytes32([]byte("0"))},
{slot: denebRel(5), count: 3},
{slot: denebRel(6), count: 3},
{slot: denebRel(15), count: maxBlobs, root: bytesutil.ToBytes32([]byte("2"))},
},
expected: &blobRange{low: 5, high: 6},
request: &ethpb.BlobSidecarsByRangeRequest{StartSlot: 5, Count: 2},
expected: &blobRange{low: denebRel(5), high: denebRel(6)},
request: &ethpb.BlobSidecarsByRangeRequest{StartSlot: denebRel(5), Count: 2},
},
{
name: "AllAvailable at high and low, range in middle",
@@ -1194,16 +1200,16 @@ func TestCommitmentCountList(t *testing.T) {
bytesutil.ToBytes32([]byte("1")): {0, 1},
bytesutil.ToBytes32([]byte("2")): {0, 1, 2, 3, 4, 5},
}
return filesystem.NewMockBlobStorageSummarizer(t, onDisk)
return filesystem.NewMockBlobStorageSummarizer(t, de, onDisk)
},
cc: []commitmentCount{
{slot: 0, count: 2, root: bytesutil.ToBytes32([]byte("0"))},
{slot: 5, count: 3, root: bytesutil.ToBytes32([]byte("1"))},
{slot: 10, count: 3},
{slot: 15, count: params.BeaconConfig().MaxBlobsPerBlock(0), root: bytesutil.ToBytes32([]byte("2"))},
{slot: denebRel(0), count: 2, root: bytesutil.ToBytes32([]byte("0"))},
{slot: denebRel(5), count: 3, root: bytesutil.ToBytes32([]byte("1"))},
{slot: denebRel(10), count: 3},
{slot: denebRel(15), count: maxBlobs, root: bytesutil.ToBytes32([]byte("2"))},
},
expected: &blobRange{low: 5, high: 10},
request: &ethpb.BlobSidecarsByRangeRequest{StartSlot: 5, Count: 6},
expected: &blobRange{low: denebRel(5), high: denebRel(10)},
request: &ethpb.BlobSidecarsByRangeRequest{StartSlot: denebRel(5), Count: 6},
},
}
for _, c := range cases {
@@ -1218,8 +1224,8 @@ func TestCommitmentCountList(t *testing.T) {
require.IsNil(t, br.Request())
} else {
req := br.Request()
require.DeepEqual(t, req.StartSlot, c.request.StartSlot)
require.DeepEqual(t, req.Count, c.request.Count)
require.Equal(t, req.StartSlot, c.request.StartSlot)
require.Equal(t, req.Count, c.request.Count)
}
})
}
@@ -1299,7 +1305,7 @@ func TestVerifyAndPopulateBlobs(t *testing.T) {
r1: {0, 1},
r7: {0, 1, 2, 3, 4, 5},
}
bss := filesystem.NewMockBlobStorageSummarizer(t, onDisk)
bss := filesystem.NewMockBlobStorageSummarizer(t, params.BeaconConfig().DenebForkEpoch, onDisk)
err := verifyAndPopulateBlobs(bwb, blobs, testReqFromResp(bwb), bss)
require.NoError(t, err)
require.Equal(t, 6, len(bwb[i1].Blobs))

@@ -439,6 +439,7 @@ func TestService_Synced(t *testing.T) {
}

func TestMissingBlobRequest(t *testing.T) {
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
cases := []struct {
name string
setup func(t *testing.T) (blocks.ROBlock, *filesystem.BlobStorage)
@@ -476,7 +477,7 @@ func TestMissingBlobRequest(t *testing.T) {
{
name: "2 commitments, 1 missing",
setup: func(t *testing.T) (blocks.ROBlock, *filesystem.BlobStorage) {
bk, _ := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 0, 2)
bk, _ := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, ds, 2)
bm, fs := filesystem.NewEphemeralBlobStorageWithMocker(t)
require.NoError(t, bm.CreateFakeIndices(bk.Root(), bk.Block().Slot(), 1))
return bk, fs
@@ -486,7 +487,7 @@ func TestMissingBlobRequest(t *testing.T) {
{
name: "2 commitments, 0 missing",
setup: func(t *testing.T) (blocks.ROBlock, *filesystem.BlobStorage) {
bk, _ := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 0, 2)
bk, _ := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, ds, 2)
bm, fs := filesystem.NewEphemeralBlobStorageWithMocker(t)
require.NoError(t, bm.CreateFakeIndices(bk.Root(), bk.Block().Slot(), 0, 1))
return bk, fs

@@ -415,6 +415,7 @@ func TestRequestPendingBlobs(t *testing.T) {
}

func TestConstructPendingBlobsRequest(t *testing.T) {
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
d := db.SetupDB(t)
bs := filesystem.NewEphemeralBlobStorage(t)
s := &Service{cfg: &config{beaconDB: d, blobStorage: bs}}
@@ -436,6 +437,7 @@ func TestConstructPendingBlobsRequest(t *testing.T) {
ParentRoot: bytesutil.PadTo([]byte{}, 32),
StateRoot: bytesutil.PadTo([]byte{}, 32),
BodyRoot: bytesutil.PadTo([]byte{}, 32),
Slot: ds,
},
Signature: bytesutil.PadTo([]byte{}, 96),
}

@@ -57,6 +57,8 @@ func (s *Service) streamBlobBatch(ctx context.Context, batch blockBatch, wQuota
return wQuota, nil
}

var blobRpcThrottleInterval = time.Second

// blobsSidecarsByRangeRPCHandler looks up the request blobs from the database from a given start slot index
func (s *Service) blobSidecarsByRangeRPCHandler(ctx context.Context, msg interface{}, stream libp2pcore.Stream) error {
var err error
@@ -86,7 +88,7 @@ func (s *Service) blobSidecarsByRangeRPCHandler(ctx context.Context, msg interfa
}

// Ticker to stagger out large requests.
ticker := time.NewTicker(time.Second)
ticker := time.NewTicker(blobRpcThrottleInterval)
defer ticker.Stop()
batcher, err := newBlockRangeBatcher(rp, s.cfg.beaconDB, s.rateLimiter, s.cfg.chain.IsCanonical, ticker)
if err != nil {

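Editor's note: hoisting the one-second ticker into the package-level `blobRpcThrottleInterval` variable lets tests collapse the throttle, which is exactly what `(c *blobsTestCase).run` does earlier in this diff:

// In a test, before exercising the by-range/by-root handlers:
blobRpcThrottleInterval = time.Microsecond * 1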
@@ -4,12 +4,14 @@ import (
"testing"

"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
types "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/genesis"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/OffchainLabs/prysm/v6/testing/util"
)

func (c *blobsTestCase) defaultOldestSlotByRange(t *testing.T) types.Slot {
@@ -18,8 +20,7 @@ func (c *blobsTestCase) defaultOldestSlotByRange(t *testing.T) types.Slot {
if oldestEpoch < params.BeaconConfig().DenebForkEpoch {
oldestEpoch = params.BeaconConfig().DenebForkEpoch
}
oldestSlot, err := slots.EpochStart(oldestEpoch)
require.NoError(t, err)
oldestSlot := util.SlotAtEpoch(t, oldestEpoch)
return oldestSlot
}

@@ -89,16 +90,11 @@ func (c *blobsTestCase) runTestBlobSidecarsByRange(t *testing.T) {
}

func TestBlobByRangeOK(t *testing.T) {
origNC := params.BeaconConfig()
// restore network config after test completes
defer func() {
params.OverrideBeaconConfig(origNC)
}()
// set MaxRequestBlobSidecars to a low-ish value so the test doesn't timeout.
nc := params.BeaconConfig().Copy()
nc.MaxRequestBlobSidecars = 100
params.OverrideBeaconConfig(nc)

params.SetupTestConfigCleanup(t)
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
params.BeaconConfig().InitializeForkSchedule()
retainSlots := util.SlotAtEpoch(t, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
current := ds + retainSlots
cases := []*blobsTestCase{
{
name: "beginning of window + 10",
@@ -134,11 +130,11 @@ func TestBlobByRangeOK(t *testing.T) {
Count: 20,
}
},
total: func() *int { x := params.BeaconConfig().MaxBlobsPerBlock(0) * 10; return &x }(), // 10 blocks * 4 blobs = 40
total: func() *int { x := params.BeaconConfig().MaxBlobsPerBlock(ds) * 10; return &x }(), // 10 blocks * 4 blobs = 40
},
{
name: "when request count > MAX_REQUEST_BLOCKS_DENEB, MAX_REQUEST_BLOBS_SIDECARS sidecars in response",
nblocks: int(params.BeaconConfig().MaxRequestBlocksDeneb) + 10,
nblocks: int(params.BeaconConfig().MaxRequestBlocksDeneb) + 1,
requestFromSidecars: func(scs []blocks.ROBlob) interface{} {
return &ethpb.BlobSidecarsByRangeRequest{
StartSlot: scs[0].Slot(),
@@ -148,7 +144,9 @@ func TestBlobByRangeOK(t *testing.T) {
total: func() *int { x := int(params.BeaconConfig().MaxRequestBlobSidecars); return &x }(),
},
}
clock := startup.NewClock(genesis.Time(), genesis.ValidatorsRoot(), startup.WithSlotAsNow(current))
for _, c := range cases {
c.clock = clock
t.Run(c.name, func(t *testing.T) {
c.runTestBlobSidecarsByRange(t)
})
@@ -156,19 +154,12 @@ func TestBlobByRangeOK(t *testing.T) {
}

func TestBlobsByRangeValidation(t *testing.T) {
cfg := params.BeaconConfig()
repositionFutureEpochs(cfg)
undo, err := params.SetActiveWithUndo(cfg)
require.NoError(t, err)
defer func() {
require.NoError(t, undo())
}()
denebSlot, err := slots.EpochStart(params.BeaconConfig().DenebForkEpoch)
require.NoError(t, err)
params.SetupTestConfigCleanup(t)
repositionFutureEpochs(params.BeaconConfig())
denebSlot := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)

minReqEpochs := params.BeaconConfig().MinEpochsForBlobsSidecarsRequest
minReqSlots, err := slots.EpochStart(minReqEpochs)
require.NoError(t, err)
minReqSlots := util.SlotAtEpoch(t, minReqEpochs)
// spec criteria for min/max bound checking
/*
Clients MUST keep a record of signed blobs sidecars seen on the epoch range
@@ -231,7 +222,7 @@ func TestBlobsByRangeValidation(t *testing.T) {
},
start: defaultMinStart,
end: defaultMinStart + 9,
batch: blobBatchLimit(100),
batch: blobBatchLimit(defaultCurrent),
},
{
name: "count > MAX_REQUEST_BLOB_SIDECARS",
@@ -243,7 +234,7 @@ func TestBlobsByRangeValidation(t *testing.T) {
start: defaultMinStart,
end: defaultMinStart - 10 + 999,
// a large count is ok, we just limit the amount of actual responses
batch: blobBatchLimit(100),
batch: blobBatchLimit(defaultCurrent),
},
{
name: "start + count > current",
@@ -265,7 +256,7 @@ func TestBlobsByRangeValidation(t *testing.T) {
},
start: denebSlot,
end: denebSlot + 89,
batch: blobBatchLimit(100),
batch: blobBatchLimit(defaultCurrent - minReqSlots + 100),
},
}
for _, c := range cases {
@@ -285,8 +276,7 @@ func TestBlobsByRangeValidation(t *testing.T) {
}

func TestBlobRPCMinValidSlot(t *testing.T) {
denebSlot, err := slots.EpochStart(params.BeaconConfig().DenebForkEpoch)
require.NoError(t, err)
denebSlot := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
cases := []struct {
name string
current func(t *testing.T) types.Slot
@@ -296,9 +286,8 @@ func TestBlobRPCMinValidSlot(t *testing.T) {
{
name: "before deneb",
current: func(t *testing.T) types.Slot {
st, err := slots.EpochStart(params.BeaconConfig().DenebForkEpoch - 1)
st := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch-1)
// note: we no longer need to deal with deneb fork epoch being far future
require.NoError(t, err)
return st
},
expected: denebSlot,
@@ -306,9 +295,8 @@ func TestBlobRPCMinValidSlot(t *testing.T) {
{
name: "equal to deneb",
current: func(t *testing.T) types.Slot {
st, err := slots.EpochStart(params.BeaconConfig().DenebForkEpoch)
st := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
// note: we no longer need to deal with deneb fork epoch being far future
require.NoError(t, err)
return st
},
expected: denebSlot,
@@ -316,9 +304,8 @@ func TestBlobRPCMinValidSlot(t *testing.T) {
{
name: "after deneb, before expiry starts",
current: func(t *testing.T) types.Slot {
st, err := slots.EpochStart(params.BeaconConfig().DenebForkEpoch + params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
st := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch+params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
// note: we no longer need to deal with deneb fork epoch being far future
require.NoError(t, err)
return st
},
expected: denebSlot,
@@ -326,9 +313,8 @@ func TestBlobRPCMinValidSlot(t *testing.T) {
{
name: "expiry starts one epoch after deneb + MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS",
current: func(t *testing.T) types.Slot {
st, err := slots.EpochStart(params.BeaconConfig().DenebForkEpoch + params.BeaconConfig().MinEpochsForBlobsSidecarsRequest + 1)
st := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch+params.BeaconConfig().MinEpochsForBlobsSidecarsRequest+1)
// note: we no longer need to deal with deneb fork epoch being far future
require.NoError(t, err)
return st
},
expected: denebSlot + params.BeaconConfig().SlotsPerEpoch,

@@ -49,7 +49,7 @@ func (s *Service) blobSidecarByRootRPCHandler(ctx context.Context, msg interface
|
||||
batchSize := flags.Get().BlobBatchLimit
|
||||
var ticker *time.Ticker
|
||||
if len(blobIdents) > batchSize {
|
||||
ticker = time.NewTicker(time.Second)
|
||||
ticker = time.NewTicker(blobRpcThrottleInterval)
|
||||
}
|
||||
|
||||
// Compute the oldest slot we'll allow a peer to request, based on the current slot.
|
||||
|
||||
@@ -7,13 +7,15 @@ import (

"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
p2pTypes "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
types "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/genesis"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/OffchainLabs/prysm/v6/testing/util"
"github.com/libp2p/go-libp2p/core/network"
)

@@ -123,6 +125,13 @@ func (c *blobsTestCase) runTestBlobSidecarsByRoot(t *testing.T) {
if c.streamReader == nil {
c.streamReader = defaultExpectedRequirer
}
if c.clock == nil {
de := params.BeaconConfig().DenebForkEpoch
denebBuffer := params.BeaconConfig().MinEpochsForBlobsSidecarsRequest + 1000
ce := de + denebBuffer
cs := util.SlotAtEpoch(t, ce)
c.clock = startup.NewClock(genesis.Time(), genesis.ValidatorsRoot(), startup.WithSlotAsNow(cs))
}
c.run(t)
}

@@ -181,18 +190,20 @@ func readChunkEncodedBlobsAsStreamReader(t *testing.T, s *Service, expect []*exp
}

func TestBlobsByRootValidation(t *testing.T) {
cfg := params.BeaconConfig()
repositionFutureEpochs(cfg)
undo, err := params.SetActiveWithUndo(cfg)
require.NoError(t, err)
defer func() {
require.NoError(t, undo())
}()
capellaSlot, err := slots.EpochStart(params.BeaconConfig().CapellaForkEpoch)
require.NoError(t, err)
dmc, clock := defaultMockChain(t)
params.SetupTestConfigCleanup(t)
repositionFutureEpochs(params.BeaconConfig())

de := params.BeaconConfig().DenebForkEpoch
denebBuffer := params.BeaconConfig().MinEpochsForBlobsSidecarsRequest + 1000
ce := de + denebBuffer
cs := util.SlotAtEpoch(t, ce)
clock := startup.NewClock(genesis.Time(), genesis.ValidatorsRoot(), startup.WithSlotAsNow(cs))

dmc := defaultMockChain(t, ce)
capellaSlot := util.SlotAtEpoch(t, params.BeaconConfig().CapellaForkEpoch)
dmc.Slot = &capellaSlot
dmc.FinalizedCheckPoint = &ethpb.Checkpoint{Epoch: params.BeaconConfig().CapellaForkEpoch}
maxBlobs := params.BeaconConfig().MaxBlobsPerBlockAtEpoch(params.BeaconConfig().DenebForkEpoch)
cases := []*blobsTestCase{
{
name: "block before minimum_request_epoch",
@@ -222,7 +233,7 @@ func TestBlobsByRootValidation(t *testing.T) {
name: "block with all indices missing between 2 full blocks",
nblocks: 3,
missing: map[int]bool{1: true},
total: func(i int) *int { return &i }(2 * int(params.BeaconConfig().MaxBlobsPerBlock(0))),
total: func(i int) *int { return &i }(2 * int(maxBlobs)),
},
{
name: "exceeds req max",
@@ -232,6 +243,7 @@ func TestBlobsByRootValidation(t *testing.T) {
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
c.clock = clock
c.runTestBlobSidecarsByRoot(t)
})
}

@@ -27,7 +27,6 @@ import (
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/libp2p/go-libp2p/core/network"
)

@@ -614,18 +613,19 @@ func TestBlobValidatorFromRangeReq(t *testing.T) {
}

func TestSeqBlobValid(t *testing.T) {
one, oneBlobs := generateTestBlockWithSidecars(t, [32]byte{}, 0, 3)
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
one, oneBlobs := generateTestBlockWithSidecars(t, [32]byte{}, ds, 3)
r1, err := one.Block.HashTreeRoot()
require.NoError(t, err)
two, twoBlobs := generateTestBlockWithSidecars(t, r1, 1, 3)
two, twoBlobs := generateTestBlockWithSidecars(t, r1, ds+1, 3)
r2, err := two.Block.HashTreeRoot()
require.NoError(t, err)
_, oops := generateTestBlockWithSidecars(t, r2, 0, 4)
_, oops := generateTestBlockWithSidecars(t, r2, ds, 4)
oops[1].SignedBlockHeader.Header.ParentRoot = bytesutil.PadTo([]byte("derp"), 32)
wrongRoot, err := blocks.NewROBlobWithRoot(oops[2].BlobSidecar, bytesutil.ToBytes32([]byte("parentderp")))
require.NoError(t, err)
oob := oops[3]
oob.Index = uint64(params.BeaconConfig().MaxBlobsPerBlock(0))
oob.Index = uint64(params.BeaconConfig().MaxBlobsPerBlock(ds))

cases := []struct {
name string
@@ -704,7 +704,7 @@ func TestSendBlobsByRangeRequest(t *testing.T) {

t.Run("single blob - Deneb", func(t *testing.T) {
// Setup genesis such that we are currently in deneb.
s := uint64(slots.UnsafeEpochStart(params.BeaconConfig().DenebForkEpoch)) * params.BeaconConfig().SecondsPerSlot
s := uint64(util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)) * params.BeaconConfig().SecondsPerSlot
clock := startup.NewClock(time.Now().Add(-time.Second*time.Duration(s)), [32]byte{})
ctxByte, err := ContextByteVersionsForValRoot(clock.GenesisValidatorsRoot())
require.NoError(t, err)
@@ -713,7 +713,7 @@ func TestSendBlobsByRangeRequest(t *testing.T) {
p2 := p2ptest.NewTestP2P(t)
p1.Connect(p2)
// Set current slot to a deneb slot.
slot := slots.UnsafeEpochStart(params.BeaconConfig().DenebForkEpoch + 1)
slot := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch+1)
// Create a simple handler that will return a valid response.
p2.SetStreamHandler(topic, func(stream network.Stream) {
defer func() {
@@ -757,7 +757,7 @@ func TestSendBlobsByRangeRequest(t *testing.T) {
require.NoError(t, undo())
}()
// Setup genesis such that we are currently in deneb.
s := uint64(slots.UnsafeEpochStart(params.BeaconConfig().DenebForkEpoch)) * params.BeaconConfig().SecondsPerSlot
s := uint64(util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)) * params.BeaconConfig().SecondsPerSlot
clock := startup.NewClock(time.Now().Add(-time.Second*time.Duration(s)), [32]byte{})
ctxByte, err := ContextByteVersionsForValRoot(clock.GenesisValidatorsRoot())
require.NoError(t, err)
@@ -766,7 +766,7 @@ func TestSendBlobsByRangeRequest(t *testing.T) {
p2 := p2ptest.NewTestP2P(t)
p1.Connect(p2)
// Set current slot to the first slot of the last deneb epoch.
slot := slots.UnsafeEpochStart(params.BeaconConfig().DenebForkEpoch)
slot := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
// Create a simple handler that will return a valid response.
p2.SetStreamHandler(topic, func(stream network.Stream) {
defer func() {
@@ -825,7 +825,7 @@ func TestSendBlobsByRangeRequest(t *testing.T) {
require.NoError(t, undo())
}()

s := uint64(slots.UnsafeEpochStart(params.BeaconConfig().ElectraForkEpoch)) * params.BeaconConfig().SecondsPerSlot
s := uint64(util.SlotAtEpoch(t, params.BeaconConfig().ElectraForkEpoch)) * params.BeaconConfig().SecondsPerSlot
clock := startup.NewClock(time.Now().Add(-time.Second*time.Duration(s)), [32]byte{})
ctxByte, err := ContextByteVersionsForValRoot(clock.GenesisValidatorsRoot())
require.NoError(t, err)
@@ -834,7 +834,7 @@ func TestSendBlobsByRangeRequest(t *testing.T) {
p2 := p2ptest.NewTestP2P(t)
p1.Connect(p2)

slot := slots.UnsafeEpochStart(params.BeaconConfig().ElectraForkEpoch)
slot := util.SlotAtEpoch(t, params.BeaconConfig().ElectraForkEpoch)
// Create a simple handler that will return a valid response.
p2.SetStreamHandler(topic, func(stream network.Stream) {
defer func() {

@@ -812,7 +812,7 @@ func isDigestValid(digest [4]byte, clock *startup.Clock) (bool, error) {
// In the event there is a fork the next epoch,
// we skip the check, as we subscribe subnets an
// epoch in advance.
if params.DigestChangesAfter(current) {
if params.NextNetworkScheduleEntry(current).Epoch == current+1 {
return true, nil
}
return params.ForkDigest(current) == digest, nil

@@ -19,6 +19,7 @@ go_test(
srcs = ["blob_test.go"],
embed = [":go_default_library"],
deps = [
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//encoding/bytesutil:go_default_library",
"//testing/require:go_default_library",

@@ -4,6 +4,7 @@ import (
"fmt"
"testing"

"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/testing/require"
@@ -11,6 +12,7 @@ import (
)

func TestBlobAlignsWithBlock(t *testing.T) {
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
tests := []struct {
name string
blockAndBlob func(t *testing.T) (blocks.ROBlock, []blocks.ROBlob)
@@ -19,13 +21,13 @@ func TestBlobAlignsWithBlock(t *testing.T) {
{
name: "happy path",
blockAndBlob: func(t *testing.T) (blocks.ROBlock, []blocks.ROBlob) {
return util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 0, 1)
return util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, ds, 1)
},
},
{
name: "mismatched roots",
blockAndBlob: func(t *testing.T) (blocks.ROBlock, []blocks.ROBlob) {
blk, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 0, 1)
blk, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, ds, 1)
tweaked := blobs[0].BlobSidecar
tweaked.SignedBlockHeader.Header.Slot = tweaked.SignedBlockHeader.Header.Slot + 1
tampered, err := blocks.NewROBlob(tweaked)
@@ -37,7 +39,7 @@ func TestBlobAlignsWithBlock(t *testing.T) {
{
name: "mismatched roots - fake",
blockAndBlob: func(t *testing.T) (blocks.ROBlock, []blocks.ROBlob) {
blk, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 0, 1)
blk, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, ds, 1)
copied := blobs[0].BlobSidecar
// exact same header, mess with the root
fake, err := blocks.NewROBlobWithRoot(copied, bytesutil.ToBytes32([]byte("derp")))

@@ -23,7 +23,8 @@ import (

func TestBlobIndexInBounds(t *testing.T) {
ini := &Initializer{}
_, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 0, 1)
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
_, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, ds, 1)
b := blobs[0]
// set Index to a value that is out of bounds
v := ini.NewBlobVerifier(b, GossipBlobSidecarRequirements)
@@ -31,7 +32,8 @@ func TestBlobIndexInBounds(t *testing.T) {
require.Equal(t, true, v.results.executed(RequireBlobIndexInBounds))
require.NoError(t, v.results.result(RequireBlobIndexInBounds))

b.Index = uint64(params.BeaconConfig().MaxBlobsPerBlock(0))
maxBlobs := params.BeaconConfig().MaxBlobsPerBlock(ds)
b.Index = uint64(maxBlobs)
v = ini.NewBlobVerifier(b, GossipBlobSidecarRequirements)
require.ErrorIs(t, v.BlobIndexInBounds(), ErrBlobIndexInvalid)
require.Equal(t, true, v.results.executed(RequireBlobIndexInBounds))

2 changelog/kasey_max-blobs-use-network-schedule.md Normal file
@@ -0,0 +1,2 @@
### Ignored
- Switch implementation of get max blobs to use network schedule entry code to be consistent with other fork-related helpers.
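
The changelog entry above names the core refactor; the sketch below is illustrative only (the package and function names are hypothetical, using helpers visible in this diff) and shows the equivalence the change relies on: the slot-based and epoch-based lookups should agree, since MaxBlobsPerBlock now converts its slot to an epoch and delegates to MaxBlobsPerBlockAtEpoch, which reads the network schedule entry.

// Illustrative sketch; not part of the commit.
package example

import (
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/time/slots"
)

// maxBlobsAgree checks that the slot- and epoch-based helpers resolve to the
// same blob limit for the first slot of an epoch.
func maxBlobsAgree() (bool, error) {
	cfg := params.BeaconConfig()
	epoch := cfg.ElectraForkEpoch
	slot, err := slots.EpochStart(epoch)
	if err != nil {
		return false, err
	}
	return cfg.MaxBlobsPerBlock(slot) == cfg.MaxBlobsPerBlockAtEpoch(epoch), nil
}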
@@ -82,6 +82,7 @@ go_test(
"//genesis:go_default_library",
"//io/file:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",

@@ -5,7 +5,6 @@ import (
"encoding/binary"
"fmt"
"math"
"slices"
"sort"
"strings"
"sync"
@@ -384,10 +383,12 @@ func (b *BeaconChainConfig) ApplyOptions(opts ...Option) {
}
}

// TODO: this needs to be able to return an error
// InitializeForkSchedule initializes the schedules forks baked into the config.
// InitializeForkSchedule initializes the scheduled forks and BPOs baked into the config.
func (b *BeaconChainConfig) InitializeForkSchedule() {
// Reset Fork Version Schedule.
// TODO: this needs to be able to return an error. The network schedule code has
// to implement weird fallbacks when it is not initialized properly, it would be better
// if the beacon node could crash if there isn't a valid fork schedule
// at the return of this function.
b.ForkVersionSchedule = configForkSchedule(b)
b.ForkVersionNames = configForkNames(b)
b.forkSchedule = initForkSchedule(b)
@@ -439,16 +440,18 @@ func (ns *NetworkSchedule) epochIdx(epoch primitives.Epoch) int {
return -1
}

func (ns *NetworkSchedule) safeIndex(idx int) NetworkScheduleEntry {
if idx < 0 || len(ns.entries) == 0 {
return genesisNetworkScheduleEntry()
}
if idx >= len(ns.entries) {
return ns.entries[len(ns.entries)-1]
}
return ns.entries[idx]
}

func (ns *NetworkSchedule) Next(epoch primitives.Epoch) NetworkScheduleEntry {
lastIdx := len(ns.entries) - 1
idx := ns.epochIdx(epoch)
if idx < 0 {
return ns.entries[0]
}
if idx == lastIdx {
return ns.entries[lastIdx]
}
return ns.entries[idx+1]
return ns.safeIndex(ns.epochIdx(epoch) + 1)
}

func (ns *NetworkSchedule) LastEntry() NetworkScheduleEntry {
@@ -457,38 +460,21 @@ func (ns *NetworkSchedule) LastEntry() NetworkScheduleEntry {
return ns.entries[i]
}
}
return ns.entries[0]
return genesisNetworkScheduleEntry()
}

// LastFork is the last full fork (this is used by e2e testing)
func (ns *NetworkSchedule) LastFork() NetworkScheduleEntry {
for i := len(ns.entries) - 1; i >= 0; i-- {
if ns.entries[i].isFork {
if ns.entries[i].isFork && ns.entries[i].Epoch != BeaconConfig().FarFutureEpoch {
return ns.entries[i]
}
}
return ns.entries[0]
return genesisNetworkScheduleEntry()
}

func (ns *NetworkSchedule) ForEpoch(epoch primitives.Epoch) NetworkScheduleEntry {
idx := ns.epochIdx(epoch)
if idx < 0 {
return ns.entries[0]
}
if idx >= len(ns.entries)-1 {
return ns.entries[len(ns.entries)-1]
}
return ns.entries[idx]
}

func (ns *NetworkSchedule) activatedAt(epoch primitives.Epoch) (*NetworkScheduleEntry, bool) {
ns.mu.RLock()
defer ns.mu.RUnlock()
if ns.byEpoch == nil {
return nil, false
}
entry, ok := ns.byEpoch[epoch]
return entry, ok
func (ns *NetworkSchedule) forEpoch(epoch primitives.Epoch) NetworkScheduleEntry {
return ns.safeIndex(ns.epochIdx(epoch))
}

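Taken together, forEpoch and Next now reduce to epochIdx plus safeIndex clamping. A rough sketch of what this implies for the exported accessors, assuming a properly initialized schedule (illustrative only, not from the commit):

// Sketch: lookups clamp at the ends of the schedule instead of indexing out
// of range. Queries before the first entry (or against an uninitialized
// schedule) fall back to the genesis entry; queries past the last entry
// return the final entry.
current := params.GetNetworkScheduleEntry(params.BeaconConfig().ElectraForkEpoch) // entry active at that epoch
next := params.NextNetworkScheduleEntry(params.BeaconConfig().ElectraForkEpoch)   // following entry, clamped to the last one
_, _ = current, next
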
func (ns *NetworkSchedule) merge(other *NetworkSchedule) *NetworkSchedule {
@@ -497,10 +483,15 @@ func (ns *NetworkSchedule) merge(other *NetworkSchedule) *NetworkSchedule {
merged = append(merged, other.entries...)
sort.Slice(merged, func(i, j int) bool {
if merged[i].Epoch == merged[j].Epoch {
if merged[i].VersionEnum == merged[j].VersionEnum {
return merged[i].isFork
// This can happen for 2 reasons:
// 1) both entries are forks in a test setup (eg starting genesis at a later fork)
// - break tie by version enum
// 2) one entry is a fork, the other is a BPO change
// - break tie by putting the fork first
if merged[i].isFork && merged[j].isFork {
return merged[i].VersionEnum < merged[j].VersionEnum
}
return merged[i].VersionEnum < merged[j].VersionEnum
return merged[i].isFork
}
return merged[i].Epoch < merged[j].Epoch
})
@@ -702,52 +693,12 @@ func (b *BeaconChainConfig) TargetBlobsPerBlock(slot primitives.Slot) int {
// MaxBlobsPerBlock returns the maximum number of blobs per block for the given slot.
func (b *BeaconChainConfig) MaxBlobsPerBlock(slot primitives.Slot) int {
epoch := primitives.Epoch(slot.DivSlot(b.SlotsPerEpoch))

if len(b.BlobSchedule) > 0 {
if !slices.IsSortedFunc(b.BlobSchedule, func(a, b BlobScheduleEntry) int {
return int(a.Epoch - b.Epoch)
}) {
slices.SortFunc(b.BlobSchedule, func(a, b BlobScheduleEntry) int {
return int(a.Epoch - b.Epoch)
})
}

for i := len(b.BlobSchedule) - 1; i >= 0; i-- {
if epoch >= b.BlobSchedule[i].Epoch {
return int(b.BlobSchedule[i].MaxBlobsPerBlock)
}
}
}

if epoch >= b.ElectraForkEpoch {
return b.DeprecatedMaxBlobsPerBlockElectra
}

return b.DeprecatedMaxBlobsPerBlock
return b.MaxBlobsPerBlockAtEpoch(epoch)
}

// MaxBlobsPerBlockAtEpoch returns the maximum number of blobs per block for the given epoch
func (b *BeaconChainConfig) MaxBlobsPerBlockAtEpoch(epoch primitives.Epoch) int {
if len(b.BlobSchedule) > 0 {
if !slices.IsSortedFunc(b.BlobSchedule, func(a, b BlobScheduleEntry) int {
return int(a.Epoch - b.Epoch)
}) {
slices.SortFunc(b.BlobSchedule, func(a, b BlobScheduleEntry) int {
return int(a.Epoch - b.Epoch)
})
}

for i := len(b.BlobSchedule) - 1; i >= 0; i-- {
if epoch >= b.BlobSchedule[i].Epoch {
return int(b.BlobSchedule[i].MaxBlobsPerBlock)
}
}
}

if epoch >= b.ElectraForkEpoch {
return b.DeprecatedMaxBlobsPerBlockElectra
}
return b.DeprecatedMaxBlobsPerBlock
return int(b.networkSchedule.forEpoch(epoch).MaxBlobsPerBlock)
}

// DenebEnabled centralizes the check to determine if code paths that are specific to deneb should be allowed to execute.

@@ -10,6 +10,7 @@ import (
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/genesis"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/ethereum/go-ethereum/common/hexutil"
)
@@ -109,75 +110,80 @@ func TestConfigGenesisValidatorRoot(t *testing.T) {
require.Equal(t, params.BeaconConfig().GenesisValidatorsRoot, genesis.ValidatorsRoot())
}

func TestMaxBlobsPerBlock(t *testing.T) {
t.Run("Before all forks and no BlobSchedule", func(t *testing.T) {
cfg := params.MainnetConfig()
cfg.BlobSchedule = nil
cfg.ElectraForkEpoch = 100
cfg.FuluForkEpoch = 200
require.Equal(t, cfg.MaxBlobsPerBlock(0), cfg.DeprecatedMaxBlobsPerBlock)
})
func TestMaxBlobsJumbled(t *testing.T) {
params.SetActiveTestCleanup(t, params.MainnetBeaconConfig)
cfg := params.MainnetConfig()
cfg.FuluForkEpoch = cfg.ElectraForkEpoch + 4098*2
electraMaxBlobs := uint64(cfg.DeprecatedMaxBlobsPerBlockElectra)
offsets := []primitives.Epoch{cfg.FuluForkEpoch}
for _, offset := range []primitives.Epoch{320, 640, 960, 1080} {
offsets = append(offsets, cfg.FuluForkEpoch+offset)
}
maxBlobs := map[primitives.Epoch]uint64{
cfg.FuluForkEpoch: electraMaxBlobs,
offsets[0]: electraMaxBlobs + 3,
offsets[1]: electraMaxBlobs + 6,
offsets[2]: electraMaxBlobs + 9,
offsets[3]: electraMaxBlobs + 12,
}
schedule := make([]params.BlobScheduleEntry, 0, len(maxBlobs))
for _, epoch := range offsets[1:] {
schedule = append(schedule, params.BlobScheduleEntry{Epoch: epoch, MaxBlobsPerBlock: maxBlobs[epoch]})
}
cfg.BlobSchedule = schedule
cfg.InitializeForkSchedule()
for i := 1; i < len(cfg.BlobSchedule); i++ {
beforeEpoch, epoch := cfg.BlobSchedule[i-1].Epoch, cfg.BlobSchedule[i].Epoch
before, after := maxBlobs[beforeEpoch], maxBlobs[epoch]
require.Equal(t, before, uint64(cfg.MaxBlobsPerBlockAtEpoch(epoch-1)))
require.Equal(t, after, uint64(cfg.MaxBlobsPerBlockAtEpoch(epoch)))
beforeSlot, err := cfg.SlotsPerEpoch.SafeMul(uint64(beforeEpoch))
require.NoError(t, err)
afterSlot, err := cfg.SlotsPerEpoch.SafeMul(uint64(epoch))
require.NoError(t, err)
require.Equal(t, before, uint64(cfg.MaxBlobsPerBlock(beforeSlot)))
require.Equal(t, after, uint64(cfg.MaxBlobsPerBlock(afterSlot)))
}

t.Run("Uses latest matching BlobSchedule entry", func(t *testing.T) {
cfg := params.MainnetConfig()
cfg.BlobSchedule = []params.BlobScheduleEntry{
{Epoch: 5, MaxBlobsPerBlock: 7},
{Epoch: 10, MaxBlobsPerBlock: 11},
}
slot := 11 * cfg.SlotsPerEpoch
require.Equal(t, cfg.MaxBlobsPerBlock(slot), 11)
})
require.Equal(t, electraMaxBlobs, uint64(cfg.MaxBlobsPerBlockAtEpoch(cfg.FuluForkEpoch-1)))
require.Equal(t, electraMaxBlobs, uint64(cfg.MaxBlobsPerBlockAtEpoch(cfg.ElectraForkEpoch)))
require.Equal(t, cfg.DeprecatedMaxBlobsPerBlock, cfg.MaxBlobsPerBlockAtEpoch(cfg.ElectraForkEpoch-1))
require.Equal(t, cfg.DeprecatedMaxBlobsPerBlock, cfg.MaxBlobsPerBlockAtEpoch(cfg.DenebForkEpoch))
preBlobEpochs := []primitives.Epoch{cfg.DenebForkEpoch - 1, cfg.CapellaForkEpoch, cfg.BellatrixForkEpoch, cfg.AltairForkEpoch, 0}
for _, epoch := range preBlobEpochs {
require.Equal(t, 0, cfg.MaxBlobsPerBlockAtEpoch(epoch))
}
}

t.Run("Uses earlier matching BlobSchedule entry", func(t *testing.T) {
|
||||
cfg := params.MainnetConfig()
|
||||
cfg.BlobSchedule = []params.BlobScheduleEntry{
|
||||
{Epoch: 5, MaxBlobsPerBlock: 7},
|
||||
{Epoch: 10, MaxBlobsPerBlock: 11},
|
||||
}
|
||||
slot := 6 * cfg.SlotsPerEpoch
|
||||
require.Equal(t, cfg.MaxBlobsPerBlock(slot), 7)
|
||||
})
|
||||
func TestFirstBPOAtFork(t *testing.T) {
|
||||
params.SetActiveTestCleanup(t, params.MainnetBeaconConfig)
|
||||
cfg := params.MainnetConfig()
|
||||
cfg.FuluForkEpoch = cfg.ElectraForkEpoch + 4096*2
|
||||
electraMaxBlobs := uint64(cfg.DeprecatedMaxBlobsPerBlockElectra)
|
||||
cfg.BlobSchedule = []params.BlobScheduleEntry{
|
||||
{Epoch: cfg.FuluForkEpoch, MaxBlobsPerBlock: electraMaxBlobs + 1},
|
||||
{Epoch: cfg.FuluForkEpoch + 1, MaxBlobsPerBlock: electraMaxBlobs + 2},
|
||||
}
|
||||
cfg.InitializeForkSchedule()
|
||||
require.Equal(t, electraMaxBlobs, uint64(cfg.MaxBlobsPerBlockAtEpoch(cfg.FuluForkEpoch-1)))
|
||||
require.Equal(t, electraMaxBlobs+1, uint64(cfg.MaxBlobsPerBlockAtEpoch(cfg.FuluForkEpoch)))
|
||||
require.Equal(t, electraMaxBlobs+2, uint64(cfg.MaxBlobsPerBlockAtEpoch(cfg.FuluForkEpoch+2)))
|
||||
}
|
||||
|
||||
t.Run("Before first BlobSchedule entry falls back to fork logic", func(t *testing.T) {
|
||||
cfg := params.MainnetConfig()
|
||||
cfg.FuluForkEpoch = 1
|
||||
cfg.BlobSchedule = []params.BlobScheduleEntry{
|
||||
{Epoch: 5, MaxBlobsPerBlock: 7},
|
||||
}
|
||||
slot := primitives.Slot(2) // Epoch 0
|
||||
require.Equal(t, cfg.MaxBlobsPerBlock(slot), cfg.DeprecatedMaxBlobsPerBlock)
|
||||
})
|
||||
|
||||
t.Run("Unsorted BlobSchedule still picks latest matching entry", func(t *testing.T) {
|
||||
cfg := params.MainnetConfig()
|
||||
cfg.BlobSchedule = []params.BlobScheduleEntry{
|
||||
{Epoch: 10, MaxBlobsPerBlock: 11},
|
||||
{Epoch: 5, MaxBlobsPerBlock: 7},
|
||||
}
|
||||
slot := 11 * cfg.SlotsPerEpoch
|
||||
require.Equal(t, cfg.MaxBlobsPerBlock(slot), 11)
|
||||
})
|
||||
|
||||
t.Run("Unsorted BlobSchedule picks earlier matching entry correctly", func(t *testing.T) {
|
||||
cfg := params.MainnetConfig()
|
||||
cfg.BlobSchedule = []params.BlobScheduleEntry{
|
||||
{Epoch: 10, MaxBlobsPerBlock: 11},
|
||||
{Epoch: 5, MaxBlobsPerBlock: 7},
|
||||
}
|
||||
slot := 6 * cfg.SlotsPerEpoch
|
||||
require.Equal(t, cfg.MaxBlobsPerBlock(slot), 7)
|
||||
})
|
||||
|
||||
t.Run("Unsorted BlobSchedule falls back to fork logic when epoch is before all entries", func(t *testing.T) {
|
||||
cfg := params.MainnetConfig()
|
||||
cfg.ElectraForkEpoch = 2
|
||||
cfg.BlobSchedule = []params.BlobScheduleEntry{
|
||||
{Epoch: 10, MaxBlobsPerBlock: 11},
|
||||
{Epoch: 5, MaxBlobsPerBlock: 7},
|
||||
}
|
||||
slot := primitives.Slot(1) // Epoch 0
|
||||
require.Equal(t, cfg.MaxBlobsPerBlock(slot), cfg.DeprecatedMaxBlobsPerBlock)
|
||||
})
|
||||
func TestMaxBlobsNoSchedule(t *testing.T) {
|
||||
params.SetActiveTestCleanup(t, params.MainnetBeaconConfig)
|
||||
cfg := params.MainnetConfig()
|
||||
electraMaxBlobs := uint64(cfg.DeprecatedMaxBlobsPerBlockElectra)
|
||||
cfg.BlobSchedule = nil
|
||||
cfg.InitializeForkSchedule()
|
||||
require.Equal(t, electraMaxBlobs, uint64(cfg.MaxBlobsPerBlockAtEpoch(cfg.FuluForkEpoch-1)))
|
||||
require.Equal(t, electraMaxBlobs, uint64(cfg.MaxBlobsPerBlockAtEpoch(cfg.ElectraForkEpoch)))
|
||||
require.Equal(t, cfg.DeprecatedMaxBlobsPerBlock, cfg.MaxBlobsPerBlockAtEpoch(cfg.ElectraForkEpoch-1))
|
||||
require.Equal(t, cfg.DeprecatedMaxBlobsPerBlock, cfg.MaxBlobsPerBlockAtEpoch(cfg.DenebForkEpoch))
|
||||
preBlobEpochs := []primitives.Epoch{cfg.DenebForkEpoch - 1, cfg.CapellaForkEpoch, cfg.BellatrixForkEpoch, cfg.AltairForkEpoch, 0}
|
||||
for _, epoch := range preBlobEpochs {
|
||||
require.Equal(t, 0, cfg.MaxBlobsPerBlockAtEpoch(epoch))
|
||||
}
|
||||
}
|
||||
|
||||
func Test_TargetBlobCount(t *testing.T) {
|
||||
@@ -287,3 +293,15 @@ func TestFarFuturePrepareFilter(t *testing.T) {
|
||||
entry := params.GetNetworkScheduleEntry(oldElectra)
|
||||
require.Equal(t, [4]byte(params.BeaconConfig().DenebForkVersion), entry.ForkVersion)
|
||||
}
|
||||
|
||||
func TestMaxBlobsOverrideEpoch(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig()
|
||||
require.Equal(t, 0, cfg.MaxBlobsPerBlockAtEpoch(0))
|
||||
params.SetGenesisFork(t, cfg, version.Deneb)
|
||||
require.Equal(t, cfg.DeprecatedMaxBlobsPerBlock, cfg.MaxBlobsPerBlockAtEpoch(0))
|
||||
params.SetGenesisFork(t, cfg, version.Electra)
|
||||
require.Equal(t, cfg.DeprecatedMaxBlobsPerBlockElectra, cfg.MaxBlobsPerBlockAtEpoch(0))
|
||||
params.SetGenesisFork(t, cfg, version.Fulu)
|
||||
require.Equal(t, cfg.DeprecatedMaxBlobsPerBlockElectra, cfg.MaxBlobsPerBlockAtEpoch(0))
|
||||
}
|
||||
|
||||
@@ -4,19 +4,14 @@ import (
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/pkg/errors"
)

// DigestChangesAfter checks if an allotted fork is in the following epoch.
func DigestChangesAfter(e primitives.Epoch) bool {
_, ok := BeaconConfig().networkSchedule.activatedAt(e + 1)
return ok
}

// ForkDigestUsingConfig retrieves the fork digest from the current schedule determined
// by the provided epoch.
func ForkDigestUsingConfig(epoch primitives.Epoch, cfg *BeaconChainConfig) [4]byte {
entry := cfg.networkSchedule.ForEpoch(epoch)
entry := cfg.networkSchedule.forEpoch(epoch)
return entry.ForkDigest
}

@@ -42,10 +37,10 @@ func Fork(epoch primitives.Epoch) (*ethpb.Fork, error) {
}

func ForkFromConfig(cfg *BeaconChainConfig, epoch primitives.Epoch) *ethpb.Fork {
current := cfg.networkSchedule.ForEpoch(epoch)
current := cfg.networkSchedule.forEpoch(epoch)
previous := current
if current.Epoch > 0 {
previous = cfg.networkSchedule.ForEpoch(current.Epoch - 1)
previous = cfg.networkSchedule.forEpoch(current.Epoch - 1)
}
return &ethpb.Fork{
PreviousVersion: previous.ForkVersion[:],
@@ -102,11 +97,17 @@ func LastForkEpoch() primitives.Epoch {
}

func LastNetworkScheduleEntry() NetworkScheduleEntry {
lastIdx := len(BeaconConfig().networkSchedule.entries) - 1
return BeaconConfig().networkSchedule.entries[lastIdx]
return BeaconConfig().networkSchedule.LastEntry()
}

func GetNetworkScheduleEntry(epoch primitives.Epoch) NetworkScheduleEntry {
entry := BeaconConfig().networkSchedule.ForEpoch(epoch)
entry := BeaconConfig().networkSchedule.forEpoch(epoch)
return entry
}

func genesisNetworkScheduleEntry() NetworkScheduleEntry {
b := BeaconConfig()
// TODO: note this has a zero digest, but we would never hit this fallback condition on
// a properly initialized fork schedule.
return NetworkScheduleEntry{Epoch: b.GenesisEpoch, isFork: true, ForkVersion: to4(b.GenesisForkVersion), VersionEnum: version.Phase0}
}

@@ -7,7 +7,6 @@ import (
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/OffchainLabs/prysm/v6/testing/require"
)

@@ -93,15 +92,6 @@ func TestRetrieveForkDataFromDigest(t *testing.T) {
require.Equal(t, params.BeaconConfig().AltairForkEpoch, epoch)
}

func TestIsForkNextEpoch(t *testing.T) {
// at
assert.Equal(t, false, params.DigestChangesAfter(params.BeaconConfig().ElectraForkEpoch))
// just before
assert.Equal(t, true, params.DigestChangesAfter(params.BeaconConfig().ElectraForkEpoch-1))
// just after
assert.Equal(t, false, params.DigestChangesAfter(params.BeaconConfig().ElectraForkEpoch+1))
}

func TestNextForkData(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.BeaconConfig().InitializeForkSchedule()
@@ -163,7 +153,9 @@ func TestNextForkData(t *testing.T) {
func TestLastForkEpoch(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
require.Equal(t, cfg.ElectraForkEpoch, params.LastForkEpoch())
if cfg.FuluForkEpoch == cfg.FarFutureEpoch {
require.Equal(t, cfg.ElectraForkEpoch, params.LastForkEpoch())
}
}

func TestForkFromConfig_UsesPassedConfig(t *testing.T) {

@@ -2,12 +2,41 @@ package params

import (
"testing"

"github.com/OffchainLabs/prysm/v6/runtime/version"
)

const (
EnvNameOverrideAccept = "PRYSM_API_OVERRIDE_ACCEPT"
)

func SetGenesisFork(t *testing.T, cfg *BeaconChainConfig, fork int) {
setGenesisUpdateEpochs(cfg, fork)
OverrideBeaconConfig(cfg)
}

func setGenesisUpdateEpochs(b *BeaconChainConfig, fork int) {
switch fork {
case version.Fulu:
b.FuluForkEpoch = 0
setGenesisUpdateEpochs(b, version.Electra)
case version.Electra:
b.ElectraForkEpoch = 0
setGenesisUpdateEpochs(b, version.Deneb)
case version.Deneb:
b.DenebForkEpoch = 0
setGenesisUpdateEpochs(b, version.Capella)
case version.Capella:
b.CapellaForkEpoch = 0
setGenesisUpdateEpochs(b, version.Bellatrix)
case version.Bellatrix:
b.BellatrixForkEpoch = 0
setGenesisUpdateEpochs(b, version.Altair)
case version.Altair:
b.AltairForkEpoch = 0
}
}

// SetupTestConfigCleanup preserves configurations allowing to modify them within tests without any
// restrictions, everything is restored after the test.
func SetupTestConfigCleanup(t testing.TB) {

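A usage sketch for the new helper (illustrative, inside a hypothetical test): selecting a fork zeroes that fork's epoch and, through the recursive cases above, every earlier fork epoch, then installs the config via OverrideBeaconConfig.

// Sketch: run a test "at genesis" in Deneb; the recursion also zeroes the
// Capella, Bellatrix, and Altair fork epochs.
cfg := params.BeaconConfig()
params.SetGenesisFork(t, cfg, version.Deneb)
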
@@ -49,6 +49,8 @@ func Run(t *testing.T, config string, fork int) {

func runTest(t *testing.T, config string, fork int, basePath string) { // nolint:gocognit
require.NoError(t, utils.SetConfig(t, config))
cfg := params.BeaconConfig()
params.SetGenesisFork(t, cfg, fork)
testFolders, _ := utils.TestFolders(t, config, version.String(fork), basePath)
if len(testFolders) == 0 {
t.Fatalf("No test folders found for %s/%s/%s", config, version.String(fork), basePath)

@@ -80,7 +80,8 @@ func runSingleMerkleProofTests(t *testing.T, config, forkOrPhase string, unmarsh
if err != nil {
return
}
if index < consensus_blocks.KZGOffset || index > uint64(consensus_blocks.KZGOffset+params.BeaconConfig().MaxBlobsPerBlock(0)) {
maxBlobs := params.BeaconConfig().MaxBlobsPerBlockAtEpoch(params.BeaconConfig().DenebForkEpoch)
if index < consensus_blocks.KZGOffset || index > uint64(consensus_blocks.KZGOffset+maxBlobs) {
return
}
localProof, err := consensus_blocks.MerkleProofKZGCommitment(body, int(index-consensus_blocks.KZGOffset))

@@ -29,10 +29,12 @@ go_library(
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/validators:go_default_library",
"//beacon-chain/state:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//testing/require:go_default_library",
"//testing/spectest/utils:go_default_library",
"//testing/util:go_default_library",

@@ -7,7 +7,9 @@ import (

"github.com/OffchainLabs/prysm/v6/beacon-chain/core/blocks"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/spectest/utils"
"github.com/OffchainLabs/prysm/v6/testing/util"
@@ -19,6 +21,10 @@ type SSZToBlockBody func([]byte) (interfaces.ReadOnlyBeaconBlockBody, error)

func RunExecutionPayloadTest(t *testing.T, config string, fork string, sszToBlockBody SSZToBlockBody, sszToState SSZToState) {
require.NoError(t, utils.SetConfig(t, config))
cfg := params.BeaconConfig()
fv, err := version.FromString(fork)
require.NoError(t, err)
params.SetGenesisFork(t, cfg, fv)
testFolders, testsFolderPath := utils.TestFolders(t, config, fork, "operations/execution_payload/pyspec_tests")
if len(testFolders) == 0 {
t.Fatalf("No test folders found for %s/%s/%s", config, fork, "operations/execution_payload/pyspec_tests")

@@ -15,8 +15,10 @@ go_library(
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//testing/require:go_default_library",
"//testing/spectest/utils:go_default_library",
"//testing/util:go_default_library",

@@ -12,8 +12,10 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
state_native "github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/spectest/utils"
"github.com/OffchainLabs/prysm/v6/testing/util"
@@ -29,6 +31,8 @@ func init() {
// RunBlockProcessingTest executes "sanity/blocks" tests.
func RunBlockProcessingTest(t *testing.T, config, folderPath string) {
require.NoError(t, utils.SetConfig(t, config))
cfg := params.BeaconConfig()
params.SetGenesisFork(t, cfg, version.Deneb)

testFolders, testsFolderPath := utils.TestFolders(t, config, "deneb", folderPath)
for _, folder := range testFolders {

@@ -27,6 +27,7 @@ go_library(
"lightclient.go",
"logging.go",
"merge.go",
"slot.go",
"state.go",
"sync_aggregate.go",
"sync_committee.go",

@@ -50,6 +50,12 @@ func WithPayloadSetter(p *enginev1.ExecutionPayloadDeneb) DenebBlockGeneratorOpt
}
}

func WithDenebSlot(slot primitives.Slot) DenebBlockGeneratorOption {
return func(g *denebBlockGenerator) {
g.slot = slot
}
}

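A sketch of the new option in use (illustrative; this assumes options are applied after the positional arguments are copied into the generator, so the option wins when both are given):

// Sketch: pin the generated block to the first Deneb slot via the option.
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
blk, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 0, 1, util.WithDenebSlot(ds))
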
func GenerateTestDenebBlockWithSidecar(t *testing.T, parent [32]byte, slot primitives.Slot, nblobs int, opts ...DenebBlockGeneratorOption) (blocks.ROBlock, []blocks.ROBlob) {
g := &denebBlockGenerator{
parent: parent,
@@ -178,9 +184,11 @@ func fakeEmptyProof(_ *testing.T, _ *ethpb.BlobSidecar) [][]byte {
}

func ExtendBlocksPlusBlobs(t *testing.T, blks []blocks.ROBlock, size int) ([]blocks.ROBlock, []blocks.ROBlob) {
deneb := params.BeaconConfig().DenebForkEpoch
denebSlot := SlotAtEpoch(t, deneb)
blobs := make([]blocks.ROBlob, 0)
if len(blks) == 0 {
blk, blb := GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 0, 6)
blk, blb := GenerateTestDenebBlockWithSidecar(t, [32]byte{}, denebSlot, 6)
blobs = append(blobs, blb...)
blks = append(blks, blk)
}

@@ -9,7 +9,8 @@ import (
)

func TestInclusionProofs(t *testing.T) {
_, blobs := GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 0, params.BeaconConfig().MaxBlobsPerBlock(0))
ds := SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
_, blobs := GenerateTestDenebBlockWithSidecar(t, [32]byte{}, ds, params.BeaconConfig().MaxBlobsPerBlock(ds))
for i := range blobs {
require.NoError(t, blocks.VerifyKZGInclusionProof(blobs[i]))
}

@@ -18,8 +18,17 @@ import (
"github.com/pkg/errors"
)

type ElectraStateOption func(*ethpb.BeaconStateElectra) error

func WithElectraStateSlot(slot primitives.Slot) ElectraStateOption {
return func(s *ethpb.BeaconStateElectra) error {
s.Slot = slot
return nil
}
}

// DeterministicGenesisStateElectra returns a genesis state in Electra format made using the deterministic deposits.
func DeterministicGenesisStateElectra(t testing.TB, numValidators uint64) (state.BeaconState, []bls.SecretKey) {
func DeterministicGenesisStateElectra(t testing.TB, numValidators uint64, opts ...ElectraStateOption) (state.BeaconState, []bls.SecretKey) {
deposits, privKeys, err := DeterministicDepositsAndKeys(numValidators)
if err != nil {
t.Fatal(errors.Wrapf(err, "failed to get %d deposits", numValidators))
@@ -28,7 +37,7 @@ func DeterministicGenesisStateElectra(t testing.TB, numValidators uint64) (state
if err != nil {
t.Fatal(errors.Wrapf(err, "failed to get eth1data for %d deposits", numValidators))
}
beaconState, err := genesisBeaconStateElectra(t.Context(), deposits, uint64(0), eth1Data)
beaconState, err := genesisBeaconStateElectra(t.Context(), deposits, uint64(0), eth1Data, opts...)
if err != nil {
t.Fatal(errors.Wrapf(err, "failed to get genesis beacon state of %d validators", numValidators))
}
@@ -51,7 +60,7 @@ func setKeysToActive(beaconState state.BeaconState) error {
}

// genesisBeaconStateElectra returns the genesis beacon state.
func genesisBeaconStateElectra(ctx context.Context, deposits []*ethpb.Deposit, genesisTime uint64, eth1Data *ethpb.Eth1Data) (state.BeaconState, error) {
func genesisBeaconStateElectra(ctx context.Context, deposits []*ethpb.Deposit, genesisTime uint64, eth1Data *ethpb.Eth1Data, opts ...ElectraStateOption) (state.BeaconState, error) {
st, err := emptyGenesisStateElectra()
if err != nil {
return nil, err
@@ -68,7 +77,7 @@ func genesisBeaconStateElectra(ctx context.Context, deposits []*ethpb.Deposit, g
return nil, errors.Wrap(err, "could not process validator deposits")
}

return buildGenesisBeaconStateElectra(genesisTime, st, st.Eth1Data())
return buildGenesisBeaconStateElectra(genesisTime, st, st.Eth1Data(), opts...)
}

// emptyGenesisStateElectra returns an empty genesis state in Electra format.
@@ -105,7 +114,7 @@ func emptyGenesisStateElectra() (state.BeaconState, error) {
return state_native.InitializeFromProtoElectra(st)
}

func buildGenesisBeaconStateElectra(genesisTime uint64, preState state.BeaconState, eth1Data *ethpb.Eth1Data) (state.BeaconState, error) {
func buildGenesisBeaconStateElectra(genesisTime uint64, preState state.BeaconState, eth1Data *ethpb.Eth1Data, opts ...ElectraStateOption) (state.BeaconState, error) {
if eth1Data == nil {
return nil, errors.New("no eth1data provided for genesis state")
}
@@ -213,6 +222,11 @@ func buildGenesisBeaconStateElectra(genesisTime uint64, preState state.BeaconSta
PendingPartialWithdrawals: make([]*ethpb.PendingPartialWithdrawal, 0),
PendingConsolidations: make([]*ethpb.PendingConsolidation, 0),
}
for _, opt := range opts {
if err := opt(st); err != nil {
return nil, err
}
}

var scBits [fieldparams.SyncAggregateSyncCommitteeBytesLength]byte
bodyRoot, err := (&ethpb.BeaconBlockBodyElectra{

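A brief sketch of the threaded options in use (illustrative; the validator count 64 is arbitrary):

// Sketch: build a deterministic Electra genesis state positioned at the
// first Electra slot instead of slot 0.
es := util.SlotAtEpoch(t, params.BeaconConfig().ElectraForkEpoch)
st, _ := util.DeterministicGenesisStateElectra(t, 64, util.WithElectraStateSlot(es))
require.Equal(t, es, st.Slot())
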
15 testing/util/slot.go Normal file
@@ -0,0 +1,15 @@
package util

import (
"testing"

"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/time/slots"
)

func SlotAtEpoch(t *testing.T, e primitives.Epoch) primitives.Slot {
s, err := slots.EpochStart(e)
require.NoError(t, err)
return s
}
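
The helper fails the test on error rather than returning it, which is what lets call sites throughout this diff collapse the old slots.EpochStart plus require.NoError pair into a single line:

// Typical call site, as used across the tests above.
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)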