Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-09 13:28:01 -05:00)
Adding a test for blob parameter only forks and increased blob limits
@@ -62,6 +62,9 @@ func E2ETestConfig() *BeaconChainConfig {
    e2eConfig.BlobSchedule = []BlobScheduleEntry{
        {Epoch: e2eConfig.DenebForkEpoch, MaxBlobsPerBlock: uint64(e2eConfig.DeprecatedMaxBlobsPerBlock)},
        {Epoch: e2eConfig.ElectraForkEpoch, MaxBlobsPerBlock: uint64(e2eConfig.DeprecatedMaxBlobsPerBlockElectra)},
        // BPO (Blob Parameter Optimization) schedule for Fulu
        {Epoch: e2eConfig.FuluForkEpoch + 1, MaxBlobsPerBlock: 15},
        {Epoch: e2eConfig.FuluForkEpoch + 2, MaxBlobsPerBlock: 21},
    }

    e2eConfig.InitializeForkSchedule()
@@ -117,6 +120,9 @@ func E2EMainnetTestConfig() *BeaconChainConfig {
    e2eConfig.BlobSchedule = []BlobScheduleEntry{
        {Epoch: e2eConfig.DenebForkEpoch, MaxBlobsPerBlock: uint64(e2eConfig.DeprecatedMaxBlobsPerBlock)},
        {Epoch: e2eConfig.ElectraForkEpoch, MaxBlobsPerBlock: uint64(e2eConfig.DeprecatedMaxBlobsPerBlockElectra)},
        // BPO (Blob Parameter Optimization) schedule for Fulu
        {Epoch: e2eConfig.FuluForkEpoch + 1, MaxBlobsPerBlock: 15},
        {Epoch: e2eConfig.FuluForkEpoch + 2, MaxBlobsPerBlock: 21},
    }

    e2eConfig.InitializeForkSchedule()

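The schedule above is consumed as a step function over epochs (the evaluator added later in this commit reads it through cfg.MaxBlobsPerBlockAtEpoch). A minimal standalone sketch of that lookup, assuming entries are sorted by ascending activation epoch; the helper name and the fork epochs in main are illustrative, not Prysm's implementation:

package main

import "fmt"

type BlobScheduleEntry struct {
    Epoch            uint64
    MaxBlobsPerBlock uint64
}

// maxBlobsAtEpoch returns the limit from the last schedule entry whose
// activation epoch is at or before the queried epoch.
func maxBlobsAtEpoch(schedule []BlobScheduleEntry, epoch uint64) uint64 {
    limit := uint64(0)
    for _, e := range schedule {
        if epoch >= e.Epoch {
            limit = e.MaxBlobsPerBlock
        }
    }
    return limit
}

func main() {
    schedule := []BlobScheduleEntry{
        {Epoch: 0, MaxBlobsPerBlock: 6},  // Deneb
        {Epoch: 1, MaxBlobsPerBlock: 9},  // Electra
        {Epoch: 3, MaxBlobsPerBlock: 15}, // first BPO step
        {Epoch: 4, MaxBlobsPerBlock: 21}, // second BPO step
    }
    fmt.Println(maxBlobsAtEpoch(schedule, 2)) // 9
    fmt.Println(maxBlobsAtEpoch(schedule, 4)) // 21
}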
@@ -128,6 +128,45 @@ func GethOsakaTime(genesisTime time.Time, cfg *clparams.BeaconChainConfig) *uint
    return osakaTime
}

// GethBPO1Time calculates the absolute time of the BPO1 activation
// by finding the first BlobSchedule entry with MaxBlobsPerBlock > 9 (Electra's limit)
// which corresponds to the first BPO increase.
func GethBPO1Time(genesisTime time.Time, cfg *clparams.BeaconChainConfig) *uint64 {
    for _, entry := range cfg.BlobSchedule {
        // BPO1 is the first entry that increases beyond Electra's 9 blobs
        if entry.MaxBlobsPerBlock > uint64(cfg.DeprecatedMaxBlobsPerBlockElectra) {
            startSlot, err := slots.EpochStart(entry.Epoch)
            if err == nil {
                startTime := slots.UnsafeStartTime(genesisTime, startSlot)
                newTime := uint64(startTime.Unix())
                return &newTime
            }
        }
    }
    return nil
}

// GethBPO2Time calculates the absolute time of the BPO2 activation
// by finding the second BlobSchedule entry with MaxBlobsPerBlock > 9.
func GethBPO2Time(genesisTime time.Time, cfg *clparams.BeaconChainConfig) *uint64 {
    count := 0
    for _, entry := range cfg.BlobSchedule {
        // Count entries that are beyond Electra's limit
        if entry.MaxBlobsPerBlock > uint64(cfg.DeprecatedMaxBlobsPerBlockElectra) {
            count++
            if count == 2 { // BPO2 is the second such entry
                startSlot, err := slots.EpochStart(entry.Epoch)
                if err == nil {
                    startTime := slots.UnsafeStartTime(genesisTime, startSlot)
                    newTime := uint64(startTime.Unix())
                    return &newTime
                }
            }
        }
    }
    return nil
}
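Both helpers reduce to the same epoch-to-wall-clock conversion that slots.EpochStart and slots.UnsafeStartTime perform. A self-contained sketch of that arithmetic, assuming the slot duration and epoch length come from the beacon config (with 32 slots per epoch and 12-second slots, epoch 3 starts 1152 seconds after genesis):

package main

import (
    "fmt"
    "time"
)

// epochStartTime converts an epoch number into an absolute timestamp for a
// chain with fixed-length slots and epochs.
func epochStartTime(genesis time.Time, epoch, slotsPerEpoch, secondsPerSlot uint64) time.Time {
    startSlot := epoch * slotsPerEpoch
    return genesis.Add(time.Duration(startSlot*secondsPerSlot) * time.Second)
}

func main() {
    genesis := time.Unix(1700000000, 0).UTC()
    fmt.Println(epochStartTime(genesis, 3, 32, 12)) // genesis + 1152s
}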

// GethTestnetGenesis creates a genesis.json for eth1 clients with a set of defaults suitable for ephemeral testnets,
// like in an e2e test. The parameters are minimal but the full value is returned unmarshaled so that it can be
// customized as desired.
@@ -149,6 +188,8 @@ func GethTestnetGenesis(genesis time.Time, cfg *clparams.BeaconChainConfig) *cor
    if cfg.FuluForkEpoch == 0 {
        osakaTime = &genesisTime
    }
    bpo1Time := GethBPO1Time(genesis, cfg)
    bpo2Time := GethBPO2Time(genesis, cfg)
    cc := &params.ChainConfig{
        ChainID:        big.NewInt(defaultTestChainId),
        HomesteadBlock: bigz,
@@ -171,26 +212,16 @@ func GethTestnetGenesis(genesis time.Time, cfg *clparams.BeaconChainConfig) *cor
        CancunTime: cancunTime,
        PragueTime: pragueTime,
        OsakaTime:  osakaTime,
        BPO1Time:   bpo1Time,
        BPO2Time:   bpo2Time,
        DepositContractAddress: common.HexToAddress(cfg.DepositContractAddress),
        BlobScheduleConfig: &params.BlobScheduleConfig{
            Cancun: params.DefaultCancunBlobConfig,
            Prague: params.DefaultPragueBlobConfig,
            Osaka:  params.DefaultOsakaBlobConfig,
            BPO1: &params.BlobConfig{
                Target:         9,
                Max:            14,
                UpdateFraction: 8832827,
            },
            BPO2: &params.BlobConfig{
                Target:         14,
                Max:            21,
                UpdateFraction: 13739630,
            },
            BPO3: &params.BlobConfig{
                Target:         21,
                Max:            32,
                UpdateFraction: 20609697,
            },
            BPO1: params.DefaultBPO1BlobConfig,
            BPO2: params.DefaultBPO2BlobConfig,
            BPO3: params.DefaultBPO3BlobConfig,
        },
    }
    da := defaultDepositContractAllocation(cfg.DepositContractAddress)

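On the execution-layer side, the BPO1Time/BPO2Time timestamps wired into the ChainConfig above gate which blob config is in force for a given block timestamp. Conceptually it is a last-activated-fork-wins selection; an illustrative sketch only, not go-ethereum's actual code:

// activeBlobLimit picks the blob limit in force at a block timestamp from the
// optional fork activation times. A nil time means the fork is not scheduled.
func activeBlobLimit(ts uint64, osakaMax, bpo1Max, bpo2Max int, bpo1Time, bpo2Time *uint64) int {
    limit := osakaMax
    if bpo1Time != nil && ts >= *bpo1Time {
        limit = bpo1Max
    }
    if bpo2Time != nil && ts >= *bpo2Time {
        limit = bpo2Max
    }
    return limit
}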
@@ -3,6 +3,7 @@ package eth1
import (
    "context"
    "math/big"
    "strings"
    "time"

    "github.com/OffchainLabs/prysm/v7/testing/endtoend/params"
@@ -31,10 +32,6 @@ var _ e2etypes.EngineProxy = (*Proxy)(nil)

// WaitForBlocks waits for a certain amount of blocks to be mined by the ETH1 chain before returning.
func WaitForBlocks(ctx context.Context, web3 *ethclient.Client, key *keystore.Key, blocksToWait uint64) error {
    nonce, err := web3.PendingNonceAt(ctx, key.Address)
    if err != nil {
        return err
    }
    chainID, err := web3.NetworkID(ctx)
    if err != nil {
        return err
@@ -49,19 +46,36 @@ func WaitForBlocks(ctx context.Context, web3 *ethclient.Client, key *keystore.Ke
        if ctx.Err() != nil {
            return ctx.Err()
        }
        // Get fresh nonce each iteration to handle any pending transactions
        nonce, err := web3.PendingNonceAt(ctx, key.Address)
        if err != nil {
            return err
        }
        gasPrice, err := web3.SuggestGasPrice(ctx)
        if err != nil {
            return err
        }
        // Bump gas price by 20% to ensure we can replace any pending transactions
        gasPrice = new(big.Int).Mul(gasPrice, big.NewInt(120))
        gasPrice = new(big.Int).Div(gasPrice, big.NewInt(100))

        spamTX := types.NewTransaction(nonce, key.Address, big.NewInt(0), params.SpamTxGasLimit, gasPrice, []byte{})
        signed, err := types.SignTx(spamTX, types.NewEIP155Signer(chainID), key.PrivateKey)
        if err != nil {
            return err
        }
        if err = web3.SendTransaction(ctx, signed); err != nil {
            // If replacement error, try again with next iteration which will get fresh nonce
            if strings.Contains(err.Error(), "replacement transaction underpriced") {
                time.Sleep(timeGapPerMiningTX)
                block, err = web3.BlockByNumber(ctx, nil)
                if err != nil {
                    return err
                }
                continue
            }
            return err
        }
        nonce++
        time.Sleep(timeGapPerMiningTX)
        block, err = web3.BlockByNumber(ctx, nil)
        if err != nil {

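The two big.Int lines above are a plain 20% bump (10 gwei in, 12 gwei out). As a standalone helper it would look like this; a sketch, not part of the diff:

import "math/big"

// bumpGasPrice returns price * 120 / 100 without mutating the input.
func bumpGasPrice(price *big.Int) *big.Int {
    bumped := new(big.Int).Mul(price, big.NewInt(120))
    return bumped.Div(bumped, big.NewInt(100))
}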
@@ -594,10 +594,14 @@ func encodeBlobs(data []byte) []kzg4844.Blob {
    blobIndex := 0
    fieldIndex := -1
    numOfElems := fieldparams.BlobLength / 32
    // Allow up to 6 blobs per transaction to properly test BPO limits.
    // With 10 blob txs per slot × 6 blobs = 60 max blobs submitted,
    // which exceeds the highest BPO limit (21) and ensures we can hit it.
    const maxBlobsPerTx = 6
    for i := 0; i < len(data); i += 31 {
        fieldIndex++
        if fieldIndex == numOfElems {
            if blobIndex >= 1 {
            if blobIndex >= maxBlobsPerTx-1 {
                break
            }
            blobs = append(blobs, kzg4844.Blob{})
@@ -650,7 +654,12 @@ func kZGToVersionedHash(kzg kzg4844.Commitment) common.Hash {
}

func randomBlobData() ([]byte, error) {
    size := mathRand.Intn(fieldparams.BlobSize) // #nosec G404
    // Always generate 6 blobs worth of data to properly test BPO limits.
    // With 10 blob txs per slot * 6 blobs = 60 blobs submitted per slot,
    // we can easily hit any BPO limit (up to 21 blobs per block).
    const numBlobs = 6
    // Generate enough data to fill all 6 blobs
    size := (numBlobs-1)*fieldparams.BlobSize + 1
    data := make([]byte, size)
    n, err := mathRand.Read(data) // #nosec G404
    if err != nil {

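The 6-blob figure in both functions follows from the packing in encodeBlobs: 31 data bytes per field element and BlobLength/32 field elements per blob. A quick standalone check of that arithmetic, assuming a 131072-byte blob (4096 32-byte field elements), showing that (numBlobs-1)*BlobSize + 1 bytes of data really does spill into 6 blobs:

package main

import "fmt"

// blobsNeeded counts how many blobs a payload occupies when each field
// element carries 31 data bytes and each blob holds 4096 field elements.
func blobsNeeded(dataLen int) int {
    const usableBytesPerBlob = 4096 * 31 // 126976
    return (dataLen + usableBytesPerBlob - 1) / usableBytesPerBlob
}

func main() {
    const blobSize = 131072
    size := (6-1)*blobSize + 1     // mirrors randomBlobData with numBlobs = 6
    fmt.Println(blobsNeeded(size)) // 6
}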
@@ -66,6 +66,10 @@ func e2eMinimal(t *testing.T, cfg *params.BeaconChainConfig, cfgo ...types.E2ECo
    evals = addIfForkSet(evals, cfg.DenebForkEpoch, ev.DenebForkTransition)
    evals = addIfForkSet(evals, cfg.ElectraForkEpoch, ev.ElectraForkTransition)
    evals = addIfForkSet(evals, cfg.FuluForkEpoch, ev.FuluForkTransition)
    // Blob evaluators - run from Deneb onwards
    evals = addIfForkSet(evals, cfg.DenebForkEpoch, ev.BlobsIncludedInBlocks)
    // BPO (Blob Parameter Optimization) evaluator - runs from Fulu onwards
    evals = addIfForkSet(evals, cfg.FuluForkEpoch, ev.BlobLimitsRespected)

    testConfig := &types.E2EConfig{
        BeaconFlags: []string{
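addIfForkSet is an existing helper in the e2e suite; its job is simply to skip evaluators whose fork epoch was never scheduled. A plausible sketch of that guard, illustrative only (the far-future-epoch sentinel is an assumption, not taken from this diff):

func addIfForkSet(evals []types.Evaluator, forkEpoch primitives.Epoch, evaluator types.Evaluator) []types.Evaluator {
    // An unscheduled fork is typically left at the far-future epoch, so its
    // evaluator would never fire; skip registering it.
    if forkEpoch == params.BeaconConfig().FarFutureEpoch {
        return evals
    }
    return append(evals, evaluator)
}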
@@ -154,6 +158,11 @@ func e2eMainnet(t *testing.T, usePrysmSh, useMultiClient bool, cfg *params.Beaco
    evals = addIfForkSet(evals, cfg.CapellaForkEpoch, ev.CapellaForkTransition)
    evals = addIfForkSet(evals, cfg.DenebForkEpoch, ev.DenebForkTransition)
    evals = addIfForkSet(evals, cfg.ElectraForkEpoch, ev.ElectraForkTransition)
    evals = addIfForkSet(evals, cfg.FuluForkEpoch, ev.FuluForkTransition)
    // Blob evaluators - run from Deneb onwards
    evals = addIfForkSet(evals, cfg.DenebForkEpoch, ev.BlobsIncludedInBlocks)
    // BPO (Blob Parameter Optimization) evaluator - runs from Fulu onwards
    evals = addIfForkSet(evals, cfg.FuluForkEpoch, ev.BlobLimitsRespected)

    testConfig := &types.E2EConfig{
        BeaconFlags: []string{
@@ -222,6 +231,11 @@ func scenarioEvals(cfg *params.BeaconChainConfig) []types.Evaluator {
    evals = addIfForkSet(evals, cfg.CapellaForkEpoch, ev.CapellaForkTransition)
    evals = addIfForkSet(evals, cfg.DenebForkEpoch, ev.DenebForkTransition)
    evals = addIfForkSet(evals, cfg.ElectraForkEpoch, ev.ElectraForkTransition)
    evals = addIfForkSet(evals, cfg.FuluForkEpoch, ev.FuluForkTransition)
    // Blob evaluators - run from Deneb onwards
    evals = addIfForkSet(evals, cfg.DenebForkEpoch, ev.BlobsIncludedInBlocks)
    // BPO (Blob Parameter Optimization) evaluator - runs from Fulu onwards
    evals = addIfForkSet(evals, cfg.FuluForkEpoch, ev.BlobLimitsRespected)
    return evals
}

@@ -243,5 +257,10 @@ func scenarioEvalsMulti(cfg *params.BeaconChainConfig) []types.Evaluator {
    evals = addIfForkSet(evals, cfg.CapellaForkEpoch, ev.CapellaForkTransition)
    evals = addIfForkSet(evals, cfg.DenebForkEpoch, ev.DenebForkTransition)
    evals = addIfForkSet(evals, cfg.ElectraForkEpoch, ev.ElectraForkTransition)
    evals = addIfForkSet(evals, cfg.FuluForkEpoch, ev.FuluForkTransition)
    // Blob evaluators - run from Deneb onwards
    evals = addIfForkSet(evals, cfg.DenebForkEpoch, ev.BlobsIncludedInBlocks)
    // BPO (Blob Parameter Optimization) evaluator - runs from Fulu onwards
    evals = addIfForkSet(evals, cfg.FuluForkEpoch, ev.BlobLimitsRespected)
    return evals
}

@@ -4,6 +4,7 @@ go_library(
    name = "go_default_library",
    testonly = True,
    srcs = [
        "blob_limits.go",
        "builder.go",
        "data.go",
        "execution_engine.go",

testing/endtoend/evaluators/blob_limits.go (new file, 246 lines)
@@ -0,0 +1,246 @@
package evaluators

import (
    "context"

    "github.com/OffchainLabs/prysm/v7/config/params"
    "github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
    "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
    ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v7/runtime/version"
    "github.com/OffchainLabs/prysm/v7/testing/endtoend/policies"
    e2etypes "github.com/OffchainLabs/prysm/v7/testing/endtoend/types"
    "github.com/OffchainLabs/prysm/v7/time/slots"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
    "google.golang.org/grpc"
    "google.golang.org/protobuf/types/known/emptypb"
)

// Minimum blob utilization percentage required to pass the evaluator.
// We expect the transaction generator to produce enough blobs to hit at least this threshold.
const minBlobUtilizationPercent = 50

// BlobsIncludedInBlocks verifies that blocks contain blobs and that the blob count
// does not exceed the maximum allowed for the epoch (respecting BPO schedule).
var BlobsIncludedInBlocks = e2etypes.Evaluator{
    Name: "blobs_included_in_blocks_epoch_%d",
    Policy: func(currentEpoch primitives.Epoch) bool {
        // Only run from Deneb onwards
        return policies.OnwardsNthEpoch(params.BeaconConfig().DenebForkEpoch)(currentEpoch)
    },
    Evaluation: blobsIncludedInBlocks,
}

// BlobLimitsRespected verifies that the BPO (Blob Parameter Optimization) limits
// are correctly enforced after Fulu fork, checking that:
// 1. Blocks don't exceed MaxBlobsPerBlock for their respective epoch
// 2. We're utilizing the increased capacity (at least 50% utilization)
// 3. When BPO activates (limit increases), blocks actually use the new higher limit
var BlobLimitsRespected = e2etypes.Evaluator{
    Name: "blob_limits_respected_epoch_%d",
    Policy: func(currentEpoch primitives.Epoch) bool {
        // Only run from Fulu onwards where BPO is active
        return policies.OnwardsNthEpoch(params.BeaconConfig().FuluForkEpoch)(currentEpoch)
    },
    Evaluation: blobLimitsRespected,
}

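The Policy predicates decide at which epochs an evaluator runs. policies.OnwardsNthEpoch comes from the existing e2e helpers; judging by its name and its use here, it returns a predicate that holds for every epoch at or after the given one, roughly (a sketch, not the actual implementation):

func OnwardsNthEpoch(n primitives.Epoch) func(primitives.Epoch) bool {
    return func(currentEpoch primitives.Epoch) bool {
        return currentEpoch >= n
    }
}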
func blobsIncludedInBlocks(_ *e2etypes.EvaluationContext, conns ...*grpc.ClientConn) error {
    conn := conns[0]
    client := ethpb.NewBeaconChainClient(conn)

    chainHead, err := client.GetChainHead(context.Background(), &emptypb.Empty{})
    if err != nil {
        return errors.Wrap(err, "failed to get chain head")
    }

    // Check blocks from the previous epoch
    epoch := chainHead.HeadEpoch
    if epoch > 0 {
        epoch = epoch - 1
    }

    // Skip if we're before Deneb
    if epoch < params.BeaconConfig().DenebForkEpoch {
        return nil
    }

    req := &ethpb.ListBlocksRequest{QueryFilter: &ethpb.ListBlocksRequest_Epoch{Epoch: epoch}}
    blks, err := client.ListBeaconBlocks(context.Background(), req)
    if err != nil {
        return errors.Wrap(err, "failed to get blocks from beacon-chain")
    }

    blocksWithBlobs := 0
    for _, ctr := range blks.BlockContainers {
        blk, err := blocks.BeaconBlockContainerToSignedBeaconBlock(ctr)
        if err != nil {
            return errors.Wrap(err, "failed to convert block container to signed beacon block")
        }

        if blk == nil || blk.IsNil() {
            continue
        }

        // Skip blocks before Deneb
        if blk.Version() < version.Deneb {
            continue
        }

        commitments, err := blk.Block().Body().BlobKzgCommitments()
        if err != nil {
            return errors.Wrap(err, "failed to get blob kzg commitments")
        }

        if len(commitments) > 0 {
            blocksWithBlobs++
        }
    }

    // We expect at least some blocks to have blobs since we're sending blob transactions
    if blocksWithBlobs == 0 {
        return errors.Errorf("no blocks with blobs found in epoch %d", epoch)
    }

    return nil
}

func blobLimitsRespected(_ *e2etypes.EvaluationContext, conns ...*grpc.ClientConn) error {
    conn := conns[0]
    nodeClient := ethpb.NewNodeClient(conn)
    beaconClient := ethpb.NewBeaconChainClient(conn)

    genesis, err := nodeClient.GetGenesis(context.Background(), &emptypb.Empty{})
    if err != nil {
        return errors.Wrap(err, "failed to get genesis")
    }

    currSlot := slots.CurrentSlot(genesis.GenesisTime.AsTime())
    currEpoch := slots.ToEpoch(currSlot)

    // Check the previous epoch to ensure blocks are finalized
    epochToCheck := currEpoch
    if epochToCheck > 0 {
        epochToCheck = epochToCheck - 1
    }

    // Skip if we're before Fulu
    if epochToCheck < params.BeaconConfig().FuluForkEpoch {
        return nil
    }

    req := &ethpb.ListBlocksRequest{QueryFilter: &ethpb.ListBlocksRequest_Epoch{Epoch: epochToCheck}}
    blks, err := beaconClient.ListBeaconBlocks(context.Background(), req)
    if err != nil {
        return errors.Wrap(err, "failed to get blocks from beacon-chain")
    }

    cfg := params.BeaconConfig()
    maxBlobsForEpoch := cfg.MaxBlobsPerBlockAtEpoch(epochToCheck)

    // Get the previous epoch's limit to detect BPO transitions
    var prevEpochMaxBlobs int
    if epochToCheck > 0 {
        prevEpochMaxBlobs = cfg.MaxBlobsPerBlockAtEpoch(epochToCheck - 1)
    }

    // Check if this is a BPO transition epoch (limit increased from previous epoch)
    isBPOTransitionEpoch := maxBlobsForEpoch > prevEpochMaxBlobs

    var totalBlobs int
    var maxBlobsInBlock int
    var blockCount int

    for _, ctr := range blks.BlockContainers {
        blk, err := blocks.BeaconBlockContainerToSignedBeaconBlock(ctr)
        if err != nil {
            return errors.Wrap(err, "failed to convert block container to signed beacon block")
        }

        if blk == nil || blk.IsNil() {
            continue
        }

        // Skip blocks before Deneb (shouldn't happen if we're checking post-Fulu epochs)
        if blk.Version() < version.Deneb {
            continue
        }

        slot := blk.Block().Slot()
        blockEpoch := slots.ToEpoch(slot)

        commitments, err := blk.Block().Body().BlobKzgCommitments()
        if err != nil {
            return errors.Wrap(err, "failed to get blob kzg commitments")
        }

        blobCount := len(commitments)

        // Verify we don't exceed the limit
        if blobCount > maxBlobsForEpoch {
            return errors.Errorf(
                "block at slot %d (epoch %d) has %d blobs, exceeding max allowed %d for this epoch",
                slot, blockEpoch, blobCount, maxBlobsForEpoch,
            )
        }

        totalBlobs += blobCount
        blockCount++
        if blobCount > maxBlobsInBlock {
            maxBlobsInBlock = blobCount
        }
    }

    // Calculate utilization
    if blockCount == 0 {
        return errors.Errorf("no blocks found in epoch %d", epochToCheck)
    }

    utilizationPercent := (maxBlobsInBlock * 100) / maxBlobsForEpoch
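    // Integer division truncates here: a best block with 8 blobs against a
    // 15-blob limit gives (8*100)/15 = 53 and clears the 50% threshold, while
    // 7 blobs would give 46 and fail.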

    logrus.WithFields(logrus.Fields{
        "epoch":                epochToCheck,
        "maxBlobsForEpoch":     maxBlobsForEpoch,
        "prevEpochMaxBlobs":    prevEpochMaxBlobs,
        "maxBlobsInBlock":      maxBlobsInBlock,
        "totalBlobs":           totalBlobs,
        "blockCount":           blockCount,
        "utilizationPercent":   utilizationPercent,
        "isBPOTransitionEpoch": isBPOTransitionEpoch,
    }).Info("Blob utilization stats for epoch")

    // For BPO transition epochs, verify that BPO actually activated by checking
    // that at least one block exceeded the previous epoch's limit
    if isBPOTransitionEpoch {
        if maxBlobsInBlock <= prevEpochMaxBlobs {
            return errors.Errorf(
                "BPO transition at epoch %d: limit increased from %d to %d, but no block exceeded the old limit. "+
                    "Max blobs in any block was %d. This indicates BPO may not be working correctly or "+
                    "the transaction generator is not producing enough blobs to test the increased limit.",
                epochToCheck, prevEpochMaxBlobs, maxBlobsForEpoch, maxBlobsInBlock,
            )
        }
        logrus.WithFields(logrus.Fields{
            "epoch":           epochToCheck,
            "prevLimit":       prevEpochMaxBlobs,
            "newLimit":        maxBlobsForEpoch,
            "maxBlobsInBlock": maxBlobsInBlock,
        }).Info("BPO activation verified: blocks are using capacity beyond previous limit")
    }

    // For all BPO epochs (where limit > Electra's 9), verify we're utilizing the capacity
    electraMax := cfg.DeprecatedMaxBlobsPerBlockElectra
    if maxBlobsForEpoch > electraMax {
        if utilizationPercent < minBlobUtilizationPercent {
            return errors.Errorf(
                "BPO epoch %d has max blobs %d but only achieved %d%% utilization (max blobs in any block: %d). "+
                    "Expected at least %d%% utilization to verify BPO is working. "+
                    "This may indicate the transaction generator is not producing enough blobs.",
                epochToCheck, maxBlobsForEpoch, utilizationPercent, maxBlobsInBlock, minBlobUtilizationPercent,
            )
        }
    }

    return nil
}
@@ -14,6 +14,8 @@ import (
// - Participation at epoch 2
// - Finalization at epoch 3
// - Fulu fork transition at epoch 2
// - BPO 1 at epoch 3 (15 blobs)
// - BPO 2 at epoch 4 (21 blobs)
// - Exit proposed at epoch 4
// - Exit confirmed at epoch 5
// - Withdrawal submitted at epoch 5
@@ -23,6 +25,14 @@ func TestEndToEnd_MinimalConfig(t *testing.T) {
    cfg = types.InitForkCfg(version.Electra, version.Fulu, cfg)
    // Set Fulu fork at epoch 2 for a quick fork transition test
    cfg.FuluForkEpoch = 2
    // Update BlobSchedule to use the new FuluForkEpoch for BPO testing
    cfg.BlobSchedule = []params.BlobScheduleEntry{
        {Epoch: cfg.DenebForkEpoch, MaxBlobsPerBlock: uint64(cfg.DeprecatedMaxBlobsPerBlock)},
        {Epoch: cfg.ElectraForkEpoch, MaxBlobsPerBlock: uint64(cfg.DeprecatedMaxBlobsPerBlockElectra)},
        // BPO (Blob Parameter Optimization) schedule for Fulu
        {Epoch: cfg.FuluForkEpoch + 1, MaxBlobsPerBlock: 15},
        {Epoch: cfg.FuluForkEpoch + 2, MaxBlobsPerBlock: 21},
    }
    cfg.InitializeForkSchedule()
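    // With FuluForkEpoch = 2, the schedule above raises the limit to 15 blobs
    // at epoch 3 and to 21 blobs at epoch 4, matching the BPO 1 / BPO 2
    // milestones in the test plan comment above.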

    r := e2eMinimal(t, cfg,