Compare commits


1 Commit

Author SHA1 Message Date
terence tsao 503c1ca465 Dont gen attestation pre state 2025-12-03 20:15:35 -08:00
22 changed files with 47 additions and 224 deletions

View File

@@ -10,6 +10,7 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
forkchoicetypes "github.com/OffchainLabs/prysm/v7/beacon-chain/forkchoice/types"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/config/features"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
@@ -19,10 +20,15 @@ import (
"github.com/sirupsen/logrus"
)
var ErrStopAttestationStateGen = errors.New("stopped attestation state generation")
// The caller of this function must have a lock on forkchoice.
func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) state.ReadOnlyBeaconState {
headEpoch := slots.ToEpoch(s.HeadSlot())
if c.Epoch < headEpoch || c.Epoch == 0 {
if c.Epoch < headEpoch {
return nil
}
if !s.cfg.ForkChoiceStore.IsCanonical([32]byte(c.Root)) {
return nil
}
// Only use head state if the head state is compatible with the target checkpoint.
@@ -30,11 +36,11 @@ func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) st
if err != nil {
return nil
}
headDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(headRoot), c.Epoch-1)
headDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(headRoot), c.Epoch)
if err != nil {
return nil
}
targetDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(c.Root), c.Epoch-1)
targetDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(c.Root), c.Epoch)
if err != nil {
return nil
}
@@ -50,17 +56,13 @@ func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) st
}
return st
}
// At this point we can only have c.Epoch > headEpoch.
if !s.cfg.ForkChoiceStore.IsCanonical([32]byte(c.Root)) {
return nil
}
// Advance the head state to the start of the target epoch.
// Otherwise we need to advance the head state to the start of the target epoch.
// This point can only be reached if c.Root == headRoot and c.Epoch > headEpoch.
slot, err := slots.EpochStart(c.Epoch)
if err != nil {
return nil
}
// Check whether we have already set the checkpoint cache. This will be tried again if we fail here, but the check is cheap anyway.
epochKey := strconv.FormatUint(uint64(c.Epoch), 10 /* base 10 */)
lock := async.NewMultilock(string(c.Root) + epochKey)
lock.Lock()
@@ -136,6 +138,10 @@ func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (stat
return nil, errors.Wrap(ErrNotCheckpoint, fmt.Sprintf("epoch %d root %#x", c.Epoch, c.Root))
}
if features.Get().DisableAttestationStateGen {
return nil, ErrStopAttestationStateGen
}
// Fallback to state regeneration.
log.WithFields(logrus.Fields{"epoch": c.Epoch, "root": fmt.Sprintf("%#x", c.Root)}).Debug("Regenerating attestation pre-state")
baseState, err := s.cfg.StateGen.StateByRoot(ctx, bytesutil.ToBytes32(c.Root))
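The head-state reuse above hinges on one comparison: the head state can stand in as the attestation pre-state only when the head and the target checkpoint resolve to the same dependent root for c.Epoch, i.e. the same shuffling seed data. A minimal sketch of that check follows, with a plain map standing in for ForkChoiceStore.DependentRootForEpoch (illustrative names, not Prysm's API); note that the c.Epoch-1 to c.Epoch change in the hunk only moves which epoch the lookup is keyed on, the comparison itself is unchanged.

package main

import (
	"errors"
	"fmt"
)

// Illustrative stand-in for ForkChoiceStore.DependentRootForEpoch: maps a
// block root to the dependent root of the requested epoch on its chain.
var dependentRoots = map[[32]byte][32]byte{
	{'h'}: {'d'}, // head chain resolves to dependent root 'd'
	{'t'}: {'d'}, // target chain resolves to the same dependent root
}

func dependentRootForEpoch(root [32]byte) ([32]byte, error) {
	d, ok := dependentRoots[root]
	if !ok {
		return [32]byte{}, errors.New("nil node")
	}
	return d, nil
}

// headCompatible mirrors the check above: any lookup error means "don't
// reuse the head state"; equal dependent roots mean the head state and the
// target checkpoint share committee shufflings for the epoch.
func headCompatible(headRoot, targetRoot [32]byte) bool {
	headDependent, err := dependentRootForEpoch(headRoot)
	if err != nil {
		return false
	}
	targetDependent, err := dependentRootForEpoch(targetRoot)
	if err != nil {
		return false
	}
	return headDependent == targetDependent
}

func main() {
	fmt.Println(headCompatible([32]byte{'h'}, [32]byte{'t'})) // true: head state is reusable
}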

View File

@@ -181,123 +181,6 @@ func TestService_GetRecentPreState(t *testing.T) {
require.NotNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{Epoch: 1, Root: ckRoot}))
}
func TestService_GetRecentPreState_Epoch_0(t *testing.T) {
service, _ := minimalTestService(t)
ctx := t.Context()
require.IsNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{}))
}
func TestService_GetRecentPreState_Old_Checkpoint(t *testing.T) {
service, _ := minimalTestService(t)
ctx := t.Context()
s, err := util.NewBeaconState()
require.NoError(t, err)
ckRoot := bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)
cp0 := &ethpb.Checkpoint{Epoch: 0, Root: ckRoot}
err = s.SetFinalizedCheckpoint(cp0)
require.NoError(t, err)
st, root, err := prepareForkchoiceState(ctx, 33, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
service.head = &head{
root: [32]byte(ckRoot),
state: s,
slot: 33,
}
require.IsNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{}))
}
func TestService_GetRecentPreState_Same_DependentRoots(t *testing.T) {
service, _ := minimalTestService(t)
ctx := t.Context()
s, err := util.NewBeaconState()
require.NoError(t, err)
ckRoot := bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)
cp0 := &ethpb.Checkpoint{Epoch: 0, Root: ckRoot}
// Create a fork 31 <-- 32 <--- 64
// \---------33
// With the same dependent root at epoch 0 for a checkpoint at epoch 2
st, blk, err := prepareForkchoiceState(ctx, 31, [32]byte(ckRoot), [32]byte{}, [32]byte{}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 32, [32]byte{'S'}, blk.Root(), [32]byte{}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 64, [32]byte{'T'}, blk.Root(), [32]byte{}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 33, [32]byte{'U'}, [32]byte(ckRoot), [32]byte{}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
cpRoot := blk.Root()
service.head = &head{
root: [32]byte{'T'},
state: s,
slot: 64,
}
require.NotNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{Epoch: 2, Root: cpRoot[:]}))
}
func TestService_GetRecentPreState_Different_DependentRoots(t *testing.T) {
service, _ := minimalTestService(t)
ctx := t.Context()
s, err := util.NewBeaconState()
require.NoError(t, err)
ckRoot := bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)
cp0 := &ethpb.Checkpoint{Epoch: 0, Root: ckRoot}
// Create a fork 30 <-- 31 <-- 32 <--- 64
// \---------33
// With different dependent roots at epoch 0 for a checkpoint at epoch 2
st, blk, err := prepareForkchoiceState(ctx, 30, [32]byte(ckRoot), [32]byte{}, [32]byte{}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 31, [32]byte{'S'}, blk.Root(), [32]byte{}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 32, [32]byte{'T'}, blk.Root(), [32]byte{}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 64, [32]byte{'U'}, blk.Root(), [32]byte{}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 33, [32]byte{'V'}, [32]byte(ckRoot), [32]byte{}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
cpRoot := blk.Root()
service.head = &head{
root: [32]byte{'T'},
state: s,
slot: 64,
}
require.IsNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{Epoch: 2, Root: cpRoot[:]}))
}
func TestService_GetRecentPreState_Different(t *testing.T) {
service, _ := minimalTestService(t)
ctx := t.Context()
s, err := util.NewBeaconState()
require.NoError(t, err)
ckRoot := bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)
cp0 := &ethpb.Checkpoint{Epoch: 0, Root: ckRoot}
err = s.SetFinalizedCheckpoint(cp0)
require.NoError(t, err)
st, root, err := prepareForkchoiceState(ctx, 33, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
service.head = &head{
root: [32]byte(ckRoot),
state: s,
slot: 33,
}
require.IsNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{}))
}
func TestService_GetAttPreState_Concurrency(t *testing.T) {
service, _ := minimalTestService(t)
ctx := t.Context()

View File

@@ -46,14 +46,6 @@ func (s *Store) UpdateCustodyInfo(ctx context.Context, earliestAvailableSlot pri
return nil
}
// When increasing custody group count, prevent earliestAvailableSlot from going backward.
if storedEarliestAvailableSlot != 0 && earliestAvailableSlot < storedEarliestAvailableSlot {
return errors.Errorf(
"cannot decrease earliest available slot from %d to %d when increasing custody group count from %d to %d",
storedEarliestAvailableSlot, earliestAvailableSlot, storedGroupCount, custodyGroupCount,
)
}
storedGroupCount, storedEarliestAvailableSlot = custodyGroupCount, earliestAvailableSlot
// Store the earliest available slot.

View File

@@ -134,31 +134,6 @@ func TestUpdateCustodyInfo(t *testing.T) {
require.Equal(t, initialSlot, storedSlot)
require.Equal(t, initialCount, storedCount)
})
t.Run("prevent earliest available slot from going backward when increasing group count", func(t *testing.T) {
const (
initialSlot = primitives.Slot(200)
initialCount = uint64(5)
newSlot = primitives.Slot(100) // Lower than initial - should be rejected
newCount = uint64(10) // Higher than initial - would trigger update
)
db := setupDB(t)
// Initialize custody info with slot 200 and group count 5
_, _, err := db.UpdateCustodyInfo(ctx, initialSlot, initialCount)
require.NoError(t, err)
// Try to increase group count (5 -> 10) but with a lower earliest slot (200 -> 100)
// This should fail because we can't move earliest available slot backward when increasing custody
_, _, err = db.UpdateCustodyInfo(ctx, newSlot, newCount)
require.ErrorContains(t, "cannot decrease earliest available slot", err)
// Verify the database wasn't updated
storedSlot, storedCount := getCustodyInfoFromDB(t, db)
require.Equal(t, initialSlot, storedSlot)
require.Equal(t, initialCount, storedCount)
})
}
func TestUpdateEarliestAvailableSlot(t *testing.T) {
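The removed guard and its test encoded a single invariant: when the custody group count grows, the earliest available slot must not move backward. A minimal sketch of that rule in isolation, as the removed comment states it (plain uint64s stand in for primitives.Slot and the stored values; illustrative, not the Store implementation):

package main

import (
	"errors"
	"fmt"
)

// validateCustodyUpdate reproduces the invariant enforced by the removed
// check: a zero stored slot is uninitialized and exempt; otherwise, lowering
// the earliest available slot while raising the group count is rejected.
func validateCustodyUpdate(storedSlot, newSlot, storedCount, newCount uint64) error {
	if newCount > storedCount && storedSlot != 0 && newSlot < storedSlot {
		return errors.New("cannot decrease earliest available slot when increasing custody group count")
	}
	return nil
}

func main() {
	fmt.Println(validateCustodyUpdate(200, 100, 5, 10)) // rejected, as in the removed test case
	fmt.Println(validateCustodyUpdate(200, 300, 5, 10)) // <nil>: slot only moves forward
}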

View File

@@ -240,7 +240,7 @@ func (f *ForkChoice) IsViableForCheckpoint(cp *forkchoicetypes.Checkpoint) (bool
if node.slot == epochStart {
return true, nil
}
if !features.Get().IgnoreUnviableAttestations {
if !features.Get().DisableLastEpochTargets {
// Allow any node from the checkpoint epoch - 1 to be viable.
nodeEpoch := slots.ToEpoch(node.slot)
if nodeEpoch+1 == cp.Epoch {
@@ -642,12 +642,8 @@ func (f *ForkChoice) DependentRootForEpoch(root [32]byte, epoch primitives.Epoch
if !ok || node == nil {
return [32]byte{}, ErrNilNode
}
if slots.ToEpoch(node.slot) >= epoch {
if node.parent != nil {
node = node.parent
} else {
return f.store.finalizedDependentRoot, nil
}
if slots.ToEpoch(node.slot) >= epoch && node.parent != nil {
node = node.parent
}
return node.root, nil
}
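Only the tail of DependentRootForEpoch is visible in this hunk. After the change it reads: a node still inside the requested epoch steps to its parent when one exists, and the parentless tree root now answers with its own root, since the stored finalizedDependentRoot fallback (removed below) is gone. A self-contained sketch of just that tail, with an illustrative node type:

package main

import "fmt"

const slotsPerEpoch = 32 // mainnet value; illustrative here

type node struct {
	slot   uint64
	root   [32]byte
	parent *node
}

func epochOf(slot uint64) uint64 { return slot / slotsPerEpoch }

// dependentRootTail models the simplified tail above: defer to the parent
// while the node's epoch is not yet below the requested epoch, but stop at
// the tree root instead of consulting a saved finalized dependent root.
func dependentRootTail(n *node, epoch uint64) [32]byte {
	if epochOf(n.slot) >= epoch && n.parent != nil {
		n = n.parent
	}
	return n.root
}

func main() {
	treeRoot := &node{slot: 32, root: [32]byte{'a'}}
	child := &node{slot: 33, root: [32]byte{'b'}, parent: treeRoot}
	fmt.Printf("%c %c\n", dependentRootTail(child, 1)[0], dependentRootTail(treeRoot, 1)[0]) // a a
}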

View File

@@ -212,9 +212,6 @@ func (s *Store) prune(ctx context.Context) error {
return nil
}
// Save the new finalized dependent root because it will be pruned
s.finalizedDependentRoot = finalizedNode.parent.root
// Prune nodeByRoot starting from root
if err := s.pruneFinalizedNodeByRootMap(ctx, s.treeRootNode, finalizedNode); err != nil {
return err

View File

@@ -465,7 +465,6 @@ func TestStore_TargetRootForEpoch(t *testing.T) {
ctx := t.Context()
f := setup(1, 1)
// Insert a block in slot 32
state, blk, err := prepareForkchoiceState(ctx, params.BeaconConfig().SlotsPerEpoch, [32]byte{'a'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blk))
@@ -476,7 +475,6 @@ func TestStore_TargetRootForEpoch(t *testing.T) {
require.NoError(t, err)
require.Equal(t, dependent, [32]byte{})
// Insert a block in slot 33
state, blk1, err := prepareForkchoiceState(ctx, params.BeaconConfig().SlotsPerEpoch+1, [32]byte{'b'}, blk.Root(), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blk1))
@@ -490,7 +488,7 @@ func TestStore_TargetRootForEpoch(t *testing.T) {
require.NoError(t, err)
require.Equal(t, dependent, [32]byte{})
// Insert a block for the next epoch (missed slot 0), slot 65
// Insert a block for the next epoch (missed slot 0)
state, blk2, err := prepareForkchoiceState(ctx, 2*params.BeaconConfig().SlotsPerEpoch+1, [32]byte{'c'}, blk1.Root(), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
@@ -511,7 +509,6 @@ func TestStore_TargetRootForEpoch(t *testing.T) {
require.NoError(t, err)
require.Equal(t, dependent, blk1.Root())
// Insert a block at slot 66
state, blk3, err := prepareForkchoiceState(ctx, 2*params.BeaconConfig().SlotsPerEpoch+2, [32]byte{'d'}, blk2.Root(), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blk3))
@@ -536,11 +533,8 @@ func TestStore_TargetRootForEpoch(t *testing.T) {
dependent, err = f.DependentRoot(1)
require.NoError(t, err)
require.Equal(t, [32]byte{}, dependent)
dependent, err = f.DependentRoot(2)
require.NoError(t, err)
require.Equal(t, blk1.Root(), dependent)
// Insert a block for the next epoch, slot 96 (descends from finalized at slot 33)
// Insert a block for the next epoch (slot 0 present)
state, blk4, err := prepareForkchoiceState(ctx, 3*params.BeaconConfig().SlotsPerEpoch, [32]byte{'e'}, blk1.Root(), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blk4))
@@ -557,7 +551,6 @@ func TestStore_TargetRootForEpoch(t *testing.T) {
require.NoError(t, err)
require.Equal(t, dependent, blk1.Root())
// Insert a block at slot 97
state, blk5, err := prepareForkchoiceState(ctx, 3*params.BeaconConfig().SlotsPerEpoch+1, [32]byte{'f'}, blk4.Root(), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blk5))
@@ -607,16 +600,12 @@ func TestStore_TargetRootForEpoch(t *testing.T) {
require.NoError(t, err)
require.Equal(t, target, blk1.Root())
// Prune finalization, finalize the block at slot 96
// Prune finalization
s.finalizedCheckpoint.Root = blk4.Root()
require.NoError(t, s.prune(ctx))
target, err = f.TargetRootForEpoch(blk4.Root(), 3)
require.NoError(t, err)
require.Equal(t, blk4.Root(), target)
// Dependent root for the finalized block should be the root of the pruned block at slot 33
dependent, err = f.DependentRootForEpoch(blk4.Root(), 3)
require.NoError(t, err)
require.Equal(t, blk1.Root(), dependent)
}
func TestStore_DependentRootForEpoch(t *testing.T) {

View File

@@ -31,7 +31,6 @@ type Store struct {
proposerBoostRoot [fieldparams.RootLength]byte // latest block root that was boosted after being received in a timely manner.
previousProposerBoostRoot [fieldparams.RootLength]byte // previous block root that was boosted after being received in a timely manner.
previousProposerBoostScore uint64 // previous proposer boosted root score.
finalizedDependentRoot [fieldparams.RootLength]byte // dependent root at finalized checkpoint.
committeeWeight uint64 // tracks the total active validator balance divided by the number of slots per Epoch.
treeRootNode *Node // the root node of the store tree.
headNode *Node // last head Node

View File

@@ -450,7 +450,7 @@ func (p *BeaconDbBlocker) blobsDataFromStoredDataColumns(root [fieldparams.RootL
if count < peerdas.MinimumColumnCountToReconstruct() {
// There is no way to reconstruct the data columns.
return nil, &core.RpcError{
Err: errors.Errorf("the node does not custody enough data columns to reconstruct blobs - please start the beacon node with the `--%s` flag to ensure this call to succeed", flags.SemiSupernode.Name),
Err: errors.Errorf("the node does not custody enough data columns to reconstruct blobs - please start the beacon node with the `--%s` flag to ensure this call succeeds, or retry later if the flag is already set", flags.Supernode.Name),
Reason: core.NotFound,
}
}

View File

@@ -157,12 +157,6 @@ var (
Help: "Time for gossiped blob sidecars to arrive",
},
)
dataColumnSidecarArrivalGossipSummary = promauto.NewSummary(
prometheus.SummaryOpts{
Name: "gossip_data_column_sidecar_arrival_milliseconds",
Help: "Time for gossiped data column sidecars to arrive",
},
)
blobSidecarVerificationGossipSummary = promauto.NewSummary(
prometheus.SummaryOpts{
Name: "gossip_blob_sidecar_verification_milliseconds",

View File

@@ -173,6 +173,9 @@ func (s *Service) validateAggregatedAtt(ctx context.Context, signed ethpb.Signed
bs, err := s.cfg.chain.AttestationTargetState(ctx, data.Target)
if err != nil {
if errors.Is(err, blockchain.ErrStopAttestationStateGen) {
return pubsub.ValidationIgnore, errors.New("ignored attestation, state generation is disabled")
}
tracing.AnnotateError(span, err)
return pubsub.ValidationIgnore, err
}

View File

@@ -138,6 +138,9 @@ func (s *Service) validateCommitteeIndexBeaconAttestation(
preState, err := s.cfg.chain.AttestationTargetState(ctx, data.Target)
if err != nil {
if errors.Is(err, blockchain.ErrStopAttestationStateGen) {
return pubsub.ValidationIgnore, errors.New("ignored attestation, state generation is disabled")
}
tracing.AnnotateError(span, err)
return pubsub.ValidationIgnore, err
}
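Both validator hunks follow the same pattern: the sentinel from the blockchain package is matched with errors.Is and mapped to an ignore verdict, so attestations are quietly dropped (and the peer not penalized) when state generation is disabled, while other failures keep their existing ignore-with-error path. A sketch of the mapping, with an illustrative result type standing in for pubsub's constants:

package main

import (
	"errors"
	"fmt"
)

type ValidationResult int

const (
	ValidationAccept ValidationResult = iota
	ValidationIgnore
	ValidationReject
)

// errStopAttestationStateGen stands in for blockchain.ErrStopAttestationStateGen.
var errStopAttestationStateGen = errors.New("stopped attestation state generation")

// mapTargetStateErr mirrors the error handling added in both validators:
// the sentinel becomes a named "ignore" outcome; anything else is also
// ignored but surfaces the original error.
func mapTargetStateErr(err error) (ValidationResult, error) {
	if errors.Is(err, errStopAttestationStateGen) {
		return ValidationIgnore, errors.New("ignored attestation, state generation is disabled")
	}
	return ValidationIgnore, err
}

func main() {
	res, err := mapTargetStateErr(errStopAttestationStateGen)
	fmt.Println(res == ValidationIgnore, err) // true ignored attestation, state generation is disabled
}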

View File

@@ -191,7 +191,6 @@ func (s *Service) validateDataColumn(ctx context.Context, pid peer.ID, msg *pubs
sinceSlotStartTime := receivedTime.Sub(startTime)
validationTime := s.cfg.clock.Now().Sub(receivedTime)
dataColumnSidecarArrivalGossipSummary.Observe(float64(sinceSlotStartTime.Milliseconds()))
dataColumnSidecarVerificationGossipHistogram.Observe(float64(validationTime.Milliseconds()))
peerGossipScore := s.cfg.p2p.Peers().Scorers().GossipScorer().Score(pid)

View File

@@ -1,3 +0,0 @@
### Changed
- `blobsDataFromStoredDataColumns`: Ask the user to use the `--supernode` flag and shorten the error message.

View File

@@ -1,3 +0,0 @@
### Added
- Track the dependent root of the latest finalized checkpoint in forkchoice.

View File

@@ -1,3 +0,0 @@
### Fixed
- Use head state to validate attestations for old blocks if they are compatible.

View File

@@ -1,2 +0,0 @@
### Fixed
- Prevent the earliest available slot from going backward when increasing the custody group count.

View File

@@ -1,2 +0,0 @@
### Added
- prometheus summary `gossip_data_column_sidecar_arrival_milliseconds` to track data column sidecar arrival latency since slot start.

View File

@@ -1,2 +0,0 @@
### Changed
- Introduced flag `--ignore-unviable-attestations` (replaces and deprecates `--disable-last-epoch-targets`) to drop attestations whose target state is not viable; default remains to process them unless explicitly enabled.

View File

@@ -71,7 +71,8 @@ type Flags struct {
DisableResourceManager bool // Disables running the node with libp2p's resource manager.
DisableStakinContractCheck bool // Disables check for deposit contract when proposing blocks
IgnoreUnviableAttestations bool // Ignore attestations whose target state is not viable (avoids lagging-node DoS).
DisableLastEpochTargets bool // Disables processing of states for attestations to old blocks.
DisableAttestationStateGen bool // Disables generating attestation pre-states from the head state path.
EnableVerboseSigVerification bool // EnableVerboseSigVerification specifies whether to verify individual signature if batch verification fails
@@ -281,11 +282,13 @@ func ConfigureBeaconChain(ctx *cli.Context) error {
logEnabled(blacklistRoots)
cfg.BlacklistedRoots = parseBlacklistedRoots(ctx.StringSlice(blacklistRoots.Name))
}
cfg.IgnoreUnviableAttestations = false
if ctx.IsSet(ignoreUnviableAttestations.Name) && ctx.Bool(ignoreUnviableAttestations.Name) {
logEnabled(ignoreUnviableAttestations)
cfg.IgnoreUnviableAttestations = true
if ctx.IsSet(disableLastEpochTargets.Name) {
logEnabled(disableLastEpochTargets)
cfg.DisableLastEpochTargets = true
}
if ctx.IsSet(disableAttestationStateGen.Name) {
logEnabled(disableAttestationStateGen)
cfg.DisableAttestationStateGen = true
}
if ctx.IsSet(EnableStateDiff.Name) {

View File

@@ -25,6 +25,4 @@ var upcomingDeprecation = []cli.Flag{
// deprecatedBeaconFlags contains flags that are still used by other components
// and therefore cannot be added to deprecatedFlags
var deprecatedBeaconFlags = []cli.Flag{
deprecatedDisableLastEpochTargets,
}
var deprecatedBeaconFlags = []cli.Flag{}

View File

@@ -201,15 +201,15 @@ var (
Usage: "(Work in progress): Enables the web portal for the validator client.",
Value: false,
}
// deprecatedDisableLastEpochTargets is a flag to disable processing of attestations for old blocks.
deprecatedDisableLastEpochTargets = &cli.BoolFlag{
// disableLastEpochTargets is a flag to disable processing of attestations for old blocks.
disableLastEpochTargets = &cli.BoolFlag{
Name: "disable-last-epoch-targets",
Usage: "Deprecated: disables processing of last epoch targets.",
Usage: "Disables processing of last epoch targets.",
}
// ignoreUnviableAttestations flag to skip attestations whose target state is not viable with respect to head (from lagging nodes).
ignoreUnviableAttestations = &cli.BoolFlag{
Name: "ignore-unviable-attestations",
Usage: "Ignores attestations whose target state is not viable with respect to the current head (avoid expensive state replay from lagging attesters).",
// disableAttestationStateGen is a flag to skip generating attestation states from the head state path.
disableAttestationStateGen = &cli.BoolFlag{
Name: "disable-attestation-state-gen",
Usage: "Skips generating attestation pre-states from the head state when handling attestations.",
}
)
@@ -256,7 +256,6 @@ var BeaconChainFlags = combinedFlags([]cli.Flag{
disableStakinContractCheck,
SaveFullExecutionPayloads,
enableStartupOptimistic,
ignoreUnviableAttestations,
enableFullSSZDataLogging,
disableVerboseSigVerification,
prepareAllPayloads,
@@ -272,6 +271,8 @@ var BeaconChainFlags = combinedFlags([]cli.Flag{
enableExperimentalAttestationPool,
forceHeadFlag,
blacklistRoots,
disableLastEpochTargets,
disableAttestationStateGen,
}, deprecatedBeaconFlags, deprecatedFlags, upcomingDeprecation)
func combinedFlags(flags ...[]cli.Flag) []cli.Flag {
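The flag wiring here is the standard urfave/cli/v2 pattern that Prysm's flag types correspond to: declare a BoolFlag, register it in the flag list, and copy it into the process-wide feature config during ConfigureBeaconChain. A reduced, runnable sketch of the same wiring, with an illustrative cut-down config type:

package main

import (
	"fmt"
	"os"

	"github.com/urfave/cli/v2"
)

// Flags is a cut-down stand-in for the features.Flags struct above.
type Flags struct {
	DisableAttestationStateGen bool
}

var cfg Flags

var disableAttestationStateGen = &cli.BoolFlag{
	Name:  "disable-attestation-state-gen",
	Usage: "Skips generating attestation pre-states from the head state when handling attestations.",
}

func main() {
	app := &cli.App{
		Name:  "beacon-chain-sketch",
		Flags: []cli.Flag{disableAttestationStateGen},
		Action: func(ctx *cli.Context) error {
			// Mirrors ConfigureBeaconChain: the flag's presence flips the feature bit.
			if ctx.IsSet(disableAttestationStateGen.Name) {
				cfg.DisableAttestationStateGen = true
			}
			fmt.Println("disable-attestation-state-gen:", cfg.DisableAttestationStateGen)
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}

Running the sketch with --disable-attestation-state-gen prints the feature bit set to true, matching how the real flag feeds features.Get().DisableAttestationStateGen in the first hunk of this diff.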