mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-10 13:58:09 -05:00)

Compare commits: remove_pro...fulu-e2e
117 Commits (abbreviated SHA1s; the author and date columns of the listing were empty):

f2392371e4, a77a41d31e, 19807c0d12, ae6a8e001f, 3df4a868cf, a5f0c6ea06, acbd52f3fa, 4bd24e1970, e97fce7055, 533ec96207,
0db74365e0, 6f90101364, 49e1763ec2, c2527c82cd, d4ea8fafd6, 07d1d6bdf9, f938da99d9, 9deec69cc7, 2767f08f4d, d46c620783,
dd05e44ef3, 9da36a5de6, 7950a24926, ea51253be9, 2ac30f5ce6, 7418c00ad6, 66342655fd, 18eca953c1, 9f2f70a231, 9706eadea9,
8191bb5711, d4613aee0c, 8a37575407, 0b1d4c485d, 0d04013443, 2fbcbea550, 9fcc1a7a77, 5a49dcabb6, d0580debf6, 869a6586ff,
0b47ac51f7, 75916243f2, b499186483, cb28503b5f, 48fd9509ef, 316f0932ff, 7f321a5835, 1b302219a8, e36c7ec26e, 8df746cead,
4294025eed, 41e336d373, 86cb003b8e, 853d64f8f9, 6dd11aaa2d, f1d4f6d136, c5690a7eb6, edf60e6e33, ad022eead9, 409b48940d,
64324211b8, 5531014a75, 80dc3d953d, d175bd93ba, 4419a36cc2, 0176706fad, df307544c9, 10821dc451, 4706b758dd, 9f59b78cec,
75d8a2bdb6, ca4ae35f6e, 3245b884cf, 00e66ccab6, ed1885be8a, fad101c4a0, 1c0107812b, fd710886c8, 20d6bacfe9, 1140141aaa,
7402efad2d, f6cd1d9f7f, a4751b057d, eb5e2a5094, 626830ff9c, 6f20ac57d4, cf4095cf3b, 39b3dba946, fa7e0b6e50, 4aed113dea,
20ffd4c523, 1cd3c3cc2d, 15e2e6b85e, 203a098076, 9e5b7b00f3, 1fcf3c7f30, ccf81ed33c, 382934fd30, c31f0435c6, 59dc0263f2,
de7ea1f72c, 2fc7a5af82, 76d137198b, 05d8ce15af, 2715cd3fc7, 7aa79a420c, b3dc7e4afb, db794db4ee, f9bc42eed4, f97082df32,
0d55b61b3d, d487e5c109, 7ffbf77b87, f103267f10, 6f0ffa2a20, bcb5add346, a43fc50015
```diff
@@ -193,7 +193,7 @@ nogo(
     "//tools/analyzers/featureconfig:go_default_library",
     "//tools/analyzers/gocognit:go_default_library",
-    "//tools/analyzers/ineffassign:go_default_library",
+    "//tools/analyzers/httperror:go_default_library",
     "//tools/analyzers/httpwriter:go_default_library",
    "//tools/analyzers/interfacechecker:go_default_library",
     "//tools/analyzers/logcapitalization:go_default_library",
     "//tools/analyzers/logruswitherror:go_default_library",
```
```diff
@@ -323,14 +323,17 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
     var ok bool
     e := slots.ToEpoch(slot)
     stateEpoch := slots.ToEpoch(st.Slot())
-    if e == stateEpoch {
+    fuluAndNextEpoch := st.Version() >= version.Fulu && e == stateEpoch+1
+    if e == stateEpoch || fuluAndNextEpoch {
         val, ok = s.trackedProposer(st, slot)
         if !ok {
             return emptyAttri
         }
     }
+    st = st.Copy()
     if slot > st.Slot() {
-        st = st.Copy()
+        // At this point either we know we are proposing on a future slot or we need to still compute the
+        // right proposer index pre-Fulu, either way we need to copy the state to process it.
         var err error
         st, err = transition.ProcessSlotsUsingNextSlotCache(ctx, st, headRoot, slot)
         if err != nil {
@@ -338,7 +341,7 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
             return emptyAttri
         }
     }
-    if e > stateEpoch {
+    if e > stateEpoch && !fuluAndNextEpoch {
         emptyAttri := payloadattribute.EmptyWithVersion(st.Version())
         val, ok = s.trackedProposer(st, slot)
         if !ok {
```
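The hunk above widens the window in which a tracked proposer can be looked up directly from the head state: pre-Fulu the slot's epoch must equal the state's epoch, while from Fulu onward (with its proposer lookahead) the immediately following epoch also qualifies. A minimal sketch of that condition follows; the types, constants, and function names here are illustrative stand-ins, not Prysm's actual `version` and `slots` packages:

```go
package main

import "fmt"

const slotsPerEpoch = 32 // mainnet value; an assumption for this sketch

type forkVersion int

const (
	electra forkVersion = iota
	fulu
)

func toEpoch(slot uint64) uint64 { return slot / slotsPerEpoch }

// canUseStateForProposerLookup mirrors the diff's condition
// `e == stateEpoch || (st.Version() >= version.Fulu && e == stateEpoch+1)`.
func canUseStateForProposerLookup(v forkVersion, slot, stateSlot uint64) bool {
	e, stateEpoch := toEpoch(slot), toEpoch(stateSlot)
	return e == stateEpoch || (v >= fulu && e == stateEpoch+1)
}

func main() {
	// Proposing in the next epoch relative to a state at the end of this one:
	fmt.Println(canUseStateForProposerLookup(electra, 64, 63)) // false: pre-Fulu needs slot processing
	fmt.Println(canUseStateForProposerLookup(fulu, 64, 63))    // true: Fulu lookahead covers epoch+1
}
```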
```diff
@@ -1053,40 +1053,3 @@ func TestKZGCommitmentToVersionedHashes(t *testing.T) {
     require.Equal(t, vhs[0].String(), vh0)
     require.Equal(t, vhs[1].String(), vh1)
 }
-
-func TestComputePayloadAttribute(t *testing.T) {
-    service, tr := minimalTestService(t, WithPayloadIDCache(cache.NewPayloadIDCache()))
-    ctx := tr.ctx
-
-    st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
-
-    service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, Index: 0})
-    // Cache hit, advance state, no fee recipient
-    slot := primitives.Slot(1)
-    service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
-    blk := util.NewBeaconBlockBellatrix()
-    signed, err := consensusblocks.NewSignedBeaconBlock(blk)
-    require.NoError(t, err)
-    roblock, err := consensusblocks.NewROBlockWithRoot(signed, [32]byte{'a'})
-    require.NoError(t, err)
-    cfg := &postBlockProcessConfig{
-        ctx:     ctx,
-        roblock: roblock,
-    }
-    fcu := &fcuConfig{
-        headState:     st,
-        proposingSlot: slot,
-        headRoot:      [32]byte{},
-    }
-    require.NoError(t, service.computePayloadAttributes(cfg, fcu))
-    require.Equal(t, false, fcu.attributes.IsEmpty())
-    require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(fcu.attributes.SuggestedFeeRecipient()).String())
-
-    // Cache hit, advance state, has fee recipient
-    suggestedAddr := common.HexToAddress("123")
-    service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, FeeRecipient: primitives.ExecutionAddress(suggestedAddr), Index: 0})
-    service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
-    require.NoError(t, service.computePayloadAttributes(cfg, fcu))
-    require.Equal(t, false, fcu.attributes.IsEmpty())
-    require.Equal(t, suggestedAddr, common.BytesToAddress(fcu.attributes.SuggestedFeeRecipient()))
-}
```
```diff
@@ -12,6 +12,7 @@ import (
     payloadattribute "github.com/OffchainLabs/prysm/v7/consensus-types/payload-attribute"
     "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
     "github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
+    "github.com/OffchainLabs/prysm/v7/runtime/version"
     "github.com/OffchainLabs/prysm/v7/time/slots"
     "github.com/pkg/errors"
     "github.com/sirupsen/logrus"
```
```diff
@@ -53,58 +54,53 @@ type fcuConfig struct {
 }
 
 // sendFCU handles the logic to notify the engine of a forckhoice update
-// for the first time when processing an incoming block during regular sync. It
-// always updates the shuffling caches and handles epoch transitions when the
-// incoming block is late, preparing payload attributes in this case while it
-// only sends a message with empty attributes for early blocks.
-func (s *Service) sendFCU(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
-    if !s.isNewHead(cfg.headRoot) {
-        return nil
-    }
+// when processing an incoming block during regular sync. It
+// always updates the shuffling caches and handles epoch transitions .
+func (s *Service) sendFCU(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) {
+    if cfg.postState.Version() < version.Fulu {
+        // update the caches to compute the right proposer index
+        // this function is called under a forkchoice lock which we need to release.
+        s.ForkChoicer().Unlock()
+        s.updateCachesPostBlockProcessing(cfg)
+        s.ForkChoicer().Lock()
+    }
+    if err := s.getFCUArgs(cfg, fcuArgs); err != nil {
+        log.WithError(err).Error("Could not get forkchoice update argument")
+        return
+    }
+    // If head has not been updated and attributes are nil, we can skip the FCU.
+    if !s.isNewHead(cfg.headRoot) && (fcuArgs.attributes == nil || fcuArgs.attributes.IsEmpty()) {
+        return
+    }
     // If we are proposing and we aim to reorg the block, we have already sent FCU with attributes on lateBlockTasks
     if fcuArgs.attributes != nil && !fcuArgs.attributes.IsEmpty() && s.shouldOverrideFCU(cfg.headRoot, s.CurrentSlot()+1) {
-        return nil
+        return
     }
-    return s.forkchoiceUpdateWithExecution(cfg.ctx, fcuArgs)
-}
-
-// sendFCUWithAttributes computes the payload attributes and sends an FCU message
-// to the engine if needed
-func (s *Service) sendFCUWithAttributes(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) {
-    slotCtx, cancel := context.WithTimeout(context.Background(), slotDeadline)
-    defer cancel()
-    cfg.ctx = slotCtx
-    s.cfg.ForkChoiceStore.RLock()
-    defer s.cfg.ForkChoiceStore.RUnlock()
-    if err := s.computePayloadAttributes(cfg, fcuArgs); err != nil {
-        log.WithError(err).Error("Could not compute payload attributes")
-        return
-    }
-    if fcuArgs.attributes.IsEmpty() {
-        return
-    }
-    if _, err := s.notifyForkchoiceUpdate(cfg.ctx, fcuArgs); err != nil {
-        log.WithError(err).Error("Could not update forkchoice with payload attributes for proposal")
-    }
-    if s.isNewHead(fcuArgs.headRoot) {
-        if err := s.saveHead(cfg.ctx, fcuArgs.headRoot, fcuArgs.headBlock, fcuArgs.headState); err != nil {
-            log.WithError(err).Error("Could not save head")
-        }
-        s.pruneAttsFromPool(s.ctx, fcuArgs.headState, fcuArgs.headBlock)
-    }
-}
+    if s.inRegularSync() {
+        go s.forkchoiceUpdateWithExecution(cfg.ctx, fcuArgs)
+    }
+}
 
-// fockchoiceUpdateWithExecution is a wrapper around notifyForkchoiceUpdate. It decides whether a new call to FCU should be made.
-func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, args *fcuConfig) error {
+// fockchoiceUpdateWithExecution is a wrapper around notifyForkchoiceUpdate. It gets a forkchoice lock and calls the engine.
+// The caller of this function should NOT have a lock in forkchoice store.
+func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, args *fcuConfig) {
     _, span := trace.StartSpan(ctx, "beacon-chain.blockchain.forkchoiceUpdateWithExecution")
     defer span.End()
     // Note: Use the service context here to avoid the parent context being ended during a forkchoice update.
     ctx = trace.NewContext(s.ctx, span)
+    s.ForkChoicer().Lock()
+    defer s.ForkChoicer().Unlock()
     _, err := s.notifyForkchoiceUpdate(ctx, args)
     if err != nil {
-        return errors.Wrap(err, "could not notify forkchoice update")
+        log.WithError(err).Error("Could not notify forkchoice update")
     }
 
     if err := s.saveHead(ctx, args.headRoot, args.headBlock, args.headState); err != nil {
         log.WithError(err).Error("Could not save head")
     }
 
     // Only need to prune attestations from pool if the head has changed.
     s.pruneAttsFromPool(s.ctx, args.headState, args.headBlock)
-    return nil
 }
 
 // shouldOverrideFCU checks whether the incoming block is still subject to being
```
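Two locking patterns stand out in this hunk: the pre-Fulu branch of `sendFCU` temporarily releases the forkchoice lock to run cache updates, and the new `forkchoiceUpdateWithExecution` acquires the lock itself, so its callers must not hold it. A minimal sketch of the unlock-work-relock idea, with a toy `service` type standing in for Prysm's (nothing here is the actual Prysm API):

```go
package main

import (
	"fmt"
	"sync"
)

type service struct{ mu sync.Mutex }

// updateCaches is placeholder work that must not run under the lock.
func (s *service) updateCaches() { fmt.Println("updating caches without the lock") }

// sendFCU is called with s.mu held, mirroring the diff's contract.
func (s *service) sendFCU(preFulu bool) {
	if preFulu {
		s.mu.Unlock() // release so slow cache work does not block forkchoice
		s.updateCaches()
		s.mu.Lock() // reacquire before touching forkchoice state again
	}
	fmt.Println("dispatching FCU to the engine")
}

func main() {
	s := &service{}
	s.mu.Lock()
	s.sendFCU(true)
	s.mu.Unlock()
}
```

The caveat of this pattern is that anything observed before the `Unlock` may have changed by the time the lock is reacquired, which is presumably why the diff rechecks conditions like `isNewHead` afterward.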
```diff
@@ -97,7 +97,7 @@ func TestService_forkchoiceUpdateWithExecution_exceptionalCases(t *testing.T) {
         headBlock:     wsb,
         proposingSlot: service.CurrentSlot() + 1,
     }
-    require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, args))
+    service.forkchoiceUpdateWithExecution(ctx, args)
 
     payloadID, has := service.cfg.PayloadIDCache.PayloadID(2, [32]byte{2})
     require.Equal(t, true, has)
@@ -151,7 +151,7 @@ func TestService_forkchoiceUpdateWithExecution_SameHeadRootNewProposer(t *testing.T) {
         headRoot:      r,
         proposingSlot: service.CurrentSlot() + 1,
     }
-    require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, args))
+    service.forkchoiceUpdateWithExecution(ctx, args)
 }
 
 func TestShouldOverrideFCU(t *testing.T) {
```
```diff
@@ -22,7 +22,7 @@ import (
 // The caller of this function must have a lock on forkchoice.
 func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) state.ReadOnlyBeaconState {
     headEpoch := slots.ToEpoch(s.HeadSlot())
-    if c.Epoch < headEpoch || c.Epoch == 0 {
+    if c.Epoch+1 < headEpoch || c.Epoch == 0 {
         return nil
     }
     // Only use head state if the head state is compatible with the target checkpoint.
@@ -30,11 +30,13 @@ func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) st
     if err != nil {
         return nil
     }
-    headDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(headRoot), c.Epoch-1)
+    // headEpoch - 1 equals c.Epoch if c is from the previous epoch and equals c.Epoch - 1 if c is from the current epoch.
+    // We don't use the smaller c.Epoch - 1 because forkchoice would not have the data to answer that.
+    headDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(headRoot), headEpoch-1)
     if err != nil {
         return nil
     }
-    targetDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(c.Root), c.Epoch-1)
+    targetDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(c.Root), headEpoch-1)
     if err != nil {
         return nil
     }
@@ -43,7 +45,7 @@ func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) st
     }
 
     // If the head state alone is enough, we can return it directly read only.
-    if c.Epoch == headEpoch {
+    if c.Epoch <= headEpoch {
         st, err := s.HeadStateReadOnly(ctx)
         if err != nil {
             return nil
```
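The core idea of `getRecentPreState` is that the head state can serve as a pre-state for a target checkpoint only when both resolve to the same dependent root for the epoch in question (here `headEpoch-1`, the latest epoch forkchoice can answer for), meaning they share the same shuffling. A small sketch of that compatibility check, using a toy map in place of the forkchoice store (all names here are hypothetical):

```go
package main

import "fmt"

type root [32]byte

// dependentRoots maps (blockRoot, epoch) to the dependent root a
// hypothetical forkchoice store would return.
var dependentRoots = map[root]map[uint64]root{}

// compatible reports whether head and target agree on the dependent root
// for headEpoch-1, i.e. whether they see the same shuffling.
func compatible(headRoot, targetRoot root, headEpoch uint64) bool {
	h, ok1 := dependentRoots[headRoot][headEpoch-1]
	t, ok2 := dependentRoots[targetRoot][headEpoch-1]
	return ok1 && ok2 && h == t
}

func main() {
	a, b, d := root{'A'}, root{'B'}, root{'D'}
	dependentRoots[a] = map[uint64]root{1: d}
	dependentRoots[b] = map[uint64]root{1: d}
	fmt.Println(compatible(a, b, 2)) // true: same dependent root, same shuffling
}
```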
```diff
@@ -170,12 +170,13 @@ func TestService_GetRecentPreState(t *testing.T) {
     err = s.SetFinalizedCheckpoint(cp0)
     require.NoError(t, err)
 
-    st, root, err := prepareForkchoiceState(ctx, 31, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
+    st, blk, err := prepareForkchoiceState(ctx, 31, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
     require.NoError(t, err)
-    require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
+    require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
     service.head = &head{
         root:  [32]byte(ckRoot),
         state: s,
+        block: blk,
         slot:  31,
     }
     require.NotNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{Epoch: 1, Root: ckRoot}))
@@ -197,12 +198,13 @@ func TestService_GetRecentPreState_Old_Checkpoint(t *testing.T) {
     err = s.SetFinalizedCheckpoint(cp0)
     require.NoError(t, err)
 
-    st, root, err := prepareForkchoiceState(ctx, 33, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
+    st, blk, err := prepareForkchoiceState(ctx, 33, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
     require.NoError(t, err)
-    require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
+    require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
     service.head = &head{
         root:  [32]byte(ckRoot),
         state: s,
+        block: blk,
         slot:  33,
     }
     require.IsNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{}))
@@ -227,6 +229,7 @@ func TestService_GetRecentPreState_Same_DependentRoots(t *testing.T) {
     require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
     st, blk, err = prepareForkchoiceState(ctx, 64, [32]byte{'T'}, blk.Root(), [32]byte{}, cp0, cp0)
     require.NoError(t, err)
+    headBlock := blk
     require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
     st, blk, err = prepareForkchoiceState(ctx, 33, [32]byte{'U'}, [32]byte(ckRoot), [32]byte{}, cp0, cp0)
     require.NoError(t, err)
@@ -235,8 +238,9 @@ func TestService_GetRecentPreState_Same_DependentRoots(t *testing.T) {
 
     service.head = &head{
         root:  [32]byte{'T'},
-        state: s,
+        block: headBlock,
         slot:  64,
+        state: s,
     }
     require.NotNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{Epoch: 2, Root: cpRoot[:]}))
 }
@@ -263,6 +267,7 @@ func TestService_GetRecentPreState_Different_DependentRoots(t *testing.T) {
     require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
     st, blk, err = prepareForkchoiceState(ctx, 64, [32]byte{'U'}, blk.Root(), [32]byte{}, cp0, cp0)
     require.NoError(t, err)
+    headBlock := blk
     require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
     st, blk, err = prepareForkchoiceState(ctx, 33, [32]byte{'V'}, [32]byte(ckRoot), [32]byte{}, cp0, cp0)
     require.NoError(t, err)
@@ -270,7 +275,8 @@ func TestService_GetRecentPreState_Different_DependentRoots(t *testing.T) {
     cpRoot := blk.Root()
 
     service.head = &head{
-        root:  [32]byte{'T'},
+        root:  [32]byte{'U'},
+        block: headBlock,
         state: s,
         slot:  64,
     }
@@ -287,12 +293,13 @@ func TestService_GetRecentPreState_Different(t *testing.T) {
     err = s.SetFinalizedCheckpoint(cp0)
     require.NoError(t, err)
 
-    st, root, err := prepareForkchoiceState(ctx, 33, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
+    st, blk, err := prepareForkchoiceState(ctx, 33, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
     require.NoError(t, err)
-    require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
+    require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
     service.head = &head{
         root:  [32]byte(ckRoot),
         state: s,
+        block: blk,
         slot:  33,
     }
     require.IsNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{}))
```
```diff
@@ -66,9 +66,6 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
     startTime := time.Now()
     fcuArgs := &fcuConfig{}
 
-    if s.inRegularSync() {
-        defer s.handleSecondFCUCall(cfg, fcuArgs)
-    }
     if features.Get().EnableLightClient && slots.ToEpoch(s.CurrentSlot()) >= params.BeaconConfig().AltairForkEpoch {
         defer s.processLightClientUpdates(cfg)
     }
@@ -105,14 +102,17 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
         s.logNonCanonicalBlockReceived(cfg.roblock.Root(), cfg.headRoot)
         return nil
     }
-    if err := s.getFCUArgs(cfg, fcuArgs); err != nil {
-        log.WithError(err).Error("Could not get forkchoice update argument")
-        return nil
-    }
-    if err := s.sendFCU(cfg, fcuArgs); err != nil {
-        return errors.Wrap(err, "could not send FCU to engine")
-    }
+    s.sendFCU(cfg, fcuArgs)
 
+    // Pre-Fulu the caches are updated when computing the payload attributes
+    if cfg.postState.Version() >= version.Fulu {
+        go func() {
+            ctx, cancel := context.WithTimeout(s.ctx, slotDeadline)
+            defer cancel()
+            cfg.ctx = ctx
+            s.updateCachesPostBlockProcessing(cfg)
+        }()
+    }
     return nil
 }
```
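Post-Fulu, `postBlockProcess` moves the cache refresh off the hot path: it is run in a goroutine bounded by a per-slot deadline instead of inline before the FCU call. A minimal sketch of the timeout-bounded background update; `slotDeadline` and the update body are placeholders, not Prysm's values:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

const slotDeadline = 4 * time.Second // assumption for this sketch; Prysm derives its own deadline

// updateCachesPostBlockProcessing stands in for the real cache refresh.
func updateCachesPostBlockProcessing(ctx context.Context) {
	select {
	case <-ctx.Done():
		fmt.Println("cache update abandoned:", ctx.Err())
	case <-time.After(10 * time.Millisecond): // simulated work
		fmt.Println("caches updated")
	}
}

func main() {
	done := make(chan struct{})
	go func() {
		defer close(done)
		// The timeout guarantees the background work cannot outlive the slot.
		ctx, cancel := context.WithTimeout(context.Background(), slotDeadline)
		defer cancel()
		updateCachesPostBlockProcessing(ctx)
	}()
	<-done // demo only: block processing would return immediately instead
}
```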
```diff
@@ -295,14 +295,6 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
             return errors.Wrap(err, "could not set optimistic block to valid")
         }
     }
-    arg := &fcuConfig{
-        headState: preState,
-        headRoot:  lastBR,
-        headBlock: lastB,
-    }
-    if _, err := s.notifyForkchoiceUpdate(ctx, arg); err != nil {
-        return err
-    }
     return s.saveHeadNoDB(ctx, lastB, lastBR, preState, !isValidPayload)
 }
```
```diff
@@ -330,6 +322,7 @@ func (s *Service) areSidecarsAvailable(ctx context.Context, avs das.Availability
     return nil
 }
 
+// the caller of this function must not hold a lock in forkchoice store.
 func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.BeaconState) error {
     e := coreTime.CurrentEpoch(st)
     if err := helpers.UpdateCommitteeCache(ctx, st, e); err != nil {
@@ -359,7 +352,9 @@ func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.Beacon
     if e > 0 {
         e = e - 1
     }
+    s.ForkChoicer().RLock()
     target, err := s.cfg.ForkChoiceStore.TargetRootForEpoch(r, e)
+    s.ForkChoicer().RUnlock()
     if err != nil {
         log.WithError(err).Error("Could not update proposer index state-root map")
         return nil
@@ -372,7 +367,7 @@ func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.Beacon
 }
 
 // Epoch boundary tasks: it copies the headState and updates the epoch boundary
-// caches.
+// caches. The caller of this function must not hold a lock in forkchoice store.
 func (s *Service) handleEpochBoundary(ctx context.Context, slot primitives.Slot, headState state.BeaconState, blockRoot []byte) error {
     ctx, span := trace.StartSpan(ctx, "blockChain.handleEpochBoundary")
     defer span.End()
```
```diff
@@ -912,8 +907,6 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
     if currentSlot == s.HeadSlot() {
         return
     }
-    s.cfg.ForkChoiceStore.RLock()
-    defer s.cfg.ForkChoiceStore.RUnlock()
     // return early if we are in init sync
     if !s.inRegularSync() {
         return
@@ -926,14 +919,32 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
     if lastState == nil {
         lastRoot, lastState = headRoot[:], headState
     }
-    // Copy all the field tries in our cached state in the event of late
-    // blocks.
-    lastState.CopyAllTries()
-    if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
-        log.WithError(err).Debug("Could not update next slot state cache")
-    }
-    if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
-        log.WithError(err).Error("Could not update epoch boundary caches")
-    }
+    // Before Fulu we need to process the next slot to find out if we are proposing.
+    if lastState.Version() < version.Fulu {
+        // Copy all the field tries in our cached state in the event of late
+        // blocks.
+        lastState.CopyAllTries()
+        if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
+            log.WithError(err).Debug("Could not update next slot state cache")
+        }
+        if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
+            log.WithError(err).Error("Could not update epoch boundary caches")
+        }
+    } else {
+        // After Fulu, we can update the caches asynchronously after sending FCU to the engine
+        defer func() {
+            go func() {
+                ctx, cancel := context.WithTimeout(s.ctx, slotDeadline)
+                defer cancel()
+                lastState.CopyAllTries()
+                if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
+                    log.WithError(err).Debug("Could not update next slot state cache")
+                }
+                if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
+                    log.WithError(err).Error("Could not update epoch boundary caches")
+                }
+            }()
+        }()
+    }
     // return early if we already started building a block for the current
     // head root
```
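The Fulu branch above uses a `defer` that launches a goroutine, so the cache refresh only starts once `lateBlockTasks` has finished its remaining work (including sending the FCU). A stripped-down sketch of that deferred-goroutine scheduling pattern, with placeholder print statements in place of the real cache calls:

```go
package main

import (
	"fmt"
	"time"
)

func lateBlockTasks() {
	// Deferred: runs when lateBlockTasks returns, and only then spawns
	// the async work, guaranteeing FCU goes out first.
	defer func() {
		go func() {
			fmt.Println("async: updating next-slot cache and epoch boundary caches")
		}()
	}()
	fmt.Println("sending FCU with attributes to the engine")
}

func main() {
	lateBlockTasks()
	time.Sleep(50 * time.Millisecond) // demo only: give the goroutine time to run
}
```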
```diff
@@ -963,6 +974,8 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
         headBlock:  headBlock,
         attributes: attribute,
     }
+    s.cfg.ForkChoiceStore.Lock()
+    defer s.cfg.ForkChoiceStore.Unlock()
     _, err = s.notifyForkchoiceUpdate(ctx, fcuArgs)
     if err != nil {
         log.WithError(err).Debug("could not perform late block tasks: failed to update forkchoice with engine")
```
```diff
@@ -42,14 +42,8 @@ func (s *Service) getFCUArgs(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
     if err := s.getFCUArgsEarlyBlock(cfg, fcuArgs); err != nil {
         return err
     }
-    if !s.inRegularSync() {
-        return nil
-    }
     slot := cfg.roblock.Block().Slot()
     if slots.WithinVotingWindow(s.genesisTime, slot) {
         return nil
     }
-    return s.computePayloadAttributes(cfg, fcuArgs)
+    fcuArgs.attributes = s.getPayloadAttribute(cfg.ctx, fcuArgs.headState, fcuArgs.proposingSlot, cfg.headRoot[:])
+    return nil
 }
 
 func (s *Service) getFCUArgsEarlyBlock(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
```
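`getFCUArgs` only computes payload attributes for blocks that arrive after the attestation voting window of their slot; early blocks skip it, and a later call handles attributes. A small sketch of that timing check; the window length and names are illustrative assumptions, not the real `slots.WithinVotingWindow` implementation:

```go
package main

import (
	"fmt"
	"time"
)

const (
	secondsPerSlot = 12 * time.Second
	votingWindow   = 4 * time.Second // placeholder for the attestation deadline
)

// withinVotingWindow reports whether we are still inside the attestation
// voting window of the given slot.
func withinVotingWindow(genesis time.Time, slot uint64) bool {
	slotStart := genesis.Add(time.Duration(slot) * secondsPerSlot)
	return time.Since(slotStart) < votingWindow
}

func main() {
	genesis := time.Now().Add(-13 * time.Second) // slot 1 started ~1s ago
	fmt.Println(withinVotingWindow(genesis, 1))  // true: block is early, skip attributes
}
```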
```diff
@@ -173,26 +167,19 @@ func (s *Service) processLightClientUpdates(cfg *postBlockProcessConfig) {
 
 // updateCachesPostBlockProcessing updates the next slot cache and handles the epoch
 // boundary in order to compute the right proposer indices after processing
-// state transition. This function is called on late blocks while still locked,
-// before sending FCU to the engine.
-func (s *Service) updateCachesPostBlockProcessing(cfg *postBlockProcessConfig) error {
+// state transition. The caller of this function must not hold a lock in forkchoice store.
+func (s *Service) updateCachesPostBlockProcessing(cfg *postBlockProcessConfig) {
     slot := cfg.postState.Slot()
     root := cfg.roblock.Root()
     if err := transition.UpdateNextSlotCache(cfg.ctx, root[:], cfg.postState); err != nil {
-        return errors.Wrap(err, "could not update next slot state cache")
+        log.WithError(err).Error("Could not update next slot state cache")
+        return
     }
     if !slots.IsEpochEnd(slot) {
-        return nil
+        return
     }
-    return s.handleEpochBoundary(cfg.ctx, slot, cfg.postState, root[:])
-}
-
-// handleSecondFCUCall handles a second call to FCU when syncing a new block.
-// This is useful when proposing in the next block and we want to defer the
-// computation of the next slot shuffling.
-func (s *Service) handleSecondFCUCall(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) {
-    if (fcuArgs.attributes == nil || fcuArgs.attributes.IsEmpty()) && cfg.headRoot == cfg.roblock.Root() {
-        go s.sendFCUWithAttributes(cfg, fcuArgs)
-    }
-}
+    if err := s.handleEpochBoundary(cfg.ctx, slot, cfg.postState, root[:]); err != nil {
+        log.WithError(err).Error("Could not handle epoch boundary")
+    }
+}
```
```diff
@@ -202,20 +189,6 @@ func reportProcessingTime(startTime time.Time) {
     onBlockProcessingTime.Observe(float64(time.Since(startTime).Milliseconds()))
 }
 
-// computePayloadAttributes modifies the passed FCU arguments to
-// contain the right payload attributes with the tracked proposer. It gets
-// called on blocks that arrive after the attestation voting window, or in a
-// background routine after syncing early blocks.
-func (s *Service) computePayloadAttributes(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
-    if cfg.roblock.Root() == cfg.headRoot {
-        if err := s.updateCachesPostBlockProcessing(cfg); err != nil {
-            return err
-        }
-    }
-    fcuArgs.attributes = s.getPayloadAttribute(cfg.ctx, fcuArgs.headState, fcuArgs.proposingSlot, cfg.headRoot[:])
-    return nil
-}
-
 // getBlockPreState returns the pre state of an incoming block. It uses the parent root of the block
 // to retrieve the state in DB. It verifies the pre state's validity and the incoming block
 // is in the correct time window.
```
```diff
@@ -738,7 +738,9 @@ func TestOnBlock_CanFinalize_WithOnTick(t *testing.T) {
     require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
     roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
     require.NoError(t, err)
+    service.cfg.ForkChoiceStore.Lock()
     require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
+    service.cfg.ForkChoiceStore.Unlock()
     require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
     _, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
     require.NoError(t, err)
@@ -788,7 +790,9 @@ func TestOnBlock_CanFinalize(t *testing.T) {
     require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
     roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
     require.NoError(t, err)
+    service.cfg.ForkChoiceStore.Lock()
     require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
+    service.cfg.ForkChoiceStore.Unlock()
     require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
     _, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
     require.NoError(t, err)
@@ -816,25 +820,9 @@ func TestOnBlock_NilBlock(t *testing.T) {
     service, tr := minimalTestService(t)
     signed := &consensusblocks.SignedBeaconBlock{}
     roblock := consensusblocks.ROBlock{ReadOnlySignedBeaconBlock: signed}
+    service.cfg.ForkChoiceStore.Lock()
     err := service.postBlockProcess(&postBlockProcessConfig{tr.ctx, roblock, [32]byte{}, nil, true})
+    service.cfg.ForkChoiceStore.Unlock()
     require.Equal(t, true, IsInvalidBlock(err))
 }
-
-func TestOnBlock_InvalidSignature(t *testing.T) {
-    ctx := tr.ctx
-
-    gs, keys := util.DeterministicGenesisState(t, 32)
-    require.NoError(t, service.saveGenesisData(ctx, gs))
-
-    blk, err := util.GenerateFullBlock(gs, keys, util.DefaultBlockGenConfig(), 1)
-    require.NoError(t, err)
-    blk.Signature = []byte{'a'} // Mutate the signature.
-    wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
-    require.NoError(t, err)
-    preState, err := service.getBlockPreState(ctx, wsb.Block())
-    require.NoError(t, err)
-    _, err = service.validateStateTransition(ctx, preState, wsb)
-    require.Equal(t, true, IsInvalidBlock(err))
-}
@@ -866,7 +854,9 @@ func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) {
     require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
     roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
     require.NoError(t, err)
+    service.cfg.ForkChoiceStore.Lock()
     require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
+    service.cfg.ForkChoiceStore.Unlock()
     testState, err = service.cfg.StateGen.StateByRoot(ctx, r)
     require.NoError(t, err)
 }
@@ -1339,7 +1329,9 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
         lock.Lock()
         roblock, err := consensusblocks.NewROBlockWithRoot(wsb1, r1)
         require.NoError(t, err)
+        service.cfg.ForkChoiceStore.Lock()
         require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
+        service.cfg.ForkChoiceStore.Unlock()
         lock.Unlock()
         wg.Done()
     }()
@@ -1351,7 +1343,9 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
         lock.Lock()
         roblock, err := consensusblocks.NewROBlockWithRoot(wsb2, r2)
         require.NoError(t, err)
+        service.cfg.ForkChoiceStore.Lock()
         require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
+        service.cfg.ForkChoiceStore.Unlock()
         lock.Unlock()
         wg.Done()
     }()
@@ -1363,7 +1357,9 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
         lock.Lock()
         roblock, err := consensusblocks.NewROBlockWithRoot(wsb3, r3)
         require.NoError(t, err)
+        service.cfg.ForkChoiceStore.Lock()
         require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
+        service.cfg.ForkChoiceStore.Unlock()
         lock.Unlock()
         wg.Done()
     }()
@@ -1375,7 +1371,9 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
         lock.Lock()
         roblock, err := consensusblocks.NewROBlockWithRoot(wsb4, r4)
         require.NoError(t, err)
+        service.cfg.ForkChoiceStore.Lock()
         require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
+        service.cfg.ForkChoiceStore.Unlock()
         lock.Unlock()
         wg.Done()
     }()
```
```diff
@@ -1400,197 +1398,6 @@ func Test_verifyBlkFinalizedSlot_invalidBlock(t *testing.T) {
     require.Equal(t, true, IsInvalidBlock(err))
 }
 
-// See the description in #10777 and #10782 for the full setup
-// We sync optimistically a chain of blocks. Block 17 is the last block in Epoch
-// 2. Block 18 justifies block 12 (the first in Epoch 2) and Block 19 returns
-// INVALID from FCU, with LVH block 17. No head is viable. We check
-// that the node is optimistic and that we can actually import a block on top of
-// 17 and recover.
-func TestStore_NoViableHead_FCU(t *testing.T) {
-    params.SetupTestConfigCleanup(t)
-    config := params.BeaconConfig()
-    config.SlotsPerEpoch = 6
-    config.AltairForkEpoch = 1
-    config.BellatrixForkEpoch = 2
-    params.OverrideBeaconConfig(config)
-
-    mockEngine := &mockExecution.EngineClient{ErrNewPayload: execution.ErrAcceptedSyncingPayloadStatus, ErrForkchoiceUpdated: execution.ErrAcceptedSyncingPayloadStatus}
-    service, tr := minimalTestService(t, WithExecutionEngineCaller(mockEngine))
-    ctx := tr.ctx
-
-    st, keys := util.DeterministicGenesisState(t, 64)
-    stateRoot, err := st.HashTreeRoot(ctx)
-    require.NoError(t, err, "Could not hash genesis state")
-
-    require.NoError(t, service.saveGenesisData(ctx, st))
-
-    genesis := blocks.NewGenesisBlock(stateRoot[:])
-    wsb, err := consensusblocks.NewSignedBeaconBlock(genesis)
-    require.NoError(t, err)
-    require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb), "Could not save genesis block")
-
-    parentRoot, err := genesis.Block.HashTreeRoot()
-    require.NoError(t, err, "Could not get signing root")
-    require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, parentRoot), "Could not save genesis state")
-    require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
-
-    for i := 1; i < 6; i++ {
-        driftGenesisTime(service, primitives.Slot(i), 0)
-        st, err := service.HeadState(ctx)
-        require.NoError(t, err)
-        b, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
-        require.NoError(t, err)
-        wsb, err := consensusblocks.NewSignedBeaconBlock(b)
-        require.NoError(t, err)
-        root, err := b.Block.HashTreeRoot()
-        require.NoError(t, err)
-
-        preState, err := service.getBlockPreState(ctx, wsb.Block())
-        require.NoError(t, err)
-        postState, err := service.validateStateTransition(ctx, preState, wsb)
-        require.NoError(t, err)
-        require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
-        roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
-        require.NoError(t, err)
-        require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
-    }
-
-    for i := 6; i < 12; i++ {
-        driftGenesisTime(service, primitives.Slot(i), 0)
-        st, err := service.HeadState(ctx)
-        require.NoError(t, err)
-        b, err := util.GenerateFullBlockAltair(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
-        require.NoError(t, err)
-        wsb, err := consensusblocks.NewSignedBeaconBlock(b)
-        require.NoError(t, err)
-        root, err := b.Block.HashTreeRoot()
-        require.NoError(t, err)
-        preState, err := service.getBlockPreState(ctx, wsb.Block())
-        require.NoError(t, err)
-        postState, err := service.validateStateTransition(ctx, preState, wsb)
-        require.NoError(t, err)
-        require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
-        roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
-        require.NoError(t, err)
-        err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
-        require.NoError(t, err)
-    }
-
-    for i := 12; i < 18; i++ {
-        driftGenesisTime(service, primitives.Slot(i), 0)
-        st, err := service.HeadState(ctx)
-        require.NoError(t, err)
-        b, err := util.GenerateFullBlockBellatrix(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
-        require.NoError(t, err)
-        wsb, err := consensusblocks.NewSignedBeaconBlock(b)
-        require.NoError(t, err)
-        root, err := b.Block.HashTreeRoot()
-        require.NoError(t, err)
-        preState, err := service.getBlockPreState(ctx, wsb.Block())
-        require.NoError(t, err)
-        postState, err := service.validateStateTransition(ctx, preState, wsb)
-        require.NoError(t, err)
-        require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
-        roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
-        require.NoError(t, err)
-        err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
-        require.NoError(t, err)
-    }
-    // Check that we haven't justified the second epoch yet
-    jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint()
-    require.Equal(t, primitives.Epoch(0), jc.Epoch)
-
-    // import a block that justifies the second epoch
-    driftGenesisTime(service, 18, 0)
-    validHeadState, err := service.HeadState(ctx)
-    require.NoError(t, err)
-    b, err := util.GenerateFullBlockBellatrix(validHeadState, keys, util.DefaultBlockGenConfig(), 18)
-    require.NoError(t, err)
-    wsb, err = consensusblocks.NewSignedBeaconBlock(b)
-    require.NoError(t, err)
-    firstInvalidRoot, err := b.Block.HashTreeRoot()
-    require.NoError(t, err)
-    preState, err := service.getBlockPreState(ctx, wsb.Block())
-    require.NoError(t, err)
-    postState, err := service.validateStateTransition(ctx, preState, wsb)
-    require.NoError(t, err)
-    require.NoError(t, service.savePostStateInfo(ctx, firstInvalidRoot, wsb, postState))
-    roblock, err := consensusblocks.NewROBlockWithRoot(wsb, firstInvalidRoot)
-    require.NoError(t, err)
-    err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
-    require.NoError(t, err)
-    jc = service.cfg.ForkChoiceStore.JustifiedCheckpoint()
-    require.Equal(t, primitives.Epoch(2), jc.Epoch)
-
-    sjc := validHeadState.CurrentJustifiedCheckpoint()
-    require.Equal(t, primitives.Epoch(0), sjc.Epoch)
-    lvh := b.Block.Body.ExecutionPayload.ParentHash
-    // check our head
-    require.Equal(t, firstInvalidRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())
-
-    // import another block to find out that it was invalid
-    mockEngine = &mockExecution.EngineClient{ErrNewPayload: execution.ErrAcceptedSyncingPayloadStatus, ErrForkchoiceUpdated: execution.ErrInvalidPayloadStatus, ForkChoiceUpdatedResp: lvh}
-    service.cfg.ExecutionEngineCaller = mockEngine
-    driftGenesisTime(service, 19, 0)
-    st, err = service.HeadState(ctx)
-    require.NoError(t, err)
-    b, err = util.GenerateFullBlockBellatrix(st, keys, util.DefaultBlockGenConfig(), 19)
-    require.NoError(t, err)
-    wsb, err = consensusblocks.NewSignedBeaconBlock(b)
-    require.NoError(t, err)
-    root, err := b.Block.HashTreeRoot()
-    require.NoError(t, err)
-    preState, err = service.getBlockPreState(ctx, wsb.Block())
-    require.NoError(t, err)
-    postState, err = service.validateStateTransition(ctx, preState, wsb)
-    require.NoError(t, err)
-    require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
-    roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
-    require.NoError(t, err)
-    err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
-    require.ErrorContains(t, "received an INVALID payload from execution engine", err)
-    // Check that forkchoice's head is the last invalid block imported. The
-    // store's headroot is the previous head (since the invalid block did
-    // not finish importing) one and that the node is optimistic
-    require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot())
-    headRoot, err := service.HeadRoot(ctx)
-    require.NoError(t, err)
-    require.Equal(t, firstInvalidRoot, bytesutil.ToBytes32(headRoot))
-    optimistic, err := service.IsOptimistic(ctx)
-    require.NoError(t, err)
-    require.Equal(t, true, optimistic)
-
-    // import another block based on the last valid head state
-    mockEngine = &mockExecution.EngineClient{}
-    service.cfg.ExecutionEngineCaller = mockEngine
-    driftGenesisTime(service, 20, 0)
-    b, err = util.GenerateFullBlockBellatrix(validHeadState, keys, &util.BlockGenConfig{}, 20)
-    require.NoError(t, err)
-    wsb, err = consensusblocks.NewSignedBeaconBlock(b)
-    require.NoError(t, err)
-    root, err = b.Block.HashTreeRoot()
-    require.NoError(t, err)
-
-    preState, err = service.getBlockPreState(ctx, wsb.Block())
-    require.NoError(t, err)
-    postState, err = service.validateStateTransition(ctx, preState, wsb)
-    require.NoError(t, err)
-    require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
-    roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
-    require.NoError(t, err)
-    err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
-    require.NoError(t, err)
-    // Check the newly imported block is head, it justified the right
-    // checkpoint and the node is no longer optimistic
-    require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot())
-    sjc = service.CurrentJustifiedCheckpt()
-    require.Equal(t, jc.Epoch, sjc.Epoch)
-    require.Equal(t, jc.Root, bytesutil.ToBytes32(sjc.Root))
-    optimistic, err = service.IsOptimistic(ctx)
-    require.NoError(t, err)
-    require.Equal(t, false, optimistic)
-}
-
 // See the description in #10777 and #10782 for the full setup
 // We sync optimistically a chain of blocks. Block 17 is the last block in Epoch
 // 2. Block 18 justifies block 12 (the first in Epoch 2) and Block 19 returns
```
```diff
@@ -1642,7 +1449,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
     require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
     roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
     require.NoError(t, err)
+    service.cfg.ForkChoiceStore.Lock()
     require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
+    service.cfg.ForkChoiceStore.Unlock()
 }
 
 for i := 6; i < 12; i++ {
@@ -1662,8 +1471,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
     require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
     roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
     require.NoError(t, err)
-    err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
-    require.NoError(t, err)
+    service.cfg.ForkChoiceStore.Lock()
+    require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
+    service.cfg.ForkChoiceStore.Unlock()
 }
 
 for i := 12; i < 18; i++ {
@@ -1684,8 +1494,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
     require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
     roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
     require.NoError(t, err)
-    err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
-    require.NoError(t, err)
+    service.cfg.ForkChoiceStore.Lock()
+    require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
+    service.cfg.ForkChoiceStore.Unlock()
 }
 // Check that we haven't justified the second epoch yet
 jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint()
@@ -1708,7 +1519,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
     require.NoError(t, service.savePostStateInfo(ctx, firstInvalidRoot, wsb, postState))
     roblock, err := consensusblocks.NewROBlockWithRoot(wsb, firstInvalidRoot)
     require.NoError(t, err)
+    service.cfg.ForkChoiceStore.Lock()
     err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
+    service.cfg.ForkChoiceStore.Unlock()
     require.NoError(t, err)
     jc = service.cfg.ForkChoiceStore.JustifiedCheckpoint()
     require.Equal(t, primitives.Epoch(2), jc.Epoch)
```
```diff
@@ -1718,6 +1531,10 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
     lvh := b.Block.Body.ExecutionPayload.ParentHash
     // check our head
     require.Equal(t, firstInvalidRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())
+    isBlock18OptimisticAfterImport, err := service.IsOptimisticForRoot(ctx, firstInvalidRoot)
+    require.NoError(t, err)
+    require.Equal(t, true, isBlock18OptimisticAfterImport)
+    time.Sleep(20 * time.Millisecond) // wait for async forkchoice update to be processed
 
     // import another block to find out that it was invalid
     mockEngine = &mockExecution.EngineClient{ErrNewPayload: execution.ErrInvalidPayloadStatus, NewPayloadResp: lvh}
@@ -1768,7 +1585,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
     require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
     roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
     require.NoError(t, err)
+    service.cfg.ForkChoiceStore.Lock()
     err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
+    service.cfg.ForkChoiceStore.Unlock()
     require.NoError(t, err)
     // Check the newly imported block is head, it justified the right
     // checkpoint and the node is no longer optimistic
```
```diff
@@ -1835,7 +1654,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
     require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
     roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
     require.NoError(t, err)
+    service.cfg.ForkChoiceStore.Lock()
     require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
+    service.cfg.ForkChoiceStore.Unlock()
 }
 
 for i := 6; i < 12; i++ {
@@ -1856,8 +1677,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
     require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
     roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
     require.NoError(t, err)
-    err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
-    require.NoError(t, err)
+    service.cfg.ForkChoiceStore.Lock()
+    require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
+    service.cfg.ForkChoiceStore.Unlock()
 }
 
 // import the merge block
@@ -1877,7 +1699,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
     require.NoError(t, service.savePostStateInfo(ctx, lastValidRoot, wsb, postState))
     roblock, err := consensusblocks.NewROBlockWithRoot(wsb, lastValidRoot)
     require.NoError(t, err)
+    service.cfg.ForkChoiceStore.Lock()
     err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
+    service.cfg.ForkChoiceStore.Unlock()
     require.NoError(t, err)
     // save the post state and the payload Hash of this block since it will
     // be the LVH
@@ -1906,8 +1730,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
     require.NoError(t, service.savePostStateInfo(ctx, invalidRoots[i-13], wsb, postState))
     roblock, err := consensusblocks.NewROBlockWithRoot(wsb, invalidRoots[i-13])
     require.NoError(t, err)
-    err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
-    require.NoError(t, err)
+    service.cfg.ForkChoiceStore.Lock()
+    require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
+    service.cfg.ForkChoiceStore.Unlock()
 }
 // Check that we have justified the second epoch
 jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint()
```
```diff
@@ -1975,7 +1800,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
     require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
     roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
     require.NoError(t, err)
+    service.cfg.ForkChoiceStore.Lock()
     require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
+    service.cfg.ForkChoiceStore.Unlock()
     // Check that the head is still INVALID and the node is still optimistic
     require.Equal(t, invalidHeadRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())
     optimistic, err = service.IsOptimistic(ctx)
@@ -2000,7 +1827,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
     require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
     roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
     require.NoError(t, err)
+    service.cfg.ForkChoiceStore.Lock()
     err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
+    service.cfg.ForkChoiceStore.Unlock()
     require.NoError(t, err)
     st, err = service.cfg.StateGen.StateByRoot(ctx, root)
     require.NoError(t, err)
@@ -2028,7 +1857,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
     require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
     roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
     require.NoError(t, err)
+    service.cfg.ForkChoiceStore.Lock()
     err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
+    service.cfg.ForkChoiceStore.Unlock()
     require.NoError(t, err)
     require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot())
     sjc = service.CurrentJustifiedCheckpt()
```
```diff
@@ -2072,7 +1903,6 @@ func TestNoViableHead_Reboot(t *testing.T) {
     require.NoError(t, service.cfg.BeaconDB.SaveGenesisBlockRoot(ctx, genesisRoot), "Could not save genesis state")
 
     for i := 1; i < 6; i++ {
-        t.Log(i)
         driftGenesisTime(service, primitives.Slot(i), 0)
         st, err := service.HeadState(ctx)
         require.NoError(t, err)
@@ -2089,7 +1919,9 @@ func TestNoViableHead_Reboot(t *testing.T) {
     require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
     roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
     require.NoError(t, err)
+    service.cfg.ForkChoiceStore.Lock()
     require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
+    service.cfg.ForkChoiceStore.Unlock()
 }
 
 for i := 6; i < 12; i++ {
@@ -2109,8 +1941,9 @@ func TestNoViableHead_Reboot(t *testing.T) {
     require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
     roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
     require.NoError(t, err)
-    err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
-    require.NoError(t, err)
+    service.cfg.ForkChoiceStore.Lock()
+    require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
+    service.cfg.ForkChoiceStore.Unlock()
 }
 
 // import the merge block
@@ -2130,7 +1963,9 @@ func TestNoViableHead_Reboot(t *testing.T) {
     require.NoError(t, service.savePostStateInfo(ctx, lastValidRoot, wsb, postState))
     roblock, err := consensusblocks.NewROBlockWithRoot(wsb, lastValidRoot)
     require.NoError(t, err)
+    service.cfg.ForkChoiceStore.Lock()
     err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
+    service.cfg.ForkChoiceStore.Unlock()
     require.NoError(t, err)
     // save the post state and the payload Hash of this block since it will
     // be the LVH
```
```diff
@@ -2161,7 +1996,9 @@ func TestNoViableHead_Reboot(t *testing.T) {
     require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
     roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
     require.NoError(t, err)
+    service.cfg.ForkChoiceStore.Lock()
     require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
+    service.cfg.ForkChoiceStore.Unlock()
     require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
     _, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
     require.NoError(t, err)
@@ -2282,7 +2119,9 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
     require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
     roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
     require.NoError(t, err)
+    service.cfg.ForkChoiceStore.Lock()
     require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
+    service.cfg.ForkChoiceStore.Unlock()
 
     st, err = service.HeadState(ctx)
     require.NoError(t, err)
@@ -2348,7 +2187,9 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
     require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
     roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
     require.NoError(t, err)
+    service.cfg.ForkChoiceStore.Lock()
     require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
+    service.cfg.ForkChoiceStore.Unlock()
 
     st, err = service.HeadState(ctx)
     require.NoError(t, err)
```
```diff
@@ -2631,7 +2472,10 @@ func TestRollbackBlock(t *testing.T) {
     require.NoError(t, err)
 
     // Rollback block insertion into db and caches.
-    require.ErrorContains(t, fmt.Sprintf("could not insert block %d to fork choice store", roblock.Block().Slot()), service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
+    service.cfg.ForkChoiceStore.Lock()
+    err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
+    service.cfg.ForkChoiceStore.Unlock()
+    require.ErrorContains(t, fmt.Sprintf("could not insert block %d to fork choice store", roblock.Block().Slot()), err)
 
     // The block should no longer exist.
     require.Equal(t, false, service.cfg.BeaconDB.HasBlock(ctx, root))
@@ -2732,7 +2576,9 @@ func TestRollbackBlock_ContextDeadline(t *testing.T) {
     require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
     roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
     require.NoError(t, err)
+    service.cfg.ForkChoiceStore.Lock()
     require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
+    service.cfg.ForkChoiceStore.Unlock()
 
     b, err = util.GenerateFullBlock(postState, keys, util.DefaultBlockGenConfig(), 34)
     require.NoError(t, err)
@@ -2766,7 +2612,10 @@ func TestRollbackBlock_ContextDeadline(t *testing.T) {
     require.NoError(t, postState.SetFinalizedCheckpoint(cj))
 
     // Rollback block insertion into db and caches.
-    require.ErrorContains(t, "context canceled", service.postBlockProcess(&postBlockProcessConfig{cancCtx, roblock, [32]byte{}, postState, false}))
+    service.cfg.ForkChoiceStore.Lock()
+    err = service.postBlockProcess(&postBlockProcessConfig{cancCtx, roblock, [32]byte{}, postState, false})
+    service.cfg.ForkChoiceStore.Unlock()
+    require.ErrorContains(t, "context canceled", err)
 
     // The block should no longer exist.
     require.Equal(t, false, service.cfg.BeaconDB.HasBlock(ctx, root))
```
@@ -3262,7 +3111,9 @@ func Test_postBlockProcess_EventSending(t *testing.T) {
|
||||
}
|
||||
|
||||
// Execute postBlockProcess
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
err = service.postBlockProcess(cfg)
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
|
||||
// Check error expectation
|
||||
if tt.expectError {
|
||||
|
||||
@@ -156,13 +156,15 @@ func (s *Service) UpdateHead(ctx context.Context, proposingSlot primitives.Slot)
|
||||
}
|
||||
if s.inRegularSync() {
|
||||
fcuArgs.attributes = s.getPayloadAttribute(ctx, headState, proposingSlot, newHeadRoot[:])
|
||||
if fcuArgs.attributes != nil && s.shouldOverrideFCU(newHeadRoot, proposingSlot) {
|
||||
return
|
||||
}
|
||||
go s.forkchoiceUpdateWithExecution(s.ctx, fcuArgs)
|
||||
}
|
||||
if fcuArgs.attributes != nil && s.shouldOverrideFCU(newHeadRoot, proposingSlot) {
|
||||
return
|
||||
}
|
||||
if err := s.forkchoiceUpdateWithExecution(s.ctx, fcuArgs); err != nil {
|
||||
log.WithError(err).Error("Could not update forkchoice")
|
||||
if err := s.saveHead(s.ctx, fcuArgs.headRoot, fcuArgs.headBlock, fcuArgs.headState); err != nil {
|
||||
log.WithError(err).Error("Could not save head")
|
||||
}
|
||||
s.pruneAttsFromPool(s.ctx, fcuArgs.headState, fcuArgs.headBlock)
|
||||
}
|
||||
|
||||
// This processes fork choice attestations from the pool to account for validator votes and fork choice.
|
||||
|
||||
@@ -117,7 +117,9 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, tRoot, wsb, postState))
|
||||
roblock, err := blocks.NewROBlockWithRoot(wsb, tRoot)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
copied, err = service.cfg.StateGen.StateByRoot(ctx, tRoot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 2, fcs.NodeCount())
|
||||
@@ -177,7 +179,9 @@ func TestService_UpdateHead_NoAtts(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, tRoot, wsb, postState))
|
||||
roblock, err := blocks.NewROBlockWithRoot(wsb, tRoot)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.Equal(t, 2, fcs.NodeCount())
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
|
||||
require.Equal(t, tRoot, service.head.root)
|
||||
|
||||
@@ -290,52 +290,3 @@ func TestProcessBlockHeader_OK(t *testing.T) {
|
||||
}
|
||||
assert.Equal(t, true, proto.Equal(nsh, expected), "Expected %v, received %v", expected, nsh)
|
||||
}
|
||||
|
||||
func TestBlockSignatureSet_OK(t *testing.T) {
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
PublicKey: make([]byte, 32),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
Slashed: true,
|
||||
}
|
||||
}
|
||||
|
||||
state, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, state.SetValidators(validators))
|
||||
require.NoError(t, state.SetSlot(10))
|
||||
require.NoError(t, state.SetLatestBlockHeader(util.HydrateBeaconHeader(ðpb.BeaconBlockHeader{
|
||||
Slot: 9,
|
||||
ProposerIndex: 0,
|
||||
})))
|
||||
|
||||
latestBlockSignedRoot, err := state.LatestBlockHeader().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
currentEpoch := time.CurrentEpoch(state)
|
||||
priv, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
pID, err := helpers.BeaconProposerIndex(t.Context(), state)
|
||||
require.NoError(t, err)
|
||||
block := util.NewBeaconBlock()
|
||||
block.Block.Slot = 10
|
||||
block.Block.ProposerIndex = pID
|
||||
block.Block.Body.RandaoReveal = bytesutil.PadTo([]byte{'A', 'B', 'C'}, 96)
|
||||
block.Block.ParentRoot = latestBlockSignedRoot[:]
|
||||
block.Signature, err = signing.ComputeDomainAndSign(state, currentEpoch, block.Block, params.BeaconConfig().DomainBeaconProposer, priv)
|
||||
require.NoError(t, err)
|
||||
proposerIdx, err := helpers.BeaconProposerIndex(t.Context(), state)
|
||||
require.NoError(t, err)
|
||||
validators[proposerIdx].Slashed = false
|
||||
validators[proposerIdx].PublicKey = priv.PublicKey().Marshal()
|
||||
err = state.UpdateValidatorAtIndex(proposerIdx, validators[proposerIdx])
|
||||
require.NoError(t, err)
|
||||
set, err := blocks.BlockSignatureBatch(state, block.Block.ProposerIndex, block.Signature, block.Block.HashTreeRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
verified, err := set.Verify()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, verified, "Block signature set returned a set which was unable to be verified")
|
||||
}
|
||||
|
||||
@@ -122,24 +122,6 @@ func VerifyBlockSignatureUsingCurrentFork(beaconState state.ReadOnlyBeaconState,
|
||||
return nil
|
||||
}
|
||||
|
||||
// BlockSignatureBatch retrieves the block signature batch from the provided block and its corresponding state.
|
||||
func BlockSignatureBatch(beaconState state.ReadOnlyBeaconState,
|
||||
proposerIndex primitives.ValidatorIndex,
|
||||
sig []byte,
|
||||
rootFunc func() ([32]byte, error)) (*bls.SignatureBatch, error) {
|
||||
currentEpoch := slots.ToEpoch(beaconState.Slot())
|
||||
domain, err := signing.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorsRoot())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
proposer, err := beaconState.ValidatorAtIndex(proposerIndex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
proposerPubKey := proposer.PublicKey
|
||||
return signing.BlockSignatureBatch(proposerPubKey, sig, domain, rootFunc)
|
||||
}
|
||||
|
||||
// RandaoSignatureBatch retrieves the relevant randao specific signature batch object
|
||||
// from a block and its corresponding state.
|
||||
func RandaoSignatureBatch(
|
||||
|
||||
@@ -278,12 +278,12 @@ func ProcessConsolidationRequests(ctx context.Context, st state.BeaconState, req
|
||||
if uint64(curEpoch) < e {
|
||||
continue
|
||||
}
|
||||
bal, err := st.PendingBalanceToWithdraw(srcIdx)
|
||||
hasBal, err := st.HasPendingBalanceToWithdraw(srcIdx)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to fetch pending balance to withdraw")
|
||||
continue
|
||||
}
|
||||
if bal > 0 {
|
||||
if hasBal {
|
||||
continue
|
||||
}
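The consolidation hunk above swaps a balance summation for a boolean lookup: the check only needs to know whether any pending withdrawal exists for the source index, not the total amount. A minimal sketch of the distinction, using illustrative type and field names rather than Prysm's actual state API:

    package main

    import "fmt"

    type pendingWithdrawal struct {
    	validatorIndex uint64
    	amount         uint64
    }

    // pendingBalanceToWithdraw walks the whole queue and sums matching amounts.
    func pendingBalanceToWithdraw(queue []pendingWithdrawal, idx uint64) uint64 {
    	total := uint64(0)
    	for _, w := range queue {
    		if w.validatorIndex == idx {
    			total += w.amount
    		}
    	}
    	return total
    }

    // hasPendingBalanceToWithdraw can stop at the first match, which is all
    // the consolidation check needs.
    func hasPendingBalanceToWithdraw(queue []pendingWithdrawal, idx uint64) bool {
    	for _, w := range queue {
    		if w.validatorIndex == idx {
    			return true
    		}
    	}
    	return false
    }

    func main() {
    	q := []pendingWithdrawal{{validatorIndex: 7, amount: 32}}
    	fmt.Println(pendingBalanceToWithdraw(q, 7), hasPendingBalanceToWithdraw(q, 7)) // 32 true
    }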
@@ -5,10 +5,20 @@ import (
"github.com/prometheus/client_golang/prometheus/promauto"
)

var dataColumnComputationTime = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "beacon_data_column_sidecar_computation_milliseconds",
Help: "Captures the time taken to compute data column sidecars from blobs.",
Buckets: []float64{25, 50, 100, 250, 500, 750, 1000},
},
var (
dataColumnComputationTime = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "beacon_data_column_sidecar_computation_milliseconds",
Help: "Captures the time taken to compute data column sidecars from blobs.",
Buckets: []float64{25, 50, 100, 250, 500, 750, 1000},
},
)

cellsAndProofsFromStructuredComputationTime = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "cells_and_proofs_from_structured_computation_milliseconds",
Help: "Captures the time taken to compute cells and proofs from structured computation.",
Buckets: []float64{10, 20, 30, 40, 50, 100, 200},
},
)
)
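For readers unfamiliar with promauto, the grouped var block above registers every collector with the default Prometheus registry at package initialization, and elapsed time is observed in milliseconds to match the bucket units. A standalone sketch with an illustrative metric name:

    package metrics

    import (
    	"time"

    	"github.com/prometheus/client_golang/prometheus"
    	"github.com/prometheus/client_golang/prometheus/promauto"
    )

    // promauto.NewHistogram registers the collector as a side effect, so
    // grouping related metrics in one var block keeps registration in one place.
    var exampleComputationTime = promauto.NewHistogram(prometheus.HistogramOpts{
    	Name:    "example_computation_milliseconds", // illustrative name
    	Help:    "Time taken by an example computation.",
    	Buckets: []float64{10, 20, 30, 40, 50, 100, 200},
    })

    // timed records how long work takes, in the same unit as the buckets above.
    func timed(work func()) {
    	start := time.Now()
    	work()
    	exampleComputationTime.Observe(float64(time.Since(start).Milliseconds()))
    }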
@@ -3,6 +3,7 @@ package peerdas
import (
"sort"
"sync"
"time"

"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
@@ -296,32 +297,42 @@ func ComputeCellsAndProofsFromFlat(blobs [][]byte, cellProofs [][]byte) ([][]kzg
return nil, nil, ErrBlobsCellsProofsMismatch
}

cellsPerBlob := make([][]kzg.Cell, 0, blobCount)
proofsPerBlob := make([][]kzg.Proof, 0, blobCount)
var wg errgroup.Group

cellsPerBlob := make([][]kzg.Cell, blobCount)
proofsPerBlob := make([][]kzg.Proof, blobCount)

for i, blob := range blobs {
var kzgBlob kzg.Blob
if copy(kzgBlob[:], blob) != len(kzgBlob) {
return nil, nil, errors.New("wrong blob size - should never happen")
}

// Compute the extended cells from the (non-extended) blob.
cells, err := kzg.ComputeCells(&kzgBlob)
if err != nil {
return nil, nil, errors.Wrap(err, "compute cells")
}

var proofs []kzg.Proof
for idx := uint64(i) * numberOfColumns; idx < (uint64(i)+1)*numberOfColumns; idx++ {
var kzgProof kzg.Proof
if copy(kzgProof[:], cellProofs[idx]) != len(kzgProof) {
return nil, nil, errors.New("wrong KZG proof size - should never happen")
wg.Go(func() error {
var kzgBlob kzg.Blob
if copy(kzgBlob[:], blob) != len(kzgBlob) {
return errors.New("wrong blob size - should never happen")
}

proofs = append(proofs, kzgProof)
}
// Compute the extended cells from the (non-extended) blob.
cells, err := kzg.ComputeCells(&kzgBlob)
if err != nil {
return errors.Wrap(err, "compute cells")
}

cellsPerBlob = append(cellsPerBlob, cells)
proofsPerBlob = append(proofsPerBlob, proofs)
proofs := make([]kzg.Proof, 0, numberOfColumns)
for idx := uint64(i) * numberOfColumns; idx < (uint64(i)+1)*numberOfColumns; idx++ {
var kzgProof kzg.Proof
if copy(kzgProof[:], cellProofs[idx]) != len(kzgProof) {
return errors.New("wrong KZG proof size - should never happen")
}

proofs = append(proofs, kzgProof)
}

cellsPerBlob[i] = cells
proofsPerBlob[i] = proofs
return nil
})
}

if err := wg.Wait(); err != nil {
return nil, nil, err
}

return cellsPerBlob, proofsPerBlob, nil
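The parallelized version above avoids any locking by pre-sizing the output slices and letting goroutine i write only slot i, which also preserves input order. The same errgroup fan-out pattern in isolation (per-iteration loop variables as in Go 1.22+; older Go would need an explicit `i, v := i, v` copy):

    package main

    import (
    	"fmt"

    	"golang.org/x/sync/errgroup"
    )

    func squareAll(inputs []int) ([]int, error) {
    	var eg errgroup.Group
    	results := make([]int, len(inputs)) // pre-sized: one slot per goroutine
    	for i, v := range inputs {
    		eg.Go(func() error {
    			results[i] = v * v // disjoint indices, so no mutex is needed
    			return nil
    		})
    	}
    	// Wait blocks until all goroutines finish and returns the first error.
    	if err := eg.Wait(); err != nil {
    		return nil, err
    	}
    	return results, nil
    }

    func main() {
    	out, _ := squareAll([]int{1, 2, 3})
    	fmt.Println(out) // [1 4 9] — order matches the input
    }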
@@ -329,40 +340,55 @@ func ComputeCellsAndProofsFromFlat(blobs [][]byte, cellProofs [][]byte) ([][]kzg

// ComputeCellsAndProofsFromStructured computes the cells and proofs from blobs and cell proofs.
func ComputeCellsAndProofsFromStructured(blobsAndProofs []*pb.BlobAndProofV2) ([][]kzg.Cell, [][]kzg.Proof, error) {
cellsPerBlob := make([][]kzg.Cell, 0, len(blobsAndProofs))
proofsPerBlob := make([][]kzg.Proof, 0, len(blobsAndProofs))
for _, blobAndProof := range blobsAndProofs {
start := time.Now()
defer func() {
cellsAndProofsFromStructuredComputationTime.Observe(float64(time.Since(start).Milliseconds()))
}()

var wg errgroup.Group

cellsPerBlob := make([][]kzg.Cell, len(blobsAndProofs))
proofsPerBlob := make([][]kzg.Proof, len(blobsAndProofs))

for i, blobAndProof := range blobsAndProofs {
if blobAndProof == nil {
return nil, nil, ErrNilBlobAndProof
}

var kzgBlob kzg.Blob
if copy(kzgBlob[:], blobAndProof.Blob) != len(kzgBlob) {
return nil, nil, errors.New("wrong blob size - should never happen")
}

// Compute the extended cells from the (non-extended) blob.
cells, err := kzg.ComputeCells(&kzgBlob)
if err != nil {
return nil, nil, errors.Wrap(err, "compute cells")
}

kzgProofs := make([]kzg.Proof, 0, fieldparams.NumberOfColumns)
for _, kzgProofBytes := range blobAndProof.KzgProofs {
if len(kzgProofBytes) != kzg.BytesPerProof {
return nil, nil, errors.New("wrong KZG proof size - should never happen")
wg.Go(func() error {
var kzgBlob kzg.Blob
if copy(kzgBlob[:], blobAndProof.Blob) != len(kzgBlob) {
return errors.New("wrong blob size - should never happen")
}

var kzgProof kzg.Proof
if copy(kzgProof[:], kzgProofBytes) != len(kzgProof) {
return nil, nil, errors.New("wrong copied KZG proof size - should never happen")
// Compute the extended cells from the (non-extended) blob.
cells, err := kzg.ComputeCells(&kzgBlob)
if err != nil {
return errors.Wrap(err, "compute cells")
}

kzgProofs = append(kzgProofs, kzgProof)
}
kzgProofs := make([]kzg.Proof, 0, fieldparams.NumberOfColumns)
for _, kzgProofBytes := range blobAndProof.KzgProofs {
if len(kzgProofBytes) != kzg.BytesPerProof {
return errors.New("wrong KZG proof size - should never happen")
}

cellsPerBlob = append(cellsPerBlob, cells)
proofsPerBlob = append(proofsPerBlob, kzgProofs)
var kzgProof kzg.Proof
if copy(kzgProof[:], kzgProofBytes) != len(kzgProof) {
return errors.New("wrong copied KZG proof size - should never happen")
}

kzgProofs = append(kzgProofs, kzgProof)
}

cellsPerBlob[i] = cells
proofsPerBlob[i] = kzgProofs
return nil
})
}

if err := wg.Wait(); err != nil {
return nil, nil, err
}

return cellsPerBlob, proofsPerBlob, nil

@@ -182,12 +182,6 @@ func ProcessBlockNoVerifyAnySig(
return nil, nil, err
}

sig := signed.Signature()
bSet, err := b.BlockSignatureBatch(st, blk.ProposerIndex(), sig[:], blk.HashTreeRoot)
if err != nil {
tracing.AnnotateError(span, err)
return nil, nil, errors.Wrap(err, "could not retrieve block signature set")
}
randaoReveal := signed.Block().Body().RandaoReveal()
rSet, err := b.RandaoSignatureBatch(ctx, st, randaoReveal[:])
if err != nil {
@@ -201,7 +195,7 @@ func ProcessBlockNoVerifyAnySig(

// Merge beacon block, randao and attestations signatures into a set.
set := bls.NewSet()
set.Join(bSet).Join(rSet).Join(aSet)
set.Join(rSet).Join(aSet)

if blk.Version() >= version.Capella {
changes, err := signed.Block().Body().BLSToExecutionChanges()

@@ -157,9 +157,8 @@ func TestProcessBlockNoVerify_SigSetContainsDescriptions(t *testing.T) {
set, _, err := transition.ProcessBlockNoVerifyAnySig(t.Context(), beaconState, wsb)
require.NoError(t, err)
assert.Equal(t, len(set.Signatures), len(set.Descriptions), "Signatures and descriptions do not match up")
assert.Equal(t, "block signature", set.Descriptions[0])
assert.Equal(t, "randao signature", set.Descriptions[1])
assert.Equal(t, "attestation signature", set.Descriptions[2])
assert.Equal(t, "randao signature", set.Descriptions[0])
assert.Equal(t, "attestation signature", set.Descriptions[1])
}

func TestProcessOperationsNoVerifyAttsSigs_OK(t *testing.T) {

@@ -67,9 +67,9 @@ func NewSyncNeeds(current CurrentSlotter, oldestSlotFlagPtr *primitives.Slot, bl

// Override spec minimum block retention with user-provided flag only if it is lower than the spec minimum.
sn.blockRetention = primitives.Epoch(params.BeaconConfig().MinEpochsForBlockRequests)

if oldestSlotFlagPtr != nil {
oldestEpoch := slots.ToEpoch(*oldestSlotFlagPtr)
if oldestEpoch < sn.blockRetention {
if *oldestSlotFlagPtr <= syncEpochOffset(current(), sn.blockRetention) {
sn.validOldestSlotPtr = oldestSlotFlagPtr
} else {
log.WithField("backfill-oldest-slot", *oldestSlotFlagPtr).

@@ -128,6 +128,9 @@ func TestSyncNeedsInitialize(t *testing.T) {
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
minBlobEpochs := params.BeaconConfig().MinEpochsForBlobsSidecarsRequest
minColEpochs := params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest
denebSlot := slots.UnsafeEpochStart(params.BeaconConfig().DenebForkEpoch)
fuluSlot := slots.UnsafeEpochStart(params.BeaconConfig().FuluForkEpoch)
minSlots := slots.UnsafeEpochStart(primitives.Epoch(params.BeaconConfig().MinEpochsForBlockRequests))

currentSlot := primitives.Slot(10000)
currentFunc := func() primitives.Slot { return currentSlot }
@@ -141,6 +144,7 @@ func TestSyncNeedsInitialize(t *testing.T) {
expectedCol primitives.Epoch
name string
input SyncNeeds
current func() primitives.Slot
}{
{
name: "basic initialization with no flags",
@@ -174,13 +178,13 @@ func TestSyncNeedsInitialize(t *testing.T) {
{
name: "valid oldestSlotFlagPtr (earlier than spec minimum)",
blobRetentionFlag: 0,
oldestSlotFlagPtr: func() *primitives.Slot {
slot := primitives.Slot(10)
return &slot
}(),
oldestSlotFlagPtr: &denebSlot,
expectValidOldest: true,
expectedBlob: minBlobEpochs,
expectedCol: minColEpochs,
current: func() primitives.Slot {
return fuluSlot + minSlots
},
},
{
name: "invalid oldestSlotFlagPtr (later than spec minimum)",
@@ -210,6 +214,9 @@ func TestSyncNeedsInitialize(t *testing.T) {
{
name: "both blob retention flag and oldest slot set",
blobRetentionFlag: minBlobEpochs + 5,
current: func() primitives.Slot {
return fuluSlot + minSlots
},
oldestSlotFlagPtr: func() *primitives.Slot {
slot := primitives.Slot(100)
return &slot
@@ -232,16 +239,27 @@ func TestSyncNeedsInitialize(t *testing.T) {
expectedBlob: 5000,
expectedCol: 5000,
},
{
name: "regression for deneb start",
blobRetentionFlag: 8212500,
expectValidOldest: true,
oldestSlotFlagPtr: &denebSlot,
current: func() primitives.Slot {
return fuluSlot + minSlots
},
expectedBlob: 8212500,
expectedCol: 8212500,
},
}

for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
result, err := NewSyncNeeds(currentFunc, tc.oldestSlotFlagPtr, tc.blobRetentionFlag)
if tc.current == nil {
tc.current = currentFunc
}
result, err := NewSyncNeeds(tc.current, tc.oldestSlotFlagPtr, tc.blobRetentionFlag)
require.NoError(t, err)

// Check that current, deneb, fulu are set correctly
require.Equal(t, currentSlot, result.current())

// Check retention calculations
require.Equal(t, tc.expectedBlob, result.blobRetention)
require.Equal(t, tc.expectedCol, result.colRetention)

@@ -38,6 +38,7 @@ go_library(
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_spf13_afero//:go_default_library",
"@org_golang_x_sync//errgroup:go_default_library",
],
)

@@ -25,6 +25,7 @@ import (
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/pkg/errors"
"github.com/spf13/afero"
"golang.org/x/sync/errgroup"
)

const (
@@ -185,73 +186,162 @@ func (dcs *DataColumnStorage) WarmCache() {

highestStoredEpoch := primitives.Epoch(0)

// Walk the data column filesystem to warm up the cache.
if err := afero.Walk(dcs.fs, ".", func(path string, info os.FileInfo, fileErr error) (err error) {
if fileErr != nil {
return fileErr
}

// If not a leaf, skip.
if info.IsDir() {
return nil
}

// Extract metadata from the file path.
fileMetadata, err := extractFileMetadata(path)
if err != nil {
log.WithError(err).Error("Error encountered while extracting file metadata")
return nil
}

// Open the data column filesystem file.
f, err := dcs.fs.Open(path)
if err != nil {
log.WithError(err).Error("Error encountered while opening data column filesystem file")
return nil
}

// Close the file.
defer func() {
// Overwrite the existing error only if it is nil, since the close error is less important.
closeErr := f.Close()
if closeErr != nil && err == nil {
err = closeErr
}
}()

// Read the metadata of the file.
metadata, err := dcs.metadata(f)
if err != nil {
log.WithError(err).Error("Error encountered while reading metadata from data column filesystem file")
return nil
}

// Check the indices.
indices := metadata.indices.all()
if len(indices) == 0 {
return nil
}

// Build the ident.
dataColumnsIdent := DataColumnsIdent{Root: fileMetadata.blockRoot, Epoch: fileMetadata.epoch, Indices: indices}

// Update the highest stored epoch.
highestStoredEpoch = max(highestStoredEpoch, fileMetadata.epoch)

// Set the ident in the cache.
if err := dcs.cache.set(dataColumnsIdent); err != nil {
log.WithError(err).Error("Error encountered while ensuring data column filesystem cache")
}

return nil
}); err != nil {
log.WithError(err).Error("Error encountered while walking data column filesystem.")
// List all period directories
periodFileInfos, err := afero.ReadDir(dcs.fs, ".")
if err != nil {
log.WithError(err).Error("Error reading top directory during warm cache")
return
}

// Prune the cache and the filesystem.
// Iterate through periods
for _, periodFileInfo := range periodFileInfos {
if !periodFileInfo.IsDir() {
continue
}

periodPath := periodFileInfo.Name()

// List all epoch directories in this period
epochFileInfos, err := afero.ReadDir(dcs.fs, periodPath)
if err != nil {
log.WithError(err).WithField("period", periodPath).Error("Error reading period directory during warm cache")
continue
}

// Iterate through epochs
for _, epochFileInfo := range epochFileInfos {
if !epochFileInfo.IsDir() {
continue
}

epochPath := path.Join(periodPath, epochFileInfo.Name())

// List all .sszs files in this epoch
files, err := listEpochFiles(dcs.fs, epochPath)
if err != nil {
log.WithError(err).WithField("epoch", epochPath).Error("Error listing epoch files during warm cache")
continue
}

if len(files) == 0 {
continue
}

// Process all files in this epoch in parallel
epochHighest, err := dcs.processEpochFiles(files)
if err != nil {
log.WithError(err).WithField("epoch", epochPath).Error("Error processing epoch files during warm cache")
}

highestStoredEpoch = max(highestStoredEpoch, epochHighest)
}
}

// Prune the cache and the filesystem
dcs.prune()

log.WithField("elapsed", time.Since(start)).Info("Data column filesystem cache warm-up complete")
totalElapsed := time.Since(start)

// Log summary
log.WithField("elapsed", totalElapsed).Info("Data column filesystem cache warm-up complete")
}

// listEpochFiles lists all .sszs files in an epoch directory.
func listEpochFiles(fs afero.Fs, epochPath string) ([]string, error) {
fileInfos, err := afero.ReadDir(fs, epochPath)
if err != nil {
return nil, errors.Wrap(err, "read epoch directory")
}

files := make([]string, 0, len(fileInfos))
for _, fileInfo := range fileInfos {
if fileInfo.IsDir() {
continue
}

fileName := fileInfo.Name()
if strings.HasSuffix(fileName, "."+dataColumnsFileExtension) {
files = append(files, path.Join(epochPath, fileName))
}
}

return files, nil
}
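Because the storage layer is written against afero.Fs rather than the OS filesystem, helpers such as listEpochFiles can be exercised entirely in memory. A test-style sketch, assumed to live in the same package (the helper is unexported) and assuming dataColumnsFileExtension is "sszs", as the comments above suggest:

    func TestListEpochFiles_FiltersByExtension(t *testing.T) {
    	fs := afero.NewMemMapFs() // throwaway in-memory filesystem
    	require.NoError(t, fs.MkdirAll("0/12", 0o755))
    	require.NoError(t, afero.WriteFile(fs, "0/12/abcd.sszs", []byte{1}, 0o644))
    	require.NoError(t, afero.WriteFile(fs, "0/12/notes.txt", []byte{2}, 0o644))

    	files, err := listEpochFiles(fs, "0/12")
    	require.NoError(t, err)
    	// Only the .sszs file should be returned; other entries are skipped.
    	require.Equal(t, 1, len(files))
    	require.Equal(t, "0/12/abcd.sszs", files[0])
    }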
// processEpochFiles processes all .sszs files in an epoch directory in parallel.
func (dcs *DataColumnStorage) processEpochFiles(files []string) (primitives.Epoch, error) {
var (
eg errgroup.Group
mu sync.Mutex
)

highestEpoch := primitives.Epoch(0)
for _, filePath := range files {
eg.Go(func() error {
epoch, err := dcs.processFile(filePath)
if err != nil {
log.WithError(err).WithField("file", filePath).Error("Error processing file during warm cache")
return nil
}

mu.Lock()
defer mu.Unlock()
highestEpoch = max(highestEpoch, epoch)

return nil
})
}

if err := eg.Wait(); err != nil {
return highestEpoch, err
}

return highestEpoch, nil
}

// processFile processes a single .sszs file.
func (dcs *DataColumnStorage) processFile(filePath string) (primitives.Epoch, error) {
// Extract metadata from the file path
fileMetadata, err := extractFileMetadata(filePath)
if err != nil {
return 0, errors.Wrap(err, "extract file metadata")
}

// Open the file (each goroutine gets its own FD)
f, err := dcs.fs.Open(filePath)
if err != nil {
return 0, errors.Wrap(err, "open file")
}
defer func() {
if closeErr := f.Close(); closeErr != nil {
log.WithError(closeErr).WithField("file", filePath).Error("Error closing file during warm cache")
}
}()

// Read metadata
metadata, err := dcs.metadata(f)
if err != nil {
return 0, errors.Wrap(err, "read metadata")
}

// Extract indices
indices := metadata.indices.all()
if len(indices) == 0 {
return fileMetadata.epoch, nil // No indices, skip
}

// Build ident and set in cache (thread-safe)
dataColumnsIdent := DataColumnsIdent{
Root: fileMetadata.blockRoot,
Epoch: fileMetadata.epoch,
Indices: indices,
}

if err := dcs.cache.set(dataColumnsIdent); err != nil {
return 0, errors.Wrap(err, "cache set")
}

return fileMetadata.epoch, nil
}

// Summary returns the DataColumnStorageSummary.
@@ -515,6 +605,11 @@ func (dcs *DataColumnStorage) Clear() error {
// prune cleans the cache, the filesystem, and mutexes.
func (dcs *DataColumnStorage) prune() {
startTime := time.Now()
defer func() {
dataColumnPruneLatency.Observe(float64(time.Since(startTime).Milliseconds()))
}()

highestStoredEpoch := dcs.cache.HighestEpoch()

// Check if we need to prune.
@@ -622,6 +717,9 @@ func (dcs *DataColumnStorage) saveDataColumnSidecarsExistingFile(filePath string
// Create the SSZ encoded data column sidecars.
var sszEncodedDataColumnSidecars []byte

// Initialize the count of the saved SSZ encoded data column sidecar.
storedCount := uint8(0)

for {
dataColumnSidecars := pullChan(inputDataColumnSidecars)
if len(dataColumnSidecars) == 0 {
@@ -668,6 +766,9 @@ func (dcs *DataColumnStorage) saveDataColumnSidecarsExistingFile(filePath string
return errors.Wrap(err, "set index")
}

// Increment the count of the saved SSZ encoded data column sidecar.
storedCount++

// Append the SSZ encoded data column sidecar to the SSZ encoded data column sidecars.
sszEncodedDataColumnSidecars = append(sszEncodedDataColumnSidecars, sszEncodedDataColumnSidecar...)
}
@@ -692,9 +793,12 @@ func (dcs *DataColumnStorage) saveDataColumnSidecarsExistingFile(filePath string
return errWrongBytesWritten
}

syncStart := time.Now()
if err := file.Sync(); err != nil {
return errors.Wrap(err, "sync")
}
dataColumnFileSyncLatency.Observe(float64(time.Since(syncStart).Milliseconds()))
dataColumnBatchStoreCount.Observe(float64(storedCount))

return nil
}
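The syncStart/Observe pair above deliberately times only file.Sync, since the fsync is typically the dominant cost of a durable write and folding in the buffered write would dilute the measurement. The isolated pattern, with an illustrative metric name (a Summary rather than a Histogram, matching the choice above when fixed buckets are not needed):

    package storage

    import (
    	"os"
    	"time"

    	"github.com/prometheus/client_golang/prometheus"
    	"github.com/prometheus/client_golang/prometheus/promauto"
    )

    var fileSyncLatency = promauto.NewSummary(prometheus.SummaryOpts{
    	Name: "example_file_sync_latency", // illustrative
    	Help: "Latency of fsync calls in milliseconds.",
    })

    // writeDurable measures the fsync alone, not the preceding buffered write.
    func writeDurable(f *os.File, b []byte) error {
    	if _, err := f.Write(b); err != nil {
    		return err
    	}
    	start := time.Now()
    	if err := f.Sync(); err != nil {
    		return err
    	}
    	fileSyncLatency.Observe(float64(time.Since(start).Milliseconds()))
    	return nil
    }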
@@ -808,10 +912,14 @@ func (dcs *DataColumnStorage) saveDataColumnSidecarsNewFile(filePath string, inp
return errWrongBytesWritten
}

syncStart := time.Now()
if err := file.Sync(); err != nil {
return errors.Wrap(err, "sync")
}

dataColumnFileSyncLatency.Observe(float64(time.Since(syncStart).Milliseconds()))
dataColumnBatchStoreCount.Observe(float64(storedCount))

return nil
}

@@ -36,16 +36,15 @@ var (
})

// Data columns
dataColumnBuckets = []float64{3, 5, 7, 9, 11, 13}
dataColumnSaveLatency = promauto.NewHistogram(prometheus.HistogramOpts{
Name: "data_column_storage_save_latency",
Help: "Latency of DataColumnSidecar storage save operations in milliseconds",
Buckets: dataColumnBuckets,
Buckets: []float64{10, 20, 30, 50, 100, 200, 500},
})
dataColumnFetchLatency = promauto.NewHistogram(prometheus.HistogramOpts{
Name: "data_column_storage_get_latency",
Help: "Latency of DataColumnSidecar storage get operations in milliseconds",
Buckets: dataColumnBuckets,
Buckets: []float64{3, 5, 7, 9, 11, 13},
})
dataColumnPrunedCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "data_column_pruned",
@@ -59,4 +58,16 @@ var (
Name: "data_column_disk_count",
Help: "Approximate number of data columns in storage",
})
dataColumnFileSyncLatency = promauto.NewSummary(prometheus.SummaryOpts{
Name: "data_column_file_sync_latency",
Help: "Latency of sync operations when saving data columns in milliseconds",
})
dataColumnBatchStoreCount = promauto.NewSummary(prometheus.SummaryOpts{
Name: "data_column_batch_store_count",
Help: "Number of data columns stored in a batch",
})
dataColumnPruneLatency = promauto.NewSummary(prometheus.SummaryOpts{
Name: "data_column_prune_latency",
Help: "Latency of data column prune operations in milliseconds",
})
)

@@ -532,12 +532,19 @@ func (s *Service) GetBlobsV2(ctx context.Context, versionedHashes []common.Hash)
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.GetBlobsV2")
defer span.End()

start := time.Now()

if !s.capabilityCache.has(GetBlobsV2) {
return nil, errors.New(fmt.Sprintf("%s is not supported", GetBlobsV2))
}

result := make([]*pb.BlobAndProofV2, len(versionedHashes))
err := s.rpcClient.CallContext(ctx, &result, GetBlobsV2, versionedHashes)

if len(result) != 0 {
getBlobsV2Latency.Observe(float64(time.Since(start).Milliseconds()))
}

return result, handleRPCError(err)
}

@@ -27,6 +27,13 @@ var (
Buckets: []float64{25, 50, 100, 200, 500, 1000, 2000, 4000},
},
)
getBlobsV2Latency = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "get_blobs_v2_latency_milliseconds",
Help: "Captures RPC latency for getBlobsV2 in milliseconds",
Buckets: []float64{25, 50, 100, 200, 500, 1000, 2000, 4000},
},
)
errParseCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "execution_parse_error_count",
Help: "The number of errors that occurred while parsing execution payload",

@@ -204,6 +204,9 @@ func InitializeDataMaps() {
bytesutil.ToBytes4(params.BeaconConfig().ElectraForkVersion): func() (interfaces.LightClientOptimisticUpdate, error) {
return lightclientConsensusTypes.NewEmptyOptimisticUpdateDeneb(), nil
},
bytesutil.ToBytes4(params.BeaconConfig().FuluForkVersion): func() (interfaces.LightClientOptimisticUpdate, error) {
return lightclientConsensusTypes.NewEmptyOptimisticUpdateDeneb(), nil
},
}

// Reset our light client finality update map.
@@ -223,5 +226,8 @@ func InitializeDataMaps() {
bytesutil.ToBytes4(params.BeaconConfig().ElectraForkVersion): func() (interfaces.LightClientFinalityUpdate, error) {
return lightclientConsensusTypes.NewEmptyFinalityUpdateElectra(), nil
},
bytesutil.ToBytes4(params.BeaconConfig().FuluForkVersion): func() (interfaces.LightClientFinalityUpdate, error) {
return lightclientConsensusTypes.NewEmptyFinalityUpdateElectra(), nil
},
}
}

@@ -14,6 +14,7 @@ import (
"github.com/OffchainLabs/prysm/v7/api"
"github.com/OffchainLabs/prysm/v7/api/server/structs"
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
coreblocks "github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
corehelpers "github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
"github.com/OffchainLabs/prysm/v7/beacon-chain/db/filters"
@@ -957,6 +958,13 @@ func (s *Server) validateConsensus(ctx context.Context, b *eth.GenericSignedBeac
}
}
}
blockRoot, err := blk.Block().HashTreeRoot()
if err != nil {
return errors.Wrap(err, "could not hash block")
}
if err := coreblocks.VerifyBlockSignatureUsingCurrentFork(parentState, blk, blockRoot); err != nil {
return errors.Wrap(err, "could not verify block signature")
}
_, err = transition.ExecuteStateTransition(ctx, parentState, blk)
if err != nil {
return errors.Wrap(err, "could not execute state transition")

@@ -130,6 +130,10 @@ func (s *Server) SubmitAttestationsV2(w http.ResponseWriter, r *http.Request) {
ctx, span := trace.StartSpan(r.Context(), "beacon.SubmitAttestationsV2")
defer span.End()

if shared.IsSyncing(ctx, w, s.SyncChecker, s.HeadFetcher, s.TimeFetcher, s.OptimisticModeFetcher) {
return
}

versionHeader := r.Header.Get(api.VersionHeader)
if versionHeader == "" {
httputil.HandleError(w, api.VersionHeader+" header is required", http.StatusBadRequest)
@@ -238,22 +242,14 @@ func (s *Server) handleAttestationsElectra(
},
})

targetState, err := s.AttestationStateFetcher.AttestationTargetState(ctx, singleAtt.Data.Target)
if err != nil {
return nil, nil, errors.Wrap(err, "could not get target state for attestation")
}
committee, err := corehelpers.BeaconCommitteeFromState(ctx, targetState, singleAtt.Data.Slot, singleAtt.CommitteeId)
if err != nil {
return nil, nil, errors.Wrap(err, "could not get committee for attestation")
}
att := singleAtt.ToAttestationElectra(committee)

wantedEpoch := slots.ToEpoch(att.Data.Slot)
// Broadcast first using CommitteeId directly (fast path)
// This matches gRPC behavior and avoids blocking on state fetching
wantedEpoch := slots.ToEpoch(singleAtt.Data.Slot)
vals, err := s.HeadFetcher.HeadValidatorsIndices(ctx, wantedEpoch)
if err != nil {
return nil, nil, errors.Wrap(err, "could not get head validator indices")
}
subnet := corehelpers.ComputeSubnetFromCommitteeAndSlot(uint64(len(vals)), att.GetCommitteeIndex(), att.Data.Slot)
subnet := corehelpers.ComputeSubnetFromCommitteeAndSlot(uint64(len(vals)), singleAtt.CommitteeId, singleAtt.Data.Slot)
if err = s.Broadcaster.BroadcastAttestation(ctx, subnet, singleAtt); err != nil {
failedBroadcasts = append(failedBroadcasts, &server.IndexedError{
Index: i,
@@ -264,17 +260,35 @@ func (s *Server) handleAttestationsElectra(
}
continue
}
}

if features.Get().EnableExperimentalAttestationPool {
if err = s.AttestationCache.Add(att); err != nil {
log.WithError(err).Error("Could not save attestation")
// Save to pool after broadcast (slow path - requires state fetching)
// Run in goroutine to avoid blocking the HTTP response
go func() {
for _, singleAtt := range validAttestations {
targetState, err := s.AttestationStateFetcher.AttestationTargetState(context.Background(), singleAtt.Data.Target)
if err != nil {
log.WithError(err).Error("Could not get target state for attestation")
continue
}
} else {
if err = s.AttestationsPool.SaveUnaggregatedAttestation(att); err != nil {
log.WithError(err).Error("Could not save attestation")
committee, err := corehelpers.BeaconCommitteeFromState(context.Background(), targetState, singleAtt.Data.Slot, singleAtt.CommitteeId)
if err != nil {
log.WithError(err).Error("Could not get committee for attestation")
continue
}
att := singleAtt.ToAttestationElectra(committee)

if features.Get().EnableExperimentalAttestationPool {
if err = s.AttestationCache.Add(att); err != nil {
log.WithError(err).Error("Could not save attestation")
}
} else {
if err = s.AttestationsPool.SaveUnaggregatedAttestation(att); err != nil {
log.WithError(err).Error("Could not save attestation")
}
}
}
}()

if len(failedBroadcasts) > 0 {
log.WithFields(logrus.Fields{
@@ -470,6 +484,10 @@ func (s *Server) SubmitSyncCommitteeSignatures(w http.ResponseWriter, r *http.Re
ctx, span := trace.StartSpan(r.Context(), "beacon.SubmitPoolSyncCommitteeSignatures")
defer span.End()

if shared.IsSyncing(ctx, w, s.SyncChecker, s.HeadFetcher, s.TimeFetcher, s.OptimisticModeFetcher) {
return
}

var req structs.SubmitSyncCommitteeSignaturesRequest
err := json.NewDecoder(r.Body).Decode(&req.Data)
switch {

@@ -26,6 +26,7 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/voluntaryexits/mock"
p2pMock "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/core"
mockSync "github.com/OffchainLabs/prysm/v7/beacon-chain/sync/initial-sync/testing"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
@@ -622,6 +623,8 @@ func TestSubmitAttestationsV2(t *testing.T) {
HeadFetcher: chainService,
ChainInfoFetcher: chainService,
TimeFetcher: chainService,
OptimisticModeFetcher: chainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
OperationNotifier: &blockchainmock.MockOperationNotifier{},
AttestationStateFetcher: chainService,
}
@@ -654,6 +657,7 @@ func TestSubmitAttestationsV2(t *testing.T) {
assert.Equal(t, primitives.Epoch(0), broadcaster.BroadcastAttestations[0].GetData().Source.Epoch)
assert.Equal(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", hexutil.Encode(broadcaster.BroadcastAttestations[0].GetData().Target.Root))
assert.Equal(t, primitives.Epoch(0), broadcaster.BroadcastAttestations[0].GetData().Target.Epoch)
time.Sleep(100 * time.Millisecond) // Wait for async pool save
assert.Equal(t, 1, s.AttestationsPool.UnaggregatedAttestationCount())
})
t.Run("multiple", func(t *testing.T) {
@@ -673,6 +677,7 @@ func TestSubmitAttestationsV2(t *testing.T) {
assert.Equal(t, http.StatusOK, writer.Code)
assert.Equal(t, true, broadcaster.BroadcastCalled.Load())
assert.Equal(t, 2, broadcaster.NumAttestations())
time.Sleep(100 * time.Millisecond) // Wait for async pool save
assert.Equal(t, 2, s.AttestationsPool.UnaggregatedAttestationCount())
})
t.Run("phase0 att post electra", func(t *testing.T) {
@@ -793,6 +798,7 @@ func TestSubmitAttestationsV2(t *testing.T) {
assert.Equal(t, primitives.Epoch(0), broadcaster.BroadcastAttestations[0].GetData().Source.Epoch)
assert.Equal(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", hexutil.Encode(broadcaster.BroadcastAttestations[0].GetData().Target.Root))
assert.Equal(t, primitives.Epoch(0), broadcaster.BroadcastAttestations[0].GetData().Target.Epoch)
time.Sleep(100 * time.Millisecond) // Wait for async pool save
assert.Equal(t, 1, s.AttestationsPool.UnaggregatedAttestationCount())
})
t.Run("multiple", func(t *testing.T) {
@@ -812,6 +818,7 @@ func TestSubmitAttestationsV2(t *testing.T) {
assert.Equal(t, http.StatusOK, writer.Code)
assert.Equal(t, true, broadcaster.BroadcastCalled.Load())
assert.Equal(t, 2, broadcaster.NumAttestations())
time.Sleep(100 * time.Millisecond) // Wait for async pool save
assert.Equal(t, 2, s.AttestationsPool.UnaggregatedAttestationCount())
})
t.Run("no body", func(t *testing.T) {
@@ -861,6 +868,27 @@ func TestSubmitAttestationsV2(t *testing.T) {
assert.Equal(t, true, strings.Contains(e.Failures[0].Message, "Incorrect attestation signature"))
})
})
t.Run("syncing", func(t *testing.T) {
chainService := &blockchainmock.ChainService{}
s := &Server{
HeadFetcher: chainService,
TimeFetcher: chainService,
OptimisticModeFetcher: chainService,
SyncChecker: &mockSync.Sync{IsSyncing: true},
}

var body bytes.Buffer
_, err := body.WriteString(singleAtt)
require.NoError(t, err)
request := httptest.NewRequest(http.MethodPost, "http://example.com", &body)
request.Header.Set(api.VersionHeader, version.String(version.Phase0))
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}

s.SubmitAttestationsV2(writer, request)
assert.Equal(t, http.StatusServiceUnavailable, writer.Code)
assert.Equal(t, true, strings.Contains(writer.Body.String(), "Beacon node is currently syncing"))
})
}

func TestListVoluntaryExits(t *testing.T) {
@@ -1057,14 +1085,19 @@ func TestSubmitSyncCommitteeSignatures(t *testing.T) {

t.Run("single", func(t *testing.T) {
broadcaster := &p2pMock.MockBroadcaster{}
chainService := &blockchainmock.ChainService{
State: st,
SyncCommitteeIndices: []primitives.CommitteeIndex{0},
}
s := &Server{
HeadFetcher: chainService,
TimeFetcher: chainService,
OptimisticModeFetcher: chainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
CoreService: &core.Service{
SyncCommitteePool: synccommittee.NewStore(),
P2P: broadcaster,
HeadFetcher: &blockchainmock.ChainService{
State: st,
SyncCommitteeIndices: []primitives.CommitteeIndex{0},
},
HeadFetcher: chainService,
},
}

@@ -1089,14 +1122,19 @@ func TestSubmitSyncCommitteeSignatures(t *testing.T) {
})
t.Run("multiple", func(t *testing.T) {
broadcaster := &p2pMock.MockBroadcaster{}
chainService := &blockchainmock.ChainService{
State: st,
SyncCommitteeIndices: []primitives.CommitteeIndex{0},
}
s := &Server{
HeadFetcher: chainService,
TimeFetcher: chainService,
OptimisticModeFetcher: chainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
CoreService: &core.Service{
SyncCommitteePool: synccommittee.NewStore(),
P2P: broadcaster,
HeadFetcher: &blockchainmock.ChainService{
State: st,
SyncCommitteeIndices: []primitives.CommitteeIndex{0},
},
HeadFetcher: chainService,
},
}

@@ -1120,13 +1158,18 @@ func TestSubmitSyncCommitteeSignatures(t *testing.T) {
})
t.Run("invalid", func(t *testing.T) {
broadcaster := &p2pMock.MockBroadcaster{}
chainService := &blockchainmock.ChainService{
State: st,
}
s := &Server{
HeadFetcher: chainService,
TimeFetcher: chainService,
OptimisticModeFetcher: chainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
CoreService: &core.Service{
SyncCommitteePool: synccommittee.NewStore(),
P2P: broadcaster,
HeadFetcher: &blockchainmock.ChainService{
State: st,
},
HeadFetcher: chainService,
},
}

@@ -1149,7 +1192,13 @@ func TestSubmitSyncCommitteeSignatures(t *testing.T) {
assert.Equal(t, false, broadcaster.BroadcastCalled.Load())
})
t.Run("empty", func(t *testing.T) {
s := &Server{}
chainService := &blockchainmock.ChainService{State: st}
s := &Server{
HeadFetcher: chainService,
TimeFetcher: chainService,
OptimisticModeFetcher: chainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
}

var body bytes.Buffer
_, err := body.WriteString("[]")
@@ -1166,7 +1215,13 @@ func TestSubmitSyncCommitteeSignatures(t *testing.T) {
assert.Equal(t, true, strings.Contains(e.Message, "No data submitted"))
})
t.Run("no body", func(t *testing.T) {
s := &Server{}
chainService := &blockchainmock.ChainService{State: st}
s := &Server{
HeadFetcher: chainService,
TimeFetcher: chainService,
OptimisticModeFetcher: chainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
}

request := httptest.NewRequest(http.MethodPost, "http://example.com", nil)
writer := httptest.NewRecorder()
@@ -1179,6 +1234,26 @@ func TestSubmitSyncCommitteeSignatures(t *testing.T) {
assert.Equal(t, http.StatusBadRequest, e.Code)
assert.Equal(t, true, strings.Contains(e.Message, "No data submitted"))
})
t.Run("syncing", func(t *testing.T) {
chainService := &blockchainmock.ChainService{State: st}
s := &Server{
HeadFetcher: chainService,
TimeFetcher: chainService,
OptimisticModeFetcher: chainService,
SyncChecker: &mockSync.Sync{IsSyncing: true},
}

var body bytes.Buffer
_, err := body.WriteString(singleSyncCommitteeMsg)
require.NoError(t, err)
request := httptest.NewRequest(http.MethodPost, "http://example.com", &body)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}

s.SubmitSyncCommitteeSignatures(writer, request)
assert.Equal(t, http.StatusServiceUnavailable, writer.Code)
assert.Equal(t, true, strings.Contains(writer.Body.String(), "Beacon node is currently syncing"))
})
}

func TestListBLSToExecutionChanges(t *testing.T) {

@@ -40,6 +40,7 @@ func GetForkSchedule(w http.ResponseWriter, r *http.Request) {
httputil.WriteJson(w, &structs.GetForkScheduleResponse{
Data: data,
})
return
}
previous := schedule[0]
for _, entry := range schedule {

@@ -52,24 +52,27 @@ func (vs *Server) ProposeAttestation(ctx context.Context, att *ethpb.Attestation
ctx, span := trace.StartSpan(ctx, "AttesterServer.ProposeAttestation")
defer span.End()

if vs.SyncChecker.Syncing() {
return nil, status.Errorf(codes.Unavailable, "Syncing to latest head, not ready to respond")
}

resp, err := vs.proposeAtt(ctx, att, att.GetData().CommitteeIndex)
if err != nil {
return nil, err
}

if features.Get().EnableExperimentalAttestationPool {
if err = vs.AttestationCache.Add(att); err != nil {
log.WithError(err).Error("Could not save attestation")
}
} else {
go func() {
go func() {
if features.Get().EnableExperimentalAttestationPool {
if err := vs.AttestationCache.Add(att); err != nil {
log.WithError(err).Error("Could not save attestation")
}
} else {
attCopy := att.Copy()
if err := vs.AttPool.SaveUnaggregatedAttestation(attCopy); err != nil {
log.WithError(err).Error("Could not save unaggregated attestation")
return
}
}()
}
}
}()

return resp, nil
}
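Both branches of the pool write now sit behind a single goroutine, so the gRPC response is unblocked as soon as proposeAtt succeeds. The shape of that fire-and-forget handoff, sketched with placeholder types — note the copy taken before the goroutine starts, so the caller may keep mutating its value safely:

    package main

    import (
    	"fmt"
    	"time"
    )

    type attestation struct{ slot uint64 }

    // respondThenSave returns immediately and pushes the slower pool write into
    // the background; errors are logged, not returned to the caller.
    func respondThenSave(att attestation, save func(attestation) error) string {
    	attCopy := att // stands in for att.Copy(): the goroutine owns its snapshot
    	go func() {
    		if err := save(attCopy); err != nil {
    			fmt.Println("could not save attestation:", err)
    		}
    	}()
    	return "ok"
    }

    func main() {
    	fmt.Println(respondThenSave(attestation{slot: 42}, func(attestation) error { return nil }))
    	time.Sleep(10 * time.Millisecond) // demo only: give the goroutine time to run
    }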
@@ -82,6 +85,10 @@ func (vs *Server) ProposeAttestationElectra(ctx context.Context, singleAtt *ethp
ctx, span := trace.StartSpan(ctx, "AttesterServer.ProposeAttestationElectra")
defer span.End()

if vs.SyncChecker.Syncing() {
return nil, status.Errorf(codes.Unavailable, "Syncing to latest head, not ready to respond")
}

resp, err := vs.proposeAtt(ctx, singleAtt, singleAtt.GetCommitteeIndex())
if err != nil {
return nil, err
@@ -98,18 +105,17 @@ func (vs *Server) ProposeAttestationElectra(ctx context.Context, singleAtt *ethp

singleAttCopy := singleAtt.Copy()
att := singleAttCopy.ToAttestationElectra(committee)
if features.Get().EnableExperimentalAttestationPool {
if err = vs.AttestationCache.Add(att); err != nil {
log.WithError(err).Error("Could not save attestation")
}
} else {
go func() {
go func() {
if features.Get().EnableExperimentalAttestationPool {
if err := vs.AttestationCache.Add(att); err != nil {
log.WithError(err).Error("Could not save attestation")
}
} else {
if err := vs.AttPool.SaveUnaggregatedAttestation(att); err != nil {
log.WithError(err).Error("Could not save unaggregated attestation")
return
}
}()
}
}
}()

return resp, nil
}

@@ -38,6 +38,7 @@ func TestProposeAttestation(t *testing.T) {
OperationNotifier: (&mock.ChainService{}).OperationNotifier(),
TimeFetcher: chainService,
AttestationStateFetcher: chainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
head := util.NewBeaconBlock()
head.Block.Slot = 999
@@ -141,6 +142,7 @@ func TestProposeAttestation_IncorrectSignature(t *testing.T) {
P2P: &mockp2p.MockBroadcaster{},
AttPool: attestations.NewPool(),
OperationNotifier: (&mock.ChainService{}).OperationNotifier(),
SyncChecker: &mockSync.Sync{IsSyncing: false},
}

req := util.HydrateAttestation(&ethpb.Attestation{})
@@ -149,6 +151,37 @@ func TestProposeAttestation_IncorrectSignature(t *testing.T) {
assert.ErrorContains(t, wanted, err)
}

func TestProposeAttestation_Syncing(t *testing.T) {
attesterServer := &Server{
SyncChecker: &mockSync.Sync{IsSyncing: true},
}

req := util.HydrateAttestation(&ethpb.Attestation{})
_, err := attesterServer.ProposeAttestation(t.Context(), req)
assert.ErrorContains(t, "Syncing to latest head", err)
s, ok := status.FromError(err)
require.Equal(t, true, ok)
assert.Equal(t, codes.Unavailable, s.Code())
}

func TestProposeAttestationElectra_Syncing(t *testing.T) {
attesterServer := &Server{
SyncChecker: &mockSync.Sync{IsSyncing: true},
}

req := &ethpb.SingleAttestation{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Root: make([]byte, 32)},
Target: &ethpb.Checkpoint{Root: make([]byte, 32)},
},
}
_, err := attesterServer.ProposeAttestationElectra(t.Context(), req)
assert.ErrorContains(t, "Syncing to latest head", err)
s, ok := status.FromError(err)
require.Equal(t, true, ok)
assert.Equal(t, codes.Unavailable, s.Code())
}

func TestGetAttestationData_OK(t *testing.T) {
block := util.NewBeaconBlock()
block.Block.Slot = 3*params.BeaconConfig().SlotsPerEpoch + 1

@@ -163,11 +163,15 @@ func (s *Service) validateWithKzgBatchVerifier(ctx context.Context, dataColumns

resChan := make(chan error, 1)
verificationSet := &kzgVerifier{dataColumns: dataColumns, resChan: resChan}
s.kzgChan <- verificationSet

ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()

select {
case s.kzgChan <- verificationSet:
case <-ctx.Done():
return pubsub.ValidationIgnore, ctx.Err()
}

select {
case <-ctx.Done():
return pubsub.ValidationIgnore, ctx.Err() // parent context canceled, give up
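The fix above replaces a bare channel send, which can park the goroutine forever if the verifier loop is saturated, with a select bounded by a context deadline. The core pattern in isolation:

    package main

    import (
    	"context"
    	"errors"
    	"fmt"
    	"time"
    )

    // sendWithTimeout abandons the handoff if no consumer takes the value
    // before the deadline, instead of blocking indefinitely.
    func sendWithTimeout(parent context.Context, ch chan<- int, v int, timeout time.Duration) error {
    	ctx, cancel := context.WithTimeout(parent, timeout)
    	defer cancel()
    	select {
    	case ch <- v:
    		return nil
    	case <-ctx.Done():
    		return ctx.Err()
    	}
    }

    func main() {
    	ch := make(chan int) // unbuffered: a send blocks until someone receives
    	err := sendWithTimeout(context.Background(), ch, 1, 50*time.Millisecond)
    	fmt.Println(errors.Is(err, context.DeadlineExceeded)) // true: nobody received
    }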
@@ -3,6 +3,7 @@ package sync
import (
"bytes"
"context"
"fmt"
"maps"
"slices"
"sync"
@@ -243,8 +244,10 @@ func requestDirectSidecarsFromPeers(
}

// Compute missing indices by root, excluding those already in storage.
var lastRoot [fieldparams.RootLength]byte
missingIndicesByRoot := make(map[[fieldparams.RootLength]byte]map[uint64]bool, len(incompleteRoots))
for root := range incompleteRoots {
lastRoot = root
storedIndices := storedIndicesByRoot[root]

missingIndices := make(map[uint64]bool, len(requestedIndices))
@@ -259,6 +262,7 @@ func requestDirectSidecarsFromPeers(
}
}

initialMissingRootCount := len(missingIndicesByRoot)
initialMissingCount := computeTotalCount(missingIndicesByRoot)

indicesByRootByPeer, err := computeIndicesByRootByPeer(params.P2P, slotByRoot, missingIndicesByRoot, connectedPeers)
@@ -301,11 +305,19 @@ func requestDirectSidecarsFromPeers(
}
}

log.WithFields(logrus.Fields{
"duration": time.Since(start),
"initialMissingCount": initialMissingCount,
"finalMissingCount": computeTotalCount(missingIndicesByRoot),
}).Debug("Requested direct data column sidecars from peers")
log := log.WithFields(logrus.Fields{
"duration": time.Since(start),
"initialMissingRootCount": initialMissingRootCount,
"initialMissingCount": initialMissingCount,
"finalMissingRootCount": len(missingIndicesByRoot),
"finalMissingCount": computeTotalCount(missingIndicesByRoot),
})

if initialMissingRootCount == 1 {
log = log.WithField("root", fmt.Sprintf("%#x", lastRoot))
}

log.Debug("Requested direct data column sidecars from peers")

return verifiedColumnsByRoot, nil
}
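Rebinding log to a field-enriched logrus entry lets the root field be attached conditionally — only when exactly one root was in play and the value is unambiguous — before a single Debug call. A minimal sketch of that pattern:

    package main

    import (
    	"fmt"

    	"github.com/sirupsen/logrus"
    )

    func logSummary(initialMissing, finalMissing int, root [32]byte) {
    	entry := logrus.WithFields(logrus.Fields{
    		"initialMissingCount": initialMissing,
    		"finalMissingCount":   finalMissing,
    	})
    	if initialMissing == 1 {
    		// Attach the root only when it identifies a single request.
    		entry = entry.WithField("root", fmt.Sprintf("%#x", root))
    	}
    	entry.Debug("requested sidecars from peers")
    }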
@@ -304,6 +304,36 @@ func TestValidateWithKzgBatchVerifier_DeadlockOnTimeout(t *testing.T) {
}
}

func TestValidateWithKzgBatchVerifier_ContextCanceledBeforeSend(t *testing.T) {
cancelledCtx, cancel := context.WithCancel(t.Context())
cancel()

service := &Service{
ctx: context.Background(),
kzgChan: make(chan *kzgVerifier),
}

done := make(chan struct{})
go func() {
result, err := service.validateWithKzgBatchVerifier(cancelledCtx, nil)
require.Equal(t, pubsub.ValidationIgnore, result)
require.ErrorIs(t, err, context.Canceled)
close(done)
}()

select {
case <-done:
case <-time.After(500 * time.Millisecond):
t.Fatal("validateWithKzgBatchVerifier did not return after context cancellation")
}

select {
case <-service.kzgChan:
t.Fatal("verificationSet was sent to kzgChan despite canceled context")
default:
}
}

func createValidTestDataColumns(t *testing.T, count int) []blocks.RODataColumn {
_, roSidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, count)
if len(roSidecars) >= count {

@@ -204,6 +204,13 @@ var (
},
)

dataColumnsRecoveredFromELAttempts = promauto.NewCounter(
prometheus.CounterOpts{
Name: "data_columns_recovered_from_el_attempts",
Help: "Count the number of data columns recovery attempts from the execution layer.",
},
)

dataColumnsRecoveredFromELTotal = promauto.NewCounter(
prometheus.CounterOpts{
Name: "data_columns_recovered_from_el_total",
@@ -242,6 +249,13 @@ var (
Buckets: []float64{100, 250, 500, 750, 1000, 1500, 2000, 4000, 8000, 12000, 16000},
},
)

dataColumnSidecarsObtainedViaELCount = promauto.NewSummary(
prometheus.SummaryOpts{
Name: "data_column_obtained_via_el_count",
Help: "Count the number of data column sidecars obtained via the execution layer.",
},
)
)

func (s *Service) updateMetrics() {
@@ -3,9 +3,9 @@ package sync
import (
"bytes"
"context"
"encoding/hex"
"fmt"
"slices"
"time"

"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
@@ -21,13 +21,23 @@ import (
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/time"
"github.com/OffchainLabs/prysm/v7/time/slots"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

var pendingAttsLimit = 32768
const pendingAttsLimit = 32768

// aggregatorIndexFilter defines how aggregator index should be handled in equality checks.
type aggregatorIndexFilter int

const (
// ignoreAggregatorIndex means aggregates differing only by aggregator index are considered equal.
ignoreAggregatorIndex aggregatorIndexFilter = iota
// includeAggregatorIndex means aggregator index must also match for aggregates to be considered equal.
includeAggregatorIndex
)

// This method processes pending attestations as a "known" block as arrived. With validations,
// the valid attestations get saved into the operation mem pool, and the invalid attestations gets deleted
@@ -50,16 +60,7 @@ func (s *Service) processPendingAttsForBlock(ctx context.Context, bRoot [32]byte
attestations := s.blkRootToPendingAtts[bRoot]
s.pendingAttsLock.RUnlock()

if len(attestations) > 0 {
start := time.Now()
s.processAttestations(ctx, attestations)
duration := time.Since(start)
log.WithFields(logrus.Fields{
"blockRoot": hex.EncodeToString(bytesutil.Trunc(bRoot[:])),
"pendingAttsCount": len(attestations),
"duration": duration,
}).Debug("Verified and saved pending attestations to pool")
}
s.processAttestations(ctx, attestations)

randGen := rand.NewGenerator()
// Delete the missing block root key from pending attestation queue so a node will not request for the block again.
@@ -79,26 +80,71 @@ func (s *Service) processPendingAttsForBlock(ctx context.Context, bRoot [32]byte
return s.sendBatchRootRequest(ctx, pendingRoots, randGen)
}

// processAttestations processes a list of attestations.
// It assumes (for logging purposes only) that all attestations pertain to the same block.
func (s *Service) processAttestations(ctx context.Context, attestations []any) {
if len(attestations) == 0 {
return
}

firstAttestation := attestations[0]
var blockRoot []byte
switch v := firstAttestation.(type) {
case ethpb.Att:
blockRoot = v.GetData().BeaconBlockRoot
case ethpb.SignedAggregateAttAndProof:
blockRoot = v.AggregateAttestationAndProof().AggregateVal().GetData().BeaconBlockRoot
default:
log.Warnf("Unexpected attestation type %T, skipping processing", v)
return
}

validAggregates := make([]ethpb.SignedAggregateAttAndProof, 0, len(attestations))
startAggregate := time.Now()
atts := make([]ethpb.Att, 0, len(attestations))
aggregateAttAndProofCount := 0
for _, att := range attestations {
switch v := att.(type) {
case ethpb.Att:
atts = append(atts, v)
case ethpb.SignedAggregateAttAndProof:
s.processAggregate(ctx, v)
aggregateAttAndProofCount++
// Avoid processing multiple aggregates only differing by aggregator index.
if slices.ContainsFunc(validAggregates, func(other ethpb.SignedAggregateAttAndProof) bool {
return pendingAggregatesAreEqual(v, other, ignoreAggregatorIndex)
}) {
continue
}

if err := s.processAggregate(ctx, v); err != nil {
log.WithError(err).Debug("Pending aggregate attestation could not be processed")
continue
}

validAggregates = append(validAggregates, v)
default:
log.Warnf("Unexpected attestation type %T, skipping", v)
}
}
durationAggregateAttAndProof := time.Since(startAggregate)

startAtts := time.Now()
for _, bucket := range bucketAttestationsByData(atts) {
s.processAttestationBucket(ctx, bucket)
}

durationAtts := time.Since(startAtts)

log.WithFields(logrus.Fields{
"blockRoot": fmt.Sprintf("%#x", blockRoot),
"totalCount": len(attestations),
"aggregateAttAndProofCount": aggregateAttAndProofCount,
"uniqueAggregateAttAndProofCount": len(validAggregates),
"attCount": len(atts),
"durationTotal": durationAggregateAttAndProof + durationAtts,
"durationAggregateAttAndProof": durationAggregateAttAndProof,
"durationAtts": durationAtts,
}).Debug("Verified and saved pending attestations to pool")
}
// attestationBucket groups attestations with the same AttestationData for batch processing.
@@ -303,21 +349,20 @@ func (s *Service) processVerifiedAttestation(
})
}

func (s *Service) processAggregate(ctx context.Context, aggregate ethpb.SignedAggregateAttAndProof) {
func (s *Service) processAggregate(ctx context.Context, aggregate ethpb.SignedAggregateAttAndProof) error {
res, err := s.validateAggregatedAtt(ctx, aggregate)
if err != nil {
log.WithError(err).Debug("Pending aggregated attestation failed validation")
return
return errors.Wrap(err, "validate aggregated att")
}

if res != pubsub.ValidationAccept || !s.validateBlockInAttestation(ctx, aggregate) {
log.Debug("Pending aggregated attestation failed validation")
return
return errors.New("Pending aggregated attestation failed validation")
}

att := aggregate.AggregateAttestationAndProof().AggregateVal()
if err := s.saveAttestation(att); err != nil {
log.WithError(err).Debug("Could not save aggregated attestation")
return
return errors.Wrap(err, "save attestation")
}

_ = s.setAggregatorIndexEpochSeen(att.GetData().Target.Epoch, aggregate.AggregateAttestationAndProof().GetAggregatorIndex())
@@ -325,6 +370,8 @@ func (s *Service) processAggregate(ctx context.Context, aggregate ethpb.SignedAg
if err := s.cfg.p2p.Broadcast(ctx, aggregate); err != nil {
log.WithError(err).Debug("Could not broadcast aggregated attestation")
}

return nil
}

// This defines how pending aggregates are saved in the map. The key is the
@@ -336,7 +383,7 @@ func (s *Service) savePendingAggregate(agg ethpb.SignedAggregateAttAndProof) {

s.savePending(root, agg, func(other any) bool {
a, ok := other.(ethpb.SignedAggregateAttAndProof)
return ok && pendingAggregatesAreEqual(agg, a)
return ok && pendingAggregatesAreEqual(agg, a, includeAggregatorIndex)
})
}

@@ -391,13 +438,19 @@ func (s *Service) savePending(root [32]byte, pending any, isEqual func(other any
s.blkRootToPendingAtts[root] = append(s.blkRootToPendingAtts[root], pending)
}

func pendingAggregatesAreEqual(a, b ethpb.SignedAggregateAttAndProof) bool {
// pendingAggregatesAreEqual checks if two pending aggregate attestations are equal.
// The filter parameter controls whether aggregator index is considered in the equality check.
func pendingAggregatesAreEqual(a, b ethpb.SignedAggregateAttAndProof, filter aggregatorIndexFilter) bool {
if a.Version() != b.Version() {
return false
}
if a.AggregateAttestationAndProof().GetAggregatorIndex() != b.AggregateAttestationAndProof().GetAggregatorIndex() {
return false

if filter == includeAggregatorIndex {
if a.AggregateAttestationAndProof().GetAggregatorIndex() != b.AggregateAttestationAndProof().GetAggregatorIndex() {
return false
}
}

aAtt := a.AggregateAttestationAndProof().AggregateVal()
bAtt := b.AggregateAttestationAndProof().AggregateVal()
if aAtt.GetData().Slot != bAtt.GetData().Slot {

@@ -94,7 +94,7 @@ func TestProcessPendingAtts_NoBlockRequestBlock(t *testing.T) {
// Process block A (which exists and has no pending attestations)
// This should skip processing attestations for A and request blocks B and C
require.NoError(t, r.processPendingAttsForBlock(t.Context(), rootA))
require.LogsContain(t, hook, "Requesting block by root")
require.LogsContain(t, hook, "Requesting blocks by root")
}
func TestProcessPendingAtts_HasBlockSaveUnaggregatedAtt(t *testing.T) {
@@ -911,17 +911,17 @@ func Test_pendingAggregatesAreEqual(t *testing.T) {
},
AggregationBits: bitfield.Bitlist{0b1111},
}}}
assert.Equal(t, true, pendingAggregatesAreEqual(a, b))
assert.Equal(t, true, pendingAggregatesAreEqual(a, b, includeAggregatorIndex))
})
t.Run("different version", func(t *testing.T) {
a := &ethpb.SignedAggregateAttestationAndProof{Message: &ethpb.AggregateAttestationAndProof{AggregatorIndex: 1}}
b := &ethpb.SignedAggregateAttestationAndProofElectra{Message: &ethpb.AggregateAttestationAndProofElectra{AggregatorIndex: 1}}
assert.Equal(t, false, pendingAggregatesAreEqual(a, b))
assert.Equal(t, false, pendingAggregatesAreEqual(a, b, includeAggregatorIndex))
})
t.Run("different aggregator index", func(t *testing.T) {
a := &ethpb.SignedAggregateAttestationAndProof{Message: &ethpb.AggregateAttestationAndProof{AggregatorIndex: 1}}
b := &ethpb.SignedAggregateAttestationAndProof{Message: &ethpb.AggregateAttestationAndProof{AggregatorIndex: 2}}
assert.Equal(t, false, pendingAggregatesAreEqual(a, b))
assert.Equal(t, false, pendingAggregatesAreEqual(a, b, includeAggregatorIndex))
})
t.Run("different slot", func(t *testing.T) {
a := &ethpb.SignedAggregateAttestationAndProof{
@@ -942,7 +942,7 @@ func Test_pendingAggregatesAreEqual(t *testing.T) {
},
AggregationBits: bitfield.Bitlist{0b1111},
}}}
assert.Equal(t, false, pendingAggregatesAreEqual(a, b))
assert.Equal(t, false, pendingAggregatesAreEqual(a, b, includeAggregatorIndex))
})
t.Run("different committee index", func(t *testing.T) {
a := &ethpb.SignedAggregateAttestationAndProof{
@@ -963,7 +963,7 @@ func Test_pendingAggregatesAreEqual(t *testing.T) {
},
AggregationBits: bitfield.Bitlist{0b1111},
}}}
assert.Equal(t, false, pendingAggregatesAreEqual(a, b))
assert.Equal(t, false, pendingAggregatesAreEqual(a, b, includeAggregatorIndex))
})
t.Run("different aggregation bits", func(t *testing.T) {
a := &ethpb.SignedAggregateAttestationAndProof{
@@ -984,7 +984,30 @@ func Test_pendingAggregatesAreEqual(t *testing.T) {
},
AggregationBits: bitfield.Bitlist{0b1000},
}}}
assert.Equal(t, false, pendingAggregatesAreEqual(a, b))
assert.Equal(t, false, pendingAggregatesAreEqual(a, b, includeAggregatorIndex))
})
t.Run("different aggregator index should be equal while ignoring aggregator index", func(t *testing.T) {
a := &ethpb.SignedAggregateAttestationAndProof{
Message: &ethpb.AggregateAttestationAndProof{
AggregatorIndex: 1,
Aggregate: &ethpb.Attestation{
Data: &ethpb.AttestationData{
Slot: 1,
CommitteeIndex: 1,
},
AggregationBits: bitfield.Bitlist{0b1111},
}}}
b := &ethpb.SignedAggregateAttestationAndProof{
Message: &ethpb.AggregateAttestationAndProof{
AggregatorIndex: 2,
Aggregate: &ethpb.Attestation{
Data: &ethpb.AttestationData{
Slot: 1,
CommitteeIndex: 1,
},
AggregationBits: bitfield.Bitlist{0b1111},
}}}
assert.Equal(t, true, pendingAggregatesAreEqual(a, b, ignoreAggregatorIndex))
})
}
@@ -2,7 +2,6 @@ package sync

import (
"context"
"encoding/hex"
"fmt"
"slices"
"sync"
@@ -44,11 +43,13 @@ func (s *Service) processPendingBlocksQueue() {
if !s.chainIsStarted() {
return
}

locker.Lock()
defer locker.Unlock()

if err := s.processPendingBlocks(s.ctx); err != nil {
log.WithError(err).Debug("Could not process pending blocks")
}
locker.Unlock()
})
}

@@ -73,8 +74,10 @@ func (s *Service) processPendingBlocks(ctx context.Context) error {
randGen := rand.NewGenerator()
var parentRoots [][32]byte

blkRoots := make([][32]byte, 0, len(sortedSlots)*maxBlocksPerSlot)

// Iterate through sorted slots.
for _, slot := range sortedSlots {
for i, slot := range sortedSlots {
// Skip processing if slot is in the future.
if slot > s.cfg.clock.CurrentSlot() {
continue
@@ -91,6 +94,9 @@ func (s *Service) processPendingBlocks(ctx context.Context) error {

// Process each block in the queue.
for _, b := range blocksInCache {
start := time.Now()
totalDuration := time.Duration(0)

if err := blocks.BeaconBlockIsNil(b); err != nil {
continue
}
@@ -147,19 +153,34 @@ func (s *Service) processPendingBlocks(ctx context.Context) error {
}
cancelFunction()

// Process pending attestations for this block.
if err := s.processPendingAttsForBlock(ctx, blkRoot); err != nil {
log.WithError(err).Debug("Failed to process pending attestations for block")
}
blkRoots = append(blkRoots, blkRoot)

// Remove the processed block from the queue.
if err := s.removeBlockFromQueue(b, blkRoot); err != nil {
return err
}
log.WithFields(logrus.Fields{"slot": slot, "blockRoot": hex.EncodeToString(bytesutil.Trunc(blkRoot[:]))}).Debug("Processed pending block and cleared it in cache")

duration := time.Since(start)
totalDuration += duration
log.WithFields(logrus.Fields{
"slotIndex": fmt.Sprintf("%d/%d", i+1, len(sortedSlots)),
"slot": slot,
"root": fmt.Sprintf("%#x", blkRoot),
"duration": duration,
"totalDuration": totalDuration,
}).Debug("Processed pending block and cleared it in cache")
}

span.End()
}

for _, blkRoot := range blkRoots {
// Process pending attestations for this block.
if err := s.processPendingAttsForBlock(ctx, blkRoot); err != nil {
log.WithError(err).Debug("Failed to process pending attestations for block")
}
}

return s.sendBatchRootRequest(ctx, parentRoots, randGen)
}

@@ -379,6 +400,19 @@ func (s *Service) sendBatchRootRequest(ctx context.Context, roots [][32]byte, ra
req = roots[:maxReqBlock]
}

if logrus.GetLevel() >= logrus.DebugLevel {
rootsStr := make([]string, 0, len(roots))
for _, req := range roots {
rootsStr = append(rootsStr, fmt.Sprintf("%#x", req))
}

log.WithFields(logrus.Fields{
"peer": pid,
"count": len(req),
"roots": rootsStr,
}).Debug("Requesting blocks by root")
}

// Send the request to the peer.
if err := s.sendBeaconBlocksRequest(ctx, &req, pid); err != nil {
tracing.AnnotateError(span, err)
@@ -438,8 +472,6 @@ func (s *Service) filterOutPendingAndSynced(roots [][fieldparams.RootLength]byte
roots = append(roots[:i], roots[i+1:]...)
continue
}

log.WithField("blockRoot", fmt.Sprintf("%#x", r)).Debug("Requesting block by root")
}
return roots
}
@@ -189,12 +189,30 @@ func (s *Service) processDataColumnSidecarsFromExecution(ctx context.Context, so
ctx, cancel := context.WithTimeout(ctx, secondsPerHalfSlot)
defer cancel()

log := log.WithFields(logrus.Fields{
"root": fmt.Sprintf("%#x", source.Root()),
"slot": source.Slot(),
"proposerIndex": source.ProposerIndex(),
"type": source.Type(),
})

var constructedSidecarCount uint64
for iteration := uint64(0); ; /*no stop condition*/ iteration++ {
log = log.WithField("iteration", iteration)

// Exit early if all sidecars to sample have been seen.
if s.haveAllSidecarsBeenSeen(source.Slot(), source.ProposerIndex(), columnIndicesToSample) {
if iteration > 0 && constructedSidecarCount == 0 {
log.Debug("No data column sidecars constructed from the execution client")
}

return nil, nil
}

if iteration == 0 {
dataColumnsRecoveredFromELAttempts.Inc()
}

// Try to reconstruct data column constructedSidecars from the execution client.
constructedSidecars, err := s.cfg.executionReconstructor.ConstructDataColumnSidecars(ctx, source)
if err != nil {
@@ -202,8 +220,8 @@ func (s *Service) processDataColumnSidecarsFromExecution(ctx context.Context, so
}

// No sidecars are retrieved from the EL, retry later
sidecarCount := uint64(len(constructedSidecars))
if sidecarCount == 0 {
constructedSidecarCount = uint64(len(constructedSidecars))
if constructedSidecarCount == 0 {
if ctx.Err() != nil {
return nil, ctx.Err()
}
@@ -212,9 +230,11 @@ func (s *Service) processDataColumnSidecarsFromExecution(ctx context.Context, so
continue
}

dataColumnsRecoveredFromELTotal.Inc()

// Boundary check.
if sidecarCount != fieldparams.NumberOfColumns {
return nil, errors.Errorf("reconstruct data column sidecars returned %d sidecars, expected %d - should never happen", sidecarCount, fieldparams.NumberOfColumns)
if constructedSidecarCount != fieldparams.NumberOfColumns {
return nil, errors.Errorf("reconstruct data column sidecars returned %d sidecars, expected %d - should never happen", constructedSidecarCount, fieldparams.NumberOfColumns)
}

unseenIndices, err := s.broadcastAndReceiveUnseenDataColumnSidecars(ctx, source.Slot(), source.ProposerIndex(), columnIndicesToSample, constructedSidecars)
@@ -222,19 +242,12 @@ func (s *Service) processDataColumnSidecarsFromExecution(ctx context.Context, so
return nil, errors.Wrap(err, "broadcast and receive unseen data column sidecars")
}

if len(unseenIndices) > 0 {
dataColumnsRecoveredFromELTotal.Inc()
log.WithFields(logrus.Fields{
"count": len(unseenIndices),
"indices": helpers.SortedPrettySliceFromMap(unseenIndices),
}).Debug("Constructed data column sidecars from the execution client")

log.WithFields(logrus.Fields{
"root": fmt.Sprintf("%#x", source.Root()),
"slot": source.Slot(),
"proposerIndex": source.ProposerIndex(),
"iteration": iteration,
"type": source.Type(),
"count": len(unseenIndices),
"indices": helpers.SortedPrettySliceFromMap(unseenIndices),
}).Debug("Constructed data column sidecars from the execution client")
}
dataColumnSidecarsObtainedViaELCount.Observe(float64(len(unseenIndices)))

return nil, nil
}
@@ -3,11 +3,12 @@ package sync
import (
"context"
"fmt"
"math"
"slices"
"time"

"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/operation"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v7/beacon-chain/verification"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
@@ -51,14 +52,12 @@ func (s *Service) validateDataColumn(ctx context.Context, pid peer.ID, msg *pubs
// Decode the message, reject if it fails.
m, err := s.decodePubsubMessage(msg)
if err != nil {
log.WithError(err).Error("Failed to decode message")
return pubsub.ValidationReject, err
}

// Reject messages that are not of the expected type.
dcsc, ok := m.(*eth.DataColumnSidecar)
if !ok {
log.WithField("message", m).Error("Message is not of type *eth.DataColumnSidecar")
return pubsub.ValidationReject, errWrongMessage
}

@@ -194,19 +193,13 @@ func (s *Service) validateDataColumn(ctx context.Context, pid peer.ID, msg *pubs
dataColumnSidecarArrivalGossipSummary.Observe(float64(sinceSlotStartTime.Milliseconds()))
dataColumnSidecarVerificationGossipHistogram.Observe(float64(validationTime.Milliseconds()))

peerGossipScore := s.cfg.p2p.Peers().Scorers().GossipScorer().Score(pid)

select {
case s.dataColumnLogCh <- dataColumnLogEntry{
Slot: roDataColumn.Slot(),
ColIdx: roDataColumn.Index,
PropIdx: roDataColumn.ProposerIndex(),
BlockRoot: roDataColumn.BlockRoot(),
ParentRoot: roDataColumn.ParentRoot(),
PeerSuffix: pid.String()[len(pid.String())-6:],
PeerGossipScore: peerGossipScore,
validationTime: validationTime,
sinceStartTime: sinceSlotStartTime,
slot: roDataColumn.Slot(),
index: roDataColumn.Index,
root: roDataColumn.BlockRoot(),
validationTime: validationTime,
sinceStartTime: sinceSlotStartTime,
}:
default:
log.WithField("slot", roDataColumn.Slot()).Warn("Failed to send data column log entry")
@@ -251,68 +244,69 @@ func computeCacheKey(slot primitives.Slot, proposerIndex primitives.ValidatorInd
}

type dataColumnLogEntry struct {
Slot primitives.Slot
ColIdx uint64
PropIdx primitives.ValidatorIndex
BlockRoot [32]byte
ParentRoot [32]byte
PeerSuffix string
PeerGossipScore float64
validationTime time.Duration
sinceStartTime time.Duration
slot primitives.Slot
index uint64
root [32]byte
validationTime time.Duration
sinceStartTime time.Duration
}

func (s *Service) processDataColumnLogs() {
ticker := time.NewTicker(1 * time.Second)
defer ticker.Stop()

slotStats := make(map[primitives.Slot][fieldparams.NumberOfColumns]dataColumnLogEntry)
slotStats := make(map[[fieldparams.RootLength]byte][]dataColumnLogEntry)

for {
select {
case entry := <-s.dataColumnLogCh:
cols := slotStats[entry.Slot]
cols[entry.ColIdx] = entry
slotStats[entry.Slot] = cols
case col := <-s.dataColumnLogCh:
cols := slotStats[col.root]
cols = append(cols, col)
slotStats[col.root] = cols
case <-ticker.C:
for slot, columns := range slotStats {
var (
colIndices = make([]uint64, 0, fieldparams.NumberOfColumns)
peers = make([]string, 0, fieldparams.NumberOfColumns)
gossipScores = make([]float64, 0, fieldparams.NumberOfColumns)
validationTimes = make([]string, 0, fieldparams.NumberOfColumns)
sinceStartTimes = make([]string, 0, fieldparams.NumberOfColumns)
)
for root, columns := range slotStats {
indices := make([]uint64, 0, fieldparams.NumberOfColumns)
minValidationTime, maxValidationTime, sumValidationTime := time.Duration(0), time.Duration(0), time.Duration(0)
minSinceStartTime, maxSinceStartTime, sumSinceStartTime := time.Duration(0), time.Duration(0), time.Duration(0)

totalReceived := 0
for _, entry := range columns {
if entry.PeerSuffix == "" {
for _, column := range columns {
indices = append(indices, column.index)

sumValidationTime += column.validationTime
sumSinceStartTime += column.sinceStartTime

if totalReceived == 0 {
minValidationTime, maxValidationTime = column.validationTime, column.validationTime
minSinceStartTime, maxSinceStartTime = column.sinceStartTime, column.sinceStartTime
totalReceived++
continue
}
colIndices = append(colIndices, entry.ColIdx)
peers = append(peers, entry.PeerSuffix)
gossipScores = append(gossipScores, roundFloat(entry.PeerGossipScore, 2))
validationTimes = append(validationTimes, fmt.Sprintf("%.2fms", float64(entry.validationTime.Milliseconds())))
sinceStartTimes = append(sinceStartTimes, fmt.Sprintf("%.2fms", float64(entry.sinceStartTime.Milliseconds())))

minValidationTime, maxValidationTime = min(minValidationTime, column.validationTime), max(maxValidationTime, column.validationTime)
minSinceStartTime, maxSinceStartTime = min(minSinceStartTime, column.sinceStartTime), max(maxSinceStartTime, column.sinceStartTime)
totalReceived++
}

log.WithFields(logrus.Fields{
"slot": slot,
"receivedCount": totalReceived,
"columnIndices": colIndices,
"peers": peers,
"gossipScores": gossipScores,
"validationTimes": validationTimes,
"sinceStartTimes": sinceStartTimes,
}).Debug("Accepted data column sidecars summary")
if totalReceived > 0 {
slices.Sort(indices)
avgValidationTime := sumValidationTime / time.Duration(totalReceived)
avgSinceStartTime := sumSinceStartTime / time.Duration(totalReceived)

log.WithFields(logrus.Fields{
"slot": columns[0].slot,
"root": fmt.Sprintf("%#x", root),
"count": totalReceived,
"indices": helpers.PrettySlice(indices),
"validationTime": prettyMinMaxAverage(minValidationTime, maxValidationTime, avgValidationTime),
"sinceStartTime": prettyMinMaxAverage(minSinceStartTime, maxSinceStartTime, avgSinceStartTime),
}).Debug("Accepted data column sidecars summary")
}
}
slotStats = make(map[primitives.Slot][fieldparams.NumberOfColumns]dataColumnLogEntry)

slotStats = make(map[[fieldparams.RootLength]byte][]dataColumnLogEntry)
}
}
}

func roundFloat(f float64, decimals int) float64 {
mult := math.Pow(10, float64(decimals))
return math.Round(f*mult) / mult
func prettyMinMaxAverage(min, max, average time.Duration) string {
return fmt.Sprintf("[min: %v, avg: %v, max: %v]", min, average, max)
}
@@ -54,11 +54,13 @@ func TestValidateLightClientOptimisticUpdate(t *testing.T) {
cfg.CapellaForkEpoch = 3
cfg.DenebForkEpoch = 4
cfg.ElectraForkEpoch = 5
cfg.FuluForkEpoch = 6
cfg.ForkVersionSchedule[[4]byte{1, 0, 0, 0}] = 1
cfg.ForkVersionSchedule[[4]byte{2, 0, 0, 0}] = 2
cfg.ForkVersionSchedule[[4]byte{3, 0, 0, 0}] = 3
cfg.ForkVersionSchedule[[4]byte{4, 0, 0, 0}] = 4
cfg.ForkVersionSchedule[[4]byte{5, 0, 0, 0}] = 5
cfg.ForkVersionSchedule[[4]byte{6, 0, 0, 0}] = 6
params.OverrideBeaconConfig(cfg)

secondsPerSlot := int(params.BeaconConfig().SecondsPerSlot)
@@ -101,7 +103,10 @@ func TestValidateLightClientOptimisticUpdate(t *testing.T) {
}

for _, test := range tests {
for v := 1; v < 6; v++ {
for v := range version.All() {
if v == version.Phase0 {
continue
}
t.Run(test.name+"_"+version.String(v), func(t *testing.T) {
ctx := t.Context()
p := p2ptest.NewTestP2P(t)
@@ -180,11 +185,13 @@ func TestValidateLightClientFinalityUpdate(t *testing.T) {
cfg.CapellaForkEpoch = 3
cfg.DenebForkEpoch = 4
cfg.ElectraForkEpoch = 5
cfg.FuluForkEpoch = 6
cfg.ForkVersionSchedule[[4]byte{1, 0, 0, 0}] = 1
cfg.ForkVersionSchedule[[4]byte{2, 0, 0, 0}] = 2
cfg.ForkVersionSchedule[[4]byte{3, 0, 0, 0}] = 3
cfg.ForkVersionSchedule[[4]byte{4, 0, 0, 0}] = 4
cfg.ForkVersionSchedule[[4]byte{5, 0, 0, 0}] = 5
cfg.ForkVersionSchedule[[4]byte{6, 0, 0, 0}] = 6
params.OverrideBeaconConfig(cfg)

secondsPerSlot := int(params.BeaconConfig().SecondsPerSlot)
@@ -227,7 +234,10 @@ func TestValidateLightClientFinalityUpdate(t *testing.T) {
}

for _, test := range tests {
for v := 1; v < 6; v++ {
for v := range version.All() {
if v == version.Phase0 {
continue
}
t.Run(test.name+"_"+version.String(v), func(t *testing.T) {
ctx := t.Context()
p := p2ptest.NewTestP2P(t)

@@ -687,6 +687,12 @@ func sbrNotFound(t *testing.T, expectedRoot [32]byte) *mockStateByRooter {
}}
}

func sbrReturnsState(st state.BeaconState) *mockStateByRooter {
return &mockStateByRooter{sbr: func(_ context.Context, _ [32]byte) (state.BeaconState, error) {
return st, nil
}}
}

func sbrForValOverride(idx primitives.ValidatorIndex, val *ethpb.Validator) *mockStateByRooter {
return sbrForValOverrideWithT(nil, idx, val)
}
@@ -11,12 +11,10 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
forkchoicetypes "github.com/OffchainLabs/prysm/v7/beacon-chain/forkchoice/types"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v7/runtime/logging"
"github.com/OffchainLabs/prysm/v7/time/slots"
@@ -361,7 +359,7 @@ func (dv *RODataColumnsVerifier) SidecarParentSeen(parentSeen func([fieldparams.
}

if !dv.fc.HasNode(parentRoot) {
return columnErrBuilder(errSidecarParentNotSeen)
return columnErrBuilder(errors.Wrapf(errSidecarParentNotSeen, "parent root: %#x", parentRoot))
}
}

@@ -484,88 +482,19 @@ func (dv *RODataColumnsVerifier) SidecarProposerExpected(ctx context.Context) (e

defer dv.recordResult(RequireSidecarProposerExpected, &err)

type slotParentRoot struct {
slot primitives.Slot
parentRoot [fieldparams.RootLength]byte
}

targetRootBySlotParentRoot := make(map[slotParentRoot][fieldparams.RootLength]byte)

var targetRootFromCache = func(slot primitives.Slot, parentRoot [fieldparams.RootLength]byte) ([fieldparams.RootLength]byte, error) {
// Use cached values if available.
slotParentRoot := slotParentRoot{slot: slot, parentRoot: parentRoot}
if root, ok := targetRootBySlotParentRoot[slotParentRoot]; ok {
return root, nil
}

// Compute the epoch of the data column slot.
dataColumnEpoch := slots.ToEpoch(slot)
if dataColumnEpoch > 0 {
dataColumnEpoch = dataColumnEpoch - 1
}

// Compute the target root for the epoch.
targetRoot, err := dv.fc.TargetRootForEpoch(parentRoot, dataColumnEpoch)
if err != nil {
return [fieldparams.RootLength]byte{}, columnErrBuilder(errors.Wrap(err, "target root from epoch"))
}

// Store the target root in the cache.
targetRootBySlotParentRoot[slotParentRoot] = targetRoot

return targetRoot, nil
}

for _, dataColumn := range dv.dataColumns {
// Extract the slot of the data column.
dataColumnSlot := dataColumn.Slot()

// Extract the root of the parent block corresponding to the data column.
parentRoot := dataColumn.ParentRoot()

// Compute the target root for the data column.
targetRoot, err := targetRootFromCache(dataColumnSlot, parentRoot)
// Get the verifying state, it is guaranteed to have the correct proposer in the lookahead.
verifyingState, err := dv.getVerifyingState(ctx, dataColumn)
if err != nil {
return columnErrBuilder(errors.Wrap(err, "target root"))
return columnErrBuilder(errors.Wrap(err, "verifying state"))
}

// Compute the epoch of the data column slot.
dataColumnEpoch := slots.ToEpoch(dataColumnSlot)
if dataColumnEpoch > 0 {
dataColumnEpoch = dataColumnEpoch - 1
}

// Create a checkpoint for the target root.
checkpoint := &forkchoicetypes.Checkpoint{Root: targetRoot, Epoch: dataColumnEpoch}

// Try to extract the proposer index from the data column in the cache.
idx, cached := dv.pc.Proposer(checkpoint, dataColumnSlot)

if !cached {
parentRoot := dataColumn.ParentRoot()
// Ensure the expensive index computation is only performed once for
// concurrent requests for the same signature data.
idxAny, err, _ := dv.sg.Do(concatRootSlot(parentRoot, dataColumnSlot), func() (any, error) {
verifyingState, err := dv.getVerifyingState(ctx, dataColumn)
if err != nil {
return nil, columnErrBuilder(errors.Wrap(err, "verifying state"))
}

idx, err = helpers.BeaconProposerIndexAtSlot(ctx, verifyingState, dataColumnSlot)
if err != nil {
return nil, columnErrBuilder(errors.Wrap(err, "compute proposer"))
}

return idx, nil
})
if err != nil {
return err
}

var ok bool
if idx, ok = idxAny.(primitives.ValidatorIndex); !ok {
return columnErrBuilder(errors.New("type assertion to ValidatorIndex failed"))
}
// Use proposer lookahead directly
idx, err := helpers.BeaconProposerIndexAtSlot(ctx, verifyingState, dataColumnSlot)
if err != nil {
return columnErrBuilder(errors.Wrap(err, "proposer from lookahead"))
}

if idx != dataColumn.ProposerIndex() {
@@ -626,7 +555,3 @@ func inclusionProofKey(c blocks.RODataColumn) ([32]byte, error) {

return sha256.Sum256(unhashedKey), nil
}

func concatRootSlot(root [fieldparams.RootLength]byte, slot primitives.Slot) string {
return string(root[:]) + fmt.Sprintf("%d", slot)
}
@@ -2,7 +2,6 @@ package verification

import (
"reflect"
"sync"
"testing"
"time"

@@ -795,87 +794,90 @@ func TestDataColumnsSidecarProposerExpected(t *testing.T) {
blobCount = 1
)

parentRoot := [fieldparams.RootLength]byte{}
columns := GenerateTestDataColumns(t, parentRoot, columnSlot, blobCount)
firstColumn := columns[0]
ctx := t.Context()
testCases := []struct {
name string
stateByRooter StateByRooter
proposerCache proposerCache
columns []blocks.RODataColumn
error string
}{
{
name: "Cached, matches",
stateByRooter: nil,
proposerCache: &mockProposerCache{
ProposerCB: pcReturnsIdx(firstColumn.ProposerIndex()),
},
columns: columns,
},
{
name: "Cached, does not match",
stateByRooter: nil,
proposerCache: &mockProposerCache{
ProposerCB: pcReturnsIdx(firstColumn.ProposerIndex() + 1),
},
columns: columns,
error: errSidecarUnexpectedProposer.Error(),
},
{
name: "Not cached, state lookup failure",
stateByRooter: sbrNotFound(t, firstColumn.ParentRoot()),
proposerCache: &mockProposerCache{
ProposerCB: pcReturnsNotFound(),
},
columns: columns,
error: "verifying state",
},
}
parentRoot := [fieldparams.RootLength]byte{}

for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
initializer := Initializer{
shared: &sharedResources{
sr: tc.stateByRooter,
pc: tc.proposerCache,
hsp: &mockHeadStateProvider{},
fc: &mockForkchoicer{
TargetRootForEpochCB: fcReturnsTargetRoot([fieldparams.RootLength]byte{}),
},
// Create a Fulu state to get the expected proposer from the lookahead.
fuluState, _ := util.DeterministicGenesisStateFulu(t, 32)
expectedProposer, err := fuluState.ProposerLookahead()
require.NoError(t, err)
expectedProposerIdx := primitives.ValidatorIndex(expectedProposer[columnSlot])

// Generate data columns with the expected proposer index.
matchingColumns := generateTestDataColumnsWithProposer(t, parentRoot, columnSlot, blobCount, expectedProposerIdx)
// Generate data columns with wrong proposer index.
wrongColumns := generateTestDataColumnsWithProposer(t, parentRoot, columnSlot, blobCount, expectedProposerIdx+1)

t.Run("Proposer matches", func(t *testing.T) {
initializer := Initializer{
shared: &sharedResources{
sr: sbrReturnsState(fuluState),
hsp: &mockHeadStateProvider{
headRoot: parentRoot[:],
headSlot: columnSlot, // Same epoch so HeadStateReadOnly is used
headStateReadOnly: fuluState,
},
}
fc: &mockForkchoicer{},
},
}

verifier := initializer.NewDataColumnsVerifier(tc.columns, GossipDataColumnSidecarRequirements)
var wg sync.WaitGroup
verifier := initializer.NewDataColumnsVerifier(matchingColumns, GossipDataColumnSidecarRequirements)
err := verifier.SidecarProposerExpected(ctx)
require.NoError(t, err)
require.Equal(t, true, verifier.results.executed(RequireSidecarProposerExpected))
require.NoError(t, verifier.results.result(RequireSidecarProposerExpected))
})

var err1, err2 error
wg.Go(func() {
err1 = verifier.SidecarProposerExpected(ctx)
})
wg.Go(func() {
err2 = verifier.SidecarProposerExpected(ctx)
})
wg.Wait()
t.Run("Proposer does not match", func(t *testing.T) {
initializer := Initializer{
shared: &sharedResources{
sr: sbrReturnsState(fuluState),
hsp: &mockHeadStateProvider{
headRoot: parentRoot[:],
headSlot: columnSlot, // Same epoch so HeadStateReadOnly is used
headStateReadOnly: fuluState,
},
fc: &mockForkchoicer{},
},
}

require.Equal(t, true, verifier.results.executed(RequireSidecarProposerExpected))
verifier := initializer.NewDataColumnsVerifier(wrongColumns, GossipDataColumnSidecarRequirements)
err := verifier.SidecarProposerExpected(ctx)
require.ErrorContains(t, errSidecarUnexpectedProposer.Error(), err)
require.Equal(t, true, verifier.results.executed(RequireSidecarProposerExpected))
require.NotNil(t, verifier.results.result(RequireSidecarProposerExpected))
})

if len(tc.error) > 0 {
require.ErrorContains(t, tc.error, err1)
require.ErrorContains(t, tc.error, err2)
require.NotNil(t, verifier.results.result(RequireSidecarProposerExpected))
return
}
t.Run("State lookup failure", func(t *testing.T) {
columns := GenerateTestDataColumns(t, parentRoot, columnSlot, blobCount)
initializer := Initializer{
shared: &sharedResources{
sr: sbrNotFound(t, columns[0].ParentRoot()),
hsp: &mockHeadStateProvider{},
fc: &mockForkchoicer{},
},
}

require.NoError(t, err1)
require.NoError(t, err2)
require.NoError(t, verifier.results.result(RequireSidecarProposerExpected))
verifier := initializer.NewDataColumnsVerifier(columns, GossipDataColumnSidecarRequirements)
err := verifier.SidecarProposerExpected(ctx)
require.ErrorContains(t, "verifying state", err)
require.Equal(t, true, verifier.results.executed(RequireSidecarProposerExpected))
require.NotNil(t, verifier.results.result(RequireSidecarProposerExpected))
})
}

err := verifier.SidecarProposerExpected(ctx)
require.NoError(t, err)
})
func generateTestDataColumnsWithProposer(t *testing.T, parent [fieldparams.RootLength]byte, slot primitives.Slot, blobCount int, proposer primitives.ValidatorIndex) []blocks.RODataColumn {
roBlock, roBlobs := util.GenerateTestDenebBlockWithSidecar(t, parent, slot, blobCount, util.WithProposer(proposer))
blobs := make([]kzg.Blob, 0, len(roBlobs))
for i := range roBlobs {
blobs = append(blobs, kzg.Blob(roBlobs[i].Blob))
}

cellsPerBlob, proofsPerBlob := util.GenerateCellsAndProofs(t, blobs)
roDataColumnSidecars, err := peerdas.DataColumnSidecars(cellsPerBlob, proofsPerBlob, peerdas.PopulateFromBlock(roBlock))
require.NoError(t, err)

return roDataColumnSidecars
}

func TestColumnRequirementSatisfaction(t *testing.T) {
@@ -922,12 +924,3 @@ func TestColumnRequirementSatisfaction(t *testing.T) {
require.NoError(t, err)
}

func TestConcatRootSlot(t *testing.T) {
root := [fieldparams.RootLength]byte{1, 2, 3}
const slot = primitives.Slot(3210)

const expected = "\x01\x02\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x003210"

actual := concatRootSlot(root, slot)
require.Equal(t, expected, actual)
}
3	changelog/avoid_kzg_send_after_context_cancel.md	Normal file
@@ -0,0 +1,3 @@
### Fixed

- Prevent blocked sends to the KZG batch verifier when the caller context is already canceled, avoiding useless queueing and potential hangs.
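For context, here is a minimal Go sketch of the pattern this fix describes (the channel and job types are illustrative stand-ins, not Prysm's actual API): the send into the verifier queue is wrapped in a `select` against the context, so a canceled caller never parks a goroutine on a blocked channel send.

```go
package main

import "context"

type job struct{}

// enqueue queues j only while ctx is live. If the caller has already
// given up, the select takes the ctx.Done() branch instead of blocking.
func enqueue(ctx context.Context, ch chan<- *job, j *job) error {
	select {
	case ch <- j:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}
```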
3	changelog/bastin_fix-lcp2p-bug.md	Normal file
@@ -0,0 +1,3 @@
### Fixed

- Fix the missing fork version object mapping for Fulu in light client p2p.

3	changelog/builder-index.md	Normal file
@@ -0,0 +1,3 @@
### Added

- `primitives.BuilderIndex`: SSZ `uint64` wrapper for builder registry indices.

3	changelog/james-prysm_align-atter-pool-apis.md	Normal file
@@ -0,0 +1,3 @@
### Changed

- The /eth/v2/beacon/pool/attestations and /eth/v1/beacon/pool/sync_committees endpoints now return a 503 error while the node is still syncing; the REST API also broadcasts immediately now, mirroring the gRPC behavior.

11	changelog/james-prysm_fulu-e2e.md	Normal file
@@ -0,0 +1,11 @@
### Added

- Added basic Fulu fork transition support for the mainnet and minimal e2e tests (the multi-scenario test is not included).

### Changed

- Updated go-ethereum to 1.16.7.

### Removed

- Removed github.com/MariusVanDerWijden/FuzzyVM and github.com/MariusVanDerWijden/tx-fuzz, which were only used in e2e for transaction fuzzing, due to lack of support after 1.16.7.

3	changelog/james-prysm_skip-e2e-slot1-check.md	Normal file
@@ -0,0 +1,3 @@
### Changed

- The e2e sync committee evaluator now skips the first slot after startup. The fork epoch is already skipped for these checks; this additional skip applies only at startup, because Altair is always active from epoch 0 and validators need time to warm up.

2	changelog/kasey_fix-backfill-flag.md	Normal file
@@ -0,0 +1,2 @@
#### Fixed
- Fix validation logic for `--backfill-oldest-slot`, which was rejecting slots newer than 1056767.

3	changelog/manu-agg.md	Normal file
@@ -0,0 +1,3 @@
### Changed

- Pending aggregates: when multiple aggregated attestations differing only by the aggregator index are in the pending queue, process just one of them.
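A minimal sketch of the deduplication idea, using `slices.ContainsFunc` as the pending-attestation diff above does; the aggregate type and the equality helper here are simplified stand-ins for Prysm's real types.

```go
package main

import "slices"

type aggregate struct {
	aggregatorIndex uint64
	slot            uint64
}

// equalIgnoringAggregator treats aggregates that differ only by
// aggregator index as duplicates (the slot stands in for the full
// attestation data comparison).
func equalIgnoringAggregator(a, b aggregate) bool {
	return a.slot == b.slot
}

// dedupe keeps only the first aggregate of each equivalence class.
func dedupe(pending []aggregate) []aggregate {
	unique := make([]aggregate, 0, len(pending))
	for _, agg := range pending {
		if slices.ContainsFunc(unique, func(other aggregate) bool {
			return equalIgnoringAggregator(agg, other)
		}) {
			continue
		}
		unique = append(unique, agg)
	}
	return unique
}
```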
3	changelog/manu-cache-warmup.md	Normal file
@@ -0,0 +1,3 @@
### Changed

- Data column sidecars cache warmup: process all sidecars for a given epoch in parallel.
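The warmup code itself is not shown in this diff; one common way to fan work out per epoch in Go is an errgroup, sketched here with hypothetical names (requires Go 1.22+ for per-iteration loop variables).

```go
package main

import (
	"context"

	"golang.org/x/sync/errgroup"
)

type sidecar struct{}

// warmUpEpoch processes every sidecar of an epoch concurrently and
// returns the first error, canceling the remaining goroutines.
func warmUpEpoch(ctx context.Context, sidecars []sidecar, process func(context.Context, sidecar) error) error {
	g, ctx := errgroup.WithContext(ctx)
	for _, sc := range sidecars {
		g.Go(func() error { return process(ctx, sc) })
	}
	return g.Wait()
}
```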
3	changelog/manu-log.md	Normal file
@@ -0,0 +1,3 @@
### Changed

- Summarize the DEBUG log for data column sidecars received via gossip.

2	changelog/manu-remove-error-logs.md	Normal file
@@ -0,0 +1,2 @@
### Changed
- `validateDataColumn`: Remove error logs.

7	changelog/manu_reconstruct-metrics.md	Normal file
@@ -0,0 +1,7 @@
### Added
- Prometheus histogram `cells_and_proofs_from_structured_computation_milliseconds` to track computation time for cells and proofs from structured blobs.
- Prometheus histogram `get_blobs_v2_latency_milliseconds` to track RPC latency for `getBlobsV2` calls to the execution layer.

### Changed
- Run `ComputeCellsAndProofsFromFlat` in parallel to improve performance when computing cells and proofs.
- Run `ComputeCellsAndProofsFromStructured` in parallel to improve performance when computing cells and proofs.
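As a reference for the metric shape, registering a latency histogram with `promauto` looks like the sketch below; the metric name mirrors the changelog entry, while the bucket choice is an assumption and not the value used in the actual change.

```go
package main

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// getBlobsV2Latency tracks RPC latency for getBlobsV2 calls.
// Buckets here are illustrative (10ms doubling up to ~5s).
var getBlobsV2Latency = promauto.NewHistogram(prometheus.HistogramOpts{
	Name:    "get_blobs_v2_latency_milliseconds",
	Help:    "RPC latency for getBlobsV2 calls to the execution layer.",
	Buckets: prometheus.ExponentialBuckets(10, 2, 10),
})

// observeCall records one call's latency in milliseconds.
func observeCall(ms float64) {
	getBlobsV2Latency.Observe(ms)
}
```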
2	changelog/potuz_dcs_pc_removal.md	Normal file
@@ -0,0 +1,2 @@
### Changed
- Use lookahead to validate data column sidecar proposer index.
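Reduced to its essence, the verification diff above replaces a target-root/cache dance with a single lookup against the state's proposer lookahead. A simplified sketch with stand-in types (the real code goes through `helpers.BeaconProposerIndexAtSlot` on the verifying state):

```go
package main

import "fmt"

// proposerFor returns the expected proposer for slot from a lookahead
// table; index 0 is the first slot the table covers (an assumption of
// this sketch).
func proposerFor(lookahead []uint64, slot uint64) (uint64, error) {
	if int(slot) >= len(lookahead) {
		return 0, fmt.Errorf("slot %d outside lookahead window", slot)
	}
	return lookahead[slot], nil
}

// verifyProposer rejects a sidecar whose claimed proposer index does
// not match the lookahead entry for its slot.
func verifyProposer(lookahead []uint64, slot, claimed uint64) error {
	want, err := proposerFor(lookahead, slot)
	if err != nil {
		return err
	}
	if want != claimed {
		return fmt.Errorf("unexpected proposer: got %d, want %d", claimed, want)
	}
	return nil
}
```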
3	changelog/potuz_dont_lock_fcu.md	Normal file
@@ -0,0 +1,3 @@
### Changed

- Notify the engine about forkchoice updates in the background.

2	changelog/potuz_fcu_ctx.md	Normal file
@@ -0,0 +1,2 @@
### Changed
- Use a separate context when updating the slot cache.

3	changelog/potuz_next_epoch_attributes.md	Normal file
@@ -0,0 +1,3 @@
### Fixed

- Do not process slots and copy states for next epoch proposers after Fulu.

2	changelog/potuz_no_fcu_on_batches.md	Normal file
@@ -0,0 +1,2 @@
### Ignored
- Do not send FCU on block batches.

3	changelog/potuz_remove_signature_check.md	Normal file
@@ -0,0 +1,3 @@
### Changed

- Do not check the block signature on state transition.

3	changelog/potuz_use_head_previous_epoch.md	Normal file
@@ -0,0 +1,3 @@
### Added

- Use the head state to validate attestations for the previous epoch if the head is compatible with the target checkpoint.

3	changelog/radek_extend-http-analyzer.md	Normal file
@@ -0,0 +1,3 @@
### Changed

- Extend the `httperror` analyzer to more functions.

3	changelog/sashass1315_fix-panic.md	Normal file
@@ -0,0 +1,3 @@
### Fixed

- Avoid a panic when the fork schedule is empty [#16175](https://github.com/OffchainLabs/prysm/pull/16175).

3	changelog/satushh-consolidation.md	Normal file
@@ -0,0 +1,3 @@
### Changed

- Performance improvement in `ProcessConsolidationRequests`: use the more performant `HasPendingBalanceToWithdraw` instead of `PendingBalanceToWithdraw`, since the full total pending balance does not need to be calculated.
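The speedup comes from replacing a full sum with an early-exit existence check. A schematic comparison, with the pending-withdrawal type simplified to two fields:

```go
package main

type withdrawal struct {
	validatorIndex uint64
	amount         uint64
}

// pendingBalanceToWithdraw sums every matching entry: always O(n).
func pendingBalanceToWithdraw(ws []withdrawal, idx uint64) uint64 {
	var total uint64
	for _, w := range ws {
		if w.validatorIndex == idx {
			total += w.amount
		}
	}
	return total
}

// hasPendingBalanceToWithdraw returns on the first match, which is all
// the consolidation check needs.
func hasPendingBalanceToWithdraw(ws []withdrawal, idx uint64) bool {
	for _, w := range ws {
		if w.validatorIndex == idx {
			return true
		}
	}
	return false
}
```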
9	config/params/testdata/e2e_config.yaml	vendored
@@ -49,7 +49,7 @@ ELECTRA_FORK_VERSION: 0x050000fd
ELECTRA_FORK_EPOCH: 14
# Fulu
FULU_FORK_VERSION: 0x060000fd
FULU_FORK_EPOCH: 18446744073709551615
FULU_FORK_EPOCH: 16


# Time parameters
@@ -122,7 +122,7 @@ MIN_SYNC_COMMITTEE_PARTICIPANTS: 1
# Other e2e overrides
# ---------------------------------------------------------------
CONFIG_NAME: "end-to-end"
SLOTS_PER_EPOCH: 6
SLOTS_PER_EPOCH: 8
EPOCHS_PER_ETH1_VOTING_PERIOD: 2
MAX_SEED_LOOKAHEAD: 1

@@ -136,3 +136,8 @@ BLOB_SCHEDULE:
# Electra
- EPOCH: 14
MAX_BLOBS_PER_BLOCK: 9
# BPO (Blob Parameter Optimization) schedule for Fulu
- EPOCH: 17
MAX_BLOBS_PER_BLOCK: 15
- EPOCH: 18
MAX_BLOBS_PER_BLOCK: 21
@@ -1,14 +1,12 @@
package params

import "math"

const (
AltairE2EForkEpoch = 6
BellatrixE2EForkEpoch = 8
CapellaE2EForkEpoch = 10
DenebE2EForkEpoch = 12
ElectraE2EForkEpoch = 14
FuluE2EForkEpoch = math.MaxUint64
FuluE2EForkEpoch = 16
)

// E2ETestConfig retrieves the configurations made specifically for E2E testing.
@@ -28,7 +26,7 @@ func E2ETestConfig() *BeaconChainConfig {
// Time parameters.
e2eConfig.SecondsPerSlot = 10
e2eConfig.SlotDurationMilliseconds = 10000
e2eConfig.SlotsPerEpoch = 6
e2eConfig.SlotsPerEpoch = 8
e2eConfig.SqrRootSlotsPerEpoch = 2
e2eConfig.SecondsPerETH1Block = 2
e2eConfig.EpochsPerEth1VotingPeriod = 2
@@ -64,6 +62,9 @@ func E2ETestConfig() *BeaconChainConfig {
e2eConfig.BlobSchedule = []BlobScheduleEntry{
{Epoch: e2eConfig.DenebForkEpoch, MaxBlobsPerBlock: uint64(e2eConfig.DeprecatedMaxBlobsPerBlock)},
{Epoch: e2eConfig.ElectraForkEpoch, MaxBlobsPerBlock: uint64(e2eConfig.DeprecatedMaxBlobsPerBlockElectra)},
// BPO (Blob Parameter Optimization) schedule for Fulu
{Epoch: e2eConfig.FuluForkEpoch + 1, MaxBlobsPerBlock: 15},
{Epoch: e2eConfig.FuluForkEpoch + 2, MaxBlobsPerBlock: 21},
}

e2eConfig.InitializeForkSchedule()
@@ -119,6 +120,9 @@ func E2EMainnetTestConfig() *BeaconChainConfig {
e2eConfig.BlobSchedule = []BlobScheduleEntry{
{Epoch: e2eConfig.DenebForkEpoch, MaxBlobsPerBlock: uint64(e2eConfig.DeprecatedMaxBlobsPerBlock)},
{Epoch: e2eConfig.ElectraForkEpoch, MaxBlobsPerBlock: uint64(e2eConfig.DeprecatedMaxBlobsPerBlockElectra)},
// BPO (Blob Parameter Optimization) schedule for Fulu
{Epoch: e2eConfig.FuluForkEpoch + 1, MaxBlobsPerBlock: 15},
{Epoch: e2eConfig.FuluForkEpoch + 2, MaxBlobsPerBlock: 21},
}

e2eConfig.InitializeForkSchedule()
@@ -4,6 +4,7 @@ go_library(
name = "go_default_library",
srcs = [
"basis_points.go",
"builder_index.go",
"committee_bits_mainnet.go",
"committee_bits_minimal.go", # keep
"committee_index.go",
@@ -31,6 +32,7 @@ go_library(
go_test(
name = "go_default_test",
srcs = [
"builder_index_test.go",
"committee_index_test.go",
"domain_test.go",
"epoch_test.go",
54	consensus-types/primitives/builder_index.go	Normal file
@@ -0,0 +1,54 @@
package primitives

import (
"fmt"

fssz "github.com/prysmaticlabs/fastssz"
)

var _ fssz.HashRoot = (BuilderIndex)(0)
var _ fssz.Marshaler = (*BuilderIndex)(nil)
var _ fssz.Unmarshaler = (*BuilderIndex)(nil)

// BuilderIndex is an index into the builder registry.
type BuilderIndex uint64

// HashTreeRoot returns the SSZ hash tree root of the index.
func (b BuilderIndex) HashTreeRoot() ([32]byte, error) {
return fssz.HashWithDefaultHasher(b)
}

// HashTreeRootWith appends the SSZ uint64 representation of the index to the given hasher.
func (b BuilderIndex) HashTreeRootWith(hh *fssz.Hasher) error {
hh.PutUint64(uint64(b))
return nil
}

// UnmarshalSSZ decodes the SSZ-encoded uint64 index from buf.
func (b *BuilderIndex) UnmarshalSSZ(buf []byte) error {
if len(buf) != b.SizeSSZ() {
return fmt.Errorf("expected buffer of length %d received %d", b.SizeSSZ(), len(buf))
}
*b = BuilderIndex(fssz.UnmarshallUint64(buf))
return nil
}

// MarshalSSZTo appends the SSZ-encoded index to dst and returns the extended buffer.
func (b *BuilderIndex) MarshalSSZTo(dst []byte) ([]byte, error) {
marshalled, err := b.MarshalSSZ()
if err != nil {
return nil, err
}
return append(dst, marshalled...), nil
}

// MarshalSSZ encodes the index as an SSZ uint64.
func (b *BuilderIndex) MarshalSSZ() ([]byte, error) {
marshalled := fssz.MarshalUint64([]byte{}, uint64(*b))
return marshalled, nil
}

// SizeSSZ returns the size of the SSZ-encoded index in bytes.
func (b *BuilderIndex) SizeSSZ() int {
return 8
}
86
consensus-types/primitives/builder_index_test.go
Normal file
86
consensus-types/primitives/builder_index_test.go
Normal file
@@ -0,0 +1,86 @@
package primitives_test

import (
    "encoding/binary"
    "slices"
    "strconv"
    "testing"

    "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v7/testing/require"
)

func TestBuilderIndex_SSZRoundTripAndHashRoot(t *testing.T) {
    cases := []uint64{
        0,
        1,
        42,
        (1 << 32) - 1,
        1 << 32,
        ^uint64(0),
    }

    for _, v := range cases {
        t.Run("v="+u64name(v), func(t *testing.T) {
            t.Parallel()

            val := primitives.BuilderIndex(v)
            require.Equal(t, 8, (&val).SizeSSZ())

            enc, err := (&val).MarshalSSZ()
            require.NoError(t, err)
            require.Equal(t, 8, len(enc))

            wantEnc := make([]byte, 8)
            binary.LittleEndian.PutUint64(wantEnc, v)
            require.DeepEqual(t, wantEnc, enc)

            dstPrefix := []byte("prefix:")
            dst, err := (&val).MarshalSSZTo(slices.Clone(dstPrefix))
            require.NoError(t, err)
            wantDst := append(dstPrefix, wantEnc...)
            require.DeepEqual(t, wantDst, dst)

            var decoded primitives.BuilderIndex
            require.NoError(t, (&decoded).UnmarshalSSZ(enc))
            require.Equal(t, val, decoded)

            root, err := val.HashTreeRoot()
            require.NoError(t, err)

            var wantRoot [32]byte
            binary.LittleEndian.PutUint64(wantRoot[:8], v)
            require.Equal(t, wantRoot, root)
        })
    }
}

func TestBuilderIndex_UnmarshalSSZRejectsWrongSize(t *testing.T) {
    for _, size := range []int{7, 9} {
        t.Run("size="+strconv.Itoa(size), func(t *testing.T) {
            t.Parallel()
            var v primitives.BuilderIndex
            err := (&v).UnmarshalSSZ(make([]byte, size))
            require.ErrorContains(t, "expected buffer of length 8", err)
        })
    }
}

func u64name(v uint64) string {
    switch v {
    case 0:
        return "0"
    case 1:
        return "1"
    case 42:
        return "42"
    case (1 << 32) - 1:
        return "2^32-1"
    case 1 << 32:
        return "2^32"
    case ^uint64(0):
        return "max"
    default:
        return "custom"
    }
}
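For context outside the diff, a minimal round-trip sketch of the new type; it assumes nothing beyond the methods added above and the module path declared in go.mod below.

package main

import (
    "fmt"

    "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
)

func main() {
    idx := primitives.BuilderIndex(42)

    // MarshalSSZ yields the fixed 8-byte little-endian encoding.
    enc, err := (&idx).MarshalSSZ()
    if err != nil {
        panic(err)
    }
    fmt.Printf("%x\n", enc) // 2a00000000000000

    // UnmarshalSSZ restores the value from those 8 bytes.
    var decoded primitives.BuilderIndex
    if err := (&decoded).UnmarshalSSZ(enc); err != nil {
        panic(err)
    }

    // HashTreeRoot places the same little-endian bytes in the first
    // 8 bytes of an otherwise zero 32-byte root.
    root, err := decoded.HashTreeRoot()
    if err != nil {
        panic(err)
    }
    fmt.Println(decoded == idx, root[:8]) // true [42 0 0 0 0 0 0 0]
}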
166	deps.bzl
@@ -301,8 +301,8 @@ def prysm_deps():
    go_repository(
        name = "com_github_bits_and_blooms_bitset",
        importpath = "github.com/bits-and-blooms/bitset",
        sum = "h1:1X2TS7aHz1ELcC0yU1y2stUs/0ig5oMU6STFZGrhvHI=",
        version = "v1.17.0",
        sum = "h1:Tquv9S8+SGaS3EhyA+up3FXzmkhxPGjQQCkcs2uw7w4=",
        version = "v1.22.0",
    )
    go_repository(
        name = "com_github_bradfitz_go_smtpd",
@@ -482,8 +482,8 @@ def prysm_deps():
    go_repository(
        name = "com_github_cockroachdb_pebble",
        importpath = "github.com/cockroachdb/pebble",
        sum = "h1:CUh2IPtR4swHlEj48Rhfzw6l/d0qA31fItcIszQVIsA=",
        version = "v1.1.2",
        sum = "h1:5AAWCBWbat0uE0blr8qzufZP5tBjkRyy/jWe1QWLnvw=",
        version = "v1.1.5",
    )
    go_repository(
        name = "com_github_cockroachdb_redact",
@@ -512,14 +512,16 @@ def prysm_deps():
    go_repository(
        name = "com_github_consensys_bavard",
        importpath = "github.com/consensys/bavard",
        sum = "h1:Uw2CGvbXSZWhqK59X0VG/zOjpTFuOMcPLStrp1ihI0A=",
        version = "v0.1.22",
        sum = "h1:dTlIwEdFQmldzFf5F6bbTcYWhvnAgZai2g8eq3Wwxqg=",
        version = "v0.1.31-0.20250406004941-2db259e4b582",
    )
    go_repository(
        name = "com_github_consensys_gnark_crypto",
        importpath = "github.com/consensys/gnark-crypto",
        sum = "h1:DDBdl4HaBtdQsq/wfMwJvZNE80sHidrK3Nfrefatm0E=",
        version = "v0.14.0",
        patch_args = ["-p1"],
        patches = ["//third_party:com_github_consensys_gnark_crypto.patch"],
        sum = "h1:vIye/FqI50VeAr0B3dx+YjeIvmc3LWz4yEfbWBpTUf0=",
        version = "v0.18.0",
    )
    go_repository(
        name = "com_github_containerd_cgroups",
@@ -555,8 +557,14 @@ def prysm_deps():
    go_repository(
        name = "com_github_cpuguy83_go_md2man_v2",
        importpath = "github.com/cpuguy83/go-md2man/v2",
        sum = "h1:ZtcqGrnekaHpVLArFSe4HK5DoKx1T0rq2DwVB0alcyc=",
        version = "v2.0.5",
        sum = "h1:zbFlGlXEAKlwXpmvle3d8Oe3YnkKIK4xSRTd3sHPnBo=",
        version = "v2.0.7",
    )
    go_repository(
        name = "com_github_crate_crypto_go_eth_kzg",
        importpath = "github.com/crate-crypto/go-eth-kzg",
        sum = "h1:WzDGjHk4gFg6YzV0rJOAsTK4z3Qkz5jd4RE3DAvPFkg=",
        version = "v1.4.0",
    )
    go_repository(
        name = "com_github_crate_crypto_go_ipa",
@@ -591,8 +599,8 @@ def prysm_deps():
    go_repository(
        name = "com_github_datadog_zstd",
        importpath = "github.com/DataDog/zstd",
        sum = "h1:oWf5W7GtOLgp6bciQYDmhHHjdhYkALu6S/5Ni9ZgSvQ=",
        version = "v1.5.5",
        sum = "h1:ybO8RBeh29qrxIhCA9E8gKY6xfONU9T6G6aP9DTKfLE=",
        version = "v1.5.7",
    )
    go_repository(
        name = "com_github_davecgh_go_spew",
@@ -606,23 +614,29 @@ def prysm_deps():
        sum = "h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU=",
        version = "v0.0.0-20200604182044-b73af7476f6c",
    )
    go_repository(
        name = "com_github_dchest_siphash",
        importpath = "github.com/dchest/siphash",
        sum = "h1:QXwFc8cFOR2dSa/gE6o/HokBMWtLUaNDVd+22aKHeEA=",
        version = "v1.2.3",
    )
    go_repository(
        name = "com_github_deckarep_golang_set_v2",
        importpath = "github.com/deckarep/golang-set/v2",
        sum = "h1:XfcQbWM1LlMB8BsJ8N9vW5ehnnPVIw0je80NsVHagjM=",
        version = "v2.6.0",
        sum = "h1:swm0rlPCmdWn9mESxKOjWk8hXSqoxOp+ZlfuyaAdFlQ=",
        version = "v2.8.0",
    )
    go_repository(
        name = "com_github_decred_dcrd_crypto_blake256",
        importpath = "github.com/decred/dcrd/crypto/blake256",
        sum = "h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y=",
        version = "v1.0.1",
        sum = "h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8=",
        version = "v1.1.0",
    )
    go_repository(
        name = "com_github_decred_dcrd_dcrec_secp256k1_v4",
        importpath = "github.com/decred/dcrd/dcrec/secp256k1/v4",
        sum = "h1:rpfIENRNNilwHwZeG5+P150SMrnNEcHYvcCuK6dPZSg=",
        version = "v4.3.0",
        sum = "h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc=",
        version = "v4.4.0",
    )
    go_repository(
        name = "com_github_deepmap_oapi_codegen",
@@ -735,8 +749,8 @@ def prysm_deps():
    go_repository(
        name = "com_github_emicklei_dot",
        importpath = "github.com/emicklei/dot",
        sum = "h1:Ase39UD9T9fRBOb5ptgpixrxfx8abVzNWZi2+lr53PI=",
        version = "v0.11.0",
        sum = "h1:08GN+DD79cy/tzN6uLCT84+2Wk9u+wvqP+Hkx/dIR8A=",
        version = "v1.6.2",
    )
    go_repository(
        name = "com_github_emicklei_go_restful_v3",
@@ -779,6 +793,12 @@ def prysm_deps():
        sum = "h1:aVtoLK5xwJ6c5RiqO8g8ptJ5KU+2Hdquf6G3aXiHh5s=",
        version = "v2.1.5",
    )
    go_repository(
        name = "com_github_ethereum_go_bigmodexpfix",
        importpath = "github.com/ethereum/go-bigmodexpfix",
        sum = "h1:rvv6MJhy07IMfEKuARQ9TKojGqLVNxQajaXEp/BoqSk=",
        version = "v0.0.0-20250911101455-f9e208c548ab",
    )
    go_repository(
        name = "com_github_ethereum_go_ethereum",
        build_directives = [
@@ -790,8 +810,8 @@ def prysm_deps():
        patches = [
            "//third_party:com_github_ethereum_go_ethereum_secp256k1.patch",
        ],
        sum = "h1:bRra1zi+/q+qyXZ6fylZOrlaF8kDdnlTtzNTmNHfX+g=",
        version = "v1.15.9",
        sum = "h1:qeM4TvbrWK0UC0tgkZ7NiRsmBGwsjqc64BHo20U59UQ=",
        version = "v1.16.7",
    )
    go_repository(
        name = "com_github_ethereum_go_verkle",
@@ -832,8 +852,8 @@ def prysm_deps():
    go_repository(
        name = "com_github_ferranbt_fastssz",
        importpath = "github.com/ferranbt/fastssz",
        sum = "h1:ZI+z3JH05h4kgmFXdHuR1aWYsgrg7o+Fw7/NCzM16Mo=",
        version = "v0.1.3",
        sum = "h1:OCDB+dYDEQDvAgtAGnTSidK1Pe2tW3nFV40XyMkTeDY=",
        version = "v0.1.4",
    )
    go_repository(
        name = "com_github_fjl_gencodec",
@@ -919,12 +939,6 @@ def prysm_deps():
        sum = "h1:f6D9Hr8xV8uYKlyuj8XIruxlh9WjVjdh1gIicAS7ays=",
        version = "v0.0.0-20191108122812-4678299bea08",
    )
    go_repository(
        name = "com_github_gballet_go_verkle",
        importpath = "github.com/gballet/go-verkle",
        sum = "h1:BAIP2GihuqhwdILrV+7GJel5lyPV3u1+PgzrWLc0TkE=",
        version = "v0.1.1-0.20231031103413-a67434b50f46",
    )
    go_repository(
        name = "com_github_gdamore_encoding",
        importpath = "github.com/gdamore/encoding",
@@ -1132,8 +1146,8 @@ def prysm_deps():
    go_repository(
        name = "com_github_gofrs_flock",
        importpath = "github.com/gofrs/flock",
        sum = "h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=",
        version = "v0.8.1",
        sum = "h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E=",
        version = "v0.12.1",
    )
    go_repository(
        name = "com_github_gogo_googleapis",
@@ -1200,8 +1214,8 @@ def prysm_deps():
    go_repository(
        name = "com_github_golang_snappy",
        importpath = "github.com/golang/snappy",
        sum = "h1:4bw4WeyTYPp0smaXiJZCNnLrvVBqirQVreixayXezGc=",
        version = "v0.0.5-0.20231225225746-43d5d4cd4e0e",
        sum = "h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs=",
        version = "v1.0.0",
    )
    go_repository(
        name = "com_github_golangci_lint_1",
@@ -1275,12 +1289,6 @@ def prysm_deps():
        sum = "h1:GOZbcHa3HfsPKPlmyPyN2KEohoMXOhdMbHrvbpl2QaA=",
        version = "v0.1.0",
    )
    go_repository(
        name = "com_github_google_subcommands",
        importpath = "github.com/google/subcommands",
        sum = "h1:vWQspBTo2nEqTUFita5/KeEWlUL8kQObDFbub/EN9oE=",
        version = "v1.2.0",
    )
    go_repository(
        name = "com_github_google_uuid",
        importpath = "github.com/google/uuid",
@@ -1542,8 +1550,8 @@ def prysm_deps():
    go_repository(
        name = "com_github_holiman_billy",
        importpath = "github.com/holiman/billy",
        sum = "h1:X4egAf/gcS1zATw6wn4Ej8vjuVGxeHdan+bRb2ebyv4=",
        version = "v0.0.0-20240216141850-2abb0c79d3c4",
        sum = "h1:IZUYC/xb3giYwBLMnr8d0TGTzPKFGNTCGgGLoyeX330=",
        version = "v0.0.0-20250707135307-f2f9b9aae7db",
    )
    go_repository(
        name = "com_github_holiman_bloomfilter_v2",
@@ -1554,8 +1562,8 @@ def prysm_deps():
    go_repository(
        name = "com_github_holiman_goevmlab",
        importpath = "github.com/holiman/goevmlab",
        sum = "h1:JHZ8k9n9G9KXIo1qrvK5Cxah6ax5BR0qVTA9bFYl1oM=",
        version = "v0.0.0-20241121133100-cfa6b078c8c4",
        sum = "h1:krEMViaomzuBfH/L1V4b8w2lVfsEpUbxH1ZJQpUaT6E=",
        version = "v0.0.0-20250515153315-ab84907ebdb2",
    )
    go_repository(
        name = "com_github_holiman_uint256",
@@ -1850,8 +1858,8 @@ def prysm_deps():
    go_repository(
        name = "com_github_klauspost_compress",
        importpath = "github.com/klauspost/compress",
        sum = "h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=",
        version = "v1.17.11",
        sum = "h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=",
        version = "v1.18.0",
    )
    go_repository(
        name = "com_github_klauspost_cpuid",
@@ -2104,18 +2112,6 @@ def prysm_deps():
        sum = "h1:3l11YT8tm9MnwGFQ4kETwkzpAwY2Jt9lCrumCUW4+z4=",
        version = "v0.7.0",
    )
    go_repository(
        name = "com_github_mariusvanderwijden_fuzzyvm",
        importpath = "github.com/MariusVanDerWijden/FuzzyVM",
        sum = "h1:RQtzNvriR3Yu5CvVBTJPwDmfItBT90TWZ3fFondhc08=",
        version = "v0.0.0-20240516070431-7828990cad7d",
    )
    go_repository(
        name = "com_github_mariusvanderwijden_tx_fuzz",
        importpath = "github.com/MariusVanDerWijden/tx-fuzz",
        sum = "h1:Tq4lXivsR8mtoP4RpasUDIUpDLHfN1YhFge/kzrzK78=",
        version = "v1.4.0",
    )
    go_repository(
        name = "com_github_marten_seemann_tcp",
        importpath = "github.com/marten-seemann/tcp",
@@ -2143,8 +2139,8 @@ def prysm_deps():
    go_repository(
        name = "com_github_mattn_go_runewidth",
        importpath = "github.com/mattn/go-runewidth",
        sum = "h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=",
        version = "v0.0.15",
        sum = "h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=",
        version = "v0.0.16",
    )
    go_repository(
        name = "com_github_matttproud_golang_protobuf_extensions",
@@ -2257,8 +2253,8 @@ def prysm_deps():
    go_repository(
        name = "com_github_mitchellh_mapstructure",
        importpath = "github.com/mitchellh/mapstructure",
        sum = "h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag=",
        version = "v1.4.1",
        sum = "h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=",
        version = "v1.5.0",
    )
    go_repository(
        name = "com_github_mitchellh_pointerstructure",
@@ -2272,12 +2268,6 @@ def prysm_deps():
        sum = "h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY=",
        version = "v0.4.0",
    )
    go_repository(
        name = "com_github_mmcloughlin_profile",
        importpath = "github.com/mmcloughlin/profile",
        sum = "h1:jhDmAqPyebOsVDOCICJoINoLb/AnLBaUw58nFzxWS2w=",
        version = "v0.1.1",
    )
    go_repository(
        name = "com_github_moby_spdystream",
        importpath = "github.com/moby/spdystream",
@@ -2821,6 +2811,12 @@ def prysm_deps():
        sum = "h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=",
        version = "v1.1.0",
    )
    go_repository(
        name = "com_github_projectzkm_ziren_crates_go_runtime_zkvm_runtime",
        importpath = "github.com/ProjectZKM/Ziren/crates/go-runtime/zkvm_runtime",
        sum = "h1:1zYrtlhrZ6/b6SAjLSfKzWtdgqK0U+HtH/VcBWh1BaU=",
        version = "v0.0.0-20251001021608-1fe7b43fc4d6",
    )
    go_repository(
        name = "com_github_prometheus_client_golang",
        importpath = "github.com/prometheus/client_golang",
@@ -3267,8 +3263,8 @@ def prysm_deps():
    go_repository(
        name = "com_github_spf13_pflag",
        importpath = "github.com/spf13/pflag",
        sum = "h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=",
        version = "v1.0.5",
        sum = "h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=",
        version = "v1.0.6",
    )
    go_repository(
        name = "com_github_spf13_viper",
@@ -3384,14 +3380,14 @@ def prysm_deps():
    go_repository(
        name = "com_github_tklauser_go_sysconf",
        importpath = "github.com/tklauser/go-sysconf",
        sum = "h1:GBUpcahXSpR2xN01jhkNAbTLRk2Yzgggk8IM08lq3r4=",
        version = "v0.3.13",
        sum = "h1:VE89k0criAymJ/Os65CSn1IXaol+1wrsFHEB8Ol49K4=",
        version = "v0.3.15",
    )
    go_repository(
        name = "com_github_tklauser_numcpus",
        importpath = "github.com/tklauser/numcpus",
        sum = "h1:yjuerZP127QG9m5Zh/mSO4wqurYil27tHrqwRoRjpr4=",
        version = "v0.7.0",
        sum = "h1:18njr6LDBk1zuna922MgdjQuJFjrdppsZG60sHGfjso=",
        version = "v0.10.0",
    )
    go_repository(
        name = "com_github_tmc_grpc_websocket_proxy",
@@ -3411,12 +3407,6 @@ def prysm_deps():
        sum = "h1:YPXUKf7fYbp/y8xloBqZOw2qaVggbfwMlI8WM3wZUJ0=",
        version = "v1.2.7",
    )
    go_repository(
        name = "com_github_umbracle_gohashtree",
        importpath = "github.com/umbracle/gohashtree",
        sum = "h1:CQh33pStIp/E30b7TxDlXfM0145bn2e8boI30IxAhTg=",
        version = "v0.0.2-alpha.0.20230207094856-5b775a815c10",
    )
    go_repository(
        name = "com_github_urfave_cli",
        importpath = "github.com/urfave/cli",
@@ -3426,8 +3416,8 @@ def prysm_deps():
    go_repository(
        name = "com_github_urfave_cli_v2",
        importpath = "github.com/urfave/cli/v2",
        sum = "h1:WoHEJLdsXr6dDWoJgMq/CboDmyY/8HMMH1fTECbih+w=",
        version = "v2.27.5",
        sum = "h1:VdRdS98FNhKZ8/Az8B7MTyGQmpIr36O1EHybx/LaZ4g=",
        version = "v2.27.6",
    )
    go_repository(
        name = "com_github_urfave_negroni",
@@ -3474,8 +3464,8 @@ def prysm_deps():
    go_repository(
        name = "com_github_victoriametrics_fastcache",
        importpath = "github.com/VictoriaMetrics/fastcache",
        sum = "h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI=",
        version = "v1.12.2",
        sum = "h1:AW4mheMR5Vd9FkAPUv+NH6Nhw+fmbTMGMsNAoA/+4G0=",
        version = "v1.13.0",
    )
    go_repository(
        name = "com_github_vividcortex_gohistogram",
@@ -3597,8 +3587,8 @@ def prysm_deps():
    go_repository(
        name = "com_github_yusufpapurcu_wmi",
        importpath = "github.com/yusufpapurcu/wmi",
        sum = "h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw=",
        version = "v1.2.3",
        sum = "h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=",
        version = "v1.2.4",
    )
    go_repository(
        name = "com_google_cloud_go",
@@ -4784,8 +4774,8 @@ def prysm_deps():
    go_repository(
        name = "org_golang_x_exp",
        importpath = "golang.org/x/exp",
        sum = "h1:KL/ZBHXgKGVmuZBZ01Lt57yE5ws8ZPSkkihmEyq7FXc=",
        version = "v0.0.0-20250128182459-e0ece0dbea4c",
        sum = "h1:y5zboxd6LQAqYIhHnB48p0ByQ/GnQx2BE33L8BOHQkI=",
        version = "v0.0.0-20250506013437-ce4c2cf36ca6",
    )
    go_repository(
        name = "org_golang_x_exp_typeparams",
55	go.mod
@@ -3,20 +3,18 @@ module github.com/OffchainLabs/prysm/v7
go 1.25.1

require (
    github.com/MariusVanDerWijden/FuzzyVM v0.0.0-20240516070431-7828990cad7d
    github.com/MariusVanDerWijden/tx-fuzz v1.4.0
    github.com/OffchainLabs/go-bitfield v0.0.0-20251031151322-f427d04d8506
    github.com/aristanetworks/goarista v0.0.0-20200805130819-fd197cf57d96
    github.com/bazelbuild/rules_go v0.23.2
    github.com/btcsuite/btcd/btcec/v2 v2.3.4
    github.com/consensys/gnark-crypto v0.14.0
    github.com/consensys/gnark-crypto v0.18.0
    github.com/crate-crypto/go-kzg-4844 v1.1.0
    github.com/d4l3k/messagediff v1.2.1
    github.com/dgraph-io/ristretto/v2 v2.2.0
    github.com/dustin/go-humanize v1.0.1
    github.com/emicklei/dot v0.11.0
    github.com/emicklei/dot v1.6.2
    github.com/ethereum/c-kzg-4844/v2 v2.1.5
    github.com/ethereum/go-ethereum v1.15.9
    github.com/ethereum/go-ethereum v1.16.7
    github.com/fsnotify/fsnotify v1.6.0
    github.com/ghodss/yaml v1.0.0
    github.com/go-yaml/yaml v2.1.0+incompatible
@@ -24,7 +22,7 @@ require (
    github.com/golang-jwt/jwt/v4 v4.5.2
    github.com/golang/gddo v0.0.0-20200528160355-8d077c1d8f4c
    github.com/golang/protobuf v1.5.4
    github.com/golang/snappy v0.0.5-0.20231225225746-43d5d4cd4e0e
    github.com/golang/snappy v1.0.0
    github.com/google/go-cmp v0.7.0
    github.com/google/gofuzz v1.2.0
    github.com/google/uuid v1.6.0
@@ -74,7 +72,7 @@ require (
    github.com/thomaso-mirodin/intmath v0.0.0-20160323211736-5dc6d854e46e
    github.com/trailofbits/go-mutexasserts v0.0.0-20250212181730-4c2b8e9e784b
    github.com/tyler-smith/go-bip39 v1.1.0
    github.com/urfave/cli/v2 v2.27.5
    github.com/urfave/cli/v2 v2.27.6
    github.com/uudashr/gocognit v1.0.5
    github.com/wealdtech/go-bytesutil v1.1.1
    github.com/wealdtech/go-eth2-util v1.6.3
@@ -89,7 +87,7 @@ require (
    go.uber.org/automaxprocs v1.5.2
    go.uber.org/mock v0.5.2
    golang.org/x/crypto v0.44.0
    golang.org/x/exp v0.0.0-20250128182459-e0ece0dbea4c
    golang.org/x/exp v0.0.0-20250506013437-ce4c2cf36ca6
    golang.org/x/sync v0.18.0
    golang.org/x/tools v0.39.0
    google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1
@@ -105,12 +103,13 @@ require (

require (
    github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c // indirect
    github.com/DataDog/zstd v1.5.5 // indirect
    github.com/DataDog/zstd v1.5.7 // indirect
    github.com/Microsoft/go-winio v0.6.2 // indirect
    github.com/VictoriaMetrics/fastcache v1.12.2 // indirect
    github.com/ProjectZKM/Ziren/crates/go-runtime/zkvm_runtime v0.0.0-20251001021608-1fe7b43fc4d6 // indirect
    github.com/VictoriaMetrics/fastcache v1.13.0 // indirect
    github.com/benbjohnson/clock v1.3.5 // indirect
    github.com/beorn7/perks v1.0.1 // indirect
    github.com/bits-and-blooms/bitset v1.17.0 // indirect
    github.com/bits-and-blooms/bitset v1.22.0 // indirect
    github.com/cenkalti/backoff/v4 v4.3.0 // indirect
    github.com/cespare/cp v1.1.1 // indirect
    github.com/cespare/xxhash/v2 v2.3.0 // indirect
@@ -118,27 +117,28 @@ require (
    github.com/cockroachdb/errors v1.11.3 // indirect
    github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce // indirect
    github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect
    github.com/cockroachdb/pebble v1.1.2 // indirect
    github.com/cockroachdb/pebble v1.1.5 // indirect
    github.com/cockroachdb/redact v1.1.5 // indirect
    github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect
    github.com/consensys/bavard v0.1.22 // indirect
    github.com/containerd/cgroups v1.1.0 // indirect
    github.com/coreos/go-systemd/v22 v22.5.0 // indirect
    github.com/cpuguy83/go-md2man/v2 v2.0.5 // indirect
    github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect
    github.com/crate-crypto/go-eth-kzg v1.4.0 // indirect
    github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a // indirect
    github.com/davecgh/go-spew v1.1.1 // indirect
    github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
    github.com/deckarep/golang-set/v2 v2.6.0 // indirect
    github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect
    github.com/dchest/siphash v1.2.3 // indirect
    github.com/deckarep/golang-set/v2 v2.8.0 // indirect
    github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect
    github.com/deepmap/oapi-codegen v1.8.2 // indirect
    github.com/dlclark/regexp2 v1.7.0 // indirect
    github.com/docker/go-units v0.5.0 // indirect
    github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127 // indirect
    github.com/elastic/gosigar v0.14.3 // indirect
    github.com/ethereum/c-kzg-4844 v1.0.0 // indirect
    github.com/ethereum/go-bigmodexpfix v0.0.0-20250911101455-f9e208c548ab // indirect
    github.com/ethereum/go-verkle v0.2.2 // indirect
    github.com/felixge/httpsnoop v1.0.4 // indirect
    github.com/ferranbt/fastssz v0.1.3 // indirect
    github.com/ferranbt/fastssz v0.1.4 // indirect
    github.com/flynn/noise v1.1.0 // indirect
    github.com/francoispqt/gojay v1.2.13 // indirect
    github.com/getsentry/sentry-go v0.27.0 // indirect
@@ -148,7 +148,7 @@ require (
    github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect
    github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
    github.com/godbus/dbus/v5 v5.1.0 // indirect
    github.com/gofrs/flock v0.8.1 // indirect
    github.com/gofrs/flock v0.12.1 // indirect
    github.com/google/gopacket v1.1.19 // indirect
    github.com/google/pprof v0.0.0-20250202011525-fc3143867406 // indirect
    github.com/gorilla/websocket v1.5.3 // indirect
@@ -156,9 +156,8 @@ require (
    github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 // indirect
    github.com/hashicorp/go-bexpr v0.1.10 // indirect
    github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
    github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4 // indirect
    github.com/holiman/billy v0.0.0-20250707135307-f2f9b9aae7db // indirect
    github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
    github.com/holiman/goevmlab v0.0.0-20241121133100-cfa6b078c8c4 // indirect
    github.com/huin/goupnp v1.3.0 // indirect
    github.com/influxdata/influxdb-client-go/v2 v2.4.0 // indirect
    github.com/influxdata/influxdb1-client v0.0.0-20220302092344-a9ab5670611c // indirect
@@ -167,7 +166,7 @@ require (
    github.com/jackpal/go-nat-pmp v1.0.2 // indirect
    github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
    github.com/juju/ansiterm v0.0.0-20180109212912-720a0952cc2a // indirect
    github.com/klauspost/compress v1.17.11 // indirect
    github.com/klauspost/compress v1.18.0 // indirect
    github.com/klauspost/cpuid/v2 v2.2.9 // indirect
    github.com/koron/go-ssdp v0.0.5 // indirect
    github.com/kr/text v0.2.0 // indirect
@@ -184,15 +183,14 @@ require (
    github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect
    github.com/mattn/go-colorable v0.1.13 // indirect
    github.com/mattn/go-isatty v0.0.20 // indirect
    github.com/mattn/go-runewidth v0.0.15 // indirect
    github.com/mattn/go-runewidth v0.0.16 // indirect
    github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
    github.com/miekg/dns v1.1.63 // indirect
    github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
    github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
    github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect
    github.com/mitchellh/mapstructure v1.4.1 // indirect
    github.com/mitchellh/mapstructure v1.5.0 // indirect
    github.com/mitchellh/pointerstructure v1.2.0 // indirect
    github.com/mmcloughlin/addchain v0.4.0 // indirect
    github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
    github.com/modern-go/reflect2 v1.0.2 // indirect
    github.com/mr-tron/base58 v1.2.0 // indirect
@@ -248,12 +246,12 @@ require (
    github.com/spaolacci/murmur3 v1.1.0 // indirect
    github.com/stretchr/objx v0.5.2 // indirect
    github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
    github.com/tklauser/go-sysconf v0.3.13 // indirect
    github.com/tklauser/numcpus v0.7.0 // indirect
    github.com/tklauser/go-sysconf v0.3.15 // indirect
    github.com/tklauser/numcpus v0.10.0 // indirect
    github.com/wealdtech/go-eth2-types/v2 v2.8.2 // indirect
    github.com/wlynxg/anet v0.0.5 // indirect
    github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect
    github.com/yusufpapurcu/wmi v1.2.3 // indirect
    github.com/yusufpapurcu/wmi v1.2.4 // indirect
    go.opentelemetry.io/auto/sdk v1.1.0 // indirect
    go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 // indirect
    go.opentelemetry.io/otel/metric v1.35.0 // indirect
@@ -275,7 +273,6 @@ require (
    gopkg.in/inf.v0 v0.9.1 // indirect
    gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
    lukechampine.com/blake3 v1.3.0 // indirect
    rsc.io/tmplfunc v0.0.3 // indirect
    sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
    sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
    sigs.k8s.io/yaml v1.3.0 // indirect
126	go.sum
@@ -48,22 +48,20 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03
github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c h1:pxW6RcqyfI9/kWtOwnv/G+AzdKuy2ZrqINhenH4HyNs=
github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/DataDog/zstd v1.5.5 h1:oWf5W7GtOLgp6bciQYDmhHHjdhYkALu6S/5Ni9ZgSvQ=
github.com/DataDog/zstd v1.5.5/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw=
github.com/DataDog/zstd v1.5.7 h1:ybO8RBeh29qrxIhCA9E8gKY6xfONU9T6G6aP9DTKfLE=
github.com/DataDog/zstd v1.5.7/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw=
github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
github.com/MariusVanDerWijden/FuzzyVM v0.0.0-20240516070431-7828990cad7d h1:RQtzNvriR3Yu5CvVBTJPwDmfItBT90TWZ3fFondhc08=
github.com/MariusVanDerWijden/FuzzyVM v0.0.0-20240516070431-7828990cad7d/go.mod h1:gWTykV/ZinShgltWofTEJY4TsletuvGhB6l4+Ai2F+E=
github.com/MariusVanDerWijden/tx-fuzz v1.4.0 h1:Tq4lXivsR8mtoP4RpasUDIUpDLHfN1YhFge/kzrzK78=
github.com/MariusVanDerWijden/tx-fuzz v1.4.0/go.mod h1:gmOVECg7o5FY5VU3DQ/fY0zTk/ExBdMkUGz0vA8qqms=
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/OffchainLabs/go-bitfield v0.0.0-20251031151322-f427d04d8506 h1:d/SJkN8/9Ca+1YmuDiUJxAiV4w/a9S8NcsG7GMQSrVI=
github.com/OffchainLabs/go-bitfield v0.0.0-20251031151322-f427d04d8506/go.mod h1:6TZI4FU6zT8x6ZfWa1J8YQ2NgW0wLV/W3fHRca8ISBo=
github.com/ProjectZKM/Ziren/crates/go-runtime/zkvm_runtime v0.0.0-20251001021608-1fe7b43fc4d6 h1:1zYrtlhrZ6/b6SAjLSfKzWtdgqK0U+HtH/VcBWh1BaU=
github.com/ProjectZKM/Ziren/crates/go-runtime/zkvm_runtime v0.0.0-20251001021608-1fe7b43fc4d6/go.mod h1:ioLG6R+5bUSO1oeGSDxOV3FADARuMoytZCSX6MEMQkI=
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/Shopify/sarama v1.26.1/go.mod h1:NbSGBSSndYaIhRcBtY9V0U7AyH+x71bG668AuWys/yU=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI=
github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI=
github.com/VictoriaMetrics/fastcache v1.13.0 h1:AW4mheMR5Vd9FkAPUv+NH6Nhw+fmbTMGMsNAoA/+4G0=
github.com/VictoriaMetrics/fastcache v1.13.0/go.mod h1:hHXhl4DA2fTL2HTZDJFXWgW0LNjo6B+4aj2Wmng3TjU=
github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
@@ -71,9 +69,8 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
github.com/allegro/bigcache v1.2.1 h1:hg1sY1raCwic3Vnsvje6TT7/pnZba83LeFck5NrFKSc=
github.com/allegro/bigcache v1.2.1/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
@@ -100,8 +97,8 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bits-and-blooms/bitset v1.17.0 h1:1X2TS7aHz1ELcC0yU1y2stUs/0ig5oMU6STFZGrhvHI=
github.com/bits-and-blooms/bitset v1.17.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
github.com/bits-and-blooms/bitset v1.22.0 h1:Tquv9S8+SGaS3EhyA+up3FXzmkhxPGjQQCkcs2uw7w4=
github.com/bits-and-blooms/bitset v1.22.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
github.com/bradfitz/gomemcache v0.0.0-20170208213004-1952afaa557d/go.mod h1:PmM6Mmwb0LSuEubjR8N7PtNe1KxZLtOUHtbeikc5h60=
github.com/btcsuite/btcd/btcec/v2 v2.3.4 h1:3EJjcN70HCu/mwqlUsGK8GcNVyLVxFDlWurTXGPFfiQ=
@@ -115,7 +112,6 @@ github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA
github.com/cespare/cp v1.1.1 h1:nCb6ZLdB7NRaqsm91JtQTAme2SKJzXVsdPIPkyJr1MU=
github.com/cespare/cp v1.1.1/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
@@ -145,17 +141,15 @@ github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce h1:giXvy4KSc/6g/e
github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce/go.mod h1:9/y3cnZ5GKakj/H4y9r9GTjCvAFta7KLgSHPJJYc52M=
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE=
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs=
github.com/cockroachdb/pebble v1.1.2 h1:CUh2IPtR4swHlEj48Rhfzw6l/d0qA31fItcIszQVIsA=
github.com/cockroachdb/pebble v1.1.2/go.mod h1:4exszw1r40423ZsmkG/09AFEG83I0uDgfujJdbL6kYU=
github.com/cockroachdb/pebble v1.1.5 h1:5AAWCBWbat0uE0blr8qzufZP5tBjkRyy/jWe1QWLnvw=
github.com/cockroachdb/pebble v1.1.5/go.mod h1:17wO9el1YEigxkP/YtV8NtCivQDgoCyBg5c4VR/eOWo=
github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30=
github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo=
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ=
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
github.com/consensys/bavard v0.1.22 h1:Uw2CGvbXSZWhqK59X0VG/zOjpTFuOMcPLStrp1ihI0A=
github.com/consensys/bavard v0.1.22/go.mod h1:k/zVjHHC4B+PQy1Pg7fgvG3ALicQw540Crag8qx+dZs=
github.com/consensys/gnark-crypto v0.14.0 h1:DDBdl4HaBtdQsq/wfMwJvZNE80sHidrK3Nfrefatm0E=
github.com/consensys/gnark-crypto v0.14.0/go.mod h1:CU4UijNPsHawiVGNxe9co07FkzCeWHHrb1li/n1XoU0=
github.com/consensys/gnark-crypto v0.18.0 h1:vIye/FqI50VeAr0B3dx+YjeIvmc3LWz4yEfbWBpTUf0=
github.com/consensys/gnark-crypto v0.18.0/go.mod h1:L3mXGFTe1ZN+RSJ+CLjUt9x7PNdx8ubaYfDROyp2Z8c=
github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM=
github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw=
@@ -170,8 +164,10 @@ github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSV
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.5 h1:ZtcqGrnekaHpVLArFSe4HK5DoKx1T0rq2DwVB0alcyc=
github.com/cpuguy83/go-md2man/v2 v2.0.5/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cpuguy83/go-md2man/v2 v2.0.7 h1:zbFlGlXEAKlwXpmvle3d8Oe3YnkKIK4xSRTd3sHPnBo=
github.com/cpuguy83/go-md2man/v2 v2.0.7/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/crate-crypto/go-eth-kzg v1.4.0 h1:WzDGjHk4gFg6YzV0rJOAsTK4z3Qkz5jd4RE3DAvPFkg=
github.com/crate-crypto/go-eth-kzg v1.4.0/go.mod h1:J9/u5sWfznSObptgfa92Jq8rTswn6ahQWEuiLHOjCUI=
github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a h1:W8mUrRp6NOVl3J+MYp5kPMoUZPp7aOYHtaua31lwRHg=
github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a/go.mod h1:sTwzHBvIzm2RfVCGNEBZgRyjwK40bVoun3ZnGOCafNM=
github.com/crate-crypto/go-kzg-4844 v1.1.0 h1:EN/u9k2TF6OWSHrCCDBBU6GLNMq88OspHHlMnHfoyU4=
@@ -186,12 +182,14 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU=
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U=
github.com/deckarep/golang-set/v2 v2.6.0 h1:XfcQbWM1LlMB8BsJ8N9vW5ehnnPVIw0je80NsVHagjM=
github.com/deckarep/golang-set/v2 v2.6.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4=
github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y=
github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnNEcHYvcCuK6dPZSg=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0=
github.com/dchest/siphash v1.2.3 h1:QXwFc8cFOR2dSa/gE6o/HokBMWtLUaNDVd+22aKHeEA=
github.com/dchest/siphash v1.2.3/go.mod h1:0NvQU092bT0ipiFN++/rXm69QG9tVxLAlQHIXMPAkHc=
github.com/deckarep/golang-set/v2 v2.8.0 h1:swm0rlPCmdWn9mESxKOjWk8hXSqoxOp+ZlfuyaAdFlQ=
github.com/deckarep/golang-set/v2 v2.8.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4=
github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8=
github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40=
github.com/deepmap/oapi-codegen v1.6.0/go.mod h1:ryDa9AgbELGeB+YEXE1dR53yAjHwFvE9iAUlWl9Al3M=
github.com/deepmap/oapi-codegen v1.8.2 h1:SegyeYGcdi0jLLrpbCMoJxnUUn8GBXHsvr4rbzjuhfU=
github.com/deepmap/oapi-codegen v1.8.2/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRktEqrX88CvjIw=
@@ -223,8 +221,8 @@ github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaB
github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
github.com/elastic/gosigar v0.14.3 h1:xwkKwPia+hSfg9GqrCUKYdId102m9qTJIIr7egmK/uo=
github.com/elastic/gosigar v0.14.3/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
github.com/emicklei/dot v0.11.0 h1:Ase39UD9T9fRBOb5ptgpixrxfx8abVzNWZi2+lr53PI=
github.com/emicklei/dot v0.11.0/go.mod h1:DeV7GvQtIw4h2u73RKBkkFdvVAz0D9fzeJrgPW6gy/s=
github.com/emicklei/dot v1.6.2 h1:08GN+DD79cy/tzN6uLCT84+2Wk9u+wvqP+Hkx/dIR8A=
github.com/emicklei/dot v1.6.2/go.mod h1:DeV7GvQtIw4h2u73RKBkkFdvVAz0D9fzeJrgPW6gy/s=
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g=
@@ -234,12 +232,12 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m
github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/ethereum/c-kzg-4844 v1.0.0 h1:0X1LBXxaEtYD9xsyj9B9ctQEZIpnvVDeoBx8aHEwTNA=
github.com/ethereum/c-kzg-4844 v1.0.0/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
github.com/ethereum/c-kzg-4844/v2 v2.1.5 h1:aVtoLK5xwJ6c5RiqO8g8ptJ5KU+2Hdquf6G3aXiHh5s=
github.com/ethereum/c-kzg-4844/v2 v2.1.5/go.mod h1:u59hRTTah4Co6i9fDWtiCjTrblJv0UwsqZKCc0GfgUs=
github.com/ethereum/go-ethereum v1.15.9 h1:bRra1zi+/q+qyXZ6fylZOrlaF8kDdnlTtzNTmNHfX+g=
github.com/ethereum/go-ethereum v1.15.9/go.mod h1:+S9k+jFzlyVTNcYGvqFhzN/SFhI6vA+aOY4T5tLSPL0=
github.com/ethereum/go-bigmodexpfix v0.0.0-20250911101455-f9e208c548ab h1:rvv6MJhy07IMfEKuARQ9TKojGqLVNxQajaXEp/BoqSk=
github.com/ethereum/go-bigmodexpfix v0.0.0-20250911101455-f9e208c548ab/go.mod h1:IuLm4IsPipXKF7CW5Lzf68PIbZ5yl7FFd74l/E0o9A8=
github.com/ethereum/go-ethereum v1.16.7 h1:qeM4TvbrWK0UC0tgkZ7NiRsmBGwsjqc64BHo20U59UQ=
github.com/ethereum/go-ethereum v1.16.7/go.mod h1:Fs6QebQbavneQTYcA39PEKv2+zIjX7rPUZ14DER46wk=
github.com/ethereum/go-verkle v0.2.2 h1:I2W0WjnrFUIzzVPwm8ykY+7pL2d4VhlsePn4j7cnFk8=
github.com/ethereum/go-verkle v0.2.2/go.mod h1:M3b90YRnzqKyyzBEWJGqj8Qff4IDeXnzFw0P9bFw3uk=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
@@ -248,8 +246,8 @@ github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4Nij
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/ferranbt/fastssz v0.0.0-20210120143747-11b9eff30ea9/go.mod h1:DyEu2iuLBnb/T51BlsiO3yLYdJC6UbGMrIkqK1KmQxM=
github.com/ferranbt/fastssz v0.1.3 h1:ZI+z3JH05h4kgmFXdHuR1aWYsgrg7o+Fw7/NCzM16Mo=
github.com/ferranbt/fastssz v0.1.3/go.mod h1:0Y9TEd/9XuFlh7mskMPfXiI2Dkw4Ddg9EyXt1W7MRvE=
github.com/ferranbt/fastssz v0.1.4 h1:OCDB+dYDEQDvAgtAGnTSidK1Pe2tW3nFV40XyMkTeDY=
github.com/ferranbt/fastssz v0.1.4/go.mod h1:Ea3+oeoRGGLGm5shYAeDgu6PGUlcvQhE2fILyD9+tGg=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg=
github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
@@ -325,8 +323,8 @@ github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5x
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=
github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E=
github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0=
github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
@@ -373,8 +371,8 @@ github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8l
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.5-0.20231225225746-43d5d4cd4e0e h1:4bw4WeyTYPp0smaXiJZCNnLrvVBqirQVreixayXezGc=
github.com/golang/snappy v0.0.5-0.20231225225746-43d5d4cd4e0e/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs=
github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
@@ -418,7 +416,6 @@ github.com/google/pprof v0.0.0-20230207041349-798e818bf904/go.mod h1:uglQLonpP8q
github.com/google/pprof v0.0.0-20250202011525-fc3143867406 h1:wlQI2cYY0BsWmmPPAnxfQ8SDW0S3Jasn+4B8kXFxprg=
github.com/google/pprof v0.0.0-20250202011525-fc3143867406/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -486,12 +483,10 @@ github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/J
github.com/herumi/bls-eth-go-binary v0.0.0-20210130185500-57372fb27371/go.mod h1:luAnRm3OsMQeokhGzpYmc0ZKwawY7o87PUEP11Z7r7U=
github.com/herumi/bls-eth-go-binary v1.31.0 h1:9eeW3EA4epCb7FIHt2luENpAW69MvKGL5jieHlBiP+w=
github.com/herumi/bls-eth-go-binary v1.31.0/go.mod h1:luAnRm3OsMQeokhGzpYmc0ZKwawY7o87PUEP11Z7r7U=
github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4 h1:X4egAf/gcS1zATw6wn4Ej8vjuVGxeHdan+bRb2ebyv4=
github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4/go.mod h1:5GuXa7vkL8u9FkFuWdVvfR5ix8hRB7DbOAaYULamFpc=
github.com/holiman/billy v0.0.0-20250707135307-f2f9b9aae7db h1:IZUYC/xb3giYwBLMnr8d0TGTzPKFGNTCGgGLoyeX330=
github.com/holiman/billy v0.0.0-20250707135307-f2f9b9aae7db/go.mod h1:xTEYN9KCHxuYHs+NmrmzFcnvHMzLLNiGFafCb1n3Mfg=
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
github.com/holiman/goevmlab v0.0.0-20241121133100-cfa6b078c8c4 h1:JHZ8k9n9G9KXIo1qrvK5Cxah6ax5BR0qVTA9bFYl1oM=
github.com/holiman/goevmlab v0.0.0-20241121133100-cfa6b078c8c4/go.mod h1:+DBd7lup47uusCYWbkJPfHRG4LYjBHvyXU0c+z26/U4=
github.com/holiman/uint256 v1.3.2 h1:a9EgMPSC1AAaj1SZL5zIQD3WbwTuHrMGOerLjGmM/TA=
github.com/holiman/uint256 v1.3.2/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
@@ -549,8 +544,8 @@ github.com/kisielk/errcheck v1.8.0/go.mod h1:1kLL+jV4e+CFfueBmI1dSK2ADDyQnlrnrY/
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.9.8/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.10.1/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/klauspost/cpuid v1.2.3/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/cpuid/v2 v2.2.9 h1:66ze0taIn2H33fBvCkXuv9BmCwDfafmiIVpKV9kKGuY=
github.com/klauspost/cpuid/v2 v2.2.9/go.mod h1:rqkxqrZ1EhYM9G+hXH7YdowN5R5RGN6NK4QwQ3WMXF8=
@@ -646,8 +641,8 @@ github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
@@ -681,13 +676,11 @@ github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:F
github.com/mitchellh/mapstructure v0.0.0-20170523030023-d0303fe80992/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A=
github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4=
github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY=
github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU=
github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -995,8 +988,8 @@ github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3
github.com/spf13/jwalterweatherman v0.0.0-20170901151539-12bd96e66386/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v1.0.1-0.20170901120850-7aff26db30c1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.0.0/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM=
github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA=
github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg=
@@ -1033,20 +1026,18 @@ github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3/go.mod h1:
github.com/thomaso-mirodin/intmath v0.0.0-20160323211736-5dc6d854e46e h1:cR8/SYRgyQCt5cNCMniB/ZScMkhI9nk8U5C7SbISXjo=
github.com/thomaso-mirodin/intmath v0.0.0-20160323211736-5dc6d854e46e/go.mod h1:Tu4lItkATkonrYuvtVjG0/rhy15qrNGNTjPdaphtZ/8=
github.com/tjfoc/gmsm v1.3.0/go.mod h1:HaUcFuY0auTiaHB9MHFGCPx5IaLhTUd2atbCFBQXn9w=
github.com/tklauser/go-sysconf v0.3.13 h1:GBUpcahXSpR2xN01jhkNAbTLRk2Yzgggk8IM08lq3r4=
github.com/tklauser/go-sysconf v0.3.13/go.mod h1:zwleP4Q4OehZHGn4CYZDipCgg9usW5IJePewFCGVEa0=
github.com/tklauser/numcpus v0.7.0 h1:yjuerZP127QG9m5Zh/mSO4wqurYil27tHrqwRoRjpr4=
github.com/tklauser/numcpus v0.7.0/go.mod h1:bb6dMVcj8A42tSE7i32fsIUCbQNllK5iDguyOZRUzAY=
github.com/tklauser/go-sysconf v0.3.15 h1:VE89k0criAymJ/Os65CSn1IXaol+1wrsFHEB8Ol49K4=
github.com/tklauser/go-sysconf v0.3.15/go.mod h1:Dmjwr6tYFIseJw7a3dRLJfsHAMXZ3nEnL/aZY+0IuI4=
github.com/tklauser/numcpus v0.10.0 h1:18njr6LDBk1zuna922MgdjQuJFjrdppsZG60sHGfjso=
github.com/tklauser/numcpus v0.10.0/go.mod h1:BiTKazU708GQTYF4mB+cmlpT2Is1gLk7XVuEeem8LsQ=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/trailofbits/go-mutexasserts v0.0.0-20250212181730-4c2b8e9e784b h1:EBoYk5zHOfuHDBqLFx4eSPRVcbnW+L3aFJzoCi8zRnk=
github.com/trailofbits/go-mutexasserts v0.0.0-20250212181730-4c2b8e9e784b/go.mod h1:4R6Qam+w871wOlyRq59zRLjhb5x9/De/wgPeaCTaCwI=
github.com/umbracle/gohashtree v0.0.2-alpha.0.20230207094856-5b775a815c10 h1:CQh33pStIp/E30b7TxDlXfM0145bn2e8boI30IxAhTg=
github.com/umbracle/gohashtree v0.0.2-alpha.0.20230207094856-5b775a815c10/go.mod h1:x/Pa0FF5Te9kdrlZKJK82YmAkvL8+f989USgz6Jiw7M=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli/v2 v2.27.5 h1:WoHEJLdsXr6dDWoJgMq/CboDmyY/8HMMH1fTECbih+w=
github.com/urfave/cli/v2 v2.27.5/go.mod h1:3Sevf16NykTbInEnD0yKkjDAeZDS0A6bzhBH5hrMvTQ=
github.com/urfave/cli/v2 v2.27.6 h1:VdRdS98FNhKZ8/Az8B7MTyGQmpIr36O1EHybx/LaZ4g=
github.com/urfave/cli/v2 v2.27.6/go.mod h1:3Sevf16NykTbInEnD0yKkjDAeZDS0A6bzhBH5hrMvTQ=
github.com/uudashr/gocognit v1.0.5 h1:rrSex7oHr3/pPLQ0xoWq108XMU8s678FJcQ+aSfOHa4=
github.com/uudashr/gocognit v1.0.5/go.mod h1:wgYz0mitoKOTysqxTDMOUXg+Jb5SvtihkfmugIZYpEA=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
@@ -1081,8 +1072,8 @@ github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw=
github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU=
@@ -1184,8 +1175,8 @@ golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw=
golang.org/x/exp v0.0.0-20250128182459-e0ece0dbea4c h1:KL/ZBHXgKGVmuZBZ01Lt57yE5ws8ZPSkkihmEyq7FXc=
golang.org/x/exp v0.0.0-20250128182459-e0ece0dbea4c/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU=
golang.org/x/exp v0.0.0-20250506013437-ce4c2cf36ca6 h1:y5zboxd6LQAqYIhHnB48p0ByQ/GnQx2BE33L8BOHQkI=
golang.org/x/exp v0.0.0-20250506013437-ce4c2cf36ca6/go.mod h1:U6Lno4MTRCDY+Ba7aCcauB9T60gsv5s4ralQzP72ZoQ=
golang.org/x/exp/typeparams v0.0.0-20231108232855-2478ac86f678 h1:1P7xPZEwZMoBoz0Yze5Nx2/4pxj6nw9ZqHWXqP0iRgQ=
golang.org/x/exp/typeparams v0.0.0-20231108232855-2478ac86f678/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
@@ -1391,7 +1382,6 @@ golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
@@ -1693,8 +1683,6 @@ lukechampine.com/blake3 v1.3.0/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU=
rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
@@ -43,6 +43,7 @@ func WriteJson(w http.ResponseWriter, v any) {
func WriteSsz(w http.ResponseWriter, respSsz []byte) {
    w.Header().Set("Content-Length", strconv.Itoa(len(respSsz)))
    w.Header().Set("Content-Type", api.OctetStreamMediaType)
    w.WriteHeader(http.StatusOK)
    if _, err := io.Copy(w, io.NopCloser(bytes.NewReader(respSsz))); err != nil {
        log.WithError(err).Error("Could not write response message")
    }
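The explicit WriteHeader call added here matters for ordering: net/http sends the headers on the first WriteHeader (or Write) call, so Content-Length and Content-Type must be set before it. A minimal usage sketch (the handler name and payload are hypothetical):

    func handleStateSSZ(w http.ResponseWriter, _ *http.Request) {
        respSsz := []byte{0x01, 0x02} // SSZ-encoded response body
        WriteSsz(w, respSsz)          // sets headers, writes 200, then streams the bytes
    }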
@@ -99,7 +99,7 @@ func GethCancunTime(genesisTime time.Time, cfg *clparams.BeaconChainConfig) *uin
}

// GethPragueTime calculates the absolute time of the prague (aka electra) fork block
// by adding the relative time of the capella the fork epoch to the given genesis timestamp.
// by adding the relative time of the electra fork epoch to the given genesis timestamp.
func GethPragueTime(genesisTime time.Time, cfg *clparams.BeaconChainConfig) *uint64 {
    var pragueTime *uint64
    if cfg.ElectraForkEpoch != math.MaxUint64 {
@@ -128,6 +128,45 @@ func GethOsakaTime(genesisTime time.Time, cfg *clparams.BeaconChainConfig) *uint
    return osakaTime
}

// GethBPO1Time calculates the absolute time of the BPO1 activation
// by finding the first BlobSchedule entry with MaxBlobsPerBlock > 9 (Electra's limit)
// which corresponds to the first BPO increase.
func GethBPO1Time(genesisTime time.Time, cfg *clparams.BeaconChainConfig) *uint64 {
    for _, entry := range cfg.BlobSchedule {
        // BPO1 is the first entry that increases beyond Electra's 9 blobs
        if entry.MaxBlobsPerBlock > uint64(cfg.DeprecatedMaxBlobsPerBlockElectra) {
            startSlot, err := slots.EpochStart(entry.Epoch)
            if err == nil {
                startTime := slots.UnsafeStartTime(genesisTime, startSlot)
                newTime := uint64(startTime.Unix())
                return &newTime
            }
        }
    }
    return nil
}

// GethBPO2Time calculates the absolute time of the BPO2 activation
// by finding the second BlobSchedule entry with MaxBlobsPerBlock > 9.
func GethBPO2Time(genesisTime time.Time, cfg *clparams.BeaconChainConfig) *uint64 {
    count := 0
    for _, entry := range cfg.BlobSchedule {
        // Count entries that are beyond Electra's limit
        if entry.MaxBlobsPerBlock > uint64(cfg.DeprecatedMaxBlobsPerBlockElectra) {
            count++
            if count == 2 { // BPO2 is the second such entry
                startSlot, err := slots.EpochStart(entry.Epoch)
                if err == nil {
                    startTime := slots.UnsafeStartTime(genesisTime, startSlot)
                    newTime := uint64(startTime.Unix())
                    return &newTime
                }
            }
        }
    }
    return nil
}

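GethBPO1Time and GethBPO2Time differ only in which qualifying BlobSchedule entry they select, so the shared logic can be read as one parameterized scan. A sketch of that generalization (a hypothetical helper, not part of this change; it reuses only the calls shown above):

    // gethBPOTime returns the start time of the n-th schedule entry whose
    // MaxBlobsPerBlock exceeds Electra's limit, or nil if there is none.
    func gethBPOTime(genesisTime time.Time, cfg *clparams.BeaconChainConfig, n int) *uint64 {
        count := 0
        for _, entry := range cfg.BlobSchedule {
            if entry.MaxBlobsPerBlock <= uint64(cfg.DeprecatedMaxBlobsPerBlockElectra) {
                continue
            }
            count++
            if count != n {
                continue
            }
            startSlot, err := slots.EpochStart(entry.Epoch)
            if err != nil {
                return nil
            }
            t := uint64(slots.UnsafeStartTime(genesisTime, startSlot).Unix())
            return &t
        }
        return nil
    }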
// GethTestnetGenesis creates a genesis.json for eth1 clients with a set of defaults suitable for ephemeral testnets,
// like in an e2e test. The parameters are minimal but the full value is returned unmarshaled so that it can be
// customized as desired.
@@ -149,6 +188,8 @@ func GethTestnetGenesis(genesis time.Time, cfg *clparams.BeaconChainConfig) *cor
    if cfg.FuluForkEpoch == 0 {
        osakaTime = &genesisTime
    }
    bpo1Time := GethBPO1Time(genesis, cfg)
    bpo2Time := GethBPO2Time(genesis, cfg)
    cc := &params.ChainConfig{
        ChainID:        big.NewInt(defaultTestChainId),
        HomesteadBlock: bigz,
@@ -171,18 +212,16 @@ func GethTestnetGenesis(genesis time.Time, cfg *clparams.BeaconChainConfig) *cor
        CancunTime:             cancunTime,
        PragueTime:             pragueTime,
        OsakaTime:              osakaTime,
        BPO1Time:               bpo1Time,
        BPO2Time:               bpo2Time,
        DepositContractAddress: common.HexToAddress(cfg.DepositContractAddress),
        BlobScheduleConfig: &params.BlobScheduleConfig{
            Cancun: &params.BlobConfig{
                Target:         3,
                Max:            6,
                UpdateFraction: 3338477,
            },
            Prague: &params.BlobConfig{
                Target:         6,
                Max:            9,
                UpdateFraction: 5007716,
            },
            Cancun: params.DefaultCancunBlobConfig,
            Prague: params.DefaultPragueBlobConfig,
            Osaka:  params.DefaultOsakaBlobConfig,
            BPO1:   params.DefaultBPO1BlobConfig,
            BPO2:   params.DefaultBPO2BlobConfig,
            BPO3:   params.DefaultBPO3BlobConfig,
        },
    }
    da := defaultDepositContractAllocation(cfg.DepositContractAddress)

@@ -5,6 +5,7 @@ load("@prysm//tools/go:def.bzl", "go_test")
# gazelle:exclude mainnet_scenario_e2e_test.go
# gazelle:exclude minimal_scenario_e2e_test.go
# gazelle:exclude minimal_builder_e2e_test.go
# gazelle:exclude minimal_postmerge_e2e_test.go

# Presubmit tests represent the group of endtoend tests that are run on pull
# requests and must be passing before a pull request can merge.
@@ -27,6 +28,7 @@ test_suite(
    ],
    tests = [
        ":go_builder_test",
        ":go_minimal_postmerge_test",
        ":go_mainnet_test",
    ],
)
@@ -88,7 +90,7 @@ common_deps = [
# gazelle:ignore
go_test(
    name = "go_default_test",
    size = "large",
    size = "enormous",
    testonly = True,
    srcs = [
        "component_handler_test.go",
@@ -153,6 +155,39 @@ go_test(
    deps = common_deps,
)

# Full e2e test from post-merge (Bellatrix) through all forks (post-submit only, takes longer)
go_test(
    name = "go_minimal_postmerge_test",
    size = "enormous",
    testonly = True,
    srcs = [
        "component_handler_test.go",
        "endtoend_setup_test.go",
        "endtoend_test.go",
        "minimal_postmerge_e2e_test.go",
    ],
    args = ["-test.v"],
    data = [
        "//:prysm_sh",
        "//cmd/beacon-chain",
        "//cmd/validator",
        "//config/params:custom_configs",
        "//tools/bootnode",
        "@com_github_ethereum_go_ethereum//cmd/geth",
        "@web3signer",
    ],
    eth_network = "minimal",
    flaky = True,
    shard_count = 2,
    tags = [
        "e2e",
        "manual",
        "minimal",
        "requires-network",
    ],
    deps = common_deps,
)

go_test(
    name = "go_mainnet_test",
    size = "large",
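Because the new suite carries the "manual" tag, Bazel wildcard target patterns skip it and it has to be requested explicitly, e.g. (assuming the package path testing/endtoend, which matches the targets referenced elsewhere in this diff):

    bazel test //testing/endtoend:go_minimal_postmerge_test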
@@ -28,6 +28,7 @@ go_library(
        "//testing/endtoend/types:go_default_library",
        "//testing/middleware/engine-api-proxy:go_default_library",
        "//testing/util:go_default_library",
        "//time/slots:go_default_library",
        "@com_github_ethereum_go_ethereum//:go_default_library",
        "@com_github_ethereum_go_ethereum//accounts/abi/bind:go_default_library",
        "@com_github_ethereum_go_ethereum//accounts/keystore:go_default_library",
@@ -38,8 +39,6 @@ go_library(
        "@com_github_ethereum_go_ethereum//ethclient/gethclient:go_default_library",
        "@com_github_ethereum_go_ethereum//rpc:go_default_library",
        "@com_github_holiman_uint256//:go_default_library",
        "@com_github_mariusvanderwijden_fuzzyvm//filler:go_default_library",
        "@com_github_mariusvanderwijden_tx_fuzz//:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@io_bazel_rules_go//go/tools/bazel:go_default_library",
@@ -267,6 +267,10 @@ func (d *Depositor) txops(ctx context.Context) (*bind.TransactOpts, error) {
        return nil, err
    }
    txo.Nonce = big.NewInt(0).SetUint64(nonce)
    // Set a high gas price to ensure deposit transactions can replace any pending
    // transactions from the transaction generator that may be using the same nonce.
    // The transaction generator uses 1e11 (100 Gwei), so we use 2e11 (200 Gwei).
    txo.GasPrice = big.NewInt(2e11)
    return txo, nil
}

@@ -3,6 +3,7 @@ package eth1
import (
    "context"
    "math/big"
    "strings"
    "time"

    "github.com/OffchainLabs/prysm/v7/testing/endtoend/params"
@@ -31,10 +32,6 @@ var _ e2etypes.EngineProxy = (*Proxy)(nil)

// WaitForBlocks waits for a certain amount of blocks to be mined by the ETH1 chain before returning.
func WaitForBlocks(ctx context.Context, web3 *ethclient.Client, key *keystore.Key, blocksToWait uint64) error {
    nonce, err := web3.PendingNonceAt(ctx, key.Address)
    if err != nil {
        return err
    }
    chainID, err := web3.NetworkID(ctx)
    if err != nil {
        return err
@@ -49,19 +46,36 @@ func WaitForBlocks(ctx context.Context, web3 *ethclient.Client, key *keystore.Ke
        if ctx.Err() != nil {
            return ctx.Err()
        }
        // Get fresh nonce each iteration to handle any pending transactions
        nonce, err := web3.PendingNonceAt(ctx, key.Address)
        if err != nil {
            return err
        }
        gasPrice, err := web3.SuggestGasPrice(ctx)
        if err != nil {
            return err
        }
        // Bump gas price by 20% to ensure we can replace any pending transactions
        gasPrice = new(big.Int).Mul(gasPrice, big.NewInt(120))
        gasPrice = new(big.Int).Div(gasPrice, big.NewInt(100))

        spamTX := types.NewTransaction(nonce, key.Address, big.NewInt(0), params.SpamTxGasLimit, gasPrice, []byte{})
        signed, err := types.SignTx(spamTX, types.NewEIP155Signer(chainID), key.PrivateKey)
        if err != nil {
            return err
        }
        if err = web3.SendTransaction(ctx, signed); err != nil {
            // If replacement error, try again with next iteration which will get fresh nonce
            if strings.Contains(err.Error(), "replacement transaction underpriced") {
                time.Sleep(timeGapPerMiningTX)
                block, err = web3.BlockByNumber(ctx, nil)
                if err != nil {
                    return err
                }
                continue
            }
            return err
        }
        nonce++
        time.Sleep(timeGapPerMiningTX)
        block, err = web3.BlockByNumber(ctx, nil)
        if err != nil {
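The 20% bump comfortably clears go-ethereum's default txpool replacement threshold of a 10% price increase, which is why underpriced-replacement errors become retryable rather than fatal here. Factored out, the bump is plain integer percentage math (a sketch, not part of the diff):

    // bumpPercent returns price increased by pct percent, rounding down.
    func bumpPercent(price *big.Int, pct int64) *big.Int {
        out := new(big.Int).Mul(price, big.NewInt(100+pct))
        return out.Div(out, big.NewInt(100))
    }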
@@ -10,12 +10,11 @@ import (
    "os"
    "time"

    "github.com/MariusVanDerWijden/FuzzyVM/filler"
    txfuzz "github.com/MariusVanDerWijden/tx-fuzz"
    fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
    "github.com/OffchainLabs/prysm/v7/config/params"
    "github.com/OffchainLabs/prysm/v7/crypto/rand"
    e2e "github.com/OffchainLabs/prysm/v7/testing/endtoend/params"
    "github.com/OffchainLabs/prysm/v7/time/slots"
    "github.com/ethereum/go-ethereum"
    "github.com/ethereum/go-ethereum/accounts/keystore"
    "github.com/ethereum/go-ethereum/common"
@@ -35,11 +34,12 @@ const txCount = 20
var fundedAccount *keystore.Key

type TransactionGenerator struct {
    keystore string
    seed     int64
    started  chan struct{}
    cancel   context.CancelFunc
    paused   bool
    keystore      string
    seed          int64
    started       chan struct{}
    cancel        context.CancelFunc
    paused        bool
    useLargeBlobs bool // Use large blob transactions (6 blobs per tx) for BPO testing
}

func (t *TransactionGenerator) UnderlyingProcess() *os.Process {
@@ -48,8 +48,8 @@ func (t *TransactionGenerator) UnderlyingProcess() *os.Process {
    return &os.Process{}
}

func NewTransactionGenerator(keystore string, seed int64) *TransactionGenerator {
    return &TransactionGenerator{keystore: keystore, seed: seed}
func NewTransactionGenerator(keystore string, seed int64, useLargeBlobs bool) *TransactionGenerator {
    return &TransactionGenerator{keystore: keystore, seed: seed, useLargeBlobs: useLargeBlobs}
}
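Callers opt in to the large-blob mode at construction time; a usage sketch (the keystore path and seed are placeholders):

    gen := NewTransactionGenerator("/tmp/e2e-keystore", 42, true) // true enables 6-blob txs for BPO testing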
func (t *TransactionGenerator) Start(ctx context.Context) error {
@@ -67,7 +67,7 @@ func (t *TransactionGenerator) Start(ctx context.Context) error {
    newGen := rand.NewDeterministicGenerator()
    if seed == 0 {
        seed = newGen.Int63()
        logrus.Infof("Seed for transaction generator is: %d", seed)
        logrus.WithField("Seed", seed).Info("Transaction generator")
    }
    // Set seed so that all transactions can be
    // deterministically generated.
@@ -86,12 +86,21 @@ func (t *TransactionGenerator) Start(ctx context.Context) error {
        return err
    }
    fundedAccount = newKey
    rnd := make([]byte, 10000)
    _, err = mathRand.Read(rnd) // #nosec G404
    if err != nil {
    // Ensure funding tx is mined before generating txs that rely on balance.
    // Mine 1 block using the miner key to include the funding transfer.
    backend := ethclient.NewClient(client)
    defer backend.Close()

    if err := WaitForBlocks(ctx, backend, mineKey, 1); err != nil {
        return errors.Wrap(err, "failed to mine block for funding tx")
    }

    // Ensure the funded account has a comfortable minimum balance for blob and fuzzed txs.
    minWei := new(big.Int).Mul(big.NewInt(1000), big.NewInt(0).SetUint64(params.BeaconConfig().GweiPerEth))
    minWei.Mul(minWei, big.NewInt(1e9)) // 1000 ETH in wei
    if err := ensureMinBalance(ctx, client, backend, mineKey, fundedAccount, minWei); err != nil {
        return err
    }
    f := filler.NewFiller(rnd)
    // Broadcast Transactions every slot
    txPeriod := time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second
    ticker := time.NewTicker(txPeriod)
@@ -105,7 +114,7 @@ func (t *TransactionGenerator) Start(ctx context.Context) error {
            continue
        }
        backend := ethclient.NewClient(client)
        err = SendTransaction(client, mineKey.PrivateKey, f, gasPrice, mineKey.Address.String(), txCount, backend, false)
        err = SendTransaction(client, mineKey.PrivateKey, gasPrice, mineKey.Address.String(), txCount, backend, false, t.useLargeBlobs)
        if err != nil {
            return err
        }
@@ -119,7 +128,7 @@ func (s *TransactionGenerator) Started() <-chan struct{} {
    return s.started
}

func SendTransaction(client *rpc.Client, key *ecdsa.PrivateKey, f *filler.Filler, gasPrice *big.Int, addr string, N uint64, backend *ethclient.Client, al bool) error {
func SendTransaction(client *rpc.Client, key *ecdsa.PrivateKey, gasPrice *big.Int, addr string, txCount uint64, backend *ethclient.Client, al bool, useLargeBlobs bool) error {
    sender := common.HexToAddress(addr)
    nonce, err := backend.PendingNonceAt(context.Background(), fundedAccount.Address)
    if err != nil {
@@ -136,30 +145,65 @@ func SendTransaction(client *rpc.Client, key *ecdsa.PrivateKey, f *filler.Filler
    if expectedPrice.Cmp(gasPrice) > 0 {
        gasPrice = expectedPrice
    }

    // Check if we're post-Fulu fork
    currentSlot := slots.CurrentSlot(e2e.TestParams.CLGenesisTime)
    currentEpoch := slots.ToEpoch(currentSlot)
    fuluForkEpoch := params.BeaconConfig().FuluForkEpoch
    isPostFulu := currentEpoch >= fuluForkEpoch

    g, _ := errgroup.WithContext(context.Background())
    txs := make([]*types.Transaction, 10)
    for i := range uint64(10) {
        index := i
        g.Go(func() error {
            tx, err := RandomBlobTx(client, f, fundedAccount.Address, nonce+index, gasPrice, chainid, al)
            if err != nil {
                logrus.WithError(err).Error("Could not create blob tx")
                // In the event the transaction constructed is not valid, we continue with the routine
                // rather than complete stop it.
                //nolint:nilerr

    // Send blob transactions - use different versions pre/post Fulu
    if isPostFulu {
        logrus.Info("Sending blob transactions with cell proofs")
        // Reduced from 10 to 5 to reduce load and prevent builder/EL timeouts
        for index := range uint64(5) {
            g.Go(func() error {
                tx, err := RandomBlobCellTx(client, fundedAccount.Address, nonce+index, gasPrice, chainid, al, useLargeBlobs)
                if err != nil {
                    return errors.Wrap(err, "Could not create blob cell tx")
                }

                signedTx, err := types.SignTx(tx, types.NewCancunSigner(chainid), fundedAccount.PrivateKey)
                if err != nil {
                    return errors.Wrap(err, "Could not sign blob cell tx")
                }

                txs[index] = signedTx
                return nil
            }
            signedTx, err := types.SignTx(tx, types.NewCancunSigner(chainid), fundedAccount.PrivateKey)
            if err != nil {
                logrus.WithError(err).Error("Could not sign blob tx")
                // We continue on in the event there is a reason we can't sign this
                // transaction(unlikely).
                //nolint:nilerr
            })
        }
    } else {
        logrus.Info("Sending blob transactions with sidecars")
        // Reduced from 10 to 5 to reduce load and prevent builder/EL timeouts
        for index := range uint64(5) {
            g.Go(func() error {
                tx, err := RandomBlobTx(client, fundedAccount.Address, nonce+index, gasPrice, chainid, al, useLargeBlobs)
                if err != nil {
                    logrus.WithError(err).Error("Could not create blob tx")
                    // In the event the transaction constructed is not valid, we continue with the routine
                    // rather than complete stop it.
                    //nolint:nilerr
                    return nil
                }

                signedTx, err := types.SignTx(tx, types.NewCancunSigner(chainid), fundedAccount.PrivateKey)
                if err != nil {
                    logrus.WithError(err).Error("Could not sign blob tx")
                    // We continue on in the event there is a reason we can't sign this
                    // transaction(unlikely).
                    //nolint:nilerr
                    return nil
                }

                txs[index] = signedTx
                return nil
            }
            txs[index] = signedTx
            return nil
        })
            })
        }
    }

    if err := g.Wait(); err != nil {
@@ -169,6 +213,7 @@ func SendTransaction(client *rpc.Client, key *ecdsa.PrivateKey, f *filler.Filler
        if tx == nil {
            continue
        }

        err = backend.SendTransaction(context.Background(), tx)
        if err != nil {
            // Do nothing
@@ -181,17 +226,20 @@ func SendTransaction(client *rpc.Client, key *ecdsa.PrivateKey, f *filler.Filler
        return err
    }

    txs = make([]*types.Transaction, N)
    for i := range N {
        index := i
    txs = make([]*types.Transaction, txCount)
    for index := range txCount {
        g.Go(func() error {
            tx, err := txfuzz.RandomValidTx(client, f, sender, nonce+index, gasPrice, chainid, al)
            tx, err := randomValidTx(sender, nonce+index, gasPrice, chainid, al)
            if err != nil {
                // In the event the transaction constructed is not valid, we continue with the routine
                // rather than complete stop it.
                //nolint:nilerr
                return nil
            }
            // Clamp gas to avoid exceeding common EL per-tx gas caps (e.g. 16,777,216) due to EIP-7825: Transaction Gas Limit Cap
            tx = clampTxGas(tx, 16_000_000)

            signedTx, err := types.SignTx(tx, types.NewLondonSigner(chainid), key)
            if err != nil {
                // We continue on in the event there is a reason we can't sign this
@@ -237,11 +285,13 @@ func (t *TransactionGenerator) Stop() error {
    return nil
}

func RandomBlobTx(rpc *rpc.Client, f *filler.Filler, sender common.Address, nonce uint64, gasPrice, chainID *big.Int, al bool) (*types.Transaction, error) {
func RandomBlobCellTx(rpc *rpc.Client, sender common.Address, nonce uint64, gasPrice, chainID *big.Int, al bool, useLargeBlobs bool) (*types.Transaction, error) {
    // Set fields if non-nil
    if rpc != nil {
        client := ethclient.NewClient(rpc)

        var err error

        if gasPrice == nil {
            gasPrice, err = client.SuggestGasPrice(context.Background())
            if err != nil {
@@ -255,27 +305,133 @@ func RandomBlobTx(rpc *rpc.Client, f *filler.Filler, sender common.Address, nonc
            }
        }
    }

    gas := uint64(100000)
    to := randomAddress()
    code := txfuzz.RandomCode(f)
    // Generate random EVM bytecode (similar to what tx-fuzz RandomCode did)
    code := generateRandomEVMCode(mathRand.Intn(128)) // #nosec G404
    value := big.NewInt(0)
    if len(code) > 128 {
        code = code[:128]
    }

    mod := 2
    if al {
        mod = 1
    }
    switch f.Byte() % byte(mod) {

    // Helper to get blob data based on config
    getBlobData := func() ([]byte, error) {
        if useLargeBlobs {
            return randomBlobDataLarge()
        }
        return randomBlobData()
    }

    // #nosec G404 -- Test code uses deterministic randomness
    switch mathRand.Intn(mod) {
    case 0:
        // 4844 transaction without AL
        // Blob transaction with cell proofs (Version 1 sidecar)
        tip, feecap, err := getCaps(rpc, gasPrice)
        if err != nil {
            return nil, errors.Wrap(err, "getCaps")
        }
        data, err := randomBlobData()

        data, err := getBlobData()
        if err != nil {
            return nil, errors.Wrap(err, "randomBlobData")
            return nil, errors.Wrap(err, "getBlobData")
        }

        return New4844CellTx(nonce, &to, gas, chainID, tip, feecap, value, code, big.NewInt(1000000), data, make(types.AccessList, 0))
    case 1:
        // Blob transaction with cell proofs and access list
        tx := types.NewTx(&types.LegacyTx{
            Nonce:    nonce,
            To:       &to,
            Value:    value,
            Gas:      gas,
            GasPrice: gasPrice,
            Data:     code,
        })

        // Use legacy GasPrice for access list simulation to satisfy post-London requirement.
        msg := ethereum.CallMsg{
            From:       sender,
            To:         tx.To(),
            Gas:        tx.Gas(),
            GasPrice:   gasPrice,
            Value:      tx.Value(),
            Data:       tx.Data(),
            AccessList: nil,
        }
        geth := gethclient.New(rpc)

        al, _, _, err := geth.CreateAccessList(context.Background(), msg)
        if err != nil {
            return nil, errors.Wrap(err, "CreateAccessList")
        }
        tip, feecap, err := getCaps(rpc, gasPrice)
        if err != nil {
            return nil, errors.Wrap(err, "getCaps")
        }
        data, err := getBlobData()
        if err != nil {
            return nil, errors.Wrap(err, "getBlobData")
        }

        return New4844CellTx(nonce, &to, gas, chainID, tip, feecap, value, code, big.NewInt(1000000), data, *al)
    }

    return nil, nil
}

func RandomBlobTx(rpc *rpc.Client, sender common.Address, nonce uint64, gasPrice, chainID *big.Int, al bool, useLargeBlobs bool) (*types.Transaction, error) {
    // Set fields if non-nil
    if rpc != nil {
        client := ethclient.NewClient(rpc)
        var err error
        if gasPrice == nil {
            gasPrice, err = client.SuggestGasPrice(context.Background())
            if err != nil {
                gasPrice = big.NewInt(1)
            }
        }

        if chainID == nil {
            chainID, err = client.ChainID(context.Background())
            if err != nil {
                chainID = big.NewInt(1)
            }
        }
    }
    gas := uint64(100000)
    to := randomAddress()
    // Generate random EVM bytecode (similar to what tx-fuzz RandomCode did)
    code := generateRandomEVMCode(mathRand.Intn(128)) // #nosec G404
    value := big.NewInt(0)
    mod := 2
    if al {
        mod = 1
    }

    // Helper to get blob data based on config
    getBlobData := func() ([]byte, error) {
        if useLargeBlobs {
            return randomBlobDataLarge()
        }
        return randomBlobData()
    }

    // #nosec G404 -- Test code uses deterministic randomness
    switch mathRand.Intn(mod) {
    case 0:
        // 4844 transaction without AL

        tip, feecap, err := getCaps(rpc, gasPrice)
        if err != nil {
            return nil, errors.Wrap(err, "getCaps")
        }

        data, err := getBlobData()
        if err != nil {
            return nil, errors.Wrap(err, "getBlobData")
        }
        return New4844Tx(nonce, &to, gas, chainID, tip, feecap, value, code, big.NewInt(1000000), data, make(types.AccessList, 0)), nil
    case 1:
@@ -289,18 +445,18 @@ func RandomBlobTx(rpc *rpc.Client, f *filler.Filler, sender common.Address, nonc
            Data: code,
        })

        // TODO: replace call with al, err := txfuzz.CreateAccessList(rpc, tx, sender) when txfuzz is fixed in new release
        // an error occurs mentioning error="CreateAccessList: both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified"
        // Use legacy GasPrice for access list simulation to satisfy post-London requirement.
        msg := ethereum.CallMsg{
            From:     sender,
            To:       tx.To(),
            Gas:      tx.Gas(),
            GasPrice: tx.GasPrice(),
            GasPrice: gasPrice,
            Value:      tx.Value(),
            Data:       tx.Data(),
            AccessList: nil,
        }
        geth := gethclient.New(rpc)

        al, _, _, err := geth.CreateAccessList(context.Background(), msg)
        if err != nil {
            return nil, errors.Wrap(err, "CreateAccessList")
@@ -309,15 +465,52 @@ func RandomBlobTx(rpc *rpc.Client, f *filler.Filler, sender common.Address, nonc
        if err != nil {
            return nil, errors.Wrap(err, "getCaps")
        }
        data, err := randomBlobData()
        data, err := getBlobData()
        if err != nil {
            return nil, errors.Wrap(err, "randomBlobData")
            return nil, errors.Wrap(err, "getBlobData")
        }
        return New4844Tx(nonce, &to, gas, chainID, tip, feecap, value, code, big.NewInt(1000000), data, *al), nil
    }
    return nil, errors.New("asdf")
}

func New4844CellTx(nonce uint64, to *common.Address, gasLimit uint64, chainID, tip, feeCap, value *big.Int, code []byte, blobFeeCap *big.Int, blobData []byte, al types.AccessList) (*types.Transaction, error) {
    blobs, comms, _, versionedHashes, err := EncodeBlobs(blobData)
    if err != nil {
        return nil, errors.Wrap(err, "failed to encode blobs")
    }

    // Create a Version 0 sidecar first
    sidecar := &types.BlobTxSidecar{
        Version:     types.BlobSidecarVersion0,
        Blobs:       blobs,
        Commitments: comms,
        Proofs:      make([]kzg4844.Proof, len(blobs)), // Placeholder, will be replaced by ToV1
    }

    // Convert to Version 1 which will compute and attach cell proofs
    if err := sidecar.ToV1(); err != nil {
        return nil, errors.Wrap(err, "failed to convert sidecar to V1")
    }

    tx := types.NewTx(&types.BlobTx{
        ChainID:    uint256.MustFromBig(chainID),
        Nonce:      nonce,
        GasTipCap:  uint256.MustFromBig(tip),
        GasFeeCap:  uint256.MustFromBig(feeCap),
        Gas:        gasLimit,
        To:         *to,
        Value:      uint256.MustFromBig(value),
        Data:       code,
        AccessList: al,
        BlobFeeCap: uint256.MustFromBig(blobFeeCap),
        BlobHashes: versionedHashes,
        Sidecar:    sidecar,
    })

    return tx, nil
}
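As the comments in New4844CellTx note, the V0-to-V1 conversion is what computes the cell proofs, so the placeholder Proofs slice is never consumed as-is. A calling sketch (nonce, fee, and recipient values are placeholders):

    tx, err := New4844CellTx(0, &to, 100000, chainID, tip, feeCap, big.NewInt(0),
        nil, big.NewInt(1000000), blobData, make(types.AccessList, 0))
    if err != nil {
        return err
    }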
func New4844Tx(nonce uint64, to *common.Address, gasLimit uint64, chainID, tip, feeCap, value *big.Int, code []byte, blobFeeCap *big.Int, blobData []byte, al types.AccessList) *types.Transaction {
    blobs, comms, proofs, versionedHashes, err := EncodeBlobs(blobData)
    if err != nil {
@@ -344,15 +537,91 @@ func New4844Tx(nonce uint64, to *common.Address, gasLimit uint64, chainID, tip,
    return tx
}

// clampTxGas returns a copy of tx with Gas reduced to cap if it exceeds cap.
// This avoids EL errors like "transaction gas limit too high" on networks with
// per-transaction gas caps (commonly ~16,777,216).
func clampTxGas(tx *types.Transaction, gasCap uint64) *types.Transaction {
    if tx == nil || tx.Gas() <= gasCap {
        return tx
    }

    to := tx.To()
    switch tx.Type() {
    case types.LegacyTxType:
        return types.NewTx(&types.LegacyTx{
            Nonce:    tx.Nonce(),
            To:       to,
            Value:    tx.Value(),
            Gas:      gasCap,
            GasPrice: tx.GasPrice(),
            Data:     tx.Data(),
        })
    case types.AccessListTxType:
        return types.NewTx(&types.AccessListTx{
            ChainID:    tx.ChainId(),
            Nonce:      tx.Nonce(),
            To:         to,
            Value:      tx.Value(),
            Gas:        gasCap,
            GasPrice:   tx.GasPrice(),
            Data:       tx.Data(),
            AccessList: tx.AccessList(),
        })
    case types.DynamicFeeTxType:
        return types.NewTx(&types.DynamicFeeTx{
            ChainID:    tx.ChainId(),
            Nonce:      tx.Nonce(),
            To:         to,
            Value:      tx.Value(),
            Gas:        gasCap,
            Data:       tx.Data(),
            AccessList: tx.AccessList(),
            GasTipCap:  tx.GasTipCap(),
            GasFeeCap:  tx.GasFeeCap(),
        })
    case types.BlobTxType:
        // Leave blob txs unchanged here; blob tx construction paths set gas explicitly.
        return tx
    default:
        return tx
    }
}
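The 16,777,216 figure in the comment is 2^24, the per-transaction gas cap introduced by EIP-7825; clamping to 16,000,000 stays safely under it. Usage mirrors the fuzzing path shown earlier:

    tx = clampTxGas(tx, 16_000_000) // never raises gas, only reduces it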
// ensureMinBalance tops up dest account from miner if its balance is below minWei.
func ensureMinBalance(ctx context.Context, rpcCli *rpc.Client, backend *ethclient.Client, minerKey, destKey *keystore.Key, minWei *big.Int) error {
    bal, err := backend.BalanceAt(ctx, destKey.Address, nil)
    if err != nil {
        return err
    }

    if bal.Cmp(minWei) >= 0 {
        return nil
    }

    if err := fundAccount(rpcCli, minerKey, destKey); err != nil {
        return err
    }

    if err := WaitForBlocks(ctx, backend, minerKey, 1); err != nil {
        return errors.Wrap(err, "failed to mine block for top-up tx")
    }

    return nil
}

func encodeBlobs(data []byte) []kzg4844.Blob {
    blobs := []kzg4844.Blob{{}}
    blobIndex := 0
    fieldIndex := -1
    numOfElems := fieldparams.BlobLength / 32
    // Allow up to 6 blobs per transaction to properly test BPO limits.
    // With 10 blob txs per slot × 6 blobs = 60 max blobs submitted,
    // which exceeds the highest BPO limit (21) and ensures we can hit it.
    const maxBlobsPerTx = 6
    for i := 0; i < len(data); i += 31 {
        fieldIndex++
        if fieldIndex == numOfElems {
            if blobIndex >= 1 {
            if blobIndex >= maxBlobsPerTx-1 {
                break
            }
            blobs = append(blobs, kzg4844.Blob{})
@@ -405,6 +674,7 @@ func kZGToVersionedHash(kzg kzg4844.Commitment) common.Hash {
}

func randomBlobData() ([]byte, error) {
    // Generate random data for up to 1 blob. This is used for pre-Fulu tests.
    size := mathRand.Intn(fieldparams.BlobSize) // #nosec G404
    data := make([]byte, size)
    n, err := mathRand.Read(data) // #nosec G404
@@ -417,6 +687,24 @@ func randomBlobData() ([]byte, error) {
    return data, nil
}

// randomBlobDataLarge generates 6 blobs worth of data for BPO testing.
// This is used post-Fulu to ensure we can test increased blob limits.
// With 5 blob txs per slot × 6 blobs = 30 max blobs submitted,
// which exceeds the highest BPO limit (21) and ensures we can hit it.
func randomBlobDataLarge() ([]byte, error) {
    const numBlobs = 6
    size := (numBlobs-1)*fieldparams.BlobSize + 1
    data := make([]byte, size)
    n, err := mathRand.Read(data) // #nosec G404
    if err != nil {
        return nil, err
    }
    if n != size {
        return nil, fmt.Errorf("could not create random blob data with size %d: %w", size, err)
    }
    return data, nil
}
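A quick check of the arithmetic in these helpers, assuming fieldparams.BlobSize is the full 131,072-byte blob: encodeBlobs packs 31 input bytes into each 32-byte field element, so five blobs absorb at most 5 × 4096 × 31 = 634,880 input bytes, while randomBlobDataLarge produces 5 × 131,072 + 1 = 655,361 bytes, which guarantees the payload spills into a sixth blob, where the maxBlobsPerTx cap stops it. Note that the slot-level comment in encodeBlobs still says 10 txs × 6 blobs = 60; that predates the reduction to 5 txs per slot elsewhere in this change, so the effective bound is 5 × 6 = 30, still above the highest BPO limit of 21 cited in the comments.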
func randomAddress() common.Address {
    rNum := mathRand.Int31n(5) // #nosec G404
    switch rNum {
@@ -468,7 +756,8 @@ func fundAccount(client *rpc.Client, sourceKey, destKey *keystore.Key) error {
    if err != nil {
        return err
    }
    val, ok := big.NewInt(0).SetString("10000000000000000000000000", 10)
    // Increased funding to 100 million ETH to handle extended test runs with blob transactions
    val, ok := big.NewInt(0).SetString("100000000000000000000000000", 10)
    if !ok {
        return errors.New("could not set big int for value")
    }
@@ -479,3 +768,132 @@ func fundAccount(client *rpc.Client, sourceKey, destKey *keystore.Key) error {
    }
    return backend.SendTransaction(context.Background(), signedTx)
}

// generateRandomEVMCode generates random but valid-looking EVM bytecode
// This mimics what tx-fuzz's RandomCode did (which used FuzzyVM's generator)
func generateRandomEVMCode(maxLen int) []byte {
    if maxLen == 0 {
        return []byte{}
    }

    // Common EVM opcodes that are safe for testing
    // Including: PUSH, DUP, SWAP, arithmetic, logic, and STOP
    safeOpcodes := []byte{
        0x00, // STOP
        0x01, // ADD
        0x02, // MUL
        0x03, // SUB
        0x04, // DIV
        0x10, // LT
        0x11, // GT
        0x14, // EQ
        0x16, // AND
        0x17, // OR
        0x18, // XOR
        0x50, // POP
        0x52, // MSTORE
        0x54, // SLOAD
        0x55, // SSTORE
        0x56, // JUMP
        0x57, // JUMPI
        0x58, // PC
        0x59, // MSIZE
        0x5A, // GAS
        0x60, // PUSH1
        0x80, // DUP1
        0x90, // SWAP1
    }

    code := make([]byte, 0, maxLen)
    for i := 0; i < maxLen; i++ {
        opcode := safeOpcodes[mathRand.Intn(len(safeOpcodes))] // #nosec G404
        code = append(code, opcode)

        // If PUSH1, add a random byte
        if opcode == 0x60 && i+1 < maxLen {
            code = append(code, byte(mathRand.Intn(256))) // #nosec G404
            i++
        }
    }

    return code
}
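One observation about how these random bytes are consumed: the blob-transaction paths earlier and randomValidTx below always set a non-nil To address and place the bytes in the transaction's Data field, so they act as calldata rather than deployed code; the opcode whitelist simply keeps the payloads EVM-shaped. A usage sketch:

    payload := generateRandomEVMCode(64) // up to 64 bytes of STOP/PUSH/DUP-style opcodes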
// randomValidTx generates a random valid transaction
// This replaces tx-fuzz's RandomValidTx functionality
func randomValidTx(sender common.Address, nonce uint64, gasPrice, chainID *big.Int, forceAccessList bool) (*types.Transaction, error) {
    gas := uint64(21000 + mathRand.Intn(100000)) // #nosec G404
    to := randomAddress()
    code := generateRandomEVMCode(mathRand.Intn(256)) // #nosec G404
    value := big.NewInt(0)

    // Randomly choose transaction type
    // 0: Legacy, 1: AccessList, 2: DynamicFee
    txType := mathRand.Intn(3) // #nosec G404
    if forceAccessList {
        txType = 1 // Force AccessList type
    }

    switch txType {
    case 0:
        // Legacy transaction
        return types.NewTx(&types.LegacyTx{
            Nonce:    nonce,
            To:       &to,
            Value:    value,
            Gas:      gas,
            GasPrice: gasPrice,
            Data:     code,
        }), nil
    case 1:
        // AccessList transaction
        accessList := make(types.AccessList, 0)
        // Optionally add some random access list entries
        // #nosec G404 -- Test code uses deterministic randomness
        if mathRand.Intn(2) == 0 {
            // #nosec G404 -- Test code uses deterministic randomness
            numEntries := mathRand.Intn(3) + 1
            for range numEntries {
                addr := randomAddress()
                storageKeys := make([]common.Hash, mathRand.Intn(3)) // #nosec G404
                for j := range storageKeys {
                    b := make([]byte, 32)
                    _, _ = mathRand.Read(b) // #nosec G404
                    storageKeys[j] = common.BytesToHash(b)
                }
                accessList = append(accessList, types.AccessTuple{
                    Address:     addr,
                    StorageKeys: storageKeys,
                })
            }
        }
        return types.NewTx(&types.AccessListTx{
            ChainID:    chainID,
            Nonce:      nonce,
            To:         &to,
            Value:      value,
            Gas:        gas,
            GasPrice:   gasPrice,
            Data:       code,
            AccessList: accessList,
        }), nil
    case 2:
        // DynamicFee transaction (EIP-1559)
        tip := new(big.Int).Div(gasPrice, big.NewInt(10)) // 10% tip
        feeCap := new(big.Int).Add(gasPrice, tip)
        accessList := make(types.AccessList, 0)
        return types.NewTx(&types.DynamicFeeTx{
            ChainID:    chainID,
            Nonce:      nonce,
            To:         &to,
            Value:      value,
            Gas:        gas,
            GasTipCap:  tip,
            GasFeeCap:  feeCap,
            Data:       code,
            AccessList: accessList,
        }), nil
    }

    return nil, errors.New("invalid transaction type")
}

@@ -21,7 +21,7 @@ func e2eMinimal(t *testing.T, cfg *params.BeaconChainConfig, cfgo ...types.E2ECo
    require.NoError(t, e2eParams.Init(t, e2eParams.StandardBeaconCount))

    var err error
    epochsToRun := 16
    epochsToRun := 18
    epochStr, longRunning := os.LookupEnv("E2E_EPOCHS")
    if longRunning {
        epochsToRun, err = strconv.Atoi(epochStr)
@@ -35,6 +35,10 @@ func e2eMinimal(t *testing.T, cfg *params.BeaconChainConfig, cfgo ...types.E2ECo
    }
    tracingPort := e2eParams.TestParams.Ports.JaegerTracingPort
    tracingEndpoint := fmt.Sprintf("127.0.0.1:%d", tracingPort)
    // Default exit epoch used for voluntary exit tests.
    // Can be overridden via WithExitEpoch option for shorter test runs.
    exitEpoch := primitives.Epoch(7)

    evals := []types.Evaluator{
        ev.PeersConnect,
        ev.HealthzCheck,
@@ -44,10 +48,7 @@ func e2eMinimal(t *testing.T, cfg *params.BeaconChainConfig, cfgo ...types.E2ECo
        ev.FinalizationOccurs(3),
        ev.VerifyBlockGraffiti,
        ev.PeersCheck,
        ev.ProposeVoluntaryExit,
        ev.ValidatorsHaveExited,
        ev.SubmitWithdrawal,
        ev.ValidatorsHaveWithdrawn,
        // Exit-related evaluators are added after processing options to allow custom exit epoch.
        ev.ProcessesDepositsInBlocks,
        ev.ActivatesDepositedValidators,
        ev.DepositedValidatorsAreActive,
@@ -64,6 +65,11 @@ func e2eMinimal(t *testing.T, cfg *params.BeaconChainConfig, cfgo ...types.E2ECo
    evals = addIfForkSet(evals, cfg.CapellaForkEpoch, ev.CapellaForkTransition)
    evals = addIfForkSet(evals, cfg.DenebForkEpoch, ev.DenebForkTransition)
    evals = addIfForkSet(evals, cfg.ElectraForkEpoch, ev.ElectraForkTransition)
    evals = addIfForkSet(evals, cfg.FuluForkEpoch, ev.FuluForkTransition)
    // Blob evaluators - run from Deneb onwards
    evals = addIfForkSet(evals, cfg.DenebForkEpoch, ev.BlobsIncludedInBlocks)
    // BPO (Blob Parameter Optimization) evaluator - runs from Fulu onwards
    evals = addIfForkSet(evals, cfg.FuluForkEpoch, ev.BlobLimitsRespected)

    testConfig := &types.E2EConfig{
        BeaconFlags: []string{
@@ -87,6 +93,18 @@ func e2eMinimal(t *testing.T, cfg *params.BeaconChainConfig, cfgo ...types.E2ECo
    for _, o := range cfgo {
        o(testConfig)
    }

    // Add exit-related evaluators using custom exit epoch if configured, otherwise use default.
    if testConfig.ExitEpoch > 0 {
        exitEpoch = testConfig.ExitEpoch
    }
    testConfig.Evaluators = append(testConfig.Evaluators,
        ev.ProposeVoluntaryExitAtEpoch(exitEpoch),
        ev.ValidatorsHaveExitedAtEpoch(exitEpoch+1),
        ev.SubmitWithdrawalAtEpoch(exitEpoch+1),
        ev.ValidatorsHaveWithdrawnAfterExitAtEpoch(exitEpoch),
    )

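The comment above references a WithExitEpoch option; a minimal shape for it could look like the following sketch (the actual signature in the repo may differ, and the option type is assumed to match the truncated cfgo variadic):

    // WithExitEpoch is a sketch of an option constructor for this pattern.
    func WithExitEpoch(e primitives.Epoch) func(*types.E2EConfig) {
        return func(cfg *types.E2EConfig) {
            cfg.ExitEpoch = e
        }
    }

With it, e2eMinimal(t, cfg, WithExitEpoch(2)) would exit validators at epoch 2 and verify exits and withdrawals at epoch 3.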
    if testConfig.UseBuilder {
        testConfig.Evaluators = append(testConfig.Evaluators, ev.BuilderIsActive)
    }
@@ -140,6 +158,11 @@ func e2eMainnet(t *testing.T, usePrysmSh, useMultiClient bool, cfg *params.Beaco
    evals = addIfForkSet(evals, cfg.CapellaForkEpoch, ev.CapellaForkTransition)
    evals = addIfForkSet(evals, cfg.DenebForkEpoch, ev.DenebForkTransition)
    evals = addIfForkSet(evals, cfg.ElectraForkEpoch, ev.ElectraForkTransition)
    evals = addIfForkSet(evals, cfg.FuluForkEpoch, ev.FuluForkTransition)
    // Blob evaluators - run from Deneb onwards
    evals = addIfForkSet(evals, cfg.DenebForkEpoch, ev.BlobsIncludedInBlocks)
    // BPO (Blob Parameter Optimization) evaluator - runs from Fulu onwards
    evals = addIfForkSet(evals, cfg.FuluForkEpoch, ev.BlobLimitsRespected)

    testConfig := &types.E2EConfig{
        BeaconFlags: []string{
@@ -208,6 +231,11 @@ func scenarioEvals(cfg *params.BeaconChainConfig) []types.Evaluator {
    evals = addIfForkSet(evals, cfg.CapellaForkEpoch, ev.CapellaForkTransition)
    evals = addIfForkSet(evals, cfg.DenebForkEpoch, ev.DenebForkTransition)
    evals = addIfForkSet(evals, cfg.ElectraForkEpoch, ev.ElectraForkTransition)
    evals = addIfForkSet(evals, cfg.FuluForkEpoch, ev.FuluForkTransition)
    // Blob evaluators - run from Deneb onwards
    evals = addIfForkSet(evals, cfg.DenebForkEpoch, ev.BlobsIncludedInBlocks)
    // BPO (Blob Parameter Optimization) evaluator - runs from Fulu onwards
    evals = addIfForkSet(evals, cfg.FuluForkEpoch, ev.BlobLimitsRespected)
    return evals
}

@@ -229,5 +257,10 @@ func scenarioEvalsMulti(cfg *params.BeaconChainConfig) []types.Evaluator {
    evals = addIfForkSet(evals, cfg.CapellaForkEpoch, ev.CapellaForkTransition)
    evals = addIfForkSet(evals, cfg.DenebForkEpoch, ev.DenebForkTransition)
    evals = addIfForkSet(evals, cfg.ElectraForkEpoch, ev.ElectraForkTransition)
    evals = addIfForkSet(evals, cfg.FuluForkEpoch, ev.FuluForkTransition)
    // Blob evaluators - run from Deneb onwards
    evals = addIfForkSet(evals, cfg.DenebForkEpoch, ev.BlobsIncludedInBlocks)
    // BPO (Blob Parameter Optimization) evaluator - runs from Fulu onwards
    evals = addIfForkSet(evals, cfg.FuluForkEpoch, ev.BlobLimitsRespected)
    return evals
}

@@ -225,9 +225,9 @@ func (r *testRunner) testDepositsAndTx(ctx context.Context, g *errgroup.Group,
    if err := helpers.ComponentsStarted(ctx, []e2etypes.ComponentRunner{r.depositor}); err != nil {
        return errors.Wrap(err, "testDepositsAndTx unable to run, depositor did not Start")
    }
    go func() {
        if r.config.TestDeposits {
            log.Info("Running deposit tests")
    go func() {
        if r.config.TestDeposits {
            log.Info("Running deposit tests")
            // The validators with an index < minGenesisActiveCount all have deposits already from the chain start.
            // Skip all of those chain start validators by seeking to minGenesisActiveCount in the validator list
            // for further deposit testing.
@@ -238,9 +238,12 @@ func (r *testRunner) testDepositsAndTx(ctx context.Context, g *errgroup.Group,
                r.t.Error(errors.Wrap(err, "depositor.SendAndMine failed"))
            }
        }
        }
        r.testTxGeneration(ctx, g, keystorePath, []e2etypes.ComponentRunner{})
    }()
        }
        // Only generate background transactions when relevant for the test.
        if r.config.TestDeposits || r.config.TestFeature || r.config.UseBuilder {
            r.testTxGeneration(ctx, g, keystorePath, []e2etypes.ComponentRunner{})
        }
    }()
    if r.config.TestDeposits {
        return depositCheckValidator.Start(ctx)
    }
@@ -249,7 +252,7 @@ func (r *testRunner) testDepositsAndTx(ctx context.Context, g *errgroup.Group,
}

func (r *testRunner) testTxGeneration(ctx context.Context, g *errgroup.Group, keystorePath string, requiredNodes []e2etypes.ComponentRunner) {
    txGenerator := eth1.NewTransactionGenerator(keystorePath, r.config.Seed)
    txGenerator := eth1.NewTransactionGenerator(keystorePath, r.config.Seed, r.config.UseLargeBlobs)
    r.comHandler.txGen = txGenerator
    g.Go(func() error {
        if err := helpers.ComponentsStarted(ctx, requiredNodes); err != nil {

@@ -4,6 +4,7 @@ go_library(
    name = "go_default_library",
    testonly = True,
    srcs = [
        "blob_limits.go",
        "builder.go",
        "data.go",
        "execution_engine.go",

246 testing/endtoend/evaluators/blob_limits.go Normal file
@@ -0,0 +1,246 @@
package evaluators

import (
    "context"

    "github.com/OffchainLabs/prysm/v7/config/params"
    "github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
    "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
    ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v7/runtime/version"
    "github.com/OffchainLabs/prysm/v7/testing/endtoend/policies"
    e2etypes "github.com/OffchainLabs/prysm/v7/testing/endtoend/types"
    "github.com/OffchainLabs/prysm/v7/time/slots"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
    "google.golang.org/grpc"
    "google.golang.org/protobuf/types/known/emptypb"
)

// Minimum blob utilization percentage required to pass the evaluator.
// We expect the transaction generator to produce enough blobs to hit at least this threshold.
const minBlobUtilizationPercent = 50

// BlobsIncludedInBlocks verifies that blocks contain blobs and that the blob count
// does not exceed the maximum allowed for the epoch (respecting BPO schedule).
var BlobsIncludedInBlocks = e2etypes.Evaluator{
    Name: "blobs_included_in_blocks_epoch_%d",
    Policy: func(currentEpoch primitives.Epoch) bool {
        // Only run from Deneb onwards
        return policies.OnwardsNthEpoch(params.BeaconConfig().DenebForkEpoch)(currentEpoch)
    },
    Evaluation: blobsIncludedInBlocks,
}

// BlobLimitsRespected verifies that the BPO (Blob Parameter Optimization) limits
// are correctly enforced after Fulu fork, checking that:
// 1. Blocks don't exceed MaxBlobsPerBlock for their respective epoch
// 2. We're utilizing the increased capacity (at least 50% utilization)
// 3. When BPO activates (limit increases), blocks actually use the new higher limit
var BlobLimitsRespected = e2etypes.Evaluator{
    Name: "blob_limits_respected_epoch_%d",
    Policy: func(currentEpoch primitives.Epoch) bool {
        // Only run from Fulu onwards where BPO is active
        return policies.OnwardsNthEpoch(params.BeaconConfig().FuluForkEpoch)(currentEpoch)
    },
    Evaluation: blobLimitsRespected,
}

func blobsIncludedInBlocks(_ *e2etypes.EvaluationContext, conns ...*grpc.ClientConn) error {
    conn := conns[0]
    client := ethpb.NewBeaconChainClient(conn)

    chainHead, err := client.GetChainHead(context.Background(), &emptypb.Empty{})
    if err != nil {
        return errors.Wrap(err, "failed to get chain head")
    }

    // Check blocks from the previous epoch
    epoch := chainHead.HeadEpoch
    if epoch > 0 {
        epoch = epoch - 1
    }

    // Skip if we're before Deneb
    if epoch < params.BeaconConfig().DenebForkEpoch {
        return nil
    }

    req := &ethpb.ListBlocksRequest{QueryFilter: &ethpb.ListBlocksRequest_Epoch{Epoch: epoch}}
    blks, err := client.ListBeaconBlocks(context.Background(), req)
    if err != nil {
        return errors.Wrap(err, "failed to get blocks from beacon-chain")
    }

    blocksWithBlobs := 0
    for _, ctr := range blks.BlockContainers {
        blk, err := blocks.BeaconBlockContainerToSignedBeaconBlock(ctr)
        if err != nil {
            return errors.Wrap(err, "failed to convert block container to signed beacon block")
        }

        if blk == nil || blk.IsNil() {
            continue
        }

        // Skip blocks before Deneb
        if blk.Version() < version.Deneb {
            continue
        }

        commitments, err := blk.Block().Body().BlobKzgCommitments()
        if err != nil {
            return errors.Wrap(err, "failed to get blob kzg commitments")
        }

        if len(commitments) > 0 {
            blocksWithBlobs++
        }
    }

    // We expect at least some blocks to have blobs since we're sending blob transactions
    if blocksWithBlobs == 0 {
        return errors.Errorf("no blocks with blobs found in epoch %d", epoch)
    }

    return nil
}

func blobLimitsRespected(_ *e2etypes.EvaluationContext, conns ...*grpc.ClientConn) error {
    conn := conns[0]
    nodeClient := ethpb.NewNodeClient(conn)
    beaconClient := ethpb.NewBeaconChainClient(conn)

    genesis, err := nodeClient.GetGenesis(context.Background(), &emptypb.Empty{})
    if err != nil {
        return errors.Wrap(err, "failed to get genesis")
    }

    currSlot := slots.CurrentSlot(genesis.GenesisTime.AsTime())
    currEpoch := slots.ToEpoch(currSlot)

    // Check the previous epoch to ensure blocks are finalized
    epochToCheck := currEpoch
    if epochToCheck > 0 {
        epochToCheck = epochToCheck - 1
    }

    // Skip if we're before Fulu
    if epochToCheck < params.BeaconConfig().FuluForkEpoch {
        return nil
    }

    req := &ethpb.ListBlocksRequest{QueryFilter: &ethpb.ListBlocksRequest_Epoch{Epoch: epochToCheck}}
    blks, err := beaconClient.ListBeaconBlocks(context.Background(), req)
    if err != nil {
        return errors.Wrap(err, "failed to get blocks from beacon-chain")
    }

    cfg := params.BeaconConfig()
    maxBlobsForEpoch := cfg.MaxBlobsPerBlockAtEpoch(epochToCheck)

    // Get the previous epoch's limit to detect BPO transitions
    var prevEpochMaxBlobs int
    if epochToCheck > 0 {
        prevEpochMaxBlobs = cfg.MaxBlobsPerBlockAtEpoch(epochToCheck - 1)
    }

    // Check if this is a BPO transition epoch (limit increased from previous epoch)
    isBPOTransitionEpoch := maxBlobsForEpoch > prevEpochMaxBlobs

    var totalBlobs int
    var maxBlobsInBlock int
    var blockCount int

    for _, ctr := range blks.BlockContainers {
        blk, err := blocks.BeaconBlockContainerToSignedBeaconBlock(ctr)
        if err != nil {
            return errors.Wrap(err, "failed to convert block container to signed beacon block")
        }

        if blk == nil || blk.IsNil() {
            continue
        }

        // Skip blocks before Deneb (shouldn't happen if we're checking post-Fulu epochs)
        if blk.Version() < version.Deneb {
            continue
        }

        slot := blk.Block().Slot()
        blockEpoch := slots.ToEpoch(slot)

        commitments, err := blk.Block().Body().BlobKzgCommitments()
        if err != nil {
            return errors.Wrap(err, "failed to get blob kzg commitments")
        }

        blobCount := len(commitments)

        // Verify we don't exceed the limit
        if blobCount > maxBlobsForEpoch {
            return errors.Errorf(
                "block at slot %d (epoch %d) has %d blobs, exceeding max allowed %d for this epoch",
                slot, blockEpoch, blobCount, maxBlobsForEpoch,
            )
        }

        totalBlobs += blobCount
        blockCount++
        if blobCount > maxBlobsInBlock {
            maxBlobsInBlock = blobCount
        }
    }

    // Calculate utilization
    if blockCount == 0 {
        return errors.Errorf("no blocks found in epoch %d", epochToCheck)
    }

    utilizationPercent := (maxBlobsInBlock * 100) / maxBlobsForEpoch

    logrus.WithFields(logrus.Fields{
        "epoch":                epochToCheck,
        "maxBlobsForEpoch":     maxBlobsForEpoch,
        "prevEpochMaxBlobs":    prevEpochMaxBlobs,
        "maxBlobsInBlock":      maxBlobsInBlock,
        "totalBlobs":           totalBlobs,
        "blockCount":           blockCount,
        "utilizationPercent":   utilizationPercent,
        "isBPOTransitionEpoch": isBPOTransitionEpoch,
    }).Info("Blob utilization stats for epoch")
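A worked pass through the utilization check above, under assumed numbers: if the BPO schedule raises maxBlobsForEpoch to 21 and the fullest block in the epoch carried maxBlobsInBlock = 12 blobs, then (12 * 100) / 21 = 57 in integer math, which clears the minBlobUtilizationPercent threshold of 50; a peak of 10 blobs would yield 47 and fail.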
    // For BPO transition epochs, verify that BPO actually activated by checking
    // that at least one block exceeded the previous epoch's limit
    if isBPOTransitionEpoch {
        if maxBlobsInBlock <= prevEpochMaxBlobs {
            return errors.Errorf(
                "BPO transition at epoch %d: limit increased from %d to %d, but no block exceeded the old limit. "+
                    "Max blobs in any block was %d. This indicates BPO may not be working correctly or "+
                    "the transaction generator is not producing enough blobs to test the increased limit.",
                epochToCheck, prevEpochMaxBlobs, maxBlobsForEpoch, maxBlobsInBlock,
            )
        }
        logrus.WithFields(logrus.Fields{
            "epoch":           epochToCheck,
            "prevLimit":       prevEpochMaxBlobs,
            "newLimit":        maxBlobsForEpoch,
            "maxBlobsInBlock": maxBlobsInBlock,
        }).Info("BPO activation verified: blocks are using capacity beyond previous limit")
    }

    // For all BPO epochs (where limit > Electra's 9), verify we're utilizing the capacity
    electraMax := cfg.DeprecatedMaxBlobsPerBlockElectra
    if maxBlobsForEpoch > electraMax {
        if utilizationPercent < minBlobUtilizationPercent {
            return errors.Errorf(
                "BPO epoch %d has max blobs %d but only achieved %d%% utilization (max blobs in any block: %d). "+
                    "Expected at least %d%% utilization to verify BPO is working. "+
                    "This may indicate the transaction generator is not producing enough blobs.",
                epochToCheck, maxBlobsForEpoch, utilizationPercent, maxBlobsInBlock, minBlobUtilizationPercent,
            )
        }
    }

    return nil
}
@@ -11,7 +11,6 @@ import (
    "github.com/OffchainLabs/prysm/v7/testing/endtoend/policies"
    e2etypes "github.com/OffchainLabs/prysm/v7/testing/endtoend/types"
    "github.com/OffchainLabs/prysm/v7/time/slots"
    "github.com/ethereum/go-ethereum/common/hexutil"
    "github.com/pkg/errors"
    "google.golang.org/grpc"
    "google.golang.org/protobuf/types/known/emptypb"
@@ -27,6 +26,11 @@ var BuilderIsActive = e2etypes.Evaluator{
    Evaluation: builderActive,
}

// maxNonBuilderBlocks is the maximum number of blocks that can be built locally
// instead of by the builder before the test fails. This allows tolerance for
// occasional builder timeouts or failures.
const maxNonBuilderBlocks = 2

func builderActive(_ *e2etypes.EvaluationContext, conns ...*grpc.ClientConn) error {
    conn := conns[0]
    client := ethpb.NewNodeClient(conn)
@@ -49,6 +53,10 @@ func builderActive(_ *e2etypes.EvaluationContext, conns ...*grpc.ClientConn) err
    if err != nil {
        return err
    }

    nonBuilderBlocks := 0
    builderBlocks := 0

    blockCtrs, err := beaconClient.ListBeaconBlocks(context.Background(), &ethpb.ListBlocksRequest{QueryFilter: &ethpb.ListBlocksRequest_Epoch{Epoch: lowestBound}})
    if err != nil {
        return errors.Wrap(err, "failed to get beacon blocks")
@@ -84,13 +92,18 @@ func builderActive(_ *e2etypes.EvaluationContext, conns ...*grpc.ClientConn) err
            continue
        }
        if string(execPayload.ExtraData()) != "prysm-builder" {
            return errors.Errorf("%s block with slot %d was not built by the builder. It has an extra data of %s and txRoot of %s", version.String(b.Version()), b.Block().Slot(), string(execPayload.ExtraData()), hexutil.Encode(txRoot))
            nonBuilderBlocks++
            continue
        }
        builderBlocks++
        if execPayload.GasLimit() == 0 {
            return errors.Errorf("%s block with slot %d has a gas limit of 0, when it should be in the 30M range", version.String(b.Version()), b.Block().Slot())
        }
    }
    if lowestBound == currEpoch {
        if nonBuilderBlocks > maxNonBuilderBlocks {
            return errors.Errorf("too many non-builder blocks: %d (max allowed: %d), builder blocks: %d", nonBuilderBlocks, maxNonBuilderBlocks, builderBlocks)
        }
        return nil
    }
    blockCtrs, err = beaconClient.ListBeaconBlocks(context.Background(), &ethpb.ListBlocksRequest{QueryFilter: &ethpb.ListBlocksRequest_Epoch{Epoch: currEpoch}})
@@ -127,11 +140,16 @@ func builderActive(_ *e2etypes.EvaluationContext, conns ...*grpc.ClientConn) err
            continue
        }
        if string(execPayload.ExtraData()) != "prysm-builder" {
            return errors.Errorf("%s block with slot %d was not built by the builder. It has an extra data of %s and txRoot of %s", version.String(b.Version()), b.Block().Slot(), string(execPayload.ExtraData()), hexutil.Encode(txRoot))
            nonBuilderBlocks++
            continue
        }
        builderBlocks++
        if execPayload.GasLimit() == 0 {
            return errors.Errorf("%s block with slot %d has a gas limit of 0, when it should be in the 30M range", version.String(b.Version()), b.Block().Slot())
        }
    }
    if nonBuilderBlocks > maxNonBuilderBlocks {
        return errors.Errorf("too many non-builder blocks: %d (max allowed: %d), builder blocks: %d", nonBuilderBlocks, maxNonBuilderBlocks, builderBlocks)
    }
    return nil
}

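The classification rule in both loops above is the same: a block counts as builder-built if and only if its payload's extra data equals "prysm-builder". A minimal restatement of that rule (a sketch; the field names follow the loops above):

    // classify bumps one of the two counters used by builderActive.
    func classify(extraData []byte, builderBlocks, nonBuilderBlocks *int) {
        if string(extraData) == "prysm-builder" {
            *builderBlocks++
            return
        }
        *nonBuilderBlocks++
    }

With maxNonBuilderBlocks = 2, up to two locally built blocks per evaluated window are tolerated before the evaluator fails.
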
@@ -88,11 +88,28 @@ var ElectraForkTransition = e2etypes.Evaluator{
    Evaluation: electraForkOccurs,
}

// FuluForkTransition ensures that the fulu hard fork has occurred successfully
var FuluForkTransition = e2etypes.Evaluator{
    Name: "fulu_fork_transition_%d",
    Policy: func(e primitives.Epoch) bool {
        // Only run if we started before Fulu
        if e2etypes.GenesisFork() >= version.Fulu {
            return false
        }
        fEpoch := params.BeaconConfig().FuluForkEpoch
        return policies.OnEpoch(fEpoch)(e)
    },
    Evaluation: fuluForkOccurs,
}

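The Policy above runs the evaluation exactly once, at the Fulu fork epoch, and only when the chain did not already start on Fulu. A minimal restatement of the assumed policies.OnEpoch semantics (the real helper lives in testing/endtoend/policies; this sketch matches how it is used here):

    func OnEpoch(target primitives.Epoch) func(primitives.Epoch) bool {
        return func(current primitives.Epoch) bool { return current == target }
    }
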
func altairForkOccurs(_ *e2etypes.EvaluationContext, conns ...*grpc.ClientConn) error {
    conn := conns[0]
    client := ethpb.NewBeaconNodeValidatorClient(conn)

    ctx, cancel := context.WithTimeout(context.Background(), streamDeadline)
    defer cancel()

    stream, err := client.StreamBlocksAltair(ctx, &ethpb.StreamBlocksRequest{VerifiedOnly: true})
    if err != nil {
        return errors.Wrap(err, "failed to get stream")
@@ -101,6 +118,7 @@ func altairForkOccurs(_ *e2etypes.EvaluationContext, conns ...*grpc.ClientConn)
    if err != nil {
        return err
    }

    if errors.Is(ctx.Err(), context.Canceled) {
        return errors.New("context canceled prematurely")
    }
@@ -121,20 +139,24 @@ func altairForkOccurs(_ *e2etypes.EvaluationContext, conns ...*grpc.ClientConn)
    if err != nil {
        return err
    }

    if err := blocks.BeaconBlockIsNil(blk); err != nil {
        return err
    }
    if blk.Block().Slot() < fSlot {
        return errors.Errorf("wanted a block >= %d but received %d", fSlot, blk.Block().Slot())
    }

    return nil
}

func bellatrixForkOccurs(_ *e2etypes.EvaluationContext, conns ...*grpc.ClientConn) error {
    conn := conns[0]
    client := ethpb.NewBeaconNodeValidatorClient(conn)

    ctx, cancel := context.WithTimeout(context.Background(), streamDeadline)
    defer cancel()

    stream, err := client.StreamBlocksAltair(ctx, &ethpb.StreamBlocksRequest{VerifiedOnly: true})
    if err != nil {
        return errors.Wrap(err, "failed to get stream")
@@ -143,6 +165,7 @@ func bellatrixForkOccurs(_ *e2etypes.EvaluationContext, conns ...*grpc.ClientCon
    if err != nil {
        return err
    }

    if errors.Is(ctx.Err(), context.Canceled) {
        return errors.New("context canceled prematurely")
    }
@@ -166,6 +189,7 @@ func bellatrixForkOccurs(_ *e2etypes.EvaluationContext, conns ...*grpc.ClientCon
    if err != nil {
        return err
    }

    if err := blocks.BeaconBlockIsNil(blk); err != nil {
        return err
    }
@@ -188,6 +212,7 @@ func capellaForkOccurs(_ *e2etypes.EvaluationContext, conns ...*grpc.ClientConn)
    if err != nil {
        return err
    }

    if errors.Is(ctx.Err(), context.Canceled) {
        return errors.New("context canceled prematurely")
    }
@@ -209,6 +234,7 @@ func capellaForkOccurs(_ *e2etypes.EvaluationContext, conns ...*grpc.ClientConn)
    if err != nil {
        return err
    }

    if err := blocks.BeaconBlockIsNil(blk); err != nil {
        return err
    }
@@ -231,6 +257,7 @@ func denebForkOccurs(_ *e2etypes.EvaluationContext, conns ...*grpc.ClientConn) e
    if err != nil {
        return err
    }

    if errors.Is(ctx.Err(), context.Canceled) {
        return errors.New("context canceled prematurely")
    }
@@ -252,6 +279,7 @@ func denebForkOccurs(_ *e2etypes.EvaluationContext, conns ...*grpc.ClientConn) e
    if err != nil {
        return err
    }

    if err := blocks.BeaconBlockIsNil(blk); err != nil {
        return err
    }
@@ -274,6 +302,7 @@ func electraForkOccurs(_ *e2etypes.EvaluationContext, conns ...*grpc.ClientConn)
    if err != nil {
        return err
    }

    if errors.Is(ctx.Err(), context.Canceled) {
        return errors.New("context canceled prematurely")
    }
@@ -295,6 +324,7 @@ func electraForkOccurs(_ *e2etypes.EvaluationContext, conns ...*grpc.ClientConn)
    if err != nil {
        return err
    }

    if err := blocks.BeaconBlockIsNil(blk); err != nil {
        return err
    }
@@ -303,3 +333,57 @@ func electraForkOccurs(_ *e2etypes.EvaluationContext, conns ...*grpc.ClientConn)
    }
    return nil
}

func fuluForkOccurs(_ *e2etypes.EvaluationContext, conns ...*grpc.ClientConn) error {
    conn := conns[0]
    client := ethpb.NewBeaconNodeValidatorClient(conn)

    ctx, cancel := context.WithTimeout(context.Background(), streamDeadline)
    defer cancel()

    stream, err := client.StreamBlocksAltair(ctx, &ethpb.StreamBlocksRequest{VerifiedOnly: true})
    if err != nil {
        return errors.Wrap(err, "failed to get stream")
    }

    fSlot, err := slots.EpochStart(params.BeaconConfig().FuluForkEpoch)
    if err != nil {
        return err
    }

    if errors.Is(ctx.Err(), context.Canceled) {
        return errors.New("context canceled prematurely")
    }

    res, err := stream.Recv()
    if err != nil {
        return err
    }

    if res == nil || res.Block == nil {
        return errors.New("nil block returned by beacon node")
    }

    if res.GetBlock() == nil {
        return errors.New("nil block returned by beacon node")
    }

    if res.GetFuluBlock() == nil {
        return errors.Errorf("non-fulu block returned after the fork with type %T", res.Block)
    }

    blk, err := blocks.NewSignedBeaconBlock(res.GetFuluBlock())
    if err != nil {
        return err
    }

    if err := blocks.BeaconBlockIsNil(blk); err != nil {
        return err
    }

    if blk.Block().Slot() < fSlot {
        return errors.Errorf("wanted a block at slot >= %d but received %d", fSlot, blk.Block().Slot())
    }

    return nil
}

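fSlot in fuluForkOccurs is simply the first slot of the configured fork epoch. A worked sketch of the slot math (slots.EpochStart performs this multiplication with overflow checks; 8 slots per epoch is the minimal-preset value assumed here):

    // With FuluForkEpoch = 2 and 8 slots per epoch, the first Fulu slot is 16.
    func epochStart(epoch, slotsPerEpoch uint64) uint64 {
        return epoch * slotsPerEpoch
    }
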
@@ -117,7 +117,10 @@ func metricsTest(_ *types.EvaluationContext, conns ...*grpc.ClientConn) error {
        return err
    }
    timeSlot := slots.CurrentSlot(genesisResp.GenesisTime.AsTime())
    if uint64(chainHead.HeadSlot) != uint64(timeSlot) {
    // Allow 1 slot tolerance due to race between calculating current slot
    // and fetching chain head - a slot boundary may occur between these calls.
    // Check: chainHead.HeadSlot <= timeSlot <= chainHead.HeadSlot + 1
    if uint64(chainHead.HeadSlot) > uint64(timeSlot) || uint64(timeSlot) > uint64(chainHead.HeadSlot)+1 {
        return fmt.Errorf("expected metrics slot to equal chain head slot, expected %d, received %d", timeSlot, chainHead.HeadSlot)
    }

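Concretely, the new check accepts timeSlot equal to HeadSlot or HeadSlot+1 and rejects everything else; a compact restatement (illustrative helper, not in the diff):

    func withinOneSlot(headSlot, timeSlot uint64) bool {
        return headSlot <= timeSlot && timeSlot <= headSlot+1
    }
    // withinOneSlot(10, 10) and withinOneSlot(10, 11) pass; 9 or 12 fail.
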
@@ -40,7 +40,9 @@ var depositsInBlockStart = params.E2ETestConfig().EpochsPerEth1VotingPeriod * 2
// deposits included + finalization + MaxSeedLookahead for activation.
var depositActivationStartEpoch = depositsInBlockStart + 2 + params.E2ETestConfig().MaxSeedLookahead
var depositEndEpoch = depositActivationStartEpoch + primitives.Epoch(math.Ceil(float64(depositValCount)/float64(params.E2ETestConfig().MinPerEpochChurnLimit)))
var exitSubmissionEpoch = primitives.Epoch(7)

// Default exit submission epoch for standard tests.
var defaultExitSubmissionEpoch = primitives.Epoch(7)

// ProcessesDepositsInBlocks ensures the expected amount of deposits are accepted into blocks.
// Note: This evaluator only works for pre-Electra genesis since Electra uses EIP-6110 deposit requests.
@@ -92,55 +94,90 @@ var DepositedValidatorsAreActive = e2etypes.Evaluator{
}

// ProposeVoluntaryExit sends a voluntary exit from a randomly selected validator in the genesis set.
var ProposeVoluntaryExit = e2etypes.Evaluator{
    Name:       "propose_voluntary_exit_epoch_%d",
    Policy:     policies.OnEpoch(exitSubmissionEpoch),
    Evaluation: proposeVoluntaryExit,
// Uses the default exit submission epoch (7).
var ProposeVoluntaryExit = ProposeVoluntaryExitAtEpoch(defaultExitSubmissionEpoch)

// ProposeVoluntaryExitAtEpoch sends a voluntary exit at the specified epoch.
var ProposeVoluntaryExitAtEpoch = func(epoch primitives.Epoch) e2etypes.Evaluator {
    return e2etypes.Evaluator{
        Name:       "propose_voluntary_exit_epoch_%d",
        Policy:     policies.OnEpoch(epoch),
        Evaluation: proposeVoluntaryExit,
    }
}

// ValidatorsHaveExited checks the beacon state for the exited validator and ensures it's marked as exited.
var ValidatorsHaveExited = e2etypes.Evaluator{
    Name:       "voluntary_has_exited_%d",
    Policy:     policies.OnEpoch(8),
    Evaluation: validatorsHaveExited,
// Uses the default exit submission epoch + 1 (epoch 8).
var ValidatorsHaveExited = ValidatorsHaveExitedAtEpoch(defaultExitSubmissionEpoch + 1)

// ValidatorsHaveExitedAtEpoch checks validators have exited at the specified epoch.
var ValidatorsHaveExitedAtEpoch = func(epoch primitives.Epoch) e2etypes.Evaluator {
    return e2etypes.Evaluator{
        Name:       "voluntary_has_exited_%d",
        Policy:     policies.OnEpoch(epoch),
        Evaluation: validatorsHaveExited,
    }
}

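For illustration, a compressed run can wire custom epochs straight into these factories; a hypothetical pairing that matches the minimal Fulu test later in this diff (exit at epoch 4, confirmation one epoch later):

    // Hypothetical helper, not part of the diff.
    func compressedExitEvaluators() []e2etypes.Evaluator {
        return []e2etypes.Evaluator{
            ProposeVoluntaryExitAtEpoch(4),
            ValidatorsHaveExitedAtEpoch(5),
        }
    }
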
// SubmitWithdrawal sends a withdrawal from a previously exited validator.
var SubmitWithdrawal = e2etypes.Evaluator{
    Name: "submit_withdrawal_epoch_%d",
    Policy: func(currentEpoch primitives.Epoch) bool {
        fEpoch := params.BeaconConfig().CapellaForkEpoch
        // If Capella is disabled (starting at Deneb+), run after exit submission
        if e2etypes.GenesisFork() >= version.Deneb {
            return policies.OnEpoch(exitSubmissionEpoch + 1)(currentEpoch)
        }
        return policies.BetweenEpochs(fEpoch-2, fEpoch+1)(currentEpoch)
    },
    Evaluation: submitWithdrawal,
// Uses default timing based on exit submission epoch.
var SubmitWithdrawal = SubmitWithdrawalAtEpoch(defaultExitSubmissionEpoch + 1)

// SubmitWithdrawalAtEpoch sends a withdrawal at the specified epoch.
// For pre-Deneb genesis, it runs around the Capella fork epoch instead.
var SubmitWithdrawalAtEpoch = func(epoch primitives.Epoch) e2etypes.Evaluator {
    return e2etypes.Evaluator{
        Name: "submit_withdrawal_epoch_%d",
        Policy: func(currentEpoch primitives.Epoch) bool {
            fEpoch := params.BeaconConfig().CapellaForkEpoch
            // If Capella is disabled (starting at Deneb+), run at the specified epoch
            if e2etypes.GenesisFork() >= version.Deneb {
                return policies.OnEpoch(epoch)(currentEpoch)
            }
            return policies.BetweenEpochs(fEpoch-2, fEpoch+1)(currentEpoch)
        },
        Evaluation: submitWithdrawal,
    }
}

// ValidatorsHaveWithdrawn checks the beacon state for the withdrawn validator and ensures it has been withdrawn.
var ValidatorsHaveWithdrawn = e2etypes.Evaluator{
    Name: "validator_has_withdrawn_%d",
    Policy: func(currentEpoch primitives.Epoch) bool {
        // TODO: Fix this for mainnet configs.
        if params.BeaconConfig().ConfigName != params.EndToEndName {
            return false
        }
        // Only run this for minimal setups after capella
        fEpoch := params.BeaconConfig().CapellaForkEpoch
        // If Capella is disabled (starting at Deneb+), run after withdrawal submission
        var validWithdrawnEpoch primitives.Epoch
        if e2etypes.GenesisFork() >= version.Deneb {
            validWithdrawnEpoch = exitSubmissionEpoch + 2
        } else {
            validWithdrawnEpoch = fEpoch + 1
        }
// Uses default timing based on exit submission epoch.
var ValidatorsHaveWithdrawn = ValidatorsHaveWithdrawnAfterExitAtEpoch(defaultExitSubmissionEpoch)

        requiredPolicy := policies.OnEpoch(validWithdrawnEpoch)
        return requiredPolicy(currentEpoch)
    },
    Evaluation: validatorsAreWithdrawn,
// ValidatorsHaveWithdrawnAfterExitAtEpoch checks validators have withdrawn after exiting at the specified epoch.
// For pre-Deneb genesis, it runs at CapellaForkEpoch + 1 instead.
var ValidatorsHaveWithdrawnAfterExitAtEpoch = func(exitSubmitEpoch primitives.Epoch) e2etypes.Evaluator {
    return e2etypes.Evaluator{
        Name: "validator_has_withdrawn_%d",
        Policy: func(currentEpoch primitives.Epoch) bool {
            // TODO: Fix this for mainnet configs.
            if params.BeaconConfig().ConfigName != params.EndToEndName {
                return false
            }
            // Only run this for minimal setups after capella
            fEpoch := params.BeaconConfig().CapellaForkEpoch
            // If Capella is disabled (starting at Deneb+), run after withdrawal submission
            var validWithdrawnEpoch primitives.Epoch
            if e2etypes.GenesisFork() >= version.Deneb {
                // Exit submitted at exitSubmitEpoch
                // Exit epoch = exitSubmitEpoch + 1 + MAX_SEED_LOOKAHEAD
                // Withdrawable epoch = exit epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY
                // Add 1 more epoch for the sweep to process
                exitEpoch := exitSubmitEpoch + 1 + primitives.Epoch(params.BeaconConfig().MaxSeedLookahead)
                withdrawableEpoch := exitEpoch + primitives.Epoch(params.BeaconConfig().MinValidatorWithdrawabilityDelay)
                validWithdrawnEpoch = withdrawableEpoch + 1
            } else {
                // For pre-Deneb genesis, give 2 epochs after Capella for:
                // 1. BLS-to-exec changes to be processed (submitted in epoch before Capella)
                // 2. Withdrawal sweep to reach all exited validators
                validWithdrawnEpoch = fEpoch + 2
            }

            requiredPolicy := policies.OnEpoch(validWithdrawnEpoch)
            return requiredPolicy(currentEpoch)
        },
        Evaluation: validatorsAreWithdrawn,
    }
}

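The new policy encodes plain epoch arithmetic. A worked instance, assuming the e2e config sets MaxSeedLookahead and MinValidatorWithdrawabilityDelay to 1 each (consistent with the "withdrawal verified at epoch 8" plan in the minimal test later in this diff):

    // Hypothetical helper mirroring the policy's arithmetic.
    func withdrawnCheckEpoch(exitSubmitEpoch, maxSeedLookahead, minWithdrawabilityDelay primitives.Epoch) primitives.Epoch {
        exitEpoch := exitSubmitEpoch + 1 + maxSeedLookahead
        withdrawableEpoch := exitEpoch + minWithdrawabilityDelay
        return withdrawableEpoch + 1 // one extra epoch for the sweep to process
    }
    // withdrawnCheckEpoch(4, 1, 1) == 8
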
// ValidatorsVoteWithTheMajority verifies whether validators vote for eth1data using the majority algorithm.
@@ -351,8 +388,7 @@ func depositedValidatorsAreActive(ec *e2etypes.EvaluationContext, conns ...*grpc
            continue // we aren't checking for this validator
        }
        // ignore voluntary exits when checking balance and active status
        exited := ec.ExitedVals[key]
        if exited {
        if _, exited := ec.ExitedVals[key]; exited {
            nexits++
            delete(expected, key)
            continue
@@ -457,7 +493,7 @@ func proposeVoluntaryExit(ec *e2etypes.EvaluationContext, conns ...*grpc.ClientC
        return errors.Wrap(err, "could not propose exit")
    }
    pubk := bytesutil.ToBytes48(deposits[exitedIndex].Data.PublicKey)
    ec.ExitedVals[pubk] = true
    ec.ExitedVals[pubk] = chainHead.HeadEpoch // Store submission epoch
    return nil
}
@@ -471,7 +507,7 @@ func proposeVoluntaryExit(ec *e2etypes.EvaluationContext, conns ...*grpc.ClientC
    // Send an exit for a non-exited validator.
    for i := 0; i < numOfExits; {
        randIndex := primitives.ValidatorIndex(rand.Uint64() % params.BeaconConfig().MinGenesisActiveValidatorCount)
        if ec.ExitedVals[bytesutil.ToBytes48(privKeys[randIndex].PublicKey().Marshal())] {
        if _, alreadyExited := ec.ExitedVals[bytesutil.ToBytes48(privKeys[randIndex].PublicKey().Marshal())]; alreadyExited {
            continue
        }
        if err := sendExit(randIndex); err != nil {
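Both hunks above follow from the type change to ExitedVals (bool values become submission epochs): since epoch 0 is a legitimate stored value, membership must use the comma-ok form rather than reading the value directly. The idiom, isolated:

    func hasExited(exited map[[48]byte]primitives.Epoch, key [48]byte) bool {
        _, ok := exited[key]
        return ok
    }
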
@@ -567,6 +603,14 @@ func validatorsVoteWithTheMajority(ec *e2etypes.EvaluationContext, conns ...*grp
        b := blk.GetBlindedElectraBlock().Message
        slot = b.Slot
        vote = b.Body.Eth1Data.BlockHash
    case *ethpb.BeaconBlockContainer_FuluBlock:
        b := blk.GetFuluBlock().Block
        slot = b.Slot
        vote = b.Body.Eth1Data.BlockHash
    case *ethpb.BeaconBlockContainer_BlindedFuluBlock:
        b := blk.GetBlindedFuluBlock().Message
        slot = b.Slot
        vote = b.Body.Eth1Data.BlockHash
    default:
        return fmt.Errorf("block of type %T is unknown", blk.Block)
    }
@@ -587,20 +631,28 @@ func validatorsVoteWithTheMajority(ec *e2etypes.EvaluationContext, conns ...*grp
    }
    if isFirstSlotInVotingPeriod {
        ec.ExpectedEth1DataVote = vote
        ec.Eth1DataMismatchCount = 0 // Reset for new voting period
        return nil
    }

    if !bytes.Equal(vote, ec.ExpectedEth1DataVote) {
        for i := primitives.Slot(0); i < slot; i++ {
            v, ok := ec.SeenVotes[i]
            if ok {
                fmt.Printf("vote at slot=%d = %#x\n", i, v)
            } else {
                fmt.Printf("did not see slot=%d\n", i)
        // Allow some tolerance for eth1data vote differences.
        // Validators may have slightly different views of the eth1 chain
        // as new blocks arrive during the voting period.
        ec.Eth1DataMismatchCount++
        // Allow up to 2 mismatches per voting period before failing.
        if ec.Eth1DataMismatchCount > 2 {
            for i := primitives.Slot(0); i < slot; i++ {
                v, ok := ec.SeenVotes[i]
                if ok {
                    fmt.Printf("vote at slot=%d = %#x\n", i, v)
                } else {
                    fmt.Printf("did not see slot=%d\n", i)
                }
            }
            return fmt.Errorf("incorrect eth1data vote for slot %d; expected: %#x vs voted: %#x (mismatch count: %d)",
                slot, ec.ExpectedEth1DataVote, vote, ec.Eth1DataMismatchCount)
        }
        return fmt.Errorf("incorrect eth1data vote for slot %d; expected: %#x vs voted: %#x",
            slot, ec.ExpectedEth1DataVote, vote)
        }
    }
    return nil
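In effect the evaluator now fails only on the third mismatch within a voting period, since the counter resets at the period's first slot; a compact restatement of the threshold:

    // The 3rd mismatch in one voting period (count > 2) fails the evaluator.
    func tooManyMismatches(count int) bool {
        return count > 2
    }
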
@@ -644,8 +696,12 @@ func submitWithdrawal(ec *e2etypes.EvaluationContext, conns ...*grpc.ClientConn)
    }
    changes := make([]*structs.SignedBLSToExecutionChange, 0)
    // Only send half the number of changes each time, to allow us to test
    // at the fork boundary.
    // at the fork boundary. When starting from Deneb+ at genesis, there's no
    // fork boundary to test so we send all changes.
    wantedChanges := numOfExits / 2
    if e2etypes.GenesisFork() >= version.Deneb {
        wantedChanges = numOfExits
    }
    for _, idx := range exitedIndices {
        // Stop sending change messages once we have enough.
        if len(changes) >= wantedChanges {
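A worked instance of the wantedChanges rule above (counts illustrative): with numOfExits = 4, a pre-Deneb genesis sends 2 BLS-to-exec changes before the Capella boundary and the rest afterwards, while a Deneb+ genesis sends all 4 at once:

    func wantedChanges(numOfExits, genesisFork int) int {
        if genesisFork >= version.Deneb {
            return numOfExits // no fork boundary to straddle
        }
        return numOfExits / 2
    }
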
@@ -26,11 +26,11 @@ import (
    "google.golang.org/protobuf/types/known/emptypb"
)

var expectedParticipation = 0.99
var expectedParticipation = 0.98

var expectedMulticlientParticipation = 0.95

var expectedSyncParticipation = 0.99
var expectedSyncParticipation = 0.95

// ValidatorsAreActive ensures the expected amount of validators are active.
var ValidatorsAreActive = types.Evaluator{
@@ -62,6 +62,7 @@ var ValidatorSyncParticipation = types.Evaluator{
func validatorsAreActive(ec *types.EvaluationContext, conns ...*grpc.ClientConn) error {
    conn := conns[0]
    client := ethpb.NewBeaconChainClient(conn)

    // Balances actually fluctuate but we just want to check initial balance.
    validatorRequest := &ethpb.ListValidatorsRequest{
        PageSize: int32(params.BeaconConfig().MinGenesisActiveValidatorCount),
@@ -72,17 +73,26 @@ func validatorsAreActive(ec *types.EvaluationContext, conns ...*grpc.ClientConn)
        return errors.Wrap(err, "failed to get validators")
    }

    expectedCount := params.BeaconConfig().MinGenesisActiveValidatorCount
    // Count should be MinGenesisActiveValidatorCount minus any validators that have exited.
    // We determine actual exited count from the difference, as exits may be submitted but
    // not yet processed, or affected by churn limits.
    receivedCount := uint64(len(validators.ValidatorList))
    if expectedCount != receivedCount {
        return fmt.Errorf("expected validator count to be %d, received %d", expectedCount, receivedCount)
    maxExpected := params.BeaconConfig().MinGenesisActiveValidatorCount
    minExpected := maxExpected - uint64(len(ec.ExitedVals))

    if receivedCount > maxExpected {
        return fmt.Errorf("validator count %d exceeds genesis count %d", receivedCount, maxExpected)
    }
    if receivedCount < minExpected {
        return fmt.Errorf("validator count %d is less than expected minimum %d (genesis %d - %d submitted exits)",
            receivedCount, minExpected, maxExpected, len(ec.ExitedVals))
    }

    effBalanceLowCount := 0
    exitEpochWrongCount := 0
    withdrawEpochWrongCount := 0
    for _, item := range validators.ValidatorList {
        if ec.ExitedVals[bytesutil.ToBytes48(item.Validator.PublicKey)] {
        if _, exited := ec.ExitedVals[bytesutil.ToBytes48(item.Validator.PublicKey)]; exited {
            continue
        }
        if item.Validator.EffectiveBalance < params.BeaconConfig().MaxEffectiveBalance {
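A worked instance of the new count bounds (numbers illustrative): with 256 genesis validators and 2 recorded exits, any received count in [254, 256] passes; 257 trips the upper bound and 253 the lower:

    func countOK(received, genesis, exits uint64) bool {
        return received <= genesis && received >= genesis-exits
    }
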
@@ -262,6 +272,11 @@ func validatorsSyncParticipation(_ *types.EvaluationContext, conns ...*grpc.Clie
        // Skip fork slot.
        continue
    }
    // Skip slots 1-2 at genesis - validators need time to ramp up after chain start
    // due to doppelganger protection. This is a startup timing issue, not a fork transition issue.
    if b.Block().Slot() < 3 {
        continue
    }
    expectedParticipation := expectedSyncParticipation
    switch slots.ToEpoch(b.Block().Slot()) {
    case params.BeaconConfig().AltairForkEpoch:
@@ -295,20 +310,34 @@ func validatorsSyncParticipation(_ *types.EvaluationContext, conns ...*grpc.Clie
    if b == nil || b.IsNil() {
        return errors.New("nil block provided")
    }
    forkSlot, err := slots.EpochStart(params.BeaconConfig().AltairForkEpoch)
    if err != nil {
        return err
    // Skip evaluation of fork transition slots as sync participation
    // tends to drop briefly when transitioning between forks.
    forkEpochs := []primitives.Epoch{
        params.BeaconConfig().AltairForkEpoch,
        params.BeaconConfig().BellatrixForkEpoch,
        params.BeaconConfig().CapellaForkEpoch,
        params.BeaconConfig().DenebForkEpoch,
        params.BeaconConfig().ElectraForkEpoch,
        params.BeaconConfig().FuluForkEpoch,
    }
    nexForkSlot, err := slots.EpochStart(params.BeaconConfig().BellatrixForkEpoch)
    if err != nil {
        return err
    skipSlot := false
    for _, forkEpoch := range forkEpochs {
        // Skip fork epochs set to far future (not scheduled).
        if forkEpoch == params.BeaconConfig().FarFutureEpoch {
            continue
        }
        forkSlot, err := slots.EpochStart(forkEpoch)
        if err != nil {
            return err
        }
        // Skip the first two slots of each fork epoch.
        if b.Block().Slot() == forkSlot || b.Block().Slot() == forkSlot+1 {
            skipSlot = true
            break
        }
    }
    switch b.Block().Slot() {
    case forkSlot, forkSlot + 1, nexForkSlot:
        // Skip evaluation of the slot.
    if skipSlot {
        continue
    default:
        // no-op
    }
    syncAgg, err := b.Block().Body().SyncAggregate()
    if err != nil {
@@ -353,6 +382,14 @@ func syncCompatibleBlockFromCtr(container *ethpb.BeaconBlockContainer) (interfac
    if container.GetBlindedElectraBlock() != nil {
        return blocks.NewSignedBeaconBlock(container.GetBlindedElectraBlock())
    }

    if container.GetFuluBlock() != nil {
        return blocks.NewSignedBeaconBlock(container.GetFuluBlock())
    }

    if container.GetBlindedFuluBlock() != nil {
        return blocks.NewSignedBeaconBlock(container.GetBlindedFuluBlock())
    }
    return nil, errors.New("no supported block type in container")
}

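With the schedule used by the minimal test below (Fulu at epoch 2, and assuming the minimal preset's 8 slots per epoch), the loop above skips slots 16 and 17; the skip condition, isolated:

    // True for the first two slots of a scheduled fork epoch.
    func isForkRampSlot(slot, forkSlot primitives.Slot) bool {
        return slot == forkSlot || slot == forkSlot+1
    }
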
@@ -10,7 +10,7 @@ import (

// Run mainnet e2e config with the current release validator against latest beacon node.
func TestEndToEnd_MainnetConfig_ValidatorAtCurrentRelease(t *testing.T) {
    r := e2eMainnet(t, true, false, types.InitForkCfg(version.Bellatrix, version.Electra, params.E2EMainnetTestConfig()))
    r := e2eMainnet(t, true, false, types.InitForkCfg(version.Bellatrix, version.Fulu, params.E2EMainnetTestConfig()))
    r.run()
}

@@ -8,7 +8,38 @@ import (
    "github.com/OffchainLabs/prysm/v7/testing/endtoend/types"
)

// TestEndToEnd_MinimalConfig is the pre-submit e2e test from Electra to Fulu
// with compressed epochs. It runs 10 epochs with exit at epoch 4 (the earliest
// possible due to ShardCommitteePeriod=4), allowing all evaluators to complete:
// - Participation at epoch 2
// - Finalization at epoch 3
// - Fulu fork transition at epoch 2
// - BPO 1 at epoch 3 (15 blobs)
// - BPO 2 at epoch 4 (21 blobs)
// - Exit proposed at epoch 4
// - Exit confirmed at epoch 5
// - Withdrawal submitted at epoch 5
// - Withdrawal verified at epoch 8 (exit epoch 4 + 1 + MaxSeedLookahead + MinValidatorWithdrawabilityDelay + 1)
func TestEndToEnd_MinimalConfig(t *testing.T) {
    r := e2eMinimal(t, types.InitForkCfg(version.Bellatrix, version.Electra, params.E2ETestConfig()), types.WithCheckpointSync())
    cfg := params.E2ETestConfig()
    cfg = types.InitForkCfg(version.Electra, version.Fulu, cfg)
    // Set Fulu fork at epoch 2 for a quick fork transition test
    cfg.FuluForkEpoch = 2
    // Update BlobSchedule to use the new FuluForkEpoch for BPO testing
    cfg.BlobSchedule = []params.BlobScheduleEntry{
        {Epoch: cfg.DenebForkEpoch, MaxBlobsPerBlock: uint64(cfg.DeprecatedMaxBlobsPerBlock)},
        {Epoch: cfg.ElectraForkEpoch, MaxBlobsPerBlock: uint64(cfg.DeprecatedMaxBlobsPerBlockElectra)},
        // BPO (Blob Parameter Optimization) schedule for Fulu
        {Epoch: cfg.FuluForkEpoch + 1, MaxBlobsPerBlock: 15},
        {Epoch: cfg.FuluForkEpoch + 2, MaxBlobsPerBlock: 21},
    }
    cfg.InitializeForkSchedule()

    r := e2eMinimal(t, cfg,
        types.WithCheckpointSync(),
        types.WithEpochs(10),
        types.WithExitEpoch(4), // Minimum due to ShardCommitteePeriod=4
        types.WithLargeBlobs(), // Use large blob transactions for BPO testing
    )
    r.run()
}

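The BlobSchedule above acts as a step function: the limit in force at an epoch comes from the latest entry whose Epoch is not in the future. A lookup sketch (Prysm's actual resolution lives in the params package; this assumes entries sorted ascending by Epoch):

    func maxBlobsAt(schedule []params.BlobScheduleEntry, e primitives.Epoch) uint64 {
        var limit uint64
        for _, entry := range schedule {
            if entry.Epoch <= e {
                limit = entry.MaxBlobsPerBlock
            }
        }
        return limit
    }
    // With FuluForkEpoch = 2: epoch 3 resolves to 15 blobs, epoch 4 and later to 21.
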
testing/endtoend/minimal_postmerge_e2e_test.go (new file, 17 lines)
@@ -0,0 +1,17 @@
package endtoend

import (
    "testing"

    "github.com/OffchainLabs/prysm/v7/config/params"
    "github.com/OffchainLabs/prysm/v7/runtime/version"
    "github.com/OffchainLabs/prysm/v7/testing/endtoend/types"
)

// TestEndToEnd_MinimalConfig_PostMerge is a post-submit test that runs the full
// e2e test from Bellatrix (post-merge) through all fork transitions to the latest fork.
// This test takes longer but provides comprehensive coverage of all forks.
func TestEndToEnd_MinimalConfig_PostMerge(t *testing.T) {
    r := e2eMinimal(t, types.InitForkCfg(version.Bellatrix, version.Fulu, params.E2ETestConfig()), types.WithCheckpointSync())
    r.run()
}

@@ -59,6 +59,15 @@ func WithBuilder() E2EConfigOpt {
    }
}

// WithLargeBlobs configures the transaction generator to use large blob
// transactions (6 blobs per tx) for testing BPO limits. Without this option,
// small blob transactions (1 blob per tx) are used by default.
func WithLargeBlobs() E2EConfigOpt {
    return func(cfg *E2EConfig) {
        cfg.UseLargeBlobs = true
    }
}

func WithSSZOnly() E2EConfigOpt {
    return func(cfg *E2EConfig) {
        if err := os.Setenv(params.EnvNameOverrideAccept, api.OctetStreamMediaType); err != nil {
@@ -67,6 +76,14 @@ func WithSSZOnly() E2EConfigOpt {
    }
}

// WithExitEpoch sets a custom epoch for voluntary exit submission.
// This affects ProposeVoluntaryExit, ValidatorsHaveExited, SubmitWithdrawal, and ValidatorsHaveWithdrawn evaluators.
func WithExitEpoch(e primitives.Epoch) E2EConfigOpt {
    return func(cfg *E2EConfig) {
        cfg.ExitEpoch = e
    }
}

// E2EConfig defines the struct for all configurations needed for E2E testing.
type E2EConfig struct {
    TestCheckpointSync bool
@@ -81,7 +98,9 @@ type E2EConfig struct {
    UseValidatorCrossClient bool
    UseBeaconRestApi        bool
    UseBuilder              bool
    UseLargeBlobs           bool // Use large blob transactions (6 blobs per tx) for BPO testing
    EpochsToRun             uint64
    ExitEpoch               primitives.Epoch // Custom epoch for voluntary exit submission (0 means use default)
    Seed                    int64
    TracingSinkEndpoint     string
    Evaluators              []Evaluator
@@ -95,6 +114,9 @@ type E2EConfig struct {
func GenesisFork() int {
    cfg := params.BeaconConfig()
    // Check from highest fork to lowest to find the genesis fork.
    if cfg.FuluForkEpoch == 0 {
        return version.Fulu
    }
    if cfg.ElectraForkEpoch == 0 {
        return version.Electra
    }
@@ -146,16 +168,21 @@ type DepositBalancer interface {
// EvaluationContext allows for additional data to be provided to evaluators that need extra state.
type EvaluationContext struct {
    DepositBalancer
    ExitedVals map[[48]byte]bool
    // ExitedVals maps validator pubkey to the epoch when their exit was submitted.
    // The actual exit takes effect at: submission_epoch + 1 + MaxSeedLookahead
    ExitedVals           map[[48]byte]primitives.Epoch
    SeenVotes            map[primitives.Slot][]byte
    ExpectedEth1DataVote []byte
    // Eth1DataMismatchCount tracks how many eth1data vote mismatches have been seen
    // in the current voting period. Some tolerance is allowed for timing differences.
    Eth1DataMismatchCount int
}

// NewEvaluationContext handles initializing internal datastructures (like maps) provided by the EvaluationContext.
func NewEvaluationContext(d DepositBalancer) *EvaluationContext {
    return &EvaluationContext{
        DepositBalancer: d,
        ExitedVals:      make(map[[48]byte]bool),
        ExitedVals:      make(map[[48]byte]primitives.Epoch),
        SeenVotes:       make(map[primitives.Slot][]byte),
    }
}

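WithLargeBlobs and WithExitEpoch follow the package's functional-options pattern: each E2EConfigOpt mutates the config in place, and the runner applies them in order. A sketch of the assumed application loop:

    func applyOpts(cfg *E2EConfig, opts ...E2EConfigOpt) {
        for _, o := range opts {
            o(cfg)
        }
    }
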
@@ -692,14 +692,25 @@ func (p *Builder) handleHeaderRequestElectra(w http.ResponseWriter) {
}

func (p *Builder) handleBlindedBlock(w http.ResponseWriter, req *http.Request) {
    // TODO update for fork specific
    sb := &builderAPI.SignedBlindedBeaconBlockBellatrix{
        SignedBlindedBeaconBlockBellatrix: &eth.SignedBlindedBeaconBlockBellatrix{},
    // Decode blinded block based on the current fork version.
    // The beacon node sends JSON using api/server/structs types.
    var err error
    switch p.currVersion {
    case version.Electra:
        var sb structs.SignedBlindedBeaconBlockElectra
        err = json.NewDecoder(req.Body).Decode(&sb)
    case version.Deneb:
        var sb structs.SignedBlindedBeaconBlockDeneb
        err = json.NewDecoder(req.Body).Decode(&sb)
    case version.Capella:
        var sb structs.SignedBlindedBeaconBlockCapella
        err = json.NewDecoder(req.Body).Decode(&sb)
    default:
        var sb structs.SignedBlindedBeaconBlockBellatrix
        err = json.NewDecoder(req.Body).Decode(&sb)
    }
    err := json.NewDecoder(req.Body).Decode(sb)
    if err != nil {
        p.cfg.logger.WithError(err).Error("Could not decode blinded block")
        // TODO: Allow the method to unmarshal blinded blocks correctly
        p.cfg.logger.WithError(err).WithField("version", version.String(p.currVersion)).Error("Could not decode blinded block")
    }
    if p.currPayload == nil {
        p.cfg.logger.Error("No payload is cached")

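Each case above decodes into a different fork-specific struct because the JSON shapes diverge across forks. If the duplication grows with future forks, a generic helper could express the same dispatch more compactly (Go 1.18+; hypothetical, not part of the diff):

    func decodeAs[T any](r io.Reader) (*T, error) {
        var v T
        if err := json.NewDecoder(r).Decode(&v); err != nil {
            return nil, err
        }
        return &v, nil
    }
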
@@ -44,6 +44,13 @@ func WithProposerSigning(idx primitives.ValidatorIndex, sk bls.SecretKey, valRoo
    }
}

// WithProposer sets the proposer index for the generated block without signing.
func WithProposer(idx primitives.ValidatorIndex) DenebBlockGeneratorOption {
    return func(g *denebBlockGenerator) {
        g.proposer = idx
    }
}

func WithPayloadSetter(p *enginev1.ExecutionPayloadDeneb) DenebBlockGeneratorOption {
    return func(g *denebBlockGenerator) {
        g.payload = p

third_party/com_github_consensys_gnark_crypto.patch (new vendored file, 171 lines)
@@ -0,0 +1,171 @@
diff --git a/ecc/bls12-381/fp/BUILD.bazel b/ecc/bls12-381/fp/BUILD.bazel
index 4d57841..5b1dec4 100644
--- a/ecc/bls12-381/fp/BUILD.bazel
+++ b/ecc/bls12-381/fp/BUILD.bazel
@@ -7,15 +7,21 @@ go_library(
         "arith.go",
         "doc.go",
         "element.go",
-        "element_amd64.go",
-        "element_amd64.s",
-        "element_arm64.go",
-        "element_arm64.s",
         "element_exp.go",
         "element_purego.go",
         "vector.go",
         "vector_purego.go",
-    ],
+    ] + select({
+        "@io_bazel_rules_go//go/platform:amd64": [
+            "element_amd64.go",
+            "//field/asm/element_6w:element_6w_amd64"
+        ],
+        "@io_bazel_rules_go//go/platform:arm64": [
+            "element_arm64.go",
+            "//field/asm/element_6w:element_6w_arm64"
+        ],
+        "//conditions:default": [],
+    }),
     importpath = "github.com/consensys/gnark-crypto/ecc/bls12-381/fp",
     visibility = ["//visibility:public"],
     deps = [
diff --git a/ecc/bls12-381/fr/BUILD.bazel b/ecc/bls12-381/fr/BUILD.bazel
index e24b29b..6beeb04 100644
--- a/ecc/bls12-381/fr/BUILD.bazel
+++ b/ecc/bls12-381/fr/BUILD.bazel
@@ -7,17 +7,24 @@ go_library(
         "arith.go",
         "doc.go",
         "element.go",
-        "element_amd64.go",
-        "element_amd64.s",
-        "element_arm64.go",
-        "element_arm64.s",
         "element_exp.go",
         "element_purego.go",
         "generator.go",
         "vector.go",
-        "vector_amd64.go",
         "vector_purego.go",
-    ],
+    ] + select({
+        "@io_bazel_rules_go//go/platform:amd64": [
+            "element_amd64.go",
+            "//field/asm/element_4w:element_4w_amd64",
+            "vector_amd64.go",
+        ],
+        "@io_bazel_rules_go//go/platform:arm64": [
+            "element_arm64.go",
+            "//field/asm/element_4w:element_4w_arm64"
+        ],
+        "//conditions:default": [],
+    }),
+
     importpath = "github.com/consensys/gnark-crypto/ecc/bls12-381/fr",
     visibility = ["//visibility:public"],
     deps = [
diff --git a/field/asm/element_4w/BUILD.bazel b/field/asm/element_4w/BUILD.bazel
index ef0882d..975e6f9 100644
--- a/field/asm/element_4w/BUILD.bazel
+++ b/field/asm/element_4w/BUILD.bazel
@@ -16,3 +16,15 @@ alias(
     actual = ":element_4w",
     visibility = ["//visibility:public"],
 )
+
+filegroup(
+    name = "element_4w_amd64",
+    srcs = ["element_4w_amd64.s"],
+    visibility = ["//visibility:public"],
+)
+
+filegroup(
+    name = "element_4w_arm64",
+    srcs = ["element_4w_arm64.s"],
+    visibility = ["//visibility:public"],
+)
diff --git a/field/asm/element_6w/BUILD.bazel b/field/asm/element_6w/BUILD.bazel
index fbbe81c..fd99a62 100644
--- a/field/asm/element_6w/BUILD.bazel
+++ b/field/asm/element_6w/BUILD.bazel
@@ -16,3 +16,15 @@ alias(
     actual = ":element_6w",
     visibility = ["//visibility:public"],
 )
+
+filegroup(
+    name = "element_6w_amd64",
+    srcs = ["element_6w_amd64.s"],
+    visibility = ["//visibility:public"],
+)
+
+filegroup(
+    name = "element_6w_arm64",
+    srcs = ["element_6w_arm64.s"],
+    visibility = ["//visibility:public"],
+)
diff -urN gnark-patch-a/ecc/bn254/fp/BUILD.bazel gnark-patch-b/ecc/bn254/fp/BUILD.bazel
--- gnark-patch-a/ecc/bn254/fp/BUILD.bazel	2025-11-19 13:10:39
+++ gnark-patch-b/ecc/bn254/fp/BUILD.bazel	2025-11-19 13:08:50
@@ -7,16 +7,22 @@
         "arith.go",
         "doc.go",
         "element.go",
-        "element_amd64.go",
-        "element_amd64.s",
-        "element_arm64.go",
-        "element_arm64.s",
         "element_exp.go",
         "element_purego.go",
         "vector.go",
-        "vector_amd64.go",
         "vector_purego.go",
-    ],
+    ] + select({
+        "@io_bazel_rules_go//go/platform:amd64": [
+            "element_amd64.go",
+            "//field/asm/element_4w:element_4w_amd64",
+            "vector_amd64.go",
+        ],
+        "@io_bazel_rules_go//go/platform:arm64": [
+            "element_arm64.go",
+            "//field/asm/element_4w:element_4w_arm64",
+        ],
+        "//conditions:default": [],
+    }),
     importpath = "github.com/consensys/gnark-crypto/ecc/bn254/fp",
     visibility = ["//visibility:public"],
     deps = [
diff -urN gnark-patch-a/ecc/bn254/fr/BUILD.bazel gnark-patch-b/ecc/bn254/fr/BUILD.bazel
--- gnark-patch-a/ecc/bn254/fr/BUILD.bazel	2025-11-19 13:10:39
+++ gnark-patch-b/ecc/bn254/fr/BUILD.bazel	2025-11-19 13:08:55
@@ -7,17 +7,23 @@
         "arith.go",
         "doc.go",
         "element.go",
-        "element_amd64.go",
-        "element_amd64.s",
-        "element_arm64.go",
-        "element_arm64.s",
         "element_exp.go",
         "element_purego.go",
         "generator.go",
         "vector.go",
-        "vector_amd64.go",
         "vector_purego.go",
-    ],
+    ] + select({
+        "@io_bazel_rules_go//go/platform:amd64": [
+            "element_amd64.go",
+            "//field/asm/element_4w:element_4w_amd64",
+            "vector_amd64.go",
+        ],
+        "@io_bazel_rules_go//go/platform:arm64": [
+            "element_arm64.go",
+            "//field/asm/element_4w:element_4w_arm64",
+        ],
+        "//conditions:default": [],
+    }),
     importpath = "github.com/consensys/gnark-crypto/ecc/bn254/fr",
     visibility = ["//visibility:public"],
     deps = [
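For readers more at home in Go than Bazel: the select() branches above do for Bazel what file-level build constraints do for the plain Go toolchain, compiling the assembly-backed sources only on matching architectures. A hypothetical Go-side equivalent of one branch (illustration, not part of the patch):

    //go:build amd64

    package fp

    // Compiled only on amd64, mirroring the platform:amd64 select() branch
    // that adds element_amd64.go plus the amd64 assembly filegroup.
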
@@ -3,7 +3,7 @@ load("@prysm//tools/go:def.bzl", "go_library")
go_library(
    name = "go_default_library",
    srcs = ["analyzer.go"],
    importpath = "github.com/OffchainLabs/prysm/v7/tools/analyzers/httperror",
    importpath = "github.com/OffchainLabs/prysm/v7/tools/analyzers/httpwriter",
    visibility = ["//visibility:public"],
    deps = [
        "@org_golang_x_tools//go/analysis:go_default_library",
Some files were not shown because too many files have changed in this diff.