Mirror of https://github.com/OffchainLabs/prysm.git, synced 2026-01-10 05:47:59 -05:00
Compare commits

3 commits: d869754e2e...pulltip

| Author | SHA1 | Date |
|---|---|---|
| | 9692993b6a | |
| | 2ac30f5ce6 | |
| | 7418c00ad6 | |
```diff
@@ -88,9 +88,6 @@ func (s *Store) pullTips(state state.BeaconState, node *Node, jc, fc *ethpb.Chec
 		}
 	}
 	if uf.Epoch > s.unrealizedFinalizedCheckpoint.Epoch {
-		s.unrealizedJustifiedCheckpoint = &forkchoicetypes.Checkpoint{
-			Epoch: uj.Epoch, Root: bytesutil.ToBytes32(uj.Root),
-		}
 		s.unrealizedFinalizedCheckpoint = &forkchoicetypes.Checkpoint{
 			Epoch: uf.Epoch, Root: bytesutil.ToBytes32(uf.Root),
 		}
```
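The removed lines re-assigned the unrealized justified checkpoint inside the finalized-checkpoint branch, duplicating work that the preceding justified-checkpoint branch already does. Below is a minimal runnable sketch of the fixed control flow, assuming that branch ordering; `Checkpoint` and `Store` here are simplified stand-ins, not Prysm's actual `forkchoicetypes.Checkpoint` and forkchoice `Store`.

```go
package main

import "fmt"

// Checkpoint and Store are hypothetical stand-ins for the Prysm types.
type Checkpoint struct {
	Epoch uint64
	Root  [32]byte
}

type Store struct {
	unrealizedJustifiedCheckpoint Checkpoint
	unrealizedFinalizedCheckpoint Checkpoint
}

// pullTips mirrors the fixed flow: each checkpoint is updated in exactly
// one branch, so the justified update removed by the diff was redundant.
func (s *Store) pullTips(uj, uf Checkpoint) {
	if uj.Epoch > s.unrealizedJustifiedCheckpoint.Epoch {
		s.unrealizedJustifiedCheckpoint = uj
	}
	if uf.Epoch > s.unrealizedFinalizedCheckpoint.Epoch {
		// Before the fix, the justified checkpoint was re-assigned here too.
		s.unrealizedFinalizedCheckpoint = uf
	}
}

func main() {
	s := &Store{}
	s.pullTips(Checkpoint{Epoch: 2}, Checkpoint{Epoch: 1})
	fmt.Println(s.unrealizedJustifiedCheckpoint.Epoch, s.unrealizedFinalizedCheckpoint.Epoch) // 2 1
}
```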
```diff
@@ -3,6 +3,7 @@ package sync
 import (
 	"bytes"
 	"context"
+	"fmt"
 	"maps"
 	"slices"
 	"sync"
@@ -243,8 +244,10 @@ func requestDirectSidecarsFromPeers(
 	}
 
 	// Compute missing indices by root, excluding those already in storage.
+	var lastRoot [fieldparams.RootLength]byte
 	missingIndicesByRoot := make(map[[fieldparams.RootLength]byte]map[uint64]bool, len(incompleteRoots))
 	for root := range incompleteRoots {
+		lastRoot = root
 		storedIndices := storedIndicesByRoot[root]
 
 		missingIndices := make(map[uint64]bool, len(requestedIndices))
@@ -259,6 +262,7 @@ func requestDirectSidecarsFromPeers(
 		}
 	}
 
+	initialMissingRootCount := len(missingIndicesByRoot)
 	initialMissingCount := computeTotalCount(missingIndicesByRoot)
 
 	indicesByRootByPeer, err := computeIndicesByRootByPeer(params.P2P, slotByRoot, missingIndicesByRoot, connectedPeers)
@@ -301,11 +305,19 @@ func requestDirectSidecarsFromPeers(
 		}
 	}
 
-	log.WithFields(logrus.Fields{
-		"duration":            time.Since(start),
-		"initialMissingCount": initialMissingCount,
-		"finalMissingCount":   computeTotalCount(missingIndicesByRoot),
-	}).Debug("Requested direct data column sidecars from peers")
+	log := log.WithFields(logrus.Fields{
+		"duration":                time.Since(start),
+		"initialMissingRootCount": initialMissingRootCount,
+		"initialMissingCount":     initialMissingCount,
+		"finalMissingRootCount":   len(missingIndicesByRoot),
+		"finalMissingCount":       computeTotalCount(missingIndicesByRoot),
+	})
+
+	if initialMissingRootCount == 1 {
+		log = log.WithField("root", fmt.Sprintf("%#x", lastRoot))
+	}
+
+	log.Debug("Requested direct data column sidecars from peers")
 
 	return verifiedColumnsByRoot, nil
 }
```
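The rewritten logging builds a child logger carrying the always-present fields, then conditionally attaches the root before emitting. The diff tracks the last iterated root in `lastRoot` so that, when exactly one root is missing, it identifies that root; the sketch below reads it from the single-entry map instead, which is equivalent. All data here is made up for illustration.

```go
package main

import (
	"fmt"

	"github.com/sirupsen/logrus"
)

func main() {
	logrus.SetLevel(logrus.DebugLevel)

	// Hypothetical stand-in for missingIndicesByRoot.
	missing := map[[32]byte]map[uint64]bool{
		{0x01}: {3: true, 7: true},
	}

	// Fields shared by every code path go on a child logger first.
	log := logrus.WithFields(logrus.Fields{
		"initialMissingRootCount": len(missing),
	})

	// The root is attached only when it is unambiguous, i.e. when exactly
	// one root is missing, keeping the common log line compact.
	if len(missing) == 1 {
		for root := range missing {
			log = log.WithField("root", fmt.Sprintf("%#x", root))
		}
	}

	log.Debug("Requested direct data column sidecars from peers")
}
```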
```diff
@@ -3,9 +3,9 @@ package sync
 import (
 	"bytes"
 	"context"
-	"encoding/hex"
 	"fmt"
+	"slices"
 	"time"
 
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
@@ -21,13 +21,23 @@ import (
 	"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
 	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
 	"github.com/OffchainLabs/prysm/v7/runtime/version"
 	"github.com/OffchainLabs/prysm/v7/time"
 	"github.com/OffchainLabs/prysm/v7/time/slots"
 	pubsub "github.com/libp2p/go-libp2p-pubsub"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 )
 
-var pendingAttsLimit = 32768
+const pendingAttsLimit = 32768
+
+// aggregatorIndexFilter defines how aggregator index should be handled in equality checks.
+type aggregatorIndexFilter int
+
+const (
+	// ignoreAggregatorIndex means aggregates differing only by aggregator index are considered equal.
+	ignoreAggregatorIndex aggregatorIndexFilter = iota
+	// includeAggregatorIndex means aggregator index must also match for aggregates to be considered equal.
+	includeAggregatorIndex
+)
 
 // This method processes pending attestations as a "known" block as arrived. With validations,
 // the valid attestations get saved into the operation mem pool, and the invalid attestations gets deleted
```
```diff
@@ -50,16 +60,7 @@ func (s *Service) processPendingAttsForBlock(ctx context.Context, bRoot [32]byte
 	attestations := s.blkRootToPendingAtts[bRoot]
 	s.pendingAttsLock.RUnlock()
 
-	if len(attestations) > 0 {
-		start := time.Now()
-		s.processAttestations(ctx, attestations)
-		duration := time.Since(start)
-		log.WithFields(logrus.Fields{
-			"blockRoot":        hex.EncodeToString(bytesutil.Trunc(bRoot[:])),
-			"pendingAttsCount": len(attestations),
-			"duration":         duration,
-		}).Debug("Verified and saved pending attestations to pool")
-	}
+	s.processAttestations(ctx, attestations)
 
 	randGen := rand.NewGenerator()
 	// Delete the missing block root key from pending attestation queue so a node will not request for the block again.
```
```diff
@@ -79,26 +80,71 @@ func (s *Service) processPendingAttsForBlock(ctx context.Context, bRoot [32]byte
 	return s.sendBatchRootRequest(ctx, pendingRoots, randGen)
 }
 
+// processAttestations processes a list of attestations.
+// It assumes (for logging purposes only) that all attestations pertain to the same block.
 func (s *Service) processAttestations(ctx context.Context, attestations []any) {
 	if len(attestations) == 0 {
 		return
 	}
 
+	firstAttestation := attestations[0]
+	var blockRoot []byte
+	switch v := firstAttestation.(type) {
+	case ethpb.Att:
+		blockRoot = v.GetData().BeaconBlockRoot
+	case ethpb.SignedAggregateAttAndProof:
+		blockRoot = v.AggregateAttestationAndProof().AggregateVal().GetData().BeaconBlockRoot
+	default:
+		log.Warnf("Unexpected attestation type %T, skipping processing", v)
+		return
+	}
+
+	validAggregates := make([]ethpb.SignedAggregateAttAndProof, 0, len(attestations))
+	startAggregate := time.Now()
 	atts := make([]ethpb.Att, 0, len(attestations))
+	aggregateAttAndProofCount := 0
 	for _, att := range attestations {
 		switch v := att.(type) {
 		case ethpb.Att:
 			atts = append(atts, v)
 		case ethpb.SignedAggregateAttAndProof:
-			s.processAggregate(ctx, v)
+			aggregateAttAndProofCount++
+			// Avoid processing multiple aggregates only differing by aggregator index.
+			if slices.ContainsFunc(validAggregates, func(other ethpb.SignedAggregateAttAndProof) bool {
+				return pendingAggregatesAreEqual(v, other, ignoreAggregatorIndex)
+			}) {
+				continue
+			}
+
+			if err := s.processAggregate(ctx, v); err != nil {
+				log.WithError(err).Debug("Pending aggregate attestation could not be processed")
+				continue
+			}
+
+			validAggregates = append(validAggregates, v)
 		default:
 			log.Warnf("Unexpected attestation type %T, skipping", v)
 		}
 	}
+	durationAggregateAttAndProof := time.Since(startAggregate)
 
+	startAtts := time.Now()
 	for _, bucket := range bucketAttestationsByData(atts) {
 		s.processAttestationBucket(ctx, bucket)
 	}
+
+	durationAtts := time.Since(startAtts)
+
+	log.WithFields(logrus.Fields{
+		"blockRoot":                       fmt.Sprintf("%#x", blockRoot),
+		"totalCount":                      len(attestations),
+		"aggregateAttAndProofCount":       aggregateAttAndProofCount,
+		"uniqueAggregateAttAndProofCount": len(validAggregates),
+		"attCount":                        len(atts),
+		"durationTotal":                   durationAggregateAttAndProof + durationAtts,
+		"durationAggregateAttAndProof":    durationAggregateAttAndProof,
+		"durationAtts":                    durationAtts,
+	}).Debug("Verified and saved pending attestations to pool")
 }
 
 // attestationBucket groups attestations with the same AttestationData for batch processing.
```
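The new aggregate loop is a dedup-then-process pass: `slices.ContainsFunc` compares each incoming aggregate against those already accepted, ignoring the aggregator index, and only survivors reach `processAggregate`. A reduced sketch of the same pattern, with a hypothetical `agg` type in place of `ethpb.SignedAggregateAttAndProof` and slot equality standing in for the full comparison:

```go
package main

import (
	"fmt"
	"slices"
)

// agg is a hypothetical stand-in for ethpb.SignedAggregateAttAndProof.
type agg struct {
	AggregatorIndex uint64
	Slot            uint64
}

// process stands in for s.processAggregate and always succeeds here.
func process(a agg) error { return nil }

func main() {
	// {1,5} and {2,5} differ only by aggregator index.
	pending := []agg{{1, 5}, {2, 5}, {3, 6}}

	valid := make([]agg, 0, len(pending))
	for _, a := range pending {
		// Skip aggregates equal to an already-accepted one when the
		// aggregator index is ignored (here: same slot).
		if slices.ContainsFunc(valid, func(other agg) bool {
			return other.Slot == a.Slot
		}) {
			continue
		}
		if err := process(a); err != nil {
			continue // a failed aggregate is not recorded as valid
		}
		valid = append(valid, a)
	}

	fmt.Println(len(valid)) // 2: one aggregate per distinct slot
}
```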
```diff
@@ -303,21 +349,20 @@ func (s *Service) processVerifiedAttestation(
 	})
 }
 
-func (s *Service) processAggregate(ctx context.Context, aggregate ethpb.SignedAggregateAttAndProof) {
+func (s *Service) processAggregate(ctx context.Context, aggregate ethpb.SignedAggregateAttAndProof) error {
 	res, err := s.validateAggregatedAtt(ctx, aggregate)
 	if err != nil {
-		log.WithError(err).Debug("Pending aggregated attestation failed validation")
-		return
+		return errors.Wrap(err, "validate aggregated att")
 	}
 
 	if res != pubsub.ValidationAccept || !s.validateBlockInAttestation(ctx, aggregate) {
-		log.Debug("Pending aggregated attestation failed validation")
-		return
+		return errors.New("Pending aggregated attestation failed validation")
 	}
 
 	att := aggregate.AggregateAttestationAndProof().AggregateVal()
 	if err := s.saveAttestation(att); err != nil {
-		log.WithError(err).Debug("Could not save aggregated attestation")
-		return
+		return errors.Wrap(err, "save attestation")
 	}
 
 	_ = s.setAggregatorIndexEpochSeen(att.GetData().Target.Epoch, aggregate.AggregateAttestationAndProof().GetAggregatorIndex())
```
```diff
@@ -325,6 +370,8 @@ func (s *Service) processAggregate(ctx context.Context, aggregate ethpb.SignedAg
 	if err := s.cfg.p2p.Broadcast(ctx, aggregate); err != nil {
 		log.WithError(err).Debug("Could not broadcast aggregated attestation")
 	}
+
+	return nil
 }
 
 // This defines how pending aggregates are saved in the map. The key is the
```
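`processAggregate` now reports failures to its caller instead of logging and returning silently, which lets `processAttestations` skip the failed aggregate and log one summary line. A compact sketch of this log-and-return to wrapped-error refactor, using `github.com/pkg/errors` as the diff does; `validate` and `save` are stand-ins for the real validation and pool-save steps:

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

func validate() error { return nil }
func save() error     { return fmt.Errorf("pool full") }

// processAggregate reports failures to its caller instead of logging them
// locally; each return site wraps the cause with where it happened.
func processAggregate() error {
	if err := validate(); err != nil {
		return errors.Wrap(err, "validate aggregated att")
	}
	if err := save(); err != nil {
		return errors.Wrap(err, "save attestation")
	}
	return nil
}

func main() {
	if err := processAggregate(); err != nil {
		fmt.Println(err) // save attestation: pool full
	}
}
```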
```diff
@@ -336,7 +383,7 @@ func (s *Service) savePendingAggregate(agg ethpb.SignedAggregateAttAndProof) {
 
 	s.savePending(root, agg, func(other any) bool {
 		a, ok := other.(ethpb.SignedAggregateAttAndProof)
-		return ok && pendingAggregatesAreEqual(agg, a)
+		return ok && pendingAggregatesAreEqual(agg, a, includeAggregatorIndex)
 	})
 }
```
```diff
@@ -391,13 +438,19 @@ func (s *Service) savePending(root [32]byte, pending any, isEqual func(other any
 	s.blkRootToPendingAtts[root] = append(s.blkRootToPendingAtts[root], pending)
 }
 
-func pendingAggregatesAreEqual(a, b ethpb.SignedAggregateAttAndProof) bool {
+// pendingAggregatesAreEqual checks if two pending aggregate attestations are equal.
+// The filter parameter controls whether aggregator index is considered in the equality check.
+func pendingAggregatesAreEqual(a, b ethpb.SignedAggregateAttAndProof, filter aggregatorIndexFilter) bool {
 	if a.Version() != b.Version() {
 		return false
 	}
-	if a.AggregateAttestationAndProof().GetAggregatorIndex() != b.AggregateAttestationAndProof().GetAggregatorIndex() {
-		return false
+
+	if filter == includeAggregatorIndex {
+		if a.AggregateAttestationAndProof().GetAggregatorIndex() != b.AggregateAttestationAndProof().GetAggregatorIndex() {
+			return false
+		}
 	}
+
 	aAtt := a.AggregateAttestationAndProof().AggregateVal()
 	bAtt := b.AggregateAttestationAndProof().AggregateVal()
 	if aAtt.GetData().Slot != bAtt.GetData().Slot {
```
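The `aggregatorIndexFilter` parameter lets one equality function serve two comparison modes without duplicating the field-by-field checks. A self-contained sketch of the pattern, with a hypothetical `aggregate` struct standing in for the real protobuf types:

```go
package main

import "fmt"

type aggregatorIndexFilter int

const (
	ignoreAggregatorIndex aggregatorIndexFilter = iota
	includeAggregatorIndex
)

// aggregate is a hypothetical stand-in for ethpb.SignedAggregateAttAndProof.
type aggregate struct {
	AggregatorIndex uint64
	Slot            uint64
	CommitteeIndex  uint64
}

// aggregatesAreEqual mirrors the shape of pendingAggregatesAreEqual: the
// filter decides whether the aggregator index participates in the comparison.
func aggregatesAreEqual(a, b aggregate, filter aggregatorIndexFilter) bool {
	if filter == includeAggregatorIndex && a.AggregatorIndex != b.AggregatorIndex {
		return false
	}
	return a.Slot == b.Slot && a.CommitteeIndex == b.CommitteeIndex
}

func main() {
	a := aggregate{AggregatorIndex: 1, Slot: 5, CommitteeIndex: 2}
	b := aggregate{AggregatorIndex: 2, Slot: 5, CommitteeIndex: 2}
	fmt.Println(aggregatesAreEqual(a, b, ignoreAggregatorIndex))  // true
	fmt.Println(aggregatesAreEqual(a, b, includeAggregatorIndex)) // false
}
```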
```diff
@@ -94,7 +94,7 @@ func TestProcessPendingAtts_NoBlockRequestBlock(t *testing.T) {
 	// Process block A (which exists and has no pending attestations)
 	// This should skip processing attestations for A and request blocks B and C
 	require.NoError(t, r.processPendingAttsForBlock(t.Context(), rootA))
-	require.LogsContain(t, hook, "Requesting block by root")
+	require.LogsContain(t, hook, "Requesting blocks by root")
 }
 
 func TestProcessPendingAtts_HasBlockSaveUnaggregatedAtt(t *testing.T) {
```
```diff
@@ -911,17 +911,17 @@ func Test_pendingAggregatesAreEqual(t *testing.T) {
 			},
 			AggregationBits: bitfield.Bitlist{0b1111},
 		}}}
-		assert.Equal(t, true, pendingAggregatesAreEqual(a, b))
+		assert.Equal(t, true, pendingAggregatesAreEqual(a, b, includeAggregatorIndex))
 	})
 	t.Run("different version", func(t *testing.T) {
 		a := &ethpb.SignedAggregateAttestationAndProof{Message: &ethpb.AggregateAttestationAndProof{AggregatorIndex: 1}}
 		b := &ethpb.SignedAggregateAttestationAndProofElectra{Message: &ethpb.AggregateAttestationAndProofElectra{AggregatorIndex: 1}}
-		assert.Equal(t, false, pendingAggregatesAreEqual(a, b))
+		assert.Equal(t, false, pendingAggregatesAreEqual(a, b, includeAggregatorIndex))
 	})
 	t.Run("different aggregator index", func(t *testing.T) {
 		a := &ethpb.SignedAggregateAttestationAndProof{Message: &ethpb.AggregateAttestationAndProof{AggregatorIndex: 1}}
 		b := &ethpb.SignedAggregateAttestationAndProof{Message: &ethpb.AggregateAttestationAndProof{AggregatorIndex: 2}}
-		assert.Equal(t, false, pendingAggregatesAreEqual(a, b))
+		assert.Equal(t, false, pendingAggregatesAreEqual(a, b, includeAggregatorIndex))
 	})
 	t.Run("different slot", func(t *testing.T) {
 		a := &ethpb.SignedAggregateAttestationAndProof{
```
```diff
@@ -942,7 +942,7 @@ func Test_pendingAggregatesAreEqual(t *testing.T) {
 			},
 			AggregationBits: bitfield.Bitlist{0b1111},
 		}}}
-		assert.Equal(t, false, pendingAggregatesAreEqual(a, b))
+		assert.Equal(t, false, pendingAggregatesAreEqual(a, b, includeAggregatorIndex))
 	})
 	t.Run("different committee index", func(t *testing.T) {
 		a := &ethpb.SignedAggregateAttestationAndProof{
```
```diff
@@ -963,7 +963,7 @@ func Test_pendingAggregatesAreEqual(t *testing.T) {
 			},
 			AggregationBits: bitfield.Bitlist{0b1111},
 		}}}
-		assert.Equal(t, false, pendingAggregatesAreEqual(a, b))
+		assert.Equal(t, false, pendingAggregatesAreEqual(a, b, includeAggregatorIndex))
 	})
 	t.Run("different aggregation bits", func(t *testing.T) {
 		a := &ethpb.SignedAggregateAttestationAndProof{
```
```diff
@@ -984,7 +984,30 @@ func Test_pendingAggregatesAreEqual(t *testing.T) {
 			},
 			AggregationBits: bitfield.Bitlist{0b1000},
 		}}}
-		assert.Equal(t, false, pendingAggregatesAreEqual(a, b))
+		assert.Equal(t, false, pendingAggregatesAreEqual(a, b, includeAggregatorIndex))
 	})
+	t.Run("different aggregator index should be equal while ignoring aggregator index", func(t *testing.T) {
+		a := &ethpb.SignedAggregateAttestationAndProof{
+			Message: &ethpb.AggregateAttestationAndProof{
+				AggregatorIndex: 1,
+				Aggregate: &ethpb.Attestation{
+					Data: &ethpb.AttestationData{
+						Slot:           1,
+						CommitteeIndex: 1,
+					},
+					AggregationBits: bitfield.Bitlist{0b1111},
+				}}}
+		b := &ethpb.SignedAggregateAttestationAndProof{
+			Message: &ethpb.AggregateAttestationAndProof{
+				AggregatorIndex: 2,
+				Aggregate: &ethpb.Attestation{
+					Data: &ethpb.AttestationData{
+						Slot:           1,
+						CommitteeIndex: 1,
+					},
+					AggregationBits: bitfield.Bitlist{0b1111},
+				}}}
+		assert.Equal(t, true, pendingAggregatesAreEqual(a, b, ignoreAggregatorIndex))
+	})
 }
```
```diff
@@ -2,7 +2,6 @@ package sync
 
 import (
 	"context"
-	"encoding/hex"
 	"fmt"
 	"slices"
 	"sync"
```
```diff
@@ -44,11 +43,13 @@ func (s *Service) processPendingBlocksQueue() {
 		if !s.chainIsStarted() {
 			return
 		}
+
 		locker.Lock()
+		defer locker.Unlock()
+
 		if err := s.processPendingBlocks(s.ctx); err != nil {
 			log.WithError(err).Debug("Could not process pending blocks")
 		}
-		locker.Unlock()
 	})
 }
```
```diff
@@ -73,8 +74,10 @@ func (s *Service) processPendingBlocks(ctx context.Context) error {
 	randGen := rand.NewGenerator()
 	var parentRoots [][32]byte
 
+	blkRoots := make([][32]byte, 0, len(sortedSlots)*maxBlocksPerSlot)
+
 	// Iterate through sorted slots.
-	for _, slot := range sortedSlots {
+	for i, slot := range sortedSlots {
 		// Skip processing if slot is in the future.
 		if slot > s.cfg.clock.CurrentSlot() {
 			continue
```
```diff
@@ -91,6 +94,9 @@ func (s *Service) processPendingBlocks(ctx context.Context) error {
 
 		// Process each block in the queue.
 		for _, b := range blocksInCache {
+			start := time.Now()
+			totalDuration := time.Duration(0)
+
 			if err := blocks.BeaconBlockIsNil(b); err != nil {
 				continue
 			}
```
```diff
@@ -147,19 +153,34 @@ func (s *Service) processPendingBlocks(ctx context.Context) error {
 			}
 			cancelFunction()
 
-			// Process pending attestations for this block.
-			if err := s.processPendingAttsForBlock(ctx, blkRoot); err != nil {
-				log.WithError(err).Debug("Failed to process pending attestations for block")
-			}
+			blkRoots = append(blkRoots, blkRoot)
 
 			// Remove the processed block from the queue.
 			if err := s.removeBlockFromQueue(b, blkRoot); err != nil {
 				return err
 			}
-			log.WithFields(logrus.Fields{"slot": slot, "blockRoot": hex.EncodeToString(bytesutil.Trunc(blkRoot[:]))}).Debug("Processed pending block and cleared it in cache")
+
+			duration := time.Since(start)
+			totalDuration += duration
+			log.WithFields(logrus.Fields{
+				"slotIndex":     fmt.Sprintf("%d/%d", i+1, len(sortedSlots)),
+				"slot":          slot,
+				"root":          fmt.Sprintf("%#x", blkRoot),
+				"duration":      duration,
+				"totalDuration": totalDuration,
+			}).Debug("Processed pending block and cleared it in cache")
 		}
 
 		span.End()
 	}
 
+	for _, blkRoot := range blkRoots {
+		// Process pending attestations for this block.
+		if err := s.processPendingAttsForBlock(ctx, blkRoot); err != nil {
+			log.WithError(err).Debug("Failed to process pending attestations for block")
+		}
+	}
+
 	return s.sendBatchRootRequest(ctx, parentRoots, randGen)
 }
```
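Besides the logging, this hunk reorders the work: block roots are accumulated in `blkRoots` while the queue is drained, and pending attestations are handled in a single pass afterwards. A minimal sketch of the collect-then-process shape; `processBlock` and `processAttsForBlock` are hypothetical stand-ins for the real per-block and per-root work:

```go
package main

import "fmt"

// processBlock and processAttsForBlock stand in for the real per-block and
// per-root work done by the service.
func processBlock(root [32]byte) error        { return nil }
func processAttsForBlock(root [32]byte) error { return nil }

func main() {
	queue := [][32]byte{{0x01}, {0x02}, {0x03}}

	// First pass: process every block and record the roots that succeeded.
	blkRoots := make([][32]byte, 0, len(queue))
	for _, root := range queue {
		if err := processBlock(root); err != nil {
			continue
		}
		blkRoots = append(blkRoots, root)
	}

	// Second pass: only after the whole queue is drained, handle the
	// pending attestations for each processed block.
	for _, root := range blkRoots {
		if err := processAttsForBlock(root); err != nil {
			fmt.Println("could not process pending attestations:", err)
		}
	}
}
```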
```diff
@@ -379,6 +400,19 @@ func (s *Service) sendBatchRootRequest(ctx context.Context, roots [][32]byte, ra
 			req = roots[:maxReqBlock]
 		}
 
+		if logrus.GetLevel() >= logrus.DebugLevel {
+			rootsStr := make([]string, 0, len(roots))
+			for _, req := range roots {
+				rootsStr = append(rootsStr, fmt.Sprintf("%#x", req))
+			}
+
+			log.WithFields(logrus.Fields{
+				"peer":  pid,
+				"count": len(req),
+				"roots": rootsStr,
+			}).Debug("Requesting blocks by root")
+		}
+
 		// Send the request to the peer.
 		if err := s.sendBeaconBlocksRequest(ctx, &req, pid); err != nil {
 			tracing.AnnotateError(span, err)
```
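Formatting every root as a hex string is wasted work when debug logging is disabled, so the new block is gated on the logger's level. A small runnable sketch of the same guard, with made-up roots:

```go
package main

import (
	"fmt"

	"github.com/sirupsen/logrus"
)

func main() {
	logrus.SetLevel(logrus.InfoLevel)

	roots := [][32]byte{{0x01}, {0x02}}

	// The hex-formatting loop only runs when its output can actually be
	// observed: at InfoLevel this whole block is skipped.
	if logrus.GetLevel() >= logrus.DebugLevel {
		rootsStr := make([]string, 0, len(roots))
		for _, r := range roots {
			rootsStr = append(rootsStr, fmt.Sprintf("%#x", r))
		}

		logrus.WithFields(logrus.Fields{
			"count": len(roots),
			"roots": rootsStr,
		}).Debug("Requesting blocks by root")
	}
}
```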
```diff
@@ -438,8 +472,6 @@ func (s *Service) filterOutPendingAndSynced(roots [][fieldparams.RootLength]byte
 			roots = append(roots[:i], roots[i+1:]...)
 			continue
 		}
-
-		log.WithField("blockRoot", fmt.Sprintf("%#x", r)).Debug("Requesting block by root")
 	}
 	return roots
 }
```
```diff
@@ -51,14 +51,12 @@ func (s *Service) validateDataColumn(ctx context.Context, pid peer.ID, msg *pubs
 	// Decode the message, reject if it fails.
 	m, err := s.decodePubsubMessage(msg)
 	if err != nil {
-		log.WithError(err).Error("Failed to decode message")
 		return pubsub.ValidationReject, err
 	}
 
 	// Reject messages that are not of the expected type.
 	dcsc, ok := m.(*eth.DataColumnSidecar)
 	if !ok {
-		log.WithField("message", m).Error("Message is not of type *eth.DataColumnSidecar")
 		return pubsub.ValidationReject, errWrongMessage
 	}
```
```diff
@@ -361,7 +361,7 @@ func (dv *RODataColumnsVerifier) SidecarParentSeen(parentSeen func([fieldparams.
 	}
 
 	if !dv.fc.HasNode(parentRoot) {
-		return columnErrBuilder(errSidecarParentNotSeen)
+		return columnErrBuilder(errors.Wrapf(errSidecarParentNotSeen, "parent root: %#x", parentRoot))
 	}
 }
```
```diff
changelog/manu-agg.md (new file, 3 lines)
@@ -0,0 +1,3 @@
+### Changed
+
+- Pending aggregates: When multiple aggregated attestations only differing by the aggregator index are in the pending queue, only process one of them.
```

```diff
changelog/manu-remove-error-logs.md (new file, 2 lines)
@@ -0,0 +1,2 @@
+### Changed
+- `validateDataColumn`: Remove error logs.
```

```diff
changelog/satushh-pulltip.md (new file, 3 lines)
@@ -0,0 +1,3 @@
+### Fixed
+
+- Removed redundant justified checkpoint update in pullTips.
```