Compare commits


20 Commits

Author  SHA1  Message  Date
Raul Jordan  0cd34f061b  Merge branch 'develop' into reorg-mon  2024-03-21 09:55:39 -05:00
Preston Van Loon  cfb2b6008e  Merge branch 'develop' into reorg-mon  2024-03-12 15:10:54 -05:00
Preston Van Loon  5421d8bdff  Merge branch 'develop' into reorg-mon  2024-03-08 10:38:14 -06:00
Raul Jordan  82c49d1d73  Update tools/reorg-monitor/main.go  2024-03-06 15:26:54 -06:00
Raul Jordan  48f992020a  Merge branch 'develop' into reorg-mon  2024-03-06 15:26:25 -06:00
Preston Van Loon  ab8966bb86  Fix proto link conflict. cc https://github.com/bazelbuild/rules_go/issues/3423  2024-03-06 12:42:28 -06:00
Raul Jordan  60269719f9  edit  2024-03-06 11:57:17 -06:00
Raul Jordan  64921b77fe  Merge branch 'reorg-mon' of github.com:prysmaticlabs/prysm into reorg-mon  2024-03-06 10:50:03 -06:00
Raul Jordan  e04ddedffa  order  2024-03-06 10:49:50 -06:00
Raul Jordan  c0197da846  Merge branch 'develop' into reorg-mon  2024-03-06 10:48:03 -06:00
Raul Jordan  8b80de294c  gaz  2024-03-06 10:47:53 -06:00
Raul Jordan  d9da431b6e  comment  2024-03-06 10:45:58 -06:00
Raul Jordan  809712e0a7  lint  2024-03-06 10:43:33 -06:00
Raul Jordan  c1a5144a76  Merge branch 'reorg-mon' of github.com:prysmaticlabs/prysm into reorg-mon  2024-03-06 10:32:31 -06:00
Raul Jordan  21680bd1f8  tidy  2024-03-06 10:32:19 -06:00
Raul Jordan  4563501b1a  Merge branch 'develop' into reorg-mon  2024-03-06 10:30:50 -06:00
Raul Jordan  2aaef27ebe  sse add  2024-03-06 10:30:34 -06:00
Raul Jordan  4643cb357b  reorg monitor inclusion  2024-03-06 10:27:33 -06:00
Raul Jordan  913619e396  add build file  2024-03-06 10:26:03 -06:00
Raul Jordan  1df39f310c  add in reorg monitor with sse client  2024-03-06 10:23:04 -06:00
219 changed files with 3159 additions and 5593 deletions

.gitignore vendored (3 lines changed)
View File

@@ -41,6 +41,3 @@ jwt.hex
# manual testing
tmp
# spectest coverage reports
report.txt

View File

@@ -130,9 +130,9 @@ aspect_bazel_lib_register_toolchains()
http_archive(
name = "rules_oci",
sha256 = "4a276e9566c03491649eef63f27c2816cc222f41ccdebd97d2c5159e84917c3b",
strip_prefix = "rules_oci-1.7.4",
url = "https://github.com/bazel-contrib/rules_oci/releases/download/v1.7.4/rules_oci-v1.7.4.tar.gz",
sha256 = "c71c25ed333a4909d2dd77e0b16c39e9912525a98c7fa85144282be8d04ef54c",
strip_prefix = "rules_oci-1.3.4",
url = "https://github.com/bazel-contrib/rules_oci/releases/download/v1.3.4/rules_oci-v1.3.4.tar.gz",
)
load("@rules_oci//oci:dependencies.bzl", "rules_oci_dependencies")
@@ -342,6 +342,22 @@ filegroup(
url = "https://github.com/eth-clients/eth2-networks/archive/934c948e69205dcf2deb87e4ae6cc140c335f94d.tar.gz",
)
http_archive(
name = "goerli_testnet",
build_file_content = """
filegroup(
name = "configs",
srcs = [
"prater/config.yaml",
],
visibility = ["//visibility:public"],
)
""",
sha256 = "43fc0f55ddff7b511713e2de07aa22846a67432df997296fb4fc09cd8ed1dcdb",
strip_prefix = "goerli-6522ac6684693740cd4ddcc2a0662e03702aa4a1",
url = "https://github.com/eth-clients/goerli/archive/6522ac6684693740cd4ddcc2a0662e03702aa4a1.tar.gz",
)
http_archive(
name = "holesky_testnet",
build_file_content = """

View File

@@ -88,7 +88,7 @@ func TestToggle(t *testing.T) {
}
}
func TestToggleMultipleTimes(t *testing.T) {
func TestToogleMultipleTimes(t *testing.T) {
t.Parallel()
v := New()
@@ -101,16 +101,16 @@ func TestToggleMultipleTimes(t *testing.T) {
expected := i%2 != 0
if v.IsSet() != expected {
t.Fatalf("AtomicBool.Toggle() doesn't work after %d calls, expected: %v, got %v", i, expected, v.IsSet())
t.Fatalf("AtomicBool.Toogle() doesn't work after %d calls, expected: %v, got %v", i, expected, v.IsSet())
}
if pre == v.IsSet() {
t.Fatalf("AtomicBool.Toggle() returned wrong value at the %dth calls, expected: %v, got %v", i, !v.IsSet(), pre)
t.Fatalf("AtomicBool.Toogle() returned wrong value at the %dth calls, expected: %v, got %v", i, !v.IsSet(), pre)
}
}
}
func TestToggleAfterOverflow(t *testing.T) {
func TestToogleAfterOverflow(t *testing.T) {
t.Parallel()
var value int32 = math.MaxInt32
@@ -122,7 +122,7 @@ func TestToggleAfterOverflow(t *testing.T) {
v.Toggle()
expected := math.MaxInt32%2 == 0
if v.IsSet() != expected {
t.Fatalf("AtomicBool.Toggle() doesn't work after overflow, expected: %v, got %v", expected, v.IsSet())
t.Fatalf("AtomicBool.Toogle() doesn't work after overflow, expected: %v, got %v", expected, v.IsSet())
}
// make sure overflow happened
@@ -135,7 +135,7 @@ func TestToggleAfterOverflow(t *testing.T) {
v.Toggle()
expected = !expected
if v.IsSet() != expected {
t.Fatalf("AtomicBool.Toggle() doesn't work after the second call after overflow, expected: %v, got %v", expected, v.IsSet())
t.Fatalf("AtomicBool.Toogle() doesn't work after the second call after overflow, expected: %v, got %v", expected, v.IsSet())
}
}

View File

@@ -20,7 +20,6 @@ package event
import (
"errors"
"reflect"
"slices"
"sync"
)
@@ -220,9 +219,12 @@ type caseList []reflect.SelectCase
// find returns the index of a case containing the given channel.
func (cs caseList) find(channel interface{}) int {
return slices.IndexFunc(cs, func(selectCase reflect.SelectCase) bool {
return selectCase.Chan.Interface() == channel
})
for i, cas := range cs {
if cas.Chan.Interface() == channel {
return i
}
}
return -1
}
// delete removes the given case from cs.
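The find change above swaps a hand-rolled scan for the standard library's slices.IndexFunc (or back again, depending on which direction you read the diff). A minimal, self-contained sketch of the equivalence, assuming Go 1.21+ for the slices package; the names below are illustrative, not Prysm code:

package main

import (
	"fmt"
	"slices"
)

// findManual scans for the first element satisfying match and returns its
// index, or -1 if none does. It behaves exactly like slices.IndexFunc.
func findManual(xs []int, match func(int) bool) int {
	for i, x := range xs {
		if match(x) {
			return i
		}
	}
	return -1
}

func main() {
	xs := []int{3, 5, 7}
	isFive := func(x int) bool { return x == 5 }
	fmt.Println(slices.IndexFunc(xs, isFive)) // 1
	fmt.Println(findManual(xs, isFive))       // 1
}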

View File

@@ -63,7 +63,7 @@ func Scatter(inputLen int, sFunc func(int, int, *sync.RWMutex) (interface{}, err
return results, nil
}
// calculateChunkSize calculates a suitable chunk size for the purposes of parallelization.
// calculateChunkSize calculates a suitable chunk size for the purposes of parallelisation.
func calculateChunkSize(items int) int {
// Start with a simple even split
chunkSize := items / runtime.GOMAXPROCS(0)
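The hunk above is truncated right after the even-split line. As a rough sketch of the idea the comment describes (divide the work across available CPUs, never returning a chunk smaller than one item); this is an illustration only, not the full Prysm helper:

package main

import (
	"fmt"
	"runtime"
)

// chunkSizeSketch splits items evenly across GOMAXPROCS workers with a
// floor of one item per chunk. The real calculateChunkSize applies further
// adjustments not shown in the diff.
func chunkSizeSketch(items int) int {
	size := items / runtime.GOMAXPROCS(0)
	if size < 1 {
		return 1
	}
	return size
}

func main() {
	fmt.Println(chunkSizeSketch(10000))
	fmt.Println(chunkSizeSketch(3))
}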

View File

@@ -2,7 +2,7 @@
# This script serves as a wrapper around bazel to limit the scope of environment variables that
# may change the action output. Using this script should result in a higher cache hit ratio for
# cached actions with a more hermetic build.
# cached actions with a more heremtic build.
env -i \
PATH=/usr/bin:/bin \

View File

@@ -11,6 +11,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
f "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice"
doublylinkedtree "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/doubly-linked-tree"
forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
@@ -398,6 +399,14 @@ func (s *Service) InForkchoice(root [32]byte) bool {
return s.cfg.ForkChoiceStore.HasNode(root)
}
// IsViableForCheckpoint returns whether the given checkpoint is a checkpoint in any
// chain known to forkchoice
func (s *Service) IsViableForCheckpoint(cp *forkchoicetypes.Checkpoint) (bool, error) {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.IsViableForCheckpoint(cp)
}
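The new IsViableForCheckpoint method is a thin wrapper: take the forkchoice store's read lock, release it via defer, and delegate. A self-contained sketch of that lock-then-delegate pattern, using placeholder types rather than the real forkchoice store:

package main

import (
	"fmt"
	"sync"
)

// store stands in for the forkchoice store; only the locking pattern matters.
type store struct {
	sync.RWMutex
	nodes map[string]bool
}

// hasNode mirrors the wrapper shape of IsViableForCheckpoint above: acquire
// the read lock, schedule the unlock, then call through to the unlocked logic.
func (s *store) hasNode(root string) bool {
	s.RLock()
	defer s.RUnlock()
	return s.nodes[root]
}

func main() {
	s := &store{nodes: map[string]bool{"a": true}}
	fmt.Println(s.hasNode("a"), s.hasNode("b")) // true false
}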
// IsOptimisticForRoot takes the root as argument instead of the current head
// and returns true if it is optimistic.
func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error) {

View File

@@ -61,7 +61,7 @@ func TestService_headNextSyncCommitteeIndices(t *testing.T) {
indices, err := c.headNextSyncCommitteeIndices(context.Background(), 0, primitives.Slot(slot))
require.NoError(t, err)
// NextSyncCommittee should be empty after `ProcessSyncCommitteeUpdates`. Validator should get indices.
// NextSyncCommittee should be be empty after `ProcessSyncCommitteeUpdates`. Validator should get indices.
require.NotEqual(t, 0, len(indices))
}

View File

@@ -586,7 +586,7 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
s.blobNotifiers.delete(root)
return nil
case <-ctx.Done():
return errors.Wrapf(ctx.Err(), "context deadline waiting for blob sidecars slot: %d, BlockRoot: %#x", block.Slot(), root)
return errors.Wrap(ctx.Err(), "context deadline waiting for blob sidecars")
}
}
}
@@ -594,7 +594,7 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
func daCheckLogFields(root [32]byte, slot primitives.Slot, expected, missing int) logrus.Fields {
return logrus.Fields{
"slot": slot,
"root": fmt.Sprintf("%#x", root),
"root": root,
"blobsExpected": expected,
"blobsWaiting": missing,
}
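daCheckLogFields only assembles a logrus field set; the change is whether root is pre-formatted with %#x or passed through as-is. A small usage sketch with placeholder values (the field names come from the diff, everything else is assumed):

package main

import log "github.com/sirupsen/logrus"

func main() {
	// Emit the same structured fields built by daCheckLogFields above.
	log.WithFields(log.Fields{
		"slot":          123,
		"root":          "0x1234",
		"blobsExpected": 4,
		"blobsWaiting":  1,
	}).Info("Waiting for blob sidecars")
}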

View File

@@ -60,7 +60,7 @@ func (s *Service) getFCUArgsEarlyBlock(cfg *postBlockProcessConfig, fcuArgs *fcu
// logNonCanonicalBlockReceived prints a message informing that the received
// block is not the head of the chain. It requires the caller holds a lock on
// Forkchoice.
// Foprkchoice.
func (s *Service) logNonCanonicalBlockReceived(blockRoot [32]byte, headRoot [32]byte) {
receivedWeight, err := s.cfg.ForkChoiceStore.Weight(blockRoot)
if err != nil {

View File

@@ -170,7 +170,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
// Send finalized events and finalized deposits in the background
if newFinalized {
finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
go s.sendNewFinalizedEvent(ctx, postState)
go s.sendNewFinalizedEvent(blockCopy, postState)
depCtx, cancel := context.WithTimeout(context.Background(), depositDeadline)
go func() {
s.insertFinalizedDeposits(depCtx, finalized.Root)
@@ -443,7 +443,7 @@ func (s *Service) updateFinalizationOnBlock(ctx context.Context, preState, postS
// sendNewFinalizedEvent sends a new finalization checkpoint event over the
// event feed. It needs to be called on the background
func (s *Service) sendNewFinalizedEvent(ctx context.Context, postState state.BeaconState) {
func (s *Service) sendNewFinalizedEvent(signed interfaces.ReadOnlySignedBeaconBlock, postState state.BeaconState) {
isValidPayload := false
s.headLock.RLock()
if s.head != nil {
@@ -451,17 +451,8 @@ func (s *Service) sendNewFinalizedEvent(ctx context.Context, postState state.Bea
}
s.headLock.RUnlock()
blk, err := s.cfg.BeaconDB.Block(ctx, bytesutil.ToBytes32(postState.FinalizedCheckpoint().Root))
if err != nil {
log.WithError(err).Error("Could not retrieve block for finalized checkpoint root. Finalized event will not be emitted")
return
}
if blk == nil || blk.IsNil() || blk.Block() == nil || blk.Block().IsNil() {
log.WithError(err).Error("Block retrieved for finalized checkpoint root is nil. Finalized event will not be emitted")
return
}
stateRoot := blk.Block().StateRoot()
// Send an event regarding the new finalized checkpoint over a common event feed.
stateRoot := signed.Block().StateRoot()
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.FinalizedCheckpoint,
Data: &ethpbv1.EventFinalizedCheckpoint{

View File

@@ -8,14 +8,12 @@ import (
blockchainTesting "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/das"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpbv1 "github.com/prysmaticlabs/prysm/v5/proto/eth/v1"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
@@ -380,38 +378,3 @@ func TestHandleBlockBLSToExecutionChanges(t *testing.T) {
require.Equal(t, false, pool.ValidatorExists(idx))
})
}
func Test_sendNewFinalizedEvent(t *testing.T) {
s, _ := minimalTestService(t)
notifier := &blockchainTesting.MockStateNotifier{RecordEvents: true}
s.cfg.StateNotifier = notifier
finalizedSt, err := util.NewBeaconState()
require.NoError(t, err)
finalizedStRoot, err := finalizedSt.HashTreeRoot(s.ctx)
require.NoError(t, err)
b := util.NewBeaconBlock()
b.Block.StateRoot = finalizedStRoot[:]
sbb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
sbbRoot, err := sbb.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, s.cfg.BeaconDB.SaveBlock(s.ctx, sbb))
st, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, st.SetFinalizedCheckpoint(&ethpb.Checkpoint{
Epoch: 123,
Root: sbbRoot[:],
}))
s.sendNewFinalizedEvent(s.ctx, st)
require.Equal(t, 1, len(notifier.ReceivedEvents()))
e := notifier.ReceivedEvents()[0]
assert.Equal(t, statefeed.FinalizedCheckpoint, int(e.Type))
fc, ok := e.Data.(*ethpbv1.EventFinalizedCheckpoint)
require.Equal(t, true, ok, "event has wrong data type")
assert.Equal(t, primitives.Epoch(123), fc.Epoch)
assert.DeepEqual(t, sbbRoot[:], fc.Block)
assert.DeepEqual(t, finalizedStRoot[:], fc.State)
assert.Equal(t, false, fc.ExecutionOptimistic)
}

View File

@@ -290,10 +290,18 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
fRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(finalized.Root))
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(s.ctx, &forkchoicetypes.Checkpoint{Epoch: justified.Epoch,
Root: bytesutil.ToBytes32(justified.Root)}); err != nil {
return errors.Wrap(err, "could not update forkchoice's justified checkpoint")
if params.BeaconConfig().ConfigName != params.PraterName {
if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(s.ctx, &forkchoicetypes.Checkpoint{Epoch: justified.Epoch,
Root: bytesutil.ToBytes32(justified.Root)}); err != nil {
return errors.Wrap(err, "could not update forkchoice's justified checkpoint")
}
} else {
if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(s.ctx, &forkchoicetypes.Checkpoint{Epoch: finalized.Epoch,
Root: bytesutil.ToBytes32(finalized.Root)}); err != nil {
return errors.Wrap(err, "could not update forkchoice's justified checkpoint")
}
}
if err := s.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: finalized.Epoch,
Root: bytesutil.ToBytes32(finalized.Root)}); err != nil {
return errors.Wrap(err, "could not update forkchoice's finalized checkpoint")

View File

@@ -804,7 +804,7 @@ func TestFinalizedDeposits_ReturnsTrieCorrectly(t *testing.T) {
depositTrie, err := trie.GenerateTrieFromItems(trieItems, params.BeaconConfig().DepositContractTreeDepth)
assert.NoError(t, err)
// Perform this in a nonsensical ordering
// Perform this in a non-sensical ordering
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 10, [32]byte{}, 0))
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 2, [32]byte{}, 0))
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 3, [32]byte{}, 0))

View File

@@ -784,7 +784,7 @@ func TestFinalizedDeposits_ReturnsTrieCorrectly(t *testing.T) {
depositTrie, err := trie.GenerateTrieFromItems(trieItems, params.BeaconConfig().DepositContractTreeDepth)
assert.NoError(t, err)
// Perform this in a nonsensical ordering
// Perform this in a non-sensical ordering
err = dc.InsertFinalizedDeposits(context.Background(), 1, [32]byte{}, 0)
require.NoError(t, err)
err = dc.InsertFinalizedDeposits(context.Background(), 2, [32]byte{}, 0)

View File

@@ -12,6 +12,7 @@ import (
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
prysmTime "github.com/prysmaticlabs/prysm/v5/time"
"github.com/prysmaticlabs/prysm/v5/time/slots"
log "github.com/sirupsen/logrus"
)
var (
@@ -132,6 +133,9 @@ func ComputeSubnetFromCommitteeAndSlot(activeValCount uint64, comIdx primitives.
//
// In the attestation must be within the range of 95 to 102 in the example above.
func ValidateAttestationTime(attSlot primitives.Slot, genesisTime time.Time, clockDisparity time.Duration) error {
if err := slots.ValidateClock(attSlot, uint64(genesisTime.Unix())); err != nil {
return err
}
attTime, err := slots.ToTime(uint64(genesisTime.Unix()), attSlot)
if err != nil {
return err
@@ -178,15 +182,24 @@ func ValidateAttestationTime(attSlot primitives.Slot, genesisTime time.Time, clo
}
// EIP-7045: Starting in Deneb, allow any attestations from the current or previous epoch.
currentEpoch := slots.ToEpoch(currentSlot)
if attEpoch+1 < currentEpoch {
prevEpoch, err := currentEpoch.SafeSub(1)
if err != nil {
log.WithError(err).Debug("Ignoring underflow for a deneb attestation inclusion check in epoch 0")
prevEpoch = 0
}
attSlotEpoch := slots.ToEpoch(attSlot)
if attSlotEpoch != currentEpoch && attSlotEpoch != prevEpoch {
attError = fmt.Errorf(
"attestation epoch %d not within current epoch %d or previous epoch",
attEpoch,
"attestation epoch %d not within current epoch %d or previous epoch %d",
attSlot/params.BeaconConfig().SlotsPerEpoch,
currentEpoch,
prevEpoch,
)
return errors.Join(ErrTooLate, attError)
}
return nil
}
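The EIP-7045 change above widens attestation acceptance to the current or previous epoch and guards the previous-epoch computation against underflow at epoch 0. A self-contained sketch of that window check with plain uint64 epochs (the real code uses primitives.Epoch and SafeSub):

package main

import "fmt"

// withinDenebWindow returns nil if attEpoch is the current or previous
// epoch, clamping the previous epoch at 0 to avoid underflow, and otherwise
// returns an error mirroring the message in the diff.
func withinDenebWindow(attEpoch, currentEpoch uint64) error {
	prevEpoch := uint64(0)
	if currentEpoch > 0 {
		prevEpoch = currentEpoch - 1
	}
	if attEpoch != currentEpoch && attEpoch != prevEpoch {
		return fmt.Errorf("attestation epoch %d not within current epoch %d or previous epoch %d",
			attEpoch, currentEpoch, prevEpoch)
	}
	return nil
}

func main() {
	fmt.Println(withinDenebWindow(14, 15)) // <nil>
	fmt.Println(withinDenebWindow(8, 15))  // rejected, matching the test expectation above
}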

View File

@@ -197,7 +197,7 @@ func Test_ValidateAttestationTime(t *testing.T) {
-500 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second,
).Add(200 * time.Millisecond),
},
wantedErr: "attestation epoch 8 not within current epoch 15 or previous epoch",
wantedErr: "attestation epoch 8 not within current epoch 15 or previous epoch 14",
},
{
name: "attestation.slot is well beyond current slot",
@@ -205,7 +205,7 @@ func Test_ValidateAttestationTime(t *testing.T) {
attSlot: 1 << 32,
genesisTime: prysmTime.Now().Add(-15 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second),
},
wantedErr: "attestation slot 4294967296 not within attestation propagation range of 0 to 15 (current slot)",
wantedErr: "which exceeds max allowed value relative to the local clock",
},
}
for _, tt := range tests {

View File

@@ -22,7 +22,7 @@ var balanceCache = cache.NewEffectiveBalanceCache()
// """
// Return the combined effective balance of the ``indices``.
// ``EFFECTIVE_BALANCE_INCREMENT`` Gwei minimum to avoid divisions by zero.
// Math safe up to ~10B ETH, after which this overflows uint64.
// Math safe up to ~10B ETH, afterwhich this overflows uint64.
// """
// return Gwei(max(EFFECTIVE_BALANCE_INCREMENT, sum([state.validators[index].effective_balance for index in indices])))
func TotalBalance(state state.ReadOnlyValidators, indices []primitives.ValidatorIndex) uint64 {

View File

@@ -59,7 +59,7 @@ func ComputeDomainAndSign(st state.ReadOnlyBeaconState, epoch primitives.Epoch,
return ComputeDomainAndSignWithoutState(st.Fork(), epoch, domain, st.GenesisValidatorsRoot(), obj, key)
}
// ComputeDomainAndSignWithoutState offers the same functionality as ComputeDomainAndSign without the need to provide a BeaconState.
// ComputeDomainAndSignWithoutState offers the same functionalit as ComputeDomainAndSign without the need to provide a BeaconState.
// This is particularly helpful for signing values in tests.
func ComputeDomainAndSignWithoutState(fork *ethpb.Fork, epoch primitives.Epoch, domain [4]byte, vr []byte, obj fssz.HashRoot, key bls.SecretKey) ([]byte, error) {
// EIP-7044: Beginning in Deneb, fix the fork version to Capella for signed exits.

View File

@@ -94,15 +94,6 @@ func (s *LazilyPersistentStore) IsDataAvailable(ctx context.Context, current pri
entry := s.cache.ensure(key)
defer s.cache.delete(key)
root := b.Root()
sumz, err := s.store.WaitForSummarizer(ctx)
if err != nil {
log.WithField("root", fmt.Sprintf("%#x", b.Root())).
WithError(err).
Debug("Failed to receive BlobStorageSummarizer within IsDataAvailable")
} else {
entry.setDiskSummary(sumz.Summary(root))
}
// Verify we have all the expected sidecars, and fail fast if any are missing or inconsistent.
// We don't try to salvage problematic batches because this indicates a misbehaving peer and we'd rather
// ignore their response and decrease their peer score.

View File

@@ -4,7 +4,6 @@ import (
"bytes"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
@@ -60,12 +59,7 @@ func (c *cache) delete(key cacheKey) {
// cacheEntry holds a fixed-length cache of BlobSidecars.
type cacheEntry struct {
scs [fieldparams.MaxBlobsPerBlock]*blocks.ROBlob
diskSummary filesystem.BlobStorageSummary
}
func (e *cacheEntry) setDiskSummary(sum filesystem.BlobStorageSummary) {
e.diskSummary = sum
scs [fieldparams.MaxBlobsPerBlock]*blocks.ROBlob
}
// stash adds an item to the in-memory cache of BlobSidecars.
@@ -87,17 +81,9 @@ func (e *cacheEntry) stash(sc *blocks.ROBlob) error {
// the cache do not match those found in the block. If err is nil, then all expected
// commitments were found in the cache and the sidecar slice return value can be used
// to perform a DA check against the cached sidecars.
// filter only returns blobs that need to be checked. Blobs already available on disk will be excluded.
func (e *cacheEntry) filter(root [32]byte, kc safeCommitmentArray) ([]blocks.ROBlob, error) {
if e.diskSummary.AllAvailable(kc.count()) {
return nil, nil
}
scs := make([]blocks.ROBlob, 0, kc.count())
scs := make([]blocks.ROBlob, kc.count())
for i := uint64(0); i < fieldparams.MaxBlobsPerBlock; i++ {
// We already have this blob, we don't need to write it or validate it.
if e.diskSummary.HasIndex(i) {
continue
}
if kc[i] == nil {
if e.scs[i] != nil {
return nil, errors.Wrapf(errCommitmentMismatch, "root=%#x, index=%#x, commitment=%#x, no block commitment", root, i, e.scs[i].KzgCommitment)
@@ -111,7 +97,7 @@ func (e *cacheEntry) filter(root [32]byte, kc safeCommitmentArray) ([]blocks.ROB
if !bytes.Equal(kc[i], e.scs[i].KzgCommitment) {
return nil, errors.Wrapf(errCommitmentMismatch, "root=%#x, index=%#x, commitment=%#x, block commitment=%#x", root, i, e.scs[i].KzgCommitment, kc[i])
}
scs = append(scs, *e.scs[i])
scs[i] = *e.scs[i]
}
return scs, nil

View File

@@ -3,14 +3,9 @@ package das
import (
"testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
"github.com/prysmaticlabs/prysm/v5/time/slots"
)
func TestCacheEnsureDelete(t *testing.T) {
@@ -28,145 +23,3 @@ func TestCacheEnsureDelete(t *testing.T) {
var nilEntry *cacheEntry
require.Equal(t, nilEntry, c.entries[k])
}
type filterTestCaseSetupFunc func(t *testing.T) (*cacheEntry, safeCommitmentArray, []blocks.ROBlob)
func filterTestCaseSetup(slot primitives.Slot, nBlobs int, onDisk []int, numExpected int) filterTestCaseSetupFunc {
return func(t *testing.T) (*cacheEntry, safeCommitmentArray, []blocks.ROBlob) {
blk, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, nBlobs)
commits, err := commitmentsToCheck(blk, blk.Block().Slot())
require.NoError(t, err)
entry := &cacheEntry{}
if len(onDisk) > 0 {
od := map[[32]byte][]int{blk.Root(): onDisk}
sumz := filesystem.NewMockBlobStorageSummarizer(t, od)
sum := sumz.Summary(blk.Root())
entry.setDiskSummary(sum)
}
expected := make([]blocks.ROBlob, 0, nBlobs)
for i := 0; i < commits.count(); i++ {
if entry.diskSummary.HasIndex(uint64(i)) {
continue
}
// If we aren't telling the cache a blob is on disk, add it to the expected list and stash.
expected = append(expected, blobs[i])
require.NoError(t, entry.stash(&blobs[i]))
}
require.Equal(t, numExpected, len(expected))
return entry, commits, expected
}
}
func TestFilterDiskSummary(t *testing.T) {
denebSlot, err := slots.EpochStart(params.BeaconConfig().DenebForkEpoch)
require.NoError(t, err)
cases := []struct {
name string
setup filterTestCaseSetupFunc
}{
{
name: "full blobs, all on disk",
setup: filterTestCaseSetup(denebSlot, 6, []int{0, 1, 2, 3, 4, 5}, 0),
},
{
name: "full blobs, first on disk",
setup: filterTestCaseSetup(denebSlot, 6, []int{0}, 5),
},
{
name: "full blobs, middle on disk",
setup: filterTestCaseSetup(denebSlot, 6, []int{2}, 5),
},
{
name: "full blobs, last on disk",
setup: filterTestCaseSetup(denebSlot, 6, []int{5}, 5),
},
{
name: "full blobs, none on disk",
setup: filterTestCaseSetup(denebSlot, 6, []int{}, 6),
},
{
name: "one commitment, on disk",
setup: filterTestCaseSetup(denebSlot, 1, []int{0}, 0),
},
{
name: "one commitment, not on disk",
setup: filterTestCaseSetup(denebSlot, 1, []int{}, 1),
},
{
name: "two commitments, first on disk",
setup: filterTestCaseSetup(denebSlot, 2, []int{0}, 1),
},
{
name: "two commitments, last on disk",
setup: filterTestCaseSetup(denebSlot, 2, []int{1}, 1),
},
{
name: "two commitments, none on disk",
setup: filterTestCaseSetup(denebSlot, 2, []int{}, 2),
},
{
name: "two commitments, all on disk",
setup: filterTestCaseSetup(denebSlot, 2, []int{0, 1}, 0),
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
entry, commits, expected := c.setup(t)
// first (root) argument doesn't matter, it is just for logs
got, err := entry.filter([32]byte{}, commits)
require.NoError(t, err)
require.Equal(t, len(expected), len(got))
})
}
}
func TestFilter(t *testing.T) {
denebSlot, err := slots.EpochStart(params.BeaconConfig().DenebForkEpoch)
require.NoError(t, err)
cases := []struct {
name string
setup func(t *testing.T) (*cacheEntry, safeCommitmentArray, []blocks.ROBlob)
err error
}{
{
name: "commitments mismatch - extra sidecar",
setup: func(t *testing.T) (*cacheEntry, safeCommitmentArray, []blocks.ROBlob) {
entry, commits, expected := filterTestCaseSetup(denebSlot, 6, []int{0, 1}, 4)(t)
commits[5] = nil
return entry, commits, expected
},
err: errCommitmentMismatch,
},
{
name: "sidecar missing",
setup: func(t *testing.T) (*cacheEntry, safeCommitmentArray, []blocks.ROBlob) {
entry, commits, expected := filterTestCaseSetup(denebSlot, 6, []int{0, 1}, 4)(t)
entry.scs[5] = nil
return entry, commits, expected
},
err: errMissingSidecar,
},
{
name: "commitments mismatch - different bytes",
setup: func(t *testing.T) (*cacheEntry, safeCommitmentArray, []blocks.ROBlob) {
entry, commits, expected := filterTestCaseSetup(denebSlot, 6, []int{0, 1}, 4)(t)
entry.scs[5].KzgCommitment = []byte("nope")
return entry, commits, expected
},
err: errCommitmentMismatch,
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
entry, commits, expected := c.setup(t)
// first (root) argument doesn't matter, it is just for logs
got, err := entry.filter([32]byte{}, commits)
if c.err != nil {
require.ErrorIs(t, err, c.err)
return
}
require.NoError(t, err)
require.Equal(t, len(expected), len(got))
})
}
}

View File

@@ -4,10 +4,9 @@ go_library(
name = "go_default_library",
srcs = [
"blob.go",
"cache.go",
"ephemeral.go",
"log.go",
"metrics.go",
"mock.go",
"pruner.go",
],
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem",
@@ -34,7 +33,6 @@ go_test(
name = "go_default_test",
srcs = [
"blob_test.go",
"cache_test.go",
"pruner_test.go",
],
embed = [":go_default_library"],

View File

@@ -1,9 +1,7 @@
package filesystem
import (
"context"
"fmt"
"math"
"os"
"path"
"strconv"
@@ -105,29 +103,12 @@ func (bs *BlobStorage) WarmCache() {
return
}
go func() {
start := time.Now()
if err := bs.pruner.warmCache(); err != nil {
if err := bs.pruner.prune(0); err != nil {
log.WithError(err).Error("Error encountered while warming up blob pruner cache")
}
log.WithField("elapsed", time.Since(start)).Info("Blob filesystem cache warm-up complete.")
}()
}
// ErrBlobStorageSummarizerUnavailable is a sentinel error returned when there is no pruner/cache available.
// This should be used by code that optionally uses the summarizer to optimize rpc requests. Being able to
// fallback when there is no summarizer allows client code to avoid test complexity where the summarizer doesn't matter.
var ErrBlobStorageSummarizerUnavailable = errors.New("BlobStorage not initialized with a pruner or cache")
// WaitForSummarizer blocks until the BlobStorageSummarizer is ready to use.
// BlobStorageSummarizer is not ready immediately on node startup because it needs to sample the blob filesystem to
// determine which blobs are available.
func (bs *BlobStorage) WaitForSummarizer(ctx context.Context) (BlobStorageSummarizer, error) {
if bs == nil || bs.pruner == nil {
return nil, ErrBlobStorageSummarizerUnavailable
}
return bs.pruner.waitForCache(ctx)
}
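WaitForSummarizer blocks until the pruner's cache-warmup channel is closed or the caller's context ends. A minimal sketch of that select pattern with invented names; only the channel/context shape is taken from the diff:

package main

import (
	"context"
	"fmt"
	"time"
)

// waitForReady returns once ready is closed, or returns the context error
// if the caller gives up first.
func waitForReady(ctx context.Context, ready <-chan struct{}) error {
	select {
	case <-ready:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	ready := make(chan struct{})
	go func() {
		time.Sleep(10 * time.Millisecond)
		close(ready) // e.g. the warm-up scan finished
	}()
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	fmt.Println(waitForReady(ctx, ready)) // <nil>
}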
// Save saves blobs given a list of sidecars.
func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error {
startTime := time.Now()
@@ -300,15 +281,6 @@ func (bs *BlobStorage) Clear() error {
return nil
}
// WithinRetentionPeriod checks if the requested epoch is within the blob retention period.
func (bs *BlobStorage) WithinRetentionPeriod(requested, current primitives.Epoch) bool {
if requested > math.MaxUint64-bs.retentionEpochs {
// If there is an overflow, then the retention period was set to an extremely large number.
return true
}
return requested+bs.retentionEpochs >= current
}
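The removed WithinRetentionPeriod avoids computing current minus retention (which could underflow) by testing requested + retention >= current, after first checking that the addition itself cannot overflow. A standalone sketch with plain uint64 epochs:

package main

import (
	"fmt"
	"math"
)

// withinRetention reports whether requested is recent enough to keep,
// guarding the addition against uint64 overflow exactly as the diff does.
func withinRetention(requested, current, retention uint64) bool {
	if requested > math.MaxUint64-retention {
		// retention is so large that requested+retention overflows,
		// so every epoch is within the retention period.
		return true
	}
	return requested+retention >= current
}

func main() {
	fmt.Println(withinRetention(0, 16, 16)) // true: right on the boundary
	fmt.Println(withinRetention(0, 17, 16)) // false: one epoch too old
}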
type blobNamer struct {
root [32]byte
index uint64

View File

@@ -2,7 +2,6 @@ package filesystem
import (
"bytes"
"math"
"os"
"path"
"sync"
@@ -249,50 +248,3 @@ func TestNewBlobStorage(t *testing.T) {
_, err = NewBlobStorage(WithBasePath(path.Join(t.TempDir(), "good")))
require.NoError(t, err)
}
func TestConfig_WithinRetentionPeriod(t *testing.T) {
retention := primitives.Epoch(16)
storage := &BlobStorage{retentionEpochs: retention}
cases := []struct {
name string
requested primitives.Epoch
current primitives.Epoch
within bool
}{
{
name: "before",
requested: 0,
current: retention + 1,
within: false,
},
{
name: "same",
requested: 0,
current: 0,
within: true,
},
{
name: "boundary",
requested: 0,
current: retention,
within: true,
},
{
name: "one less",
requested: retention - 1,
current: retention,
within: true,
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
require.Equal(t, c.within, storage.WithinRetentionPeriod(c.requested, c.current))
})
}
t.Run("overflow", func(t *testing.T) {
storage := &BlobStorage{retentionEpochs: math.MaxUint64}
require.Equal(t, true, storage.WithinRetentionPeriod(1, 1))
})
}

View File

@@ -1,119 +0,0 @@
package filesystem
import (
"sync"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)
// blobIndexMask is a bitmask representing the set of blob indices that are currently set.
type blobIndexMask [fieldparams.MaxBlobsPerBlock]bool
// BlobStorageSummary represents cached information about the BlobSidecars on disk for each root the cache knows about.
type BlobStorageSummary struct {
slot primitives.Slot
mask blobIndexMask
}
// HasIndex returns true if the BlobSidecar at the given index is available in the filesystem.
func (s BlobStorageSummary) HasIndex(idx uint64) bool {
// Protect from panic, but assume callers are sophisticated enough to not need an error telling them they have an invalid idx.
if idx >= fieldparams.MaxBlobsPerBlock {
return false
}
return s.mask[idx]
}
// AllAvailable returns true if we have all blobs for all indices from 0 to count-1.
func (s BlobStorageSummary) AllAvailable(count int) bool {
if count > fieldparams.MaxBlobsPerBlock {
return false
}
for i := 0; i < count; i++ {
if !s.mask[i] {
return false
}
}
return true
}
// BlobStorageSummarizer can be used to receive a summary of metadata about blobs on disk for a given root.
// The BlobStorageSummary can be used to check which indices (if any) are available for a given block by root.
type BlobStorageSummarizer interface {
Summary(root [32]byte) BlobStorageSummary
}
type blobStorageCache struct {
mu sync.RWMutex
nBlobs float64
cache map[string]BlobStorageSummary
}
var _ BlobStorageSummarizer = &blobStorageCache{}
func newBlobStorageCache() *blobStorageCache {
return &blobStorageCache{
cache: make(map[string]BlobStorageSummary, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest*fieldparams.SlotsPerEpoch),
}
}
// Summary returns the BlobStorageSummary for `root`. The BlobStorageSummary can be used to check for the presence of
// BlobSidecars based on Index.
func (s *blobStorageCache) Summary(root [32]byte) BlobStorageSummary {
k := rootString(root)
s.mu.RLock()
defer s.mu.RUnlock()
return s.cache[k]
}
func (s *blobStorageCache) ensure(key string, slot primitives.Slot, idx uint64) error {
if idx >= fieldparams.MaxBlobsPerBlock {
return errIndexOutOfBounds
}
s.mu.Lock()
defer s.mu.Unlock()
v := s.cache[key]
v.slot = slot
if !v.mask[idx] {
s.updateMetrics(1)
}
v.mask[idx] = true
s.cache[key] = v
return nil
}
func (s *blobStorageCache) slot(key string) (primitives.Slot, bool) {
s.mu.RLock()
defer s.mu.RUnlock()
v, ok := s.cache[key]
if !ok {
return 0, false
}
return v.slot, ok
}
func (s *blobStorageCache) evict(key string) {
var deleted float64
s.mu.Lock()
v, ok := s.cache[key]
if ok {
for i := range v.mask {
if v.mask[i] {
deleted += 1
}
}
}
delete(s.cache, key)
s.mu.Unlock()
if deleted > 0 {
s.updateMetrics(-deleted)
}
}
func (s *blobStorageCache) updateMetrics(delta float64) {
s.nBlobs += delta
blobDiskCount.Set(s.nBlobs)
blobDiskSize.Set(s.nBlobs * bytesPerSidecar)
}
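The removed cache.go above boils down to a per-root bitmask plus two read helpers, HasIndex and AllAvailable. A self-contained sketch of the AllAvailable check, with an assumed MaxBlobsPerBlock of 6 standing in for fieldparams.MaxBlobsPerBlock:

package main

import "fmt"

const maxBlobsPerBlock = 6 // assumed stand-in for fieldparams.MaxBlobsPerBlock

type indexMask [maxBlobsPerBlock]bool

// allAvailable mirrors BlobStorageSummary.AllAvailable: true only if every
// index in [0, count) is set in the mask, false for out-of-range counts.
func allAvailable(mask indexMask, count int) bool {
	if count > maxBlobsPerBlock {
		return false
	}
	for i := 0; i < count; i++ {
		if !mask[i] {
			return false
		}
	}
	return true
}

func main() {
	var mask indexMask
	mask[0], mask[1] = true, true
	fmt.Println(allAvailable(mask, 2)) // true: both committed blobs are on disk
	fmt.Println(allAvailable(mask, 3)) // false: index 2 is missing
}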

View File

@@ -1,150 +0,0 @@
package filesystem
import (
"testing"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/testing/require"
)
func TestSlotByRoot_Summary(t *testing.T) {
var noneSet, allSet, firstSet, lastSet, oneSet blobIndexMask
firstSet[0] = true
lastSet[len(lastSet)-1] = true
oneSet[1] = true
for i := range allSet {
allSet[i] = true
}
cases := []struct {
name string
root [32]byte
expected *blobIndexMask
}{
{
name: "not found",
},
{
name: "none set",
expected: &noneSet,
},
{
name: "index 1 set",
expected: &oneSet,
},
{
name: "all set",
expected: &allSet,
},
{
name: "first set",
expected: &firstSet,
},
{
name: "last set",
expected: &lastSet,
},
}
sc := newBlobStorageCache()
for _, c := range cases {
if c.expected != nil {
key := rootString(bytesutil.ToBytes32([]byte(c.name)))
sc.cache[key] = BlobStorageSummary{slot: 0, mask: *c.expected}
}
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
key := bytesutil.ToBytes32([]byte(c.name))
sum := sc.Summary(key)
for i := range c.expected {
ui := uint64(i)
if c.expected == nil {
require.Equal(t, false, sum.HasIndex(ui))
} else {
require.Equal(t, c.expected[i], sum.HasIndex(ui))
}
}
})
}
}
func TestAllAvailable(t *testing.T) {
idxUpTo := func(u int) []int {
r := make([]int, u)
for i := range r {
r[i] = i
}
return r
}
require.DeepEqual(t, []int{}, idxUpTo(0))
require.DeepEqual(t, []int{0}, idxUpTo(1))
require.DeepEqual(t, []int{0, 1, 2, 3, 4, 5}, idxUpTo(6))
cases := []struct {
name string
idxSet []int
count int
aa bool
}{
{
// If there are no blobs committed, then all the committed blobs are available.
name: "none in idx, 0 arg",
count: 0,
aa: true,
},
{
name: "none in idx, 1 arg",
count: 1,
aa: false,
},
{
name: "first in idx, 1 arg",
idxSet: []int{0},
count: 1,
aa: true,
},
{
name: "second in idx, 1 arg",
idxSet: []int{1},
count: 1,
aa: false,
},
{
name: "first missing, 2 arg",
idxSet: []int{1},
count: 2,
aa: false,
},
{
name: "all missing, 1 arg",
count: 6,
aa: false,
},
{
name: "out of bound is safe",
count: fieldparams.MaxBlobsPerBlock + 1,
aa: false,
},
{
name: "max present",
count: fieldparams.MaxBlobsPerBlock,
idxSet: idxUpTo(fieldparams.MaxBlobsPerBlock),
aa: true,
},
{
name: "one present",
count: 1,
idxSet: idxUpTo(1),
aa: true,
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
var mask blobIndexMask
for _, idx := range c.idxSet {
mask[idx] = true
}
sum := BlobStorageSummary{mask: mask}
require.Equal(t, c.aa, sum.AllAvailable(c.count))
})
}
}

View File

@@ -12,7 +12,7 @@ import (
// improving test performance and simplifying cleanup.
func NewEphemeralBlobStorage(t testing.TB) *BlobStorage {
fs := afero.NewMemMapFs()
pruner, err := newBlobPruner(fs, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest, withWarmedCache())
pruner, err := newBlobPruner(fs, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
if err != nil {
t.Fatal("test setup issue", err)
}
@@ -23,7 +23,7 @@ func NewEphemeralBlobStorage(t testing.TB) *BlobStorage {
// in order to interact with it outside the parameters of the BlobStorage api.
func NewEphemeralBlobStorageWithFs(t testing.TB) (afero.Fs, *BlobStorage, error) {
fs := afero.NewMemMapFs()
pruner, err := newBlobPruner(fs, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest, withWarmedCache())
pruner, err := newBlobPruner(fs, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
if err != nil {
t.Fatal("test setup issue", err)
}
@@ -61,15 +61,3 @@ func NewEphemeralBlobStorageWithMocker(_ testing.TB) (*BlobMocker, *BlobStorage)
bs := &BlobStorage{fs: fs}
return &BlobMocker{fs: fs, bs: bs}, bs
}
func NewMockBlobStorageSummarizer(t *testing.T, set map[[32]byte][]int) BlobStorageSummarizer {
c := newBlobStorageCache()
for k, v := range set {
for i := range v {
if err := c.ensure(rootString(k), 0, uint64(v[i])); err != nil {
t.Fatal(err)
}
}
}
return c
}

View File

@@ -1,7 +1,6 @@
package filesystem
import (
"context"
"encoding/binary"
"io"
"path"
@@ -13,6 +12,7 @@ import (
"time"
"github.com/pkg/errors"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/time/slots"
@@ -32,39 +32,22 @@ type blobPruner struct {
sync.Mutex
prunedBefore atomic.Uint64
windowSize primitives.Slot
cache *blobStorageCache
cacheReady chan struct{}
warmed bool
slotMap *slotForRoot
fs afero.Fs
}
type prunerOpt func(*blobPruner) error
func withWarmedCache() prunerOpt {
return func(p *blobPruner) error {
return p.warmCache()
}
}
func newBlobPruner(fs afero.Fs, retain primitives.Epoch, opts ...prunerOpt) (*blobPruner, error) {
func newBlobPruner(fs afero.Fs, retain primitives.Epoch) (*blobPruner, error) {
r, err := slots.EpochStart(retain + retentionBuffer)
if err != nil {
return nil, errors.Wrap(err, "could not set retentionSlots")
}
cw := make(chan struct{})
p := &blobPruner{fs: fs, windowSize: r, cache: newBlobStorageCache(), cacheReady: cw}
for _, o := range opts {
if err := o(p); err != nil {
return nil, err
}
}
return p, nil
return &blobPruner{fs: fs, windowSize: r, slotMap: newSlotForRoot()}, nil
}
// notify updates the pruner's view of root->blob mappings. This allows the pruner to build a cache
// of root->slot mappings and decide when to evict old blobs based on the age of present blobs.
func (p *blobPruner) notify(root [32]byte, latest primitives.Slot, idx uint64) error {
if err := p.cache.ensure(rootString(root), latest, idx); err != nil {
if err := p.slotMap.ensure(rootString(root), latest, idx); err != nil {
return err
}
pruned := uint64(windowMin(latest, p.windowSize))
@@ -72,8 +55,6 @@ func (p *blobPruner) notify(root [32]byte, latest primitives.Slot, idx uint64) e
return nil
}
go func() {
p.Lock()
defer p.Unlock()
if err := p.prune(primitives.Slot(pruned)); err != nil {
log.WithError(err).Errorf("Failed to prune blobs from slot %d", latest)
}
@@ -81,7 +62,7 @@ func (p *blobPruner) notify(root [32]byte, latest primitives.Slot, idx uint64) e
return nil
}
func windowMin(latest, offset primitives.Slot) primitives.Slot {
func windowMin(latest primitives.Slot, offset primitives.Slot) primitives.Slot {
// Safely compute the first slot in the epoch for the latest slot
latest = latest - latest%params.BeaconConfig().SlotsPerEpoch
if latest < offset {
@@ -90,32 +71,12 @@ func windowMin(latest, offset primitives.Slot) primitives.Slot {
return latest - offset
}
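windowMin rounds the latest slot down to the first slot of its epoch and then subtracts the retention window, clamping at zero so the subtraction cannot underflow. A sketch with plain uint64 slots and an assumed 32 slots per epoch:

package main

import "fmt"

const slotsPerEpoch = 32 // assumed mainnet value of params.BeaconConfig().SlotsPerEpoch

// windowMinSketch mirrors windowMin above: epoch-align latest, then back off
// by offset, never going below zero.
func windowMinSketch(latest, offset uint64) uint64 {
	latest = latest - latest%slotsPerEpoch
	if latest < offset {
		return 0
	}
	return latest - offset
}

func main() {
	fmt.Println(windowMinSketch(100, 64)) // 96 - 64 = 32
	fmt.Println(windowMinSketch(40, 64))  // clamped to 0
}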
func (p *blobPruner) warmCache() error {
p.Lock()
defer p.Unlock()
if err := p.prune(0); err != nil {
return err
}
if !p.warmed {
p.warmed = true
close(p.cacheReady)
}
return nil
}
func (p *blobPruner) waitForCache(ctx context.Context) (*blobStorageCache, error) {
select {
case <-p.cacheReady:
return p.cache, nil
case <-ctx.Done():
return nil, ctx.Err()
}
}
// Prune prunes blobs in the base directory based on the retention epoch.
// It deletes blobs older than currentEpoch - (retentionEpochs+bufferEpochs).
// This is so that we keep a slight buffer and blobs are deleted after n+2 epochs.
func (p *blobPruner) prune(pruneBefore primitives.Slot) error {
p.Lock()
defer p.Unlock()
start := time.Now()
totalPruned, totalErr := 0, 0
// Customize logging/metrics behavior for the initial cache warmup when slot=0.
@@ -161,7 +122,7 @@ func shouldRetain(slot, pruneBefore primitives.Slot) bool {
func (p *blobPruner) tryPruneDir(dir string, pruneBefore primitives.Slot) (int, error) {
root := rootFromDir(dir)
slot, slotCached := p.cache.slot(root)
slot, slotCached := p.slotMap.slot(root)
// Return early if the slot is cached and doesn't need pruning.
if slotCached && shouldRetain(slot, pruneBefore) {
return 0, nil
@@ -190,7 +151,7 @@ func (p *blobPruner) tryPruneDir(dir string, pruneBefore primitives.Slot) (int,
if err != nil {
return 0, errors.Wrapf(err, "index could not be determined for blob file %s", scFiles[i])
}
if err := p.cache.ensure(root, slot, idx); err != nil {
if err := p.slotMap.ensure(root, slot, idx); err != nil {
return 0, errors.Wrapf(err, "could not update prune cache for blob file %s", scFiles[i])
}
}
@@ -218,7 +179,7 @@ func (p *blobPruner) tryPruneDir(dir string, pruneBefore primitives.Slot) (int,
return removed, errors.Wrapf(err, "unable to remove blob directory %s", dir)
}
p.cache.evict(rootFromDir(dir))
p.slotMap.evict(rootFromDir(dir))
return len(scFiles), nil
}
@@ -308,3 +269,71 @@ func filterSsz(s string) bool {
func filterPart(s string) bool {
return filepath.Ext(s) == dotPartExt
}
func newSlotForRoot() *slotForRoot {
return &slotForRoot{
cache: make(map[string]*slotCacheEntry, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest*fieldparams.SlotsPerEpoch),
}
}
type slotCacheEntry struct {
slot primitives.Slot
mask [fieldparams.MaxBlobsPerBlock]bool
}
type slotForRoot struct {
sync.RWMutex
nBlobs float64
cache map[string]*slotCacheEntry
}
func (s *slotForRoot) updateMetrics(delta float64) {
s.nBlobs += delta
blobDiskCount.Set(s.nBlobs)
blobDiskSize.Set(s.nBlobs * bytesPerSidecar)
}
func (s *slotForRoot) ensure(key string, slot primitives.Slot, idx uint64) error {
if idx >= fieldparams.MaxBlobsPerBlock {
return errIndexOutOfBounds
}
s.Lock()
defer s.Unlock()
v, ok := s.cache[key]
if !ok {
v = &slotCacheEntry{}
}
v.slot = slot
if !v.mask[idx] {
s.updateMetrics(1)
}
v.mask[idx] = true
s.cache[key] = v
return nil
}
func (s *slotForRoot) slot(key string) (primitives.Slot, bool) {
s.RLock()
defer s.RUnlock()
v, ok := s.cache[key]
if !ok {
return 0, false
}
return v.slot, ok
}
func (s *slotForRoot) evict(key string) {
s.Lock()
defer s.Unlock()
v, ok := s.cache[key]
var deleted float64
if ok {
for i := range v.mask {
if v.mask[i] {
deleted += 1
}
}
s.updateMetrics(-deleted)
}
delete(s.cache, key)
}

View File

@@ -28,7 +28,7 @@ func TestTryPruneDir_CachedNotExpired(t *testing.T) {
root := fmt.Sprintf("%#x", sc.BlockRoot())
// This slot is right on the edge of what would need to be pruned, so by adding it to the cache and
// skipping any other test setup, we can be certain the hot cache path never touches the filesystem.
require.NoError(t, pr.cache.ensure(root, sc.Slot(), 0))
require.NoError(t, pr.slotMap.ensure(root, sc.Slot(), 0))
pruned, err := pr.tryPruneDir(root, pr.windowSize)
require.NoError(t, err)
require.Equal(t, 0, pruned)
@@ -45,7 +45,7 @@ func TestTryPruneDir_CachedExpired(t *testing.T) {
require.NoError(t, err)
root := fmt.Sprintf("%#x", sc.BlockRoot())
require.NoError(t, fs.Mkdir(root, directoryPermissions)) // make empty directory
require.NoError(t, pr.cache.ensure(root, sc.Slot(), 0))
require.NoError(t, pr.slotMap.ensure(root, sc.Slot(), 0))
pruned, err := pr.tryPruneDir(root, slot+1)
require.NoError(t, err)
require.Equal(t, 0, pruned)
@@ -63,7 +63,7 @@ func TestTryPruneDir_CachedExpired(t *testing.T) {
// check that the root->slot is cached
root := fmt.Sprintf("%#x", scs[0].BlockRoot())
cs, cok := bs.pruner.cache.slot(root)
cs, cok := bs.pruner.slotMap.slot(root)
require.Equal(t, true, cok)
require.Equal(t, slot, cs)
@@ -95,12 +95,12 @@ func TestTryPruneDir_SlotFromFile(t *testing.T) {
// check that the root->slot is cached
root := fmt.Sprintf("%#x", scs[0].BlockRoot())
cs, ok := bs.pruner.cache.slot(root)
cs, ok := bs.pruner.slotMap.slot(root)
require.Equal(t, true, ok)
require.Equal(t, slot, cs)
// evict it from the cache so that we trigger the file read path
bs.pruner.cache.evict(root)
_, ok = bs.pruner.cache.slot(root)
bs.pruner.slotMap.evict(root)
_, ok = bs.pruner.slotMap.slot(root)
require.Equal(t, false, ok)
// ensure that we see the saved files in the filesystem
@@ -119,7 +119,7 @@ func TestTryPruneDir_SlotFromFile(t *testing.T) {
fs, bs, err := NewEphemeralBlobStorageWithFs(t)
require.NoError(t, err)
// Set slot equal to the window size, so it should be retained.
slot := bs.pruner.windowSize
var slot primitives.Slot = bs.pruner.windowSize
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, 2)
scs, err := verification.BlobSidecarSliceNoop(sidecars)
require.NoError(t, err)
@@ -129,8 +129,8 @@ func TestTryPruneDir_SlotFromFile(t *testing.T) {
// Evict slot mapping from the cache so that we trigger the file read path.
root := fmt.Sprintf("%#x", scs[0].BlockRoot())
bs.pruner.cache.evict(root)
_, ok := bs.pruner.cache.slot(root)
bs.pruner.slotMap.evict(root)
_, ok := bs.pruner.slotMap.slot(root)
require.Equal(t, false, ok)
// Ensure that we see the saved files in the filesystem.
@@ -243,8 +243,10 @@ func TestListDir(t *testing.T) {
}
blobWithSszAndTmp := dirFiles{name: "0x1234567890", isDir: true,
children: []dirFiles{{name: "5.ssz"}, {name: "0.part"}}}
fsLayout.children = append(fsLayout.children,
notABlob, childlessBlob, blobWithSsz, blobWithSszAndTmp)
fsLayout.children = append(fsLayout.children, notABlob)
fsLayout.children = append(fsLayout.children, childlessBlob)
fsLayout.children = append(fsLayout.children, blobWithSsz)
fsLayout.children = append(fsLayout.children, blobWithSszAndTmp)
topChildren := make([]string, len(fsLayout.children))
for i := range fsLayout.children {
@@ -280,7 +282,10 @@ func TestListDir(t *testing.T) {
dirPath: ".",
expected: []string{notABlob.name},
filter: func(s string) bool {
return s == notABlob.name
if s == notABlob.name {
return true
}
return false
},
},
{

View File

@@ -118,7 +118,7 @@ type HeadAccessDatabase interface {
// SlasherDatabase interface for persisting data related to detecting slashable offenses on Ethereum.
type SlasherDatabase interface {
io.Closer
SaveLastEpochWrittenForValidators(
SaveLastEpochsWrittenForValidators(
ctx context.Context, epochByValidator map[primitives.ValidatorIndex]primitives.Epoch,
) error
SaveAttestationRecordsForValidators(

View File

@@ -20,7 +20,6 @@ go_library(
"migration.go",
"migration_archived_index.go",
"migration_block_slot_index.go",
"migration_finalized_parent.go",
"migration_state_validators.go",
"schema.go",
"state.go",

View File

@@ -14,7 +14,6 @@ var migrations = []migration{
migrateArchivedIndex,
migrateBlockSlotIndex,
migrateStateValidators,
migrateFinalizedParent,
}
// RunMigrations defined in the migrations array.

View File

@@ -1,87 +0,0 @@
package kv
import (
"bytes"
"context"
"fmt"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
bolt "go.etcd.io/bbolt"
)
var migrationFinalizedParent = []byte("parent_bug_32fb183")
func migrateFinalizedParent(ctx context.Context, db *bolt.DB) error {
if updateErr := db.Update(func(tx *bolt.Tx) error {
mb := tx.Bucket(migrationsBucket)
if b := mb.Get(migrationFinalizedParent); bytes.Equal(b, migrationCompleted) {
return nil // Migration already completed.
}
bkt := tx.Bucket(finalizedBlockRootsIndexBucket)
if bkt == nil {
return fmt.Errorf("unable to read %s bucket for migration", finalizedBlockRootsIndexBucket)
}
bb := tx.Bucket(blocksBucket)
if bb == nil {
return fmt.Errorf("unable to read %s bucket for migration", blocksBucket)
}
c := bkt.Cursor()
var slotsWithoutBug primitives.Slot
maxBugSearch := params.BeaconConfig().SlotsPerEpoch * 10
for k, v := c.Last(); k != nil; k, v = c.Prev() {
// check if context is cancelled in between
if ctx.Err() != nil {
return ctx.Err()
}
idxEntry := &ethpb.FinalizedBlockRootContainer{}
if err := decode(ctx, v, idxEntry); err != nil {
return errors.Wrapf(err, "unable to decode finalized block root container for root=%#x", k)
}
// Not one of the corrupt values
if !bytes.Equal(idxEntry.ParentRoot, k) {
slotsWithoutBug += 1
if slotsWithoutBug > maxBugSearch {
break
}
continue
}
slotsWithoutBug = 0
log.WithField("root", fmt.Sprintf("%#x", k)).Debug("found index entry with incorrect parent root")
// Look up full block to get the correct parent root.
encBlk := bb.Get(k)
if encBlk == nil {
return errors.Wrapf(ErrNotFound, "could not find block for corrupt finalized index entry %#x", k)
}
blk, err := unmarshalBlock(ctx, encBlk)
if err != nil {
return errors.Wrapf(err, "unable to decode block for root=%#x", k)
}
// Replace parent root in the index with the correct value and write it back.
pr := blk.Block().ParentRoot()
idxEntry.ParentRoot = pr[:]
idxEnc, err := encode(ctx, idxEntry)
if err != nil {
return errors.Wrapf(err, "failed to encode finalized index entry for root=%#x", k)
}
if err := bkt.Put(k, idxEnc); err != nil {
return errors.Wrapf(err, "failed to update finalized index entry for root=%#x", k)
}
log.WithField("root", fmt.Sprintf("%#x", k)).
WithField("parentRoot", fmt.Sprintf("%#x", idxEntry.ParentRoot)).
Debug("updated corrupt index entry with correct parent")
}
// Mark migration complete.
return mb.Put(migrationFinalizedParent, migrationCompleted)
}); updateErr != nil {
log.WithError(updateErr).Errorf("could not run finalized parent root index repair migration")
return updateErr
}
return nil
}

View File

@@ -70,12 +70,12 @@ func (s *Store) LastEpochWrittenForValidators(
return attestedEpochs, err
}
// SaveLastEpochWrittenForValidators saves the latest epoch
// that each validator has attested to in the provided map.
func (s *Store) SaveLastEpochWrittenForValidators(
// SaveLastEpochsWrittenForValidators updates the latest epoch a slice
// of validator indices has attested to.
func (s *Store) SaveLastEpochsWrittenForValidators(
ctx context.Context, epochByValIndex map[primitives.ValidatorIndex]primitives.Epoch,
) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveLastEpochWrittenForValidators")
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveLastEpochsWrittenForValidators")
defer span.End()
const batchSize = 10000
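The batchSize constant hints at how SaveLastEpochsWrittenForValidators writes the validator-to-epoch map: in fixed-size batches so no single database transaction grows without bound. A simplified, self-contained sketch of that batching loop; the flush callback stands in for the real bolt update and all names are illustrative:

package main

import "fmt"

// saveInBatches writes epochByValIdx in chunks of at most batchSize entries,
// calling flush once per chunk and stopping at the first error.
func saveInBatches(epochByValIdx map[uint64]uint64, batchSize int, flush func(map[uint64]uint64) error) error {
	batch := make(map[uint64]uint64, batchSize)
	for idx, epoch := range epochByValIdx {
		batch[idx] = epoch
		if len(batch) == batchSize {
			if err := flush(batch); err != nil {
				return err
			}
			batch = make(map[uint64]uint64, batchSize)
		}
	}
	if len(batch) == 0 {
		return nil
	}
	return flush(batch)
}

func main() {
	data := map[uint64]uint64{1: 10, 2: 10, 3: 11}
	_ = saveInBatches(data, 2, func(b map[uint64]uint64) error {
		fmt.Println("flushing", len(b), "entries")
		return nil
	})
}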
@@ -157,7 +157,7 @@ func (s *Store) CheckAttesterDoubleVotes(
attRecordsBkt := tx.Bucket(attestationRecordsBucket)
encEpoch := encodeTargetEpoch(attToProcess.IndexedAttestation.Data.Target.Epoch)
localDoubleVotes := make([]*slashertypes.AttesterDoubleVote, 0)
localDoubleVotes := []*slashertypes.AttesterDoubleVote{}
for _, valIdx := range attToProcess.IndexedAttestation.AttestingIndices {
// Check if there is signing root in the database for this combination
@@ -166,7 +166,7 @@ func (s *Store) CheckAttesterDoubleVotes(
validatorEpochKey := append(encEpoch, encIdx...)
attRecordsKey := signingRootsBkt.Get(validatorEpochKey)
// An attestation record key consists of a signing root (32 bytes).
// An attestation record key is comprised of a signing root (32 bytes).
if len(attRecordsKey) < attestationRecordKeySize {
// If there is no signing root for this combination,
// then there is no double vote. We can continue to the next validator.
@@ -697,7 +697,7 @@ func decodeSlasherChunk(enc []byte) ([]uint16, error) {
}
// Encode attestation record to bytes.
// The output encoded attestation record consists in the signing root concatenated with the compressed attestation record.
// The output encoded attestation record consists in the signing root concatened with the compressed attestation record.
func encodeAttestationRecord(att *slashertypes.IndexedAttestationWrapper) ([]byte, error) {
if att == nil || att.IndexedAttestation == nil {
return []byte{}, errors.New("nil proposal record")
@@ -716,7 +716,7 @@ func encodeAttestationRecord(att *slashertypes.IndexedAttestationWrapper) ([]byt
}
// Decode attestation record from bytes.
// The input encoded attestation record consists in the signing root concatenated with the compressed attestation record.
// The input encoded attestation record consists in the signing root concatened with the compressed attestation record.
func decodeAttestationRecord(encoded []byte) (*slashertypes.IndexedAttestationWrapper, error) {
if len(encoded) < rootSize {
return nil, fmt.Errorf("wrong length for encoded attestation record, want minimum %d, got %d", rootSize, len(encoded))

View File

@@ -89,7 +89,7 @@ func TestStore_LastEpochWrittenForValidators(t *testing.T) {
require.NoError(t, err)
require.Equal(t, 0, len(attestedEpochs))
err = beaconDB.SaveLastEpochWrittenForValidators(ctx, epochsByValidator)
err = beaconDB.SaveLastEpochsWrittenForValidators(ctx, epochsByValidator)
require.NoError(t, err)
retrievedEpochs, err := beaconDB.LastEpochWrittenForValidators(ctx, indices)

View File

@@ -10,7 +10,7 @@ import (
)
// TestCleanup ensures that the cleanup function unregisters the prometheus.Collection
// also tests the interchangeability of the explicit prometheus Register/Unregister
// also tests the interchangability of the explicit prometheus Register/Unregister
// and the implicit methods within the collector implementation
func TestCleanup(t *testing.T) {
ctx := context.Background()
@@ -32,11 +32,11 @@ func TestCleanup(t *testing.T) {
assert.Equal(t, true, unregistered, "prometheus.Unregister failed to unregister PowchainCollector on final cleanup")
}
// TestCancellation tests that canceling the context passed into
// TestCancelation tests that canceling the context passed into
// NewPowchainCollector cleans everything up as expected. This
// does come at the cost of an extra channel cluttering up
// PowchainCollector, just for this test.
func TestCancellation(t *testing.T) {
func TestCancelation(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
pc, err := NewPowchainCollector(ctx)
assert.NoError(t, err, "Unexpected error calling NewPowchainCollector")

View File

@@ -707,7 +707,6 @@ func (b *BeaconNode) registerP2P(cliCtx *cli.Context) error {
PrivateKey: cliCtx.String(cmd.P2PPrivKey.Name),
StaticPeerID: cliCtx.Bool(cmd.P2PStaticID.Name),
MetaDataDir: cliCtx.String(cmd.P2PMetadata.Name),
QUICPort: cliCtx.Uint(cmd.P2PQUICPort.Name),
TCPPort: cliCtx.Uint(cmd.P2PTCPPort.Name),
UDPPort: cliCtx.Uint(cmd.P2PUDPPort.Name),
MaxPeers: cliCtx.Uint(cmd.P2PMaxPeers.Name),

View File

@@ -217,9 +217,9 @@ func Test_hasNetworkFlag(t *testing.T) {
want bool
}{
{
name: "Holesky testnet",
networkName: features.HoleskyTestnet.Name,
networkValue: "holesky",
name: "Prater testnet",
networkName: features.PraterTestnet.Name,
networkValue: "prater",
want: true,
},
{

View File

@@ -90,7 +90,6 @@ go_library(
"@com_github_libp2p_go_libp2p//core/peerstore:go_default_library",
"@com_github_libp2p_go_libp2p//core/protocol:go_default_library",
"@com_github_libp2p_go_libp2p//p2p/security/noise:go_default_library",
"@com_github_libp2p_go_libp2p//p2p/transport/quic:go_default_library",
"@com_github_libp2p_go_libp2p//p2p/transport/tcp:go_default_library",
"@com_github_libp2p_go_libp2p_mplex//:go_default_library",
"@com_github_libp2p_go_libp2p_pubsub//:go_default_library",

View File

@@ -10,7 +10,6 @@ import (
"github.com/pkg/errors"
ssz "github.com/prysmaticlabs/fastssz"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/altair"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/crypto/hash"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
@@ -138,11 +137,11 @@ func (s *Service) internalBroadcastAttestation(ctx context.Context, subnet uint6
// In the event our attestation is outdated and beyond the
// acceptable threshold, we exit early and do not broadcast it.
currSlot := slots.CurrentSlot(uint64(s.genesisTime.Unix()))
if err := helpers.ValidateAttestationTime(att.Data.Slot, s.genesisTime, params.BeaconConfig().MaximumGossipClockDisparityDuration()); err != nil {
if att.Data.Slot+params.BeaconConfig().SlotsPerEpoch < currSlot {
log.WithFields(logrus.Fields{
"attestationSlot": att.Data.Slot,
"currentSlot": currSlot,
}).WithError(err).Warning("Attestation is too old to broadcast, discarding it")
}).Warning("Attestation is too old to broadcast, discarding it")
return
}

View File

@@ -24,7 +24,6 @@ type Config struct {
PrivateKey string
DataDir string
MetaDataDir string
QUICPort uint
TCPPort uint
UDPPort uint
MaxPeers uint

View File

@@ -15,7 +15,6 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v5/config/features"
"github.com/prysmaticlabs/prysm/v5/config/params"
ecdsaprysm "github.com/prysmaticlabs/prysm/v5/crypto/ecdsa"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
@@ -40,11 +39,6 @@ const (
udp6
)
type quicProtocol uint16
// quicProtocol is the "quic" key, which holds the QUIC port of the node.
func (quicProtocol) ENRKey() string { return "quic" }
// RefreshENR uses an epoch to refresh the enr entry for our node
// with the tracked committee ids for the epoch, allowing our node
// to be dynamically discoverable by others given our tracked committee ids.
@@ -106,15 +100,14 @@ func (s *Service) RefreshENR() {
// listen for new nodes watches for new nodes in the network and adds them to the peerstore.
func (s *Service) listenForNewNodes() {
iterator := enode.Filter(s.dv5Listener.RandomNodes(), s.filterPeer)
iterator := s.dv5Listener.RandomNodes()
iterator = enode.Filter(iterator, s.filterPeer)
defer iterator.Close()
for {
// Exit if service's context is canceled.
// Exit if service's context is canceled
if s.ctx.Err() != nil {
break
}
if s.isPeerAtLimit(false /* inbound */) {
// Pause the main loop for a period to stop looking
// for new peers.
@@ -122,22 +115,16 @@ func (s *Service) listenForNewNodes() {
time.Sleep(pollingPeriod)
continue
}
if exists := iterator.Next(); !exists {
exists := iterator.Next()
if !exists {
break
}
node := iterator.Node()
peerInfo, _, err := convertToAddrInfo(node)
if err != nil {
log.WithError(err).Error("Could not convert to peer info")
continue
}
if peerInfo == nil {
continue
}
// Make sure that peer is not dialed too often, for each connection attempt there's a backoff period.
s.Peers().RandomizeBackOff(peerInfo.ID)
go func(info *peer.AddrInfo) {
@@ -180,7 +167,8 @@ func (s *Service) createListener(
// Listen to all network interfaces
// for both ip protocols.
conn, err := net.ListenUDP("udp", udpAddr)
networkVersion := "udp"
conn, err := net.ListenUDP(networkVersion, udpAddr)
if err != nil {
return nil, errors.Wrap(err, "could not listen to UDP")
}
@@ -190,7 +178,6 @@ func (s *Service) createListener(
ipAddr,
int(s.cfg.UDPPort),
int(s.cfg.TCPPort),
int(s.cfg.QUICPort),
)
if err != nil {
return nil, errors.Wrap(err, "could not create local node")
@@ -222,7 +209,7 @@ func (s *Service) createListener(
func (s *Service) createLocalNode(
privKey *ecdsa.PrivateKey,
ipAddr net.IP,
udpPort, tcpPort, quicPort int,
udpPort, tcpPort int,
) (*enode.LocalNode, error) {
db, err := enode.OpenDB("")
if err != nil {
@@ -231,19 +218,11 @@ func (s *Service) createLocalNode(
localNode := enode.NewLocalNode(db, privKey)
ipEntry := enr.IP(ipAddr)
localNode.Set(ipEntry)
udpEntry := enr.UDP(udpPort)
localNode.Set(udpEntry)
tcpEntry := enr.TCP(tcpPort)
localNode.Set(ipEntry)
localNode.Set(udpEntry)
localNode.Set(tcpEntry)
if features.Get().EnableQUIC {
quicEntry := quicProtocol(quicPort)
localNode.Set(quicEntry)
}
localNode.SetFallbackIP(ipAddr)
localNode.SetFallbackUDP(udpPort)
@@ -298,7 +277,7 @@ func (s *Service) startDiscoveryV5(
// filterPeer validates each node that we retrieve from our dht. We
// try to ascertain that the peer can be a valid protocol peer.
// Validity Conditions:
// 1. Peer has a valid IP and a (QUIC and/or TCP) port set in their enr.
// 1. Peer has a valid IP and TCP port set in their enr.
// 2. Peer hasn't been marked as 'bad'.
// 3. Peer is not currently active or connected.
// 4. Peer is ready to receive incoming connections.
@@ -315,13 +294,17 @@ func (s *Service) filterPeer(node *enode.Node) bool {
return false
}
peerData, multiAddrs, err := convertToAddrInfo(node)
if err != nil {
log.WithError(err).Debug("Could not convert to peer data")
// Ignore nodes with their TCP ports not set.
if err := node.Record().Load(enr.WithEntry("tcp", new(enr.TCP))); err != nil {
if !enr.IsNotFound(err) {
log.WithError(err).Debug("Could not retrieve tcp port")
}
return false
}
if peerData == nil || len(multiAddrs) == 0 {
peerData, multiAddr, err := convertToAddrInfo(node)
if err != nil {
log.WithError(err).Debug("Could not convert to peer data")
return false
}
@@ -354,9 +337,6 @@ func (s *Service) filterPeer(node *enode.Node) bool {
}
}
// If the peer has 2 multiaddrs, favor the QUIC address, which is in first position.
multiAddr := multiAddrs[0]
// Add peer to peer handler.
s.peers.Add(nodeENR, peerData.ID, multiAddr, network.DirUnknown)
@@ -400,11 +380,11 @@ func PeersFromStringAddrs(addrs []string) ([]ma.Multiaddr, error) {
if err != nil {
return nil, errors.Wrapf(err, "Could not get enode from string")
}
nodeAddrs, err := retrieveMultiAddrsFromNode(enodeAddr)
addr, err := convertToSingleMultiAddr(enodeAddr)
if err != nil {
return nil, errors.Wrapf(err, "Could not get multiaddr")
}
allAddrs = append(allAddrs, nodeAddrs...)
allAddrs = append(allAddrs, addr)
}
return allAddrs, nil
}
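
PeersFromStringAddrs, shown in the hunk above, ultimately turns textual bootstrap or static-peer addresses into dialable peer.AddrInfo values. A small sketch of that last conversion step, using the same go-multiaddr and libp2p packages the file already imports; the address format in the comment is illustrative only.

```go
// Sketch: turning a textual multiaddr with a trailing /p2p/<peer-id> component,
// e.g. "/ip4/10.0.0.1/tcp/13000/p2p/<peer-id>", into a peer.AddrInfo.
package p2pdemo

import (
	"fmt"

	"github.com/libp2p/go-libp2p/core/peer"
	ma "github.com/multiformats/go-multiaddr"
)

func addrInfoFromString(s string) (*peer.AddrInfo, error) {
	addr, err := ma.NewMultiaddr(s)
	if err != nil {
		return nil, fmt.Errorf("invalid multiaddr %q: %w", s, err)
	}
	// AddrInfoFromP2pAddr splits off the /p2p/ component as the peer ID and
	// keeps the transport part as the dialable address.
	return peer.AddrInfoFromP2pAddr(addr)
}
```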
@@ -439,139 +419,45 @@ func parseGenericAddrs(addrs []string) (enodeString, multiAddrString []string) {
}
func convertToMultiAddr(nodes []*enode.Node) []ma.Multiaddr {
// Expect each node to have a TCP and a QUIC address.
multiAddrs := make([]ma.Multiaddr, 0, 2*len(nodes))
var multiAddrs []ma.Multiaddr
for _, node := range nodes {
// Skip nodes with no ip address stored.
// ignore nodes with no ip address stored
if node.IP() == nil {
continue
}
// Get up to two multiaddrs (TCP and QUIC) for each node.
nodeMultiAddrs, err := retrieveMultiAddrsFromNode(node)
multiAddr, err := convertToSingleMultiAddr(node)
if err != nil {
log.WithError(err).Errorf("Could not convert to multiAddr node %s", node)
log.WithError(err).Error("Could not convert to multiAddr")
continue
}
multiAddrs = append(multiAddrs, nodeMultiAddrs...)
multiAddrs = append(multiAddrs, multiAddr)
}
return multiAddrs
}
func convertToAddrInfo(node *enode.Node) (*peer.AddrInfo, []ma.Multiaddr, error) {
multiAddrs, err := retrieveMultiAddrsFromNode(node)
func convertToAddrInfo(node *enode.Node) (*peer.AddrInfo, ma.Multiaddr, error) {
multiAddr, err := convertToSingleMultiAddr(node)
if err != nil {
return nil, nil, err
}
if len(multiAddrs) == 0 {
return nil, nil, nil
}
infos, err := peer.AddrInfosFromP2pAddrs(multiAddrs...)
info, err := peer.AddrInfoFromP2pAddr(multiAddr)
if err != nil {
return nil, nil, errors.Wrapf(err, "could not convert to peer info: %v", multiAddrs)
return nil, nil, err
}
if len(infos) != 1 {
return nil, nil, errors.Errorf("infos contains %v elements, expected exactly 1", len(infos))
}
return &infos[0], multiAddrs, nil
return info, multiAddr, nil
}
// retrieveMultiAddrsFromNode converts an enode.Node to a list of multiaddrs.
// If the node has a both a QUIC and a TCP port set in their ENR, then
// the multiaddr corresponding to the QUIC port is added first, followed
// by the multiaddr corresponding to the TCP port.
func retrieveMultiAddrsFromNode(node *enode.Node) ([]ma.Multiaddr, error) {
multiaddrs := make([]ma.Multiaddr, 0, 2)
// Retrieve the node public key.
func convertToSingleMultiAddr(node *enode.Node) (ma.Multiaddr, error) {
pubkey := node.Pubkey()
assertedKey, err := ecdsaprysm.ConvertToInterfacePubkey(pubkey)
if err != nil {
return nil, errors.Wrap(err, "could not get pubkey")
}
// Compute the node ID from the public key.
id, err := peer.IDFromPublicKey(assertedKey)
if err != nil {
return nil, errors.Wrap(err, "could not get peer id")
}
if features.Get().EnableQUIC {
// If the QUIC entry is present in the ENR, build the corresponding multiaddress.
port, ok, err := getPort(node, quic)
if err != nil {
return nil, errors.Wrap(err, "could not get QUIC port")
}
if ok {
addr, err := multiAddressBuilderWithID(node.IP(), quic, port, id)
if err != nil {
return nil, errors.Wrap(err, "could not build QUIC address")
}
multiaddrs = append(multiaddrs, addr)
}
}
// If the TCP entry is present in the ENR, build the corresponding multiaddress.
port, ok, err := getPort(node, tcp)
if err != nil {
return nil, errors.Wrap(err, "could not get TCP port")
}
if ok {
addr, err := multiAddressBuilderWithID(node.IP(), tcp, port, id)
if err != nil {
return nil, errors.Wrap(err, "could not build TCP address")
}
multiaddrs = append(multiaddrs, addr)
}
return multiaddrs, nil
}
// getPort retrieves the port for a given node and protocol, as well as a boolean
// indicating whether the port was found, and an error
func getPort(node *enode.Node, protocol internetProtocol) (uint, bool, error) {
var (
port uint
err error
)
switch protocol {
case tcp:
var entry enr.TCP
err = node.Load(&entry)
port = uint(entry)
case udp:
var entry enr.UDP
err = node.Load(&entry)
port = uint(entry)
case quic:
var entry quicProtocol
err = node.Load(&entry)
port = uint(entry)
default:
return 0, false, errors.Errorf("invalid protocol: %v", protocol)
}
if enr.IsNotFound(err) {
return port, false, nil
}
if err != nil {
return 0, false, errors.Wrap(err, "could not get port")
}
return port, true, nil
return multiAddressBuilderWithID(node.IP().String(), "tcp", uint(node.TCP()), id)
}
func convertToUdpMultiAddr(node *enode.Node) ([]ma.Multiaddr, error) {
@@ -589,14 +475,14 @@ func convertToUdpMultiAddr(node *enode.Node) ([]ma.Multiaddr, error) {
var ip4 enr.IPv4
var ip6 enr.IPv6
if node.Load(&ip4) == nil {
address, ipErr := multiAddressBuilderWithID(net.IP(ip4), udp, uint(node.UDP()), id)
address, ipErr := multiAddressBuilderWithID(net.IP(ip4).String(), "udp", uint(node.UDP()), id)
if ipErr != nil {
return nil, errors.Wrap(ipErr, "could not build IPv4 address")
}
addresses = append(addresses, address)
}
if node.Load(&ip6) == nil {
address, ipErr := multiAddressBuilderWithID(net.IP(ip6), udp, uint(node.UDP()), id)
address, ipErr := multiAddressBuilderWithID(net.IP(ip6).String(), "udp", uint(node.UDP()), id)
if ipErr != nil {
return nil, errors.Wrap(ipErr, "could not build IPv6 address")
}
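
Both sides of the discovery diff above read transport ports out of a node's ENR, distinguishing "entry not present" from a genuine decoding error (getPort on one side, the enr.WithEntry lookup in filterPeer on the other). A minimal sketch of that pattern with go-ethereum's enr package; the error wrapping uses github.com/pkg/errors as the surrounding file does.

```go
// Sketch of reading a TCP port out of an ENR record, reporting whether the
// "tcp" key was present at all.
package discoverydemo

import (
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/enr"
	"github.com/pkg/errors"
)

func tcpPort(node *enode.Node) (uint, bool, error) {
	var entry enr.TCP
	err := node.Load(&entry)
	if enr.IsNotFound(err) {
		return 0, false, nil // no "tcp" key in the record
	}
	if err != nil {
		return 0, false, errors.Wrap(err, "could not load tcp entry")
	}
	return uint(entry), true, nil
}
```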

View File

@@ -166,9 +166,8 @@ func TestCreateLocalNode(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
// Define ports.
const (
udpPort = 2000
tcpPort = 3000
quicPort = 3000
udpPort = 2000
tcpPort = 3000
)
// Create a private key.
@@ -181,7 +180,7 @@ func TestCreateLocalNode(t *testing.T) {
cfg: tt.cfg,
}
localNode, err := service.createLocalNode(privKey, address, udpPort, tcpPort, quicPort)
localNode, err := service.createLocalNode(privKey, address, udpPort, tcpPort)
if tt.expectedError {
require.NotNil(t, err)
return
@@ -238,7 +237,7 @@ func TestMultiAddrsConversion_InvalidIPAddr(t *testing.T) {
genesisTime: time.Now(),
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
}
node, err := s.createLocalNode(pkey, addr, 0, 0, 0)
node, err := s.createLocalNode(pkey, addr, 0, 0)
require.NoError(t, err)
multiAddr := convertToMultiAddr([]*enode.Node{node.Node()})
assert.Equal(t, 0, len(multiAddr), "Invalid ip address converted successfully")
@@ -249,9 +248,8 @@ func TestMultiAddrConversion_OK(t *testing.T) {
ipAddr, pkey := createAddrAndPrivKey(t)
s := &Service{
cfg: &Config{
UDPPort: 2000,
TCPPort: 3000,
QUICPort: 3000,
TCPPort: 0,
UDPPort: 0,
},
genesisTime: time.Now(),
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),

View File

@@ -28,8 +28,7 @@ import (
)
func TestStartDiscv5_DifferentForkDigests(t *testing.T) {
const port = 2000
port := 2000
ipAddr, pkey := createAddrAndPrivKey(t)
genesisTime := time.Now()
genesisValidatorsRoot := make([]byte, fieldparams.RootLength)
@@ -54,7 +53,7 @@ func TestStartDiscv5_DifferentForkDigests(t *testing.T) {
var listeners []*discover.UDPv5
for i := 1; i <= 5; i++ {
port := 3000 + i
port = 3000 + i
cfg.UDPPort = uint(port)
ipAddr, pkey := createAddrAndPrivKey(t)
@@ -99,14 +98,13 @@ func TestStartDiscv5_DifferentForkDigests(t *testing.T) {
s.genesisTime = genesisTime
s.genesisValidatorsRoot = make([]byte, 32)
s.dv5Listener = lastListener
var addrs []ma.Multiaddr
addrs := make([]ma.Multiaddr, 0)
for _, node := range nodes {
if s.filterPeer(node) {
nodeAddrs, err := retrieveMultiAddrsFromNode(node)
for _, n := range nodes {
if s.filterPeer(n) {
addr, err := convertToSingleMultiAddr(n)
require.NoError(t, err)
addrs = append(addrs, nodeAddrs...)
addrs = append(addrs, addr)
}
}
@@ -116,11 +114,10 @@ func TestStartDiscv5_DifferentForkDigests(t *testing.T) {
}
func TestStartDiscv5_SameForkDigests_DifferentNextForkData(t *testing.T) {
const port = 2000
params.SetupTestConfigCleanup(t)
hook := logTest.NewGlobal()
logrus.SetLevel(logrus.TraceLevel)
port := 2000
ipAddr, pkey := createAddrAndPrivKey(t)
genesisTime := time.Now()
genesisValidatorsRoot := make([]byte, 32)
@@ -141,7 +138,7 @@ func TestStartDiscv5_SameForkDigests_DifferentNextForkData(t *testing.T) {
var listeners []*discover.UDPv5
for i := 1; i <= 5; i++ {
port := 3000 + i
port = 3000 + i
cfg.UDPPort = uint(port)
ipAddr, pkey := createAddrAndPrivKey(t)
@@ -191,13 +188,13 @@ func TestStartDiscv5_SameForkDigests_DifferentNextForkData(t *testing.T) {
s.genesisTime = genesisTime
s.genesisValidatorsRoot = make([]byte, 32)
s.dv5Listener = lastListener
addrs := make([]ma.Multiaddr, 0, len(nodes))
var addrs []ma.Multiaddr
for _, node := range nodes {
if s.filterPeer(node) {
nodeAddrs, err := retrieveMultiAddrsFromNode(node)
for _, n := range nodes {
if s.filterPeer(n) {
addr, err := convertToSingleMultiAddr(n)
require.NoError(t, err)
addrs = append(addrs, nodeAddrs...)
addrs = append(addrs, addr)
}
}
if len(addrs) == 0 {

View File

@@ -1,7 +1,6 @@
package p2p
import (
"net"
"strconv"
"strings"
@@ -13,32 +12,32 @@ import (
var log = logrus.WithField("prefix", "p2p")
func logIPAddr(id peer.ID, addrs ...ma.Multiaddr) {
var correctAddr ma.Multiaddr
for _, addr := range addrs {
if !(strings.Contains(addr.String(), "/ip4/") || strings.Contains(addr.String(), "/ip6/")) {
continue
if strings.Contains(addr.String(), "/ip4/") || strings.Contains(addr.String(), "/ip6/") {
correctAddr = addr
break
}
}
if correctAddr != nil {
log.WithField(
"multiAddr",
addr.String()+"/p2p/"+id.String(),
correctAddr.String()+"/p2p/"+id.String(),
).Info("Node started p2p server")
}
}
func logExternalIPAddr(id peer.ID, addr string, tcpPort, quicPort uint) {
func logExternalIPAddr(id peer.ID, addr string, port uint) {
if addr != "" {
multiAddrs, err := MultiAddressBuilder(net.ParseIP(addr), tcpPort, quicPort)
multiAddr, err := MultiAddressBuilder(addr, port)
if err != nil {
log.WithError(err).Error("Could not create multiaddress")
return
}
for _, multiAddr := range multiAddrs {
log.WithField(
"multiAddr",
multiAddr.String()+"/p2p/"+id.String(),
).Info("Node started external p2p server")
}
log.WithField(
"multiAddr",
multiAddr.String()+"/p2p/"+id.String(),
).Info("Node started external p2p server")
}
}

View File

@@ -11,68 +11,40 @@ import (
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/p2p/security/noise"
libp2pquic "github.com/libp2p/go-libp2p/p2p/transport/quic"
libp2ptcp "github.com/libp2p/go-libp2p/p2p/transport/tcp"
"github.com/libp2p/go-libp2p/p2p/transport/tcp"
gomplex "github.com/libp2p/go-mplex"
ma "github.com/multiformats/go-multiaddr"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/config/features"
ecdsaprysm "github.com/prysmaticlabs/prysm/v5/crypto/ecdsa"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
)
type internetProtocol string
const (
udp = "udp"
tcp = "tcp"
quic = "quic"
)
// MultiAddressBuilder takes in an ip address string and port to produce a go multiaddr format.
func MultiAddressBuilder(ip net.IP, tcpPort, quicPort uint) ([]ma.Multiaddr, error) {
ipType, err := extractIpType(ip)
if err != nil {
return nil, errors.Wrap(err, "unable to determine IP type")
func MultiAddressBuilder(ipAddr string, port uint) (ma.Multiaddr, error) {
parsedIP := net.ParseIP(ipAddr)
if parsedIP.To4() == nil && parsedIP.To16() == nil {
return nil, errors.Errorf("invalid ip address provided: %s", ipAddr)
}
// Example: /ip4/1.2.3.4./tcp/5678
multiaddrStr := fmt.Sprintf("/%s/%s/tcp/%d", ipType, ip, tcpPort)
multiAddrTCP, err := ma.NewMultiaddr(multiaddrStr)
if err != nil {
return nil, errors.Wrapf(err, "cannot produce TCP multiaddr format from %s:%d", ip, tcpPort)
if parsedIP.To4() != nil {
return ma.NewMultiaddr(fmt.Sprintf("/ip4/%s/tcp/%d", ipAddr, port))
}
multiaddrs := []ma.Multiaddr{multiAddrTCP}
if features.Get().EnableQUIC {
// Example: /ip4/1.2.3.4/udp/5678/quic-v1
multiAddrQUIC, err := ma.NewMultiaddr(fmt.Sprintf("/%s/%s/udp/%d/quic-v1", ipType, ip, quicPort))
if err != nil {
return nil, errors.Wrapf(err, "cannot produce QUIC multiaddr format from %s:%d", ip, tcpPort)
}
multiaddrs = append(multiaddrs, multiAddrQUIC)
}
return multiaddrs, nil
return ma.NewMultiaddr(fmt.Sprintf("/ip6/%s/tcp/%d", ipAddr, port))
}
// buildOptions for the libp2p host.
func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) ([]libp2p.Option, error) {
cfg := s.cfg
multiaddrs, err := MultiAddressBuilder(ip, cfg.TCPPort, cfg.QUICPort)
listen, err := MultiAddressBuilder(ip.String(), cfg.TCPPort)
if err != nil {
return nil, errors.Wrapf(err, "cannot produce multiaddr format from %s:%d", ip, cfg.TCPPort)
return nil, errors.Wrapf(err, "cannot produce multiaddr format from %s:%d", ip.String(), cfg.TCPPort)
}
if cfg.LocalIP != "" {
localIP := net.ParseIP(cfg.LocalIP)
if localIP == nil {
if net.ParseIP(cfg.LocalIP) == nil {
return nil, errors.Wrapf(err, "invalid local ip provided: %s:%d", cfg.LocalIP, cfg.TCPPort)
}
multiaddrs, err = MultiAddressBuilder(localIP, cfg.TCPPort, cfg.QUICPort)
listen, err = MultiAddressBuilder(cfg.LocalIP, cfg.TCPPort)
if err != nil {
return nil, errors.Wrapf(err, "cannot produce multiaddr format from %s:%d", cfg.LocalIP, cfg.TCPPort)
}
@@ -90,43 +62,36 @@ func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) ([]libp2p.Op
options := []libp2p.Option{
privKeyOption(priKey),
libp2p.ListenAddrs(multiaddrs...),
libp2p.ListenAddrs(listen),
libp2p.UserAgent(version.BuildData()),
libp2p.ConnectionGater(s),
libp2p.Transport(libp2ptcp.NewTCPTransport),
libp2p.Transport(tcp.NewTCPTransport),
libp2p.DefaultMuxers,
libp2p.Muxer("/mplex/6.7.0", mplex.DefaultTransport),
libp2p.Security(noise.ID, noise.New),
libp2p.Ping(false), // Disable Ping Service.
}
if features.Get().EnableQUIC {
options = append(options, libp2p.Transport(libp2pquic.NewTransport))
}
if cfg.EnableUPnP {
options = append(options, libp2p.NATPortMap()) // Allow to use UPnP
}
if cfg.RelayNodeAddr != "" {
options = append(options, libp2p.AddrsFactory(withRelayAddrs(cfg.RelayNodeAddr)))
} else {
// Disable relay if it has not been set.
options = append(options, libp2p.DisableRelay())
}
if cfg.HostAddress != "" {
options = append(options, libp2p.AddrsFactory(func(addrs []ma.Multiaddr) []ma.Multiaddr {
externalMultiaddrs, err := MultiAddressBuilder(net.ParseIP(cfg.HostAddress), cfg.TCPPort, cfg.QUICPort)
external, err := MultiAddressBuilder(cfg.HostAddress, cfg.TCPPort)
if err != nil {
log.WithError(err).Error("Unable to create external multiaddress")
} else {
addrs = append(addrs, externalMultiaddrs...)
addrs = append(addrs, external)
}
return addrs
}))
}
if cfg.HostDNS != "" {
options = append(options, libp2p.AddrsFactory(func(addrs []ma.Multiaddr) []ma.Multiaddr {
external, err := ma.NewMultiaddr(fmt.Sprintf("/dns4/%s/tcp/%d", cfg.HostDNS, cfg.TCPPort))
@@ -142,47 +107,21 @@ func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) ([]libp2p.Op
if features.Get().DisableResourceManager {
options = append(options, libp2p.ResourceManager(&network.NullResourceManager{}))
}
return options, nil
}
func extractIpType(ip net.IP) (string, error) {
if ip.To4() != nil {
return "ip4", nil
func multiAddressBuilderWithID(ipAddr, protocol string, port uint, id peer.ID) (ma.Multiaddr, error) {
parsedIP := net.ParseIP(ipAddr)
if parsedIP.To4() == nil && parsedIP.To16() == nil {
return nil, errors.Errorf("invalid ip address provided: %s", ipAddr)
}
if ip.To16() != nil {
return "ip6", nil
if id.String() == "" {
return nil, errors.New("empty peer id given")
}
return "", errors.Errorf("provided IP address is neither IPv4 nor IPv6: %s", ip)
}
func multiAddressBuilderWithID(ip net.IP, protocol internetProtocol, port uint, id peer.ID) (ma.Multiaddr, error) {
var multiaddrStr string
if id == "" {
return nil, errors.Errorf("empty peer id given: %s", id)
if parsedIP.To4() != nil {
return ma.NewMultiaddr(fmt.Sprintf("/ip4/%s/%s/%d/p2p/%s", ipAddr, protocol, port, id.String()))
}
ipType, err := extractIpType(ip)
if err != nil {
return nil, errors.Wrap(err, "unable to determine IP type")
}
switch protocol {
case udp, tcp:
// Example with UDP: /ip4/1.2.3.4/udp/5678/p2p/16Uiu2HAkum7hhuMpWqFj3yNLcmQBGmThmqw2ohaCRThXQuKU9ohs
// Example with TCP: /ip6/1.2.3.4/tcp/5678/p2p/16Uiu2HAkum7hhuMpWqFj3yNLcmQBGmThmqw2ohaCRThXQuKU9ohs
multiaddrStr = fmt.Sprintf("/%s/%s/%s/%d/p2p/%s", ipType, ip, protocol, port, id)
case quic:
// Example: /ip4/1.2.3.4/udp/5678/quic-v1/p2p/16Uiu2HAkum7hhuMpWqFj3yNLcmQBGmThmqw2ohaCRThXQuKU9ohs
multiaddrStr = fmt.Sprintf("/%s/%s/udp/%d/quic-v1/p2p/%s", ipType, ip, port, id)
default:
return nil, errors.Errorf("unsupported protocol: %s", protocol)
}
return ma.NewMultiaddr(multiaddrStr)
return ma.NewMultiaddr(fmt.Sprintf("/ip6/%s/%s/%d/p2p/%s", ipAddr, protocol, port, id.String()))
}
// Adds a private key to the libp2p option if the option was provided.
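
The MultiAddressBuilder hunks above switch between producing a single TCP listen address and a TCP plus QUIC pair. The sketch below shows the two textual shapes involved ("/ipX/.../tcp/<port>" and "/ipX/.../udp/<port>/quic-v1") using go-multiaddr; it is a standalone illustration, not the builder itself, and the feature-flag gating is reduced to a plain boolean parameter.

```go
// Sketch of the two listen-address shapes: a TCP multiaddr and, when QUIC is
// enabled, a udp/.../quic-v1 multiaddr on the same IP.
package p2pdemo

import (
	"fmt"
	"net"

	ma "github.com/multiformats/go-multiaddr"
)

func listenAddrs(ip net.IP, tcpPort, quicPort uint, enableQUIC bool) ([]ma.Multiaddr, error) {
	if ip.To4() == nil && ip.To16() == nil {
		return nil, fmt.Errorf("invalid ip address provided: %s", ip)
	}
	ipType := "ip4"
	if ip.To4() == nil {
		ipType = "ip6"
	}
	tcpAddr, err := ma.NewMultiaddr(fmt.Sprintf("/%s/%s/tcp/%d", ipType, ip, tcpPort))
	if err != nil {
		return nil, err
	}
	addrs := []ma.Multiaddr{tcpAddr}
	if enableQUIC {
		quicAddr, err := ma.NewMultiaddr(fmt.Sprintf("/%s/%s/udp/%d/quic-v1", ipType, ip, quicPort))
		if err != nil {
			return nil, err
		}
		addrs = append(addrs, quicAddr)
	}
	return addrs, nil
}
```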

View File

@@ -13,7 +13,6 @@ import (
"github.com/ethereum/go-ethereum/p2p/enr"
"github.com/libp2p/go-libp2p"
"github.com/libp2p/go-libp2p/core/crypto"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/protocol"
mock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v5/config/params"
@@ -89,34 +88,30 @@ func TestIPV6Support(t *testing.T) {
lNode := enode.NewLocalNode(db, key)
mockIPV6 := net.IP{0xff, 0x02, 0xAA, 0, 0x1F, 0, 0x2E, 0, 0, 0x36, 0x45, 0, 0, 0, 0, 0x02}
lNode.Set(enr.IP(mockIPV6))
mas, err := retrieveMultiAddrsFromNode(lNode.Node())
ma, err := convertToSingleMultiAddr(lNode.Node())
if err != nil {
t.Fatal(err)
}
for _, ma := range mas {
ipv6Exists := false
for _, p := range ma.Protocols() {
if p.Name == "ip4" {
t.Error("Got ip4 address instead of ip6")
}
if p.Name == "ip6" {
ipv6Exists = true
}
ipv6Exists := false
for _, p := range ma.Protocols() {
if p.Name == "ip4" {
t.Error("Got ip4 address instead of ip6")
}
if !ipv6Exists {
t.Error("Multiaddress did not have ipv6 protocol")
if p.Name == "ip6" {
ipv6Exists = true
}
}
if !ipv6Exists {
t.Error("Multiaddress did not have ipv6 protocol")
}
}
func TestDefaultMultiplexers(t *testing.T) {
var cfg libp2p.Config
_ = cfg
p2pCfg := &Config{
TCPPort: 2000,
UDPPort: 2000,
TCPPort: 3000,
QUICPort: 3000,
StateNotifier: &mock.MockStateNotifier{},
}
svc := &Service{cfg: p2pCfg}
@@ -132,57 +127,5 @@ func TestDefaultMultiplexers(t *testing.T) {
assert.Equal(t, protocol.ID("/yamux/1.0.0"), cfg.Muxers[0].ID)
assert.Equal(t, protocol.ID("/mplex/6.7.0"), cfg.Muxers[1].ID)
}
func TestMultiAddressBuilderWithID(t *testing.T) {
testCases := []struct {
name string
ip net.IP
protocol internetProtocol
port uint
id string
expectedMultiaddrStr string
}{
{
name: "UDP",
ip: net.IPv4(192, 168, 0, 1),
protocol: udp,
port: 5678,
id: "0025080212210204fb1ebb1aa467527d34306a4794a5171d6516405e720b909b7f816d63aef96a",
expectedMultiaddrStr: "/ip4/192.168.0.1/udp/5678/p2p/16Uiu2HAkum7hhuMpWqFj3yNLcmQBGmThmqw2ohaCRThXQuKU9ohs",
},
{
name: "TCP",
ip: net.IPv4(192, 168, 0, 1),
protocol: tcp,
port: 5678,
id: "0025080212210204fb1ebb1aa467527d34306a4794a5171d6516405e720b909b7f816d63aef96a",
expectedMultiaddrStr: "/ip4/192.168.0.1/tcp/5678/p2p/16Uiu2HAkum7hhuMpWqFj3yNLcmQBGmThmqw2ohaCRThXQuKU9ohs",
},
{
name: "QUIC",
ip: net.IPv4(192, 168, 0, 1),
protocol: quic,
port: 5678,
id: "0025080212210204fb1ebb1aa467527d34306a4794a5171d6516405e720b909b7f816d63aef96a",
expectedMultiaddrStr: "/ip4/192.168.0.1/udp/5678/quic-v1/p2p/16Uiu2HAkum7hhuMpWqFj3yNLcmQBGmThmqw2ohaCRThXQuKU9ohs",
},
}
for _, tt := range testCases {
t.Run(tt.name, func(t *testing.T) {
id, err := hex.DecodeString(tt.id)
require.NoError(t, err)
actualMultiaddr, err := multiAddressBuilderWithID(tt.ip, tt.protocol, tt.port, peer.ID(id))
require.NoError(t, err)
actualMultiaddrStr := actualMultiaddr.String()
require.Equal(t, tt.expectedMultiaddrStr, actualMultiaddrStr)
})
}
}

View File

@@ -22,7 +22,7 @@ func TestGossipParameters(t *testing.T) {
pms := pubsubGossipParam()
assert.Equal(t, gossipSubMcacheLen, pms.HistoryLength, "gossipSubMcacheLen")
assert.Equal(t, gossipSubMcacheGossip, pms.HistoryGossip, "gossipSubMcacheGossip")
assert.Equal(t, gossipSubSeenTTL, int(pubsub.TimeCacheDuration.Seconds()), "gossipSubSeenTtl")
assert.Equal(t, gossipSubSeenTTL, int(pubsub.TimeCacheDuration.Milliseconds()/pms.HeartbeatInterval.Milliseconds()), "gossipSubSeenTtl")
}
func TestFanoutParameters(t *testing.T) {

View File

@@ -1,7 +1,7 @@
// Package peers provides information about peers at the Ethereum consensus protocol level.
//
// "Protocol level" is the level above the network level, so this layer never sees or interacts with
// (for example) hosts that are unreachable due to being down, firewalled, etc. Instead, this works
// (for example) hosts that are uncontactable due to being down, firewalled, etc. Instead, this works
// with peers that are contactable but may or may not be of the correct fork version, not currently
// required due to the number of current connections, etc.
//
@@ -26,7 +26,6 @@ import (
"context"
"math"
"sort"
"strings"
"time"
"github.com/ethereum/go-ethereum/p2p/enr"
@@ -60,8 +59,8 @@ const (
)
const (
// CollocationLimit restricts how many peer identities we can see from a single ip or ipv6 subnet.
CollocationLimit = 5
// ColocationLimit restricts how many peer identities we can see from a single ip or ipv6 subnet.
ColocationLimit = 5
// Additional buffer beyond current peer limit, from which we can store the relevant peer statuses.
maxLimitBuffer = 150
@@ -77,13 +76,6 @@ const (
MaxBackOffDuration = 5000
)
type InternetProtocol string
const (
TCP = "tcp"
QUIC = "quic"
)
// Status is the structure holding the peer status information.
type Status struct {
ctx context.Context
@@ -457,19 +449,6 @@ func (p *Status) InboundConnected() []peer.ID {
return peers
}
// InboundConnectedWithProtocol returns the current batch of inbound peers that are connected with a given protocol.
func (p *Status) InboundConnectedWithProtocol(protocol InternetProtocol) []peer.ID {
p.store.RLock()
defer p.store.RUnlock()
peers := make([]peer.ID, 0)
for pid, peerData := range p.store.Peers() {
if peerData.ConnState == PeerConnected && peerData.Direction == network.DirInbound && strings.Contains(peerData.Address.String(), string(protocol)) {
peers = append(peers, pid)
}
}
return peers
}
// Outbound returns the current batch of outbound peers.
func (p *Status) Outbound() []peer.ID {
p.store.RLock()
@@ -496,20 +475,7 @@ func (p *Status) OutboundConnected() []peer.ID {
return peers
}
// OutboundConnectedWithProtocol returns the current batch of outbound peers that are connected with a given protocol.
func (p *Status) OutboundConnectedWithProtocol(protocol InternetProtocol) []peer.ID {
p.store.RLock()
defer p.store.RUnlock()
peers := make([]peer.ID, 0)
for pid, peerData := range p.store.Peers() {
if peerData.ConnState == PeerConnected && peerData.Direction == network.DirOutbound && strings.Contains(peerData.Address.String(), string(protocol)) {
peers = append(peers, pid)
}
}
return peers
}
// Active returns the peers that are active (connecting or connected).
// Active returns the peers that are connecting or connected.
func (p *Status) Active() []peer.ID {
p.store.RLock()
defer p.store.RUnlock()
@@ -548,7 +514,7 @@ func (p *Status) Disconnected() []peer.ID {
return peers
}
// Inactive returns the peers that are inactive (disconnecting or disconnected).
// Inactive returns the peers that are disconnecting or disconnected.
func (p *Status) Inactive() []peer.ID {
p.store.RLock()
defer p.store.RUnlock()
@@ -582,7 +548,7 @@ func (p *Status) Prune() {
p.store.Lock()
defer p.store.Unlock()
// Default to old method if flag isn't enabled.
// Default to old method if flag isnt enabled.
if !features.Get().EnablePeerScorer {
p.deprecatedPrune()
return
@@ -995,7 +961,7 @@ func (p *Status) isfromBadIP(pid peer.ID) bool {
return true
}
if val, ok := p.ipTracker[ip.String()]; ok {
if val > CollocationLimit {
if val > ColocationLimit {
return true
}
}
@@ -1046,7 +1012,7 @@ func (p *Status) tallyIPTracker() {
}
func sameIP(firstAddr, secondAddr ma.Multiaddr) bool {
// Exit early if we do get nil multi-addresses
// Exit early if we do get nil multiaddresses
if firstAddr == nil || secondAddr == nil {
return false
}
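
The *ConnectedWithProtocol helpers in the hunk above classify connected peers by the transport component of their stored multiaddr. A small standalone sketch of that classification; the substring matching mirrors the strings.Contains check in the diff, and the example addresses in the comment are illustrative.

```go
// Sketch of multiaddr-based transport classification: a peer whose stored
// address contains a quic component counts as QUIC, a /tcp/ component as TCP.
// For example, "/ip4/192.168.1.3/udp/13000/quic-v1" classifies as "quic".
package peersdemo

import (
	"strings"

	ma "github.com/multiformats/go-multiaddr"
)

func transportOf(addr ma.Multiaddr) string {
	s := addr.String()
	switch {
	case strings.Contains(s, "/quic"):
		return "quic"
	case strings.Contains(s, "/tcp/"):
		return "tcp"
	default:
		return "unknown"
	}
}
```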

View File

@@ -565,7 +565,7 @@ func TestPeerIPTracker(t *testing.T) {
badIP := "211.227.218.116"
var badPeers []peer.ID
for i := 0; i < peers.CollocationLimit+10; i++ {
for i := 0; i < peers.ColocationLimit+10; i++ {
port := strconv.Itoa(3000 + i)
addr, err := ma.NewMultiaddr("/ip4/" + badIP + "/tcp/" + port)
if err != nil {
@@ -1111,87 +1111,6 @@ func TestInbound(t *testing.T) {
assert.Equal(t, inbound.String(), result[0].String())
}
func TestInboundConnected(t *testing.T) {
p := peers.NewStatus(context.Background(), &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
Threshold: 0,
},
},
})
addr, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/33333")
require.NoError(t, err)
inbound := createPeer(t, p, addr, network.DirInbound, peers.PeerConnected)
createPeer(t, p, addr, network.DirInbound, peers.PeerConnecting)
result := p.InboundConnected()
require.Equal(t, 1, len(result))
assert.Equal(t, inbound.String(), result[0].String())
}
func TestInboundConnectedWithProtocol(t *testing.T) {
p := peers.NewStatus(context.Background(), &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
Threshold: 0,
},
},
})
addrsTCP := []string{
"/ip4/127.0.0.1/tcp/33333",
"/ip4/127.0.0.2/tcp/44444",
}
addrsQUIC := []string{
"/ip4/192.168.1.3/udp/13000/quic-v1",
"/ip4/192.168.1.4/udp/14000/quic-v1",
"/ip4/192.168.1.5/udp/14000/quic-v1",
}
expectedTCP := make(map[string]bool, len(addrsTCP))
for _, addr := range addrsTCP {
multiaddr, err := ma.NewMultiaddr(addr)
require.NoError(t, err)
peer := createPeer(t, p, multiaddr, network.DirInbound, peers.PeerConnected)
expectedTCP[peer.String()] = true
}
expectedQUIC := make(map[string]bool, len(addrsQUIC))
for _, addr := range addrsQUIC {
multiaddr, err := ma.NewMultiaddr(addr)
require.NoError(t, err)
peer := createPeer(t, p, multiaddr, network.DirInbound, peers.PeerConnected)
expectedQUIC[peer.String()] = true
}
// TCP
// ---
actualTCP := p.InboundConnectedWithProtocol(peers.TCP)
require.Equal(t, len(expectedTCP), len(actualTCP))
for _, actualPeer := range actualTCP {
_, ok := expectedTCP[actualPeer.String()]
require.Equal(t, true, ok)
}
// QUIC
// ----
actualQUIC := p.InboundConnectedWithProtocol(peers.QUIC)
require.Equal(t, len(expectedQUIC), len(actualQUIC))
for _, actualPeer := range actualQUIC {
_, ok := expectedQUIC[actualPeer.String()]
require.Equal(t, true, ok)
}
}
func TestOutbound(t *testing.T) {
p := peers.NewStatus(context.Background(), &peers.StatusConfig{
PeerLimit: 30,
@@ -1211,87 +1130,6 @@ func TestOutbound(t *testing.T) {
assert.Equal(t, outbound.String(), result[0].String())
}
func TestOutboundConnected(t *testing.T) {
p := peers.NewStatus(context.Background(), &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
Threshold: 0,
},
},
})
addr, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/33333")
require.NoError(t, err)
inbound := createPeer(t, p, addr, network.DirOutbound, peers.PeerConnected)
createPeer(t, p, addr, network.DirOutbound, peers.PeerConnecting)
result := p.OutboundConnected()
require.Equal(t, 1, len(result))
assert.Equal(t, inbound.String(), result[0].String())
}
func TestOutboundConnectedWithProtocol(t *testing.T) {
p := peers.NewStatus(context.Background(), &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
Threshold: 0,
},
},
})
addrsTCP := []string{
"/ip4/127.0.0.1/tcp/33333",
"/ip4/127.0.0.2/tcp/44444",
}
addrsQUIC := []string{
"/ip4/192.168.1.3/udp/13000/quic-v1",
"/ip4/192.168.1.4/udp/14000/quic-v1",
"/ip4/192.168.1.5/udp/14000/quic-v1",
}
expectedTCP := make(map[string]bool, len(addrsTCP))
for _, addr := range addrsTCP {
multiaddr, err := ma.NewMultiaddr(addr)
require.NoError(t, err)
peer := createPeer(t, p, multiaddr, network.DirOutbound, peers.PeerConnected)
expectedTCP[peer.String()] = true
}
expectedQUIC := make(map[string]bool, len(addrsQUIC))
for _, addr := range addrsQUIC {
multiaddr, err := ma.NewMultiaddr(addr)
require.NoError(t, err)
peer := createPeer(t, p, multiaddr, network.DirOutbound, peers.PeerConnected)
expectedQUIC[peer.String()] = true
}
// TCP
// ---
actualTCP := p.OutboundConnectedWithProtocol(peers.TCP)
require.Equal(t, len(expectedTCP), len(actualTCP))
for _, actualPeer := range actualTCP {
_, ok := expectedTCP[actualPeer.String()]
require.Equal(t, true, ok)
}
// QUIC
// ----
actualQUIC := p.OutboundConnectedWithProtocol(peers.QUIC)
require.Equal(t, len(expectedQUIC), len(actualQUIC))
for _, actualPeer := range actualQUIC {
_, ok := expectedQUIC[actualPeer.String()]
require.Equal(t, true, ok)
}
}
// addPeer is a helper to add a peer with a given connection state)
func addPeer(t *testing.T, p *peers.Status, state peerdata.PeerConnectionState) peer.ID {
// Set up some peers with different states

View File

@@ -3,7 +3,6 @@ package p2p
import (
"context"
"encoding/hex"
"fmt"
"strings"
"time"
@@ -26,7 +25,7 @@ const (
// gossip parameters
gossipSubMcacheLen = 6 // number of windows to retain full messages in cache for `IWANT` responses
gossipSubMcacheGossip = 3 // number of windows to gossip about
gossipSubSeenTTL = 768 // number of seconds to retain message IDs ( 2 epochs)
gossipSubSeenTTL = 550 // number of heartbeat intervals to retain message IDs
// fanout ttl
gossipSubFanoutTTL = 60000000000 // TTL for fanout maps for topics we are not subscribed to but have published to, in nano seconds
@@ -131,7 +130,7 @@ func (s *Service) peerInspector(peerMap map[peer.ID]*pubsub.PeerScoreSnapshot) {
}
}
// pubsubOptions creates a list of options to configure our router with.
// Creates a list of pubsub options to configure out router with.
func (s *Service) pubsubOptions() []pubsub.Option {
psOpts := []pubsub.Option{
pubsub.WithMessageSignaturePolicy(pubsub.StrictNoSign),
@@ -148,35 +147,9 @@ func (s *Service) pubsubOptions() []pubsub.Option {
pubsub.WithGossipSubParams(pubsubGossipParam()),
pubsub.WithRawTracer(gossipTracer{host: s.host}),
}
if len(s.cfg.StaticPeers) > 0 {
directPeersAddrInfos, err := parsePeersEnr(s.cfg.StaticPeers)
if err != nil {
log.WithError(err).Error("Could not add direct peer option")
return psOpts
}
psOpts = append(psOpts, pubsub.WithDirectPeers(directPeersAddrInfos))
}
return psOpts
}
// parsePeersEnr takes a list of raw ENRs and converts them into a list of AddrInfos.
func parsePeersEnr(peers []string) ([]peer.AddrInfo, error) {
addrs, err := PeersFromStringAddrs(peers)
if err != nil {
return nil, fmt.Errorf("Cannot convert peers raw ENRs into multiaddresses: %v", err)
}
if len(addrs) == 0 {
return nil, fmt.Errorf("Converting peers raw ENRs into multiaddresses resulted in an empty list")
}
directAddrInfos, err := peer.AddrInfosFromP2pAddrs(addrs...)
if err != nil {
return nil, fmt.Errorf("Cannot convert peers multiaddresses into AddrInfos: %v", err)
}
return directAddrInfos, nil
}
// creates a custom gossipsub parameter set.
func pubsubGossipParam() pubsub.GossipSubParams {
gParams := pubsub.DefaultGossipSubParams()
@@ -192,8 +165,7 @@ func pubsubGossipParam() pubsub.GossipSubParams {
// to configure our message id time-cache rather than instantiating
// it with a router instance.
func setPubSubParameters() {
seenTtl := 2 * time.Second * time.Duration(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
pubsub.TimeCacheDuration = seenTtl
pubsub.TimeCacheDuration = 550 * gossipSubHeartbeatInterval
}
// convert from libp2p's internal schema to a compatible prysm protobuf format.
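
The seen-message TTL differs across the two sides of the pubsub diff: one side retains message IDs for a fixed number of heartbeat intervals (550), the other for two epochs expressed in seconds. With mainnet parameters (32 slots per epoch, 12 seconds per slot) the seconds-based value works out to 2 × 32 × 12 = 768 s, which matches the gossipSubSeenTTL constant on that side. A tiny sketch of the computation:

```go
// Worked computation of the seconds-based seen-message TTL: two mainnet epochs.
package pubsubdemo

import "time"

const (
	slotsPerEpoch  = 32
	secondsPerSlot = 12
)

func seenTTL() time.Duration {
	// 2 epochs * 32 slots * 12 s = 768 s
	return 2 * time.Second * time.Duration(slotsPerEpoch*secondsPerSlot)
}
```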

View File

@@ -24,7 +24,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/scorers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/types"
"github.com/prysmaticlabs/prysm/v5/config/features"
"github.com/prysmaticlabs/prysm/v5/config/params"
leakybucket "github.com/prysmaticlabs/prysm/v5/container/leaky-bucket"
prysmnetwork "github.com/prysmaticlabs/prysm/v5/network"
@@ -125,34 +124,31 @@ func NewService(ctx context.Context, cfg *Config) (*Service, error) {
if err != nil {
return nil, errors.Wrapf(err, "failed to build p2p options")
}
// Sets mplex timeouts
configureMplex()
h, err := libp2p.New(opts...)
if err != nil {
return nil, errors.Wrapf(err, "failed to create p2p host")
log.WithError(err).Error("Failed to create p2p host")
return nil, err
}
s.host = h
// Gossipsub registration is done before we add in any new peers
// due to libp2p's gossipsub implementation not taking into
// account previously added peers when creating the gossipsub
// object.
psOpts := s.pubsubOptions()
// Set the pubsub global parameters that we require.
setPubSubParameters()
// Reinitialize them in the event we are running a custom config.
attestationSubnetCount = params.BeaconConfig().AttestationSubnetCount
syncCommsSubnetCount = params.BeaconConfig().SyncCommitteeSubnetCount
gs, err := pubsub.NewGossipSub(s.ctx, s.host, psOpts...)
if err != nil {
return nil, errors.Wrapf(err, "failed to create p2p pubsub")
log.WithError(err).Error("Failed to start pubsub")
return nil, err
}
s.pubsub = gs
s.peers = peers.NewStatus(ctx, &peers.StatusConfig{
@@ -217,7 +213,7 @@ func (s *Service) Start() {
if len(s.cfg.StaticPeers) > 0 {
addrs, err := PeersFromStringAddrs(s.cfg.StaticPeers)
if err != nil {
log.WithError(err).Error("could not convert ENR to multiaddr")
log.WithError(err).Error("Could not connect to static peer")
}
// Set trusted peers for those that are provided as static addresses.
pids := peerIdsFromMultiAddrs(addrs)
@@ -236,24 +232,11 @@ func (s *Service) Start() {
async.RunEvery(s.ctx, time.Duration(params.BeaconConfig().RespTimeout)*time.Second, s.updateMetrics)
async.RunEvery(s.ctx, refreshRate, s.RefreshENR)
async.RunEvery(s.ctx, 1*time.Minute, func() {
inboundQUICCount := len(s.peers.InboundConnectedWithProtocol(peers.QUIC))
inboundTCPCount := len(s.peers.InboundConnectedWithProtocol(peers.TCP))
outboundQUICCount := len(s.peers.OutboundConnectedWithProtocol(peers.QUIC))
outboundTCPCount := len(s.peers.OutboundConnectedWithProtocol(peers.TCP))
total := inboundQUICCount + inboundTCPCount + outboundQUICCount + outboundTCPCount
fields := logrus.Fields{
"inboundTCP": inboundTCPCount,
"outboundTCP": outboundTCPCount,
"total": total,
}
if features.Get().EnableQUIC {
fields["inboundQUIC"] = inboundQUICCount
fields["outboundQUIC"] = outboundQUICCount
}
log.WithFields(fields).Info("Connected peers")
log.WithFields(logrus.Fields{
"inbound": len(s.peers.InboundConnected()),
"outbound": len(s.peers.OutboundConnected()),
"activePeers": len(s.peers.Active()),
}).Info("Peer summary")
})
multiAddrs := s.host.Network().ListenAddresses()
@@ -261,10 +244,9 @@ func (s *Service) Start() {
p2pHostAddress := s.cfg.HostAddress
p2pTCPPort := s.cfg.TCPPort
p2pQUICPort := s.cfg.QUICPort
if p2pHostAddress != "" {
logExternalIPAddr(s.host.ID(), p2pHostAddress, p2pTCPPort, p2pQUICPort)
logExternalIPAddr(s.host.ID(), p2pHostAddress, p2pTCPPort)
verifyConnectivity(p2pHostAddress, p2pTCPPort, "tcp")
}

View File

@@ -102,9 +102,8 @@ func TestService_Start_OnlyStartsOnce(t *testing.T) {
cs := startup.NewClockSynchronizer()
cfg := &Config{
TCPPort: 2000,
UDPPort: 2000,
TCPPort: 3000,
QUICPort: 3000,
ClockWaiter: cs,
}
s, err := NewService(context.Background(), cfg)
@@ -148,9 +147,8 @@ func TestService_Start_NoDiscoverFlag(t *testing.T) {
cs := startup.NewClockSynchronizer()
cfg := &Config{
TCPPort: 2000,
UDPPort: 2000,
TCPPort: 3000,
QUICPort: 3000,
StateNotifier: &mock.MockStateNotifier{},
NoDiscovery: true, // <-- no s.dv5Listener is created
ClockWaiter: cs,

View File

@@ -93,11 +93,6 @@ func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string,
if err != nil {
continue
}
if info == nil {
continue
}
wg.Add(1)
go func() {
if err := s.connectWithPeer(ctx, *info); err != nil {

View File

@@ -66,7 +66,7 @@ func TestStartDiscV5_FindPeersWithSubnet(t *testing.T) {
genesisTime := time.Now()
bootNodeService := &Service{
cfg: &Config{UDPPort: 2000, TCPPort: 3000, QUICPort: 3000},
cfg: &Config{TCPPort: 2000, UDPPort: 3000},
genesisTime: genesisTime,
genesisValidatorsRoot: genesisValidatorsRoot,
}
@@ -89,9 +89,8 @@ func TestStartDiscV5_FindPeersWithSubnet(t *testing.T) {
service, err := NewService(ctx, &Config{
Discv5BootStrapAddrs: []string{bootNodeENR},
MaxPeers: 30,
UDPPort: uint(2000 + i),
TCPPort: uint(3000 + i),
QUICPort: uint(3000 + i),
TCPPort: uint(2000 + i),
UDPPort: uint(3000 + i),
})
require.NoError(t, err)
@@ -134,9 +133,8 @@ func TestStartDiscV5_FindPeersWithSubnet(t *testing.T) {
cfg := &Config{
Discv5BootStrapAddrs: []string{bootNodeENR},
MaxPeers: 30,
UDPPort: 2010,
TCPPort: 3010,
QUICPort: 3010,
TCPPort: 2010,
UDPPort: 3010,
}
service, err := NewService(ctx, cfg)

View File

@@ -50,7 +50,7 @@ func ensurePeerConnections(ctx context.Context, h host.Host, peers *peers.Status
c := h.Network().ConnsToPeer(p.ID)
if len(c) == 0 {
if err := connectWithTimeout(ctx, h, p); err != nil {
log.WithField("peer", p.ID).WithField("addrs", p.Addrs).WithError(err).Errorf("failed to reconnect to peer")
log.WithField("peer", p.ID).WithField("addrs", p.Addrs).WithError(err).Errorf("Failed to reconnect to peer")
continue
}
}

View File

@@ -37,7 +37,6 @@ go_library(
"//beacon-chain/rpc/eth/helpers:go_default_library",
"//beacon-chain/rpc/eth/shared:go_default_library",
"//beacon-chain/rpc/lookup:go_default_library",
"//beacon-chain/rpc/prysm/v1alpha1/validator:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
@@ -122,7 +121,6 @@ go_test(
"@com_github_gorilla_mux//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
"@com_github_stretchr_testify//mock:go_default_library",
"@org_uber_go_mock//gomock:go_default_library",
],

View File

@@ -21,7 +21,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filters"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/shared"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/prysm/v1alpha1/validator"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
consensus_types "github.com/prysmaticlabs/prysm/v5/consensus-types"
@@ -33,7 +32,6 @@ import (
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
@@ -44,8 +42,7 @@ const (
)
var (
errNilBlock = errors.New("nil block")
errEquivocatedBlock = errors.New("block is equivocated")
errNilBlock = errors.New("nil block")
)
type handled bool
@@ -1257,16 +1254,6 @@ func (s *Server) publishBlockSSZ(ctx context.Context, w http.ResponseWriter, r *
},
}
if err = s.validateBroadcast(ctx, r, genericBlock); err != nil {
if errors.Is(err, errEquivocatedBlock) {
b, err := blocks.NewSignedBeaconBlock(genericBlock)
if err != nil {
httputil.HandleError(w, err.Error(), http.StatusBadRequest)
return
}
if err := s.broadcastSeenBlockSidecars(ctx, b, genericBlock.GetDeneb().Blobs, genericBlock.GetDeneb().KzgProofs); err != nil {
log.WithError(err).Error("Failed to broadcast blob sidecars")
}
}
httputil.HandleError(w, err.Error(), http.StatusBadRequest)
return
}
@@ -1396,16 +1383,6 @@ func (s *Server) publishBlock(ctx context.Context, w http.ResponseWriter, r *htt
consensusBlock, err = denebBlockContents.ToGeneric()
if err == nil {
if err = s.validateBroadcast(ctx, r, consensusBlock); err != nil {
if errors.Is(err, errEquivocatedBlock) {
b, err := blocks.NewSignedBeaconBlock(consensusBlock)
if err != nil {
httputil.HandleError(w, err.Error(), http.StatusBadRequest)
return
}
if err := s.broadcastSeenBlockSidecars(ctx, b, consensusBlock.GetDeneb().Blobs, consensusBlock.GetDeneb().KzgProofs); err != nil {
log.WithError(err).Error("Failed to broadcast blob sidecars")
}
}
httputil.HandleError(w, err.Error(), http.StatusBadRequest)
return
}
@@ -1570,7 +1547,7 @@ func (s *Server) validateConsensus(ctx context.Context, blk interfaces.ReadOnlyS
func (s *Server) validateEquivocation(blk interfaces.ReadOnlyBeaconBlock) error {
if s.ForkchoiceFetcher.HighestReceivedBlockSlot() == blk.Slot() {
return errors.Wrapf(errEquivocatedBlock, "block for slot %d already exists in fork choice", blk.Slot())
return fmt.Errorf("block for slot %d already exists in fork choice", blk.Slot())
}
return nil
}
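
One side of the equivocation hunk returns a wrapped sentinel (errEquivocatedBlock) instead of a plain fmt.Errorf, so the publish handlers can branch on it with errors.Is. The sketch below shows that sentinel pattern with standard-library %w wrapping; the handler code itself uses github.com/pkg/errors.Wrapf, which likewise keeps the sentinel reachable through errors.Is, and the slot comparison here is simplified to plain uint64 values.

```go
// Sketch of the sentinel-error pattern behind the equivocation check.
package handlersdemo

import (
	"errors"
	"fmt"
)

var errEquivocatedBlock = errors.New("block is equivocated")

func validateEquivocation(highestSeenSlot, blkSlot uint64) error {
	if highestSeenSlot == blkSlot {
		return fmt.Errorf("block for slot %d already exists in fork choice: %w", blkSlot, errEquivocatedBlock)
	}
	return nil
}

// isEquivocation lets callers decide whether to rebroadcast blob sidecars for
// the already-seen block before rejecting the request.
func isEquivocation(err error) bool {
	return errors.Is(err, errEquivocatedBlock)
}
```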
@@ -2095,37 +2072,3 @@ func (s *Server) GetDepositSnapshot(w http.ResponseWriter, r *http.Request) {
},
)
}
// Broadcast blob sidecars even if the block of the same slot has been imported.
// To ensure safety, we will only broadcast blob sidecars if the header references the same block that was previously seen.
// Otherwise, a proposer could get slashed through a different blob sidecar header reference.
func (s *Server) broadcastSeenBlockSidecars(
ctx context.Context,
b interfaces.SignedBeaconBlock,
blobs [][]byte,
kzgProofs [][]byte) error {
scs, err := validator.BuildBlobSidecars(b, blobs, kzgProofs)
if err != nil {
return err
}
for _, sc := range scs {
r, err := sc.SignedBlockHeader.Header.HashTreeRoot()
if err != nil {
log.WithError(err).Error("Failed to hash block header for blob sidecar")
continue
}
if !s.FinalizationFetcher.InForkchoice(r) {
log.WithField("root", fmt.Sprintf("%#x", r)).Debug("Block header not in forkchoice, skipping blob sidecar broadcast")
continue
}
if err := s.Broadcaster.BroadcastBlob(ctx, sc.Index, sc); err != nil {
log.WithError(err).Error("Failed to broadcast blob sidecar for index ", sc.Index)
}
log.WithFields(logrus.Fields{
"index": sc.Index,
"slot": sc.SignedBlockHeader.Header.Slot,
"kzgCommitment": fmt.Sprintf("%#x", sc.KzgCommitment),
}).Info("Broadcasted blob sidecar for already seen block")
}
return nil
}

View File

@@ -13,8 +13,6 @@ import (
"time"
"github.com/ethereum/go-ethereum/common/hexutil"
mockp2p "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/testing"
logTest "github.com/sirupsen/logrus/hooks/test"
"go.uber.org/mock/gomock"
"github.com/gorilla/mux"
@@ -2474,9 +2472,7 @@ func TestValidateEquivocation(t *testing.T) {
require.NoError(t, err)
blk.SetSlot(st.Slot())
err = server.validateEquivocation(blk.Block())
assert.ErrorContains(t, "already exists", err)
require.ErrorIs(t, err, errEquivocatedBlock)
assert.ErrorContains(t, "already exists", server.validateEquivocation(blk.Block()))
})
}
@@ -3634,27 +3630,3 @@ func TestGetDepositSnapshot(t *testing.T) {
assert.Equal(t, finalized, len(resp.Finalized))
})
}
func TestServer_broadcastBlobSidecars(t *testing.T) {
hook := logTest.NewGlobal()
blockToPropose := util.NewBeaconBlockContentsDeneb()
blockToPropose.Blobs = [][]byte{{0x01}, {0x02}, {0x03}}
blockToPropose.KzgProofs = [][]byte{{0x01}, {0x02}, {0x03}}
blockToPropose.Block.Block.Body.BlobKzgCommitments = [][]byte{bytesutil.PadTo([]byte("kc"), 48), bytesutil.PadTo([]byte("kc1"), 48), bytesutil.PadTo([]byte("kc2"), 48)}
d := &eth.GenericSignedBeaconBlock_Deneb{Deneb: blockToPropose}
b := &eth.GenericSignedBeaconBlock{Block: d}
server := &Server{
Broadcaster: &mockp2p.MockBroadcaster{},
FinalizationFetcher: &chainMock.ChainService{NotFinalized: true},
}
blk, err := blocks.NewSignedBeaconBlock(b.Block)
require.NoError(t, err)
require.NoError(t, server.broadcastSeenBlockSidecars(context.Background(), blk, b.GetDeneb().Blobs, b.GetDeneb().KzgProofs))
require.LogsDoNotContain(t, hook, "Broadcasted blob sidecar for already seen block")
server.FinalizationFetcher = &chainMock.ChainService{NotFinalized: false}
require.NoError(t, server.broadcastSeenBlockSidecars(context.Background(), blk, b.GetDeneb().Blobs, b.GetDeneb().KzgProofs))
require.LogsContain(t, hook, "Broadcasted blob sidecar for already seen block")
}

View File

@@ -369,7 +369,16 @@ func decodeIds(w http.ResponseWriter, st state.BeaconState, rawIds []string, ign
func valsFromIds(w http.ResponseWriter, st state.BeaconState, ids []primitives.ValidatorIndex) ([]state.ReadOnlyValidator, bool) {
var vals []state.ReadOnlyValidator
if len(ids) == 0 {
vals = st.ValidatorsReadOnly()
allVals := st.Validators()
vals = make([]state.ReadOnlyValidator, len(allVals))
for i, val := range allVals {
readOnlyVal, err := statenative.NewValidator(val)
if err != nil {
httputil.HandleError(w, "Could not convert validator: "+err.Error(), http.StatusInternalServerError)
return nil, false
}
vals[i] = readOnlyVal
}
} else {
vals = make([]state.ReadOnlyValidator, 0, len(ids))
for _, id := range ids {

View File

@@ -108,7 +108,7 @@ func (*Server) GetVersion(w http.ResponseWriter, r *http.Request) {
// GetHealth returns node health status in http status codes. Useful for load balancers.
func (s *Server) GetHealth(w http.ResponseWriter, r *http.Request) {
ctx, span := trace.StartSpan(r.Context(), "node.GetHealth")
_, span := trace.StartSpan(r.Context(), "node.GetHealth")
defer span.End()
rawSyncingStatus, syncingStatus, ok := shared.UintFromQuery(w, r, "syncing_status", false)
@@ -119,14 +119,10 @@ func (s *Server) GetHealth(w http.ResponseWriter, r *http.Request) {
return
}
optimistic, err := s.OptimisticModeFetcher.IsOptimistic(ctx)
if err != nil {
httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
}
if s.SyncChecker.Synced() && !optimistic {
if s.SyncChecker.Synced() {
return
}
if s.SyncChecker.Syncing() || optimistic {
if s.SyncChecker.Syncing() || s.SyncChecker.Initialized() {
if rawSyncingStatus != "" {
w.WriteHeader(intSyncingStatus)
} else {
@@ -134,6 +130,5 @@ func (s *Server) GetHealth(w http.ResponseWriter, r *http.Request) {
}
return
}
w.WriteHeader(http.StatusServiceUnavailable)
}
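
The GetHealth branches are spread across two hunks, so here is a condensed sketch of the resulting decision: 200 when synced (and, on the side of the diff that consults the OptimisticModeFetcher, not optimistic), the syncing status while syncing (206 by default, or the caller-supplied syncing_status value), and 503 otherwise. The boolean parameters are stand-ins for the checker and fetcher calls.

```go
// Condensed sketch of the health-endpoint status selection shown above.
package nodedemo

import "net/http"

func healthStatus(synced, syncing, optimistic bool, customSyncingStatus int) int {
	if synced && !optimistic {
		return http.StatusOK // 200
	}
	if syncing || optimistic {
		if customSyncingStatus != 0 {
			return customSyncingStatus // value from ?syncing_status=
		}
		return http.StatusPartialContent // 206
	}
	return http.StatusServiceUnavailable // 503
}
```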

View File

@@ -91,10 +91,8 @@ func TestGetVersion(t *testing.T) {
func TestGetHealth(t *testing.T) {
checker := &syncmock.Sync{}
optimisticFetcher := &mock.ChainService{Optimistic: false}
s := &Server{
SyncChecker: checker,
OptimisticModeFetcher: optimisticFetcher,
SyncChecker: checker,
}
request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/node/health", nil)
@@ -103,30 +101,25 @@ func TestGetHealth(t *testing.T) {
s.GetHealth(writer, request)
assert.Equal(t, http.StatusServiceUnavailable, writer.Code)
checker.IsSyncing = true
checker.IsSynced = false
checker.IsInitialized = true
request = httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/node/health", nil)
writer = httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.GetHealth(writer, request)
assert.Equal(t, http.StatusPartialContent, writer.Code)
request = httptest.NewRequest(http.MethodGet, fmt.Sprintf("http://example.com/eth/v1/node/health?syncing_status=%d", http.StatusPaymentRequired), nil)
writer = httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.GetHealth(writer, request)
assert.Equal(t, http.StatusPaymentRequired, writer.Code)
checker.IsSyncing = false
checker.IsSynced = true
request = httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/node/health", nil)
writer = httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.GetHealth(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
checker.IsSyncing = false
checker.IsSynced = true
optimisticFetcher.Optimistic = true
request = httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/node/health", nil)
writer = httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.GetHealth(writer, request)
assert.Equal(t, http.StatusPartialContent, writer.Code)
}
func TestGetIdentity(t *testing.T) {

View File

@@ -15,9 +15,7 @@ go_library(
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/epoch/precompute:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/core/validators:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/rpc/eth/shared:go_default_library",
"//beacon-chain/rpc/lookup:go_default_library",
"//beacon-chain/state:go_default_library",
@@ -37,10 +35,7 @@ go_library(
go_test(
name = "go_default_test",
srcs = [
"handlers_test.go",
"service_test.go",
],
srcs = ["handlers_test.go"],
embed = [":go_default_library"],
deps = [
"//api/server/structs:go_default_library",
@@ -48,8 +43,6 @@ go_test(
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/rpc/testutil:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stategen/mock:go_default_library",

View File

@@ -18,7 +18,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/altair"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
dbutil "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/testutil"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
mockstategen "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/stategen/mock"
@@ -193,7 +192,6 @@ func BlockRewardTestSetup(t *testing.T, forkName string) (state.BeaconState, int
}
func TestBlockRewards(t *testing.T) {
db := dbutil.SetupDB(t)
phase0block, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
t.Run("phase 0", func(t *testing.T) {
@@ -229,10 +227,7 @@ func TestBlockRewards(t *testing.T) {
}},
OptimisticModeFetcher: mockChainService,
FinalizationFetcher: mockChainService,
BlockRewardFetcher: &BlockRewardService{
Replayer: mockstategen.NewReplayerBuilder(mockstategen.WithMockState(st)),
DB: db,
},
BlockRewardFetcher: &BlockRewardService{Replayer: mockstategen.NewReplayerBuilder(mockstategen.WithMockState(st))},
}
url := "http://only.the.slot.number.at.the.end.is.important/2"
@@ -265,10 +260,7 @@ func TestBlockRewards(t *testing.T) {
}},
OptimisticModeFetcher: mockChainService,
FinalizationFetcher: mockChainService,
BlockRewardFetcher: &BlockRewardService{
Replayer: mockstategen.NewReplayerBuilder(mockstategen.WithMockState(st)),
DB: db,
},
BlockRewardFetcher: &BlockRewardService{Replayer: mockstategen.NewReplayerBuilder(mockstategen.WithMockState(st))},
}
url := "http://only.the.slot.number.at.the.end.is.important/2"
@@ -301,10 +293,7 @@ func TestBlockRewards(t *testing.T) {
}},
OptimisticModeFetcher: mockChainService,
FinalizationFetcher: mockChainService,
BlockRewardFetcher: &BlockRewardService{
Replayer: mockstategen.NewReplayerBuilder(mockstategen.WithMockState(st)),
DB: db,
},
BlockRewardFetcher: &BlockRewardService{Replayer: mockstategen.NewReplayerBuilder(mockstategen.WithMockState(st))},
}
url := "http://only.the.slot.number.at.the.end.is.important/2"
@@ -337,10 +326,7 @@ func TestBlockRewards(t *testing.T) {
}},
OptimisticModeFetcher: mockChainService,
FinalizationFetcher: mockChainService,
BlockRewardFetcher: &BlockRewardService{
Replayer: mockstategen.NewReplayerBuilder(mockstategen.WithMockState(st)),
DB: db,
},
BlockRewardFetcher: &BlockRewardService{Replayer: mockstategen.NewReplayerBuilder(mockstategen.WithMockState(st))},
}
url := "http://only.the.slot.number.at.the.end.is.important/2"
@@ -729,9 +715,7 @@ func TestSyncCommiteeRewards(t *testing.T) {
}},
OptimisticModeFetcher: mockChainService,
FinalizationFetcher: mockChainService,
BlockRewardFetcher: &BlockRewardService{
Replayer: mockstategen.NewReplayerBuilder(mockstategen.WithMockState(st)),
DB: dbutil.SetupDB(t)},
BlockRewardFetcher: &BlockRewardService{Replayer: mockstategen.NewReplayerBuilder(mockstategen.WithMockState(st))},
}
t.Run("ok - filtered vals", func(t *testing.T) {

View File

@@ -8,9 +8,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/api/server/structs"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/altair"
coreblocks "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/validators"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state/stategen"
consensusblocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
@@ -28,7 +26,6 @@ type BlockRewardsFetcher interface {
// BlockRewardService implements BlockRewardsFetcher and can be declared to access the underlying functions
type BlockRewardService struct {
Replayer stategen.ReplayerBuilder
DB db.HeadAccessDatabase
}
// GetBlockRewardsData returns the BlockRewards object which is used for the BlockRewardsResponse and ProduceBlockV3.
@@ -127,22 +124,6 @@ func (rs *BlockRewardService) GetStateForRewards(ctx context.Context, blk interf
// We want to run several block processing functions that update the proposer's balance.
// This will allow us to calculate proposer rewards for each operation (atts, slashings etc).
// To do this, we replay the state up to the block's slot, but before processing the block.
// Try getting the state from the next slot cache first.
_, prevSlotRoots, err := rs.DB.BlockRootsBySlot(ctx, slots.PrevSlot(blk.Slot()))
if err != nil {
return nil, &httputil.DefaultJsonError{
Message: "Could not get roots for previous slot: " + err.Error(),
Code: http.StatusInternalServerError,
}
}
for _, r := range prevSlotRoots {
s := transition.NextSlotState(r[:], blk.Slot())
if s != nil {
return s, nil
}
}
st, err := rs.Replayer.ReplayerForSlot(slots.PrevSlot(blk.Slot())).ReplayToSlot(ctx, blk.Slot())
if err != nil {
return nil, &httputil.DefaultJsonError{
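Below is a minimal, self-contained sketch of the pattern shown in this hunk: prefer a state cached under a parent root seen at the previous slot, and fall back to replaying only when no cached entry matches. All names here (beaconState, stateCache, replayToSlot, stateForRewards) are hypothetical stand-ins, not the Prysm API.

package main

import (
	"errors"
	"fmt"
)

type beaconState struct{ slot uint64 }

// stateCache maps a parent block root to the state already advanced to the next slot.
type stateCache map[[32]byte]*beaconState

// replayToSlot stands in for the slower path that rebuilds the state by replaying
// blocks up to (but not including) the requested slot.
func replayToSlot(slot uint64) (*beaconState, error) {
	return &beaconState{slot: slot}, nil
}

// stateForRewards prefers a cached next-slot state keyed by any root seen at the
// previous slot, and only replays when no cached entry matches.
func stateForRewards(cache stateCache, prevSlotRoots [][32]byte, slot uint64) (*beaconState, error) {
	for _, r := range prevSlotRoots {
		if st, ok := cache[r]; ok && st != nil {
			return st, nil // cache hit: no replay needed
		}
	}
	st, err := replayToSlot(slot)
	if err != nil {
		return nil, errors.New("could not replay state: " + err.Error())
	}
	return st, nil
}

func main() {
	root := [32]byte{1}
	cache := stateCache{root: &beaconState{slot: 42}}
	st, _ := stateForRewards(cache, [][32]byte{root}, 42)
	fmt.Println("state slot:", st.slot)
}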

View File

@@ -1,46 +0,0 @@
package rewards
import (
"context"
"testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
dbutil "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
)
func TestGetStateForRewards_NextSlotCacheHit(t *testing.T) {
ctx := context.Background()
db := dbutil.SetupDB(t)
st, err := util.NewBeaconStateDeneb()
require.NoError(t, err)
b := util.HydrateSignedBeaconBlockDeneb(util.NewBeaconBlockDeneb())
parent, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, db.SaveBlock(ctx, parent))
r, err := parent.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, transition.UpdateNextSlotCache(ctx, r[:], st))
s := &BlockRewardService{
Replayer: nil, // setting to nil because replayer must not be invoked
DB: db,
}
b = util.HydrateSignedBeaconBlockDeneb(util.NewBeaconBlockDeneb())
sbb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
sbb.SetSlot(parent.Block().Slot() + 1)
result, err := s.GetStateForRewards(ctx, sbb.Block())
require.NoError(t, err)
_, lcs := transition.LastCachedState()
expected, err := lcs.HashTreeRoot(ctx)
require.NoError(t, err)
actual, err := result.HashTreeRoot(ctx)
require.NoError(t, err)
assert.DeepEqual(t, expected, actual)
}

View File

@@ -223,7 +223,7 @@ func (p *BeaconDbBlocker) Blobs(ctx context.Context, id string, indices []uint64
return nil, &core.RpcError{Err: errors.Wrap(err, "failed to retrieve block from db"), Reason: core.Internal}
}
// if block is not in the retention window return 200 w/ empty list
if !p.BlobStorage.WithinRetentionPeriod(slots.ToEpoch(b.Block().Slot()), slots.ToEpoch(p.GenesisTimeFetcher.CurrentSlot())) {
if !params.WithinDAPeriod(slots.ToEpoch(b.Block().Slot()), slots.ToEpoch(p.GenesisTimeFetcher.CurrentSlot())) {
return make([]*blocks.VerifiedROBlob, 0), nil
}
commitments, err := b.Block().Body().BlobKzgCommitments()
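For context, a hedged sketch of the retention-window check used here: blobs are served only when the block's epoch lies within the blob retention window of the current epoch. The constant below mirrors the spec's MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS value of 4096 but is an assumption for illustration only, and withinRetention is a hypothetical helper, not the Prysm API.

package main

import "fmt"

const minEpochsForBlobRequests = 4096 // illustrative retention window, in epochs

// withinRetention reports whether a block's blobs are still within the serving window.
func withinRetention(blockEpoch, currentEpoch uint64) bool {
	if currentEpoch < minEpochsForBlobRequests {
		return true // chain is younger than the window
	}
	return blockEpoch >= currentEpoch-minEpochsForBlobRequests
}

func main() {
	fmt.Println(withinRetention(10, 5000))   // false: outside the window, serve an empty list
	fmt.Println(withinRetention(4000, 5000)) // true: still within the window
}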

View File

@@ -4,10 +4,10 @@ go_library(
name = "go_default_library",
srcs = [
"aggregator.go",
"duties.go",
"attester.go",
"blocks.go",
"construct_generic_block.go",
"duties.go",
"exit.go",
"log.go",
"proposer.go",
@@ -179,10 +179,10 @@ go_test(
timeout = "moderate",
srcs = [
"aggregator_test.go",
"duties_test.go",
"attester_test.go",
"blocks_test.go",
"construct_generic_block_test.go",
"duties_test.go",
"exit_test.go",
"proposer_altair_test.go",
"proposer_attestations_test.go",
@@ -201,7 +201,6 @@ go_test(
"status_mainnet_test.go",
"status_test.go",
"sync_committee_test.go",
"unblinder_test.go",
"validator_test.go",
],
embed = [":go_default_library"],

View File

@@ -341,7 +341,7 @@ func (vs *Server) handleUnblindedBlock(block interfaces.SignedBeaconBlock, req *
if dbBlockContents == nil {
return nil, nil
}
return BuildBlobSidecars(block, dbBlockContents.Blobs, dbBlockContents.KzgProofs)
return buildBlobSidecars(block, dbBlockContents.Blobs, dbBlockContents.KzgProofs)
}
// broadcastReceiveBlock broadcasts a block and handles its reception.

View File

@@ -27,20 +27,11 @@ import (
"go.opencensus.io/trace"
)
var (
builderValueGweiGauge = promauto.NewGauge(prometheus.GaugeOpts{
Name: "builder_value_gwei",
Help: "Builder payload value in gwei",
})
localValueGweiGauge = promauto.NewGauge(prometheus.GaugeOpts{
Name: "local_value_gwei",
Help: "Local payload value in gwei",
})
builderGetPayloadMissCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "builder_get_payload_miss_count",
Help: "The number of get payload misses for validator requests to builder",
})
)
// builderGetPayloadMissCount tracks the number of misses when validator tries to get a payload from builder
var builderGetPayloadMissCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "builder_get_payload_miss_count",
Help: "The number of get payload misses for validator requests to builder",
})
// emptyTransactionsRoot represents the returned value of ssz.TransactionsRoot([][]byte{}) and
// can be used as a constant to avoid recomputing this value in every call.
@@ -92,18 +83,15 @@ func setExecutionData(ctx context.Context, blk interfaces.SignedBeaconBlock, loc
// Use builder payload if the following in true:
// builder_bid_value * builderBoostFactor(default 100) > local_block_value * (local-block-value-boost + 100)
boost := params.BeaconConfig().LocalBlockValueBoost
if boost > 0 && builderBoostFactor > defaultBuilderBoostFactor {
higherValueBuilder := builderValueGwei*builderBoostFactor > localValueGwei*(100+boost)
if boost > 0 && builderBoostFactor != defaultBuilderBoostFactor {
log.WithFields(logrus.Fields{
"localGweiValue": localValueGwei,
"localBoostPercentage": boost,
"builderGweiValue": builderValueGwei,
"builderBoostFactor": builderBoostFactor,
}).Warn("Proposer: builder boost will be ignored, because local boost is activated and builder boost is above the default")
builderBoostFactor = defaultBuilderBoostFactor
}).Warn("Proposer: both local boost and builder boost are using non default values")
}
higherValueBuilder := builderValueGwei*builderBoostFactor > localValueGwei*(100+boost)
builderValueGweiGauge.Set(float64(builderValueGwei))
localValueGweiGauge.Set(float64(localValueGwei))
// If we can't get the builder value, just use local block.
if higherValueBuilder && withdrawalsMatched { // Builder value is higher and withdrawals match.
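A small runnable sketch of the comparison described in the comment above, assuming the formula builder_value * builderBoostFactor > local_value * (100 + localBoostPercent), with a default boost factor of 100 so both sides stay on the same percentage scale. useBuilderPayload is an illustrative name, not Prysm's function.

package main

import "fmt"

const defaultBuilderBoostFactor = uint64(100)

// useBuilderPayload returns true when the scaled builder value beats the scaled local value.
func useBuilderPayload(builderGwei, localGwei, builderBoostFactor, localBoostPercent uint64) bool {
	return builderGwei*builderBoostFactor > localGwei*(100+localBoostPercent)
}

func main() {
	// Builder bids 1.05x the local value; a 10% local boost keeps the local payload.
	fmt.Println(useBuilderPayload(105, 100, defaultBuilderBoostFactor, 10)) // false
	// With no local boost, the same bid selects the builder payload.
	fmt.Println(useBuilderPayload(105, 100, defaultBuilderBoostFactor, 0)) // true
	// A boost factor of 0 forces the local payload regardless of the bid.
	fmt.Println(useBuilderPayload(105, 100, 0, 0)) // false
}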

View File

@@ -290,7 +290,7 @@ func TestServer_setExecutionData(t *testing.T) {
require.NoError(t, err)
require.Equal(t, uint64(2), e.BlockNumber()) // builder block
})
t.Run("Builder has higher value but forced to local payload with builder boost factor at 0", func(t *testing.T) {
t.Run("Builder builder has higher value but forced to local payload with builder boost factor", func(t *testing.T) {
blk, err := blocks.NewSignedBeaconBlock(util.NewBlindedBeaconBlockCapella())
require.NoError(t, err)
require.NoError(t, vs.BeaconDB.SaveRegistrationsByValidatorIDs(ctx, []primitives.ValidatorIndex{blk.Block().ProposerIndex()},
@@ -370,23 +370,6 @@ func TestServer_setExecutionData(t *testing.T) {
require.LogsContain(t, hook, "builderGweiValue=1 localBoostPercentage=0 localGweiValue=2")
})
t.Run("Builder configured. Local block has higher value even with higher builder boost factor", func(t *testing.T) {
blk, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlockCapella())
require.NoError(t, err)
vs.ExecutionEngineCaller = &powtesting.EngineClient{PayloadIDBytes: id, ExecutionPayloadCapella: &v1.ExecutionPayloadCapella{BlockNumber: 3}, BlockValue: 2 * 1e9}
b := blk.Block()
localPayload, _, err := vs.getLocalPayload(ctx, b, capellaTransitionState)
require.NoError(t, err)
builderPayload, builderKzgCommitments, err := vs.getBuilderPayloadAndBlobs(ctx, b.Slot(), b.ProposerIndex())
require.NoError(t, err)
require.DeepEqual(t, [][]uint8(nil), builderKzgCommitments)
require.NoError(t, setExecutionData(context.Background(), blk, localPayload, builderPayload, builderKzgCommitments, 99999))
e, err := blk.Block().Body().Execution()
require.NoError(t, err)
require.Equal(t, uint64(3), e.BlockNumber()) // Local block
require.LogsContain(t, hook, "builderGweiValue=1 localBoostPercentage=0 localGweiValue=2")
})
t.Run("Builder configured. Local block and local boost has higher value", func(t *testing.T) {
cfg := params.BeaconConfig().Copy()
cfg.LocalBlockValueBoost = 1 // Boost 1%.

View File

@@ -56,8 +56,8 @@ func (c *blobsBundleCache) prune(minSlot primitives.Slot) {
}
}
// BuildBlobSidecars given a block, builds the blob sidecars for the block.
func BuildBlobSidecars(blk interfaces.SignedBeaconBlock, blobs [][]byte, kzgProofs [][]byte) ([]*ethpb.BlobSidecar, error) {
// buildBlobSidecars given a block, builds the blob sidecars for the block.
func buildBlobSidecars(blk interfaces.SignedBeaconBlock, blobs [][]byte, kzgProofs [][]byte) ([]*ethpb.BlobSidecar, error) {
if blk.Version() < version.Deneb {
return nil, nil // No blobs before deneb.
}

View File

@@ -51,7 +51,7 @@ func TestServer_buildBlobSidecars(t *testing.T) {
require.NoError(t, blk.SetBlobKzgCommitments(kzgCommitments))
proof, err := hexutil.Decode("0xb4021b0de10f743893d4f71e1bf830c019e832958efd6795baf2f83b8699a9eccc5dc99015d8d4d8ec370d0cc333c06a")
require.NoError(t, err)
scs, err := BuildBlobSidecars(blk, [][]byte{
scs, err := buildBlobSidecars(blk, [][]byte{
make([]byte, fieldparams.BlobLength), make([]byte, fieldparams.BlobLength),
}, [][]byte{
proof, proof,

View File

@@ -13,22 +13,15 @@ import (
)
func unblindBlobsSidecars(block interfaces.SignedBeaconBlock, bundle *enginev1.BlobsBundle) ([]*ethpb.BlobSidecar, error) {
if block.Version() < version.Deneb {
if block.Version() < version.Deneb || bundle == nil {
return nil, nil
}
body := block.Block().Body()
blockCommitments, err := body.BlobKzgCommitments()
header, err := block.Header()
if err != nil {
return nil, err
}
if len(blockCommitments) == 0 {
return nil, nil
}
// Do not allow builders to provide no blob bundles for blocks which carry commitments.
if bundle == nil {
return nil, errors.New("no valid bundle provided")
}
header, err := block.Header()
body := block.Block().Body()
blockCommitments, err := body.BlobKzgCommitments()
if err != nil {
return nil, err
}
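One side of this hunk rejects a nil builder bundle when the block carries commitments; the other returns nil silently. The sketch below (hypothetical types, not the validator package) illustrates the stricter variant: pre-Deneb blocks and commitment-free blocks yield no sidecars, while a commitment-carrying block with a nil bundle is an error.

package main

import (
	"errors"
	"fmt"
)

type blobsBundle struct{ blobs [][]byte }

// sidecarsFromBundle returns nil for blocks that cannot or need not carry blobs, and an
// error when commitments are present but the builder supplied no bundle.
func sidecarsFromBundle(supportsBlobs bool, commitments [][]byte, bundle *blobsBundle) ([][]byte, error) {
	if !supportsBlobs {
		return nil, nil // pre-Deneb: nothing to unblind
	}
	if len(commitments) == 0 {
		return nil, nil // no commitments, no sidecars expected
	}
	if bundle == nil {
		return nil, errors.New("no valid bundle provided")
	}
	return bundle.blobs, nil
}

func main() {
	_, err := sidecarsFromBundle(true, [][]byte{[]byte("c")}, nil)
	fmt.Println(err) // no valid bundle provided
}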

View File

@@ -1,34 +0,0 @@
package validator
import (
"testing"
consensusblocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
)
func TestUnblinder_UnblindBlobSidecars_InvalidBundle(t *testing.T) {
wBlock, err := consensusblocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockDeneb{
Block: &ethpb.BeaconBlockDeneb{
Body: &ethpb.BeaconBlockBodyDeneb{},
},
Signature: nil,
})
assert.NoError(t, err)
_, err = unblindBlobsSidecars(wBlock, nil)
assert.NoError(t, err)
wBlock, err = consensusblocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockDeneb{
Block: &ethpb.BeaconBlockDeneb{
Body: &ethpb.BeaconBlockBodyDeneb{
BlobKzgCommitments: [][]byte{[]byte("a"), []byte("b")},
},
},
Signature: nil,
})
assert.NoError(t, err)
_, err = unblindBlobsSidecars(wBlock, nil)
assert.ErrorContains(t, "no valid bundle provided", err)
}

View File

@@ -215,7 +215,7 @@ func NewService(ctx context.Context, cfg *Config) *Service {
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
BlobStorage: s.cfg.BlobStorage,
}
rewardFetcher := &rewards.BlockRewardService{Replayer: ch, DB: s.cfg.BeaconDB}
rewardFetcher := &rewards.BlockRewardService{Replayer: ch}
coreService := &core.Service{
HeadFetcher: s.cfg.HeadFetcher,
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,

View File

@@ -19,7 +19,6 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/slasher",
visibility = [
"//beacon-chain:__subpackages__",
"//cmd/prysmctl:__subpackages__",
"//testing/slasher/simulator:__subpackages__",
],
deps = [
@@ -28,7 +27,6 @@ go_library(
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/slasherkv:go_default_library",
"//beacon-chain/operations/slashings:go_default_library",
"//beacon-chain/slasher/types:go_default_library",
"//beacon-chain/startup:go_default_library",
@@ -47,7 +45,6 @@ go_library(
"@com_github_prysmaticlabs_fastssz//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
"@org_golang_x_exp//maps:go_default_library",
],
)

View File

@@ -208,8 +208,8 @@ func (m *MinSpanChunksSlice) CheckSlashable(
}
if existingAttWrapper == nil {
// This case should normally not happen. If this happens, it means we previously
// recorded in our min/max DB a distance corresponding to an attestation, but WITHOUT
// This case should normally not happen. If this happen, it means we previously
// recorded in our min/max DB an distance corresponding to an attestaiton, but WITHOUT
// recording the attestation itself. As a consequence, we say there is no surrounding vote,
// but we log an error.
fields := logrus.Fields{
@@ -287,8 +287,8 @@ func (m *MaxSpanChunksSlice) CheckSlashable(
}
if existingAttWrapper == nil {
// This case should normally not happen. If this happens, it means we previously
// recorded in our min/max DB a distance corresponding to an attestation, but WITHOUT
// This case should normally not happen. If this happen, it means we previously
// recorded in our min/max DB an distance corresponding to an attestaiton, but WITHOUT
// recording the attestation itself. As a consequence, we say there is no surrounded vote,
// but we log an error.
fields := logrus.Fields{

View File

@@ -11,7 +11,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"go.opencensus.io/trace"
"golang.org/x/exp/maps"
)
// Takes in a list of indexed attestation wrappers and returns any
@@ -132,7 +131,7 @@ func (s *Service) checkSurroundVotes(
}
// Update the latest updated epoch for all validators involved to the current chunk.
indexes := s.params.ValidatorIndexesInChunk(validatorChunkIndex)
indexes := s.params.validatorIndexesInChunk(validatorChunkIndex)
for _, index := range indexes {
s.latestEpochUpdatedForValidator[index] = currentEpoch
}
@@ -273,20 +272,44 @@ func (s *Service) updatedChunkByChunkIndex(
// minFirstEpochToUpdate is set to the smallest first epoch to update for all validators in the chunk
// corresponding to the `validatorChunkIndex`.
var (
minFirstEpochToUpdate *primitives.Epoch
neededChunkIndexesMap map[uint64]bool
var minFirstEpochToUpdate *primitives.Epoch
err error
)
validatorIndexes := s.params.ValidatorIndexesInChunk(validatorChunkIndex)
neededChunkIndexesMap := map[uint64]bool{}
if neededChunkIndexesMap, err = s.findNeededChunkIndexes(validatorIndexes, currentEpoch, minFirstEpochToUpdate); err != nil {
return nil, errors.Wrap(err, "could not find the needed chunk indexed")
validatorIndexes := s.params.validatorIndexesInChunk(validatorChunkIndex)
for _, validatorIndex := range validatorIndexes {
// Retrieve the first epoch to write for the validator index.
isAnEpochToUpdate, firstEpochToUpdate, err := s.firstEpochToUpdate(validatorIndex, currentEpoch)
if err != nil {
return nil, errors.Wrapf(err, "could not get first epoch to write for validator index %d with current epoch %d", validatorIndex, currentEpoch)
}
if !isAnEpochToUpdate {
// If there is no epoch to write, skip.
continue
}
// If, for this validator index, the chunk corresponding to the first epoch to write
// (and all following epochs until the current epoch) are already flagged as needed,
// skip.
if minFirstEpochToUpdate != nil && *minFirstEpochToUpdate <= firstEpochToUpdate {
continue
}
minFirstEpochToUpdate = &firstEpochToUpdate
// Add new needed chunk indexes to the map.
for i := firstEpochToUpdate; i <= currentEpoch; i++ {
chunkIndex := s.params.chunkIndex(i)
neededChunkIndexesMap[chunkIndex] = true
}
}
// Transform the map of needed chunk indexes to a slice.
neededChunkIndexes := maps.Keys(neededChunkIndexesMap)
// Get the list of needed chunk indexes.
neededChunkIndexes := make([]uint64, 0, len(neededChunkIndexesMap))
for chunkIndex := range neededChunkIndexesMap {
neededChunkIndexes = append(neededChunkIndexes, chunkIndex)
}
// Retrieve needed chunks from the database.
chunkByChunkIndex, err := s.loadChunksFromDisk(ctx, validatorChunkIndex, chunkKind, neededChunkIndexes)
@@ -309,7 +332,7 @@ func (s *Service) updatedChunkByChunkIndex(
epochToUpdate := firstEpochToUpdate
for epochToUpdate <= currentEpoch {
// Get the chunk index for the epoch to write.
// Get the chunk index for the ecpoh to write.
chunkIndex := s.params.chunkIndex(epochToUpdate)
// Get the chunk corresponding to the chunk index from the `chunkByChunkIndex` map.
@@ -340,45 +363,6 @@ func (s *Service) updatedChunkByChunkIndex(
return chunkByChunkIndex, nil
}
// findNeededChunkIndexes returns a map of chunk indexes
// it loops over the validator indexes and finds the first epoch to update for each validator index.
func (s *Service) findNeededChunkIndexes(
validatorIndexes []primitives.ValidatorIndex,
currentEpoch primitives.Epoch,
minFirstEpochToUpdate *primitives.Epoch,
) (map[uint64]bool, error) {
neededChunkIndexesMap := map[uint64]bool{}
for _, validatorIndex := range validatorIndexes {
// Retrieve the first epoch to write for the validator index.
isAnEpochToUpdate, firstEpochToUpdate, err := s.firstEpochToUpdate(validatorIndex, currentEpoch)
if err != nil {
return nil, errors.Wrapf(err, "could not get first epoch to write for validator index %d with current epoch %d", validatorIndex, currentEpoch)
}
if !isAnEpochToUpdate {
// If there is no epoch to write, skip.
continue
}
// If, for this validator index, the chunk corresponding to the first epoch to write
// (and all following epochs until the current epoch) are already flagged as needed,
// skip.
if minFirstEpochToUpdate != nil && *minFirstEpochToUpdate <= firstEpochToUpdate {
continue
}
minFirstEpochToUpdate = &firstEpochToUpdate
// Add new needed chunk indexes to the map.
for i := firstEpochToUpdate; i <= currentEpoch; i++ {
chunkIndex := s.params.chunkIndex(i)
neededChunkIndexesMap[chunkIndex] = true
}
}
return neededChunkIndexesMap, nil
}
// firstEpochToUpdate, given a validator index and the current epoch, returns a boolean indicating
// if there is an epoch to write. If it is the case, it returns the first epoch to write.
func (s *Service) firstEpochToUpdate(validatorIndex primitives.ValidatorIndex, currentEpoch primitives.Epoch) (bool, primitives.Epoch, error) {
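An illustrative sketch of the chunk-collection step described above (hypothetical types and constants, and it omits the minFirstEpochToUpdate short-circuit): for every validator with pending epochs, flag the chunk holding each epoch from its first pending epoch through the current epoch, deduplicating with a set.

package main

import "fmt"

const (
	chunkSize     = 16   // C: epochs per chunk
	historyLength = 4096 // N: epochs of history retained
)

// chunkIndex mirrors the (e % N) / C mapping used by min/max span chunks.
func chunkIndex(epoch uint64) uint64 {
	return (epoch % historyLength) / chunkSize
}

// neededChunkIndexes returns the set of chunk indexes touched by pending updates.
// firstEpochByValidator holds, per validator index, the first epoch still to be written;
// validators with nothing pending are simply absent from the map.
func neededChunkIndexes(firstEpochByValidator map[uint64]uint64, currentEpoch uint64) map[uint64]bool {
	needed := map[uint64]bool{}
	for _, first := range firstEpochByValidator {
		for e := first; e <= currentEpoch; e++ {
			needed[chunkIndex(e)] = true
		}
	}
	return needed
}

func main() {
	// Two validators, one lagging by a full chunk: chunks 0 and 1 both need updating.
	pending := map[uint64]uint64{7: 3, 9: 20}
	fmt.Println(neededChunkIndexes(pending, 25)) // map[0:true 1:true]
}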

View File

@@ -1059,7 +1059,7 @@ func Test_updatedChunkByChunkIndex(t *testing.T) {
// Initialize the slasher database.
slasherDB := dbtest.SetupSlasherDB(t)
// Initialize the slasher service.
// Intialize the slasher service.
service := &Service{
params: &Parameters{
chunkSize: tt.chunkSize,
@@ -1502,7 +1502,7 @@ func runAttestationsBenchmark(b *testing.B, s *Service, numAtts, numValidators u
func Benchmark_checkSurroundVotes(b *testing.B) {
const (
// Approximately the number of Holesky active validators on 2024-02-16
// Approximatively the number of Holesky active validators on 2024-02-16
// This number is both a multiple of 32 (the number of slots per epoch) and 256 (the number of validators per chunk)
validatorsCount = 1_638_400
slotsPerEpoch = 32
@@ -1526,7 +1526,7 @@ func Benchmark_checkSurroundVotes(b *testing.B) {
// So for 1_638_400 validators with 32 slots per epoch, we would have 48_000 attestation wrappers per slot.
// With 256 validators per chunk, we would have only 188 modified chunks.
//
// In this benchmark, we use the worst case scenario where attesting validators are evenly split across all validators chunks.
// In this benchmark, we use the worst case scenario where attestating validators are evenly splitted across all validators chunks.
// We also suppose that only one chunk per validator chunk index is modified.
// For one given validator index, multiple chunk indexes could be modified.
//

View File

@@ -135,7 +135,7 @@ With 1_048_576 validators, we need 4096 * 2MB = 8GB
Storing both MIN and MAX spans for 1_048_576 validators takes 16GB.
Each chunk is stored snappy-compressed in the database.
If all validators attest ideally, a MIN SPAN chunk will contain only `2`s, and MAX SPAN chunk will contain only `0`s.
If all validators attest ideally, a MIN SPAN chunk will contain only `2`s, and and MAX SPAN chunk will contain only `0`s.
This will compress very well, and will let us store a lot of data in a small amount of space.
*/
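A back-of-the-envelope check of the sizing claim above, assuming 2 bytes per epoch entry and 4096 epochs of retained history; this is an illustration of the arithmetic, not a measurement of the actual database.

package main

import "fmt"

func main() {
	const (
		validators    = 1_048_576
		historyLength = 4096 // epochs retained per validator
		bytesPerEntry = 2    // one uint16 distance per epoch
	)
	perSpan := uint64(validators) * historyLength * bytesPerEntry // one of MIN or MAX
	fmt.Printf("one span set: %d GiB\n", perSpan>>30)             // 8 GiB
	fmt.Printf("min + max:    %d GiB\n", (2*perSpan)>>30)         // 16 GiB
}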

View File

@@ -2,11 +2,8 @@ package slasher
import (
"bytes"
"context"
"fmt"
"strconv"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/slasherkv"
slashertypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/slasher/types"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
@@ -79,7 +76,7 @@ func (s *Service) filterAttestations(
continue
}
// If an attestation's target epoch is in the future, we defer processing for later.
// If an attestations's target epoch is in the future, we defer processing for later.
if attWrapper.IndexedAttestation.Data.Target.Epoch > currentEpoch {
validInFuture = append(validInFuture, attWrapper)
continue
@@ -162,93 +159,3 @@ func isDoubleProposal(incomingSigningRoot, existingSigningRoot [32]byte) bool {
}
return incomingSigningRoot != existingSigningRoot
}
type GetChunkFromDatabaseFilters struct {
ChunkKind slashertypes.ChunkKind
ValidatorIndex primitives.ValidatorIndex
SourceEpoch primitives.Epoch
IsDisplayAllValidatorsInChunk bool
IsDisplayAllEpochsInChunk bool
}
// GetChunkFromDatabase Utility function aiming at retrieving a chunk from the
// database.
func GetChunkFromDatabase(
ctx context.Context,
dbPath string,
filters GetChunkFromDatabaseFilters,
params *Parameters,
) (lastEpochForValidatorIndex primitives.Epoch, chunkIndex, validatorChunkIndex uint64, chunk Chunker, err error) {
// init store
d, err := slasherkv.NewKVStore(ctx, dbPath)
if err != nil {
return lastEpochForValidatorIndex, chunkIndex, validatorChunkIndex, chunk, fmt.Errorf("could not open database at path %s: %w", dbPath, err)
}
defer closeDB(d)
// init service
s := Service{
params: params,
serviceCfg: &ServiceConfig{
Database: d,
},
}
// variables
validatorIndex := filters.ValidatorIndex
sourceEpoch := filters.SourceEpoch
chunkKind := filters.ChunkKind
validatorChunkIndex = s.params.validatorChunkIndex(validatorIndex)
chunkIndex = s.params.chunkIndex(sourceEpoch)
// before getting the chunk, we need to verify if the requested epoch is in database
lastEpochForValidator, err := s.serviceCfg.Database.LastEpochWrittenForValidators(ctx, []primitives.ValidatorIndex{validatorIndex})
if err != nil {
return lastEpochForValidatorIndex,
chunkIndex,
validatorChunkIndex,
chunk,
fmt.Errorf("could not get last epoch written for validator %d: %w", validatorIndex, err)
}
if len(lastEpochForValidator) == 0 {
return lastEpochForValidatorIndex,
chunkIndex,
validatorChunkIndex,
chunk,
fmt.Errorf("could not get information at epoch %d for validator %d: there's no record found in slasher database",
sourceEpoch, validatorIndex,
)
}
lastEpochForValidatorIndex = lastEpochForValidator[0].Epoch
// if the epoch requested is within the range, we can proceed to get the chunk, otherwise return error
atBestSmallestEpoch := lastEpochForValidatorIndex.Sub(uint64(params.historyLength))
if sourceEpoch < atBestSmallestEpoch || sourceEpoch > lastEpochForValidatorIndex {
return lastEpochForValidatorIndex,
chunkIndex,
validatorChunkIndex,
chunk,
fmt.Errorf("requested epoch %d is outside the slasher history length %d, data can be provided within the epoch range [%d:%d] for validator %d",
sourceEpoch, params.historyLength, atBestSmallestEpoch, lastEpochForValidatorIndex, validatorIndex,
)
}
// fetch chunk from DB
chunk, err = s.getChunkFromDatabase(ctx, chunkKind, validatorChunkIndex, chunkIndex)
if err != nil {
return lastEpochForValidatorIndex,
chunkIndex,
validatorChunkIndex,
chunk,
fmt.Errorf("could not get chunk at index %d: %w", chunkIndex, err)
}
return lastEpochForValidatorIndex, chunkIndex, validatorChunkIndex, chunk, nil
}
func closeDB(d *slasherkv.Store) {
if err := d.Close(); err != nil {
log.WithError(err).Error("could not close database")
}
}

View File

@@ -16,21 +16,6 @@ type Parameters struct {
historyLength primitives.Epoch // H - defines how many epochs we keep of min or max spans.
}
// ChunkSize returns the chunk size.
func (p *Parameters) ChunkSize() uint64 {
return p.chunkSize
}
// ValidatorChunkSize returns the validator chunk size.
func (p *Parameters) ValidatorChunkSize() uint64 {
return p.validatorChunkSize
}
// HistoryLength returns the history length.
func (p *Parameters) HistoryLength() primitives.Epoch {
return p.historyLength
}
// DefaultParams defines default values for slasher's important parameters, defined
// based on optimization analysis for best and worst case scenarios for
// slasher's performance.
@@ -47,15 +32,7 @@ func DefaultParams() *Parameters {
}
}
func NewParams(chunkSize, validatorChunkSize uint64, historyLength primitives.Epoch) *Parameters {
return &Parameters{
chunkSize: chunkSize,
validatorChunkSize: validatorChunkSize,
historyLength: historyLength,
}
}
// ChunkIndex Validator min and max spans are split into chunks of length C = chunkSize.
// Validator min and max spans are split into chunks of length C = chunkSize.
// That is, if we are keeping N epochs worth of attesting history, finding what
// chunk a certain epoch, e, falls into can be computed as (e % N) / C. For example,
// if we are keeping 6 epochs worth of data, and we have chunks of size 2, then epoch
@@ -162,9 +139,9 @@ func (p *Parameters) flatSliceID(validatorChunkIndex, chunkIndex uint64) []byte
return ssz.MarshalUint64(make([]byte, 0), uint64(width.Mul(validatorChunkIndex).Add(chunkIndex)))
}
// ValidatorIndexesInChunk Given a validator chunk index, we determine all the validators
// Given a validator chunk index, we determine all of the validator
// indices that will belong in that chunk.
func (p *Parameters) ValidatorIndexesInChunk(validatorChunkIndex uint64) []primitives.ValidatorIndex {
func (p *Parameters) validatorIndexesInChunk(validatorChunkIndex uint64) []primitives.ValidatorIndex {
validatorIndices := make([]primitives.ValidatorIndex, 0)
low := validatorChunkIndex * p.validatorChunkSize
high := (validatorChunkIndex + 1) * p.validatorChunkSize
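A tiny worked example of the (e % N) / C mapping described in the comment above, using the same illustrative values N = 6 and C = 2; this is a standalone sketch, not the slasher's Parameters type.

package main

import "fmt"

func chunkIndex(e, n, c uint64) uint64 { return (e % n) / c }

func main() {
	const n, c = 6, 2
	for e := uint64(0); e < 8; e++ {
		// Epochs 0-1 land in chunk 0, 2-3 in chunk 1, 4-5 in chunk 2,
		// and epoch 6 wraps back around to chunk 0.
		fmt.Printf("epoch %d -> chunk %d\n", e, chunkIndex(e, n, c))
	}
}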

View File

@@ -468,7 +468,7 @@ func TestParams_validatorIndicesInChunk(t *testing.T) {
c := &Parameters{
validatorChunkSize: tt.fields.validatorChunkSize,
}
if got := c.ValidatorIndexesInChunk(tt.validatorChunkIdx); !reflect.DeepEqual(got, tt.want) {
if got := c.validatorIndexesInChunk(tt.validatorChunkIdx); !reflect.DeepEqual(got, tt.want) {
t.Errorf("validatorIndicesInChunk() = %v, want %v", got, tt.want)
}
})

View File

@@ -161,7 +161,7 @@ func (s *Service) Stop() error {
ctx, innerCancel := context.WithTimeout(context.Background(), shutdownTimeout)
defer innerCancel()
log.Info("Flushing last epoch written for each validator to disk, please wait")
if err := s.serviceCfg.Database.SaveLastEpochWrittenForValidators(
if err := s.serviceCfg.Database.SaveLastEpochsWrittenForValidators(
ctx, s.latestEpochUpdatedForValidator,
); err != nil {
log.Error(err)

View File

@@ -4,10 +4,7 @@ go_library(
name = "go_default_library",
srcs = ["types.go"],
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/slasher/types",
visibility = [
"//beacon-chain:__subpackages__",
"//cmd/prysmctl:__subpackages__",
],
visibility = ["//beacon-chain:__subpackages__"],
deps = [
"//consensus-types/primitives:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",

View File

@@ -14,18 +14,6 @@ const (
MaxSpan
)
// String returns the string representation of the chunk kind.
func (c ChunkKind) String() string {
switch c {
case MinSpan:
return "minspan"
case MaxSpan:
return "maxspan"
default:
return "unknown"
}
}
// IndexedAttestationWrapper contains an indexed attestation with its
// data root to reduce duplicated computation.
type IndexedAttestationWrapper struct {

View File

@@ -118,7 +118,6 @@ type ReadOnlyValidator interface {
// ReadOnlyValidators defines a struct which only has read access to validators methods.
type ReadOnlyValidators interface {
Validators() []*ethpb.Validator
ValidatorsReadOnly() []ReadOnlyValidator
ValidatorAtIndex(idx primitives.ValidatorIndex) (*ethpb.Validator, error)
ValidatorAtIndexReadOnly(idx primitives.ValidatorIndex) (ReadOnlyValidator, error)
ValidatorIndexByPubkey(key [fieldparams.BLSPubkeyLength]byte) (primitives.ValidatorIndex, bool)

View File

@@ -21,14 +21,6 @@ func (b *BeaconState) Validators() []*ethpb.Validator {
return b.validatorsVal()
}
// ValidatorsReadOnly participating in consensus on the beacon chain.
func (b *BeaconState) ValidatorsReadOnly() []state.ReadOnlyValidator {
b.lock.RLock()
defer b.lock.RUnlock()
return b.validatorsReadOnlyVal()
}
func (b *BeaconState) validatorsVal() []*ethpb.Validator {
var v []*ethpb.Validator
if features.Get().EnableExperimentalState {
@@ -54,35 +46,6 @@ func (b *BeaconState) validatorsVal() []*ethpb.Validator {
return res
}
func (b *BeaconState) validatorsReadOnlyVal() []state.ReadOnlyValidator {
var v []*ethpb.Validator
if features.Get().EnableExperimentalState {
if b.validatorsMultiValue == nil {
return nil
}
v = b.validatorsMultiValue.Value(b)
} else {
if b.validators == nil {
return nil
}
v = b.validators
}
res := make([]state.ReadOnlyValidator, len(v))
var err error
for i := 0; i < len(res); i++ {
val := v[i]
if val == nil {
continue
}
res[i], err = NewValidator(val)
if err != nil {
continue
}
}
return res
}
// references of validators participating in consensus on the beacon chain.
// This assumes that a lock is already held on BeaconState. This does not
// copy fully and instead just copies the reference.

View File

@@ -1,7 +1,6 @@
package backfill
import (
"context"
"fmt"
"sort"
"time"
@@ -56,8 +55,6 @@ const (
batchEndSequence
)
var retryDelay = time.Second
type batchId string
type batch struct {
@@ -65,7 +62,6 @@ type batch struct {
scheduled time.Time
seq int // sequence identifier, ie how many times has the sequence() method served this batch
retries int
retryAfter time.Time
begin primitives.Slot
end primitives.Slot // half-open interval, [begin, end), ie >= start, < end.
results verifiedROBlocks
@@ -78,7 +74,7 @@ type batch struct {
}
func (b batch) logFields() logrus.Fields {
f := map[string]interface{}{
return map[string]interface{}{
"batchId": b.id(),
"state": b.state.String(),
"scheduled": b.scheduled.String(),
@@ -90,10 +86,6 @@ func (b batch) logFields() logrus.Fields {
"blockPid": b.blockPid,
"blobPid": b.blobPid,
}
if b.retries > 0 {
f["retryAfter"] = b.retryAfter.String()
}
return f
}
func (b batch) replaces(r batch) bool {
@@ -161,8 +153,7 @@ func (b batch) withState(s batchState) batch {
switch b.state {
case batchErrRetryable:
b.retries += 1
b.retryAfter = time.Now().Add(retryDelay)
log.WithFields(b.logFields()).Info("Sequencing batch for retry after delay")
log.WithFields(b.logFields()).Info("Sequencing batch for retry")
case batchInit, batchNil:
b.firstScheduled = b.scheduled
}
@@ -199,32 +190,8 @@ func (b batch) availabilityStore() das.AvailabilityStore {
return b.bs.store
}
var batchBlockUntil = func(ctx context.Context, untilRetry time.Duration, b batch) error {
log.WithFields(b.logFields()).WithField("untilRetry", untilRetry.String()).
Debug("Sleeping for retry backoff delay")
select {
case <-ctx.Done():
return ctx.Err()
case <-time.After(untilRetry):
return nil
}
}
func (b batch) waitUntilReady(ctx context.Context) error {
// Wait to retry a failed batch to avoid hammering peers
// if we've hit a state where batches will consistently fail.
// Avoids spamming requests and logs.
if b.retries > 0 {
untilRetry := time.Until(b.retryAfter)
if untilRetry > time.Millisecond {
return batchBlockUntil(ctx, untilRetry, b)
}
}
return nil
}
func sortBatchDesc(bb []batch) {
sort.Slice(bb, func(i, j int) bool {
return bb[i].end > bb[j].end
return bb[j].end < bb[i].end
})
}
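A minimal sketch of the retry backoff that appears in this hunk (hypothetical batch type and delay, not the backfill package): a batch that fails records a retryAfter timestamp when it is re-sequenced, and waitUntilReady sleeps out the remaining delay unless the context is cancelled first.

package main

import (
	"context"
	"fmt"
	"time"
)

var retryDelay = 50 * time.Millisecond

type batch struct {
	retries    int
	retryAfter time.Time
}

// markRetryable mirrors re-sequencing a batch that failed with a retryable error.
func (b batch) markRetryable() batch {
	b.retries++
	b.retryAfter = time.Now().Add(retryDelay)
	return b
}

// waitUntilReady blocks until retryAfter has passed, or returns the context error.
func (b batch) waitUntilReady(ctx context.Context) error {
	if b.retries == 0 {
		return nil // fresh batches are dispatched immediately
	}
	until := time.Until(b.retryAfter)
	if until <= 0 {
		return nil
	}
	select {
	case <-ctx.Done():
		return ctx.Err()
	case <-time.After(until):
		return nil
	}
}

func main() {
	b := batch{}.markRetryable()
	start := time.Now()
	_ = b.waitUntilReady(context.Background())
	fmt.Printf("waited ~%v before retrying (retries=%d)\n", time.Since(start).Round(10*time.Millisecond), b.retries)
}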

View File

@@ -1,11 +1,8 @@
package backfill
import (
"context"
"testing"
"time"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/testing/require"
)
@@ -22,22 +19,3 @@ func TestSortBatchDesc(t *testing.T) {
require.Equal(t, orderOut[i], batches[i].end)
}
}
func TestWaitUntilReady(t *testing.T) {
b := batch{}.withState(batchErrRetryable)
require.Equal(t, time.Time{}, b.retryAfter)
var got time.Duration
wur := batchBlockUntil
var errDerp = errors.New("derp")
batchBlockUntil = func(_ context.Context, ur time.Duration, _ batch) error {
got = ur
return errDerp
}
// retries counter and timestamp are set when we mark the batch for sequencing, if it is in the retry state
b = b.withState(batchSequenced)
require.ErrorIs(t, b.waitUntilReady(context.Background()), errDerp)
require.Equal(t, true, retryDelay-time.Until(b.retryAfter) < time.Millisecond)
require.Equal(t, true, got < retryDelay && got > retryDelay-time.Millisecond)
require.Equal(t, 1, b.retries)
batchBlockUntil = wur
}

View File

@@ -143,11 +143,6 @@ func (p *p2pBatchWorkerPool) batchRouter(pa PeerAssigner) {
return
}
for _, pid := range assigned {
if err := todo[0].waitUntilReady(p.ctx); err != nil {
log.WithError(p.ctx.Err()).Info("p2pBatchWorkerPool context canceled, shutting down")
p.shutdown(p.ctx.Err())
return
}
busy[pid] = true
todo[0].busy = pid
p.toWorkers <- todo[0].withPeer(pid)

View File

@@ -12,12 +12,14 @@ go_library(
"log.go",
"round_robin.go",
"service.go",
"verification.go",
],
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/sync/initial-sync",
visibility = ["//beacon-chain:__subpackages__"],
deps = [
"//async/abool:go_default_library",
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/blockchain/kzg:go_default_library",
"//beacon-chain/core/feed/block:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/transition:go_default_library",
@@ -33,11 +35,13 @@ go_library(
"//beacon-chain/verification:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/params:go_default_library",
"//consensus-types:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//container/leaky-bucket:go_default_library",
"//crypto/rand:go_default_library",
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime:go_default_library",
@@ -82,10 +86,10 @@ go_test(
"//beacon-chain/p2p/types:go_default_library",
"//beacon-chain/startup:go_default_library",
"//beacon-chain/sync:go_default_library",
"//beacon-chain/sync/verify:go_default_library",
"//beacon-chain/verification:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",

View File

@@ -11,7 +11,6 @@ import (
"github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
p2pTypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/types"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
@@ -19,6 +18,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/sync/verify"
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/v5/config/params"
consensus_types "github.com/prysmaticlabs/prysm/v5/consensus-types"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
blocks2 "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
@@ -27,7 +27,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/crypto/rand"
"github.com/prysmaticlabs/prysm/v5/math"
p2ppb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
@@ -77,7 +76,6 @@ type blocksFetcherConfig struct {
db db.ReadOnlyDatabase
peerFilterCapacityWeight float64
mode syncMode
bs filesystem.BlobStorageSummarizer
}
// blocksFetcher is a service to fetch chain data from peers.
@@ -93,7 +91,6 @@ type blocksFetcher struct {
ctxMap prysmsync.ContextByteVersions
p2p p2p.P2P
db db.ReadOnlyDatabase
bs filesystem.BlobStorageSummarizer
blocksPerPeriod uint64
rateLimiter *leakybucket.Collector
peerLocks map[peer.ID]*peerLock
@@ -152,7 +149,6 @@ func newBlocksFetcher(ctx context.Context, cfg *blocksFetcherConfig) *blocksFetc
ctxMap: cfg.ctxMap,
p2p: cfg.p2p,
db: cfg.db,
bs: cfg.bs,
blocksPerPeriod: uint64(blocksPerPeriod),
rateLimiter: rateLimiter,
peerLocks: make(map[peer.ID]*peerLock),
@@ -240,7 +236,7 @@ func (f *blocksFetcher) loop() {
// Main loop.
for {
// Make sure there are available peers before processing requests.
// Make sure there is are available peers before processing requests.
if _, err := f.waitForMinimumPeers(f.ctx); err != nil {
log.Error(err)
}
@@ -316,7 +312,7 @@ func (f *blocksFetcher) handleRequest(ctx context.Context, start primitives.Slot
response.bwb, response.pid, response.err = f.fetchBlocksFromPeer(ctx, start, count, peers)
if response.err == nil {
bwb, err := f.fetchBlobsFromPeer(ctx, response.bwb, response.pid, peers)
bwb, err := f.fetchBlobsFromPeer(ctx, response.bwb, response.pid)
if err != nil {
response.err = err
}
@@ -340,11 +336,6 @@ func (f *blocksFetcher) fetchBlocksFromPeer(
Count: count,
Step: 1,
}
bestPeers := f.hasSufficientBandwidth(peers, req.Count)
// We append the best peers to the front so that higher capacity
// peers are dialed first.
peers = append(bestPeers, peers...)
peers = dedupPeers(peers)
for i := 0; i < len(peers); i++ {
p := peers[i]
blocks, err := f.requestBlocks(ctx, req, p)
@@ -376,17 +367,22 @@ func sortedBlockWithVerifiedBlobSlice(blocks []interfaces.ReadOnlySignedBeaconBl
return rb, nil
}
type commitmentCount struct {
slot primitives.Slot
root [32]byte
count int
func blobRequest(bwb []blocks2.BlockWithROBlobs, blobWindowStart primitives.Slot) *p2ppb.BlobSidecarsByRangeRequest {
if len(bwb) == 0 {
return nil
}
lowest := lowestSlotNeedsBlob(blobWindowStart, bwb)
if lowest == nil {
return nil
}
highest := bwb[len(bwb)-1].Block.Block().Slot()
return &p2ppb.BlobSidecarsByRangeRequest{
StartSlot: *lowest,
Count: uint64(highest.SubSlot(*lowest)) + 1,
}
}
type commitmentCountList []commitmentCount
// countCommitments makes a list of all blocks that have commitments that need to be satisfied.
// This gives us a representation to finish building the request that is lightweight and readable for testing.
func countCommitments(bwb []blocks2.BlockWithROBlobs, retentionStart primitives.Slot) commitmentCountList {
func lowestSlotNeedsBlob(retentionStart primitives.Slot, bwb []blocks2.BlockWithROBlobs) *primitives.Slot {
if len(bwb) == 0 {
return nil
}
@@ -396,13 +392,8 @@ func countCommitments(bwb []blocks2.BlockWithROBlobs, retentionStart primitives.
if bwb[len(bwb)-1].Block.Block().Slot() < retentionStart {
return nil
}
fc := make([]commitmentCount, 0, len(bwb))
for i := range bwb {
b := bwb[i]
for _, b := range bwb {
slot := b.Block.Block().Slot()
if b.Block.Version() < version.Deneb {
continue
}
if slot < retentionStart {
continue
}
@@ -410,116 +401,67 @@ func countCommitments(bwb []blocks2.BlockWithROBlobs, retentionStart primitives.
if err != nil || len(commits) == 0 {
continue
}
fc = append(fc, commitmentCount{slot: slot, root: b.Block.Root(), count: len(commits)})
}
return fc
}
// func slotRangeForCommitmentCounts(cc []commitmentCount, bs filesystem.BlobStorageSummarizer) *blobRange {
func (cc commitmentCountList) blobRange(bs filesystem.BlobStorageSummarizer) *blobRange {
if len(cc) == 0 {
return nil
}
// If we don't have a blob summarizer, can't check local blobs, request blobs over complete range.
if bs == nil {
return &blobRange{low: cc[0].slot, high: cc[len(cc)-1].slot}
}
for i := range cc {
hci := cc[i]
// This list is always ordered by increasing slot, per req/resp validation rules.
// Skip through slots until we find one with missing blobs.
if bs.Summary(hci.root).AllAvailable(hci.count) {
continue
}
// The slot of the first missing blob is the lower bound.
// If we don't find an upper bound, we'll have a 1 slot request (same low/high).
needed := &blobRange{low: hci.slot, high: hci.slot}
// Iterate backward through the list to find the highest missing slot above the lower bound.
// Return the complete range as soon as we find it; if lower bound is already the last element,
// or if we never find an upper bound, we'll fall through to the bounds being equal after this loop.
for z := len(cc) - 1; z > i; z-- {
hcz := cc[z]
if bs.Summary(hcz.root).AllAvailable(hcz.count) {
continue
}
needed.high = hcz.slot
return needed
}
return needed
return &slot
}
return nil
}
type blobRange struct {
low primitives.Slot
high primitives.Slot
}
func sortBlobs(blobs []blocks.ROBlob) []blocks.ROBlob {
sort.Slice(blobs, func(i, j int) bool {
if blobs[i].Slot() == blobs[j].Slot() {
return blobs[i].Index < blobs[j].Index
}
return blobs[i].Slot() < blobs[j].Slot()
})
func (r *blobRange) Request() *p2ppb.BlobSidecarsByRangeRequest {
if r == nil {
return nil
}
return &p2ppb.BlobSidecarsByRangeRequest{
StartSlot: r.low,
Count: uint64(r.high.SubSlot(r.low)) + 1,
}
return blobs
}
var errBlobVerification = errors.New("peer unable to serve aligned BlobSidecarsByRange and BeaconBlockSidecarsByRange responses")
var errMissingBlobsForBlockCommitments = errors.Wrap(errBlobVerification, "blobs unavailable for processing block with kzg commitments")
func verifyAndPopulateBlobs(bwb []blocks2.BlockWithROBlobs, blobs []blocks.ROBlob, req *p2ppb.BlobSidecarsByRangeRequest, bss filesystem.BlobStorageSummarizer) ([]blocks2.BlockWithROBlobs, error) {
blobsByRoot := make(map[[32]byte][]blocks.ROBlob)
for i := range blobs {
if blobs[i].Slot() < req.StartSlot {
func verifyAndPopulateBlobs(bwb []blocks2.BlockWithROBlobs, blobs []blocks.ROBlob, blobWindowStart primitives.Slot) ([]blocks2.BlockWithROBlobs, error) {
// Assumes bwb has already been sorted by sortedBlockWithVerifiedBlobSlice.
blobs = sortBlobs(blobs)
blobi := 0
// Loop over all blocks, and each time a commitment is observed, advance the index into the blob slice.
// The assumption is that the blob slice contains a value for every commitment in the blocks it is based on,
// correctly ordered by slot and blob index.
for i, bb := range bwb {
block := bb.Block.Block()
if block.Slot() < blobWindowStart {
continue
}
br := blobs[i].BlockRoot()
blobsByRoot[br] = append(blobsByRoot[br], blobs[i])
}
for i := range bwb {
bwi, err := populateBlock(bwb[i], blobsByRoot[bwb[i].Block.Root()], req, bss)
commits, err := block.Body().BlobKzgCommitments()
if err != nil {
if errors.Is(err, errDidntPopulate) {
if errors.Is(err, consensus_types.ErrUnsupportedField) {
log.
WithField("blockSlot", block.Slot()).
WithField("retentionStart", blobWindowStart).
Warn("block with slot within blob retention period has version which does not support commitments")
continue
}
return bwb, err
return nil, err
}
bwb[i] = bwi
bb.Blobs = make([]blocks.ROBlob, len(commits))
for ci := range commits {
// There are more expected commitments in this block, but we've run out of blobs from the response
// (out-of-bound error guard).
if blobi == len(blobs) {
return nil, missingCommitError(bb.Block.Root(), bb.Block.Block().Slot(), commits[ci:])
}
bl := blobs[blobi]
if err := verify.BlobAlignsWithBlock(bl, bb.Block); err != nil {
return nil, err
}
bb.Blobs[ci] = bl
blobi += 1
}
bwb[i] = bb
}
return bwb, nil
}
var errDidntPopulate = errors.New("skipping population of block")
func populateBlock(bw blocks2.BlockWithROBlobs, blobs []blocks.ROBlob, req *p2ppb.BlobSidecarsByRangeRequest, bss filesystem.BlobStorageSummarizer) (blocks2.BlockWithROBlobs, error) {
blk := bw.Block
if blk.Version() < version.Deneb || blk.Block().Slot() < req.StartSlot {
return bw, errDidntPopulate
}
commits, err := blk.Block().Body().BlobKzgCommitments()
if err != nil {
return bw, errDidntPopulate
}
if len(commits) == 0 {
return bw, errDidntPopulate
}
// Drop blobs on the floor if we already have them.
if bss != nil && bss.Summary(blk.Root()).AllAvailable(len(commits)) {
return bw, errDidntPopulate
}
if len(commits) != len(blobs) {
return bw, missingCommitError(blk.Root(), blk.Block().Slot(), commits)
}
for ci := range commits {
if err := verify.BlobAlignsWithBlock(blobs[ci], blk); err != nil {
return bw, err
}
}
bw.Blobs = blobs
return bw, nil
}
func missingCommitError(root [32]byte, slot primitives.Slot, missing [][]byte) error {
missStr := make([]string, 0, len(missing))
for k := range missing {
@@ -530,7 +472,7 @@ func missingCommitError(root [32]byte, slot primitives.Slot, missing [][]byte) e
}
// fetchBlobsFromPeer fetches blocks from a single randomly selected peer.
func (f *blocksFetcher) fetchBlobsFromPeer(ctx context.Context, bwb []blocks2.BlockWithROBlobs, pid peer.ID, peers []peer.ID) ([]blocks2.BlockWithROBlobs, error) {
func (f *blocksFetcher) fetchBlobsFromPeer(ctx context.Context, bwb []blocks2.BlockWithROBlobs, pid peer.ID) ([]blocks2.BlockWithROBlobs, error) {
ctx, span := trace.StartSpan(ctx, "initialsync.fetchBlobsFromPeer")
defer span.End()
if slots.ToEpoch(f.clock.CurrentSlot()) < params.BeaconConfig().DenebForkEpoch {
@@ -541,34 +483,17 @@ func (f *blocksFetcher) fetchBlobsFromPeer(ctx context.Context, bwb []blocks2.Bl
return nil, err
}
// Construct request message based on observed interval of blocks in need of blobs.
req := countCommitments(bwb, blobWindowStart).blobRange(f.bs).Request()
req := blobRequest(bwb, blobWindowStart)
if req == nil {
return bwb, nil
}
peers = f.filterPeers(ctx, peers, peersPercentagePerRequest)
// We dial the initial peer first to ensure that we get the desired set of blobs.
wantedPeers := append([]peer.ID{pid}, peers...)
bestPeers := f.hasSufficientBandwidth(wantedPeers, req.Count)
// We append the best peers to the front so that higher capacity
// peers are dialed first. If all of them fail, we fallback to the
// initial peer we wanted to request blobs from.
peers = append(bestPeers, pid)
for i := 0; i < len(peers); i++ {
p := peers[i]
blobs, err := f.requestBlobs(ctx, req, p)
if err != nil {
log.WithField("peer", p).WithError(err).Debug("Could not request blobs by range from peer")
continue
}
f.p2p.Peers().Scorers().BlockProviderScorer().Touch(p)
robs, err := verifyAndPopulateBlobs(bwb, blobs, req, f.bs)
if err != nil {
log.WithField("peer", p).WithError(err).Debug("Invalid BeaconBlobsByRange response")
continue
}
return robs, err
// Request blobs from the same peer that gave us the blob batch.
blobs, err := f.requestBlobs(ctx, req, pid)
if err != nil {
return nil, errors.Wrap(err, "could not request blobs by range")
}
return nil, errNoPeersAvailable
f.p2p.Peers().Scorers().BlockProviderScorer().Touch(pid)
return verifyAndPopulateBlobs(bwb, blobs, blobWindowStart)
}
// requestBlocks is a wrapper for handling BeaconBlocksByRangeRequest requests/streams.
@@ -681,18 +606,6 @@ func (f *blocksFetcher) waitForBandwidth(pid peer.ID, count uint64) error {
return nil
}
func (f *blocksFetcher) hasSufficientBandwidth(peers []peer.ID, count uint64) []peer.ID {
filteredPeers := []peer.ID{}
for _, p := range peers {
if uint64(f.rateLimiter.Remaining(p.String())) < count {
continue
}
copiedP := p
filteredPeers = append(filteredPeers, copiedP)
}
return filteredPeers
}
// Determine how long it will take for us to have the required number of blocks allowed by our rate limiter.
// We do this by calculating the duration till the rate limiter can request these blocks without exceeding
// the provided bandwidth limits per peer.
@@ -713,18 +626,3 @@ func timeToWait(wanted, rem, capacity int64, timeTillEmpty time.Duration) time.D
expectedTime := int64(timeTillEmpty) * blocksNeeded / currentNumBlks
return time.Duration(expectedTime)
}
// deduplicates the provided peer list.
func dedupPeers(peers []peer.ID) []peer.ID {
newPeerList := make([]peer.ID, 0, len(peers))
peerExists := make(map[peer.ID]bool)
for i := range peers {
if peerExists[peers[i]] {
continue
}
newPeerList = append(newPeerList, peers[i])
peerExists[peers[i]] = true
}
return newPeerList
}
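As a rough illustration of the countCommitments/blobRange changes earlier in this file's diff, the sketch below (hypothetical types, not the initial-sync package) derives a BlobSidecarsByRange-style request from per-block commitment counts: blocks whose blobs are already on disk are skipped, and the request spans from the first to the last block that still needs blobs.

package main

import "fmt"

type commitmentCount struct {
	slot      uint64
	count     int // commitments carried by the block
	available int // blobs for this block already stored locally
}

type blobRange struct{ low, high uint64 }

// neededRange returns nil when every block's blobs are already available.
func neededRange(cc []commitmentCount) *blobRange {
	var r *blobRange
	for _, c := range cc {
		if c.count == 0 || c.available >= c.count {
			continue // nothing to fetch for this block
		}
		if r == nil {
			r = &blobRange{low: c.slot, high: c.slot}
		}
		r.high = c.slot
	}
	return r
}

func main() {
	cc := []commitmentCount{
		{slot: 10, count: 2, available: 2}, // fully on disk, skipped
		{slot: 12, count: 3, available: 1}, // missing blobs: lower bound
		{slot: 15, count: 1, available: 0}, // missing blobs: upper bound
		{slot: 20, count: 4, available: 4}, // fully on disk, skipped
	}
	if r := neededRange(cc); r != nil {
		// Count is inclusive of both endpoints, mirroring a by-range request.
		fmt.Printf("StartSlot=%d Count=%d\n", r.low, r.high-r.low+1) // StartSlot=12 Count=4
	}
}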

View File

@@ -4,6 +4,7 @@ import (
"context"
"fmt"
"math"
"math/rand"
"sort"
"sync"
"testing"
@@ -11,16 +12,14 @@ import (
libp2pcore "github.com/libp2p/go-libp2p/core"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
mock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
dbtest "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/testing"
p2pm "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
p2pt "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
beaconsync "github.com/prysmaticlabs/prysm/v5/beacon-chain/sync"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/sync/verify"
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
@@ -960,7 +959,28 @@ func TestTimeToWait(t *testing.T) {
}
}
func TestBlobRangeForBlocks(t *testing.T) {
func TestSortBlobs(t *testing.T) {
_, blobs := util.ExtendBlocksPlusBlobs(t, []blocks.ROBlock{}, 10)
shuffled := make([]blocks.ROBlob, len(blobs))
for i := range blobs {
shuffled[i] = blobs[i]
}
rand.Shuffle(len(shuffled), func(i, j int) {
shuffled[i], shuffled[j] = shuffled[j], shuffled[i]
})
sorted := sortBlobs(shuffled)
require.Equal(t, len(sorted), len(shuffled))
for i := range blobs {
expect := blobs[i]
actual := sorted[i]
require.Equal(t, expect.Slot(), actual.Slot())
require.Equal(t, expect.Index, actual.Index)
require.Equal(t, bytesutil.ToBytes48(expect.KzgCommitment), bytesutil.ToBytes48(actual.KzgCommitment))
require.Equal(t, expect.BlockRoot(), actual.BlockRoot())
}
}
func TestLowestSlotNeedsBlob(t *testing.T) {
blks, _ := util.ExtendBlocksPlusBlobs(t, []blocks.ROBlock{}, 10)
sbbs := make([]interfaces.ReadOnlySignedBeaconBlock, len(blks))
for i := range blks {
@@ -969,12 +989,12 @@ func TestBlobRangeForBlocks(t *testing.T) {
retentionStart := primitives.Slot(5)
bwb, err := sortedBlockWithVerifiedBlobSlice(sbbs)
require.NoError(t, err)
bounds := countCommitments(bwb, retentionStart).blobRange(nil)
require.Equal(t, retentionStart, bounds.low)
lowest := lowestSlotNeedsBlob(retentionStart, bwb)
require.Equal(t, retentionStart, *lowest)
higher := primitives.Slot(len(blks) + 1)
bounds = countCommitments(bwb, higher).blobRange(nil)
var nilBounds *blobRange
require.Equal(t, nilBounds, bounds)
lowest = lowestSlotNeedsBlob(higher, bwb)
var nilSlot *primitives.Slot
require.Equal(t, nilSlot, lowest)
blks, _ = util.ExtendBlocksPlusBlobs(t, []blocks.ROBlock{}, 10)
sbbs = make([]interfaces.ReadOnlySignedBeaconBlock, len(blks))
@@ -987,14 +1007,14 @@ func TestBlobRangeForBlocks(t *testing.T) {
next := bwb[6].Block.Block().Slot()
skip := bwb[5].Block.Block()
bwb[5].Block, _ = util.GenerateTestDenebBlockWithSidecar(t, skip.ParentRoot(), skip.Slot(), 0)
bounds = countCommitments(bwb, retentionStart).blobRange(nil)
require.Equal(t, next, bounds.low)
lowest = lowestSlotNeedsBlob(retentionStart, bwb)
require.Equal(t, next, *lowest)
}
func TestBlobRequest(t *testing.T) {
var nilReq *ethpb.BlobSidecarsByRangeRequest
// no blocks
req := countCommitments([]blocks.BlockWithROBlobs{}, 0).blobRange(nil).Request()
req := blobRequest([]blocks.BlockWithROBlobs{}, 0)
require.Equal(t, nilReq, req)
blks, _ := util.ExtendBlocksPlusBlobs(t, []blocks.ROBlock{}, 10)
sbbs := make([]interfaces.ReadOnlySignedBeaconBlock, len(blks))
@@ -1006,180 +1026,26 @@ func TestBlobRequest(t *testing.T) {
maxBlkSlot := primitives.Slot(len(blks) - 1)
tooHigh := primitives.Slot(len(blks) + 1)
req = countCommitments(bwb, tooHigh).blobRange(nil).Request()
req = blobRequest(bwb, tooHigh)
require.Equal(t, nilReq, req)
req = countCommitments(bwb, maxBlkSlot).blobRange(nil).Request()
req = blobRequest(bwb, maxBlkSlot)
require.Equal(t, uint64(1), req.Count)
require.Equal(t, maxBlkSlot, req.StartSlot)
halfway := primitives.Slot(5)
req = countCommitments(bwb, halfway).blobRange(nil).Request()
req = blobRequest(bwb, halfway)
require.Equal(t, halfway, req.StartSlot)
// adding 1 to include the halfway slot itself
require.Equal(t, uint64(1+maxBlkSlot-halfway), req.Count)
before := bwb[0].Block.Block().Slot()
allAfter := bwb[1:]
req = countCommitments(allAfter, before).blobRange(nil).Request()
req = blobRequest(allAfter, before)
require.Equal(t, allAfter[0].Block.Block().Slot(), req.StartSlot)
require.Equal(t, len(allAfter), int(req.Count))
}
func TestCountCommitments(t *testing.T) {
// no blocks
// blocks before retention start filtered
// blocks without commitments filtered
// pre-deneb filtered
// variety of commitment counts are accurate, from 1 to max
type testcase struct {
name string
bwb func(t *testing.T, c testcase) []blocks.BlockWithROBlobs
numBlocks int
retStart primitives.Slot
resCount int
}
cases := []testcase{
{
name: "nil blocks is safe",
bwb: func(t *testing.T, c testcase) []blocks.BlockWithROBlobs {
return nil
},
retStart: 0,
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
bwb := c.bwb(t, c)
cc := countCommitments(bwb, c.retStart)
require.Equal(t, c.resCount, len(cc))
})
}
}
func TestCommitmentCountList(t *testing.T) {
cases := []struct {
name string
cc commitmentCountList
bss func(*testing.T) filesystem.BlobStorageSummarizer
expected *blobRange
request *ethpb.BlobSidecarsByRangeRequest
}{
{
name: "nil commitmentCount is safe",
cc: nil,
expected: nil,
request: nil,
},
{
name: "nil bss, single slot",
cc: []commitmentCount{
{slot: 11235, count: 1},
},
expected: &blobRange{low: 11235, high: 11235},
request: &ethpb.BlobSidecarsByRangeRequest{StartSlot: 11235, Count: 1},
},
{
name: "nil bss, sparse slots",
cc: []commitmentCount{
{slot: 11235, count: 1},
{slot: 11240, count: fieldparams.MaxBlobsPerBlock},
{slot: 11250, count: 3},
},
expected: &blobRange{low: 11235, high: 11250},
request: &ethpb.BlobSidecarsByRangeRequest{StartSlot: 11235, Count: 16},
},
{
name: "AllAvailable in middle, some avail low, none high",
bss: func(t *testing.T) filesystem.BlobStorageSummarizer {
onDisk := map[[32]byte][]int{
bytesutil.ToBytes32([]byte("0")): {0, 1},
bytesutil.ToBytes32([]byte("1")): {0, 1, 2, 3, 4, 5},
}
return filesystem.NewMockBlobStorageSummarizer(t, onDisk)
},
cc: []commitmentCount{
{slot: 0, count: 3, root: bytesutil.ToBytes32([]byte("0"))},
{slot: 5, count: fieldparams.MaxBlobsPerBlock, root: bytesutil.ToBytes32([]byte("1"))},
{slot: 15, count: 3},
},
expected: &blobRange{low: 0, high: 15},
request: &ethpb.BlobSidecarsByRangeRequest{StartSlot: 0, Count: 16},
},
{
name: "AllAvailable at high and low",
bss: func(t *testing.T) filesystem.BlobStorageSummarizer {
onDisk := map[[32]byte][]int{
bytesutil.ToBytes32([]byte("0")): {0, 1},
bytesutil.ToBytes32([]byte("2")): {0, 1, 2, 3, 4, 5},
}
return filesystem.NewMockBlobStorageSummarizer(t, onDisk)
},
cc: []commitmentCount{
{slot: 0, count: 2, root: bytesutil.ToBytes32([]byte("0"))},
{slot: 5, count: 3},
{slot: 15, count: fieldparams.MaxBlobsPerBlock, root: bytesutil.ToBytes32([]byte("2"))},
},
expected: &blobRange{low: 5, high: 5},
request: &ethpb.BlobSidecarsByRangeRequest{StartSlot: 5, Count: 1},
},
{
name: "AllAvailable at high and low, adjacent range in middle",
bss: func(t *testing.T) filesystem.BlobStorageSummarizer {
onDisk := map[[32]byte][]int{
bytesutil.ToBytes32([]byte("0")): {0, 1},
bytesutil.ToBytes32([]byte("2")): {0, 1, 2, 3, 4, 5},
}
return filesystem.NewMockBlobStorageSummarizer(t, onDisk)
},
cc: []commitmentCount{
{slot: 0, count: 2, root: bytesutil.ToBytes32([]byte("0"))},
{slot: 5, count: 3},
{slot: 6, count: 3},
{slot: 15, count: fieldparams.MaxBlobsPerBlock, root: bytesutil.ToBytes32([]byte("2"))},
},
expected: &blobRange{low: 5, high: 6},
request: &ethpb.BlobSidecarsByRangeRequest{StartSlot: 5, Count: 2},
},
{
name: "AllAvailable at high and low, range in middle",
bss: func(t *testing.T) filesystem.BlobStorageSummarizer {
onDisk := map[[32]byte][]int{
bytesutil.ToBytes32([]byte("0")): {0, 1},
bytesutil.ToBytes32([]byte("1")): {0, 1},
bytesutil.ToBytes32([]byte("2")): {0, 1, 2, 3, 4, 5},
}
return filesystem.NewMockBlobStorageSummarizer(t, onDisk)
},
cc: []commitmentCount{
{slot: 0, count: 2, root: bytesutil.ToBytes32([]byte("0"))},
{slot: 5, count: 3, root: bytesutil.ToBytes32([]byte("1"))},
{slot: 10, count: 3},
{slot: 15, count: fieldparams.MaxBlobsPerBlock, root: bytesutil.ToBytes32([]byte("2"))},
},
expected: &blobRange{low: 5, high: 10},
request: &ethpb.BlobSidecarsByRangeRequest{StartSlot: 5, Count: 6},
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
var bss filesystem.BlobStorageSummarizer
if c.bss != nil {
bss = c.bss(t)
}
br := c.cc.blobRange(bss)
require.DeepEqual(t, c.expected, br)
if c.request == nil {
require.IsNil(t, br.Request())
} else {
req := br.Request()
require.DeepEqual(t, req.StartSlot, c.request.StartSlot)
require.DeepEqual(t, req.Count, c.request.Count)
}
})
}
}
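Reading the cases above, blobRange appears to trim, from both ends of the list, any block whose blobs are already fully available on disk, while a fully available block in the middle stays inside the range because the request must still be contiguous. A hedged sketch of that trimming, with the summarizer reduced to a plain set of fully-available roots (names and types here are illustrative, not the package's):
package main

import "fmt"

type Slot uint64

type commitmentCount struct {
	slot  Slot
	count int
	root  [32]byte
}

// blobRangeSketch returns the lowest and highest slots that still need blobs,
// skipping entries whose root is marked fully available on disk. ok is false
// when nothing needs to be fetched at all.
func blobRangeSketch(cc []commitmentCount, allAvailable map[[32]byte]bool) (low, high Slot, ok bool) {
	for _, c := range cc {
		if allAvailable[c.root] {
			continue
		}
		if !ok {
			low, ok = c.slot, true
		}
		high = c.slot
	}
	return low, high, ok
}

func main() {
	full := [32]byte{2}
	cc := []commitmentCount{
		{slot: 0, count: 2, root: [32]byte{0}},
		{slot: 5, count: 3, root: [32]byte{1}},
		{slot: 15, count: 6, root: full},
	}
	low, high, _ := blobRangeSketch(cc, map[[32]byte]bool{full: true})
	fmt.Println(low, high) // 0 5
}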
func testSequenceBlockWithBlob(t *testing.T, nblocks int) ([]blocks.BlockWithROBlobs, []blocks.ROBlob) {
blks, blobs := util.ExtendBlocksPlusBlobs(t, []blocks.ROBlock{}, nblocks)
sbbs := make([]interfaces.ReadOnlySignedBeaconBlock, len(blks))
@@ -1191,75 +1057,91 @@ func testSequenceBlockWithBlob(t *testing.T, nblocks int) ([]blocks.BlockWithROB
return bwb, blobs
}
func testReqFromResp(bwb []blocks.BlockWithROBlobs) *ethpb.BlobSidecarsByRangeRequest {
return &ethpb.BlobSidecarsByRangeRequest{
StartSlot: bwb[0].Block.Block().Slot(),
Count: uint64(bwb[len(bwb)-1].Block.Block().Slot()-bwb[0].Block.Block().Slot()) + 1,
}
}
func TestVerifyAndPopulateBlobs(t *testing.T) {
t.Run("happy path", func(t *testing.T) {
bwb, blobs := testSequenceBlockWithBlob(t, 10)
expectedCommits := make(map[[48]byte]bool)
for _, bl := range blobs {
expectedCommits[bytesutil.ToBytes48(bl.KzgCommitment)] = true
}
require.Equal(t, len(blobs), len(expectedCommits))
bwb, err := verifyAndPopulateBlobs(bwb, blobs, testReqFromResp(bwb), nil)
require.NoError(t, err)
for _, bw := range bwb {
commits, err := bw.Block.Block().Body().BlobKzgCommitments()
require.NoError(t, err)
require.Equal(t, len(commits), len(bw.Blobs))
for i := range commits {
bc := bytesutil.ToBytes48(commits[i])
require.Equal(t, bc, bytesutil.ToBytes48(bw.Blobs[i].KzgCommitment))
// Since we delete entries we've seen, duplicates will cause an error here.
_, ok := expectedCommits[bc]
// Make sure this was an expected delete, then delete it from the map so we can make sure we saw all of them.
require.Equal(t, true, ok)
delete(expectedCommits, bc)
}
}
// We delete each entry we've seen, so if we see all expected commits, the map should be empty at the end.
require.Equal(t, 0, len(expectedCommits))
})
t.Run("missing blobs", func(t *testing.T) {
bwb, blobs := testSequenceBlockWithBlob(t, 10)
_, err := verifyAndPopulateBlobs(bwb, blobs[1:], testReqFromResp(bwb), nil)
require.ErrorIs(t, err, errMissingBlobsForBlockCommitments)
})
t.Run("no blobs for last block", func(t *testing.T) {
bwb, blobs := testSequenceBlockWithBlob(t, 10)
lastIdx := len(bwb) - 1
lastBlk := bwb[lastIdx].Block
cmts, err := lastBlk.Block().Body().BlobKzgCommitments()
require.NoError(t, err)
blobs = blobs[0 : len(blobs)-len(cmts)]
lastBlk, _ = util.GenerateTestDenebBlockWithSidecar(t, lastBlk.Block().ParentRoot(), lastBlk.Block().Slot(), 0)
bwb[lastIdx].Block = lastBlk
_, err = verifyAndPopulateBlobs(bwb, blobs, testReqFromResp(bwb), nil)
require.NoError(t, err)
})
t.Run("blobs not copied if all locally available", func(t *testing.T) {
bwb, blobs := testSequenceBlockWithBlob(t, 10)
// r1 only has some blobs locally available, so we'll still copy them all.
// r7 has all blobs locally available, so we shouldn't copy them.
i1, i7 := 1, 7
r1, r7 := bwb[i1].Block.Root(), bwb[i7].Block.Root()
onDisk := map[[32]byte][]int{
r1: {0, 1},
r7: {0, 1, 2, 3, 4, 5},
}
bss := filesystem.NewMockBlobStorageSummarizer(t, onDisk)
bwb, err := verifyAndPopulateBlobs(bwb, blobs, testReqFromResp(bwb), bss)
require.NoError(t, err)
require.Equal(t, 6, len(bwb[i1].Blobs))
require.Equal(t, 0, len(bwb[i7].Blobs))
})
}
func TestVerifyAndPopulateBlobs(t *testing.T) {
bwb, blobs := testSequenceBlockWithBlob(t, 10)
lastBlobIdx := len(blobs) - 1
// Blocks are all before the retention window, blobs argument is ignored.
windowAfter := bwb[len(bwb)-1].Block.Block().Slot() + 1
_, err := verifyAndPopulateBlobs(bwb, nil, windowAfter)
require.NoError(t, err)
firstBlockSlot := bwb[0].Block.Block().Slot()
// slice off blobs for the last block so we hit the out of bounds / blob exhaustion check.
_, err = verifyAndPopulateBlobs(bwb, blobs[0:len(blobs)-6], firstBlockSlot)
require.ErrorIs(t, err, errMissingBlobsForBlockCommitments)
bwb, blobs = testSequenceBlockWithBlob(t, 10)
// Misalign the slots of the blobs for the first block to simulate them being missing from the response.
offByOne := blobs[0].Slot()
for i := range blobs {
if blobs[i].Slot() == offByOne {
blobs[i].SignedBlockHeader.Header.Slot = offByOne + 1
}
}
_, err = verifyAndPopulateBlobs(bwb, blobs, firstBlockSlot)
require.ErrorIs(t, err, verify.ErrBlobBlockMisaligned)
bwb, blobs = testSequenceBlockWithBlob(t, 10)
blobs[lastBlobIdx], err = blocks.NewROBlobWithRoot(blobs[lastBlobIdx].BlobSidecar, blobs[0].BlockRoot())
require.NoError(t, err)
_, err = verifyAndPopulateBlobs(bwb, blobs, firstBlockSlot)
require.ErrorIs(t, err, verify.ErrBlobBlockMisaligned)
bwb, blobs = testSequenceBlockWithBlob(t, 10)
blobs[lastBlobIdx].Index = 100
_, err = verifyAndPopulateBlobs(bwb, blobs, firstBlockSlot)
require.ErrorIs(t, err, verify.ErrIncorrectBlobIndex)
bwb, blobs = testSequenceBlockWithBlob(t, 10)
blobs[lastBlobIdx].SignedBlockHeader.Header.ProposerIndex = 100
blobs[lastBlobIdx], err = blocks.NewROBlob(blobs[lastBlobIdx].BlobSidecar)
require.NoError(t, err)
_, err = verifyAndPopulateBlobs(bwb, blobs, firstBlockSlot)
require.ErrorIs(t, err, verify.ErrBlobBlockMisaligned)
bwb, blobs = testSequenceBlockWithBlob(t, 10)
blobs[lastBlobIdx].SignedBlockHeader.Header.ParentRoot = blobs[0].SignedBlockHeader.Header.ParentRoot
blobs[lastBlobIdx], err = blocks.NewROBlob(blobs[lastBlobIdx].BlobSidecar)
require.NoError(t, err)
_, err = verifyAndPopulateBlobs(bwb, blobs, firstBlockSlot)
require.ErrorIs(t, err, verify.ErrBlobBlockMisaligned)
var emptyKzg [48]byte
bwb, blobs = testSequenceBlockWithBlob(t, 10)
blobs[lastBlobIdx].KzgCommitment = emptyKzg[:]
blobs[lastBlobIdx], err = blocks.NewROBlob(blobs[lastBlobIdx].BlobSidecar)
require.NoError(t, err)
_, err = verifyAndPopulateBlobs(bwb, blobs, firstBlockSlot)
require.ErrorIs(t, err, verify.ErrMismatchedBlobCommitments)
// happy path
bwb, blobs = testSequenceBlockWithBlob(t, 10)
expectedCommits := make(map[[48]byte]bool)
for _, bl := range blobs {
expectedCommits[bytesutil.ToBytes48(bl.KzgCommitment)] = true
}
// The assertions using this map expect all commitments to be unique, so make sure that stays true.
require.Equal(t, len(blobs), len(expectedCommits))
bwb, err = verifyAndPopulateBlobs(bwb, blobs, firstBlockSlot)
require.NoError(t, err)
for _, bw := range bwb {
commits, err := bw.Block.Block().Body().BlobKzgCommitments()
require.NoError(t, err)
require.Equal(t, len(commits), len(bw.Blobs))
for i := range commits {
bc := bytesutil.ToBytes48(commits[i])
require.Equal(t, bc, bytesutil.ToBytes48(bw.Blobs[i].KzgCommitment))
// Since we delete entries we've seen, duplicates will cause an error here.
_, ok := expectedCommits[bc]
// Make sure this was an expected delete, then delete it from the map so we can make sure we saw all of them.
require.Equal(t, true, ok)
delete(expectedCommits, bc)
}
}
// We delete each entry we've seen, so if we see all expected commits, the map should be empty at the end.
require.Equal(t, 0, len(expectedCommits))
}
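Both variants of the test above exercise the same core rule: every KZG commitment in a block must be matched, in order, by a sidecar with the same index and commitment, and a shortfall or mismatch is an error. A compressed sketch of that matching step for a single block, with simplified types standing in for ROBlob and the real error values (the error names below are placeholders modeled on the ones asserted above):
package main

import (
	"errors"
	"fmt"
)

var errMismatch = errors.New("sidecar does not match block commitment")
var errMissing = errors.New("missing blobs for block commitments")

type sidecar struct {
	index      int
	commitment [48]byte
}

// populateSketch walks the commitments of a single block and consumes one
// sidecar per commitment, checking index and commitment equality.
func populateSketch(commitments [][48]byte, scs []sidecar) ([]sidecar, error) {
	if len(scs) < len(commitments) {
		return nil, errMissing
	}
	kept := make([]sidecar, 0, len(commitments))
	for i, c := range commitments {
		sc := scs[i]
		if sc.index != i || sc.commitment != c {
			return nil, errMismatch
		}
		kept = append(kept, sc)
	}
	return kept, nil
}

func main() {
	c := [48]byte{1}
	kept, err := populateSketch([][48]byte{c}, []sidecar{{index: 0, commitment: c}})
	fmt.Println(len(kept), err) // 1 <nil>
}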
func TestBatchLimit(t *testing.T) {
@@ -1284,22 +1166,3 @@ func TestBatchLimit(t *testing.T) {
assert.Equal(t, params.BeaconConfig().MaxRequestBlocksDeneb, uint64(maxBatchLimit()))
}
func TestBlockFetcher_HasSufficientBandwidth(t *testing.T) {
bf := newBlocksFetcher(context.Background(), &blocksFetcherConfig{})
currCap := bf.rateLimiter.Capacity()
wantedAmt := currCap - 100
bf.rateLimiter.Add(peer.ID("a").String(), wantedAmt)
bf.rateLimiter.Add(peer.ID("c").String(), wantedAmt)
bf.rateLimiter.Add(peer.ID("f").String(), wantedAmt)
bf.rateLimiter.Add(peer.ID("d").String(), wantedAmt)
receivedPeers := bf.hasSufficientBandwidth([]peer.ID{"a", "b", "c", "d", "e", "f"}, 110)
for _, p := range receivedPeers {
switch p {
case "a", "c", "f", "d":
t.Errorf("peer has exceeded capacity: %s", p)
}
}
assert.Equal(t, 2, len(receivedPeers))
}
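The bandwidth test above expects peers whose rate-limiter allowance has been mostly consumed to be filtered out of the returned set. A small sketch of that selection, with the leaky-bucket rate limiter replaced by a plain remaining-allowance map (an assumption made for illustration only):
package main

import "fmt"

// hasSufficientBandwidthSketch keeps only peers whose remaining allowance can
// cover the requested amount, mirroring the expectation above that the peers
// already charged close to capacity are excluded.
func hasSufficientBandwidthSketch(remaining map[string]int64, peers []string, count int64) []string {
	var out []string
	for _, p := range peers {
		if remaining[p] >= count {
			out = append(out, p)
		}
	}
	return out
}

func main() {
	remaining := map[string]int64{"a": 100, "b": 1000, "c": 100, "d": 100, "e": 1000, "f": 100}
	fmt.Println(hasSufficientBandwidthSketch(remaining, []string{"a", "b", "c", "d", "e", "f"}, 110)) // [b e]
}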

View File

@@ -280,7 +280,7 @@ func (f *blocksFetcher) findForkWithPeer(ctx context.Context, pid peer.ID, slot
}
// We need to fetch the blobs for the given alt-chain if any exist, so that we can try to verify and import
// the blocks.
bwb, err := f.fetchBlobsFromPeer(ctx, altBlocks, pid, []peer.ID{pid})
bwb, err := f.fetchBlobsFromPeer(ctx, altBlocks, pid)
if err != nil {
return nil, errors.Wrap(err, "unable to retrieve blobs for blocks found in findForkWithPeer")
}
@@ -302,7 +302,7 @@ func (f *blocksFetcher) findAncestor(ctx context.Context, pid peer.ID, b interfa
if err != nil {
return nil, errors.Wrap(err, "received invalid blocks in findAncestor")
}
bwb, err = f.fetchBlobsFromPeer(ctx, bwb, pid, []peer.ID{pid})
bwb, err = f.fetchBlobsFromPeer(ctx, bwb, pid)
if err != nil {
return nil, errors.Wrap(err, "unable to retrieve blobs for blocks found in findAncestor")
}

View File

@@ -7,7 +7,6 @@ import (
"github.com/libp2p/go-libp2p/core/peer"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
beaconsync "github.com/prysmaticlabs/prysm/v5/beacon-chain/sync"
@@ -70,7 +69,6 @@ type blocksQueueConfig struct {
p2p p2p.P2P
db db.ReadOnlyDatabase
mode syncMode
bs filesystem.BlobStorageSummarizer
}
// blocksQueue is a priority queue that serves as an intermediary between block fetchers (producers)
@@ -103,16 +101,12 @@ func newBlocksQueue(ctx context.Context, cfg *blocksQueueConfig) *blocksQueue {
blocksFetcher := cfg.blocksFetcher
if blocksFetcher == nil {
if cfg.bs == nil {
log.Warn("rpc fetcher starting without blob availability cache, duplicate blobs may be requested.")
}
blocksFetcher = newBlocksFetcher(ctx, &blocksFetcherConfig{
ctxMap: cfg.ctxMap,
chain: cfg.chain,
p2p: cfg.p2p,
db: cfg.db,
clock: cfg.clock,
bs: cfg.bs,
})
}
highestExpectedSlot := cfg.highestExpectedSlot
@@ -145,7 +139,7 @@ func newBlocksQueue(ctx context.Context, cfg *blocksQueueConfig) *blocksQueue {
queue.smm.addEventHandler(eventDataReceived, stateScheduled, queue.onDataReceivedEvent(ctx))
queue.smm.addEventHandler(eventTick, stateDataParsed, queue.onReadyToSendEvent(ctx))
queue.smm.addEventHandler(eventTick, stateSkipped, queue.onProcessSkippedEvent(ctx))
queue.smm.addEventHandler(eventTick, stateSent, onCheckStaleEvent(ctx))
queue.smm.addEventHandler(eventTick, stateSent, queue.onCheckStaleEvent(ctx))
return queue
}
@@ -457,7 +451,7 @@ func (q *blocksQueue) onProcessSkippedEvent(ctx context.Context) eventHandlerFn
// onCheckStaleEvent is an event that allows to mark stale epochs,
// so that they can be re-processed.
func onCheckStaleEvent(ctx context.Context) eventHandlerFn {
func (_ *blocksQueue) onCheckStaleEvent(ctx context.Context) eventHandlerFn {
return func(m *stateMachine, in interface{}) (stateID, error) {
if ctx.Err() != nil {
return m.state, ctx.Err()
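The subtests further down ("process non stale machine" and "process stale machine") pin down what the handler should do once the context check passes: a machine that has sat in stateSent for at least staleEpochTimeout is pushed back so its epoch can be re-processed, while a fresher one is left alone. A hedged sketch of that staleness decision; returning stateSkipped for the stale case is an assumption based on the comment above, and the real handler may do more:
package main

import (
	"fmt"
	"time"
)

type stateID int

const (
	stateSent stateID = iota
	stateSkipped
)

// staleEpochTimeout is an illustrative value, not the package constant.
const staleEpochTimeout = 5 * time.Minute

type stateMachine struct {
	state   stateID
	updated time.Time
}

// checkStaleSketch re-marks a sent machine as skipped once it has been idle
// for at least staleEpochTimeout, so the epoch can be re-processed.
func checkStaleSketch(m *stateMachine) stateID {
	if time.Since(m.updated) < staleEpochTimeout {
		return m.state
	}
	return stateSkipped
}

func main() {
	fresh := &stateMachine{state: stateSent, updated: time.Now().Add(-staleEpochTimeout / 2)}
	stale := &stateMachine{state: stateSent, updated: time.Now().Add(-staleEpochTimeout)}
	fmt.Println(checkStaleSketch(fresh) == stateSent, checkStaleSketch(stale) == stateSkipped) // true true
}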

View File

@@ -971,12 +971,24 @@ func TestBlocksQueue_onProcessSkippedEvent(t *testing.T) {
}
func TestBlocksQueue_onCheckStaleEvent(t *testing.T) {
blockBatchLimit := flags.Get().BlockBatchLimit
mc, p2p, _ := initializeTestServices(t, []primitives.Slot{}, []*peerData{})
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{
chain: mc,
p2p: p2p,
})
t.Run("expired context", func(t *testing.T) {
ctx, cancel := context.WithCancel(ctx)
handlerFn := onCheckStaleEvent(ctx)
queue := newBlocksQueue(ctx, &blocksQueueConfig{
blocksFetcher: fetcher,
chain: mc,
highestExpectedSlot: primitives.Slot(blockBatchLimit),
})
handlerFn := queue.onCheckStaleEvent(ctx)
cancel()
updatedState, err := handlerFn(&stateMachine{
state: stateSkipped,
@@ -986,10 +998,16 @@ func TestBlocksQueue_onCheckStaleEvent(t *testing.T) {
})
t.Run("invalid input state", func(t *testing.T) {
queue := newBlocksQueue(ctx, &blocksQueueConfig{
blocksFetcher: fetcher,
chain: mc,
highestExpectedSlot: primitives.Slot(blockBatchLimit),
})
invalidStates := []stateID{stateNew, stateScheduled, stateDataParsed, stateSkipped}
for _, state := range invalidStates {
t.Run(state.String(), func(t *testing.T) {
handlerFn := onCheckStaleEvent(ctx)
handlerFn := queue.onCheckStaleEvent(ctx)
updatedState, err := handlerFn(&stateMachine{
state: state,
}, nil)
@@ -1000,7 +1018,12 @@ func TestBlocksQueue_onCheckStaleEvent(t *testing.T) {
})
t.Run("process non stale machine", func(t *testing.T) {
handlerFn := onCheckStaleEvent(ctx)
queue := newBlocksQueue(ctx, &blocksQueueConfig{
blocksFetcher: fetcher,
chain: mc,
highestExpectedSlot: primitives.Slot(blockBatchLimit),
})
handlerFn := queue.onCheckStaleEvent(ctx)
updatedState, err := handlerFn(&stateMachine{
state: stateSent,
updated: prysmTime.Now().Add(-staleEpochTimeout / 2),
@@ -1011,7 +1034,12 @@ func TestBlocksQueue_onCheckStaleEvent(t *testing.T) {
})
t.Run("process stale machine", func(t *testing.T) {
handlerFn := onCheckStaleEvent(ctx)
queue := newBlocksQueue(ctx, &blocksQueueConfig{
blocksFetcher: fetcher,
chain: mc,
highestExpectedSlot: primitives.Slot(blockBatchLimit),
})
handlerFn := queue.onCheckStaleEvent(ctx)
updatedState, err := handlerFn(&stateMachine{
state: stateSent,
updated: prysmTime.Now().Add(-staleEpochTimeout),

View File

@@ -11,9 +11,7 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/das"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/sync"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
@@ -63,39 +61,7 @@ func (s *Service) roundRobinSync(genesis time.Time) error {
return s.syncToNonFinalizedEpoch(ctx, genesis)
}
func (s *Service) startBlocksQueue(ctx context.Context, highestSlot primitives.Slot, mode syncMode) (*blocksQueue, error) {
vr := s.clock.GenesisValidatorsRoot()
ctxMap, err := sync.ContextByteVersionsForValRoot(vr)
if err != nil {
return nil, errors.Wrapf(err, "unable to initialize context version map using genesis validator root = %#x", vr)
}
summarizer, err := s.cfg.BlobStorage.WaitForSummarizer(ctx)
if err != nil {
// The summarizer is an optional optimization; we can continue without it, and only stop if there is a different error.
if !errors.Is(err, filesystem.ErrBlobStorageSummarizerUnavailable) {
return nil, err
}
summarizer = nil // This should already be nil, but we'll set it just to be safe.
}
cfg := &blocksQueueConfig{
p2p: s.cfg.P2P,
db: s.cfg.DB,
chain: s.cfg.Chain,
clock: s.clock,
ctxMap: ctxMap,
highestExpectedSlot: highestSlot,
mode: mode,
bs: summarizer,
}
queue := newBlocksQueue(ctx, cfg)
if err := queue.start(); err != nil {
return nil, err
}
return queue, nil
}
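The WaitForSummarizer handling above treats the summarizer strictly as an optional optimization: the "unavailable" error is tolerated and anything else aborts startup. A small sketch of that pattern, with a stand-in error value in place of filesystem.ErrBlobStorageSummarizerUnavailable (the remaining names are illustrative):
package main

import (
	"errors"
	"fmt"
)

// ErrSummarizerUnavailable stands in for filesystem.ErrBlobStorageSummarizerUnavailable.
var ErrSummarizerUnavailable = errors.New("blob storage summarizer unavailable")

type summarizer struct{}

// optionalSummarizer returns nil (and no error) when the summarizer simply
// isn't available, but propagates any other failure.
func optionalSummarizer(wait func() (*summarizer, error)) (*summarizer, error) {
	s, err := wait()
	if err != nil {
		if !errors.Is(err, ErrSummarizerUnavailable) {
			return nil, err
		}
		return nil, nil // optional optimization: continue without it
	}
	return s, nil
}

func main() {
	s, err := optionalSummarizer(func() (*summarizer, error) { return nil, ErrSummarizerUnavailable })
	fmt.Println(s == nil, err == nil) // true true
}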
// syncToFinalizedEpoch syncs from head to the best known finalized epoch.
// syncToFinalizedEpoch syncs from head to best known finalized epoch.
func (s *Service) syncToFinalizedEpoch(ctx context.Context, genesis time.Time) error {
highestFinalizedSlot, err := slots.EpochStart(s.highestFinalizedEpoch())
if err != nil {
@@ -107,12 +73,28 @@ func (s *Service) syncToFinalizedEpoch(ctx context.Context, genesis time.Time) e
return nil
}
queue, err := s.startBlocksQueue(ctx, highestFinalizedSlot, modeStopOnFinalizedEpoch)
vr := s.clock.GenesisValidatorsRoot()
ctxMap, err := sync.ContextByteVersionsForValRoot(vr)
if err != nil {
return errors.Wrapf(err, "unable to initialize context version map using genesis validator root = %#x", vr)
}
queue := newBlocksQueue(ctx, &blocksQueueConfig{
p2p: s.cfg.P2P,
db: s.cfg.DB,
chain: s.cfg.Chain,
clock: s.clock,
ctxMap: ctxMap,
highestExpectedSlot: highestFinalizedSlot,
mode: modeStopOnFinalizedEpoch,
})
if err := queue.start(); err != nil {
return err
}
for data := range queue.fetchedData {
// If blobs are available, verify that the blobs and blocks are consistent.
// We can't import a block if its associated blobs are missing within the DA bound.
// The blobs also have to pass the aggregated proof check.
s.processFetchedData(ctx, genesis, s.cfg.Chain.HeadSlot(), data)
}
@@ -130,8 +112,21 @@ func (s *Service) syncToFinalizedEpoch(ctx context.Context, genesis time.Time) e
// syncToNonFinalizedEpoch syncs from head to the best known non-finalized epoch supported by a majority
// of peers (no fewer than MinimumSyncPeers*2 peers).
func (s *Service) syncToNonFinalizedEpoch(ctx context.Context, genesis time.Time) error {
queue, err := s.startBlocksQueue(ctx, slots.Since(genesis), modeNonConstrained)
vr := s.clock.GenesisValidatorsRoot()
ctxMap, err := sync.ContextByteVersionsForValRoot(vr)
if err != nil {
return errors.Wrapf(err, "unable to initialize context version map using genesis validator root = %#x", vr)
}
queue := newBlocksQueue(ctx, &blocksQueueConfig{
p2p: s.cfg.P2P,
db: s.cfg.DB,
chain: s.cfg.Chain,
clock: s.clock,
ctxMap: ctxMap,
highestExpectedSlot: slots.Since(genesis),
mode: modeNonConstrained,
})
if err := queue.start(); err != nil {
return err
}
for data := range queue.fetchedData {
@@ -172,7 +167,7 @@ func (s *Service) processFetchedDataRegSync(
if len(bwb) == 0 {
return
}
bv := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.InitsyncSidecarRequirements)
bv := newBlobBatchVerifier(s.newBlobVerifier)
avs := das.NewLazilyPersistentStore(s.cfg.BlobStorage, bv)
batchFields := logrus.Fields{
"firstSlot": data.bwb[0].Block.Block().Slot(),
@@ -331,7 +326,7 @@ func (s *Service) processBatchedBlocks(ctx context.Context, genesis time.Time,
errParentDoesNotExist, first.Block().ParentRoot(), first.Block().Slot())
}
bv := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.InitsyncSidecarRequirements)
bv := newBlobBatchVerifier(s.newBlobVerifier)
avs := das.NewLazilyPersistentStore(s.cfg.BlobStorage, bv)
s.logBatchSyncStatus(genesis, first, len(bwb))
for _, bb := range bwb {

Some files were not shown because too many files have changed in this diff.