Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-11 06:18:05 -05:00)

Compare commits: community_...fixE2E (16 commits)

| SHA1 |
|---|
| aa1327701e |
| 1a0c07deec |
| 04f231a400 |
| be1bfcce63 |
| 8cf5d79852 |
| f7912e7c20 |
| caa8be5dd1 |
| 0c15a30a34 |
| 7bce1c0714 |
| d1084cbe48 |
| 2cc3f69a3f |
| a861489a83 |
| 0e1c585f7d |
| 9df20e616c |
| 53fdd2d062 |
| 2b4bb5d890 |
WORKSPACE (16 changed lines)
@@ -342,22 +342,6 @@ filegroup(
     url = "https://github.com/eth-clients/eth2-networks/archive/934c948e69205dcf2deb87e4ae6cc140c335f94d.tar.gz",
 )
 
-http_archive(
-    name = "goerli_testnet",
-    build_file_content = """
-filegroup(
-    name = "configs",
-    srcs = [
-        "prater/config.yaml",
-    ],
-    visibility = ["//visibility:public"],
-)
-""",
-    sha256 = "43fc0f55ddff7b511713e2de07aa22846a67432df997296fb4fc09cd8ed1dcdb",
-    strip_prefix = "goerli-6522ac6684693740cd4ddcc2a0662e03702aa4a1",
-    url = "https://github.com/eth-clients/goerli/archive/6522ac6684693740cd4ddcc2a0662e03702aa4a1.tar.gz",
-)
-
 http_archive(
     name = "holesky_testnet",
     build_file_content = """
@@ -88,7 +88,7 @@ func TestToggle(t *testing.T) {
 	}
 }
 
-func TestToogleMultipleTimes(t *testing.T) {
+func TestToggleMultipleTimes(t *testing.T) {
 	t.Parallel()
 
 	v := New()
@@ -101,16 +101,16 @@ func TestToogleMultipleTimes(t *testing.T) {
 
 		expected := i%2 != 0
 		if v.IsSet() != expected {
-			t.Fatalf("AtomicBool.Toogle() doesn't work after %d calls, expected: %v, got %v", i, expected, v.IsSet())
+			t.Fatalf("AtomicBool.Toggle() doesn't work after %d calls, expected: %v, got %v", i, expected, v.IsSet())
 		}
 
 		if pre == v.IsSet() {
-			t.Fatalf("AtomicBool.Toogle() returned wrong value at the %dth calls, expected: %v, got %v", i, !v.IsSet(), pre)
+			t.Fatalf("AtomicBool.Toggle() returned wrong value at the %dth calls, expected: %v, got %v", i, !v.IsSet(), pre)
 		}
 	}
 }
 
-func TestToogleAfterOverflow(t *testing.T) {
+func TestToggleAfterOverflow(t *testing.T) {
 	t.Parallel()
 
 	var value int32 = math.MaxInt32
@@ -122,7 +122,7 @@ func TestToogleAfterOverflow(t *testing.T) {
 	v.Toggle()
 	expected := math.MaxInt32%2 == 0
 	if v.IsSet() != expected {
-		t.Fatalf("AtomicBool.Toogle() doesn't work after overflow, expected: %v, got %v", expected, v.IsSet())
+		t.Fatalf("AtomicBool.Toggle() doesn't work after overflow, expected: %v, got %v", expected, v.IsSet())
 	}
 
 	// make sure overflow happened
@@ -135,7 +135,7 @@ func TestToogleAfterOverflow(t *testing.T) {
 	v.Toggle()
 	expected = !expected
 	if v.IsSet() != expected {
-		t.Fatalf("AtomicBool.Toogle() doesn't work after the second call after overflow, expected: %v, got %v", expected, v.IsSet())
+		t.Fatalf("AtomicBool.Toggle() doesn't work after the second call after overflow, expected: %v, got %v", expected, v.IsSet())
 	}
 }
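The tests above only exercise the renamed Toggle helpers. For orientation, here is an illustrative stand-in for the API the tests imply (New, Toggle, IsSet); it is not part of the diff, is not concurrency-safe, and only shows the call pattern:

```go
package main

import "fmt"

// Illustrative stand-in for the AtomicBool the tests exercise. The real type
// flips an int32 with atomic adds, which is why the tests also cover overflow.
type AtomicBool struct{ v int32 }

func New() *AtomicBool { return &AtomicBool{} }

// Toggle flips the value and, as the tests imply, reports the previous state.
func (b *AtomicBool) Toggle() bool {
	old := b.IsSet()
	b.v++
	return old
}

func (b *AtomicBool) IsSet() bool { return b.v%2 != 0 }

func main() {
	v := New()
	pre := v.Toggle()
	fmt.Println(pre, v.IsSet()) // false true
}
```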
@@ -20,6 +20,7 @@ package event
 import (
 	"errors"
 	"reflect"
+	"slices"
 	"sync"
 )
 
@@ -219,12 +220,9 @@ type caseList []reflect.SelectCase
 
 // find returns the index of a case containing the given channel.
 func (cs caseList) find(channel interface{}) int {
-	for i, cas := range cs {
-		if cas.Chan.Interface() == channel {
-			return i
-		}
-	}
-	return -1
+	return slices.IndexFunc(cs, func(selectCase reflect.SelectCase) bool {
+		return selectCase.Chan.Interface() == channel
+	})
 }
 
 // delete removes the given case from cs.
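The hunk above replaces a hand-rolled search loop with slices.IndexFunc from the Go standard library (available since Go 1.21). A small self-contained illustration of the same pattern, not taken from the Prysm code:

```go
package main

import (
	"fmt"
	"slices"
)

func main() {
	// slices.IndexFunc returns the index of the first element satisfying the
	// predicate, or -1 if none does, which is exactly the contract the removed
	// loop implemented by hand.
	names := []string{"attester", "proposer", "syncer"}
	fmt.Println(slices.IndexFunc(names, func(s string) bool { return s == "proposer" })) // 1
	fmt.Println(slices.IndexFunc(names, func(s string) bool { return s == "builder" }))  // -1
}
```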
@@ -63,7 +63,7 @@ func Scatter(inputLen int, sFunc func(int, int, *sync.RWMutex) (interface{}, err
 	return results, nil
 }
 
-// calculateChunkSize calculates a suitable chunk size for the purposes of parallelisation.
+// calculateChunkSize calculates a suitable chunk size for the purposes of parallelization.
 func calculateChunkSize(items int) int {
 	// Start with a simple even split
 	chunkSize := items / runtime.GOMAXPROCS(0)
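Only the first line of calculateChunkSize appears in the hunk. As a rough sketch of the even-split idea it describes (assumed details; Prysm's full implementation may clamp or round differently):

```go
package main

import (
	"fmt"
	"runtime"
)

// chunkSize sketches the "simple even split": divide the work across the
// available CPUs and never hand back a chunk smaller than one item.
func chunkSize(items int) int {
	size := items / runtime.GOMAXPROCS(0)
	if size < 1 {
		return 1
	}
	return size
}

func main() {
	// With GOMAXPROCS = 8, 1000 items would yield chunks of 125.
	fmt.Println(chunkSize(1000))
}
```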
@@ -61,7 +61,7 @@ func TestService_headNextSyncCommitteeIndices(t *testing.T) {
 	indices, err := c.headNextSyncCommitteeIndices(context.Background(), 0, primitives.Slot(slot))
 	require.NoError(t, err)
 
-	// NextSyncCommittee should be be empty after `ProcessSyncCommitteeUpdates`. Validator should get indices.
+	// NextSyncCommittee should be empty after `ProcessSyncCommitteeUpdates`. Validator should get indices.
 	require.NotEqual(t, 0, len(indices))
 }
@@ -60,7 +60,7 @@ func (s *Service) getFCUArgsEarlyBlock(cfg *postBlockProcessConfig, fcuArgs *fcu
 
 // logNonCanonicalBlockReceived prints a message informing that the received
 // block is not the head of the chain. It requires the caller holds a lock on
-// Foprkchoice.
+// Forkchoice.
 func (s *Service) logNonCanonicalBlockReceived(blockRoot [32]byte, headRoot [32]byte) {
 	receivedWeight, err := s.cfg.ForkChoiceStore.Weight(blockRoot)
 	if err != nil {
@@ -170,7 +170,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
 	// Send finalized events and finalized deposits in the background
 	if newFinalized {
 		finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
-		go s.sendNewFinalizedEvent(blockCopy, postState)
+		go s.sendNewFinalizedEvent(ctx, postState)
 		depCtx, cancel := context.WithTimeout(context.Background(), depositDeadline)
 		go func() {
 			s.insertFinalizedDeposits(depCtx, finalized.Root)
@@ -443,7 +443,7 @@ func (s *Service) updateFinalizationOnBlock(ctx context.Context, preState, postS
 
 // sendNewFinalizedEvent sends a new finalization checkpoint event over the
 // event feed. It needs to be called on the background
-func (s *Service) sendNewFinalizedEvent(signed interfaces.ReadOnlySignedBeaconBlock, postState state.BeaconState) {
+func (s *Service) sendNewFinalizedEvent(ctx context.Context, postState state.BeaconState) {
 	isValidPayload := false
 	s.headLock.RLock()
 	if s.head != nil {
@@ -451,8 +451,17 @@ func (s *Service) sendNewFinalizedEvent(signed interfaces.ReadOnlySignedBeaconBl
 	}
 	s.headLock.RUnlock()
 
+	blk, err := s.cfg.BeaconDB.Block(ctx, bytesutil.ToBytes32(postState.FinalizedCheckpoint().Root))
+	if err != nil {
+		log.WithError(err).Error("Could not retrieve block for finalized checkpoint root. Finalized event will not be emitted")
+		return
+	}
+	if blk == nil || blk.IsNil() || blk.Block() == nil || blk.Block().IsNil() {
+		log.WithError(err).Error("Block retrieved for finalized checkpoint root is nil. Finalized event will not be emitted")
+		return
+	}
+	stateRoot := blk.Block().StateRoot()
 	// Send an event regarding the new finalized checkpoint over a common event feed.
-	stateRoot := signed.Block().StateRoot()
 	s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
 		Type: statefeed.FinalizedCheckpoint,
 		Data: &ethpbv1.EventFinalizedCheckpoint{
@@ -8,12 +8,14 @@ import (
|
||||
|
||||
blockchainTesting "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
|
||||
statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/das"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/voluntaryexits"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
ethpbv1 "github.com/prysmaticlabs/prysm/v5/proto/eth/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
@@ -378,3 +380,38 @@ func TestHandleBlockBLSToExecutionChanges(t *testing.T) {
|
||||
require.Equal(t, false, pool.ValidatorExists(idx))
|
||||
})
|
||||
}
|
||||
|
||||
func Test_sendNewFinalizedEvent(t *testing.T) {
|
||||
s, _ := minimalTestService(t)
|
||||
notifier := &blockchainTesting.MockStateNotifier{RecordEvents: true}
|
||||
s.cfg.StateNotifier = notifier
|
||||
finalizedSt, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
finalizedStRoot, err := finalizedSt.HashTreeRoot(s.ctx)
|
||||
require.NoError(t, err)
|
||||
b := util.NewBeaconBlock()
|
||||
b.Block.StateRoot = finalizedStRoot[:]
|
||||
sbb, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
sbbRoot, err := sbb.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.cfg.BeaconDB.SaveBlock(s.ctx, sbb))
|
||||
st, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st.SetFinalizedCheckpoint(&ethpb.Checkpoint{
|
||||
Epoch: 123,
|
||||
Root: sbbRoot[:],
|
||||
}))
|
||||
|
||||
s.sendNewFinalizedEvent(s.ctx, st)
|
||||
|
||||
require.Equal(t, 1, len(notifier.ReceivedEvents()))
|
||||
e := notifier.ReceivedEvents()[0]
|
||||
assert.Equal(t, statefeed.FinalizedCheckpoint, int(e.Type))
|
||||
fc, ok := e.Data.(*ethpbv1.EventFinalizedCheckpoint)
|
||||
require.Equal(t, true, ok, "event has wrong data type")
|
||||
assert.Equal(t, primitives.Epoch(123), fc.Epoch)
|
||||
assert.DeepEqual(t, sbbRoot[:], fc.Block)
|
||||
assert.DeepEqual(t, finalizedStRoot[:], fc.State)
|
||||
assert.Equal(t, false, fc.ExecutionOptimistic)
|
||||
}
|
||||
|
||||
@@ -804,7 +804,7 @@ func TestFinalizedDeposits_ReturnsTrieCorrectly(t *testing.T) {
 	depositTrie, err := trie.GenerateTrieFromItems(trieItems, params.BeaconConfig().DepositContractTreeDepth)
 	assert.NoError(t, err)
 
-	// Perform this in a non-sensical ordering
+	// Perform this in a nonsensical ordering
 	require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 10, [32]byte{}, 0))
 	require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 2, [32]byte{}, 0))
 	require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 3, [32]byte{}, 0))

@@ -784,7 +784,7 @@ func TestFinalizedDeposits_ReturnsTrieCorrectly(t *testing.T) {
 	depositTrie, err := trie.GenerateTrieFromItems(trieItems, params.BeaconConfig().DepositContractTreeDepth)
 	assert.NoError(t, err)
 
-	// Perform this in a non-sensical ordering
+	// Perform this in a nonsensical ordering
 	err = dc.InsertFinalizedDeposits(context.Background(), 1, [32]byte{}, 0)
 	require.NoError(t, err)
 	err = dc.InsertFinalizedDeposits(context.Background(), 2, [32]byte{}, 0)
@@ -22,7 +22,7 @@ var balanceCache = cache.NewEffectiveBalanceCache()
 // """
 // Return the combined effective balance of the ``indices``.
 // ``EFFECTIVE_BALANCE_INCREMENT`` Gwei minimum to avoid divisions by zero.
-// Math safe up to ~10B ETH, afterwhich this overflows uint64.
+// Math safe up to ~10B ETH, after which this overflows uint64.
 // """
 // return Gwei(max(EFFECTIVE_BALANCE_INCREMENT, sum([state.validators[index].effective_balance for index in indices])))
 func TotalBalance(state state.ReadOnlyValidators, indices []primitives.ValidatorIndex) uint64 {
@@ -59,7 +59,7 @@ func ComputeDomainAndSign(st state.ReadOnlyBeaconState, epoch primitives.Epoch,
 	return ComputeDomainAndSignWithoutState(st.Fork(), epoch, domain, st.GenesisValidatorsRoot(), obj, key)
 }
 
-// ComputeDomainAndSignWithoutState offers the same functionalit as ComputeDomainAndSign without the need to provide a BeaconState.
+// ComputeDomainAndSignWithoutState offers the same functionality as ComputeDomainAndSign without the need to provide a BeaconState.
 // This is particularly helpful for signing values in tests.
 func ComputeDomainAndSignWithoutState(fork *ethpb.Fork, epoch primitives.Epoch, domain [4]byte, vr []byte, obj fssz.HashRoot, key bls.SecretKey) ([]byte, error) {
 	// EIP-7044: Beginning in Deneb, fix the fork version to Capella for signed exits.
@@ -94,6 +94,15 @@ func (s *LazilyPersistentStore) IsDataAvailable(ctx context.Context, current pri
 	entry := s.cache.ensure(key)
 	defer s.cache.delete(key)
 	root := b.Root()
+	sumz, err := s.store.WaitForSummarizer(ctx)
+	if err != nil {
+		log.WithField("root", fmt.Sprintf("%#x", b.Root())).
+			WithError(err).
+			Debug("Failed to receive BlobStorageSummarizer within IsDataAvailable")
+	} else {
+		entry.setDiskSummary(sumz.Summary(root))
+	}
+
 	// Verify we have all the expected sidecars, and fail fast if any are missing or inconsistent.
 	// We don't try to salvage problematic batches because this indicates a misbehaving peer and we'd rather
 	// ignore their response and decrease their peer score.
@@ -4,6 +4,7 @@ import (
|
||||
"bytes"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
@@ -59,7 +60,12 @@ func (c *cache) delete(key cacheKey) {
|
||||
|
||||
// cacheEntry holds a fixed-length cache of BlobSidecars.
|
||||
type cacheEntry struct {
|
||||
scs [fieldparams.MaxBlobsPerBlock]*blocks.ROBlob
|
||||
scs [fieldparams.MaxBlobsPerBlock]*blocks.ROBlob
|
||||
diskSummary filesystem.BlobStorageSummary
|
||||
}
|
||||
|
||||
func (e *cacheEntry) setDiskSummary(sum filesystem.BlobStorageSummary) {
|
||||
e.diskSummary = sum
|
||||
}
|
||||
|
||||
// stash adds an item to the in-memory cache of BlobSidecars.
|
||||
@@ -81,9 +87,17 @@ func (e *cacheEntry) stash(sc *blocks.ROBlob) error {
|
||||
// the cache do not match those found in the block. If err is nil, then all expected
|
||||
// commitments were found in the cache and the sidecar slice return value can be used
|
||||
// to perform a DA check against the cached sidecars.
|
||||
// filter only returns blobs that need to be checked. Blobs already available on disk will be excluded.
|
||||
func (e *cacheEntry) filter(root [32]byte, kc safeCommitmentArray) ([]blocks.ROBlob, error) {
|
||||
if e.diskSummary.AllAvailable(kc.count()) {
|
||||
return nil, nil
|
||||
}
|
||||
scs := make([]blocks.ROBlob, kc.count())
|
||||
for i := uint64(0); i < fieldparams.MaxBlobsPerBlock; i++ {
|
||||
// We already have this blob, we don't need to write it or validate it.
|
||||
if e.diskSummary.HasIndex(i) {
|
||||
continue
|
||||
}
|
||||
if kc[i] == nil {
|
||||
if e.scs[i] != nil {
|
||||
return nil, errors.Wrapf(errCommitmentMismatch, "root=%#x, index=%#x, commitment=%#x, no block commitment", root, i, e.scs[i].KzgCommitment)
|
||||
|
||||
@@ -4,9 +4,10 @@ go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"blob.go",
|
||||
"ephemeral.go",
|
||||
"cache.go",
|
||||
"log.go",
|
||||
"metrics.go",
|
||||
"mock.go",
|
||||
"pruner.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem",
|
||||
@@ -33,6 +34,7 @@ go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"blob_test.go",
|
||||
"cache_test.go",
|
||||
"pruner_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
|
||||
@@ -1,6 +1,7 @@
 package filesystem
 
 import (
+	"context"
 	"fmt"
 	"os"
 	"path"
@@ -103,12 +104,29 @@ func (bs *BlobStorage) WarmCache() {
 		return
 	}
 	go func() {
-		if err := bs.pruner.prune(0); err != nil {
+		start := time.Now()
+		if err := bs.pruner.warmCache(); err != nil {
 			log.WithError(err).Error("Error encountered while warming up blob pruner cache")
 		}
+		log.WithField("elapsed", time.Since(start)).Info("Blob filesystem cache warm-up complete.")
 	}()
 }
 
+// ErrBlobStorageSummarizerUnavailable is a sentinel error returned when there is no pruner/cache available.
+// This should be used by code that optionally uses the summarizer to optimize rpc requests. Being able to
+// fallback when there is no summarizer allows client code to avoid test complexity where the summarizer doesn't matter.
+var ErrBlobStorageSummarizerUnavailable = errors.New("BlobStorage not initialized with a pruner or cache")
+
+// WaitForSummarizer blocks until the BlobStorageSummarizer is ready to use.
+// BlobStorageSummarizer is not ready immediately on node startup because it needs to sample the blob filesystem to
+// determine which blobs are available.
+func (bs *BlobStorage) WaitForSummarizer(ctx context.Context) (BlobStorageSummarizer, error) {
+	if bs == nil || bs.pruner == nil {
+		return nil, ErrBlobStorageSummarizerUnavailable
+	}
+	return bs.pruner.waitForCache(ctx)
+}
+
 // Save saves blobs given a list of sidecars.
 func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error {
 	startTime := time.Now()
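WaitForSummarizer, added above, blocks until the pruner has finished its first scan of the blob directory. A hedged sketch of how a caller might bound that wait (the helper name, timeout, and fallback behavior are assumptions; only WaitForSummarizer, Summary, and ErrBlobStorageSummarizerUnavailable come from the diff):

```go
package example

import (
	"context"
	"time"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
)

// summaryFor is a sketch: bound the wait so callers degrade gracefully instead
// of blocking forever when the warm-up is slow or blob storage is absent.
func summaryFor(ctx context.Context, bs *filesystem.BlobStorage, root [32]byte) (filesystem.BlobStorageSummary, bool) {
	ctx, cancel := context.WithTimeout(ctx, 5*time.Second) // assumed timeout
	defer cancel()

	sumz, err := bs.WaitForSummarizer(ctx)
	if err != nil {
		// Covers ErrBlobStorageSummarizerUnavailable and context errors;
		// the caller falls back to the non-optimized path.
		return filesystem.BlobStorageSummary{}, false
	}
	return sumz.Summary(root), true
}
```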
beacon-chain/db/filesystem/cache.go (new file, 119 lines)
@@ -0,0 +1,119 @@
|
||||
package filesystem
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
)
|
||||
|
||||
// blobIndexMask is a bitmask representing the set of blob indices that are currently set.
|
||||
type blobIndexMask [fieldparams.MaxBlobsPerBlock]bool
|
||||
|
||||
// BlobStorageSummary represents cached information about the BlobSidecars on disk for each root the cache knows about.
|
||||
type BlobStorageSummary struct {
|
||||
slot primitives.Slot
|
||||
mask blobIndexMask
|
||||
}
|
||||
|
||||
// HasIndex returns true if the BlobSidecar at the given index is available in the filesystem.
|
||||
func (s BlobStorageSummary) HasIndex(idx uint64) bool {
|
||||
// Protect from panic, but assume callers are sophisticated enough to not need an error telling them they have an invalid idx.
|
||||
if idx >= fieldparams.MaxBlobsPerBlock {
|
||||
return false
|
||||
}
|
||||
return s.mask[idx]
|
||||
}
|
||||
|
||||
// AllAvailable returns true if we have all blobs for all indices from 0 to count-1.
|
||||
func (s BlobStorageSummary) AllAvailable(count int) bool {
|
||||
if count > fieldparams.MaxBlobsPerBlock {
|
||||
return false
|
||||
}
|
||||
for i := 0; i < count; i++ {
|
||||
if !s.mask[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// BlobStorageSummarizer can be used to receive a summary of metadata about blobs on disk for a given root.
|
||||
// The BlobStorageSummary can be used to check which indices (if any) are available for a given block by root.
|
||||
type BlobStorageSummarizer interface {
|
||||
Summary(root [32]byte) BlobStorageSummary
|
||||
}
|
||||
|
||||
type blobStorageCache struct {
|
||||
mu sync.RWMutex
|
||||
nBlobs float64
|
||||
cache map[string]BlobStorageSummary
|
||||
}
|
||||
|
||||
var _ BlobStorageSummarizer = &blobStorageCache{}
|
||||
|
||||
func newBlobStorageCache() *blobStorageCache {
|
||||
return &blobStorageCache{
|
||||
cache: make(map[string]BlobStorageSummary, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest*fieldparams.SlotsPerEpoch),
|
||||
}
|
||||
}
|
||||
|
||||
// Summary returns the BlobStorageSummary for `root`. The BlobStorageSummary can be used to check for the presence of
|
||||
// BlobSidecars based on Index.
|
||||
func (s *blobStorageCache) Summary(root [32]byte) BlobStorageSummary {
|
||||
k := rootString(root)
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
return s.cache[k]
|
||||
}
|
||||
|
||||
func (s *blobStorageCache) ensure(key string, slot primitives.Slot, idx uint64) error {
|
||||
if idx >= fieldparams.MaxBlobsPerBlock {
|
||||
return errIndexOutOfBounds
|
||||
}
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
v := s.cache[key]
|
||||
v.slot = slot
|
||||
if !v.mask[idx] {
|
||||
s.updateMetrics(1)
|
||||
}
|
||||
v.mask[idx] = true
|
||||
s.cache[key] = v
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *blobStorageCache) slot(key string) (primitives.Slot, bool) {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
v, ok := s.cache[key]
|
||||
if !ok {
|
||||
return 0, false
|
||||
}
|
||||
return v.slot, ok
|
||||
}
|
||||
|
||||
func (s *blobStorageCache) evict(key string) {
|
||||
var deleted float64
|
||||
s.mu.Lock()
|
||||
v, ok := s.cache[key]
|
||||
if ok {
|
||||
for i := range v.mask {
|
||||
if v.mask[i] {
|
||||
deleted += 1
|
||||
}
|
||||
}
|
||||
}
|
||||
delete(s.cache, key)
|
||||
s.mu.Unlock()
|
||||
if deleted > 0 {
|
||||
s.updateMetrics(-deleted)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *blobStorageCache) updateMetrics(delta float64) {
|
||||
s.nBlobs += delta
|
||||
blobDiskCount.Set(s.nBlobs)
|
||||
blobDiskSize.Set(s.nBlobs * bytesPerSidecar)
|
||||
}
|
||||
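The new cache.go above exposes the summary API (Summary, HasIndex, AllAvailable). A hedged sketch of how a consumer might use a summary to decide which sidecars still need to be fetched; the helper name and surrounding logic are illustrative, only the summary methods come from the diff:

```go
package example

import (
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
)

// missingIndices is a sketch: given the number of commitments in a block and
// the on-disk summary for its root, list the sidecar indices still needed.
func missingIndices(sum filesystem.BlobStorageSummary, commitments int) []uint64 {
	if sum.AllAvailable(commitments) {
		return nil // everything the block commits to is already on disk
	}
	var need []uint64
	for i := uint64(0); i < uint64(commitments); i++ {
		if !sum.HasIndex(i) {
			need = append(need, i)
		}
	}
	return need
}
```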
beacon-chain/db/filesystem/cache_test.go (new file, 150 lines)
@@ -0,0 +1,150 @@
|
||||
package filesystem
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
)
|
||||
|
||||
func TestSlotByRoot_Summary(t *testing.T) {
|
||||
var noneSet, allSet, firstSet, lastSet, oneSet blobIndexMask
|
||||
firstSet[0] = true
|
||||
lastSet[len(lastSet)-1] = true
|
||||
oneSet[1] = true
|
||||
for i := range allSet {
|
||||
allSet[i] = true
|
||||
}
|
||||
cases := []struct {
|
||||
name string
|
||||
root [32]byte
|
||||
expected *blobIndexMask
|
||||
}{
|
||||
{
|
||||
name: "not found",
|
||||
},
|
||||
{
|
||||
name: "none set",
|
||||
expected: &noneSet,
|
||||
},
|
||||
{
|
||||
name: "index 1 set",
|
||||
expected: &oneSet,
|
||||
},
|
||||
{
|
||||
name: "all set",
|
||||
expected: &allSet,
|
||||
},
|
||||
{
|
||||
name: "first set",
|
||||
expected: &firstSet,
|
||||
},
|
||||
{
|
||||
name: "last set",
|
||||
expected: &lastSet,
|
||||
},
|
||||
}
|
||||
sc := newBlobStorageCache()
|
||||
for _, c := range cases {
|
||||
if c.expected != nil {
|
||||
key := rootString(bytesutil.ToBytes32([]byte(c.name)))
|
||||
sc.cache[key] = BlobStorageSummary{slot: 0, mask: *c.expected}
|
||||
}
|
||||
}
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
key := bytesutil.ToBytes32([]byte(c.name))
|
||||
sum := sc.Summary(key)
|
||||
for i := range c.expected {
|
||||
ui := uint64(i)
|
||||
if c.expected == nil {
|
||||
require.Equal(t, false, sum.HasIndex(ui))
|
||||
} else {
|
||||
require.Equal(t, c.expected[i], sum.HasIndex(ui))
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestAllAvailable(t *testing.T) {
|
||||
idxUpTo := func(u int) []int {
|
||||
r := make([]int, u)
|
||||
for i := range r {
|
||||
r[i] = i
|
||||
}
|
||||
return r
|
||||
}
|
||||
require.DeepEqual(t, []int{}, idxUpTo(0))
|
||||
require.DeepEqual(t, []int{0}, idxUpTo(1))
|
||||
require.DeepEqual(t, []int{0, 1, 2, 3, 4, 5}, idxUpTo(6))
|
||||
cases := []struct {
|
||||
name string
|
||||
idxSet []int
|
||||
count int
|
||||
aa bool
|
||||
}{
|
||||
{
|
||||
// If there are no blobs committed, then all the committed blobs are available.
|
||||
name: "none in idx, 0 arg",
|
||||
count: 0,
|
||||
aa: true,
|
||||
},
|
||||
{
|
||||
name: "none in idx, 1 arg",
|
||||
count: 1,
|
||||
aa: false,
|
||||
},
|
||||
{
|
||||
name: "first in idx, 1 arg",
|
||||
idxSet: []int{0},
|
||||
count: 1,
|
||||
aa: true,
|
||||
},
|
||||
{
|
||||
name: "second in idx, 1 arg",
|
||||
idxSet: []int{1},
|
||||
count: 1,
|
||||
aa: false,
|
||||
},
|
||||
{
|
||||
name: "first missing, 2 arg",
|
||||
idxSet: []int{1},
|
||||
count: 2,
|
||||
aa: false,
|
||||
},
|
||||
{
|
||||
name: "all missing, 1 arg",
|
||||
count: 6,
|
||||
aa: false,
|
||||
},
|
||||
{
|
||||
name: "out of bound is safe",
|
||||
count: fieldparams.MaxBlobsPerBlock + 1,
|
||||
aa: false,
|
||||
},
|
||||
{
|
||||
name: "max present",
|
||||
count: fieldparams.MaxBlobsPerBlock,
|
||||
idxSet: idxUpTo(fieldparams.MaxBlobsPerBlock),
|
||||
aa: true,
|
||||
},
|
||||
{
|
||||
name: "one present",
|
||||
count: 1,
|
||||
idxSet: idxUpTo(1),
|
||||
aa: true,
|
||||
},
|
||||
}
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
var mask blobIndexMask
|
||||
for _, idx := range c.idxSet {
|
||||
mask[idx] = true
|
||||
}
|
||||
sum := BlobStorageSummary{mask: mask}
|
||||
require.Equal(t, c.aa, sum.AllAvailable(c.count))
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -12,7 +12,7 @@ import (
|
||||
// improving test performance and simplifying cleanup.
|
||||
func NewEphemeralBlobStorage(t testing.TB) *BlobStorage {
|
||||
fs := afero.NewMemMapFs()
|
||||
pruner, err := newBlobPruner(fs, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
|
||||
pruner, err := newBlobPruner(fs, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest, withWarmedCache())
|
||||
if err != nil {
|
||||
t.Fatal("test setup issue", err)
|
||||
}
|
||||
@@ -23,7 +23,7 @@ func NewEphemeralBlobStorage(t testing.TB) *BlobStorage {
|
||||
// in order to interact with it outside the parameters of the BlobStorage api.
|
||||
func NewEphemeralBlobStorageWithFs(t testing.TB) (afero.Fs, *BlobStorage, error) {
|
||||
fs := afero.NewMemMapFs()
|
||||
pruner, err := newBlobPruner(fs, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
|
||||
pruner, err := newBlobPruner(fs, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest, withWarmedCache())
|
||||
if err != nil {
|
||||
t.Fatal("test setup issue", err)
|
||||
}
|
||||
@@ -61,3 +61,15 @@ func NewEphemeralBlobStorageWithMocker(_ testing.TB) (*BlobMocker, *BlobStorage)
|
||||
bs := &BlobStorage{fs: fs}
|
||||
return &BlobMocker{fs: fs, bs: bs}, bs
|
||||
}
|
||||
|
||||
func NewMockBlobStorageSummarizer(t *testing.T, set map[[32]byte][]int) BlobStorageSummarizer {
|
||||
c := newBlobStorageCache()
|
||||
for k, v := range set {
|
||||
for i := range v {
|
||||
if err := c.ensure(rootString(k), 0, uint64(v[i])); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
return c
|
||||
}
|
||||
@@ -1,6 +1,7 @@
|
||||
package filesystem
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"io"
|
||||
"path"
|
||||
@@ -12,7 +13,6 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
@@ -32,22 +32,39 @@ type blobPruner struct {
|
||||
sync.Mutex
|
||||
prunedBefore atomic.Uint64
|
||||
windowSize primitives.Slot
|
||||
slotMap *slotForRoot
|
||||
cache *blobStorageCache
|
||||
cacheReady chan struct{}
|
||||
warmed bool
|
||||
fs afero.Fs
|
||||
}
|
||||
|
||||
func newBlobPruner(fs afero.Fs, retain primitives.Epoch) (*blobPruner, error) {
|
||||
type prunerOpt func(*blobPruner) error
|
||||
|
||||
func withWarmedCache() prunerOpt {
|
||||
return func(p *blobPruner) error {
|
||||
return p.warmCache()
|
||||
}
|
||||
}
|
||||
|
||||
func newBlobPruner(fs afero.Fs, retain primitives.Epoch, opts ...prunerOpt) (*blobPruner, error) {
|
||||
r, err := slots.EpochStart(retain + retentionBuffer)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not set retentionSlots")
|
||||
}
|
||||
return &blobPruner{fs: fs, windowSize: r, slotMap: newSlotForRoot()}, nil
|
||||
cw := make(chan struct{})
|
||||
p := &blobPruner{fs: fs, windowSize: r, cache: newBlobStorageCache(), cacheReady: cw}
|
||||
for _, o := range opts {
|
||||
if err := o(p); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return p, nil
|
||||
}
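newBlobPruner now accepts variadic prunerOpt options; withWarmedCache is the only option introduced in this change and is what the ephemeral test storage passes. A hedged sketch of the wiring (the helper name is not in the diff, and since withWarmedCache is package-private this would live inside the filesystem package; the in-memory afero filesystem mirrors the test helpers):

```go
package filesystem

import (
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/spf13/afero"
)

// newWarmedPruner is a sketch mirroring how NewEphemeralBlobStorage builds its
// pruner: an in-memory filesystem plus a cache warmed synchronously at
// construction time via the withWarmedCache option.
func newWarmedPruner() (*blobPruner, error) {
	fs := afero.NewMemMapFs()
	return newBlobPruner(fs, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest, withWarmedCache())
}
```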
|
||||
|
||||
// notify updates the pruner's view of root->blob mappings. This allows the pruner to build a cache
|
||||
// of root->slot mappings and decide when to evict old blobs based on the age of present blobs.
|
||||
func (p *blobPruner) notify(root [32]byte, latest primitives.Slot, idx uint64) error {
|
||||
if err := p.slotMap.ensure(rootString(root), latest, idx); err != nil {
|
||||
if err := p.cache.ensure(rootString(root), latest, idx); err != nil {
|
||||
return err
|
||||
}
|
||||
pruned := uint64(windowMin(latest, p.windowSize))
|
||||
@@ -55,6 +72,8 @@ func (p *blobPruner) notify(root [32]byte, latest primitives.Slot, idx uint64) e
|
||||
return nil
|
||||
}
|
||||
go func() {
|
||||
p.Lock()
|
||||
defer p.Unlock()
|
||||
if err := p.prune(primitives.Slot(pruned)); err != nil {
|
||||
log.WithError(err).Errorf("Failed to prune blobs from slot %d", latest)
|
||||
}
|
||||
@@ -62,7 +81,7 @@ func (p *blobPruner) notify(root [32]byte, latest primitives.Slot, idx uint64) e
|
||||
return nil
|
||||
}
|
||||
|
||||
func windowMin(latest primitives.Slot, offset primitives.Slot) primitives.Slot {
|
||||
func windowMin(latest, offset primitives.Slot) primitives.Slot {
|
||||
// Safely compute the first slot in the epoch for the latest slot
|
||||
latest = latest - latest%params.BeaconConfig().SlotsPerEpoch
|
||||
if latest < offset {
|
||||
@@ -71,12 +90,32 @@ func windowMin(latest primitives.Slot, offset primitives.Slot) primitives.Slot {
|
||||
return latest - offset
|
||||
}
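A quick worked example of the windowMin arithmetic above, assuming mainnet's 32 slots per epoch and assuming the truncated branch returns 0 when the window exceeds the epoch start:

```go
package main

import "fmt"

// windowMinSketch mirrors the rounding shown above: drop latest to the first
// slot of its epoch, then subtract the window, flooring at zero.
func windowMinSketch(latest, offset uint64) uint64 {
	latest = latest - latest%32 // epoch start, e.g. 1000 -> 992
	if latest < offset {
		return 0
	}
	return latest - offset
}

func main() {
	fmt.Println(windowMinSketch(1000, 100)) // 992 - 100 = 892
	fmt.Println(windowMinSketch(50, 100))   // epoch start 32 < 100 -> 0
}
```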
|
||||
|
||||
func (p *blobPruner) warmCache() error {
|
||||
p.Lock()
|
||||
defer p.Unlock()
|
||||
if err := p.prune(0); err != nil {
|
||||
return err
|
||||
}
|
||||
if !p.warmed {
|
||||
p.warmed = true
|
||||
close(p.cacheReady)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *blobPruner) waitForCache(ctx context.Context) (*blobStorageCache, error) {
|
||||
select {
|
||||
case <-p.cacheReady:
|
||||
return p.cache, nil
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
}
|
||||
|
||||
// Prune prunes blobs in the base directory based on the retention epoch.
|
||||
// It deletes blobs older than currentEpoch - (retentionEpochs+bufferEpochs).
|
||||
// This is so that we keep a slight buffer and blobs are deleted after n+2 epochs.
|
||||
func (p *blobPruner) prune(pruneBefore primitives.Slot) error {
|
||||
p.Lock()
|
||||
defer p.Unlock()
|
||||
start := time.Now()
|
||||
totalPruned, totalErr := 0, 0
|
||||
// Customize logging/metrics behavior for the initial cache warmup when slot=0.
|
||||
@@ -122,7 +161,7 @@ func shouldRetain(slot, pruneBefore primitives.Slot) bool {
|
||||
|
||||
func (p *blobPruner) tryPruneDir(dir string, pruneBefore primitives.Slot) (int, error) {
|
||||
root := rootFromDir(dir)
|
||||
slot, slotCached := p.slotMap.slot(root)
|
||||
slot, slotCached := p.cache.slot(root)
|
||||
// Return early if the slot is cached and doesn't need pruning.
|
||||
if slotCached && shouldRetain(slot, pruneBefore) {
|
||||
return 0, nil
|
||||
@@ -151,7 +190,7 @@ func (p *blobPruner) tryPruneDir(dir string, pruneBefore primitives.Slot) (int,
|
||||
if err != nil {
|
||||
return 0, errors.Wrapf(err, "index could not be determined for blob file %s", scFiles[i])
|
||||
}
|
||||
if err := p.slotMap.ensure(root, slot, idx); err != nil {
|
||||
if err := p.cache.ensure(root, slot, idx); err != nil {
|
||||
return 0, errors.Wrapf(err, "could not update prune cache for blob file %s", scFiles[i])
|
||||
}
|
||||
}
|
||||
@@ -179,7 +218,7 @@ func (p *blobPruner) tryPruneDir(dir string, pruneBefore primitives.Slot) (int,
|
||||
return removed, errors.Wrapf(err, "unable to remove blob directory %s", dir)
|
||||
}
|
||||
|
||||
p.slotMap.evict(rootFromDir(dir))
|
||||
p.cache.evict(rootFromDir(dir))
|
||||
return len(scFiles), nil
|
||||
}
|
||||
|
||||
@@ -269,71 +308,3 @@ func filterSsz(s string) bool {
|
||||
func filterPart(s string) bool {
|
||||
return filepath.Ext(s) == dotPartExt
|
||||
}
|
||||
|
||||
func newSlotForRoot() *slotForRoot {
|
||||
return &slotForRoot{
|
||||
cache: make(map[string]*slotCacheEntry, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest*fieldparams.SlotsPerEpoch),
|
||||
}
|
||||
}
|
||||
|
||||
type slotCacheEntry struct {
|
||||
slot primitives.Slot
|
||||
mask [fieldparams.MaxBlobsPerBlock]bool
|
||||
}
|
||||
|
||||
type slotForRoot struct {
|
||||
sync.RWMutex
|
||||
nBlobs float64
|
||||
cache map[string]*slotCacheEntry
|
||||
}
|
||||
|
||||
func (s *slotForRoot) updateMetrics(delta float64) {
|
||||
s.nBlobs += delta
|
||||
blobDiskCount.Set(s.nBlobs)
|
||||
blobDiskSize.Set(s.nBlobs * bytesPerSidecar)
|
||||
}
|
||||
|
||||
func (s *slotForRoot) ensure(key string, slot primitives.Slot, idx uint64) error {
|
||||
if idx >= fieldparams.MaxBlobsPerBlock {
|
||||
return errIndexOutOfBounds
|
||||
}
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
v, ok := s.cache[key]
|
||||
if !ok {
|
||||
v = &slotCacheEntry{}
|
||||
}
|
||||
v.slot = slot
|
||||
if !v.mask[idx] {
|
||||
s.updateMetrics(1)
|
||||
}
|
||||
v.mask[idx] = true
|
||||
s.cache[key] = v
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *slotForRoot) slot(key string) (primitives.Slot, bool) {
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
v, ok := s.cache[key]
|
||||
if !ok {
|
||||
return 0, false
|
||||
}
|
||||
return v.slot, ok
|
||||
}
|
||||
|
||||
func (s *slotForRoot) evict(key string) {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
v, ok := s.cache[key]
|
||||
var deleted float64
|
||||
if ok {
|
||||
for i := range v.mask {
|
||||
if v.mask[i] {
|
||||
deleted += 1
|
||||
}
|
||||
}
|
||||
s.updateMetrics(-deleted)
|
||||
}
|
||||
delete(s.cache, key)
|
||||
}
|
||||
|
||||
@@ -28,7 +28,7 @@ func TestTryPruneDir_CachedNotExpired(t *testing.T) {
|
||||
root := fmt.Sprintf("%#x", sc.BlockRoot())
|
||||
// This slot is right on the edge of what would need to be pruned, so by adding it to the cache and
|
||||
// skipping any other test setup, we can be certain the hot cache path never touches the filesystem.
|
||||
require.NoError(t, pr.slotMap.ensure(root, sc.Slot(), 0))
|
||||
require.NoError(t, pr.cache.ensure(root, sc.Slot(), 0))
|
||||
pruned, err := pr.tryPruneDir(root, pr.windowSize)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, pruned)
|
||||
@@ -45,7 +45,7 @@ func TestTryPruneDir_CachedExpired(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
root := fmt.Sprintf("%#x", sc.BlockRoot())
|
||||
require.NoError(t, fs.Mkdir(root, directoryPermissions)) // make empty directory
|
||||
require.NoError(t, pr.slotMap.ensure(root, sc.Slot(), 0))
|
||||
require.NoError(t, pr.cache.ensure(root, sc.Slot(), 0))
|
||||
pruned, err := pr.tryPruneDir(root, slot+1)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, pruned)
|
||||
@@ -63,7 +63,7 @@ func TestTryPruneDir_CachedExpired(t *testing.T) {
|
||||
|
||||
// check that the root->slot is cached
|
||||
root := fmt.Sprintf("%#x", scs[0].BlockRoot())
|
||||
cs, cok := bs.pruner.slotMap.slot(root)
|
||||
cs, cok := bs.pruner.cache.slot(root)
|
||||
require.Equal(t, true, cok)
|
||||
require.Equal(t, slot, cs)
|
||||
|
||||
@@ -95,12 +95,12 @@ func TestTryPruneDir_SlotFromFile(t *testing.T) {
|
||||
|
||||
// check that the root->slot is cached
|
||||
root := fmt.Sprintf("%#x", scs[0].BlockRoot())
|
||||
cs, ok := bs.pruner.slotMap.slot(root)
|
||||
cs, ok := bs.pruner.cache.slot(root)
|
||||
require.Equal(t, true, ok)
|
||||
require.Equal(t, slot, cs)
|
||||
// evict it from the cache so that we trigger the file read path
|
||||
bs.pruner.slotMap.evict(root)
|
||||
_, ok = bs.pruner.slotMap.slot(root)
|
||||
bs.pruner.cache.evict(root)
|
||||
_, ok = bs.pruner.cache.slot(root)
|
||||
require.Equal(t, false, ok)
|
||||
|
||||
// ensure that we see the saved files in the filesystem
|
||||
@@ -119,7 +119,7 @@ func TestTryPruneDir_SlotFromFile(t *testing.T) {
|
||||
fs, bs, err := NewEphemeralBlobStorageWithFs(t)
|
||||
require.NoError(t, err)
|
||||
// Set slot equal to the window size, so it should be retained.
|
||||
var slot primitives.Slot = bs.pruner.windowSize
|
||||
slot := bs.pruner.windowSize
|
||||
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, 2)
|
||||
scs, err := verification.BlobSidecarSliceNoop(sidecars)
|
||||
require.NoError(t, err)
|
||||
@@ -129,8 +129,8 @@ func TestTryPruneDir_SlotFromFile(t *testing.T) {
|
||||
|
||||
// Evict slot mapping from the cache so that we trigger the file read path.
|
||||
root := fmt.Sprintf("%#x", scs[0].BlockRoot())
|
||||
bs.pruner.slotMap.evict(root)
|
||||
_, ok := bs.pruner.slotMap.slot(root)
|
||||
bs.pruner.cache.evict(root)
|
||||
_, ok := bs.pruner.cache.slot(root)
|
||||
require.Equal(t, false, ok)
|
||||
|
||||
// Ensure that we see the saved files in the filesystem.
|
||||
@@ -243,10 +243,8 @@ func TestListDir(t *testing.T) {
|
||||
}
|
||||
blobWithSszAndTmp := dirFiles{name: "0x1234567890", isDir: true,
|
||||
children: []dirFiles{{name: "5.ssz"}, {name: "0.part"}}}
|
||||
fsLayout.children = append(fsLayout.children, notABlob)
|
||||
fsLayout.children = append(fsLayout.children, childlessBlob)
|
||||
fsLayout.children = append(fsLayout.children, blobWithSsz)
|
||||
fsLayout.children = append(fsLayout.children, blobWithSszAndTmp)
|
||||
fsLayout.children = append(fsLayout.children,
|
||||
notABlob, childlessBlob, blobWithSsz, blobWithSszAndTmp)
|
||||
|
||||
topChildren := make([]string, len(fsLayout.children))
|
||||
for i := range fsLayout.children {
|
||||
@@ -282,10 +280,7 @@ func TestListDir(t *testing.T) {
|
||||
dirPath: ".",
|
||||
expected: []string{notABlob.name},
|
||||
filter: func(s string) bool {
|
||||
if s == notABlob.name {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
return s == notABlob.name
|
||||
},
|
||||
},
|
||||
{
|
||||
|
||||
@@ -697,7 +697,7 @@ func decodeSlasherChunk(enc []byte) ([]uint16, error) {
 }
 
 // Encode attestation record to bytes.
-// The output encoded attestation record consists in the signing root concatened with the compressed attestation record.
+// The output encoded attestation record consists in the signing root concatenated with the compressed attestation record.
 func encodeAttestationRecord(att *slashertypes.IndexedAttestationWrapper) ([]byte, error) {
 	if att == nil || att.IndexedAttestation == nil {
 		return []byte{}, errors.New("nil proposal record")
@@ -716,7 +716,7 @@ func encodeAttestationRecord(att *slashertypes.IndexedAttestationWrapper) ([]byt
 }
 
 // Decode attestation record from bytes.
-// The input encoded attestation record consists in the signing root concatened with the compressed attestation record.
+// The input encoded attestation record consists in the signing root concatenated with the compressed attestation record.
 func decodeAttestationRecord(encoded []byte) (*slashertypes.IndexedAttestationWrapper, error) {
 	if len(encoded) < rootSize {
 		return nil, fmt.Errorf("wrong length for encoded attestation record, want minimum %d, got %d", rootSize, len(encoded))
@@ -10,7 +10,7 @@ import (
 )
 
 // TestCleanup ensures that the cleanup function unregisters the prometheus.Collection
-// also tests the interchangability of the explicit prometheus Register/Unregister
+// also tests the interchangeability of the explicit prometheus Register/Unregister
 // and the implicit methods within the collector implementation
 func TestCleanup(t *testing.T) {
 	ctx := context.Background()
@@ -32,11 +32,11 @@ func TestCleanup(t *testing.T) {
 	assert.Equal(t, true, unregistered, "prometheus.Unregister failed to unregister PowchainCollector on final cleanup")
 }
 
-// TestCancelation tests that canceling the context passed into
+// TestCancellation tests that canceling the context passed into
 // NewPowchainCollector cleans everything up as expected. This
 // does come at the cost of an extra channel cluttering up
 // PowchainCollector, just for this test.
-func TestCancelation(t *testing.T) {
+func TestCancellation(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	pc, err := NewPowchainCollector(ctx)
 	assert.NoError(t, err, "Unexpected error calling NewPowchainCollector")
@@ -707,6 +707,7 @@ func (b *BeaconNode) registerP2P(cliCtx *cli.Context) error {
|
||||
PrivateKey: cliCtx.String(cmd.P2PPrivKey.Name),
|
||||
StaticPeerID: cliCtx.Bool(cmd.P2PStaticID.Name),
|
||||
MetaDataDir: cliCtx.String(cmd.P2PMetadata.Name),
|
||||
QUICPort: cliCtx.Uint(cmd.P2PQUICPort.Name),
|
||||
TCPPort: cliCtx.Uint(cmd.P2PTCPPort.Name),
|
||||
UDPPort: cliCtx.Uint(cmd.P2PUDPPort.Name),
|
||||
MaxPeers: cliCtx.Uint(cmd.P2PMaxPeers.Name),
|
||||
|
||||
@@ -217,9 +217,9 @@ func Test_hasNetworkFlag(t *testing.T) {
 		want         bool
 	}{
 		{
-			name:         "Prater testnet",
-			networkName:  features.PraterTestnet.Name,
-			networkValue: "prater",
+			name:         "Holesky testnet",
+			networkName:  features.HoleskyTestnet.Name,
+			networkValue: "holesky",
 			want:         true,
 		},
 		{
@@ -90,6 +90,7 @@ go_library(
|
||||
"@com_github_libp2p_go_libp2p//core/peerstore:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/protocol:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//p2p/security/noise:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//p2p/transport/quic:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//p2p/transport/tcp:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p_mplex//:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p_pubsub//:go_default_library",
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
ssz "github.com/prysmaticlabs/fastssz"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/altair"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/hash"
|
||||
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
|
||||
@@ -137,11 +138,11 @@ func (s *Service) internalBroadcastAttestation(ctx context.Context, subnet uint6
|
||||
// In the event our attestation is outdated and beyond the
|
||||
// acceptable threshold, we exit early and do not broadcast it.
|
||||
currSlot := slots.CurrentSlot(uint64(s.genesisTime.Unix()))
|
||||
if att.Data.Slot+params.BeaconConfig().SlotsPerEpoch < currSlot {
|
||||
if err := helpers.ValidateAttestationTime(att.Data.Slot, s.genesisTime, params.BeaconConfig().MaximumGossipClockDisparityDuration()); err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"attestationSlot": att.Data.Slot,
|
||||
"currentSlot": currSlot,
|
||||
}).Warning("Attestation is too old to broadcast, discarding it")
|
||||
}).WithError(err).Warning("Attestation is too old to broadcast, discarding it")
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
@@ -24,6 +24,7 @@ type Config struct {
|
||||
PrivateKey string
|
||||
DataDir string
|
||||
MetaDataDir string
|
||||
QUICPort uint
|
||||
TCPPort uint
|
||||
UDPPort uint
|
||||
MaxPeers uint
|
||||
|
||||
@@ -15,6 +15,7 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/features"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
ecdsaprysm "github.com/prysmaticlabs/prysm/v5/crypto/ecdsa"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
@@ -39,6 +40,11 @@ const (
|
||||
udp6
|
||||
)
|
||||
|
||||
type quicProtocol uint16
|
||||
|
||||
// quicProtocol is the "quic" key, which holds the QUIC port of the node.
|
||||
func (quicProtocol) ENRKey() string { return "quic" }
|
||||
|
||||
// RefreshENR uses an epoch to refresh the enr entry for our node
|
||||
// with the tracked committee ids for the epoch, allowing our node
|
||||
// to be dynamically discoverable by others given our tracked committee ids.
|
||||
@@ -100,14 +106,15 @@ func (s *Service) RefreshENR() {
|
||||
|
||||
// listen for new nodes watches for new nodes in the network and adds them to the peerstore.
|
||||
func (s *Service) listenForNewNodes() {
|
||||
iterator := s.dv5Listener.RandomNodes()
|
||||
iterator = enode.Filter(iterator, s.filterPeer)
|
||||
iterator := enode.Filter(s.dv5Listener.RandomNodes(), s.filterPeer)
|
||||
defer iterator.Close()
|
||||
|
||||
for {
|
||||
// Exit if service's context is canceled
|
||||
// Exit if service's context is canceled.
|
||||
if s.ctx.Err() != nil {
|
||||
break
|
||||
}
|
||||
|
||||
if s.isPeerAtLimit(false /* inbound */) {
|
||||
// Pause the main loop for a period to stop looking
|
||||
// for new peers.
|
||||
@@ -115,16 +122,22 @@ func (s *Service) listenForNewNodes() {
|
||||
time.Sleep(pollingPeriod)
|
||||
continue
|
||||
}
|
||||
exists := iterator.Next()
|
||||
if !exists {
|
||||
|
||||
if exists := iterator.Next(); !exists {
|
||||
break
|
||||
}
|
||||
|
||||
node := iterator.Node()
|
||||
peerInfo, _, err := convertToAddrInfo(node)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not convert to peer info")
|
||||
continue
|
||||
}
|
||||
|
||||
if peerInfo == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Make sure that peer is not dialed too often, for each connection attempt there's a backoff period.
|
||||
s.Peers().RandomizeBackOff(peerInfo.ID)
|
||||
go func(info *peer.AddrInfo) {
|
||||
@@ -167,8 +180,7 @@ func (s *Service) createListener(
|
||||
|
||||
// Listen to all network interfaces
|
||||
// for both ip protocols.
|
||||
networkVersion := "udp"
|
||||
conn, err := net.ListenUDP(networkVersion, udpAddr)
|
||||
conn, err := net.ListenUDP("udp", udpAddr)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not listen to UDP")
|
||||
}
|
||||
@@ -178,6 +190,7 @@ func (s *Service) createListener(
|
||||
ipAddr,
|
||||
int(s.cfg.UDPPort),
|
||||
int(s.cfg.TCPPort),
|
||||
int(s.cfg.QUICPort),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not create local node")
|
||||
@@ -209,7 +222,7 @@ func (s *Service) createListener(
|
||||
func (s *Service) createLocalNode(
|
||||
privKey *ecdsa.PrivateKey,
|
||||
ipAddr net.IP,
|
||||
udpPort, tcpPort int,
|
||||
udpPort, tcpPort, quicPort int,
|
||||
) (*enode.LocalNode, error) {
|
||||
db, err := enode.OpenDB("")
|
||||
if err != nil {
|
||||
@@ -218,11 +231,19 @@ func (s *Service) createLocalNode(
|
||||
localNode := enode.NewLocalNode(db, privKey)
|
||||
|
||||
ipEntry := enr.IP(ipAddr)
|
||||
udpEntry := enr.UDP(udpPort)
|
||||
tcpEntry := enr.TCP(tcpPort)
|
||||
localNode.Set(ipEntry)
|
||||
|
||||
udpEntry := enr.UDP(udpPort)
|
||||
localNode.Set(udpEntry)
|
||||
|
||||
tcpEntry := enr.TCP(tcpPort)
|
||||
localNode.Set(tcpEntry)
|
||||
|
||||
if features.Get().EnableQUIC {
|
||||
quicEntry := quicProtocol(quicPort)
|
||||
localNode.Set(quicEntry)
|
||||
}
|
||||
|
||||
localNode.SetFallbackIP(ipAddr)
|
||||
localNode.SetFallbackUDP(udpPort)
|
||||
|
||||
@@ -277,7 +298,7 @@ func (s *Service) startDiscoveryV5(
|
||||
// filterPeer validates each node that we retrieve from our dht. We
|
||||
// try to ascertain that the peer can be a valid protocol peer.
|
||||
// Validity Conditions:
|
||||
// 1. Peer has a valid IP and TCP port set in their enr.
|
||||
// 1. Peer has a valid IP and a (QUIC and/or TCP) port set in their enr.
|
||||
// 2. Peer hasn't been marked as 'bad'.
|
||||
// 3. Peer is not currently active or connected.
|
||||
// 4. Peer is ready to receive incoming connections.
|
||||
@@ -294,17 +315,13 @@ func (s *Service) filterPeer(node *enode.Node) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// Ignore nodes with their TCP ports not set.
|
||||
if err := node.Record().Load(enr.WithEntry("tcp", new(enr.TCP))); err != nil {
|
||||
if !enr.IsNotFound(err) {
|
||||
log.WithError(err).Debug("Could not retrieve tcp port")
|
||||
}
|
||||
peerData, multiAddrs, err := convertToAddrInfo(node)
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Could not convert to peer data")
|
||||
return false
|
||||
}
|
||||
|
||||
peerData, multiAddr, err := convertToAddrInfo(node)
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Could not convert to peer data")
|
||||
if peerData == nil || len(multiAddrs) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -337,6 +354,9 @@ func (s *Service) filterPeer(node *enode.Node) bool {
|
||||
}
|
||||
}
|
||||
|
||||
// If the peer has 2 multiaddrs, favor the QUIC address, which is in first position.
|
||||
multiAddr := multiAddrs[0]
|
||||
|
||||
// Add peer to peer handler.
|
||||
s.peers.Add(nodeENR, peerData.ID, multiAddr, network.DirUnknown)
|
||||
|
||||
@@ -380,11 +400,11 @@ func PeersFromStringAddrs(addrs []string) ([]ma.Multiaddr, error) {
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "Could not get enode from string")
|
||||
}
|
||||
addr, err := convertToSingleMultiAddr(enodeAddr)
|
||||
nodeAddrs, err := retrieveMultiAddrsFromNode(enodeAddr)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "Could not get multiaddr")
|
||||
}
|
||||
allAddrs = append(allAddrs, addr)
|
||||
allAddrs = append(allAddrs, nodeAddrs...)
|
||||
}
|
||||
return allAddrs, nil
|
||||
}
|
||||
@@ -419,45 +439,139 @@ func parseGenericAddrs(addrs []string) (enodeString, multiAddrString []string) {
|
||||
}
|
||||
|
||||
func convertToMultiAddr(nodes []*enode.Node) []ma.Multiaddr {
|
||||
var multiAddrs []ma.Multiaddr
|
||||
// Expect each node to have a TCP and a QUIC address.
|
||||
multiAddrs := make([]ma.Multiaddr, 0, 2*len(nodes))
|
||||
|
||||
for _, node := range nodes {
|
||||
// ignore nodes with no ip address stored
|
||||
// Skip nodes with no ip address stored.
|
||||
if node.IP() == nil {
|
||||
continue
|
||||
}
|
||||
multiAddr, err := convertToSingleMultiAddr(node)
|
||||
|
||||
// Get up to two multiaddrs (TCP and QUIC) for each node.
|
||||
nodeMultiAddrs, err := retrieveMultiAddrsFromNode(node)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not convert to multiAddr")
|
||||
log.WithError(err).Errorf("Could not convert to multiAddr node %s", node)
|
||||
continue
|
||||
}
|
||||
multiAddrs = append(multiAddrs, multiAddr)
|
||||
|
||||
multiAddrs = append(multiAddrs, nodeMultiAddrs...)
|
||||
}
|
||||
|
||||
return multiAddrs
|
||||
}
|
||||
|
||||
func convertToAddrInfo(node *enode.Node) (*peer.AddrInfo, ma.Multiaddr, error) {
|
||||
multiAddr, err := convertToSingleMultiAddr(node)
|
||||
func convertToAddrInfo(node *enode.Node) (*peer.AddrInfo, []ma.Multiaddr, error) {
|
||||
multiAddrs, err := retrieveMultiAddrsFromNode(node)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
info, err := peer.AddrInfoFromP2pAddr(multiAddr)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
|
||||
if len(multiAddrs) == 0 {
|
||||
return nil, nil, nil
|
||||
}
|
||||
return info, multiAddr, nil
|
||||
|
||||
infos, err := peer.AddrInfosFromP2pAddrs(multiAddrs...)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "could not convert to peer info: %v", multiAddrs)
|
||||
}
|
||||
|
||||
if len(infos) != 1 {
|
||||
return nil, nil, errors.Errorf("infos contains %v elements, expected exactly 1", len(infos))
|
||||
}
|
||||
|
||||
return &infos[0], multiAddrs, nil
|
||||
}
|
||||
|
||||
func convertToSingleMultiAddr(node *enode.Node) (ma.Multiaddr, error) {
|
||||
// retrieveMultiAddrsFromNode converts an enode.Node to a list of multiaddrs.
|
||||
// If the node has a both a QUIC and a TCP port set in their ENR, then
|
||||
// the multiaddr corresponding to the QUIC port is added first, followed
|
||||
// by the multiaddr corresponding to the TCP port.
|
||||
func retrieveMultiAddrsFromNode(node *enode.Node) ([]ma.Multiaddr, error) {
|
||||
multiaddrs := make([]ma.Multiaddr, 0, 2)
|
||||
|
||||
// Retrieve the node public key.
|
||||
pubkey := node.Pubkey()
|
||||
assertedKey, err := ecdsaprysm.ConvertToInterfacePubkey(pubkey)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get pubkey")
|
||||
}
|
||||
|
||||
// Compute the node ID from the public key.
|
||||
id, err := peer.IDFromPublicKey(assertedKey)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get peer id")
|
||||
}
|
||||
return multiAddressBuilderWithID(node.IP().String(), "tcp", uint(node.TCP()), id)
|
||||
|
||||
if features.Get().EnableQUIC {
|
||||
// If the QUIC entry is present in the ENR, build the corresponding multiaddress.
|
||||
port, ok, err := getPort(node, quic)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get QUIC port")
|
||||
}
|
||||
|
||||
if ok {
|
||||
addr, err := multiAddressBuilderWithID(node.IP(), quic, port, id)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not build QUIC address")
|
||||
}
|
||||
|
||||
multiaddrs = append(multiaddrs, addr)
|
||||
}
|
||||
}
|
||||
|
||||
// If the TCP entry is present in the ENR, build the corresponding multiaddress.
|
||||
port, ok, err := getPort(node, tcp)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get TCP port")
|
||||
}
|
||||
|
||||
if ok {
|
||||
addr, err := multiAddressBuilderWithID(node.IP(), tcp, port, id)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not build TCP address")
|
||||
}
|
||||
|
||||
multiaddrs = append(multiaddrs, addr)
|
||||
}
|
||||
|
||||
return multiaddrs, nil
|
||||
}
|
||||
|
||||
// getPort retrieves the port for a given node and protocol, as well as a boolean
|
||||
// indicating whether the port was found, and an error
|
||||
func getPort(node *enode.Node, protocol internetProtocol) (uint, bool, error) {
|
||||
var (
|
||||
port uint
|
||||
err error
|
||||
)
|
||||
|
||||
switch protocol {
|
||||
case tcp:
|
||||
var entry enr.TCP
|
||||
err = node.Load(&entry)
|
||||
port = uint(entry)
|
||||
case udp:
|
||||
var entry enr.UDP
|
||||
err = node.Load(&entry)
|
||||
port = uint(entry)
|
||||
case quic:
|
||||
var entry quicProtocol
|
||||
err = node.Load(&entry)
|
||||
port = uint(entry)
|
||||
default:
|
||||
return 0, false, errors.Errorf("invalid protocol: %v", protocol)
|
||||
}
|
||||
|
||||
if enr.IsNotFound(err) {
|
||||
return port, false, nil
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return 0, false, errors.Wrap(err, "could not get port")
|
||||
}
|
||||
|
||||
return port, true, nil
|
||||
}
|
||||
|
||||
func convertToUdpMultiAddr(node *enode.Node) ([]ma.Multiaddr, error) {
|
||||
@@ -475,14 +589,14 @@ func convertToUdpMultiAddr(node *enode.Node) ([]ma.Multiaddr, error) {
|
||||
var ip4 enr.IPv4
|
||||
var ip6 enr.IPv6
|
||||
if node.Load(&ip4) == nil {
|
||||
address, ipErr := multiAddressBuilderWithID(net.IP(ip4).String(), "udp", uint(node.UDP()), id)
|
||||
address, ipErr := multiAddressBuilderWithID(net.IP(ip4), udp, uint(node.UDP()), id)
|
||||
if ipErr != nil {
|
||||
return nil, errors.Wrap(ipErr, "could not build IPv4 address")
|
||||
}
|
||||
addresses = append(addresses, address)
|
||||
}
|
||||
if node.Load(&ip6) == nil {
|
||||
address, ipErr := multiAddressBuilderWithID(net.IP(ip6).String(), "udp", uint(node.UDP()), id)
|
||||
address, ipErr := multiAddressBuilderWithID(net.IP(ip6), udp, uint(node.UDP()), id)
|
||||
if ipErr != nil {
|
||||
return nil, errors.Wrap(ipErr, "could not build IPv6 address")
|
||||
}
|
||||
|
||||
@@ -166,8 +166,9 @@ func TestCreateLocalNode(t *testing.T) {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
// Define ports.
|
||||
const (
|
||||
udpPort = 2000
|
||||
tcpPort = 3000
|
||||
udpPort = 2000
|
||||
tcpPort = 3000
|
||||
quicPort = 3000
|
||||
)
|
||||
|
||||
// Create a private key.
|
||||
@@ -180,7 +181,7 @@ func TestCreateLocalNode(t *testing.T) {
|
||||
cfg: tt.cfg,
|
||||
}
|
||||
|
||||
localNode, err := service.createLocalNode(privKey, address, udpPort, tcpPort)
|
||||
localNode, err := service.createLocalNode(privKey, address, udpPort, tcpPort, quicPort)
|
||||
if tt.expectedError {
|
||||
require.NotNil(t, err)
|
||||
return
|
||||
@@ -237,7 +238,7 @@ func TestMultiAddrsConversion_InvalidIPAddr(t *testing.T) {
|
||||
genesisTime: time.Now(),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
}
|
||||
node, err := s.createLocalNode(pkey, addr, 0, 0)
|
||||
node, err := s.createLocalNode(pkey, addr, 0, 0, 0)
|
||||
require.NoError(t, err)
|
||||
multiAddr := convertToMultiAddr([]*enode.Node{node.Node()})
|
||||
assert.Equal(t, 0, len(multiAddr), "Invalid ip address converted successfully")
|
||||
@@ -248,8 +249,9 @@ func TestMultiAddrConversion_OK(t *testing.T) {
|
||||
ipAddr, pkey := createAddrAndPrivKey(t)
|
||||
s := &Service{
|
||||
cfg: &Config{
|
||||
TCPPort: 0,
|
||||
UDPPort: 0,
|
||||
UDPPort: 2000,
|
||||
TCPPort: 3000,
|
||||
QUICPort: 3000,
|
||||
},
|
||||
genesisTime: time.Now(),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
|
||||
@@ -28,7 +28,8 @@ import (
|
||||
)
|
||||
|
||||
func TestStartDiscv5_DifferentForkDigests(t *testing.T) {
|
||||
port := 2000
|
||||
const port = 2000
|
||||
|
||||
ipAddr, pkey := createAddrAndPrivKey(t)
|
||||
genesisTime := time.Now()
|
||||
genesisValidatorsRoot := make([]byte, fieldparams.RootLength)
|
||||
@@ -53,7 +54,7 @@ func TestStartDiscv5_DifferentForkDigests(t *testing.T) {
|
||||
|
||||
var listeners []*discover.UDPv5
|
||||
for i := 1; i <= 5; i++ {
|
||||
port = 3000 + i
|
||||
port := 3000 + i
|
||||
cfg.UDPPort = uint(port)
|
||||
ipAddr, pkey := createAddrAndPrivKey(t)
|
||||
|
||||
@@ -98,13 +99,14 @@ func TestStartDiscv5_DifferentForkDigests(t *testing.T) {
|
||||
s.genesisTime = genesisTime
|
||||
s.genesisValidatorsRoot = make([]byte, 32)
|
||||
s.dv5Listener = lastListener
|
||||
var addrs []ma.Multiaddr
|
||||
|
||||
for _, n := range nodes {
|
||||
if s.filterPeer(n) {
|
||||
addr, err := convertToSingleMultiAddr(n)
|
||||
addrs := make([]ma.Multiaddr, 0)
|
||||
|
||||
for _, node := range nodes {
|
||||
if s.filterPeer(node) {
|
||||
nodeAddrs, err := retrieveMultiAddrsFromNode(node)
|
||||
require.NoError(t, err)
|
||||
addrs = append(addrs, addr)
|
||||
addrs = append(addrs, nodeAddrs...)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -114,10 +116,11 @@ func TestStartDiscv5_DifferentForkDigests(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestStartDiscv5_SameForkDigests_DifferentNextForkData(t *testing.T) {
|
||||
const port = 2000
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
hook := logTest.NewGlobal()
|
||||
logrus.SetLevel(logrus.TraceLevel)
|
||||
port := 2000
|
||||
ipAddr, pkey := createAddrAndPrivKey(t)
|
||||
genesisTime := time.Now()
|
||||
genesisValidatorsRoot := make([]byte, 32)
|
||||
@@ -138,7 +141,7 @@ func TestStartDiscv5_SameForkDigests_DifferentNextForkData(t *testing.T) {
|
||||
|
||||
var listeners []*discover.UDPv5
|
||||
for i := 1; i <= 5; i++ {
|
||||
port = 3000 + i
|
||||
port := 3000 + i
|
||||
cfg.UDPPort = uint(port)
|
||||
ipAddr, pkey := createAddrAndPrivKey(t)
|
||||
|
||||
@@ -188,13 +191,13 @@ func TestStartDiscv5_SameForkDigests_DifferentNextForkData(t *testing.T) {
|
||||
s.genesisTime = genesisTime
|
||||
s.genesisValidatorsRoot = make([]byte, 32)
|
||||
s.dv5Listener = lastListener
|
||||
var addrs []ma.Multiaddr
|
||||
addrs := make([]ma.Multiaddr, 0, len(nodes))
|
||||
|
||||
for _, n := range nodes {
|
||||
if s.filterPeer(n) {
|
||||
addr, err := convertToSingleMultiAddr(n)
|
||||
for _, node := range nodes {
|
||||
if s.filterPeer(node) {
|
||||
nodeAddrs, err := retrieveMultiAddrsFromNode(node)
|
||||
require.NoError(t, err)
|
||||
addrs = append(addrs, addr)
|
||||
addrs = append(addrs, nodeAddrs...)
|
||||
}
|
||||
}
|
||||
if len(addrs) == 0 {
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package p2p
|
||||
|
||||
import (
|
||||
"net"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
@@ -12,32 +13,32 @@ import (
|
||||
var log = logrus.WithField("prefix", "p2p")
|
||||
|
||||
func logIPAddr(id peer.ID, addrs ...ma.Multiaddr) {
|
||||
var correctAddr ma.Multiaddr
|
||||
for _, addr := range addrs {
|
||||
if strings.Contains(addr.String(), "/ip4/") || strings.Contains(addr.String(), "/ip6/") {
|
||||
correctAddr = addr
|
||||
break
|
||||
if !(strings.Contains(addr.String(), "/ip4/") || strings.Contains(addr.String(), "/ip6/")) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
if correctAddr != nil {
|
||||
|
||||
log.WithField(
|
||||
"multiAddr",
|
||||
correctAddr.String()+"/p2p/"+id.String(),
|
||||
addr.String()+"/p2p/"+id.String(),
|
||||
).Info("Node started p2p server")
|
||||
}
|
||||
}
|
||||
|
||||
func logExternalIPAddr(id peer.ID, addr string, port uint) {
|
||||
func logExternalIPAddr(id peer.ID, addr string, tcpPort, quicPort uint) {
|
||||
if addr != "" {
|
||||
multiAddr, err := MultiAddressBuilder(addr, port)
|
||||
multiAddrs, err := MultiAddressBuilder(net.ParseIP(addr), tcpPort, quicPort)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not create multiaddress")
|
||||
return
|
||||
}
|
||||
log.WithField(
|
||||
"multiAddr",
|
||||
multiAddr.String()+"/p2p/"+id.String(),
|
||||
).Info("Node started external p2p server")
|
||||
|
||||
for _, multiAddr := range multiAddrs {
|
||||
log.WithField(
|
||||
"multiAddr",
|
||||
multiAddr.String()+"/p2p/"+id.String(),
|
||||
).Info("Node started external p2p server")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -11,40 +11,68 @@ import (
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/p2p/security/noise"
"github.com/libp2p/go-libp2p/p2p/transport/tcp"
libp2pquic "github.com/libp2p/go-libp2p/p2p/transport/quic"
libp2ptcp "github.com/libp2p/go-libp2p/p2p/transport/tcp"
gomplex "github.com/libp2p/go-mplex"
ma "github.com/multiformats/go-multiaddr"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/config/features"
ecdsaprysm "github.com/prysmaticlabs/prysm/v5/crypto/ecdsa"

"github.com/prysmaticlabs/prysm/v5/runtime/version"
)

type internetProtocol string

const (
udp = "udp"
tcp = "tcp"
quic = "quic"
)

// MultiAddressBuilder takes in an ip address and TCP/QUIC ports to produce go multiaddr formats.
func MultiAddressBuilder(ipAddr string, port uint) (ma.Multiaddr, error) {
parsedIP := net.ParseIP(ipAddr)
if parsedIP.To4() == nil && parsedIP.To16() == nil {
return nil, errors.Errorf("invalid ip address provided: %s", ipAddr)
func MultiAddressBuilder(ip net.IP, tcpPort, quicPort uint) ([]ma.Multiaddr, error) {
ipType, err := extractIpType(ip)
if err != nil {
return nil, errors.Wrap(err, "unable to determine IP type")
}
if parsedIP.To4() != nil {
return ma.NewMultiaddr(fmt.Sprintf("/ip4/%s/tcp/%d", ipAddr, port))

// Example: /ip4/1.2.3.4/tcp/5678
multiaddrStr := fmt.Sprintf("/%s/%s/tcp/%d", ipType, ip, tcpPort)
multiAddrTCP, err := ma.NewMultiaddr(multiaddrStr)
if err != nil {
return nil, errors.Wrapf(err, "cannot produce TCP multiaddr format from %s:%d", ip, tcpPort)
}
return ma.NewMultiaddr(fmt.Sprintf("/ip6/%s/tcp/%d", ipAddr, port))

multiaddrs := []ma.Multiaddr{multiAddrTCP}

if features.Get().EnableQUIC {
// Example: /ip4/1.2.3.4/udp/5678/quic-v1
multiAddrQUIC, err := ma.NewMultiaddr(fmt.Sprintf("/%s/%s/udp/%d/quic-v1", ipType, ip, quicPort))
if err != nil {
return nil, errors.Wrapf(err, "cannot produce QUIC multiaddr format from %s:%d", ip, quicPort)
}

multiaddrs = append(multiaddrs, multiAddrQUIC)
}

return multiaddrs, nil
}
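
A minimal usage sketch (not from the repository): it assumes the MultiAddressBuilder signature introduced above and the EnableQUIC feature flag being set; the wrapper function name and the addresses are hypothetical.

// Sketch only: exercises the new builder from inside the p2p package.
func exampleMultiAddressBuilder() {
	addrs, err := MultiAddressBuilder(net.ParseIP("192.168.0.1"), 13000, 13000)
	if err != nil {
		log.WithError(err).Error("Could not build multiaddrs")
		return
	}
	for _, a := range addrs {
		// Expected: /ip4/192.168.0.1/tcp/13000 and, with QUIC enabled,
		// /ip4/192.168.0.1/udp/13000/quic-v1.
		fmt.Println(a.String())
	}
}
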
// buildOptions for the libp2p host.
|
||||
func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) ([]libp2p.Option, error) {
|
||||
cfg := s.cfg
|
||||
listen, err := MultiAddressBuilder(ip.String(), cfg.TCPPort)
|
||||
multiaddrs, err := MultiAddressBuilder(ip, cfg.TCPPort, cfg.QUICPort)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "cannot produce multiaddr format from %s:%d", ip.String(), cfg.TCPPort)
|
||||
return nil, errors.Wrapf(err, "cannot produce multiaddr format from %s:%d", ip, cfg.TCPPort)
|
||||
}
|
||||
if cfg.LocalIP != "" {
|
||||
if net.ParseIP(cfg.LocalIP) == nil {
|
||||
localIP := net.ParseIP(cfg.LocalIP)
|
||||
if localIP == nil {
|
||||
return nil, errors.Wrapf(err, "invalid local ip provided: %s:%d", cfg.LocalIP, cfg.TCPPort)
|
||||
}
|
||||
|
||||
listen, err = MultiAddressBuilder(cfg.LocalIP, cfg.TCPPort)
|
||||
multiaddrs, err = MultiAddressBuilder(localIP, cfg.TCPPort, cfg.QUICPort)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "cannot produce multiaddr format from %s:%d", cfg.LocalIP, cfg.TCPPort)
|
||||
}
|
||||
@@ -58,40 +86,47 @@ func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) ([]libp2p.Op
|
||||
return nil, errors.Wrapf(err, "cannot get ID from public key: %s", ifaceKey.GetPublic().Type().String())
|
||||
}
|
||||
|
||||
log.Infof("Running node with peer id of %s ", id.String())
|
||||
log.WithField("peerId", id).Info("Running node with id")
|
||||
|
||||
options := []libp2p.Option{
|
||||
privKeyOption(priKey),
|
||||
libp2p.ListenAddrs(listen),
|
||||
libp2p.ListenAddrs(multiaddrs...),
|
||||
libp2p.UserAgent(version.BuildData()),
|
||||
libp2p.ConnectionGater(s),
|
||||
libp2p.Transport(tcp.NewTCPTransport),
|
||||
libp2p.Transport(libp2ptcp.NewTCPTransport),
|
||||
libp2p.DefaultMuxers,
|
||||
libp2p.Muxer("/mplex/6.7.0", mplex.DefaultTransport),
|
||||
libp2p.Security(noise.ID, noise.New),
|
||||
libp2p.Ping(false), // Disable Ping Service.
|
||||
}
|
||||
|
||||
if features.Get().EnableQUIC {
|
||||
options = append(options, libp2p.Transport(libp2pquic.NewTransport))
|
||||
}
|
||||
|
||||
if cfg.EnableUPnP {
|
||||
options = append(options, libp2p.NATPortMap()) // Allow to use UPnP
|
||||
}
|
||||
|
||||
if cfg.RelayNodeAddr != "" {
|
||||
options = append(options, libp2p.AddrsFactory(withRelayAddrs(cfg.RelayNodeAddr)))
|
||||
} else {
|
||||
// Disable relay if it has not been set.
|
||||
options = append(options, libp2p.DisableRelay())
|
||||
}
|
||||
|
||||
if cfg.HostAddress != "" {
|
||||
options = append(options, libp2p.AddrsFactory(func(addrs []ma.Multiaddr) []ma.Multiaddr {
|
||||
external, err := MultiAddressBuilder(cfg.HostAddress, cfg.TCPPort)
|
||||
externalMultiaddrs, err := MultiAddressBuilder(net.ParseIP(cfg.HostAddress), cfg.TCPPort, cfg.QUICPort)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Unable to create external multiaddress")
|
||||
} else {
|
||||
addrs = append(addrs, external)
|
||||
addrs = append(addrs, externalMultiaddrs...)
|
||||
}
|
||||
return addrs
|
||||
}))
|
||||
}
|
||||
|
||||
if cfg.HostDNS != "" {
|
||||
options = append(options, libp2p.AddrsFactory(func(addrs []ma.Multiaddr) []ma.Multiaddr {
|
||||
external, err := ma.NewMultiaddr(fmt.Sprintf("/dns4/%s/tcp/%d", cfg.HostDNS, cfg.TCPPort))
|
||||
@@ -107,21 +142,47 @@ func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) ([]libp2p.Op
|
||||
if features.Get().DisableResourceManager {
|
||||
options = append(options, libp2p.ResourceManager(&network.NullResourceManager{}))
|
||||
}
|
||||
|
||||
return options, nil
|
||||
}
|
||||
|
||||
func multiAddressBuilderWithID(ipAddr, protocol string, port uint, id peer.ID) (ma.Multiaddr, error) {
|
||||
parsedIP := net.ParseIP(ipAddr)
|
||||
if parsedIP.To4() == nil && parsedIP.To16() == nil {
|
||||
return nil, errors.Errorf("invalid ip address provided: %s", ipAddr)
|
||||
func extractIpType(ip net.IP) (string, error) {
|
||||
if ip.To4() != nil {
|
||||
return "ip4", nil
|
||||
}
|
||||
if id.String() == "" {
|
||||
return nil, errors.New("empty peer id given")
|
||||
|
||||
if ip.To16() != nil {
|
||||
return "ip6", nil
|
||||
}
|
||||
if parsedIP.To4() != nil {
|
||||
return ma.NewMultiaddr(fmt.Sprintf("/ip4/%s/%s/%d/p2p/%s", ipAddr, protocol, port, id.String()))
|
||||
|
||||
return "", errors.Errorf("provided IP address is neither IPv4 nor IPv6: %s", ip)
|
||||
}
|
||||
|
||||
func multiAddressBuilderWithID(ip net.IP, protocol internetProtocol, port uint, id peer.ID) (ma.Multiaddr, error) {
|
||||
var multiaddrStr string
|
||||
|
||||
if id == "" {
|
||||
return nil, errors.Errorf("empty peer id given: %s", id)
|
||||
}
|
||||
return ma.NewMultiaddr(fmt.Sprintf("/ip6/%s/%s/%d/p2p/%s", ipAddr, protocol, port, id.String()))
|
||||
|
||||
ipType, err := extractIpType(ip)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to determine IP type")
|
||||
}
|
||||
|
||||
switch protocol {
|
||||
case udp, tcp:
|
||||
// Example with UDP: /ip4/1.2.3.4/udp/5678/p2p/16Uiu2HAkum7hhuMpWqFj3yNLcmQBGmThmqw2ohaCRThXQuKU9ohs
|
||||
// Example with TCP: /ip6/1.2.3.4/tcp/5678/p2p/16Uiu2HAkum7hhuMpWqFj3yNLcmQBGmThmqw2ohaCRThXQuKU9ohs
|
||||
multiaddrStr = fmt.Sprintf("/%s/%s/%s/%d/p2p/%s", ipType, ip, protocol, port, id)
|
||||
case quic:
|
||||
// Example: /ip4/1.2.3.4/udp/5678/quic-v1/p2p/16Uiu2HAkum7hhuMpWqFj3yNLcmQBGmThmqw2ohaCRThXQuKU9ohs
|
||||
multiaddrStr = fmt.Sprintf("/%s/%s/udp/%d/quic-v1/p2p/%s", ipType, ip, port, id)
|
||||
default:
|
||||
return nil, errors.Errorf("unsupported protocol: %s", protocol)
|
||||
}
|
||||
|
||||
return ma.NewMultiaddr(multiaddrStr)
|
||||
}
|
||||
|
||||
// Adds a private key to the libp2p option if the option was provided.
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
"github.com/libp2p/go-libp2p"
|
||||
"github.com/libp2p/go-libp2p/core/crypto"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
mock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
@@ -88,30 +89,34 @@ func TestIPV6Support(t *testing.T) {
|
||||
lNode := enode.NewLocalNode(db, key)
|
||||
mockIPV6 := net.IP{0xff, 0x02, 0xAA, 0, 0x1F, 0, 0x2E, 0, 0, 0x36, 0x45, 0, 0, 0, 0, 0x02}
|
||||
lNode.Set(enr.IP(mockIPV6))
|
||||
ma, err := convertToSingleMultiAddr(lNode.Node())
|
||||
mas, err := retrieveMultiAddrsFromNode(lNode.Node())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ipv6Exists := false
|
||||
for _, p := range ma.Protocols() {
|
||||
if p.Name == "ip4" {
|
||||
t.Error("Got ip4 address instead of ip6")
|
||||
|
||||
for _, ma := range mas {
|
||||
ipv6Exists := false
|
||||
for _, p := range ma.Protocols() {
|
||||
if p.Name == "ip4" {
|
||||
t.Error("Got ip4 address instead of ip6")
|
||||
}
|
||||
if p.Name == "ip6" {
|
||||
ipv6Exists = true
|
||||
}
|
||||
}
|
||||
if p.Name == "ip6" {
|
||||
ipv6Exists = true
|
||||
if !ipv6Exists {
|
||||
t.Error("Multiaddress did not have ipv6 protocol")
|
||||
}
|
||||
}
|
||||
if !ipv6Exists {
|
||||
t.Error("Multiaddress did not have ipv6 protocol")
|
||||
}
|
||||
}
|
||||
|
||||
func TestDefaultMultiplexers(t *testing.T) {
|
||||
var cfg libp2p.Config
|
||||
_ = cfg
|
||||
p2pCfg := &Config{
|
||||
TCPPort: 2000,
|
||||
UDPPort: 2000,
|
||||
TCPPort: 3000,
|
||||
QUICPort: 3000,
|
||||
StateNotifier: &mock.MockStateNotifier{},
|
||||
}
|
||||
svc := &Service{cfg: p2pCfg}
|
||||
@@ -127,5 +132,57 @@ func TestDefaultMultiplexers(t *testing.T) {
|
||||
|
||||
assert.Equal(t, protocol.ID("/yamux/1.0.0"), cfg.Muxers[0].ID)
|
||||
assert.Equal(t, protocol.ID("/mplex/6.7.0"), cfg.Muxers[1].ID)
|
||||
|
||||
}
|
||||
|
||||
func TestMultiAddressBuilderWithID(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
ip net.IP
|
||||
protocol internetProtocol
|
||||
port uint
|
||||
id string
|
||||
|
||||
expectedMultiaddrStr string
|
||||
}{
|
||||
{
|
||||
name: "UDP",
|
||||
ip: net.IPv4(192, 168, 0, 1),
|
||||
protocol: udp,
|
||||
port: 5678,
|
||||
id: "0025080212210204fb1ebb1aa467527d34306a4794a5171d6516405e720b909b7f816d63aef96a",
|
||||
|
||||
expectedMultiaddrStr: "/ip4/192.168.0.1/udp/5678/p2p/16Uiu2HAkum7hhuMpWqFj3yNLcmQBGmThmqw2ohaCRThXQuKU9ohs",
|
||||
},
|
||||
{
|
||||
name: "TCP",
|
||||
ip: net.IPv4(192, 168, 0, 1),
|
||||
protocol: tcp,
|
||||
port: 5678,
|
||||
id: "0025080212210204fb1ebb1aa467527d34306a4794a5171d6516405e720b909b7f816d63aef96a",
|
||||
|
||||
expectedMultiaddrStr: "/ip4/192.168.0.1/tcp/5678/p2p/16Uiu2HAkum7hhuMpWqFj3yNLcmQBGmThmqw2ohaCRThXQuKU9ohs",
|
||||
},
|
||||
{
|
||||
name: "QUIC",
|
||||
ip: net.IPv4(192, 168, 0, 1),
|
||||
protocol: quic,
|
||||
port: 5678,
|
||||
id: "0025080212210204fb1ebb1aa467527d34306a4794a5171d6516405e720b909b7f816d63aef96a",
|
||||
|
||||
expectedMultiaddrStr: "/ip4/192.168.0.1/udp/5678/quic-v1/p2p/16Uiu2HAkum7hhuMpWqFj3yNLcmQBGmThmqw2ohaCRThXQuKU9ohs",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range testCases {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
id, err := hex.DecodeString(tt.id)
|
||||
require.NoError(t, err)
|
||||
|
||||
actualMultiaddr, err := multiAddressBuilderWithID(tt.ip, tt.protocol, tt.port, peer.ID(id))
|
||||
require.NoError(t, err)
|
||||
|
||||
actualMultiaddrStr := actualMultiaddr.String()
|
||||
require.Equal(t, tt.expectedMultiaddrStr, actualMultiaddrStr)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
// Package peers provides information about peers at the Ethereum consensus protocol level.
|
||||
//
|
||||
// "Protocol level" is the level above the network level, so this layer never sees or interacts with
|
||||
// (for example) hosts that are uncontactable due to being down, firewalled, etc. Instead, this works
|
||||
// (for example) hosts that are unreachable due to being down, firewalled, etc. Instead, this works
|
||||
// with peers that are contactable but may or may not be of the correct fork version, not currently
|
||||
// required due to the number of current connections, etc.
|
||||
//
|
||||
@@ -26,6 +26,7 @@ import (
|
||||
"context"
|
||||
"math"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
@@ -59,8 +60,8 @@ const (
|
||||
)
|
||||
|
||||
const (
|
||||
// ColocationLimit restricts how many peer identities we can see from a single ip or ipv6 subnet.
|
||||
ColocationLimit = 5
|
||||
// CollocationLimit restricts how many peer identities we can see from a single ip or ipv6 subnet.
|
||||
CollocationLimit = 5
|
||||
|
||||
// Additional buffer beyond current peer limit, from which we can store the relevant peer statuses.
|
||||
maxLimitBuffer = 150
|
||||
@@ -76,6 +77,13 @@ const (
|
||||
MaxBackOffDuration = 5000
|
||||
)
|
||||
|
||||
type InternetProtocol string
|
||||
|
||||
const (
|
||||
TCP = "tcp"
|
||||
QUIC = "quic"
|
||||
)
|
||||
|
||||
// Status is the structure holding the peer status information.
|
||||
type Status struct {
|
||||
ctx context.Context
|
||||
@@ -449,6 +457,19 @@ func (p *Status) InboundConnected() []peer.ID {
|
||||
return peers
|
||||
}
|
||||
|
||||
// InboundConnectedWithProtocol returns the current batch of inbound peers that are connected with a given protocol.
|
||||
func (p *Status) InboundConnectedWithProtocol(protocol InternetProtocol) []peer.ID {
|
||||
p.store.RLock()
|
||||
defer p.store.RUnlock()
|
||||
peers := make([]peer.ID, 0)
|
||||
for pid, peerData := range p.store.Peers() {
|
||||
if peerData.ConnState == PeerConnected && peerData.Direction == network.DirInbound && strings.Contains(peerData.Address.String(), string(protocol)) {
|
||||
peers = append(peers, pid)
|
||||
}
|
||||
}
|
||||
return peers
|
||||
}
|
||||
|
||||
// Outbound returns the current batch of outbound peers.
|
||||
func (p *Status) Outbound() []peer.ID {
|
||||
p.store.RLock()
|
||||
@@ -475,7 +496,20 @@ func (p *Status) OutboundConnected() []peer.ID {
|
||||
return peers
|
||||
}
|
||||
|
||||
// Active returns the peers that are connecting or connected.
|
||||
// OutboundConnectedWithProtocol returns the current batch of outbound peers that are connected with a given protocol.
|
||||
func (p *Status) OutboundConnectedWithProtocol(protocol InternetProtocol) []peer.ID {
|
||||
p.store.RLock()
|
||||
defer p.store.RUnlock()
|
||||
peers := make([]peer.ID, 0)
|
||||
for pid, peerData := range p.store.Peers() {
|
||||
if peerData.ConnState == PeerConnected && peerData.Direction == network.DirOutbound && strings.Contains(peerData.Address.String(), string(protocol)) {
|
||||
peers = append(peers, pid)
|
||||
}
|
||||
}
|
||||
return peers
|
||||
}
|
||||
|
||||
// Active returns the peers that are active (connecting or connected).
|
||||
func (p *Status) Active() []peer.ID {
|
||||
p.store.RLock()
|
||||
defer p.store.RUnlock()
|
||||
@@ -514,7 +548,7 @@ func (p *Status) Disconnected() []peer.ID {
|
||||
return peers
|
||||
}
|
||||
|
||||
// Inactive returns the peers that are disconnecting or disconnected.
|
||||
// Inactive returns the peers that are inactive (disconnecting or disconnected).
|
||||
func (p *Status) Inactive() []peer.ID {
|
||||
p.store.RLock()
|
||||
defer p.store.RUnlock()
|
||||
@@ -548,7 +582,7 @@ func (p *Status) Prune() {
|
||||
p.store.Lock()
|
||||
defer p.store.Unlock()
|
||||
|
||||
// Default to old method if flag isnt enabled.
|
||||
// Default to old method if flag isn't enabled.
|
||||
if !features.Get().EnablePeerScorer {
|
||||
p.deprecatedPrune()
|
||||
return
|
||||
@@ -961,7 +995,7 @@ func (p *Status) isfromBadIP(pid peer.ID) bool {
|
||||
return true
|
||||
}
|
||||
if val, ok := p.ipTracker[ip.String()]; ok {
|
||||
if val > ColocationLimit {
|
||||
if val > CollocationLimit {
|
||||
return true
|
||||
}
|
||||
}
|
||||
@@ -1012,7 +1046,7 @@ func (p *Status) tallyIPTracker() {
|
||||
}
|
||||
|
||||
func sameIP(firstAddr, secondAddr ma.Multiaddr) bool {
|
||||
// Exit early if we do get nil multiaddresses
|
||||
// Exit early if we do get nil multi-addresses
|
||||
if firstAddr == nil || secondAddr == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -565,7 +565,7 @@ func TestPeerIPTracker(t *testing.T) {
|
||||
|
||||
badIP := "211.227.218.116"
|
||||
var badPeers []peer.ID
|
||||
for i := 0; i < peers.ColocationLimit+10; i++ {
|
||||
for i := 0; i < peers.CollocationLimit+10; i++ {
|
||||
port := strconv.Itoa(3000 + i)
|
||||
addr, err := ma.NewMultiaddr("/ip4/" + badIP + "/tcp/" + port)
|
||||
if err != nil {
|
||||
@@ -1111,6 +1111,87 @@ func TestInbound(t *testing.T) {
|
||||
assert.Equal(t, inbound.String(), result[0].String())
|
||||
}
|
||||
|
||||
func TestInboundConnected(t *testing.T) {
|
||||
p := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{
|
||||
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
Threshold: 0,
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
addr, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/33333")
|
||||
require.NoError(t, err)
|
||||
inbound := createPeer(t, p, addr, network.DirInbound, peers.PeerConnected)
|
||||
createPeer(t, p, addr, network.DirInbound, peers.PeerConnecting)
|
||||
|
||||
result := p.InboundConnected()
|
||||
require.Equal(t, 1, len(result))
|
||||
assert.Equal(t, inbound.String(), result[0].String())
|
||||
}
|
||||
|
||||
func TestInboundConnectedWithProtocol(t *testing.T) {
|
||||
p := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{
|
||||
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
Threshold: 0,
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
addrsTCP := []string{
|
||||
"/ip4/127.0.0.1/tcp/33333",
|
||||
"/ip4/127.0.0.2/tcp/44444",
|
||||
}
|
||||
|
||||
addrsQUIC := []string{
|
||||
"/ip4/192.168.1.3/udp/13000/quic-v1",
|
||||
"/ip4/192.168.1.4/udp/14000/quic-v1",
|
||||
"/ip4/192.168.1.5/udp/14000/quic-v1",
|
||||
}
|
||||
|
||||
expectedTCP := make(map[string]bool, len(addrsTCP))
|
||||
for _, addr := range addrsTCP {
|
||||
multiaddr, err := ma.NewMultiaddr(addr)
|
||||
require.NoError(t, err)
|
||||
|
||||
peer := createPeer(t, p, multiaddr, network.DirInbound, peers.PeerConnected)
|
||||
expectedTCP[peer.String()] = true
|
||||
}
|
||||
|
||||
expectedQUIC := make(map[string]bool, len(addrsQUIC))
|
||||
for _, addr := range addrsQUIC {
|
||||
multiaddr, err := ma.NewMultiaddr(addr)
|
||||
require.NoError(t, err)
|
||||
|
||||
peer := createPeer(t, p, multiaddr, network.DirInbound, peers.PeerConnected)
|
||||
expectedQUIC[peer.String()] = true
|
||||
}
|
||||
|
||||
// TCP
|
||||
// ---
|
||||
|
||||
actualTCP := p.InboundConnectedWithProtocol(peers.TCP)
|
||||
require.Equal(t, len(expectedTCP), len(actualTCP))
|
||||
|
||||
for _, actualPeer := range actualTCP {
|
||||
_, ok := expectedTCP[actualPeer.String()]
|
||||
require.Equal(t, true, ok)
|
||||
}
|
||||
|
||||
// QUIC
|
||||
// ----
|
||||
actualQUIC := p.InboundConnectedWithProtocol(peers.QUIC)
|
||||
require.Equal(t, len(expectedQUIC), len(actualQUIC))
|
||||
|
||||
for _, actualPeer := range actualQUIC {
|
||||
_, ok := expectedQUIC[actualPeer.String()]
|
||||
require.Equal(t, true, ok)
|
||||
}
|
||||
}
|
||||
|
||||
func TestOutbound(t *testing.T) {
|
||||
p := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
@@ -1130,6 +1211,87 @@ func TestOutbound(t *testing.T) {
|
||||
assert.Equal(t, outbound.String(), result[0].String())
|
||||
}
|
||||
|
||||
func TestOutboundConnected(t *testing.T) {
|
||||
p := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{
|
||||
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
Threshold: 0,
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
addr, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/33333")
|
||||
require.NoError(t, err)
|
||||
inbound := createPeer(t, p, addr, network.DirOutbound, peers.PeerConnected)
|
||||
createPeer(t, p, addr, network.DirOutbound, peers.PeerConnecting)
|
||||
|
||||
result := p.OutboundConnected()
|
||||
require.Equal(t, 1, len(result))
|
||||
assert.Equal(t, inbound.String(), result[0].String())
|
||||
}
|
||||
|
||||
func TestOutboundConnectedWithProtocol(t *testing.T) {
|
||||
p := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{
|
||||
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
Threshold: 0,
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
addrsTCP := []string{
|
||||
"/ip4/127.0.0.1/tcp/33333",
|
||||
"/ip4/127.0.0.2/tcp/44444",
|
||||
}
|
||||
|
||||
addrsQUIC := []string{
|
||||
"/ip4/192.168.1.3/udp/13000/quic-v1",
|
||||
"/ip4/192.168.1.4/udp/14000/quic-v1",
|
||||
"/ip4/192.168.1.5/udp/14000/quic-v1",
|
||||
}
|
||||
|
||||
expectedTCP := make(map[string]bool, len(addrsTCP))
|
||||
for _, addr := range addrsTCP {
|
||||
multiaddr, err := ma.NewMultiaddr(addr)
|
||||
require.NoError(t, err)
|
||||
|
||||
peer := createPeer(t, p, multiaddr, network.DirOutbound, peers.PeerConnected)
|
||||
expectedTCP[peer.String()] = true
|
||||
}
|
||||
|
||||
expectedQUIC := make(map[string]bool, len(addrsQUIC))
|
||||
for _, addr := range addrsQUIC {
|
||||
multiaddr, err := ma.NewMultiaddr(addr)
|
||||
require.NoError(t, err)
|
||||
|
||||
peer := createPeer(t, p, multiaddr, network.DirOutbound, peers.PeerConnected)
|
||||
expectedQUIC[peer.String()] = true
|
||||
}
|
||||
|
||||
// TCP
|
||||
// ---
|
||||
|
||||
actualTCP := p.OutboundConnectedWithProtocol(peers.TCP)
|
||||
require.Equal(t, len(expectedTCP), len(actualTCP))
|
||||
|
||||
for _, actualPeer := range actualTCP {
|
||||
_, ok := expectedTCP[actualPeer.String()]
|
||||
require.Equal(t, true, ok)
|
||||
}
|
||||
|
||||
// QUIC
|
||||
// ----
|
||||
actualQUIC := p.OutboundConnectedWithProtocol(peers.QUIC)
|
||||
require.Equal(t, len(expectedQUIC), len(actualQUIC))
|
||||
|
||||
for _, actualPeer := range actualQUIC {
|
||||
_, ok := expectedQUIC[actualPeer.String()]
|
||||
require.Equal(t, true, ok)
|
||||
}
|
||||
}
|
||||
|
||||
// addPeer is a helper to add a peer with a given connection state.
|
||||
func addPeer(t *testing.T, p *peers.Status, state peerdata.PeerConnectionState) peer.ID {
|
||||
// Set up some peers with different states
|
||||
|
||||
@@ -24,6 +24,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/scorers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/types"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/features"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
leakybucket "github.com/prysmaticlabs/prysm/v5/container/leaky-bucket"
|
||||
prysmnetwork "github.com/prysmaticlabs/prysm/v5/network"
|
||||
@@ -124,31 +125,34 @@ func NewService(ctx context.Context, cfg *Config) (*Service, error) {
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to build p2p options")
|
||||
}
|
||||
|
||||
// Sets mplex timeouts
|
||||
configureMplex()
|
||||
h, err := libp2p.New(opts...)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to create p2p host")
|
||||
return nil, err
|
||||
return nil, errors.Wrapf(err, "failed to create p2p host")
|
||||
}
|
||||
|
||||
s.host = h
|
||||
|
||||
// Gossipsub registration is done before we add in any new peers
|
||||
// due to libp2p's gossipsub implementation not taking into
|
||||
// account previously added peers when creating the gossipsub
|
||||
// object.
|
||||
psOpts := s.pubsubOptions()
|
||||
|
||||
// Set the pubsub global parameters that we require.
|
||||
setPubSubParameters()
|
||||
|
||||
// Reinitialize them in the event we are running a custom config.
|
||||
attestationSubnetCount = params.BeaconConfig().AttestationSubnetCount
|
||||
syncCommsSubnetCount = params.BeaconConfig().SyncCommitteeSubnetCount
|
||||
|
||||
gs, err := pubsub.NewGossipSub(s.ctx, s.host, psOpts...)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to start pubsub")
|
||||
return nil, err
|
||||
return nil, errors.Wrapf(err, "failed to create p2p pubsub")
|
||||
}
|
||||
|
||||
s.pubsub = gs
|
||||
|
||||
s.peers = peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
@@ -213,7 +217,7 @@ func (s *Service) Start() {
|
||||
if len(s.cfg.StaticPeers) > 0 {
|
||||
addrs, err := PeersFromStringAddrs(s.cfg.StaticPeers)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not connect to static peer")
|
||||
log.WithError(err).Error("could not convert ENR to multiaddr")
|
||||
}
|
||||
// Set trusted peers for those that are provided as static addresses.
|
||||
pids := peerIdsFromMultiAddrs(addrs)
|
||||
@@ -232,11 +236,24 @@ func (s *Service) Start() {
|
||||
async.RunEvery(s.ctx, time.Duration(params.BeaconConfig().RespTimeout)*time.Second, s.updateMetrics)
|
||||
async.RunEvery(s.ctx, refreshRate, s.RefreshENR)
|
||||
async.RunEvery(s.ctx, 1*time.Minute, func() {
|
||||
log.WithFields(logrus.Fields{
|
||||
"inbound": len(s.peers.InboundConnected()),
|
||||
"outbound": len(s.peers.OutboundConnected()),
|
||||
"activePeers": len(s.peers.Active()),
|
||||
}).Info("Peer summary")
|
||||
inboundQUICCount := len(s.peers.InboundConnectedWithProtocol(peers.QUIC))
|
||||
inboundTCPCount := len(s.peers.InboundConnectedWithProtocol(peers.TCP))
|
||||
outboundQUICCount := len(s.peers.OutboundConnectedWithProtocol(peers.QUIC))
|
||||
outboundTCPCount := len(s.peers.OutboundConnectedWithProtocol(peers.TCP))
|
||||
total := inboundQUICCount + inboundTCPCount + outboundQUICCount + outboundTCPCount
|
||||
|
||||
fields := logrus.Fields{
|
||||
"inboundTCP": inboundTCPCount,
|
||||
"outboundTCP": outboundTCPCount,
|
||||
"total": total,
|
||||
}
|
||||
|
||||
if features.Get().EnableQUIC {
|
||||
fields["inboundQUIC"] = inboundQUICCount
|
||||
fields["outboundQUIC"] = outboundQUICCount
|
||||
}
|
||||
|
||||
log.WithFields(fields).Info("Connected peers")
|
||||
})
|
||||
|
||||
multiAddrs := s.host.Network().ListenAddresses()
|
||||
@@ -244,9 +261,10 @@ func (s *Service) Start() {
|
||||
|
||||
p2pHostAddress := s.cfg.HostAddress
|
||||
p2pTCPPort := s.cfg.TCPPort
|
||||
p2pQUICPort := s.cfg.QUICPort
|
||||
|
||||
if p2pHostAddress != "" {
|
||||
logExternalIPAddr(s.host.ID(), p2pHostAddress, p2pTCPPort)
|
||||
logExternalIPAddr(s.host.ID(), p2pHostAddress, p2pTCPPort, p2pQUICPort)
|
||||
verifyConnectivity(p2pHostAddress, p2pTCPPort, "tcp")
|
||||
}
|
||||
|
||||
|
||||
@@ -102,8 +102,9 @@ func TestService_Start_OnlyStartsOnce(t *testing.T) {
|
||||
|
||||
cs := startup.NewClockSynchronizer()
|
||||
cfg := &Config{
|
||||
TCPPort: 2000,
|
||||
UDPPort: 2000,
|
||||
TCPPort: 3000,
|
||||
QUICPort: 3000,
|
||||
ClockWaiter: cs,
|
||||
}
|
||||
s, err := NewService(context.Background(), cfg)
|
||||
@@ -147,8 +148,9 @@ func TestService_Start_NoDiscoverFlag(t *testing.T) {
|
||||
|
||||
cs := startup.NewClockSynchronizer()
|
||||
cfg := &Config{
|
||||
TCPPort: 2000,
|
||||
UDPPort: 2000,
|
||||
TCPPort: 3000,
|
||||
QUICPort: 3000,
|
||||
StateNotifier: &mock.MockStateNotifier{},
|
||||
NoDiscovery: true, // <-- no s.dv5Listener is created
|
||||
ClockWaiter: cs,
|
||||
|
||||
@@ -93,6 +93,11 @@ func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string,
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if info == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
if err := s.connectWithPeer(ctx, *info); err != nil {
|
||||
|
||||
@@ -66,7 +66,7 @@ func TestStartDiscV5_FindPeersWithSubnet(t *testing.T) {
|
||||
genesisTime := time.Now()
|
||||
|
||||
bootNodeService := &Service{
|
||||
cfg: &Config{TCPPort: 2000, UDPPort: 3000},
|
||||
cfg: &Config{UDPPort: 2000, TCPPort: 3000, QUICPort: 3000},
|
||||
genesisTime: genesisTime,
|
||||
genesisValidatorsRoot: genesisValidatorsRoot,
|
||||
}
|
||||
@@ -89,8 +89,9 @@ func TestStartDiscV5_FindPeersWithSubnet(t *testing.T) {
|
||||
service, err := NewService(ctx, &Config{
|
||||
Discv5BootStrapAddrs: []string{bootNodeENR},
|
||||
MaxPeers: 30,
|
||||
TCPPort: uint(2000 + i),
|
||||
UDPPort: uint(3000 + i),
|
||||
UDPPort: uint(2000 + i),
|
||||
TCPPort: uint(3000 + i),
|
||||
QUICPort: uint(3000 + i),
|
||||
})
|
||||
|
||||
require.NoError(t, err)
|
||||
@@ -133,8 +134,9 @@ func TestStartDiscV5_FindPeersWithSubnet(t *testing.T) {
|
||||
cfg := &Config{
|
||||
Discv5BootStrapAddrs: []string{bootNodeENR},
|
||||
MaxPeers: 30,
|
||||
TCPPort: 2010,
|
||||
UDPPort: 3010,
|
||||
UDPPort: 2010,
|
||||
TCPPort: 3010,
|
||||
QUICPort: 3010,
|
||||
}
|
||||
|
||||
service, err := NewService(ctx, cfg)
|
||||
|
||||
@@ -50,7 +50,7 @@ func ensurePeerConnections(ctx context.Context, h host.Host, peers *peers.Status
|
||||
c := h.Network().ConnsToPeer(p.ID)
|
||||
if len(c) == 0 {
|
||||
if err := connectWithTimeout(ctx, h, p); err != nil {
|
||||
log.WithField("peer", p.ID).WithField("addrs", p.Addrs).WithError(err).Errorf("Failed to reconnect to peer")
|
||||
log.WithField("peer", p.ID).WithField("addrs", p.Addrs).WithError(err).Errorf("failed to reconnect to peer")
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
@@ -37,6 +37,7 @@ go_library(
|
||||
"//beacon-chain/rpc/eth/helpers:go_default_library",
|
||||
"//beacon-chain/rpc/eth/shared:go_default_library",
|
||||
"//beacon-chain/rpc/lookup:go_default_library",
|
||||
"//beacon-chain/rpc/prysm/v1alpha1/validator:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//beacon-chain/state/stategen:go_default_library",
|
||||
@@ -121,6 +122,7 @@ go_test(
|
||||
"@com_github_gorilla_mux//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
|
||||
"@com_github_stretchr_testify//mock:go_default_library",
|
||||
"@org_uber_go_mock//gomock:go_default_library",
|
||||
],
|
||||
|
||||
@@ -21,6 +21,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filters"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/shared"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/prysm/v1alpha1/validator"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
consensus_types "github.com/prysmaticlabs/prysm/v5/consensus-types"
|
||||
@@ -32,6 +33,7 @@ import (
|
||||
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
@@ -42,7 +44,8 @@ const (
|
||||
)
|
||||
|
||||
var (
|
||||
errNilBlock = errors.New("nil block")
|
||||
errNilBlock = errors.New("nil block")
|
||||
errEquivocatedBlock = errors.New("block is equivocated")
|
||||
)
|
||||
|
||||
type handled bool
|
||||
@@ -1254,6 +1257,16 @@ func (s *Server) publishBlockSSZ(ctx context.Context, w http.ResponseWriter, r *
|
||||
},
|
||||
}
|
||||
if err = s.validateBroadcast(ctx, r, genericBlock); err != nil {
|
||||
if errors.Is(err, errEquivocatedBlock) {
|
||||
b, err := blocks.NewSignedBeaconBlock(genericBlock)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
if err := s.broadcastSeenBlockSidecars(ctx, b, genericBlock.GetDeneb().Blobs, genericBlock.GetDeneb().KzgProofs); err != nil {
|
||||
log.WithError(err).Error("Failed to broadcast blob sidecars")
|
||||
}
|
||||
}
|
||||
httputil.HandleError(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
@@ -1383,6 +1396,16 @@ func (s *Server) publishBlock(ctx context.Context, w http.ResponseWriter, r *htt
|
||||
consensusBlock, err = denebBlockContents.ToGeneric()
|
||||
if err == nil {
|
||||
if err = s.validateBroadcast(ctx, r, consensusBlock); err != nil {
|
||||
if errors.Is(err, errEquivocatedBlock) {
|
||||
b, err := blocks.NewSignedBeaconBlock(consensusBlock)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
if err := s.broadcastSeenBlockSidecars(ctx, b, consensusBlock.GetDeneb().Blobs, consensusBlock.GetDeneb().KzgProofs); err != nil {
|
||||
log.WithError(err).Error("Failed to broadcast blob sidecars")
|
||||
}
|
||||
}
|
||||
httputil.HandleError(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
@@ -1547,7 +1570,7 @@ func (s *Server) validateConsensus(ctx context.Context, blk interfaces.ReadOnlyS
|
||||
|
||||
func (s *Server) validateEquivocation(blk interfaces.ReadOnlyBeaconBlock) error {
|
||||
if s.ForkchoiceFetcher.HighestReceivedBlockSlot() == blk.Slot() {
|
||||
return fmt.Errorf("block for slot %d already exists in fork choice", blk.Slot())
|
||||
return errors.Wrapf(errEquivocatedBlock, "block for slot %d already exists in fork choice", blk.Slot())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -2072,3 +2095,37 @@ func (s *Server) GetDepositSnapshot(w http.ResponseWriter, r *http.Request) {
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
// Broadcast blob sidecars even if the block of the same slot has been imported.
|
||||
// To ensure safety, we will only broadcast blob sidecars if the header references the same block that was previously seen.
|
||||
// Otherwise, a proposer could get slashed through a different blob sidecar header reference.
|
||||
func (s *Server) broadcastSeenBlockSidecars(
|
||||
ctx context.Context,
|
||||
b interfaces.SignedBeaconBlock,
|
||||
blobs [][]byte,
|
||||
kzgProofs [][]byte) error {
|
||||
scs, err := validator.BuildBlobSidecars(b, blobs, kzgProofs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, sc := range scs {
|
||||
r, err := sc.SignedBlockHeader.Header.HashTreeRoot()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to hash block header for blob sidecar")
|
||||
continue
|
||||
}
|
||||
if !s.FinalizationFetcher.InForkchoice(r) {
|
||||
log.WithField("root", fmt.Sprintf("%#x", r)).Debug("Block header not in forkchoice, skipping blob sidecar broadcast")
|
||||
continue
|
||||
}
|
||||
if err := s.Broadcaster.BroadcastBlob(ctx, sc.Index, sc); err != nil {
|
||||
log.WithError(err).Error("Failed to broadcast blob sidecar for index ", sc.Index)
|
||||
}
|
||||
log.WithFields(logrus.Fields{
|
||||
"index": sc.Index,
|
||||
"slot": sc.SignedBlockHeader.Header.Slot,
|
||||
"kzgCommitment": fmt.Sprintf("%#x", sc.KzgCommitment),
|
||||
}).Info("Broadcasted blob sidecar for already seen block")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -13,6 +13,8 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
mockp2p "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/testing"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
"go.uber.org/mock/gomock"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
@@ -2472,7 +2474,9 @@ func TestValidateEquivocation(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
blk.SetSlot(st.Slot())
|
||||
|
||||
assert.ErrorContains(t, "already exists", server.validateEquivocation(blk.Block()))
|
||||
err = server.validateEquivocation(blk.Block())
|
||||
assert.ErrorContains(t, "already exists", err)
|
||||
require.ErrorIs(t, err, errEquivocatedBlock)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -3630,3 +3634,27 @@ func TestGetDepositSnapshot(t *testing.T) {
|
||||
assert.Equal(t, finalized, len(resp.Finalized))
|
||||
})
|
||||
}
|
||||
|
||||
func TestServer_broadcastBlobSidecars(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
blockToPropose := util.NewBeaconBlockContentsDeneb()
|
||||
blockToPropose.Blobs = [][]byte{{0x01}, {0x02}, {0x03}}
|
||||
blockToPropose.KzgProofs = [][]byte{{0x01}, {0x02}, {0x03}}
|
||||
blockToPropose.Block.Block.Body.BlobKzgCommitments = [][]byte{bytesutil.PadTo([]byte("kc"), 48), bytesutil.PadTo([]byte("kc1"), 48), bytesutil.PadTo([]byte("kc2"), 48)}
|
||||
d := ð.GenericSignedBeaconBlock_Deneb{Deneb: blockToPropose}
|
||||
b := ð.GenericSignedBeaconBlock{Block: d}
|
||||
|
||||
server := &Server{
|
||||
Broadcaster: &mockp2p.MockBroadcaster{},
|
||||
FinalizationFetcher: &chainMock.ChainService{NotFinalized: true},
|
||||
}
|
||||
|
||||
blk, err := blocks.NewSignedBeaconBlock(b.Block)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, server.broadcastSeenBlockSidecars(context.Background(), blk, b.GetDeneb().Blobs, b.GetDeneb().KzgProofs))
|
||||
require.LogsDoNotContain(t, hook, "Broadcasted blob sidecar for already seen block")
|
||||
|
||||
server.FinalizationFetcher = &chainMock.ChainService{NotFinalized: false}
|
||||
require.NoError(t, server.broadcastSeenBlockSidecars(context.Background(), blk, b.GetDeneb().Blobs, b.GetDeneb().KzgProofs))
|
||||
require.LogsContain(t, hook, "Broadcasted blob sidecar for already seen block")
|
||||
}
|
||||
|
||||
@@ -341,7 +341,7 @@ func (vs *Server) handleUnblindedBlock(block interfaces.SignedBeaconBlock, req *
|
||||
if dbBlockContents == nil {
|
||||
return nil, nil
|
||||
}
|
||||
return buildBlobSidecars(block, dbBlockContents.Blobs, dbBlockContents.KzgProofs)
|
||||
return BuildBlobSidecars(block, dbBlockContents.Blobs, dbBlockContents.KzgProofs)
|
||||
}
|
||||
|
||||
// broadcastReceiveBlock broadcasts a block and handles its reception.
|
||||
|
||||
@@ -56,8 +56,8 @@ func (c *blobsBundleCache) prune(minSlot primitives.Slot) {
|
||||
}
|
||||
}
|
||||
|
||||
// buildBlobSidecars given a block, builds the blob sidecars for the block.
|
||||
func buildBlobSidecars(blk interfaces.SignedBeaconBlock, blobs [][]byte, kzgProofs [][]byte) ([]*ethpb.BlobSidecar, error) {
|
||||
// BuildBlobSidecars given a block, builds the blob sidecars for the block.
|
||||
func BuildBlobSidecars(blk interfaces.SignedBeaconBlock, blobs [][]byte, kzgProofs [][]byte) ([]*ethpb.BlobSidecar, error) {
|
||||
if blk.Version() < version.Deneb {
|
||||
return nil, nil // No blobs before deneb.
|
||||
}
|
||||
|
||||
@@ -51,7 +51,7 @@ func TestServer_buildBlobSidecars(t *testing.T) {
|
||||
require.NoError(t, blk.SetBlobKzgCommitments(kzgCommitments))
|
||||
proof, err := hexutil.Decode("0xb4021b0de10f743893d4f71e1bf830c019e832958efd6795baf2f83b8699a9eccc5dc99015d8d4d8ec370d0cc333c06a")
|
||||
require.NoError(t, err)
|
||||
scs, err := buildBlobSidecars(blk, [][]byte{
|
||||
scs, err := BuildBlobSidecars(blk, [][]byte{
|
||||
make([]byte, fieldparams.BlobLength), make([]byte, fieldparams.BlobLength),
|
||||
}, [][]byte{
|
||||
proof, proof,
|
||||
|
||||
@@ -208,8 +208,8 @@ func (m *MinSpanChunksSlice) CheckSlashable(
|
||||
}
|
||||
|
||||
if existingAttWrapper == nil {
|
||||
// This case should normally not happen. If this happen, it means we previously
|
||||
// recorded in our min/max DB an distance corresponding to an attestaiton, but WITHOUT
|
||||
// This case should normally not happen. If this happens, it means we previously
|
||||
// recorded in our min/max DB a distance corresponding to an attestation, but WITHOUT
|
||||
// recording the attestation itself. As a consequence, we say there is no surrounding vote,
|
||||
// but we log an error.
|
||||
fields := logrus.Fields{
|
||||
@@ -287,8 +287,8 @@ func (m *MaxSpanChunksSlice) CheckSlashable(
|
||||
}
|
||||
|
||||
if existingAttWrapper == nil {
|
||||
// This case should normally not happen. If this happen, it means we previously
|
||||
// recorded in our min/max DB an distance corresponding to an attestaiton, but WITHOUT
|
||||
// This case should normally not happen. If this happens, it means we previously
|
||||
// recorded in our min/max DB a distance corresponding to an attestation, but WITHOUT
|
||||
// recording the attestation itself. As a consequence, we say there is no surrounded vote,
|
||||
// but we log an error.
|
||||
fields := logrus.Fields{
|
||||
|
||||
@@ -1059,7 +1059,7 @@ func Test_updatedChunkByChunkIndex(t *testing.T) {
|
||||
// Initialize the slasher database.
|
||||
slasherDB := dbtest.SetupSlasherDB(t)
|
||||
|
||||
// Intialize the slasher service.
|
||||
// Initialize the slasher service.
|
||||
service := &Service{
|
||||
params: &Parameters{
|
||||
chunkSize: tt.chunkSize,
|
||||
@@ -1502,7 +1502,7 @@ func runAttestationsBenchmark(b *testing.B, s *Service, numAtts, numValidators u
|
||||
|
||||
func Benchmark_checkSurroundVotes(b *testing.B) {
|
||||
const (
|
||||
// Approximatively the number of Holesky active validators on 2024-02-16
|
||||
// Approximately the number of Holesky active validators on 2024-02-16
|
||||
// This number is both a multiple of 32 (the number of slots per epoch) and 256 (the number of validators per chunk)
|
||||
validatorsCount = 1_638_400
|
||||
slotsPerEpoch = 32
|
||||
@@ -1526,7 +1526,7 @@ func Benchmark_checkSurroundVotes(b *testing.B) {
|
||||
// So for 1_638_400 validators with 32 slots per epoch, we would have 48_000 attestation wrappers per slot.
|
||||
// With 256 validators per chunk, we would have only 188 modified chunks.
|
||||
//
|
||||
// In this benchmark, we use the worst case scenario where attestating validators are evenly splitted across all validators chunks.
|
||||
// In this benchmark, we use the worst case scenario where attesting validators are evenly split across all validators chunks.
|
||||
// We also suppose that only one chunk per validator chunk index is modified.
|
||||
// For one given validator index, multiple chunk indexes could be modified.
|
||||
//
|
||||
|
||||
@@ -135,7 +135,7 @@ With 1_048_576 validators, we need 4096 * 2MB = 8GB
Storing both MIN and MAX spans for 1_048_576 validators takes 16GB.

Each chunk is stored snappy-compressed in the database.
If all validators attest ideally, a MIN SPAN chunk will contain only `2`s, and and MAX SPAN chunk will contain only `0`s.
If all validators attest ideally, a MIN SPAN chunk will contain only `2`s, and MAX SPAN chunk will contain only `0`s.
This will compress very well, and will let us store a lot of data in a small amount of space.
*/
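
A small standalone sketch making the arithmetic and the compression claim above concrete. It assumes 2MB chunks and the github.com/golang/snappy codec; the exact codec and chunk layout used by the database are not shown in this excerpt, so treat it as illustrative only.

package main

import (
	"fmt"

	"github.com/golang/snappy"
)

func main() {
	const chunkSize = 2 << 20 // 2MB per chunk, as in the estimate above
	chunk := make([]byte, chunkSize)
	for i := range chunk {
		chunk[i] = 2 // an "ideal" MIN SPAN chunk holds only 2s
	}
	compressed := snappy.Encode(nil, chunk)
	fmt.Printf("raw: %d bytes, snappy: %d bytes\n", len(chunk), len(compressed))
	fmt.Printf("4096 chunks * 2MB = %d GB per span type\n", 4096*chunkSize>>30)
}
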
@@ -79,7 +79,7 @@ func (s *Service) filterAttestations(
|
||||
continue
|
||||
}
|
||||
|
||||
// If an attestations's target epoch is in the future, we defer processing for later.
|
||||
// If an attestation's target epoch is in the future, we defer processing for later.
|
||||
if attWrapper.IndexedAttestation.Data.Target.Epoch > currentEpoch {
|
||||
validInFuture = append(validInFuture, attWrapper)
|
||||
continue
|
||||
|
||||
@@ -33,7 +33,6 @@ go_library(
|
||||
"//beacon-chain/verification:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
@@ -83,10 +82,10 @@ go_test(
|
||||
"//beacon-chain/p2p/types:go_default_library",
|
||||
"//beacon-chain/startup:go_default_library",
|
||||
"//beacon-chain/sync:go_default_library",
|
||||
"//beacon-chain/sync/verify:go_default_library",
|
||||
"//beacon-chain/verification:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
|
||||
p2pTypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/types"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
|
||||
@@ -18,7 +19,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/sync/verify"
|
||||
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
consensus_types "github.com/prysmaticlabs/prysm/v5/consensus-types"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
blocks2 "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
@@ -27,6 +27,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/rand"
|
||||
"github.com/prysmaticlabs/prysm/v5/math"
|
||||
p2ppb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
@@ -76,6 +77,7 @@ type blocksFetcherConfig struct {
|
||||
db db.ReadOnlyDatabase
|
||||
peerFilterCapacityWeight float64
|
||||
mode syncMode
|
||||
bs filesystem.BlobStorageSummarizer
|
||||
}
|
||||
|
||||
// blocksFetcher is a service to fetch chain data from peers.
|
||||
@@ -91,6 +93,7 @@ type blocksFetcher struct {
|
||||
ctxMap prysmsync.ContextByteVersions
|
||||
p2p p2p.P2P
|
||||
db db.ReadOnlyDatabase
|
||||
bs filesystem.BlobStorageSummarizer
|
||||
blocksPerPeriod uint64
|
||||
rateLimiter *leakybucket.Collector
|
||||
peerLocks map[peer.ID]*peerLock
|
||||
@@ -149,6 +152,7 @@ func newBlocksFetcher(ctx context.Context, cfg *blocksFetcherConfig) *blocksFetc
|
||||
ctxMap: cfg.ctxMap,
|
||||
p2p: cfg.p2p,
|
||||
db: cfg.db,
|
||||
bs: cfg.bs,
|
||||
blocksPerPeriod: uint64(blocksPerPeriod),
|
||||
rateLimiter: rateLimiter,
|
||||
peerLocks: make(map[peer.ID]*peerLock),
|
||||
@@ -236,7 +240,7 @@ func (f *blocksFetcher) loop() {
|
||||
|
||||
// Main loop.
|
||||
for {
|
||||
// Make sure there is are available peers before processing requests.
|
||||
// Make sure there are available peers before processing requests.
|
||||
if _, err := f.waitForMinimumPeers(f.ctx); err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
@@ -372,22 +376,17 @@ func sortedBlockWithVerifiedBlobSlice(blocks []interfaces.ReadOnlySignedBeaconBl
|
||||
return rb, nil
|
||||
}
|
||||
|
||||
func blobRequest(bwb []blocks2.BlockWithROBlobs, blobWindowStart primitives.Slot) *p2ppb.BlobSidecarsByRangeRequest {
|
||||
if len(bwb) == 0 {
|
||||
return nil
|
||||
}
|
||||
lowest := lowestSlotNeedsBlob(blobWindowStart, bwb)
|
||||
if lowest == nil {
|
||||
return nil
|
||||
}
|
||||
highest := bwb[len(bwb)-1].Block.Block().Slot()
|
||||
return &p2ppb.BlobSidecarsByRangeRequest{
|
||||
StartSlot: *lowest,
|
||||
Count: uint64(highest.SubSlot(*lowest)) + 1,
|
||||
}
|
||||
type commitmentCount struct {
|
||||
slot primitives.Slot
|
||||
root [32]byte
|
||||
count int
|
||||
}
|
||||
|
||||
func lowestSlotNeedsBlob(retentionStart primitives.Slot, bwb []blocks2.BlockWithROBlobs) *primitives.Slot {
|
||||
type commitmentCountList []commitmentCount
|
||||
|
||||
// countCommitments makes a list of all blocks that have commitments that need to be satisfied.
|
||||
// This gives us a representation to finish building the request that is lightweight and readable for testing.
|
||||
func countCommitments(bwb []blocks2.BlockWithROBlobs, retentionStart primitives.Slot) commitmentCountList {
|
||||
if len(bwb) == 0 {
|
||||
return nil
|
||||
}
|
||||
@@ -397,8 +396,13 @@ func lowestSlotNeedsBlob(retentionStart primitives.Slot, bwb []blocks2.BlockWith
|
||||
if bwb[len(bwb)-1].Block.Block().Slot() < retentionStart {
|
||||
return nil
|
||||
}
|
||||
for _, b := range bwb {
|
||||
fc := make([]commitmentCount, 0, len(bwb))
|
||||
for i := range bwb {
|
||||
b := bwb[i]
|
||||
slot := b.Block.Block().Slot()
|
||||
if b.Block.Version() < version.Deneb {
|
||||
continue
|
||||
}
|
||||
if slot < retentionStart {
|
||||
continue
|
||||
}
|
||||
@@ -406,67 +410,116 @@ func lowestSlotNeedsBlob(retentionStart primitives.Slot, bwb []blocks2.BlockWith
|
||||
if err != nil || len(commits) == 0 {
|
||||
continue
|
||||
}
|
||||
return &slot
|
||||
fc = append(fc, commitmentCount{slot: slot, root: b.Block.Root(), count: len(commits)})
|
||||
}
|
||||
return fc
|
||||
}
|
||||
|
||||
// func slotRangeForCommitmentCounts(cc []commitmentCount, bs filesystem.BlobStorageSummarizer) *blobRange {
|
||||
func (cc commitmentCountList) blobRange(bs filesystem.BlobStorageSummarizer) *blobRange {
|
||||
if len(cc) == 0 {
|
||||
return nil
|
||||
}
|
||||
// If we don't have a blob summarizer, can't check local blobs, request blobs over complete range.
|
||||
if bs == nil {
|
||||
return &blobRange{low: cc[0].slot, high: cc[len(cc)-1].slot}
|
||||
}
|
||||
for i := range cc {
|
||||
hci := cc[i]
|
||||
// This list is always ordered by increasing slot, per req/resp validation rules.
|
||||
// Skip through slots until we find one with missing blobs.
|
||||
if bs.Summary(hci.root).AllAvailable(hci.count) {
|
||||
continue
|
||||
}
|
||||
// The slot of the first missing blob is the lower bound.
|
||||
// If we don't find an upper bound, we'll have a 1 slot request (same low/high).
|
||||
needed := &blobRange{low: hci.slot, high: hci.slot}
|
||||
// Iterate backward through the list to find the highest missing slot above the lower bound.
|
||||
// Return the complete range as soon as we find it; if lower bound is already the last element,
|
||||
// or if we never find an upper bound, we'll fall through to the bounds being equal after this loop.
|
||||
for z := len(cc) - 1; z > i; z-- {
|
||||
hcz := cc[z]
|
||||
if bs.Summary(hcz.root).AllAvailable(hcz.count) {
|
||||
continue
|
||||
}
|
||||
needed.high = hcz.slot
|
||||
return needed
|
||||
}
|
||||
return needed
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func sortBlobs(blobs []blocks.ROBlob) []blocks.ROBlob {
|
||||
sort.Slice(blobs, func(i, j int) bool {
|
||||
if blobs[i].Slot() == blobs[j].Slot() {
|
||||
return blobs[i].Index < blobs[j].Index
|
||||
}
|
||||
return blobs[i].Slot() < blobs[j].Slot()
|
||||
})
|
||||
type blobRange struct {
low primitives.Slot
high primitives.Slot
}

return blobs
func (r *blobRange) Request() *p2ppb.BlobSidecarsByRangeRequest {
if r == nil {
return nil
}
return &p2ppb.BlobSidecarsByRangeRequest{
StartSlot: r.low,
Count: uint64(r.high.SubSlot(r.low)) + 1,
}
}
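
A worked reading of the request math above (sketch only; the slot values are hypothetical):

// Illustrative only: a range whose lowest missing-blob slot is 100 and whose
// highest is 110 yields an 11-slot request; a one-slot range yields Count == 1.
r := &blobRange{low: 100, high: 110}
req := r.Request()
// req.StartSlot == 100, req.Count == 110 - 100 + 1 == 11
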
var errBlobVerification = errors.New("peer unable to serve aligned BlobSidecarsByRange and BeaconBlockSidecarsByRange responses")
var errMissingBlobsForBlockCommitments = errors.Wrap(errBlobVerification, "blobs unavailable for processing block with kzg commitments")

func verifyAndPopulateBlobs(bwb []blocks2.BlockWithROBlobs, blobs []blocks.ROBlob, blobWindowStart primitives.Slot) ([]blocks2.BlockWithROBlobs, error) {
// Assumes bwb has already been sorted by sortedBlockWithVerifiedBlobSlice.
blobs = sortBlobs(blobs)
blobi := 0
// Loop over all blocks, and each time a commitment is observed, advance the index into the blob slice.
// The assumption is that the blob slice contains a value for every commitment in the blocks it is based on,
// correctly ordered by slot and blob index.
for i, bb := range bwb {
block := bb.Block.Block()
if block.Slot() < blobWindowStart {
func verifyAndPopulateBlobs(bwb []blocks2.BlockWithROBlobs, blobs []blocks.ROBlob, req *p2ppb.BlobSidecarsByRangeRequest, bss filesystem.BlobStorageSummarizer) ([]blocks2.BlockWithROBlobs, error) {
blobsByRoot := make(map[[32]byte][]blocks.ROBlob)
for i := range blobs {
if blobs[i].Slot() < req.StartSlot {
continue
}
commits, err := block.Body().BlobKzgCommitments()
br := blobs[i].BlockRoot()
blobsByRoot[br] = append(blobsByRoot[br], blobs[i])
}
for i := range bwb {
bwi, err := populateBlock(bwb[i], blobsByRoot[bwb[i].Block.Root()], req, bss)
if err != nil {
if errors.Is(err, consensus_types.ErrUnsupportedField) {
log.
WithField("blockSlot", block.Slot()).
WithField("retentionStart", blobWindowStart).
Warn("block with slot within blob retention period has version which does not support commitments")
if errors.Is(err, errDidntPopulate) {
continue
}
return nil, err
return bwb, err
}
bb.Blobs = make([]blocks.ROBlob, len(commits))
for ci := range commits {
// There are more expected commitments in this block, but we've run out of blobs from the response
// (out-of-bound error guard).
if blobi == len(blobs) {
return nil, missingCommitError(bb.Block.Root(), bb.Block.Block().Slot(), commits[ci:])
}
bl := blobs[blobi]
if err := verify.BlobAlignsWithBlock(bl, bb.Block); err != nil {
return nil, err
}
bb.Blobs[ci] = bl
blobi += 1
}
bwb[i] = bb
bwb[i] = bwi
}
return bwb, nil
}

var errDidntPopulate = errors.New("skipping population of block")

func populateBlock(bw blocks2.BlockWithROBlobs, blobs []blocks.ROBlob, req *p2ppb.BlobSidecarsByRangeRequest, bss filesystem.BlobStorageSummarizer) (blocks2.BlockWithROBlobs, error) {
blk := bw.Block
if blk.Version() < version.Deneb || blk.Block().Slot() < req.StartSlot {
return bw, errDidntPopulate
}
commits, err := blk.Block().Body().BlobKzgCommitments()
if err != nil {
return bw, errDidntPopulate
}
if len(commits) == 0 {
return bw, errDidntPopulate
}
// Drop blobs on the floor if we already have them.
if bss != nil && bss.Summary(blk.Root()).AllAvailable(len(commits)) {
return bw, errDidntPopulate
}
if len(commits) != len(blobs) {
return bw, missingCommitError(blk.Root(), blk.Block().Slot(), commits)
}
for ci := range commits {
if err := verify.BlobAlignsWithBlock(blobs[ci], blk); err != nil {
return bw, err
}
}
bw.Blobs = blobs
return bw, nil
}

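populateBlock reports "nothing to copy" (pre-Deneb block, no commitments, or blobs already on disk) through the errDidntPopulate sentinel rather than a boolean, so verifyAndPopulateBlobs can distinguish a benign skip from a real verification failure. Restated compactly, the caller-side contract from the loop above is:

// Caller-side contract, restated from the loop in verifyAndPopulateBlobs above.
bwi, err := populateBlock(bwb[i], blobsByRoot[bwb[i].Block.Root()], req, bss)
if errors.Is(err, errDidntPopulate) {
	continue // benign: this block needs nothing from the response
}
if err != nil {
	return bwb, err // real failure, e.g. missing or misaligned blobs
}
bwb[i] = bwi
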
func missingCommitError(root [32]byte, slot primitives.Slot, missing [][]byte) error {
missStr := make([]string, 0, len(missing))
for k := range missing {
@@ -488,7 +541,7 @@ func (f *blocksFetcher) fetchBlobsFromPeer(ctx context.Context, bwb []blocks2.Bl
return nil, err
}
// Construct request message based on observed interval of blocks in need of blobs.
req := blobRequest(bwb, blobWindowStart)
req := countCommitments(bwb, blobWindowStart).blobRange(f.bs).Request()
if req == nil {
return bwb, nil
}
@@ -508,7 +561,7 @@ func (f *blocksFetcher) fetchBlobsFromPeer(ctx context.Context, bwb []blocks2.Bl
continue
}
f.p2p.Peers().Scorers().BlockProviderScorer().Touch(p)
robs, err := verifyAndPopulateBlobs(bwb, blobs, blobWindowStart)
robs, err := verifyAndPopulateBlobs(bwb, blobs, req, f.bs)
if err != nil {
log.WithField("peer", p).WithError(err).Debug("Invalid BeaconBlobsByRange response")
continue

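Taken together, the fetch path reduces to a short pipeline: count the commitments still inside the retention window, narrow them to the range of slots with missing blobs, turn that into a by-range request, and then verify and attach the response. A simplified sketch of that flow (the per-peer request loop is collapsed into a hypothetical requestBlobs helper, which is not a real function in this package):

// Sketch of the new fetchBlobsFromPeer flow; requestBlobs is a stand-in for
// the real per-peer request loop shown in the hunk above.
req := countCommitments(bwb, blobWindowStart).blobRange(f.bs).Request()
if req == nil {
	return bwb, nil // nothing in this batch still needs blobs
}
blobs, err := requestBlobs(ctx, req) // hypothetical helper
if err != nil {
	return nil, err
}
return verifyAndPopulateBlobs(bwb, blobs, req, f.bs)
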
@@ -4,7 +4,6 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
"math/rand"
|
||||
"sort"
|
||||
"sync"
|
||||
"testing"
|
||||
@@ -14,13 +13,14 @@ import (
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
mock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
|
||||
dbtest "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/testing"
|
||||
p2pm "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
|
||||
p2pt "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/testing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
|
||||
beaconsync "github.com/prysmaticlabs/prysm/v5/beacon-chain/sync"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/sync/verify"
|
||||
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
@@ -960,28 +960,7 @@ func TestTimeToWait(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestSortBlobs(t *testing.T) {
|
||||
_, blobs := util.ExtendBlocksPlusBlobs(t, []blocks.ROBlock{}, 10)
|
||||
shuffled := make([]blocks.ROBlob, len(blobs))
|
||||
for i := range blobs {
|
||||
shuffled[i] = blobs[i]
|
||||
}
|
||||
rand.Shuffle(len(shuffled), func(i, j int) {
|
||||
shuffled[i], shuffled[j] = shuffled[j], shuffled[i]
|
||||
})
|
||||
sorted := sortBlobs(shuffled)
|
||||
require.Equal(t, len(sorted), len(shuffled))
|
||||
for i := range blobs {
|
||||
expect := blobs[i]
|
||||
actual := sorted[i]
|
||||
require.Equal(t, expect.Slot(), actual.Slot())
|
||||
require.Equal(t, expect.Index, actual.Index)
|
||||
require.Equal(t, bytesutil.ToBytes48(expect.KzgCommitment), bytesutil.ToBytes48(actual.KzgCommitment))
|
||||
require.Equal(t, expect.BlockRoot(), actual.BlockRoot())
|
||||
}
|
||||
}
|
||||
|
||||
func TestLowestSlotNeedsBlob(t *testing.T) {
|
||||
func TestBlobRangeForBlocks(t *testing.T) {
|
||||
blks, _ := util.ExtendBlocksPlusBlobs(t, []blocks.ROBlock{}, 10)
|
||||
sbbs := make([]interfaces.ReadOnlySignedBeaconBlock, len(blks))
|
||||
for i := range blks {
|
||||
@@ -990,12 +969,12 @@ func TestLowestSlotNeedsBlob(t *testing.T) {
|
||||
retentionStart := primitives.Slot(5)
|
||||
bwb, err := sortedBlockWithVerifiedBlobSlice(sbbs)
|
||||
require.NoError(t, err)
|
||||
lowest := lowestSlotNeedsBlob(retentionStart, bwb)
|
||||
require.Equal(t, retentionStart, *lowest)
|
||||
bounds := countCommitments(bwb, retentionStart).blobRange(nil)
|
||||
require.Equal(t, retentionStart, bounds.low)
|
||||
higher := primitives.Slot(len(blks) + 1)
|
||||
lowest = lowestSlotNeedsBlob(higher, bwb)
|
||||
var nilSlot *primitives.Slot
|
||||
require.Equal(t, nilSlot, lowest)
|
||||
bounds = countCommitments(bwb, higher).blobRange(nil)
|
||||
var nilBounds *blobRange
|
||||
require.Equal(t, nilBounds, bounds)
|
||||
|
||||
blks, _ = util.ExtendBlocksPlusBlobs(t, []blocks.ROBlock{}, 10)
|
||||
sbbs = make([]interfaces.ReadOnlySignedBeaconBlock, len(blks))
|
||||
@@ -1008,14 +987,14 @@ func TestLowestSlotNeedsBlob(t *testing.T) {
|
||||
next := bwb[6].Block.Block().Slot()
|
||||
skip := bwb[5].Block.Block()
|
||||
bwb[5].Block, _ = util.GenerateTestDenebBlockWithSidecar(t, skip.ParentRoot(), skip.Slot(), 0)
|
||||
lowest = lowestSlotNeedsBlob(retentionStart, bwb)
|
||||
require.Equal(t, next, *lowest)
|
||||
bounds = countCommitments(bwb, retentionStart).blobRange(nil)
|
||||
require.Equal(t, next, bounds.low)
|
||||
}
|
||||
|
||||
func TestBlobRequest(t *testing.T) {
|
||||
var nilReq *ethpb.BlobSidecarsByRangeRequest
|
||||
// no blocks
|
||||
req := blobRequest([]blocks.BlockWithROBlobs{}, 0)
|
||||
req := countCommitments([]blocks.BlockWithROBlobs{}, 0).blobRange(nil).Request()
|
||||
require.Equal(t, nilReq, req)
|
||||
blks, _ := util.ExtendBlocksPlusBlobs(t, []blocks.ROBlock{}, 10)
|
||||
sbbs := make([]interfaces.ReadOnlySignedBeaconBlock, len(blks))
|
||||
@@ -1027,26 +1006,180 @@ func TestBlobRequest(t *testing.T) {
|
||||
maxBlkSlot := primitives.Slot(len(blks) - 1)
|
||||
|
||||
tooHigh := primitives.Slot(len(blks) + 1)
|
||||
req = blobRequest(bwb, tooHigh)
|
||||
req = countCommitments(bwb, tooHigh).blobRange(nil).Request()
|
||||
require.Equal(t, nilReq, req)
|
||||
|
||||
req = blobRequest(bwb, maxBlkSlot)
|
||||
req = countCommitments(bwb, maxBlkSlot).blobRange(nil).Request()
|
||||
require.Equal(t, uint64(1), req.Count)
|
||||
require.Equal(t, maxBlkSlot, req.StartSlot)
|
||||
|
||||
halfway := primitives.Slot(5)
|
||||
req = blobRequest(bwb, halfway)
|
||||
req = countCommitments(bwb, halfway).blobRange(nil).Request()
|
||||
require.Equal(t, halfway, req.StartSlot)
|
||||
// adding 1 to include the halfway slot itself
|
||||
require.Equal(t, uint64(1+maxBlkSlot-halfway), req.Count)
|
||||
|
||||
before := bwb[0].Block.Block().Slot()
|
||||
allAfter := bwb[1:]
|
||||
req = blobRequest(allAfter, before)
|
||||
req = countCommitments(allAfter, before).blobRange(nil).Request()
|
||||
require.Equal(t, allAfter[0].Block.Block().Slot(), req.StartSlot)
|
||||
require.Equal(t, len(allAfter), int(req.Count))
|
||||
}
|
||||
|
||||
func TestCountCommitments(t *testing.T) {
|
||||
// no blocks
|
||||
// blocks before retention start filtered
|
||||
// blocks without commitments filtered
|
||||
// pre-deneb filtered
|
||||
// variety of commitment counts are accurate, from 1 to max
|
||||
type testcase struct {
|
||||
name string
|
||||
bwb func(t *testing.T, c testcase) []blocks.BlockWithROBlobs
|
||||
numBlocks int
|
||||
retStart primitives.Slot
|
||||
resCount int
|
||||
}
|
||||
cases := []testcase{
|
||||
{
|
||||
name: "nil blocks is safe",
|
||||
bwb: func(t *testing.T, c testcase) []blocks.BlockWithROBlobs {
|
||||
return nil
|
||||
},
|
||||
retStart: 0,
|
||||
},
|
||||
}
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
bwb := c.bwb(t, c)
|
||||
cc := countCommitments(bwb, c.retStart)
|
||||
require.Equal(t, c.resCount, len(cc))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCommitmentCountList(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
cc commitmentCountList
|
||||
bss func(*testing.T) filesystem.BlobStorageSummarizer
|
||||
expected *blobRange
|
||||
request *ethpb.BlobSidecarsByRangeRequest
|
||||
}{
|
||||
{
|
||||
name: "nil commitmentCount is safe",
|
||||
cc: nil,
|
||||
expected: nil,
|
||||
request: nil,
|
||||
},
|
||||
{
|
||||
name: "nil bss, single slot",
|
||||
cc: []commitmentCount{
|
||||
{slot: 11235, count: 1},
|
||||
},
|
||||
expected: &blobRange{low: 11235, high: 11235},
|
||||
request: ðpb.BlobSidecarsByRangeRequest{StartSlot: 11235, Count: 1},
|
||||
},
|
||||
{
|
||||
name: "nil bss, sparse slots",
|
||||
cc: []commitmentCount{
|
||||
{slot: 11235, count: 1},
|
||||
{slot: 11240, count: fieldparams.MaxBlobsPerBlock},
|
||||
{slot: 11250, count: 3},
|
||||
},
|
||||
expected: &blobRange{low: 11235, high: 11250},
|
||||
request: ðpb.BlobSidecarsByRangeRequest{StartSlot: 11235, Count: 16},
|
||||
},
|
||||
{
|
||||
name: "AllAvailable in middle, some avail low, none high",
|
||||
bss: func(t *testing.T) filesystem.BlobStorageSummarizer {
|
||||
onDisk := map[[32]byte][]int{
|
||||
bytesutil.ToBytes32([]byte("0")): {0, 1},
|
||||
bytesutil.ToBytes32([]byte("1")): {0, 1, 2, 3, 4, 5},
|
||||
}
|
||||
return filesystem.NewMockBlobStorageSummarizer(t, onDisk)
|
||||
},
|
||||
cc: []commitmentCount{
|
||||
{slot: 0, count: 3, root: bytesutil.ToBytes32([]byte("0"))},
|
||||
{slot: 5, count: fieldparams.MaxBlobsPerBlock, root: bytesutil.ToBytes32([]byte("1"))},
|
||||
{slot: 15, count: 3},
|
||||
},
|
||||
expected: &blobRange{low: 0, high: 15},
|
||||
request: ðpb.BlobSidecarsByRangeRequest{StartSlot: 0, Count: 16},
|
||||
},
|
||||
{
|
||||
name: "AllAvailable at high and low",
|
||||
bss: func(t *testing.T) filesystem.BlobStorageSummarizer {
|
||||
onDisk := map[[32]byte][]int{
|
||||
bytesutil.ToBytes32([]byte("0")): {0, 1},
|
||||
bytesutil.ToBytes32([]byte("2")): {0, 1, 2, 3, 4, 5},
|
||||
}
|
||||
return filesystem.NewMockBlobStorageSummarizer(t, onDisk)
|
||||
},
|
||||
cc: []commitmentCount{
|
||||
{slot: 0, count: 2, root: bytesutil.ToBytes32([]byte("0"))},
|
||||
{slot: 5, count: 3},
|
||||
{slot: 15, count: fieldparams.MaxBlobsPerBlock, root: bytesutil.ToBytes32([]byte("2"))},
|
||||
},
|
||||
expected: &blobRange{low: 5, high: 5},
|
||||
request: ðpb.BlobSidecarsByRangeRequest{StartSlot: 5, Count: 1},
|
||||
},
|
||||
{
|
||||
name: "AllAvailable at high and low, adjacent range in middle",
|
||||
bss: func(t *testing.T) filesystem.BlobStorageSummarizer {
|
||||
onDisk := map[[32]byte][]int{
|
||||
bytesutil.ToBytes32([]byte("0")): {0, 1},
|
||||
bytesutil.ToBytes32([]byte("2")): {0, 1, 2, 3, 4, 5},
|
||||
}
|
||||
return filesystem.NewMockBlobStorageSummarizer(t, onDisk)
|
||||
},
|
||||
cc: []commitmentCount{
|
||||
{slot: 0, count: 2, root: bytesutil.ToBytes32([]byte("0"))},
|
||||
{slot: 5, count: 3},
|
||||
{slot: 6, count: 3},
|
||||
{slot: 15, count: fieldparams.MaxBlobsPerBlock, root: bytesutil.ToBytes32([]byte("2"))},
|
||||
},
|
||||
expected: &blobRange{low: 5, high: 6},
|
||||
request: ðpb.BlobSidecarsByRangeRequest{StartSlot: 5, Count: 2},
|
||||
},
|
||||
{
|
||||
name: "AllAvailable at high and low, range in middle",
|
||||
bss: func(t *testing.T) filesystem.BlobStorageSummarizer {
|
||||
onDisk := map[[32]byte][]int{
|
||||
bytesutil.ToBytes32([]byte("0")): {0, 1},
|
||||
bytesutil.ToBytes32([]byte("1")): {0, 1},
|
||||
bytesutil.ToBytes32([]byte("2")): {0, 1, 2, 3, 4, 5},
|
||||
}
|
||||
return filesystem.NewMockBlobStorageSummarizer(t, onDisk)
|
||||
},
|
||||
cc: []commitmentCount{
|
||||
{slot: 0, count: 2, root: bytesutil.ToBytes32([]byte("0"))},
|
||||
{slot: 5, count: 3, root: bytesutil.ToBytes32([]byte("1"))},
|
||||
{slot: 10, count: 3},
|
||||
{slot: 15, count: fieldparams.MaxBlobsPerBlock, root: bytesutil.ToBytes32([]byte("2"))},
|
||||
},
|
||||
expected: &blobRange{low: 5, high: 10},
|
||||
request: ðpb.BlobSidecarsByRangeRequest{StartSlot: 5, Count: 6},
|
||||
},
|
||||
}
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
var bss filesystem.BlobStorageSummarizer
|
||||
if c.bss != nil {
|
||||
bss = c.bss(t)
|
||||
}
|
||||
br := c.cc.blobRange(bss)
|
||||
require.DeepEqual(t, c.expected, br)
|
||||
if c.request == nil {
|
||||
require.IsNil(t, br.Request())
|
||||
} else {
|
||||
req := br.Request()
|
||||
require.DeepEqual(t, req.StartSlot, c.request.StartSlot)
|
||||
require.DeepEqual(t, req.Count, c.request.Count)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func testSequenceBlockWithBlob(t *testing.T, nblocks int) ([]blocks.BlockWithROBlobs, []blocks.ROBlob) {
|
||||
blks, blobs := util.ExtendBlocksPlusBlobs(t, []blocks.ROBlock{}, nblocks)
|
||||
sbbs := make([]interfaces.ReadOnlySignedBeaconBlock, len(blks))
|
||||
@@ -1058,91 +1191,75 @@ func testSequenceBlockWithBlob(t *testing.T, nblocks int) ([]blocks.BlockWithROB
|
||||
return bwb, blobs
|
||||
}
|
||||
|
||||
func testReqFromResp(bwb []blocks.BlockWithROBlobs) *ethpb.BlobSidecarsByRangeRequest {
|
||||
return ðpb.BlobSidecarsByRangeRequest{
|
||||
StartSlot: bwb[0].Block.Block().Slot(),
|
||||
Count: uint64(bwb[len(bwb)-1].Block.Block().Slot()-bwb[0].Block.Block().Slot()) + 1,
|
||||
}
|
||||
}
|
||||
|
||||
func TestVerifyAndPopulateBlobs(t *testing.T) {
|
||||
bwb, blobs := testSequenceBlockWithBlob(t, 10)
|
||||
lastBlobIdx := len(blobs) - 1
|
||||
// Blocks are all before the retention window, blobs argument is ignored.
|
||||
windowAfter := bwb[len(bwb)-1].Block.Block().Slot() + 1
|
||||
_, err := verifyAndPopulateBlobs(bwb, nil, windowAfter)
|
||||
require.NoError(t, err)
|
||||
t.Run("happy path", func(t *testing.T) {
|
||||
bwb, blobs := testSequenceBlockWithBlob(t, 10)
|
||||
|
||||
firstBlockSlot := bwb[0].Block.Block().Slot()
|
||||
// slice off blobs for the last block so we hit the out of bounds / blob exhaustion check.
|
||||
_, err = verifyAndPopulateBlobs(bwb, blobs[0:len(blobs)-6], firstBlockSlot)
|
||||
require.ErrorIs(t, err, errMissingBlobsForBlockCommitments)
|
||||
|
||||
bwb, blobs = testSequenceBlockWithBlob(t, 10)
|
||||
// Misalign the slots of the blobs for the first block to simulate them being missing from the response.
|
||||
offByOne := blobs[0].Slot()
|
||||
for i := range blobs {
|
||||
if blobs[i].Slot() == offByOne {
|
||||
blobs[i].SignedBlockHeader.Header.Slot = offByOne + 1
|
||||
expectedCommits := make(map[[48]byte]bool)
|
||||
for _, bl := range blobs {
|
||||
expectedCommits[bytesutil.ToBytes48(bl.KzgCommitment)] = true
|
||||
}
|
||||
}
|
||||
_, err = verifyAndPopulateBlobs(bwb, blobs, firstBlockSlot)
|
||||
require.ErrorIs(t, err, verify.ErrBlobBlockMisaligned)
|
||||
require.Equal(t, len(blobs), len(expectedCommits))
|
||||
|
||||
bwb, blobs = testSequenceBlockWithBlob(t, 10)
|
||||
blobs[lastBlobIdx], err = blocks.NewROBlobWithRoot(blobs[lastBlobIdx].BlobSidecar, blobs[0].BlockRoot())
|
||||
require.NoError(t, err)
|
||||
_, err = verifyAndPopulateBlobs(bwb, blobs, firstBlockSlot)
|
||||
require.ErrorIs(t, err, verify.ErrBlobBlockMisaligned)
|
||||
|
||||
bwb, blobs = testSequenceBlockWithBlob(t, 10)
|
||||
blobs[lastBlobIdx].Index = 100
|
||||
_, err = verifyAndPopulateBlobs(bwb, blobs, firstBlockSlot)
|
||||
require.ErrorIs(t, err, verify.ErrIncorrectBlobIndex)
|
||||
|
||||
bwb, blobs = testSequenceBlockWithBlob(t, 10)
|
||||
blobs[lastBlobIdx].SignedBlockHeader.Header.ProposerIndex = 100
|
||||
blobs[lastBlobIdx], err = blocks.NewROBlob(blobs[lastBlobIdx].BlobSidecar)
|
||||
require.NoError(t, err)
|
||||
_, err = verifyAndPopulateBlobs(bwb, blobs, firstBlockSlot)
|
||||
require.ErrorIs(t, err, verify.ErrBlobBlockMisaligned)
|
||||
|
||||
bwb, blobs = testSequenceBlockWithBlob(t, 10)
|
||||
blobs[lastBlobIdx].SignedBlockHeader.Header.ParentRoot = blobs[0].SignedBlockHeader.Header.ParentRoot
|
||||
blobs[lastBlobIdx], err = blocks.NewROBlob(blobs[lastBlobIdx].BlobSidecar)
|
||||
require.NoError(t, err)
|
||||
_, err = verifyAndPopulateBlobs(bwb, blobs, firstBlockSlot)
|
||||
require.ErrorIs(t, err, verify.ErrBlobBlockMisaligned)
|
||||
|
||||
var emptyKzg [48]byte
|
||||
bwb, blobs = testSequenceBlockWithBlob(t, 10)
|
||||
blobs[lastBlobIdx].KzgCommitment = emptyKzg[:]
|
||||
blobs[lastBlobIdx], err = blocks.NewROBlob(blobs[lastBlobIdx].BlobSidecar)
|
||||
require.NoError(t, err)
|
||||
_, err = verifyAndPopulateBlobs(bwb, blobs, firstBlockSlot)
|
||||
require.ErrorIs(t, err, verify.ErrMismatchedBlobCommitments)
|
||||
|
||||
// happy path
|
||||
bwb, blobs = testSequenceBlockWithBlob(t, 10)
|
||||
|
||||
expectedCommits := make(map[[48]byte]bool)
|
||||
for _, bl := range blobs {
|
||||
expectedCommits[bytesutil.ToBytes48(bl.KzgCommitment)] = true
|
||||
}
|
||||
// The assertions using this map expect all commitments to be unique, so make sure that stays true.
|
||||
require.Equal(t, len(blobs), len(expectedCommits))
|
||||
|
||||
bwb, err = verifyAndPopulateBlobs(bwb, blobs, firstBlockSlot)
|
||||
require.NoError(t, err)
|
||||
for _, bw := range bwb {
|
||||
commits, err := bw.Block.Block().Body().BlobKzgCommitments()
|
||||
bwb, err := verifyAndPopulateBlobs(bwb, blobs, testReqFromResp(bwb), nil)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, len(commits), len(bw.Blobs))
|
||||
for i := range commits {
|
||||
bc := bytesutil.ToBytes48(commits[i])
|
||||
require.Equal(t, bc, bytesutil.ToBytes48(bw.Blobs[i].KzgCommitment))
|
||||
// Since we delete entries we've seen, duplicates will cause an error here.
|
||||
_, ok := expectedCommits[bc]
|
||||
// Make sure this was an expected delete, then delete it from the map so we can make sure we saw all of them.
|
||||
require.Equal(t, true, ok)
|
||||
delete(expectedCommits, bc)
|
||||
for _, bw := range bwb {
|
||||
commits, err := bw.Block.Block().Body().BlobKzgCommitments()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, len(commits), len(bw.Blobs))
|
||||
for i := range commits {
|
||||
bc := bytesutil.ToBytes48(commits[i])
|
||||
require.Equal(t, bc, bytesutil.ToBytes48(bw.Blobs[i].KzgCommitment))
|
||||
// Since we delete entries we've seen, duplicates will cause an error here.
|
||||
_, ok := expectedCommits[bc]
|
||||
// Make sure this was an expected delete, then delete it from the map so we can make sure we saw all of them.
|
||||
require.Equal(t, true, ok)
|
||||
delete(expectedCommits, bc)
|
||||
}
|
||||
}
|
||||
}
|
||||
// We delete each entry we've seen, so if we see all expected commits, the map should be empty at the end.
|
||||
require.Equal(t, 0, len(expectedCommits))
|
||||
// We delete each entry we've seen, so if we see all expected commits, the map should be empty at the end.
|
||||
require.Equal(t, 0, len(expectedCommits))
|
||||
})
|
||||
t.Run("missing blobs", func(t *testing.T) {
|
||||
bwb, blobs := testSequenceBlockWithBlob(t, 10)
|
||||
_, err := verifyAndPopulateBlobs(bwb, blobs[1:], testReqFromResp(bwb), nil)
|
||||
require.ErrorIs(t, err, errMissingBlobsForBlockCommitments)
|
||||
})
|
||||
t.Run("no blobs for last block", func(t *testing.T) {
|
||||
bwb, blobs := testSequenceBlockWithBlob(t, 10)
|
||||
lastIdx := len(bwb) - 1
|
||||
lastBlk := bwb[lastIdx].Block
|
||||
cmts, err := lastBlk.Block().Body().BlobKzgCommitments()
|
||||
require.NoError(t, err)
|
||||
blobs = blobs[0 : len(blobs)-len(cmts)]
|
||||
lastBlk, _ = util.GenerateTestDenebBlockWithSidecar(t, lastBlk.Block().ParentRoot(), lastBlk.Block().Slot(), 0)
|
||||
bwb[lastIdx].Block = lastBlk
|
||||
_, err = verifyAndPopulateBlobs(bwb, blobs, testReqFromResp(bwb), nil)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
t.Run("blobs not copied if all locally available", func(t *testing.T) {
|
||||
bwb, blobs := testSequenceBlockWithBlob(t, 10)
|
||||
// r1 only has some blobs locally available, so we'll still copy them all.
|
||||
// r7 has all blobs locally available, so we shouldn't copy them.
|
||||
i1, i7 := 1, 7
|
||||
r1, r7 := bwb[i1].Block.Root(), bwb[i7].Block.Root()
|
||||
onDisk := map[[32]byte][]int{
|
||||
r1: {0, 1},
|
||||
r7: {0, 1, 2, 3, 4, 5},
|
||||
}
|
||||
bss := filesystem.NewMockBlobStorageSummarizer(t, onDisk)
|
||||
bwb, err := verifyAndPopulateBlobs(bwb, blobs, testReqFromResp(bwb), bss)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 6, len(bwb[i1].Blobs))
|
||||
require.Equal(t, 0, len(bwb[i7].Blobs))
|
||||
})
|
||||
}
|
||||
|
||||
func TestBatchLimit(t *testing.T) {
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
|
||||
beaconsync "github.com/prysmaticlabs/prysm/v5/beacon-chain/sync"
|
||||
@@ -69,6 +70,7 @@ type blocksQueueConfig struct {
|
||||
p2p p2p.P2P
|
||||
db db.ReadOnlyDatabase
|
||||
mode syncMode
|
||||
bs filesystem.BlobStorageSummarizer
|
||||
}
|
||||
|
||||
// blocksQueue is a priority queue that serves as an intermediary between block fetchers (producers)
|
||||
@@ -101,12 +103,16 @@ func newBlocksQueue(ctx context.Context, cfg *blocksQueueConfig) *blocksQueue {
|
||||
|
||||
blocksFetcher := cfg.blocksFetcher
|
||||
if blocksFetcher == nil {
|
||||
if cfg.bs == nil {
|
||||
log.Warn("rpc fetcher starting without blob availability cache, duplicate blobs may be requested.")
|
||||
}
|
||||
blocksFetcher = newBlocksFetcher(ctx, &blocksFetcherConfig{
|
||||
ctxMap: cfg.ctxMap,
|
||||
chain: cfg.chain,
|
||||
p2p: cfg.p2p,
|
||||
db: cfg.db,
|
||||
clock: cfg.clock,
|
||||
bs: cfg.bs,
|
||||
})
|
||||
}
|
||||
highestExpectedSlot := cfg.highestExpectedSlot
|
||||
@@ -139,7 +145,7 @@ func newBlocksQueue(ctx context.Context, cfg *blocksQueueConfig) *blocksQueue {
|
||||
queue.smm.addEventHandler(eventDataReceived, stateScheduled, queue.onDataReceivedEvent(ctx))
|
||||
queue.smm.addEventHandler(eventTick, stateDataParsed, queue.onReadyToSendEvent(ctx))
|
||||
queue.smm.addEventHandler(eventTick, stateSkipped, queue.onProcessSkippedEvent(ctx))
|
||||
queue.smm.addEventHandler(eventTick, stateSent, queue.onCheckStaleEvent(ctx))
|
||||
queue.smm.addEventHandler(eventTick, stateSent, onCheckStaleEvent(ctx))
|
||||
|
||||
return queue
|
||||
}
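
For callers, the visible change is just threading a BlobStorageSummarizer through the config; the construction in startBlocksQueue further down looks roughly like this (abridged from that hunk):

// Abridged from startBlocksQueue below; bs may be nil, in which case the
// queue only logs the warning above and requests blobs unconditionally.
cfg := &blocksQueueConfig{
	p2p:                 s.cfg.P2P,
	db:                  s.cfg.DB,
	chain:               s.cfg.Chain,
	clock:               s.clock,
	ctxMap:              ctxMap,
	highestExpectedSlot: highestSlot,
	mode:                mode,
	bs:                  summarizer,
}
queue := newBlocksQueue(ctx, cfg)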
|
||||
@@ -451,7 +457,7 @@ func (q *blocksQueue) onProcessSkippedEvent(ctx context.Context) eventHandlerFn
|
||||
|
||||
// onCheckStaleEvent is an event that allows to mark stale epochs,
|
||||
// so that they can be re-processed.
|
||||
func (_ *blocksQueue) onCheckStaleEvent(ctx context.Context) eventHandlerFn {
|
||||
func onCheckStaleEvent(ctx context.Context) eventHandlerFn {
|
||||
return func(m *stateMachine, in interface{}) (stateID, error) {
|
||||
if ctx.Err() != nil {
|
||||
return m.state, ctx.Err()
|
||||
|
||||
@@ -971,24 +971,12 @@ func TestBlocksQueue_onProcessSkippedEvent(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestBlocksQueue_onCheckStaleEvent(t *testing.T) {
|
||||
blockBatchLimit := flags.Get().BlockBatchLimit
|
||||
mc, p2p, _ := initializeTestServices(t, []primitives.Slot{}, []*peerData{})
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{
|
||||
chain: mc,
|
||||
p2p: p2p,
|
||||
})
|
||||
|
||||
t.Run("expired context", func(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
queue := newBlocksQueue(ctx, &blocksQueueConfig{
|
||||
blocksFetcher: fetcher,
|
||||
chain: mc,
|
||||
highestExpectedSlot: primitives.Slot(blockBatchLimit),
|
||||
})
|
||||
handlerFn := queue.onCheckStaleEvent(ctx)
|
||||
handlerFn := onCheckStaleEvent(ctx)
|
||||
cancel()
|
||||
updatedState, err := handlerFn(&stateMachine{
|
||||
state: stateSkipped,
|
||||
@@ -998,16 +986,10 @@ func TestBlocksQueue_onCheckStaleEvent(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("invalid input state", func(t *testing.T) {
|
||||
queue := newBlocksQueue(ctx, &blocksQueueConfig{
|
||||
blocksFetcher: fetcher,
|
||||
chain: mc,
|
||||
highestExpectedSlot: primitives.Slot(blockBatchLimit),
|
||||
})
|
||||
|
||||
invalidStates := []stateID{stateNew, stateScheduled, stateDataParsed, stateSkipped}
|
||||
for _, state := range invalidStates {
|
||||
t.Run(state.String(), func(t *testing.T) {
|
||||
handlerFn := queue.onCheckStaleEvent(ctx)
|
||||
handlerFn := onCheckStaleEvent(ctx)
|
||||
updatedState, err := handlerFn(&stateMachine{
|
||||
state: state,
|
||||
}, nil)
|
||||
@@ -1018,12 +1000,7 @@ func TestBlocksQueue_onCheckStaleEvent(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("process non stale machine", func(t *testing.T) {
|
||||
queue := newBlocksQueue(ctx, &blocksQueueConfig{
|
||||
blocksFetcher: fetcher,
|
||||
chain: mc,
|
||||
highestExpectedSlot: primitives.Slot(blockBatchLimit),
|
||||
})
|
||||
handlerFn := queue.onCheckStaleEvent(ctx)
|
||||
handlerFn := onCheckStaleEvent(ctx)
|
||||
updatedState, err := handlerFn(&stateMachine{
|
||||
state: stateSent,
|
||||
updated: prysmTime.Now().Add(-staleEpochTimeout / 2),
|
||||
@@ -1034,12 +1011,7 @@ func TestBlocksQueue_onCheckStaleEvent(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("process stale machine", func(t *testing.T) {
|
||||
queue := newBlocksQueue(ctx, &blocksQueueConfig{
|
||||
blocksFetcher: fetcher,
|
||||
chain: mc,
|
||||
highestExpectedSlot: primitives.Slot(blockBatchLimit),
|
||||
})
|
||||
handlerFn := queue.onCheckStaleEvent(ctx)
|
||||
handlerFn := onCheckStaleEvent(ctx)
|
||||
updatedState, err := handlerFn(&stateMachine{
|
||||
state: stateSent,
|
||||
updated: prysmTime.Now().Add(-staleEpochTimeout),
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/das"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/sync"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
@@ -62,7 +63,39 @@ func (s *Service) roundRobinSync(genesis time.Time) error {
|
||||
return s.syncToNonFinalizedEpoch(ctx, genesis)
|
||||
}
|
||||
|
||||
// syncToFinalizedEpoch sync from head to best known finalized epoch.
|
||||
func (s *Service) startBlocksQueue(ctx context.Context, highestSlot primitives.Slot, mode syncMode) (*blocksQueue, error) {
|
||||
vr := s.clock.GenesisValidatorsRoot()
|
||||
ctxMap, err := sync.ContextByteVersionsForValRoot(vr)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "unable to initialize context version map using genesis validator root = %#x", vr)
|
||||
}
|
||||
|
||||
summarizer, err := s.cfg.BlobStorage.WaitForSummarizer(ctx)
|
||||
if err != nil {
|
||||
// The summarizer is an optional optimization; we can continue without it, and only stop if there is a different error.
|
||||
if !errors.Is(err, filesystem.ErrBlobStorageSummarizerUnavailable) {
|
||||
return nil, err
|
||||
}
|
||||
summarizer = nil // This should already be nil, but we'll set it just to be safe.
|
||||
}
|
||||
cfg := &blocksQueueConfig{
|
||||
p2p: s.cfg.P2P,
|
||||
db: s.cfg.DB,
|
||||
chain: s.cfg.Chain,
|
||||
clock: s.clock,
|
||||
ctxMap: ctxMap,
|
||||
highestExpectedSlot: highestSlot,
|
||||
mode: mode,
|
||||
bs: summarizer,
|
||||
}
|
||||
queue := newBlocksQueue(ctx, cfg)
|
||||
if err := queue.start(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return queue, nil
|
||||
}
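
With the queue construction factored out, both entry points reduce to picking a target slot and a sync mode; for example, the finalized-epoch path below becomes, in outline:

// Outline of syncToFinalizedEpoch after the refactor (error handling trimmed).
queue, err := s.startBlocksQueue(ctx, highestFinalizedSlot, modeStopOnFinalizedEpoch)
if err != nil {
	return err
}
for data := range queue.fetchedData {
	s.processFetchedData(ctx, genesis, s.cfg.Chain.HeadSlot(), data)
}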
|
||||
|
||||
// syncToFinalizedEpoch sync from head to the best known finalized epoch.
|
||||
func (s *Service) syncToFinalizedEpoch(ctx context.Context, genesis time.Time) error {
|
||||
highestFinalizedSlot, err := slots.EpochStart(s.highestFinalizedEpoch())
|
||||
if err != nil {
|
||||
@@ -74,28 +107,12 @@ func (s *Service) syncToFinalizedEpoch(ctx context.Context, genesis time.Time) e
|
||||
return nil
|
||||
}
|
||||
|
||||
vr := s.clock.GenesisValidatorsRoot()
|
||||
ctxMap, err := sync.ContextByteVersionsForValRoot(vr)
|
||||
queue, err := s.startBlocksQueue(ctx, highestFinalizedSlot, modeStopOnFinalizedEpoch)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "unable to initialize context version map using genesis validator root = %#x", vr)
|
||||
}
|
||||
queue := newBlocksQueue(ctx, &blocksQueueConfig{
|
||||
p2p: s.cfg.P2P,
|
||||
db: s.cfg.DB,
|
||||
chain: s.cfg.Chain,
|
||||
clock: s.clock,
|
||||
ctxMap: ctxMap,
|
||||
highestExpectedSlot: highestFinalizedSlot,
|
||||
mode: modeStopOnFinalizedEpoch,
|
||||
})
|
||||
if err := queue.start(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for data := range queue.fetchedData {
|
||||
// If blobs are available, verify that the blobs and blocks are consistent.
|
||||
// We can't import a block if there's no associated blob within DA bound.
|
||||
// The blob has to pass aggregated proof check.
|
||||
s.processFetchedData(ctx, genesis, s.cfg.Chain.HeadSlot(), data)
|
||||
}
|
||||
|
||||
@@ -113,21 +130,8 @@ func (s *Service) syncToFinalizedEpoch(ctx context.Context, genesis time.Time) e
|
||||
// syncToNonFinalizedEpoch syncs from head to the best known non-finalized epoch supported by the majority
|
||||
// of peers (no less than MinimumSyncPeers*2 peers).
|
||||
func (s *Service) syncToNonFinalizedEpoch(ctx context.Context, genesis time.Time) error {
|
||||
vr := s.clock.GenesisValidatorsRoot()
|
||||
ctxMap, err := sync.ContextByteVersionsForValRoot(vr)
|
||||
queue, err := s.startBlocksQueue(ctx, slots.Since(genesis), modeNonConstrained)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "unable to initialize context version map using genesis validator root = %#x", vr)
|
||||
}
|
||||
queue := newBlocksQueue(ctx, &blocksQueueConfig{
|
||||
p2p: s.cfg.P2P,
|
||||
db: s.cfg.DB,
|
||||
chain: s.cfg.Chain,
|
||||
clock: s.clock,
|
||||
ctxMap: ctxMap,
|
||||
highestExpectedSlot: slots.Since(genesis),
|
||||
mode: modeNonConstrained,
|
||||
})
|
||||
if err := queue.start(); err != nil {
|
||||
return err
|
||||
}
|
||||
for data := range queue.fetchedData {
|
||||
|
||||
@@ -646,7 +646,7 @@ func TestService_BatchRootRequest(t *testing.T) {
|
||||
b4Root, err := b4.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Send in duplicated roots to also test deduplicaton.
|
||||
// Send in duplicated roots to also test deduplication.
|
||||
sentRoots := p2ptypes.BeaconBlockByRootsReq{b2Root, b2Root, b3Root, b3Root, b4Root, b5Root}
|
||||
expectedRoots := p2ptypes.BeaconBlockByRootsReq{b2Root, b3Root, b4Root, b5Root}
|
||||
|
||||
|
||||
@@ -142,7 +142,13 @@ func (s *Service) registerRPC(baseTopic string, handle rpcHandler) {
|
||||
// it successfully writes a response. We don't blindly call
|
||||
// Close here because we may have only written a partial
|
||||
// response.
|
||||
// About the special case for quic-v1, please see:
|
||||
// https://github.com/quic-go/quic-go/issues/3291
|
||||
defer func() {
|
||||
if strings.Contains(stream.Conn().RemoteMultiaddr().String(), "quic-v1") {
|
||||
time.Sleep(2 * time.Second)
|
||||
}
|
||||
|
||||
_err := stream.Reset()
|
||||
_ = _err
|
||||
}()
|
||||
|
||||
@@ -219,7 +219,7 @@ func TestSyncService_StopCleanly(t *testing.T) {
|
||||
require.NotEqual(t, 0, len(r.cfg.p2p.PubSub().GetTopics()))
|
||||
require.NotEqual(t, 0, len(r.cfg.p2p.Host().Mux().Protocols()))
|
||||
|
||||
// Both pubsub and rpc topcis should be unsubscribed.
|
||||
// Both pubsub and rpc topics should be unsubscribed.
|
||||
require.NoError(t, r.Stop())
|
||||
|
||||
// Sleep to allow pubsub topics to be deregistered.
|
||||
|
||||
@@ -126,7 +126,7 @@ func (bv *ROBlobVerifier) VerifiedROBlob() (blocks.VerifiedROBlob, error) {
|
||||
// For example, when batch syncing, forkchoice is only updated at the end of the batch. So the checks that use
|
||||
// forkchoice, like descends from finalized or parent seen, would necessarily fail. Allowing the caller to
|
||||
// assert the requirement has been satisfied ensures we have an easy way to audit which piece of code is satisfying
|
||||
// a requireent outside of this package.
|
||||
// a requirement outside of this package.
|
||||
func (bv *ROBlobVerifier) SatisfyRequirement(req Requirement) {
|
||||
bv.recordResult(req, nil)
|
||||
}
|
||||
|
||||
@@ -90,6 +90,7 @@ var appFlags = []cli.Flag{
|
||||
cmd.StaticPeers,
|
||||
cmd.RelayNode,
|
||||
cmd.P2PUDPPort,
|
||||
cmd.P2PQUICPort,
|
||||
cmd.P2PTCPPort,
|
||||
cmd.P2PIP,
|
||||
cmd.P2PHost,
|
||||
|
||||
@@ -9,7 +9,7 @@ var (
|
||||
backfillWorkerCountName = "backfill-worker-count"
|
||||
|
||||
// EnableExperimentalBackfill enables backfill for checkpoint synced nodes.
|
||||
// This flag will be removed onced backfill is enabled by default.
|
||||
// This flag will be removed once backfill is enabled by default.
|
||||
EnableExperimentalBackfill = &cli.BoolFlag{
|
||||
Name: "enable-experimental-backfill",
|
||||
Usage: "Backfill is still experimental at this time. " +
|
||||
|
||||
@@ -55,6 +55,7 @@ var appHelpFlagGroups = []flagGroup{
|
||||
cmd.BootstrapNode,
|
||||
cmd.RelayNode,
|
||||
cmd.P2PUDPPort,
|
||||
cmd.P2PQUICPort,
|
||||
cmd.P2PTCPPort,
|
||||
cmd.DataDirFlag,
|
||||
cmd.VerbosityFlag,
|
||||
|
||||
21
cmd/flags.go
@@ -4,6 +4,7 @@ package cmd
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
@@ -112,13 +113,19 @@ var (
|
||||
// P2PUDPPort defines the port to be used by discv5.
|
||||
P2PUDPPort = &cli.IntFlag{
|
||||
Name: "p2p-udp-port",
|
||||
Usage: "The port used by discv5.",
|
||||
Usage: "The UDP port used by the discovery service discv5.",
|
||||
Value: 12000,
|
||||
}
|
||||
// P2PTCPPort defines the port to be used by libp2p.
|
||||
// P2PQUICPort defines the QUIC port to be used by libp2p.
|
||||
P2PQUICPort = &cli.IntFlag{
|
||||
Name: "p2p-quic-port",
|
||||
Usage: "The QUIC port used by libp2p.",
|
||||
Value: 13000,
|
||||
}
|
||||
// P2PTCPPort defines the TCP port to be used by libp2p.
|
||||
P2PTCPPort = &cli.IntFlag{
|
||||
Name: "p2p-tcp-port",
|
||||
Usage: "The port used by libp2p.",
|
||||
Usage: "The TCP port used by libp2p.",
|
||||
Value: 13000,
|
||||
}
|
||||
// P2PIP defines the local IP to be used by libp2p.
|
||||
@@ -323,10 +330,10 @@ func ValidateNoArgs(ctx *cli.Context) error {
|
||||
|
||||
// verifies that the provided command is in the command list.
|
||||
func checkCommandList(commands []*cli.Command, name string) *cli.Command {
|
||||
for _, c := range commands {
|
||||
if c.Name == name {
|
||||
return c
|
||||
}
|
||||
if i := slices.IndexFunc(commands, func(c *cli.Command) bool {
|
||||
return c.Name == name
|
||||
}); i >= 0 {
|
||||
return commands[i]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -43,6 +43,7 @@ go_library(
|
||||
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/protocol:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//p2p/security/noise:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//p2p/transport/quic:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//p2p/transport/tcp:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prysmaticlabs_fastssz//:go_default_library",
|
||||
|
||||
@@ -14,7 +14,8 @@ import (
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
"github.com/libp2p/go-libp2p/p2p/security/noise"
|
||||
"github.com/libp2p/go-libp2p/p2p/transport/tcp"
|
||||
libp2pquic "github.com/libp2p/go-libp2p/p2p/transport/quic"
|
||||
libp2ptcp "github.com/libp2p/go-libp2p/p2p/transport/tcp"
|
||||
"github.com/pkg/errors"
|
||||
ssz "github.com/prysmaticlabs/fastssz"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
@@ -43,7 +44,7 @@ type client struct {
|
||||
nodeClient pb.NodeClient
|
||||
}
|
||||
|
||||
func newClient(beaconEndpoints []string, clientPort uint) (*client, error) {
|
||||
func newClient(beaconEndpoints []string, tcpPort, quicPort uint) (*client, error) {
|
||||
ipAdd := ipAddr()
|
||||
priv, err := privKey()
|
||||
if err != nil {
|
||||
@@ -53,15 +54,16 @@ func newClient(beaconEndpoints []string, clientPort uint) (*client, error) {
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not set up p2p metadata")
|
||||
}
|
||||
listen, err := p2p.MultiAddressBuilder(ipAdd.String(), clientPort)
|
||||
multiaddrs, err := p2p.MultiAddressBuilder(ipAdd, tcpPort, quicPort)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not set up listening multiaddr")
|
||||
}
|
||||
options := []libp2p.Option{
|
||||
privKeyOption(priv),
|
||||
libp2p.ListenAddrs(listen),
|
||||
libp2p.ListenAddrs(multiaddrs...),
|
||||
libp2p.UserAgent(version.BuildData()),
|
||||
libp2p.Transport(tcp.NewTCPTransport),
|
||||
libp2p.Transport(libp2pquic.NewTransport),
|
||||
libp2p.Transport(libp2ptcp.NewTCPTransport),
|
||||
}
|
||||
options = append(options, libp2p.Security(noise.ID, noise.New))
|
||||
options = append(options, libp2p.Ping(false))
|
||||
|
||||
@@ -22,11 +22,12 @@ import (
|
||||
)
|
||||
|
||||
var requestBlobsFlags = struct {
|
||||
Peers string
|
||||
ClientPort uint
|
||||
APIEndpoints string
|
||||
StartSlot uint64
|
||||
Count uint64
|
||||
Peers string
|
||||
ClientPortTCP uint
|
||||
ClientPortQUIC uint
|
||||
APIEndpoints string
|
||||
StartSlot uint64
|
||||
Count uint64
|
||||
}{}
|
||||
|
||||
var requestBlobsCmd = &cli.Command{
|
||||
@@ -47,9 +48,16 @@ var requestBlobsCmd = &cli.Command{
|
||||
Value: "",
|
||||
},
|
||||
&cli.UintFlag{
|
||||
Name: "client-port",
|
||||
Usage: "port to use for the client as a libp2p host",
|
||||
Destination: &requestBlobsFlags.ClientPort,
|
||||
Name: "client-port-tcp",
|
||||
Aliases: []string{"client-port"},
|
||||
Usage: "TCP port to use for the client as a libp2p host",
|
||||
Destination: &requestBlobsFlags.ClientPortTCP,
|
||||
Value: 13001,
|
||||
},
|
||||
&cli.UintFlag{
|
||||
Name: "client-port-quic",
|
||||
Usage: "QUIC port to use for the client as a libp2p host",
|
||||
Destination: &requestBlobsFlags.ClientPortQUIC,
|
||||
Value: 13001,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
@@ -60,13 +68,13 @@ var requestBlobsCmd = &cli.Command{
|
||||
},
|
||||
&cli.Uint64Flag{
|
||||
Name: "start-slot",
|
||||
Usage: "start slot for blocks by range request. If unset, will use start_slot(current_epoch-1)",
|
||||
Usage: "start slot for blobs by range request. If unset, will use start_slot(current_epoch-1)",
|
||||
Destination: &requestBlobsFlags.StartSlot,
|
||||
Value: 0,
|
||||
},
|
||||
&cli.Uint64Flag{
|
||||
Name: "count",
|
||||
Usage: "number of blocks to request, (default 32)",
|
||||
Usage: "number of blobs to request, (default 32)",
|
||||
Destination: &requestBlobsFlags.Count,
|
||||
Value: 32,
|
||||
},
|
||||
@@ -90,7 +98,7 @@ func cliActionRequestBlobs(cliCtx *cli.Context) error {
|
||||
allAPIEndpoints = strings.Split(requestBlobsFlags.APIEndpoints, ",")
|
||||
}
|
||||
var err error
|
||||
c, err := newClient(allAPIEndpoints, requestBlobsFlags.ClientPort)
|
||||
c, err := newClient(allAPIEndpoints, requestBlobsFlags.ClientPortTCP, requestBlobsFlags.ClientPortQUIC)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -23,12 +23,14 @@ import (
|
||||
)
|
||||
|
||||
var requestBlocksFlags = struct {
|
||||
Peers string
|
||||
ClientPort uint
|
||||
APIEndpoints string
|
||||
StartSlot uint64
|
||||
Count uint64
|
||||
Step uint64
|
||||
Network string
|
||||
Peers string
|
||||
ClientPortTCP uint
|
||||
ClientPortQUIC uint
|
||||
APIEndpoints string
|
||||
StartSlot uint64
|
||||
Count uint64
|
||||
Step uint64
|
||||
}{}
|
||||
|
||||
var requestBlocksCmd = &cli.Command{
|
||||
@@ -42,6 +44,12 @@ var requestBlocksCmd = &cli.Command{
|
||||
},
|
||||
Flags: []cli.Flag{
|
||||
cmd.ChainConfigFileFlag,
|
||||
&cli.StringFlag{
|
||||
Name: "network",
|
||||
Usage: "network to run on (mainnet, sepolia, holesky)",
|
||||
Destination: &requestBlocksFlags.Network,
|
||||
Value: "mainnet",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "peer-multiaddrs",
|
||||
Usage: "comma-separated, peer multiaddr(s) to connect to for p2p requests",
|
||||
@@ -49,9 +57,16 @@ var requestBlocksCmd = &cli.Command{
|
||||
Value: "",
|
||||
},
|
||||
&cli.UintFlag{
|
||||
Name: "client-port",
|
||||
Usage: "port to use for the client as a libp2p host",
|
||||
Destination: &requestBlocksFlags.ClientPort,
|
||||
Name: "client-port-tcp",
|
||||
Aliases: []string{"client-port"},
|
||||
Usage: "TCP port to use for the client as a libp2p host",
|
||||
Destination: &requestBlocksFlags.ClientPortTCP,
|
||||
Value: 13001,
|
||||
},
|
||||
&cli.UintFlag{
|
||||
Name: "client-port-quic",
|
||||
Usage: "QUIC port to use for the client as a libp2p host",
|
||||
Destination: &requestBlocksFlags.ClientPortQUIC,
|
||||
Value: 13001,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
@@ -82,6 +97,21 @@ var requestBlocksCmd = &cli.Command{
|
||||
}
|
||||
|
||||
func cliActionRequestBlocks(cliCtx *cli.Context) error {
|
||||
switch requestBlocksFlags.Network {
|
||||
case params.SepoliaName:
|
||||
if err := params.SetActive(params.SepoliaConfig()); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
case params.HoleskyName:
|
||||
if err := params.SetActive(params.HoleskyConfig()); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
case params.MainnetName:
|
||||
// Do nothing
|
||||
default:
|
||||
log.Fatalf("Unknown network provided: %s", requestBlocksFlags.Network)
|
||||
}
|
||||
|
||||
if cliCtx.IsSet(cmd.ChainConfigFileFlag.Name) {
|
||||
chainConfigFileName := cliCtx.String(cmd.ChainConfigFileFlag.Name)
|
||||
if err := params.LoadChainConfigFile(chainConfigFileName, nil); err != nil {
|
||||
@@ -98,7 +128,7 @@ func cliActionRequestBlocks(cliCtx *cli.Context) error {
|
||||
allAPIEndpoints = strings.Split(requestBlocksFlags.APIEndpoints, ",")
|
||||
}
|
||||
var err error
|
||||
c, err := newClient(allAPIEndpoints, requestBlocksFlags.ClientPort)
|
||||
c, err := newClient(allAPIEndpoints, requestBlocksFlags.ClientPortTCP, requestBlocksFlags.ClientPortQUIC)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -85,7 +85,7 @@ var (
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "config-name",
|
||||
Usage: "Config kind to be used for generating the genesis state. Default: mainnet. Options include mainnet, interop, minimal, prater, sepolia. --chain-config-file will override this flag.",
|
||||
Usage: "Config kind to be used for generating the genesis state. Default: mainnet. Options include mainnet, interop, minimal, sepolia, holesky. --chain-config-file will override this flag.",
|
||||
Destination: &generateGenesisStateFlags.ConfigName,
|
||||
Value: params.MainnetName,
|
||||
},
|
||||
|
||||
@@ -167,7 +167,6 @@ var Commands = []*cli.Command{
|
||||
flags.ForceExitFlag,
|
||||
flags.VoluntaryExitJSONOutputPath,
|
||||
features.Mainnet,
|
||||
features.PraterTestnet,
|
||||
features.SepoliaTestnet,
|
||||
features.HoleskyTestnet,
|
||||
cmd.AcceptTosFlag,
|
||||
|
||||
@@ -27,7 +27,6 @@ var Commands = &cli.Command{
|
||||
flags.WalletPasswordFileFlag,
|
||||
flags.DeletePublicKeysFlag,
|
||||
features.Mainnet,
|
||||
features.PraterTestnet,
|
||||
features.SepoliaTestnet,
|
||||
features.HoleskyTestnet,
|
||||
cmd.AcceptTosFlag,
|
||||
@@ -63,7 +62,6 @@ var Commands = &cli.Command{
|
||||
flags.GrpcRetriesFlag,
|
||||
flags.GrpcRetryDelayFlag,
|
||||
features.Mainnet,
|
||||
features.PraterTestnet,
|
||||
features.SepoliaTestnet,
|
||||
features.HoleskyTestnet,
|
||||
cmd.AcceptTosFlag,
|
||||
@@ -97,7 +95,6 @@ var Commands = &cli.Command{
|
||||
flags.BackupPublicKeysFlag,
|
||||
flags.BackupPasswordFile,
|
||||
features.Mainnet,
|
||||
features.PraterTestnet,
|
||||
features.SepoliaTestnet,
|
||||
features.HoleskyTestnet,
|
||||
cmd.AcceptTosFlag,
|
||||
@@ -128,7 +125,6 @@ var Commands = &cli.Command{
|
||||
flags.AccountPasswordFileFlag,
|
||||
flags.ImportPrivateKeyFileFlag,
|
||||
features.Mainnet,
|
||||
features.PraterTestnet,
|
||||
features.SepoliaTestnet,
|
||||
features.HoleskyTestnet,
|
||||
cmd.AcceptTosFlag,
|
||||
@@ -171,7 +167,6 @@ var Commands = &cli.Command{
|
||||
flags.ForceExitFlag,
|
||||
flags.VoluntaryExitJSONOutputPath,
|
||||
features.Mainnet,
|
||||
features.PraterTestnet,
|
||||
features.SepoliaTestnet,
|
||||
features.HoleskyTestnet,
|
||||
cmd.AcceptTosFlag,
|
||||
|
||||
@@ -312,7 +312,7 @@ var (
|
||||
Usage: "(Work in progress): Enables the web portal for the validator client.",
|
||||
Value: false,
|
||||
}
|
||||
// SlashingProtectionExportDirFlag allows specifying the outpt directory
|
||||
// SlashingProtectionExportDirFlag allows specifying the output directory
|
||||
// for a validator's slashing protection history.
|
||||
SlashingProtectionExportDirFlag = &cli.StringFlag{
|
||||
Name: "slashing-protection-export-dir",
|
||||
|
||||
@@ -22,7 +22,6 @@ var Commands = &cli.Command{
|
||||
cmd.DataDirFlag,
|
||||
flags.SlashingProtectionExportDirFlag,
|
||||
features.Mainnet,
|
||||
features.PraterTestnet,
|
||||
features.SepoliaTestnet,
|
||||
features.HoleskyTestnet,
|
||||
features.EnableMinimalSlashingProtection,
|
||||
@@ -51,7 +50,6 @@ var Commands = &cli.Command{
|
||||
cmd.DataDirFlag,
|
||||
flags.SlashingProtectionJSONFileFlag,
|
||||
features.Mainnet,
|
||||
features.PraterTestnet,
|
||||
features.SepoliaTestnet,
|
||||
features.HoleskyTestnet,
|
||||
features.EnableMinimalSlashingProtection,
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"slices"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/config/features"
|
||||
@@ -34,10 +35,7 @@ func TestAllFlagsExistInHelp(t *testing.T) {
|
||||
}
|
||||
|
||||
func doesFlagExist(flag cli.Flag, flags []cli.Flag) bool {
|
||||
for _, f := range flags {
|
||||
if f.String() == flag.String() {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
return slices.ContainsFunc(flags, func(f cli.Flag) bool {
|
||||
return f.String() == flag.String()
|
||||
})
|
||||
}
|
||||
|
||||
@@ -31,7 +31,6 @@ var Commands = &cli.Command{
|
||||
flags.Mnemonic25thWordFileFlag,
|
||||
flags.SkipMnemonic25thWordCheckFlag,
|
||||
features.Mainnet,
|
||||
features.PraterTestnet,
|
||||
features.SepoliaTestnet,
|
||||
features.HoleskyTestnet,
|
||||
cmd.AcceptTosFlag,
|
||||
@@ -63,7 +62,6 @@ var Commands = &cli.Command{
|
||||
flags.Mnemonic25thWordFileFlag,
|
||||
flags.SkipMnemonic25thWordCheckFlag,
|
||||
features.Mainnet,
|
||||
features.PraterTestnet,
|
||||
features.SepoliaTestnet,
|
||||
features.HoleskyTestnet,
|
||||
cmd.AcceptTosFlag,
|
||||
|
||||
@@ -42,6 +42,7 @@ type Flags struct {
|
||||
WriteSSZStateTransitions bool // WriteSSZStateTransitions to tmp directory.
|
||||
EnablePeerScorer bool // EnablePeerScorer enables experimental peer scoring in p2p.
|
||||
EnableLightClient bool // EnableLightClient enables light client APIs.
|
||||
EnableQUIC bool // EnableQUIC specifies whether to enable QUIC transport for libp2p.
|
||||
WriteWalletPasswordOnWebOnboarding bool // WriteWalletPasswordOnWebOnboarding writes the password to disk after Prysm web signup.
|
||||
EnableDoppelGanger bool // EnableDoppelGanger enables doppelganger protection on startup for the validator.
|
||||
EnableHistoricalSpaceRepresentation bool // EnableHistoricalSpaceRepresentation enables the saving of registry validators in separate buckets to save space
|
||||
@@ -123,14 +124,7 @@ func InitWithReset(c *Flags) func() {
|
||||
|
||||
// configureTestnet sets the config according to specified testnet flag
|
||||
func configureTestnet(ctx *cli.Context) error {
|
||||
if ctx.Bool(PraterTestnet.Name) {
|
||||
log.Info("Running on the Prater Testnet")
|
||||
if err := params.SetActive(params.PraterConfig().Copy()); err != nil {
|
||||
return err
|
||||
}
|
||||
applyPraterFeatureFlags(ctx)
|
||||
params.UsePraterNetworkConfig()
|
||||
} else if ctx.Bool(SepoliaTestnet.Name) {
|
||||
if ctx.Bool(SepoliaTestnet.Name) {
|
||||
log.Info("Running on the Sepolia Beacon Chain Testnet")
|
||||
if err := params.SetActive(params.SepoliaConfig().Copy()); err != nil {
|
||||
return err
|
||||
@@ -157,10 +151,6 @@ func configureTestnet(ctx *cli.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Insert feature flags within the function to be enabled for Prater testnet.
|
||||
func applyPraterFeatureFlags(ctx *cli.Context) {
|
||||
}
|
||||
|
||||
// Insert feature flags within the function to be enabled for Sepolia testnet.
|
||||
func applySepoliaFeatureFlags(ctx *cli.Context) {
|
||||
}
|
||||
@@ -265,6 +255,10 @@ func ConfigureBeaconChain(ctx *cli.Context) error {
|
||||
logEnabled(BlobSaveFsync)
|
||||
cfg.BlobSaveFsync = true
|
||||
}
|
||||
if ctx.IsSet(EnableQUIC.Name) {
|
||||
logEnabled(EnableQUIC)
|
||||
cfg.EnableQUIC = true
|
||||
}
|
||||
|
||||
cfg.AggregateIntervals = [3]time.Duration{aggregateFirstInterval.Value, aggregateSecondInterval.Value, aggregateThirdInterval.Value}
|
||||
Init(cfg)
|
||||
|
||||
@@ -8,12 +8,6 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
// PraterTestnet flag for the multiclient Ethereum consensus testnet.
|
||||
PraterTestnet = &cli.BoolFlag{
|
||||
Name: "prater",
|
||||
Usage: "Runs Prysm configured for the Prater / Goerli test network.",
|
||||
Aliases: []string{"goerli"},
|
||||
}
|
||||
// SepoliaTestnet flag for the multiclient Ethereum consensus testnet.
|
||||
SepoliaTestnet = &cli.BoolFlag{
|
||||
Name: "sepolia",
|
||||
@@ -109,8 +103,8 @@ var (
|
||||
}
|
||||
enableDoppelGangerProtection = &cli.BoolFlag{
|
||||
Name: "enable-doppelganger",
|
||||
Usage: `Enables the validator to perform a doppelganger check.
|
||||
This is not "a foolproof method to find duplicate instances in the network.
|
||||
Usage: `Enables the validator to perform a doppelganger check.
|
||||
This is not a foolproof method to find duplicate instances in the network.
|
||||
Your validator will still be vulnerable if it is being run in unsafe configurations.`,
|
||||
}
|
||||
disableStakinContractCheck = &cli.BoolFlag{
|
||||
@@ -171,19 +165,24 @@ var (
|
||||
Name: "blob-save-fsync",
|
||||
Usage: "Forces new blob files to be fysnc'd before continuing, ensuring durable blob writes.",
|
||||
}
|
||||
// EnableQUIC enables connection using the QUIC protocol for peers which support it.
|
||||
EnableQUIC = &cli.BoolFlag{
|
||||
Name: "enable-quic",
|
||||
Usage: "Enables connection using the QUIC protocol for peers which support it.",
|
||||
}
|
||||
)
|
||||
|
||||
// devModeFlags holds list of flags that are set when development mode is on.
|
||||
var devModeFlags = []cli.Flag{
|
||||
enableExperimentalState,
|
||||
backfill.EnableExperimentalBackfill,
|
||||
EnableQUIC,
|
||||
}
|
||||
|
||||
// ValidatorFlags contains a list of all the feature flags that apply to the validator client.
|
||||
var ValidatorFlags = append(deprecatedFlags, []cli.Flag{
|
||||
writeWalletPasswordOnWebOnboarding,
|
||||
HoleskyTestnet,
|
||||
PraterTestnet,
|
||||
SepoliaTestnet,
|
||||
Mainnet,
|
||||
dynamicKeyReloadDebounceInterval,
|
||||
@@ -208,7 +207,6 @@ var BeaconChainFlags = append(deprecatedBeaconFlags, append(deprecatedFlags, []c
|
||||
saveInvalidBlobTempFlag,
|
||||
disableGRPCConnectionLogging,
|
||||
HoleskyTestnet,
|
||||
PraterTestnet,
|
||||
SepoliaTestnet,
|
||||
Mainnet,
|
||||
disablePeerScorer,
|
||||
@@ -229,6 +227,7 @@ var BeaconChainFlags = append(deprecatedBeaconFlags, append(deprecatedFlags, []c
|
||||
DisableRegistrationCache,
|
||||
EnableLightClient,
|
||||
BlobSaveFsync,
|
||||
EnableQUIC,
|
||||
}...)...)
|
||||
|
||||
// E2EBeaconChainFlags contains a list of the beacon chain feature flags to be tested in E2E.
|
||||
@@ -239,7 +238,6 @@ var E2EBeaconChainFlags = []string{
|
||||
// NetworkFlags contains a list of network flags.
|
||||
var NetworkFlags = []cli.Flag{
|
||||
Mainnet,
|
||||
PraterTestnet,
|
||||
SepoliaTestnet,
|
||||
HoleskyTestnet,
|
||||
}
|
||||
|
||||
@@ -16,7 +16,6 @@ go_library(
|
||||
"network_config.go",
|
||||
"testnet_e2e_config.go",
|
||||
"testnet_holesky_config.go",
|
||||
"testnet_prater_config.go",
|
||||
"testnet_sepolia_config.go",
|
||||
"testutils.go",
|
||||
"testutils_develop.go", # keep
|
||||
@@ -50,7 +49,6 @@ go_test(
|
||||
"mainnet_config_test.go",
|
||||
"testnet_config_test.go",
|
||||
"testnet_holesky_config_test.go",
|
||||
"testnet_prater_config_test.go",
|
||||
],
|
||||
data = glob(["*.yaml"]) + [
|
||||
"testdata/e2e_config.yaml",
|
||||
@@ -58,14 +56,12 @@ go_test(
|
||||
"@consensus_spec_tests_mainnet//:test_data",
|
||||
"@consensus_spec_tests_minimal//:test_data",
|
||||
"@eth2_networks//:configs",
|
||||
"@goerli_testnet//:configs",
|
||||
"@holesky_testnet//:configs",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
gotags = ["develop"],
|
||||
tags = ["CI_race_detection"],
|
||||
deps = [
|
||||
"//build/bazel:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//io/file:go_default_library",
|
||||
|
||||
@@ -3,7 +3,6 @@ package params
|
||||
func init() {
|
||||
defaults := []*BeaconChainConfig{
|
||||
MainnetConfig(),
|
||||
PraterConfig(),
|
||||
MinimalSpecConfig(),
|
||||
E2ETestConfig(),
|
||||
E2EMainnetTestConfig(),
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"path"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"slices"
|
||||
"sort"
|
||||
"strings"
|
||||
"testing"
|
||||
@@ -401,10 +402,5 @@ func assertYamlFieldsMatch(t *testing.T, name string, fields []string, c1, c2 *p
|
||||
}
|
||||
|
||||
func isPlaceholderField(field string) bool {
|
||||
for _, f := range placeholderFields {
|
||||
if f == field {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
return slices.Contains(placeholderFields, field)
|
||||
}
|
||||
|
||||
2
config/params/testdata/e2e_config.yaml
vendored
@@ -82,7 +82,7 @@ PROPOSER_SCORE_BOOST: 40
|
||||
|
||||
# Deposit contract
|
||||
# ---------------------------------------------------------------
|
||||
# Ethereum Goerli testnet
|
||||
# Testnet
|
||||
DEPOSIT_CHAIN_ID: 1337 # Override for e2e tests
|
||||
DEPOSIT_NETWORK_ID: 1337 # Override for e2e tests
|
||||
# Configured on a per testnet basis
|
||||
|
||||
@@ -1,49 +0,0 @@
package params

import (
eth1Params "github.com/ethereum/go-ethereum/params"
)

// UsePraterNetworkConfig uses the Prater specific
// network config.
func UsePraterNetworkConfig() {
cfg := BeaconNetworkConfig().Copy()
cfg.ContractDeploymentBlock = 4367322
cfg.BootstrapNodes = []string{
// Prysm's bootnode
"enr:-Ku4QFmUkNp0g9bsLX2PfVeIyT-9WO-PZlrqZBNtEyofOOfLMScDjaTzGxIb1Ns9Wo5Pm_8nlq-SZwcQfTH2cgO-s88Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpDkvpOTAAAQIP__________gmlkgnY0gmlwhBLf22SJc2VjcDI1NmsxoQLV_jMOIxKbjHFKgrkFvwDvpexo6Nd58TK5k7ss4Vt0IoN1ZHCCG1g",
// Lighthouse's bootnode by Afri
"enr:-LK4QH1xnjotgXwg25IDPjrqRGFnH1ScgNHA3dv1Z8xHCp4uP3N3Jjl_aYv_WIxQRdwZvSukzbwspXZ7JjpldyeVDzMCh2F0dG5ldHOIAAAAAAAAAACEZXRoMpB53wQoAAAQIP__________gmlkgnY0gmlwhIe1te-Jc2VjcDI1NmsxoQOkcGXqbCJYbcClZ3z5f6NWhX_1YPFRYRRWQpJjwSHpVIN0Y3CCIyiDdWRwgiMo",
// Lighthouse's bootnode by Sigp
"enr:-Ly4QFPk-cTMxZ3jWTafiNblEZkQIXGF2aVzCIGW0uHp6KaEAvBMoctE8S7YU0qZtuS7By0AA4YMfKoN9ls_GJRccVpFh2F0dG5ldHOI__________-EZXRoMpCC9KcrAgAQIIS2AQAAAAAAgmlkgnY0gmlwhKh3joWJc2VjcDI1NmsxoQKrxz8M1IHwJqRIpDqdVW_U1PeixMW5SfnBD-8idYIQrIhzeW5jbmV0cw-DdGNwgiMog3VkcIIjKA",
"enr:-L64QJmwSDtaHVgGiqIxJWUtxWg6uLCipsms6j-8BdsOJfTWAs7CLF9HJnVqFE728O-JYUDCxzKvRdeMqBSauHVCMdaCAVWHYXR0bmV0c4j__________4RldGgykIL0pysCABAghLYBAAAAAACCaWSCdjSCaXCEQWxOdolzZWNwMjU2azGhA7Qmod9fK86WidPOzLsn5_8QyzL7ZcJ1Reca7RnD54vuiHN5bmNuZXRzD4N0Y3CCIyiDdWRwgiMo",
// Teku's bootnode By Afri
"enr:-KG4QCIzJZTY_fs_2vqWEatJL9RrtnPwDCv-jRBuO5FQ2qBrfJubWOWazri6s9HsyZdu-fRUfEzkebhf1nvO42_FVzwDhGV0aDKQed8EKAAAECD__________4JpZIJ2NIJpcISHtbYziXNlY3AyNTZrMaED4m9AqVs6F32rSCGsjtYcsyfQE2K8nDiGmocUY_iq-TSDdGNwgiMog3VkcIIjKA",
}
OverrideBeaconNetworkConfig(cfg)
}

// PraterConfig defines the config for the
// Prater testnet.
func PraterConfig() *BeaconChainConfig {
cfg := MainnetConfig().Copy()
cfg.MinGenesisTime = 1614588812
cfg.GenesisDelay = 1919188
cfg.ConfigName = PraterName
cfg.GenesisForkVersion = []byte{0x00, 0x00, 0x10, 0x20}
cfg.SecondsPerETH1Block = 14
cfg.DepositChainID = eth1Params.GoerliChainConfig.ChainID.Uint64()
cfg.DepositNetworkID = eth1Params.GoerliChainConfig.ChainID.Uint64()
cfg.AltairForkEpoch = 36660
cfg.AltairForkVersion = []byte{0x1, 0x0, 0x10, 0x20}
cfg.BellatrixForkEpoch = 112260
cfg.BellatrixForkVersion = []byte{0x2, 0x0, 0x10, 0x20}
cfg.CapellaForkEpoch = 162304
cfg.CapellaForkVersion = []byte{0x3, 0x0, 0x10, 0x20}
cfg.DenebForkEpoch = 231680 // 2024-01-17 06:32:00 (UTC)
cfg.DenebForkVersion = []byte{0x4, 0x0, 0x10, 0x20}
cfg.TerminalTotalDifficulty = "10790000"
cfg.DepositContractAddress = "0xff50ed3d0ec03aC01D4C79aAd74928BFF48a7b2b"
cfg.InitializeForkSchedule()
return cfg
}
@@ -1,28 +0,0 @@
package params_test

import (
"path"
"testing"

"github.com/prysmaticlabs/prysm/v5/build/bazel"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/testing/require"
)

func TestPraterConfigMatchesUpstreamYaml(t *testing.T) {
presetFPs := presetsFilePath(t, "mainnet")
mn, err := params.ByName(params.MainnetName)
require.NoError(t, err)
cfg := mn.Copy()
for _, fp := range presetFPs {
cfg, err = params.UnmarshalConfigFile(fp, cfg)
require.NoError(t, err)
}
fPath, err := bazel.Runfile("external/goerli_testnet")
require.NoError(t, err)
configFP := path.Join(fPath, "prater", "config.yaml")
pcfg, err := params.UnmarshalConfigFile(configFP, nil)
require.NoError(t, err)
fields := fieldsFromYamls(t, append(presetFPs, configFP))
assertYamlFieldsMatch(t, "prater", fields, pcfg, params.PraterConfig())
}
@@ -8,8 +8,6 @@ const (
MainnetName = "mainnet"
MainnetTestName = "mainnet-test"
MinimalName = "minimal"
PraterName = "prater"
GoerliName = "goerli"
SepoliaName = "sepolia"
HoleskyName = "holesky"
)
@@ -108,7 +108,7 @@ type Settings struct {
DefaultConfig *Option
}

// ShouldBeSaved goes through checks to see if the value should be saveable
// ShouldBeSaved goes through checks to see if the value should be savable
// Pseudocode: conditions for being saved into the database
// 1. settings are not nil
// 2. proposeconfig is not nil (this defines specific settings for each validator key), default config can be nil in this case and fall back to beacon node settings
@@ -24,7 +24,7 @@ type Node[T any] struct {
next *Node[T]
}

// Copy returns a copy of the origina list.
// Copy returns a copy of the original list.
func (l *List[T]) Copy() *List[T] {
if l == nil {
return nil
@@ -91,6 +91,7 @@ package mvslice

import (
"fmt"
"slices"
"sync"

"github.com/pkg/errors"
@@ -578,10 +579,8 @@ func (s *Slice[V]) updateAppendedItem(obj Identifiable, index uint64, val V) err
}

func containsId(ids []uint64, wanted uint64) (int, bool) {
for i, id := range ids {
if id == wanted {
return i, true
}
if i := slices.Index(ids, wanted); i >= 0 {
return i, true
}
return 0, false
}
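The containsId rewrite above leans on slices.Index returning -1 when the value is absent, so the i >= 0 guard keeps the original (0, false) fallback. A small runnable sketch with invented sample data:

```go
package main

import (
	"fmt"
	"slices"
)

// containsId reports the position of wanted in ids, mirroring the refactored
// helper: slices.Index yields the first matching index, or -1 if none exists.
func containsId(ids []uint64, wanted uint64) (int, bool) {
	if i := slices.Index(ids, wanted); i >= 0 {
		return i, true
	}
	return 0, false
}

func main() {
	fmt.Println(containsId([]uint64{4, 7, 9}, 7)) // 1 true
	fmt.Println(containsId([]uint64{4, 7, 9}, 5)) // 0 false
}
```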
@@ -2,6 +2,7 @@ package slice

import (
"fmt"
"slices"
"strings"

"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
@@ -140,12 +141,7 @@ func NotUint64(a, b []uint64) []uint64 {

// IsInUint64 returns true if a is in b and False otherwise.
func IsInUint64(a uint64, b []uint64) bool {
for _, v := range b {
if a == v {
return true
}
}
return false
return slices.Contains(b, a)
}

// IntersectionInt64 of any number of int64 slices with time
@@ -226,12 +222,7 @@ func NotInt64(a, b []int64) []int64 {

// IsInInt64 returns true if a is in b and False otherwise.
func IsInInt64(a int64, b []int64) bool {
for _, v := range b {
if a == v {
return true
}
}
return false
return slices.Contains(b, a)
}

// UnionByteSlices returns the all elements between sets of byte slices.
@@ -358,12 +349,7 @@ func NotSlot(a, b []primitives.Slot) []primitives.Slot {

// IsInSlots returns true if a is in b and False otherwise.
func IsInSlots(a primitives.Slot, b []primitives.Slot) bool {
for _, v := range b {
if a == v {
return true
}
}
return false
return slices.Contains(b, a)
}

// Unique returns an array with duplicates filtered based on the type given
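All three IsIn* helpers above reduce to the same call because slices.Contains is generic over any comparable element type. A standalone illustration follows; the local Slot type merely stands in for primitives.Slot, which is assumed here to be a simple named integer type.

```go
package main

import (
	"fmt"
	"slices"
)

// Slot stands in for primitives.Slot; any comparable element type works with
// slices.Contains, which is why one stdlib call covers uint64, int64 and Slot.
type Slot uint64

func main() {
	fmt.Println(slices.Contains([]uint64{1, 2, 3}, uint64(2))) // true
	fmt.Println(slices.Contains([]int64{-1, 0}, int64(7)))     // false
	fmt.Println(slices.Contains([]Slot{10, 20}, Slot(20)))     // true
}
```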
@@ -10,7 +10,7 @@ import (

// During deepValueEqual, must keep track of checks that are
// in progress. The comparison algorithm assumes that all
// checks in progress are true when it reencounters them.
// checks in progress are true when it re-encounters them.
// Visited comparisons are stored in a map indexed by visit.
type visit struct {
a1 unsafe.Pointer // #nosec G103 -- Test use only
@@ -27,7 +27,7 @@ type visit struct {
// intricacies when determining equality of empty values.
//
// Tests for deep equality using reflected types. The map argument tracks
// comparisons that have already been seen, which allows short circuiting on
// comparisons that have already been seen, which allows short-circuiting on
// recursive types.
func deepValueEqual(v1, v2 reflect.Value, visited map[visit]bool, depth int) bool {
if !v1.IsValid() || !v2.IsValid() {
@@ -273,7 +273,7 @@ func deepValueBaseTypeEqual(v1, v2 reflect.Value) bool {
// In general DeepEqual is a recursive relaxation of Go's == operator.
// However, this idea is impossible to implement without some inconsistency.
// Specifically, it is possible for a value to be unequal to itself,
// either because it is of func type (uncomparable in general)
// either because it is of func type (incomparable in general)
// or because it is a floating-point NaN value (not equal to itself in floating-point comparison),
// or because it is an array, struct, or interface containing
// such a value.
@@ -1133,7 +1133,7 @@ fi

if [ "$ft_search" = "1" ];
then
# detect bower comoponents location
# detect bower components location
bower_components="bower_components"
bower_rc=$(cd "$git_root" && cat .bowerrc 2>/dev/null || echo "")
if [ "$bower_rc" != "" ];
@@ -13,8 +13,8 @@ var urltests = []struct {
maskedUrl string
}{
{"https://a:b@xyz.net", "https://***@xyz.net"},
{"https://eth-goerli.alchemyapi.io/v2/tOZG5mjl3.zl_nZdZTNIBUzsDq62R_dkOtY",
"https://eth-goerli.alchemyapi.io/***"},
{"https://eth-holesky.alchemyapi.io/v2/tOZG5mjl3.zl_nZdZTNIBUzsDq62R_dkOtY",
"https://eth-holesky.alchemyapi.io/***"},
{"https://google.com/search?q=golang", "https://google.com/***"},
{"https://user@example.com/foo%2fbar", "https://***@example.com/***"},
{"http://john@example.com/#x/y%2Fz", "http://***@example.com/#***"},
@@ -20,7 +20,7 @@ type APIMessage struct {

// CommonStats represent generic metrics that are expected on both
// beaconnode and validator metric types. This type is used for
// marshaling metrics to the POST body sent to the metrics collcetor.
// marshaling metrics to the POST body sent to the metrics collector.
// Note that some metrics are labeled NA because they are expected
// to be present with their zero-value when not supported by a client.
type CommonStats struct {
@@ -42,7 +42,7 @@ type CommonStats struct {

// BeaconNodeStats embeds CommonStats and represents metrics specific to
// the beacon-node process. This type is used to marshal metrics data
// to the POST body sent to the metrics collcetor. To make the connection
// to the POST body sent to the metrics collector. To make the connection
// to client-stats clear, BeaconNodeStats is also used by prometheus
// collection code introduced to support client-stats.
// Note that some metrics are labeled NA because they are expected
@@ -66,7 +66,7 @@ type BeaconNodeStats struct {

// ValidatorStats embeds CommonStats and represents metrics specific to
// the validator process. This type is used to marshal metrics data
// to the POST body sent to the metrics collcetor.
// to the POST body sent to the metrics collector.
// Note that some metrics are labeled NA because they are expected
// to be present with their zero-value when not supported by a client.
type ValidatorStats struct {
@@ -257,6 +257,7 @@ func (node *BeaconNode) Start(ctx context.Context) error {
fmt.Sprintf("--%s=%s", flags.ExecutionJWTSecretFlag.Name, jwtPath),
fmt.Sprintf("--%s=%d", flags.MinSyncPeers.Name, 1),
fmt.Sprintf("--%s=%d", cmdshared.P2PUDPPort.Name, e2e.TestParams.Ports.PrysmBeaconNodeUDPPort+index),
fmt.Sprintf("--%s=%d", cmdshared.P2PQUICPort.Name, e2e.TestParams.Ports.PrysmBeaconNodeQUICPort+index),
fmt.Sprintf("--%s=%d", cmdshared.P2PTCPPort.Name, e2e.TestParams.Ports.PrysmBeaconNodeTCPPort+index),
fmt.Sprintf("--%s=%d", cmdshared.P2PMaxPeers.Name, expectedNumOfPeers),
fmt.Sprintf("--%s=%d", flags.MonitoringPortFlag.Name, e2e.TestParams.Ports.PrysmBeaconNodeMetricsPort+index),
@@ -275,6 +276,7 @@ func (node *BeaconNode) Start(ctx context.Context) error {
"--" + cmdshared.ForceClearDB.Name,
"--" + cmdshared.AcceptTosFlag.Name,
"--" + flags.EnableDebugRPCEndpoints.Name,
"--" + features.EnableQUIC.Name,
}
if config.UsePprof {
args = append(args, "--pprof", fmt.Sprintf("--pprofport=%d", e2e.TestParams.Ports.PrysmBeaconNodePprofPort+index))
@@ -313,7 +315,7 @@ func (node *BeaconNode) Start(ctx context.Context) error {
}

if config.UseFixedPeerIDs {
peerId, err := helpers.FindFollowingTextInFile(stdOutFile, "Running node with peer id of ")
peerId, err := helpers.FindFollowingTextInFile(stdOutFile, "Running node with id")
if err != nil {
return fmt.Errorf("could not find peer id: %w", err)
}
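The argument list above is built in two styles: value-carrying flags via fmt.Sprintf and bare boolean flags, such as the new QUIC feature flag, by prefixing "--" to the flag name. A tiny sketch of the difference; the port flag name and number below are placeholders, only "enable-quic" comes from the flag defined earlier in this diff.

```go
package main

import "fmt"

func main() {
	// Value-carrying flag, Sprintf style (name and value here are placeholders).
	portFlag := fmt.Sprintf("--%s=%d", "p2p-quic-port", 13100)
	// Bare boolean feature flag, built by prefixing the flag name.
	quicFlag := "--" + "enable-quic"
	fmt.Println(portFlag, quicFlag) // --p2p-quic-port=13100 --enable-quic
}
```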
@@ -127,7 +127,7 @@ func feeRecipientIsPresent(_ *types.EvaluationContext, conns ...*grpc.ClientConn
// In e2e we generate deterministic keys by validator index, and then use a slice of their public key bytes
// as the fee recipient, so that this will also be deterministic, so this test can statelessly verify it.
// These should be the only keys we see.
// Otherwise something has changed in e2e and this test needs to be updated.
// Otherwise, something has changed in e2e and this test needs to be updated.
_, knownKey := valkeys[pk]
if !knownKey {
log.WithField("pubkey", pk).
@@ -245,7 +245,7 @@ func activatesDepositedValidators(ec *e2etypes.EvaluationContext, conns ...*grpc
}
}

// Make sure every post-genesis deposit has been proecssed, resulting in a validator.
// Make sure every post-genesis deposit has been processed, resulting in a validator.
if len(expected) > 0 {
return fmt.Errorf("missing %d validators for post-genesis deposits", len(expected))
}
@@ -27,7 +27,7 @@ type doubleAttestationHelper struct {
committee []primitives.ValidatorIndex
}

// Initializes helper with details needed to make a double attestation for testint purposes
// Initializes helper with details needed to make a double attestation for testing purposes
// Populates the committee of that is responsible for the
func (h *doubleAttestationHelper) setup(ctx context.Context) error {
chainHead, err := h.beaconClient.GetChainHead(ctx, &emptypb.Empty{})
@@ -46,6 +46,7 @@ type ports struct {
Eth1ProxyPort int
PrysmBeaconNodeRPCPort int
PrysmBeaconNodeUDPPort int
PrysmBeaconNodeQUICPort int
PrysmBeaconNodeTCPPort int
PrysmBeaconNodeGatewayPort int
PrysmBeaconNodeMetricsPort int
@@ -144,10 +145,11 @@ const (

PrysmBeaconNodeRPCPort = 4150
PrysmBeaconNodeUDPPort = PrysmBeaconNodeRPCPort + portSpan
PrysmBeaconNodeTCPPort = PrysmBeaconNodeRPCPort + 2*portSpan
PrysmBeaconNodeGatewayPort = PrysmBeaconNodeRPCPort + 3*portSpan
PrysmBeaconNodeMetricsPort = PrysmBeaconNodeRPCPort + 4*portSpan
PrysmBeaconNodePprofPort = PrysmBeaconNodeRPCPort + 5*portSpan
PrysmBeaconNodeQUICPort = PrysmBeaconNodeRPCPort + 2*portSpan
PrysmBeaconNodeTCPPort = PrysmBeaconNodeRPCPort + 3*portSpan
PrysmBeaconNodeGatewayPort = PrysmBeaconNodeRPCPort + 4*portSpan
PrysmBeaconNodeMetricsPort = PrysmBeaconNodeRPCPort + 5*portSpan
PrysmBeaconNodePprofPort = PrysmBeaconNodeRPCPort + 6*portSpan
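The reshuffled constants above slot the QUIC base in at RPC + 2*portSpan and push the TCP, gateway, metrics, and pprof bases up by one span each; together with the extra port registered in initializeStandardPorts below, that is why TestStandardPorts now expects 17 registrations instead of 16. A quick sketch of the arithmetic; portSpan's real value is defined elsewhere in the file, so the 50 used here is only an assumption.

```go
package main

import "fmt"

func main() {
	// portSpan = 50 is an assumption for illustration; the real constant lives
	// elsewhere in the e2e params package.
	const (
		portSpan                   = 50
		PrysmBeaconNodeRPCPort     = 4150
		PrysmBeaconNodeUDPPort     = PrysmBeaconNodeRPCPort + portSpan
		PrysmBeaconNodeQUICPort    = PrysmBeaconNodeRPCPort + 2*portSpan
		PrysmBeaconNodeTCPPort     = PrysmBeaconNodeRPCPort + 3*portSpan
		PrysmBeaconNodeGatewayPort = PrysmBeaconNodeRPCPort + 4*portSpan
		PrysmBeaconNodeMetricsPort = PrysmBeaconNodeRPCPort + 5*portSpan
		PrysmBeaconNodePprofPort   = PrysmBeaconNodeRPCPort + 6*portSpan
	)
	fmt.Println(PrysmBeaconNodeUDPPort, PrysmBeaconNodeQUICPort, PrysmBeaconNodeTCPPort)
	fmt.Println(PrysmBeaconNodeGatewayPort, PrysmBeaconNodeMetricsPort, PrysmBeaconNodePprofPort)
}
```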
LighthouseBeaconNodeP2PPort = 5150
LighthouseBeaconNodeHTTPPort = LighthouseBeaconNodeP2PPort + portSpan
@@ -330,6 +332,10 @@ func initializeStandardPorts(shardCount, shardIndex int, ports *ports, existingR
if err != nil {
return err
}
beaconNodeQUICPort, err := port(PrysmBeaconNodeQUICPort, shardCount, shardIndex, existingRegistrations)
if err != nil {
return err
}
beaconNodeTCPPort, err := port(PrysmBeaconNodeTCPPort, shardCount, shardIndex, existingRegistrations)
if err != nil {
return err
@@ -367,6 +373,7 @@ func initializeStandardPorts(shardCount, shardIndex int, ports *ports, existingR
ports.Eth1ProxyPort = eth1ProxyPort
ports.PrysmBeaconNodeRPCPort = beaconNodeRPCPort
ports.PrysmBeaconNodeUDPPort = beaconNodeUDPPort
ports.PrysmBeaconNodeQUICPort = beaconNodeQUICPort
ports.PrysmBeaconNodeTCPPort = beaconNodeTCPPort
ports.PrysmBeaconNodeGatewayPort = beaconNodeGatewayPort
ports.PrysmBeaconNodeMetricsPort = beaconNodeMetricsPort
@@ -30,7 +30,7 @@ func TestStandardPorts(t *testing.T) {
var existingRegistrations []int
testPorts := &ports{}
assert.NoError(t, initializeStandardPorts(2, 0, testPorts, &existingRegistrations))
assert.Equal(t, 16, len(existingRegistrations))
assert.Equal(t, 17, len(existingRegistrations))
assert.NotEqual(t, 0, testPorts.PrysmBeaconNodeGatewayPort)
assert.NotEqual(t, 0, testPorts.PrysmBeaconNodeTCPPort)
assert.NotEqual(t, 0, testPorts.JaegerTracingPort)
@@ -208,7 +208,7 @@ func GenerateFullBlockCapella(
return &ethpb.SignedBeaconBlockCapella{Block: block, Signature: signature.Marshal()}, nil
}

// GenerateBLSToExecutionChange generates a valid bls to exec changae for validator `val` and its private key `priv` with the given beacon state `st`.
// GenerateBLSToExecutionChange generates a valid bls to exec change for validator `val` and its private key `priv` with the given beacon state `st`.
func GenerateBLSToExecutionChange(st state.BeaconState, priv bls.SecretKey, val primitives.ValidatorIndex) (*ethpb.SignedBLSToExecutionChange, error) {
cred := indexToHash(uint64(val))
pubkey := priv.PublicKey().Marshal()
Some files were not shown because too many files have changed in this diff.