Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-11 06:18:05 -05:00)

Compare commits: reorg-mon...quicStream (61 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 501ad49f58 | |
| | 875c654f55 | |
| | c3ccf8d536 | |
| | 649e2ea9fa | |
| | f3b54dbdd3 | |
| | 5cd86812c7 | |
| | 6b7a7a1c10 | |
| | 4efb331924 | |
| | 46497793b9 | |
| | 1a0bd725fa | |
| | ebbdc78b2b | |
| | a1bb70e50a | |
| | b95bdf87fd | |
| | 3102a79a5d | |
| | 2cc3f69a3f | |
| | a861489a83 | |
| | 0e1c585f7d | |
| | 9df20e616c | |
| | 53fdd2d062 | |
| | 2b4bb5d890 | |
| | 38f208d70d | |
| | 65b90abdda | |
| | f3b49d4eaf | |
| | 37332e2e49 | |
| | 123b034d0b | |
| | 7f449660f0 | |
| | a2d0702aa5 | |
| | c9ccc1cb2c | |
| | a9687a5f35 | |
| | 3996a5b5be | |
| | e01decfc4f | |
| | 8ea340026b | |
| | 9e382c4ff4 | |
| | 54b2ee8435 | |
| | 5b1da7353c | |
| | 9f17e65860 | |
| | da51fae94c | |
| | 9b2d53b0d1 | |
| | d6f9196707 | |
| | 1b0e09369e | |
| | 12482eeb40 | |
| | acc307b959 | |
| | c1d75c295a | |
| | fad118cb04 | |
| | cdd1d819df | |
| | 97edffaff5 | |
| | 6de7df6b9d | |
| | 14d7416c16 | |
| | 6782df917a | |
| | 3d2230223f | |
| | b008a6422d | |
| | d19365507f | |
| | c05e39a668 | |
| | 63c2b3563a | |
| | a6e86c6731 | |
| | 32fb183392 | |
| | cade09ba0b | |
| | f85ddfe265 | |
| | 3b97094ea4 | |
| | acdbf7c491 | |
| | 1cc1effd75 | |
.gitignore (vendored), 3 changes

@@ -41,3 +41,6 @@ jwt.hex
# manual testing
tmp

# spectest coverage reports
report.txt
@@ -130,9 +130,9 @@ aspect_bazel_lib_register_toolchains()

http_archive(
    name = "rules_oci",
    sha256 = "c71c25ed333a4909d2dd77e0b16c39e9912525a98c7fa85144282be8d04ef54c",
    strip_prefix = "rules_oci-1.3.4",
    url = "https://github.com/bazel-contrib/rules_oci/releases/download/v1.3.4/rules_oci-v1.3.4.tar.gz",
    sha256 = "4a276e9566c03491649eef63f27c2816cc222f41ccdebd97d2c5159e84917c3b",
    strip_prefix = "rules_oci-1.7.4",
    url = "https://github.com/bazel-contrib/rules_oci/releases/download/v1.7.4/rules_oci-v1.7.4.tar.gz",
)

load("@rules_oci//oci:dependencies.bzl", "rules_oci_dependencies")
@@ -88,7 +88,7 @@ func TestToggle(t *testing.T) {
	}
}

func TestToogleMultipleTimes(t *testing.T) {
func TestToggleMultipleTimes(t *testing.T) {
	t.Parallel()

	v := New()
@@ -101,16 +101,16 @@ func TestToogleMultipleTimes(t *testing.T) {

		expected := i%2 != 0
		if v.IsSet() != expected {
			t.Fatalf("AtomicBool.Toogle() doesn't work after %d calls, expected: %v, got %v", i, expected, v.IsSet())
			t.Fatalf("AtomicBool.Toggle() doesn't work after %d calls, expected: %v, got %v", i, expected, v.IsSet())
		}

		if pre == v.IsSet() {
			t.Fatalf("AtomicBool.Toogle() returned wrong value at the %dth calls, expected: %v, got %v", i, !v.IsSet(), pre)
			t.Fatalf("AtomicBool.Toggle() returned wrong value at the %dth calls, expected: %v, got %v", i, !v.IsSet(), pre)
		}
	}
}

func TestToogleAfterOverflow(t *testing.T) {
func TestToggleAfterOverflow(t *testing.T) {
	t.Parallel()

	var value int32 = math.MaxInt32
@@ -122,7 +122,7 @@ func TestToogleAfterOverflow(t *testing.T) {
	v.Toggle()
	expected := math.MaxInt32%2 == 0
	if v.IsSet() != expected {
		t.Fatalf("AtomicBool.Toogle() doesn't work after overflow, expected: %v, got %v", expected, v.IsSet())
		t.Fatalf("AtomicBool.Toggle() doesn't work after overflow, expected: %v, got %v", expected, v.IsSet())
	}

	// make sure overflow happened
@@ -135,7 +135,7 @@ func TestToogleAfterOverflow(t *testing.T) {
	v.Toggle()
	expected = !expected
	if v.IsSet() != expected {
		t.Fatalf("AtomicBool.Toogle() doesn't work after the second call after overflow, expected: %v, got %v", expected, v.IsSet())
		t.Fatalf("AtomicBool.Toggle() doesn't work after the second call after overflow, expected: %v, got %v", expected, v.IsSet())
	}
}
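The Toggle tests above exercise wrap-around of an int32 counter. A minimal sketch of a counter-backed atomic boolean in that style follows; it is a plausible illustration only, not necessarily the repository's implementation:

```go
package abool

import "sync/atomic"

// AtomicBool is a hypothetical counter-backed boolean: the value is "set"
// whenever the underlying counter is odd, so Toggle is a single atomic add.
type AtomicBool struct {
	n int32
}

// New returns an unset AtomicBool.
func New() *AtomicBool { return &AtomicBool{} }

// Toggle flips the value and returns the previous state. The counter may
// overflow int32; parity is preserved across the wrap, which is the property
// the overflow tests above are exercising.
func (b *AtomicBool) Toggle() bool {
	old := atomic.AddInt32(&b.n, 1) - 1
	return old&1 == 1
}

// IsSet reports whether the value is currently set.
func (b *AtomicBool) IsSet() bool {
	return atomic.LoadInt32(&b.n)&1 == 1
}
```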
@@ -20,6 +20,7 @@ package event
import (
	"errors"
	"reflect"
	"slices"
	"sync"
)

@@ -219,12 +220,9 @@ type caseList []reflect.SelectCase

// find returns the index of a case containing the given channel.
func (cs caseList) find(channel interface{}) int {
	for i, cas := range cs {
		if cas.Chan.Interface() == channel {
			return i
		}
	}
	return -1
	return slices.IndexFunc(cs, func(selectCase reflect.SelectCase) bool {
		return selectCase.Chan.Interface() == channel
	})
}

// delete removes the given case from cs.
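The find change above swaps a hand-written search loop for slices.IndexFunc from the Go standard library (Go 1.21+). A small self-contained illustration of the same pattern, using made-up data:

```go
package main

import (
	"fmt"
	"slices"
)

type subscription struct {
	topic string
}

func main() {
	subs := []subscription{{"blocks"}, {"attestations"}, {"blobs"}}

	// IndexFunc returns the index of the first element satisfying the
	// predicate, or -1 if none does -- exactly what the manual loop did.
	i := slices.IndexFunc(subs, func(s subscription) bool {
		return s.topic == "attestations"
	})
	fmt.Println(i) // 1

	missing := slices.IndexFunc(subs, func(s subscription) bool {
		return s.topic == "sync"
	})
	fmt.Println(missing) // -1
}
```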
@@ -63,7 +63,7 @@ func Scatter(inputLen int, sFunc func(int, int, *sync.RWMutex) (interface{}, err
	return results, nil
}

// calculateChunkSize calculates a suitable chunk size for the purposes of parallelisation.
// calculateChunkSize calculates a suitable chunk size for the purposes of parallelization.
func calculateChunkSize(items int) int {
	// Start with a simple even split
	chunkSize := items / runtime.GOMAXPROCS(0)
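For context on the GOMAXPROCS-based split in calculateChunkSize, here is a generic sketch of chunking work across goroutines; it is not the Scatter implementation, just the same idea under assumed, simplified inputs:

```go
package main

import (
	"fmt"
	"runtime"
	"sync"
)

// chunkSize splits items roughly evenly across available CPUs,
// guaranteeing a minimum of one item per chunk.
func chunkSize(items int) int {
	size := items / runtime.GOMAXPROCS(0)
	if size < 1 {
		return 1
	}
	return size
}

func main() {
	data := make([]int, 1000)
	for i := range data {
		data[i] = i
	}

	size := chunkSize(len(data))
	var wg sync.WaitGroup
	results := make([]int, (len(data)+size-1)/size)

	for c := 0; c*size < len(data); c++ {
		start, end := c*size, (c+1)*size
		if end > len(data) {
			end = len(data)
		}
		wg.Add(1)
		go func(c, start, end int) {
			defer wg.Done()
			// Each goroutine works on its own chunk and writes to its own slot.
			sum := 0
			for _, v := range data[start:end] {
				sum += v
			}
			results[c] = sum
		}(c, start, end)
	}
	wg.Wait()
	fmt.Println(len(results), "chunks processed")
}
```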
bazel.sh, 2 changes

@@ -2,7 +2,7 @@

# This script serves as a wrapper around bazel to limit the scope of environment variables that
# may change the action output. Using this script should result in a higher cache hit ratio for
# cached actions with a more heremtic build.
# cached actions with a more hermetic build.

env -i \
    PATH=/usr/bin:/bin \
@@ -586,7 +586,7 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
			s.blobNotifiers.delete(root)
			return nil
		case <-ctx.Done():
			return errors.Wrap(ctx.Err(), "context deadline waiting for blob sidecars")
			return errors.Wrapf(ctx.Err(), "context deadline waiting for blob sidecars slot: %d, BlockRoot: %#x", block.Slot(), root)
		}
	}
}
@@ -594,7 +594,7 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
func daCheckLogFields(root [32]byte, slot primitives.Slot, expected, missing int) logrus.Fields {
	return logrus.Fields{
		"slot":          slot,
		"root":          root,
		"root":          fmt.Sprintf("%#x", root),
		"blobsExpected": expected,
		"blobsWaiting":  missing,
	}
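The second hunk above switches the logged root from a raw [32]byte to a hex string. A tiny standalone example (hypothetical values) of the difference in logrus output:

```go
package main

import (
	"fmt"

	"github.com/sirupsen/logrus"
)

func main() {
	var root [32]byte
	root[0], root[31] = 0xde, 0xad

	// Logging the array directly prints 32 decimal bytes, e.g. [222 0 0 ... 173].
	logrus.WithField("root", root).Info("raw array")

	// Formatting with %#x yields the familiar 0x-prefixed hex form.
	logrus.WithField("root", fmt.Sprintf("%#x", root)).Info("hex string")
}
```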
@@ -60,7 +60,7 @@ func (s *Service) getFCUArgsEarlyBlock(cfg *postBlockProcessConfig, fcuArgs *fcu

// logNonCanonicalBlockReceived prints a message informing that the received
// block is not the head of the chain. It requires the caller holds a lock on
// Foprkchoice.
// Forkchoice.
func (s *Service) logNonCanonicalBlockReceived(blockRoot [32]byte, headRoot [32]byte) {
	receivedWeight, err := s.cfg.ForkChoiceStore.Weight(blockRoot)
	if err != nil {
@@ -290,18 +290,10 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
	fRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(finalized.Root))
	s.cfg.ForkChoiceStore.Lock()
	defer s.cfg.ForkChoiceStore.Unlock()
	if params.BeaconConfig().ConfigName != params.PraterName {
		if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(s.ctx, &forkchoicetypes.Checkpoint{Epoch: justified.Epoch,
			Root: bytesutil.ToBytes32(justified.Root)}); err != nil {
			return errors.Wrap(err, "could not update forkchoice's justified checkpoint")
		}
	} else {
		if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(s.ctx, &forkchoicetypes.Checkpoint{Epoch: finalized.Epoch,
			Root: bytesutil.ToBytes32(finalized.Root)}); err != nil {
			return errors.Wrap(err, "could not update forkchoice's justified checkpoint")
		}
	if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(s.ctx, &forkchoicetypes.Checkpoint{Epoch: justified.Epoch,
		Root: bytesutil.ToBytes32(justified.Root)}); err != nil {
		return errors.Wrap(err, "could not update forkchoice's justified checkpoint")
	}

	if err := s.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: finalized.Epoch,
		Root: bytesutil.ToBytes32(finalized.Root)}); err != nil {
		return errors.Wrap(err, "could not update forkchoice's finalized checkpoint")
@@ -804,7 +804,7 @@ func TestFinalizedDeposits_ReturnsTrieCorrectly(t *testing.T) {
	depositTrie, err := trie.GenerateTrieFromItems(trieItems, params.BeaconConfig().DepositContractTreeDepth)
	assert.NoError(t, err)

	// Perform this in a non-sensical ordering
	// Perform this in a nonsensical ordering
	require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 10, [32]byte{}, 0))
	require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 2, [32]byte{}, 0))
	require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 3, [32]byte{}, 0))

@@ -784,7 +784,7 @@ func TestFinalizedDeposits_ReturnsTrieCorrectly(t *testing.T) {
	depositTrie, err := trie.GenerateTrieFromItems(trieItems, params.BeaconConfig().DepositContractTreeDepth)
	assert.NoError(t, err)

	// Perform this in a non-sensical ordering
	// Perform this in a nonsensical ordering
	err = dc.InsertFinalizedDeposits(context.Background(), 1, [32]byte{}, 0)
	require.NoError(t, err)
	err = dc.InsertFinalizedDeposits(context.Background(), 2, [32]byte{}, 0)
@@ -12,7 +12,6 @@ import (
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
	prysmTime "github.com/prysmaticlabs/prysm/v5/time"
	"github.com/prysmaticlabs/prysm/v5/time/slots"
	log "github.com/sirupsen/logrus"
)

var (
@@ -133,9 +132,6 @@ func ComputeSubnetFromCommitteeAndSlot(activeValCount uint64, comIdx primitives.
//
// In the attestation must be within the range of 95 to 102 in the example above.
func ValidateAttestationTime(attSlot primitives.Slot, genesisTime time.Time, clockDisparity time.Duration) error {
	if err := slots.ValidateClock(attSlot, uint64(genesisTime.Unix())); err != nil {
		return err
	}
	attTime, err := slots.ToTime(uint64(genesisTime.Unix()), attSlot)
	if err != nil {
		return err
@@ -182,24 +178,15 @@ func ValidateAttestationTime(attSlot primitives.Slot, genesisTime time.Time, clo
	}

	// EIP-7045: Starting in Deneb, allow any attestations from the current or previous epoch.
	currentEpoch := slots.ToEpoch(currentSlot)
	prevEpoch, err := currentEpoch.SafeSub(1)
	if err != nil {
		log.WithError(err).Debug("Ignoring underflow for a deneb attestation inclusion check in epoch 0")
		prevEpoch = 0
	}
	attSlotEpoch := slots.ToEpoch(attSlot)
	if attSlotEpoch != currentEpoch && attSlotEpoch != prevEpoch {
	if attEpoch+1 < currentEpoch {
		attError = fmt.Errorf(
			"attestation epoch %d not within current epoch %d or previous epoch %d",
			attSlot/params.BeaconConfig().SlotsPerEpoch,
			"attestation epoch %d not within current epoch %d or previous epoch",
			attEpoch,
			currentEpoch,
			prevEpoch,
		)
		return errors.Join(ErrTooLate, attError)
	}

	return nil
}
@@ -197,7 +197,7 @@ func Test_ValidateAttestationTime(t *testing.T) {
				-500 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second,
			).Add(200 * time.Millisecond),
		},
		wantedErr: "attestation epoch 8 not within current epoch 15 or previous epoch 14",
		wantedErr: "attestation epoch 8 not within current epoch 15 or previous epoch",
	},
	{
		name: "attestation.slot is well beyond current slot",
@@ -205,7 +205,7 @@ func Test_ValidateAttestationTime(t *testing.T) {
			attSlot:     1 << 32,
			genesisTime: prysmTime.Now().Add(-15 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second),
		},
		wantedErr: "which exceeds max allowed value relative to the local clock",
		wantedErr: "attestation slot 4294967296 not within attestation propagation range of 0 to 15 (current slot)",
	},
}
for _, tt := range tests {
@@ -22,7 +22,7 @@ var balanceCache = cache.NewEffectiveBalanceCache()
// """
// Return the combined effective balance of the ``indices``.
// ``EFFECTIVE_BALANCE_INCREMENT`` Gwei minimum to avoid divisions by zero.
// Math safe up to ~10B ETH, afterwhich this overflows uint64.
// Math safe up to ~10B ETH, after which this overflows uint64.
// """
// return Gwei(max(EFFECTIVE_BALANCE_INCREMENT, sum([state.validators[index].effective_balance for index in indices])))
func TotalBalance(state state.ReadOnlyValidators, indices []primitives.ValidatorIndex) uint64 {
@@ -59,7 +59,7 @@ func ComputeDomainAndSign(st state.ReadOnlyBeaconState, epoch primitives.Epoch,
	return ComputeDomainAndSignWithoutState(st.Fork(), epoch, domain, st.GenesisValidatorsRoot(), obj, key)
}

// ComputeDomainAndSignWithoutState offers the same functionalit as ComputeDomainAndSign without the need to provide a BeaconState.
// ComputeDomainAndSignWithoutState offers the same functionality as ComputeDomainAndSign without the need to provide a BeaconState.
// This is particularly helpful for signing values in tests.
func ComputeDomainAndSignWithoutState(fork *ethpb.Fork, epoch primitives.Epoch, domain [4]byte, vr []byte, obj fssz.HashRoot, key bls.SecretKey) ([]byte, error) {
	// EIP-7044: Beginning in Deneb, fix the fork version to Capella for signed exits.
@@ -4,6 +4,7 @@ go_library(
    name = "go_default_library",
    srcs = [
        "blob.go",
        "cache.go",
        "ephemeral.go",
        "log.go",
        "metrics.go",
@@ -33,6 +34,7 @@ go_test(
    name = "go_default_test",
    srcs = [
        "blob_test.go",
        "cache_test.go",
        "pruner_test.go",
    ],
    embed = [":go_default_library"],
@@ -1,6 +1,7 @@
package filesystem

import (
	"context"
	"fmt"
	"os"
	"path"
@@ -103,12 +104,29 @@ func (bs *BlobStorage) WarmCache() {
		return
	}
	go func() {
		if err := bs.pruner.prune(0); err != nil {
		start := time.Now()
		if err := bs.pruner.warmCache(); err != nil {
			log.WithError(err).Error("Error encountered while warming up blob pruner cache")
		}
		log.WithField("elapsed", time.Since(start)).Info("Blob filesystem cache warm-up complete.")
	}()
}

// ErrBlobStorageSummarizerUnavailable is a sentinel error returned when there is no pruner/cache available.
// This should be used by code that optionally uses the summarizer to optimize rpc requests. Being able to
// fallback when there is no summarizer allows client code to avoid test complexity where the summarizer doesn't matter.
var ErrBlobStorageSummarizerUnavailable = errors.New("BlobStorage not initialized with a pruner or cache")

// WaitForSummarizer blocks until the BlobStorageSummarizer is ready to use.
// BlobStorageSummarizer is not ready immediately on node startup because it needs to sample the blob filesystem to
// determine which blobs are available.
func (bs *BlobStorage) WaitForSummarizer(ctx context.Context) (BlobStorageSummarizer, error) {
	if bs.pruner == nil {
		return nil, ErrBlobStorageSummarizerUnavailable
	}
	return bs.pruner.waitForCache(ctx)
}

// Save saves blobs given a list of sidecars.
func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error {
	startTime := time.Now()
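A hedged sketch of how a caller might use the new WaitForSummarizer; the helper name and the 5-second timeout are assumptions rather than part of the diff, and the snippet relies on the package's existing context and time imports:

```go
// blobIndicesOnDisk is a hypothetical caller: it waits briefly for the
// summarizer, and falls back to fetching everything when the summarizer is
// unavailable or not yet warmed up.
func blobIndicesOnDisk(ctx context.Context, store *BlobStorage, root [32]byte, count int) []uint64 {
	waitCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()

	sum, err := store.WaitForSummarizer(waitCtx)
	if err != nil {
		// Covers both ErrBlobStorageSummarizerUnavailable and a context timeout:
		// behave as if nothing is cached and let the caller request everything.
		return nil
	}

	summary := sum.Summary(root)
	have := make([]uint64, 0, count)
	for i := 0; i < count; i++ {
		if summary.HasIndex(uint64(i)) {
			have = append(have, uint64(i))
		}
	}
	return have
}
```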
beacon-chain/db/filesystem/cache.go (new file, 119 lines)

@@ -0,0 +1,119 @@
package filesystem

import (
	"sync"

	fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)

// blobIndexMask is a bitmask representing the set of blob indices that are currently set.
type blobIndexMask [fieldparams.MaxBlobsPerBlock]bool

// BlobStorageSummary represents cached information about the BlobSidecars on disk for each root the cache knows about.
type BlobStorageSummary struct {
	slot primitives.Slot
	mask blobIndexMask
}

// HasIndex returns true if the BlobSidecar at the given index is available in the filesystem.
func (s BlobStorageSummary) HasIndex(idx uint64) bool {
	// Protect from panic, but assume callers are sophisticated enough to not need an error telling them they have an invalid idx.
	if idx >= fieldparams.MaxBlobsPerBlock {
		return false
	}
	return s.mask[idx]
}

// AllAvailable returns true if we have all blobs for all indices from 0 to count-1.
func (s BlobStorageSummary) AllAvailable(count int) bool {
	if count > fieldparams.MaxBlobsPerBlock {
		return false
	}
	for i := 0; i < count; i++ {
		if !s.mask[i] {
			return false
		}
	}
	return true
}

// BlobStorageSummarizer can be used to receive a summary of metadata about blobs on disk for a given root.
// The BlobStorageSummary can be used to check which indices (if any) are available for a given block by root.
type BlobStorageSummarizer interface {
	Summary(root [32]byte) BlobStorageSummary
}

type blobStorageCache struct {
	mu     sync.RWMutex
	nBlobs float64
	cache  map[string]BlobStorageSummary
}

var _ BlobStorageSummarizer = &blobStorageCache{}

func newBlobStorageCache() *blobStorageCache {
	return &blobStorageCache{
		cache: make(map[string]BlobStorageSummary, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest*fieldparams.SlotsPerEpoch),
	}
}

// Summary returns the BlobStorageSummary for `root`. The BlobStorageSummary can be used to check for the presence of
// BlobSidecars based on Index.
func (s *blobStorageCache) Summary(root [32]byte) BlobStorageSummary {
	k := rootString(root)
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.cache[k]
}

func (s *blobStorageCache) ensure(key string, slot primitives.Slot, idx uint64) error {
	if idx >= fieldparams.MaxBlobsPerBlock {
		return errIndexOutOfBounds
	}
	s.mu.Lock()
	defer s.mu.Unlock()
	v := s.cache[key]
	v.slot = slot
	if !v.mask[idx] {
		s.updateMetrics(1)
	}
	v.mask[idx] = true
	s.cache[key] = v
	return nil
}

func (s *blobStorageCache) slot(key string) (primitives.Slot, bool) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	v, ok := s.cache[key]
	if !ok {
		return 0, false
	}
	return v.slot, ok
}

func (s *blobStorageCache) evict(key string) {
	var deleted float64
	s.mu.Lock()
	v, ok := s.cache[key]
	if ok {
		for i := range v.mask {
			if v.mask[i] {
				deleted += 1
			}
		}
	}
	delete(s.cache, key)
	s.mu.Unlock()
	if deleted > 0 {
		s.updateMetrics(-deleted)
	}
}

func (s *blobStorageCache) updateMetrics(delta float64) {
	s.nBlobs += delta
	blobDiskCount.Set(s.nBlobs)
	blobDiskSize.Set(s.nBlobs * bytesPerSidecar)
}
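Building on the new cache.go above, a hypothetical helper showing the intended AllAvailable usage; the helper itself is not part of the diff:

```go
// haveAllCommittedBlobs reports whether every blob a block commits to is
// already on disk, using only the cached summary (no filesystem access).
// A block with zero commitments is trivially available, matching the
// "none in idx, 0 arg" test case below.
func haveAllCommittedBlobs(sum BlobStorageSummarizer, root [32]byte, commitmentCount int) bool {
	summary := sum.Summary(root)
	return summary.AllAvailable(commitmentCount)
}
```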
beacon-chain/db/filesystem/cache_test.go (new file, 150 lines)

@@ -0,0 +1,150 @@
package filesystem

import (
	"testing"

	fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
	"github.com/prysmaticlabs/prysm/v5/testing/require"
)

func TestSlotByRoot_Summary(t *testing.T) {
	var noneSet, allSet, firstSet, lastSet, oneSet blobIndexMask
	firstSet[0] = true
	lastSet[len(lastSet)-1] = true
	oneSet[1] = true
	for i := range allSet {
		allSet[i] = true
	}
	cases := []struct {
		name     string
		root     [32]byte
		expected *blobIndexMask
	}{
		{
			name: "not found",
		},
		{
			name:     "none set",
			expected: &noneSet,
		},
		{
			name:     "index 1 set",
			expected: &oneSet,
		},
		{
			name:     "all set",
			expected: &allSet,
		},
		{
			name:     "first set",
			expected: &firstSet,
		},
		{
			name:     "last set",
			expected: &lastSet,
		},
	}
	sc := newBlobStorageCache()
	for _, c := range cases {
		if c.expected != nil {
			key := rootString(bytesutil.ToBytes32([]byte(c.name)))
			sc.cache[key] = BlobStorageSummary{slot: 0, mask: *c.expected}
		}
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			key := bytesutil.ToBytes32([]byte(c.name))
			sum := sc.Summary(key)
			for i := range c.expected {
				ui := uint64(i)
				if c.expected == nil {
					require.Equal(t, false, sum.HasIndex(ui))
				} else {
					require.Equal(t, c.expected[i], sum.HasIndex(ui))
				}
			}
		})
	}
}

func TestAllAvailable(t *testing.T) {
	idxUpTo := func(u int) []int {
		r := make([]int, u)
		for i := range r {
			r[i] = i
		}
		return r
	}
	require.DeepEqual(t, []int{}, idxUpTo(0))
	require.DeepEqual(t, []int{0}, idxUpTo(1))
	require.DeepEqual(t, []int{0, 1, 2, 3, 4, 5}, idxUpTo(6))
	cases := []struct {
		name   string
		idxSet []int
		count  int
		aa     bool
	}{
		{
			// If there are no blobs committed, then all the committed blobs are available.
			name:  "none in idx, 0 arg",
			count: 0,
			aa:    true,
		},
		{
			name:  "none in idx, 1 arg",
			count: 1,
			aa:    false,
		},
		{
			name:   "first in idx, 1 arg",
			idxSet: []int{0},
			count:  1,
			aa:     true,
		},
		{
			name:   "second in idx, 1 arg",
			idxSet: []int{1},
			count:  1,
			aa:     false,
		},
		{
			name:   "first missing, 2 arg",
			idxSet: []int{1},
			count:  2,
			aa:     false,
		},
		{
			name:  "all missing, 1 arg",
			count: 6,
			aa:    false,
		},
		{
			name:  "out of bound is safe",
			count: fieldparams.MaxBlobsPerBlock + 1,
			aa:    false,
		},
		{
			name:   "max present",
			count:  fieldparams.MaxBlobsPerBlock,
			idxSet: idxUpTo(fieldparams.MaxBlobsPerBlock),
			aa:     true,
		},
		{
			name:   "one present",
			count:  1,
			idxSet: idxUpTo(1),
			aa:     true,
		},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			var mask blobIndexMask
			for _, idx := range c.idxSet {
				mask[idx] = true
			}
			sum := BlobStorageSummary{mask: mask}
			require.Equal(t, c.aa, sum.AllAvailable(c.count))
		})
	}
}
@@ -1,6 +1,7 @@
package filesystem

import (
	"context"
	"encoding/binary"
	"io"
	"path"
@@ -12,7 +13,6 @@ import (
	"time"

	"github.com/pkg/errors"
	fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v5/time/slots"
@@ -32,7 +32,8 @@ type blobPruner struct {
	sync.Mutex
	prunedBefore atomic.Uint64
	windowSize   primitives.Slot
	slotMap      *slotForRoot
	cache        *blobStorageCache
	cacheWarmed  chan struct{}
	fs           afero.Fs
}

@@ -41,13 +42,14 @@ func newBlobPruner(fs afero.Fs, retain primitives.Epoch) (*blobPruner, error) {
	if err != nil {
		return nil, errors.Wrap(err, "could not set retentionSlots")
	}
	return &blobPruner{fs: fs, windowSize: r, slotMap: newSlotForRoot()}, nil
	cw := make(chan struct{})
	return &blobPruner{fs: fs, windowSize: r, cache: newBlobStorageCache(), cacheWarmed: cw}, nil
}

// notify updates the pruner's view of root->blob mappings. This allows the pruner to build a cache
// of root->slot mappings and decide when to evict old blobs based on the age of present blobs.
func (p *blobPruner) notify(root [32]byte, latest primitives.Slot, idx uint64) error {
	if err := p.slotMap.ensure(rootString(root), latest, idx); err != nil {
	if err := p.cache.ensure(rootString(root), latest, idx); err != nil {
		return err
	}
	pruned := uint64(windowMin(latest, p.windowSize))
@@ -62,7 +64,7 @@ func (p *blobPruner) notify(root [32]byte, latest primitives.Slot, idx uint64) e
	return nil
}

func windowMin(latest primitives.Slot, offset primitives.Slot) primitives.Slot {
func windowMin(latest, offset primitives.Slot) primitives.Slot {
	// Safely compute the first slot in the epoch for the latest slot
	latest = latest - latest%params.BeaconConfig().SlotsPerEpoch
	if latest < offset {
@@ -71,6 +73,23 @@ func windowMin(latest primitives.Slot, offset primitives.Slot) primitives.Slot {
	return latest - offset
}

func (p *blobPruner) warmCache() error {
	if err := p.prune(0); err != nil {
		return err
	}
	close(p.cacheWarmed)
	return nil
}

func (p *blobPruner) waitForCache(ctx context.Context) (*blobStorageCache, error) {
	select {
	case <-p.cacheWarmed:
		return p.cache, nil
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}

// Prune prunes blobs in the base directory based on the retention epoch.
// It deletes blobs older than currentEpoch - (retentionEpochs+bufferEpochs).
// This is so that we keep a slight buffer and blobs are deleted after n+2 epochs.
@@ -122,7 +141,7 @@ func shouldRetain(slot, pruneBefore primitives.Slot) bool {

func (p *blobPruner) tryPruneDir(dir string, pruneBefore primitives.Slot) (int, error) {
	root := rootFromDir(dir)
	slot, slotCached := p.slotMap.slot(root)
	slot, slotCached := p.cache.slot(root)
	// Return early if the slot is cached and doesn't need pruning.
	if slotCached && shouldRetain(slot, pruneBefore) {
		return 0, nil
@@ -151,7 +170,7 @@ func (p *blobPruner) tryPruneDir(dir string, pruneBefore primitives.Slot) (int,
		if err != nil {
			return 0, errors.Wrapf(err, "index could not be determined for blob file %s", scFiles[i])
		}
		if err := p.slotMap.ensure(root, slot, idx); err != nil {
		if err := p.cache.ensure(root, slot, idx); err != nil {
			return 0, errors.Wrapf(err, "could not update prune cache for blob file %s", scFiles[i])
		}
	}
@@ -179,7 +198,7 @@ func (p *blobPruner) tryPruneDir(dir string, pruneBefore primitives.Slot) (int,
		return removed, errors.Wrapf(err, "unable to remove blob directory %s", dir)
	}

	p.slotMap.evict(rootFromDir(dir))
	p.cache.evict(rootFromDir(dir))
	return len(scFiles), nil
}

@@ -269,71 +288,3 @@ func filterSsz(s string) bool {
func filterPart(s string) bool {
	return filepath.Ext(s) == dotPartExt
}

func newSlotForRoot() *slotForRoot {
	return &slotForRoot{
		cache: make(map[string]*slotCacheEntry, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest*fieldparams.SlotsPerEpoch),
	}
}

type slotCacheEntry struct {
	slot primitives.Slot
	mask [fieldparams.MaxBlobsPerBlock]bool
}

type slotForRoot struct {
	sync.RWMutex
	nBlobs float64
	cache  map[string]*slotCacheEntry
}

func (s *slotForRoot) updateMetrics(delta float64) {
	s.nBlobs += delta
	blobDiskCount.Set(s.nBlobs)
	blobDiskSize.Set(s.nBlobs * bytesPerSidecar)
}

func (s *slotForRoot) ensure(key string, slot primitives.Slot, idx uint64) error {
	if idx >= fieldparams.MaxBlobsPerBlock {
		return errIndexOutOfBounds
	}
	s.Lock()
	defer s.Unlock()
	v, ok := s.cache[key]
	if !ok {
		v = &slotCacheEntry{}
	}
	v.slot = slot
	if !v.mask[idx] {
		s.updateMetrics(1)
	}
	v.mask[idx] = true
	s.cache[key] = v
	return nil
}

func (s *slotForRoot) slot(key string) (primitives.Slot, bool) {
	s.RLock()
	defer s.RUnlock()
	v, ok := s.cache[key]
	if !ok {
		return 0, false
	}
	return v.slot, ok
}

func (s *slotForRoot) evict(key string) {
	s.Lock()
	defer s.Unlock()
	v, ok := s.cache[key]
	var deleted float64
	if ok {
		for i := range v.mask {
			if v.mask[i] {
				deleted += 1
			}
		}
		s.updateMetrics(-deleted)
	}
	delete(s.cache, key)
}
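warmCache and waitForCache above use a closed channel as a one-shot readiness signal. A minimal standalone sketch of that pattern, with a sleep standing in for the real warm-up work:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

type warmer struct {
	ready chan struct{}
}

func newWarmer() *warmer { return &warmer{ready: make(chan struct{})} }

// warm does the slow startup work, then closes ready exactly once so that
// every current and future waiter is released.
func (w *warmer) warm() {
	time.Sleep(50 * time.Millisecond) // stand-in for scanning the filesystem
	close(w.ready)
}

// wait blocks until warm-up finishes or the caller's context expires.
func (w *warmer) wait(ctx context.Context) error {
	select {
	case <-w.ready:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	w := newWarmer()
	go w.warm()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	if err := w.wait(ctx); err != nil {
		fmt.Println("gave up waiting:", err)
		return
	}
	fmt.Println("cache is warm")
}
```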
@@ -28,7 +28,7 @@ func TestTryPruneDir_CachedNotExpired(t *testing.T) {
	root := fmt.Sprintf("%#x", sc.BlockRoot())
	// This slot is right on the edge of what would need to be pruned, so by adding it to the cache and
	// skipping any other test setup, we can be certain the hot cache path never touches the filesystem.
	require.NoError(t, pr.slotMap.ensure(root, sc.Slot(), 0))
	require.NoError(t, pr.cache.ensure(root, sc.Slot(), 0))
	pruned, err := pr.tryPruneDir(root, pr.windowSize)
	require.NoError(t, err)
	require.Equal(t, 0, pruned)
@@ -45,7 +45,7 @@ func TestTryPruneDir_CachedExpired(t *testing.T) {
		require.NoError(t, err)
		root := fmt.Sprintf("%#x", sc.BlockRoot())
		require.NoError(t, fs.Mkdir(root, directoryPermissions)) // make empty directory
		require.NoError(t, pr.slotMap.ensure(root, sc.Slot(), 0))
		require.NoError(t, pr.cache.ensure(root, sc.Slot(), 0))
		pruned, err := pr.tryPruneDir(root, slot+1)
		require.NoError(t, err)
		require.Equal(t, 0, pruned)
@@ -63,7 +63,7 @@ func TestTryPruneDir_CachedExpired(t *testing.T) {

		// check that the root->slot is cached
		root := fmt.Sprintf("%#x", scs[0].BlockRoot())
		cs, cok := bs.pruner.slotMap.slot(root)
		cs, cok := bs.pruner.cache.slot(root)
		require.Equal(t, true, cok)
		require.Equal(t, slot, cs)

@@ -95,12 +95,12 @@ func TestTryPruneDir_SlotFromFile(t *testing.T) {

		// check that the root->slot is cached
		root := fmt.Sprintf("%#x", scs[0].BlockRoot())
		cs, ok := bs.pruner.slotMap.slot(root)
		cs, ok := bs.pruner.cache.slot(root)
		require.Equal(t, true, ok)
		require.Equal(t, slot, cs)
		// evict it from the cache so that we trigger the file read path
		bs.pruner.slotMap.evict(root)
		_, ok = bs.pruner.slotMap.slot(root)
		bs.pruner.cache.evict(root)
		_, ok = bs.pruner.cache.slot(root)
		require.Equal(t, false, ok)

		// ensure that we see the saved files in the filesystem
@@ -119,7 +119,7 @@ func TestTryPruneDir_SlotFromFile(t *testing.T) {
		fs, bs, err := NewEphemeralBlobStorageWithFs(t)
		require.NoError(t, err)
		// Set slot equal to the window size, so it should be retained.
		var slot primitives.Slot = bs.pruner.windowSize
		slot := bs.pruner.windowSize
		_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, 2)
		scs, err := verification.BlobSidecarSliceNoop(sidecars)
		require.NoError(t, err)
@@ -129,8 +129,8 @@ func TestTryPruneDir_SlotFromFile(t *testing.T) {

		// Evict slot mapping from the cache so that we trigger the file read path.
		root := fmt.Sprintf("%#x", scs[0].BlockRoot())
		bs.pruner.slotMap.evict(root)
		_, ok := bs.pruner.slotMap.slot(root)
		bs.pruner.cache.evict(root)
		_, ok := bs.pruner.cache.slot(root)
		require.Equal(t, false, ok)

		// Ensure that we see the saved files in the filesystem.
@@ -243,10 +243,8 @@ func TestListDir(t *testing.T) {
	}
	blobWithSszAndTmp := dirFiles{name: "0x1234567890", isDir: true,
		children: []dirFiles{{name: "5.ssz"}, {name: "0.part"}}}
	fsLayout.children = append(fsLayout.children, notABlob)
	fsLayout.children = append(fsLayout.children, childlessBlob)
	fsLayout.children = append(fsLayout.children, blobWithSsz)
	fsLayout.children = append(fsLayout.children, blobWithSszAndTmp)
	fsLayout.children = append(fsLayout.children,
		notABlob, childlessBlob, blobWithSsz, blobWithSszAndTmp)

	topChildren := make([]string, len(fsLayout.children))
	for i := range fsLayout.children {
@@ -282,10 +280,7 @@ func TestListDir(t *testing.T) {
			dirPath:  ".",
			expected: []string{notABlob.name},
			filter: func(s string) bool {
				if s == notABlob.name {
					return true
				}
				return false
				return s == notABlob.name
			},
		},
		{
@@ -118,7 +118,7 @@ type HeadAccessDatabase interface {
// SlasherDatabase interface for persisting data related to detecting slashable offenses on Ethereum.
type SlasherDatabase interface {
	io.Closer
	SaveLastEpochsWrittenForValidators(
	SaveLastEpochWrittenForValidators(
		ctx context.Context, epochByValidator map[primitives.ValidatorIndex]primitives.Epoch,
	) error
	SaveAttestationRecordsForValidators(
@@ -20,6 +20,7 @@ go_library(
        "migration.go",
        "migration_archived_index.go",
        "migration_block_slot_index.go",
        "migration_finalized_parent.go",
        "migration_state_validators.go",
        "schema.go",
        "state.go",
@@ -14,6 +14,7 @@ var migrations = []migration{
	migrateArchivedIndex,
	migrateBlockSlotIndex,
	migrateStateValidators,
	migrateFinalizedParent,
}

// RunMigrations defined in the migrations array.
beacon-chain/db/kv/migration_finalized_parent.go (new file, 87 lines)

@@ -0,0 +1,87 @@
package kv

import (
	"bytes"
	"context"
	"fmt"

	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
	bolt "go.etcd.io/bbolt"
)

var migrationFinalizedParent = []byte("parent_bug_32fb183")

func migrateFinalizedParent(ctx context.Context, db *bolt.DB) error {
	if updateErr := db.Update(func(tx *bolt.Tx) error {
		mb := tx.Bucket(migrationsBucket)
		if b := mb.Get(migrationFinalizedParent); bytes.Equal(b, migrationCompleted) {
			return nil // Migration already completed.
		}

		bkt := tx.Bucket(finalizedBlockRootsIndexBucket)
		if bkt == nil {
			return fmt.Errorf("unable to read %s bucket for migration", finalizedBlockRootsIndexBucket)
		}
		bb := tx.Bucket(blocksBucket)
		if bb == nil {
			return fmt.Errorf("unable to read %s bucket for migration", blocksBucket)
		}

		c := bkt.Cursor()
		var slotsWithoutBug primitives.Slot
		maxBugSearch := params.BeaconConfig().SlotsPerEpoch * 10
		for k, v := c.Last(); k != nil; k, v = c.Prev() {
			// check if context is cancelled in between
			if ctx.Err() != nil {
				return ctx.Err()
			}

			idxEntry := &ethpb.FinalizedBlockRootContainer{}
			if err := decode(ctx, v, idxEntry); err != nil {
				return errors.Wrapf(err, "unable to decode finalized block root container for root=%#x", k)
			}
			// Not one of the corrupt values
			if !bytes.Equal(idxEntry.ParentRoot, k) {
				slotsWithoutBug += 1
				if slotsWithoutBug > maxBugSearch {
					break
				}
				continue
			}
			slotsWithoutBug = 0
			log.WithField("root", fmt.Sprintf("%#x", k)).Debug("found index entry with incorrect parent root")

			// Look up full block to get the correct parent root.
			encBlk := bb.Get(k)
			if encBlk == nil {
				return errors.Wrapf(ErrNotFound, "could not find block for corrupt finalized index entry %#x", k)
			}
			blk, err := unmarshalBlock(ctx, encBlk)
			if err != nil {
				return errors.Wrapf(err, "unable to decode block for root=%#x", k)
			}
			// Replace parent root in the index with the correct value and write it back.
			pr := blk.Block().ParentRoot()
			idxEntry.ParentRoot = pr[:]
			idxEnc, err := encode(ctx, idxEntry)
			if err != nil {
				return errors.Wrapf(err, "failed to encode finalized index entry for root=%#x", k)
			}
			if err := bkt.Put(k, idxEnc); err != nil {
				return errors.Wrapf(err, "failed to update finalized index entry for root=%#x", k)
			}
			log.WithField("root", fmt.Sprintf("%#x", k)).
				WithField("parentRoot", fmt.Sprintf("%#x", idxEntry.ParentRoot)).
				Debug("updated corrupt index entry with correct parent")
		}
		// Mark migration complete.
		return mb.Put(migrationFinalizedParent, migrationCompleted)
	}); updateErr != nil {
		log.WithError(updateErr).Errorf("could not run finalized parent root index repair migration")
		return updateErr
	}
	return nil
}
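The migration above walks the finalized-root index newest-first with a bbolt cursor. A small self-contained sketch of that reverse-cursor pattern; the database path and bucket name here are made up:

```go
package main

import (
	"fmt"

	bolt "go.etcd.io/bbolt"
)

// printNewestFirst walks a bucket from the highest key to the lowest, the same
// cursor idiom (c.Last() / c.Prev()) used by the migration above.
func printNewestFirst(db *bolt.DB) error {
	return db.View(func(tx *bolt.Tx) error {
		bkt := tx.Bucket([]byte("finalized-index"))
		if bkt == nil {
			return nil // nothing to scan
		}
		c := bkt.Cursor()
		for k, v := c.Last(); k != nil; k, v = c.Prev() {
			fmt.Printf("key=%#x valueLen=%d\n", k, len(v))
		}
		return nil
	})
}

func main() {
	db, err := bolt.Open("example.db", 0600, nil)
	if err != nil {
		panic(err)
	}
	defer db.Close()
	if err := printNewestFirst(db); err != nil {
		panic(err)
	}
}
```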
@@ -70,12 +70,12 @@ func (s *Store) LastEpochWrittenForValidators(
	return attestedEpochs, err
}

// SaveLastEpochsWrittenForValidators updates the latest epoch a slice
// of validator indices has attested to.
func (s *Store) SaveLastEpochsWrittenForValidators(
// SaveLastEpochWrittenForValidators saves the latest epoch
// that each validator has attested to in the provided map.
func (s *Store) SaveLastEpochWrittenForValidators(
	ctx context.Context, epochByValIndex map[primitives.ValidatorIndex]primitives.Epoch,
) error {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveLastEpochsWrittenForValidators")
	ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveLastEpochWrittenForValidators")
	defer span.End()

	const batchSize = 10000
@@ -157,7 +157,7 @@ func (s *Store) CheckAttesterDoubleVotes(
	attRecordsBkt := tx.Bucket(attestationRecordsBucket)

	encEpoch := encodeTargetEpoch(attToProcess.IndexedAttestation.Data.Target.Epoch)
	localDoubleVotes := []*slashertypes.AttesterDoubleVote{}
	localDoubleVotes := make([]*slashertypes.AttesterDoubleVote, 0)

	for _, valIdx := range attToProcess.IndexedAttestation.AttestingIndices {
		// Check if there is signing root in the database for this combination
@@ -166,7 +166,7 @@ func (s *Store) CheckAttesterDoubleVotes(
		validatorEpochKey := append(encEpoch, encIdx...)
		attRecordsKey := signingRootsBkt.Get(validatorEpochKey)

		// An attestation record key is comprised of a signing root (32 bytes).
		// An attestation record key consists of a signing root (32 bytes).
		if len(attRecordsKey) < attestationRecordKeySize {
			// If there is no signing root for this combination,
			// then there is no double vote. We can continue to the next validator.
@@ -697,7 +697,7 @@ func decodeSlasherChunk(enc []byte) ([]uint16, error) {
}

// Encode attestation record to bytes.
// The output encoded attestation record consists in the signing root concatened with the compressed attestation record.
// The output encoded attestation record consists in the signing root concatenated with the compressed attestation record.
func encodeAttestationRecord(att *slashertypes.IndexedAttestationWrapper) ([]byte, error) {
	if att == nil || att.IndexedAttestation == nil {
		return []byte{}, errors.New("nil proposal record")
@@ -716,7 +716,7 @@ func encodeAttestationRecord(att *slashertypes.IndexedAttestationWrapper) ([]byt
}

// Decode attestation record from bytes.
// The input encoded attestation record consists in the signing root concatened with the compressed attestation record.
// The input encoded attestation record consists in the signing root concatenated with the compressed attestation record.
func decodeAttestationRecord(encoded []byte) (*slashertypes.IndexedAttestationWrapper, error) {
	if len(encoded) < rootSize {
		return nil, fmt.Errorf("wrong length for encoded attestation record, want minimum %d, got %d", rootSize, len(encoded))

@@ -89,7 +89,7 @@ func TestStore_LastEpochWrittenForValidators(t *testing.T) {
	require.NoError(t, err)
	require.Equal(t, 0, len(attestedEpochs))

	err = beaconDB.SaveLastEpochsWrittenForValidators(ctx, epochsByValidator)
	err = beaconDB.SaveLastEpochWrittenForValidators(ctx, epochsByValidator)
	require.NoError(t, err)

	retrievedEpochs, err := beaconDB.LastEpochWrittenForValidators(ctx, indices)
@@ -10,7 +10,7 @@ import (
)

// TestCleanup ensures that the cleanup function unregisters the prometheus.Collection
// also tests the interchangability of the explicit prometheus Register/Unregister
// also tests the interchangeability of the explicit prometheus Register/Unregister
// and the implicit methods within the collector implementation
func TestCleanup(t *testing.T) {
	ctx := context.Background()
@@ -32,11 +32,11 @@ func TestCleanup(t *testing.T) {
	assert.Equal(t, true, unregistered, "prometheus.Unregister failed to unregister PowchainCollector on final cleanup")
}

// TestCancelation tests that canceling the context passed into
// TestCancellation tests that canceling the context passed into
// NewPowchainCollector cleans everything up as expected. This
// does come at the cost of an extra channel cluttering up
// PowchainCollector, just for this test.
func TestCancelation(t *testing.T) {
func TestCancellation(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	pc, err := NewPowchainCollector(ctx)
	assert.NoError(t, err, "Unexpected error calling NewPowchainCollector")
@@ -707,6 +707,7 @@ func (b *BeaconNode) registerP2P(cliCtx *cli.Context) error {
		PrivateKey:   cliCtx.String(cmd.P2PPrivKey.Name),
		StaticPeerID: cliCtx.Bool(cmd.P2PStaticID.Name),
		MetaDataDir:  cliCtx.String(cmd.P2PMetadata.Name),
		QUICPort:     cliCtx.Uint(cmd.P2PQUICPort.Name),
		TCPPort:      cliCtx.Uint(cmd.P2PTCPPort.Name),
		UDPPort:      cliCtx.Uint(cmd.P2PUDPPort.Name),
		MaxPeers:     cliCtx.Uint(cmd.P2PMaxPeers.Name),
@@ -90,6 +90,7 @@ go_library(
        "@com_github_libp2p_go_libp2p//core/peerstore:go_default_library",
        "@com_github_libp2p_go_libp2p//core/protocol:go_default_library",
        "@com_github_libp2p_go_libp2p//p2p/security/noise:go_default_library",
        "@com_github_libp2p_go_libp2p//p2p/transport/quic:go_default_library",
        "@com_github_libp2p_go_libp2p//p2p/transport/tcp:go_default_library",
        "@com_github_libp2p_go_libp2p_mplex//:go_default_library",
        "@com_github_libp2p_go_libp2p_pubsub//:go_default_library",
@@ -24,6 +24,7 @@ type Config struct {
	PrivateKey  string
	DataDir     string
	MetaDataDir string
	QUICPort    uint
	TCPPort     uint
	UDPPort     uint
	MaxPeers    uint
@@ -39,6 +39,11 @@ const (
	udp6
)

type quicProtocol uint16

// quicProtocol is the "quic" key, which holds the QUIC port of the node.
func (quicProtocol) ENRKey() string { return "quic" }

// RefreshENR uses an epoch to refresh the enr entry for our node
// with the tracked committee ids for the epoch, allowing our node
// to be dynamically discoverable by others given our tracked committee ids.
@@ -100,14 +105,15 @@ func (s *Service) RefreshENR() {

// listen for new nodes watches for new nodes in the network and adds them to the peerstore.
func (s *Service) listenForNewNodes() {
	iterator := s.dv5Listener.RandomNodes()
	iterator = enode.Filter(iterator, s.filterPeer)
	iterator := enode.Filter(s.dv5Listener.RandomNodes(), s.filterPeer)
	defer iterator.Close()

	for {
		// Exit if service's context is canceled
		// Exit if service's context is canceled.
		if s.ctx.Err() != nil {
			break
		}

		if s.isPeerAtLimit(false /* inbound */) {
			// Pause the main loop for a period to stop looking
			// for new peers.
@@ -115,16 +121,22 @@ func (s *Service) listenForNewNodes() {
			time.Sleep(pollingPeriod)
			continue
		}
		exists := iterator.Next()
		if !exists {

		if exists := iterator.Next(); !exists {
			break
		}

		node := iterator.Node()
		peerInfo, _, err := convertToAddrInfo(node)
		if err != nil {
			log.WithError(err).Error("Could not convert to peer info")
			continue
		}

		if peerInfo == nil {
			continue
		}

		// Make sure that peer is not dialed too often, for each connection attempt there's a backoff period.
		s.Peers().RandomizeBackOff(peerInfo.ID)
		go func(info *peer.AddrInfo) {
@@ -167,8 +179,7 @@ func (s *Service) createListener(

	// Listen to all network interfaces
	// for both ip protocols.
	networkVersion := "udp"
	conn, err := net.ListenUDP(networkVersion, udpAddr)
	conn, err := net.ListenUDP("udp", udpAddr)
	if err != nil {
		return nil, errors.Wrap(err, "could not listen to UDP")
	}
@@ -178,6 +189,7 @@ func (s *Service) createListener(
		ipAddr,
		int(s.cfg.UDPPort),
		int(s.cfg.TCPPort),
		int(s.cfg.QUICPort),
	)
	if err != nil {
		return nil, errors.Wrap(err, "could not create local node")
@@ -209,7 +221,7 @@ func (s *Service) createListener(
func (s *Service) createLocalNode(
	privKey *ecdsa.PrivateKey,
	ipAddr net.IP,
	udpPort, tcpPort int,
	udpPort, tcpPort, quicPort int,
) (*enode.LocalNode, error) {
	db, err := enode.OpenDB("")
	if err != nil {
@@ -220,9 +232,13 @@ func (s *Service) createLocalNode(
	ipEntry := enr.IP(ipAddr)
	udpEntry := enr.UDP(udpPort)
	tcpEntry := enr.TCP(tcpPort)
	quicEntry := quicProtocol(quicPort)

	localNode.Set(ipEntry)
	localNode.Set(udpEntry)
	localNode.Set(tcpEntry)
	localNode.Set(quicEntry)

	localNode.SetFallbackIP(ipAddr)
	localNode.SetFallbackUDP(udpPort)

@@ -277,7 +293,7 @@ func (s *Service) startDiscoveryV5(
// filterPeer validates each node that we retrieve from our dht. We
// try to ascertain that the peer can be a valid protocol peer.
// Validity Conditions:
// 1. Peer has a valid IP and TCP port set in their enr.
// 1. Peer has a valid IP and a (QUIC and/or TCP) port set in their enr.
// 2. Peer hasn't been marked as 'bad'.
// 3. Peer is not currently active or connected.
// 4. Peer is ready to receive incoming connections.
@@ -294,17 +310,13 @@ func (s *Service) filterPeer(node *enode.Node) bool {
		return false
	}

	// Ignore nodes with their TCP ports not set.
	if err := node.Record().Load(enr.WithEntry("tcp", new(enr.TCP))); err != nil {
		if !enr.IsNotFound(err) {
			log.WithError(err).Debug("Could not retrieve tcp port")
		}
	peerData, multiAddrs, err := convertToAddrInfo(node)
	if err != nil {
		log.WithError(err).Debug("Could not convert to peer data")
		return false
	}

	peerData, multiAddr, err := convertToAddrInfo(node)
	if err != nil {
		log.WithError(err).Debug("Could not convert to peer data")
	if len(multiAddrs) == 0 {
		return false
	}

@@ -337,6 +349,9 @@ func (s *Service) filterPeer(node *enode.Node) bool {
		}
	}

	// If the peer has 2 multiaddrs, favor the QUIC address, which is in first position.
	multiAddr := multiAddrs[0]

	// Add peer to peer handler.
	s.peers.Add(nodeENR, peerData.ID, multiAddr, network.DirUnknown)

@@ -380,11 +395,11 @@ func PeersFromStringAddrs(addrs []string) ([]ma.Multiaddr, error) {
		if err != nil {
			return nil, errors.Wrapf(err, "Could not get enode from string")
		}
		addr, err := convertToSingleMultiAddr(enodeAddr)
		nodeAddrs, err := convertToMultiAddrs(enodeAddr)
		if err != nil {
			return nil, errors.Wrapf(err, "Could not get multiaddr")
		}
		allAddrs = append(allAddrs, addr)
		allAddrs = append(allAddrs, nodeAddrs...)
	}
	return allAddrs, nil
}
@@ -419,45 +434,141 @@ func parseGenericAddrs(addrs []string) (enodeString, multiAddrString []string) {
}

func convertToMultiAddr(nodes []*enode.Node) []ma.Multiaddr {
	var multiAddrs []ma.Multiaddr
	// Expect each node to have a TCP and a QUIC address.
	multiAddrs := make([]ma.Multiaddr, 0, 2*len(nodes))

	for _, node := range nodes {
		// ignore nodes with no ip address stored
		// Skip nodes with no ip address stored.
		if node.IP() == nil {
			continue
		}
		multiAddr, err := convertToSingleMultiAddr(node)

		// Get up to two multiaddrs (TCP and QUIC) for each node.
		nodeMultiAddrs, err := convertToMultiAddrs(node)
		if err != nil {
			log.WithError(err).Error("Could not convert to multiAddr")
			log.WithError(err).Errorf("Could not convert to multiAddr node %s", node)
			continue
		}
		multiAddrs = append(multiAddrs, multiAddr)

		multiAddrs = append(multiAddrs, nodeMultiAddrs...)
	}

	return multiAddrs
}

func convertToAddrInfo(node *enode.Node) (*peer.AddrInfo, ma.Multiaddr, error) {
	multiAddr, err := convertToSingleMultiAddr(node)
func convertToAddrInfo(node *enode.Node) (*peer.AddrInfo, []ma.Multiaddr, error) {
	multiAddrs, err := convertToMultiAddrs(node)
	if err != nil {
		return nil, nil, err
	}
	info, err := peer.AddrInfoFromP2pAddr(multiAddr)
	if err != nil {
		return nil, nil, err

	if len(multiAddrs) == 0 {
		return nil, nil, nil
	}
	return info, multiAddr, nil

	infos, err := peer.AddrInfosFromP2pAddrs(multiAddrs...)
	if err != nil {
		return nil, nil, errors.Wrapf(err, "could not convert to peer info: %v", multiAddrs)
	}

	if len(infos) > 1 {
		return nil, nil, errors.Errorf("infos contains %v elements, expected not more than 1", len(infos))
	}

	if len(infos) == 0 {
		return nil, multiAddrs, nil
	}

	return &infos[0], multiAddrs, nil
}

func convertToSingleMultiAddr(node *enode.Node) (ma.Multiaddr, error) {
// convertToMultiAddrs converts an enode.Node to a list of multiaddrs.
// If the node has a both a QUIC and a TCP port set in their ENR, then
// the multiaddr corresponding to the QUIC port is added first, followed
// by the multiaddr corresponding to the TCP port.
func convertToMultiAddrs(node *enode.Node) ([]ma.Multiaddr, error) {
	multiaddrs := make([]ma.Multiaddr, 0, 2)

	// Retrieve the node public key.
	pubkey := node.Pubkey()
	assertedKey, err := ecdsaprysm.ConvertToInterfacePubkey(pubkey)
	if err != nil {
		return nil, errors.Wrap(err, "could not get pubkey")
	}

	// Compute the node ID from the public key.
	id, err := peer.IDFromPublicKey(assertedKey)
	if err != nil {
		return nil, errors.Wrap(err, "could not get peer id")
	}
	return multiAddressBuilderWithID(node.IP().String(), "tcp", uint(node.TCP()), id)

	// If the QUIC entry is present in the ENR, build the corresponding multiaddress.
	port, ok, err := getPort(node, quic)
	if err != nil {
		return nil, errors.Wrap(err, "could not get QUIC port")
	}

	if ok {
		addr, err := multiAddressBuilderWithID(node.IP(), quic, port, id)
		if err != nil {
			return nil, errors.Wrap(err, "could not build QUIC address")
		}

		multiaddrs = append(multiaddrs, addr)
	}

	// If the TCP entry is present in the ENR, build the corresponding multiaddress.
	port, ok, err = getPort(node, tcp)
	if err != nil {
		return nil, errors.Wrap(err, "could not get TCP port")
	}

	if ok {
		addr, err := multiAddressBuilderWithID(node.IP(), tcp, port, id)
		if err != nil {
			return nil, errors.Wrap(err, "could not build TCP address")
		}

		multiaddrs = append(multiaddrs, addr)
	}

	return multiaddrs, nil
}

// getPort retrieves the port for a given node and protocol, as well as a boolean
// indicating whether the port was found, and an error
func getPort(node *enode.Node, protocol internetProtocol) (uint, bool, error) {
	var (
		port uint
		err  error
	)

	switch protocol {
	case tcp:
		var entry enr.TCP
		err = node.Load(&entry)
		port = uint(entry)
	case udp:
		var entry enr.UDP
		err = node.Load(&entry)
		port = uint(entry)
	case quic:
		var entry quicProtocol
		err = node.Load(&entry)
		port = uint(entry)
	default:
		return 0, false, errors.Errorf("invalid protocol: %v", protocol)
	}

	if enr.IsNotFound(err) {
		return port, false, nil
	}

	if err != nil {
		return 0, false, errors.Wrap(err, "could not get port")
	}

	return port, true, nil
}
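convertToMultiAddrs above emits a QUIC multiaddr before a TCP one when both ports are present in the ENR. A minimal sketch of what those two address forms look like with go-multiaddr; the IP and ports are made up, and the /quic-v1 component assumes a reasonably recent go-multiaddr:

```go
package main

import (
	"fmt"

	ma "github.com/multiformats/go-multiaddr"
)

func main() {
	// TCP and QUIC multiaddrs for the same host; QUIC rides on a UDP port.
	tcpAddr, err := ma.NewMultiaddr("/ip4/192.0.2.1/tcp/13000")
	if err != nil {
		panic(err)
	}
	quicAddr, err := ma.NewMultiaddr("/ip4/192.0.2.1/udp/13000/quic-v1")
	if err != nil {
		panic(err)
	}

	// A dialer that prefers QUIC would try quicAddr first, mirroring the
	// "QUIC address first" ordering documented for convertToMultiAddrs.
	fmt.Println(quicAddr, tcpAddr)
}
```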
func convertToUdpMultiAddr(node *enode.Node) ([]ma.Multiaddr, error) {
@@ -475,14 +586,14 @@ func convertToUdpMultiAddr(node *enode.Node) ([]ma.Multiaddr, error) {
	var ip4 enr.IPv4
	var ip6 enr.IPv6
	if node.Load(&ip4) == nil {
		address, ipErr := multiAddressBuilderWithID(net.IP(ip4).String(), "udp", uint(node.UDP()), id)
		address, ipErr := multiAddressBuilderWithID(net.IP(ip4), udp, uint(node.UDP()), id)
		if ipErr != nil {
			return nil, errors.Wrap(ipErr, "could not build IPv4 address")
		}
		addresses = append(addresses, address)
	}
	if node.Load(&ip6) == nil {
		address, ipErr := multiAddressBuilderWithID(net.IP(ip6).String(), "udp", uint(node.UDP()), id)
		address, ipErr := multiAddressBuilderWithID(net.IP(ip6), udp, uint(node.UDP()), id)
		if ipErr != nil {
			return nil, errors.Wrap(ipErr, "could not build IPv6 address")
		}
@@ -166,8 +166,9 @@ func TestCreateLocalNode(t *testing.T) {
		t.Run(tt.name, func(t *testing.T) {
			// Define ports.
			const (
				udpPort = 2000
				tcpPort = 3000
				udpPort  = 2000
				tcpPort  = 3000
				quicPort = 3000
			)

			// Create a private key.
@@ -180,7 +181,7 @@ func TestCreateLocalNode(t *testing.T) {
				cfg: tt.cfg,
			}

			localNode, err := service.createLocalNode(privKey, address, udpPort, tcpPort)
			localNode, err := service.createLocalNode(privKey, address, udpPort, tcpPort, quicPort)
			if tt.expectedError {
				require.NotNil(t, err)
				return
@@ -237,7 +238,7 @@ func TestMultiAddrsConversion_InvalidIPAddr(t *testing.T) {
		genesisTime:           time.Now(),
		genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
	}
	node, err := s.createLocalNode(pkey, addr, 0, 0)
	node, err := s.createLocalNode(pkey, addr, 0, 0, 0)
	require.NoError(t, err)
	multiAddr := convertToMultiAddr([]*enode.Node{node.Node()})
	assert.Equal(t, 0, len(multiAddr), "Invalid ip address converted successfully")
@@ -248,8 +249,9 @@ func TestMultiAddrConversion_OK(t *testing.T) {
	ipAddr, pkey := createAddrAndPrivKey(t)
	s := &Service{
		cfg: &Config{
			TCPPort: 0,
			UDPPort: 0,
			UDPPort:  2000,
			TCPPort:  3000,
			QUICPort: 3000,
		},
		genesisTime:           time.Now(),
		genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),

@@ -28,7 +28,8 @@ import (
)

func TestStartDiscv5_DifferentForkDigests(t *testing.T) {
	port := 2000
	const port = 2000

	ipAddr, pkey := createAddrAndPrivKey(t)
	genesisTime := time.Now()
	genesisValidatorsRoot := make([]byte, fieldparams.RootLength)
@@ -53,7 +54,7 @@ func TestStartDiscv5_DifferentForkDigests(t *testing.T) {

	var listeners []*discover.UDPv5
	for i := 1; i <= 5; i++ {
		port = 3000 + i
		port := 3000 + i
		cfg.UDPPort = uint(port)
		ipAddr, pkey := createAddrAndPrivKey(t)

@@ -98,13 +99,14 @@ func TestStartDiscv5_DifferentForkDigests(t *testing.T) {
	s.genesisTime = genesisTime
	s.genesisValidatorsRoot = make([]byte, 32)
	s.dv5Listener = lastListener
	var addrs []ma.Multiaddr

	for _, n := range nodes {
		if s.filterPeer(n) {
			addr, err := convertToSingleMultiAddr(n)
	addrs := make([]ma.Multiaddr, 0)

	for _, node := range nodes {
		if s.filterPeer(node) {
			nodeAddrs, err := convertToMultiAddrs(node)
			require.NoError(t, err)
			addrs = append(addrs, addr)
			addrs = append(addrs, nodeAddrs...)
		}
	}

@@ -114,10 +116,11 @@ func TestStartDiscv5_DifferentForkDigests(t *testing.T) {
}

func TestStartDiscv5_SameForkDigests_DifferentNextForkData(t *testing.T) {
	const port = 2000

	params.SetupTestConfigCleanup(t)
	hook := logTest.NewGlobal()
	logrus.SetLevel(logrus.TraceLevel)
	port := 2000
	ipAddr, pkey := createAddrAndPrivKey(t)
	genesisTime := time.Now()
	genesisValidatorsRoot := make([]byte, 32)
@@ -138,7 +141,7 @@ func TestStartDiscv5_SameForkDigests_DifferentNextForkData(t *testing.T) {

	var listeners []*discover.UDPv5
	for i := 1; i <= 5; i++ {
		port = 3000 + i
		port := 3000 + i
		cfg.UDPPort = uint(port)
		ipAddr, pkey := createAddrAndPrivKey(t)

@@ -188,13 +191,13 @@ func TestStartDiscv5_SameForkDigests_DifferentNextForkData(t *testing.T) {
	s.genesisTime = genesisTime
	s.genesisValidatorsRoot = make([]byte, 32)
	s.dv5Listener = lastListener
|
||||
var addrs []ma.Multiaddr
|
||||
addrs := make([]ma.Multiaddr, 0, len(nodes))
|
||||
|
||||
for _, n := range nodes {
|
||||
if s.filterPeer(n) {
|
||||
addr, err := convertToSingleMultiAddr(n)
|
||||
for _, node := range nodes {
|
||||
if s.filterPeer(node) {
|
||||
nodeAddrs, err := convertToMultiAddrs(node)
|
||||
require.NoError(t, err)
|
||||
addrs = append(addrs, addr)
|
||||
addrs = append(addrs, nodeAddrs...)
|
||||
}
|
||||
}
|
||||
if len(addrs) == 0 {
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package p2p
|
||||
|
||||
import (
|
||||
"net"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
@@ -12,32 +13,32 @@ import (
|
||||
var log = logrus.WithField("prefix", "p2p")
|
||||
|
||||
func logIPAddr(id peer.ID, addrs ...ma.Multiaddr) {
|
||||
var correctAddr ma.Multiaddr
|
||||
for _, addr := range addrs {
|
||||
if strings.Contains(addr.String(), "/ip4/") || strings.Contains(addr.String(), "/ip6/") {
|
||||
correctAddr = addr
|
||||
break
|
||||
if !(strings.Contains(addr.String(), "/ip4/") || strings.Contains(addr.String(), "/ip6/")) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
if correctAddr != nil {
|
||||
|
||||
log.WithField(
|
||||
"multiAddr",
|
||||
correctAddr.String()+"/p2p/"+id.String(),
|
||||
addr.String()+"/p2p/"+id.String(),
|
||||
).Info("Node started p2p server")
|
||||
}
|
||||
}
|
||||
|
||||
func logExternalIPAddr(id peer.ID, addr string, port uint) {
|
||||
func logExternalIPAddr(id peer.ID, addr string, tcpPort, quicPort uint) {
|
||||
if addr != "" {
|
||||
multiAddr, err := MultiAddressBuilder(addr, port)
|
||||
multiAddrs, err := MultiAddressBuilder(net.ParseIP(addr), tcpPort, quicPort)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not create multiaddress")
|
||||
return
|
||||
}
|
||||
log.WithField(
|
||||
"multiAddr",
|
||||
multiAddr.String()+"/p2p/"+id.String(),
|
||||
).Info("Node started external p2p server")
|
||||
|
||||
for _, multiAddr := range multiAddrs {
|
||||
log.WithField(
|
||||
"multiAddr",
|
||||
multiAddr.String()+"/p2p/"+id.String(),
|
||||
).Info("Node started external p2p server")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -11,40 +11,62 @@ import (
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/p2p/security/noise"
"github.com/libp2p/go-libp2p/p2p/transport/tcp"
libp2pquic "github.com/libp2p/go-libp2p/p2p/transport/quic"
libp2ptcp "github.com/libp2p/go-libp2p/p2p/transport/tcp"
gomplex "github.com/libp2p/go-mplex"
ma "github.com/multiformats/go-multiaddr"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/config/features"
ecdsaprysm "github.com/prysmaticlabs/prysm/v5/crypto/ecdsa"

"github.com/prysmaticlabs/prysm/v5/runtime/version"
)

type internetProtocol string

const (
udp = "udp"
tcp = "tcp"
quic = "quic"
)

// MultiAddressBuilder takes in an ip address string and port to produce a go multiaddr format.
func MultiAddressBuilder(ipAddr string, port uint) (ma.Multiaddr, error) {
parsedIP := net.ParseIP(ipAddr)
if parsedIP.To4() == nil && parsedIP.To16() == nil {
return nil, errors.Errorf("invalid ip address provided: %s", ipAddr)
func MultiAddressBuilder(ip net.IP, tcpPort, quicPort uint) ([]ma.Multiaddr, error) {
ipType, err := extractIpType(ip)
if err != nil {
return nil, errors.Wrap(err, "unable to determine IP type")
}
if parsedIP.To4() != nil {
return ma.NewMultiaddr(fmt.Sprintf("/ip4/%s/tcp/%d", ipAddr, port))

// Example: /ip4/1.2.3.4/udp/5678/quic-v1
multiAddrQUIC, err := ma.NewMultiaddr(fmt.Sprintf("/%s/%s/udp/%d/quic-v1", ipType, ip, quicPort))
if err != nil {
return nil, errors.Wrapf(err, "cannot produce QUIC multiaddr format from %s:%d", ip, quicPort)
}
return ma.NewMultiaddr(fmt.Sprintf("/ip6/%s/tcp/%d", ipAddr, port))

// Example: /ip4/1.2.3.4/tcp/5678
multiaddrStr := fmt.Sprintf("/%s/%s/tcp/%d", ipType, ip, tcpPort)
multiAddrTCP, err := ma.NewMultiaddr(multiaddrStr)
if err != nil {
return nil, errors.Wrapf(err, "cannot produce TCP multiaddr format from %s:%d", ip, tcpPort)
}

return []ma.Multiaddr{multiAddrTCP, multiAddrQUIC}, nil
}

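A minimal usage sketch (not part of the diff), assuming illustrative port values; it is written as if it sat in the same file, reusing its imports, and the printed strings follow directly from the fmt.Sprintf templates above.

// Prints the pair of listen addresses produced for a host serving TCP and QUIC
// on port 13000 (TCP address first, then the quic-v1 address).
func ExampleMultiAddressBuilder() {
	addrs, err := MultiAddressBuilder(net.ParseIP("192.168.0.1"), 13000, 13000)
	if err != nil {
		panic(err)
	}
	for _, addr := range addrs {
		fmt.Println(addr)
	}
	// Prints:
	// /ip4/192.168.0.1/tcp/13000
	// /ip4/192.168.0.1/udp/13000/quic-v1
}
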
// buildOptions for the libp2p host.
func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) ([]libp2p.Option, error) {
cfg := s.cfg
listen, err := MultiAddressBuilder(ip.String(), cfg.TCPPort)
multiaddrs, err := MultiAddressBuilder(ip, cfg.TCPPort, cfg.QUICPort)
if err != nil {
return nil, errors.Wrapf(err, "cannot produce multiaddr format from %s:%d", ip.String(), cfg.TCPPort)
return nil, errors.Wrapf(err, "cannot produce multiaddr format from %s:%d", ip, cfg.TCPPort)
}
if cfg.LocalIP != "" {
if net.ParseIP(cfg.LocalIP) == nil {
localIP := net.ParseIP(cfg.LocalIP)
if localIP == nil {
return nil, errors.Wrapf(err, "invalid local ip provided: %s:%d", cfg.LocalIP, cfg.TCPPort)
}

listen, err = MultiAddressBuilder(cfg.LocalIP, cfg.TCPPort)
multiaddrs, err = MultiAddressBuilder(localIP, cfg.TCPPort, cfg.QUICPort)
if err != nil {
return nil, errors.Wrapf(err, "cannot produce multiaddr format from %s:%d", cfg.LocalIP, cfg.TCPPort)
}
@@ -58,14 +80,15 @@ func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) ([]libp2p.Op
return nil, errors.Wrapf(err, "cannot get ID from public key: %s", ifaceKey.GetPublic().Type().String())
}

log.Infof("Running node with peer id of %s ", id.String())
log.WithField("peerId", id).Info("Running node with")

options := []libp2p.Option{
privKeyOption(priKey),
libp2p.ListenAddrs(listen),
libp2p.ListenAddrs(multiaddrs...),
libp2p.UserAgent(version.BuildData()),
libp2p.ConnectionGater(s),
libp2p.Transport(tcp.NewTCPTransport),
libp2p.Transport(libp2pquic.NewTransport),
libp2p.Transport(libp2ptcp.NewTCPTransport),
libp2p.DefaultMuxers,
libp2p.Muxer("/mplex/6.7.0", mplex.DefaultTransport),
libp2p.Security(noise.ID, noise.New),
@@ -75,23 +98,26 @@ func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) ([]libp2p.Op
if cfg.EnableUPnP {
options = append(options, libp2p.NATPortMap()) // Allow to use UPnP
}

if cfg.RelayNodeAddr != "" {
options = append(options, libp2p.AddrsFactory(withRelayAddrs(cfg.RelayNodeAddr)))
} else {
// Disable relay if it has not been set.
options = append(options, libp2p.DisableRelay())
}

if cfg.HostAddress != "" {
options = append(options, libp2p.AddrsFactory(func(addrs []ma.Multiaddr) []ma.Multiaddr {
external, err := MultiAddressBuilder(cfg.HostAddress, cfg.TCPPort)
externalMultiaddrs, err := MultiAddressBuilder(net.ParseIP(cfg.HostAddress), cfg.TCPPort, cfg.QUICPort)
if err != nil {
log.WithError(err).Error("Unable to create external multiaddress")
} else {
addrs = append(addrs, external)
addrs = append(addrs, externalMultiaddrs...)
}
return addrs
}))
}

if cfg.HostDNS != "" {
options = append(options, libp2p.AddrsFactory(func(addrs []ma.Multiaddr) []ma.Multiaddr {
external, err := ma.NewMultiaddr(fmt.Sprintf("/dns4/%s/tcp/%d", cfg.HostDNS, cfg.TCPPort))
@@ -107,21 +133,47 @@ func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) ([]libp2p.Op
if features.Get().DisableResourceManager {
options = append(options, libp2p.ResourceManager(&network.NullResourceManager{}))
}

return options, nil
}

func multiAddressBuilderWithID(ipAddr, protocol string, port uint, id peer.ID) (ma.Multiaddr, error) {
parsedIP := net.ParseIP(ipAddr)
if parsedIP.To4() == nil && parsedIP.To16() == nil {
return nil, errors.Errorf("invalid ip address provided: %s", ipAddr)
func extractIpType(ip net.IP) (string, error) {
if ip.To4() != nil {
return "ip4", nil
}
if id.String() == "" {
return nil, errors.New("empty peer id given")

if ip.To16() != nil {
return "ip6", nil
}
if parsedIP.To4() != nil {
return ma.NewMultiaddr(fmt.Sprintf("/ip4/%s/%s/%d/p2p/%s", ipAddr, protocol, port, id.String()))

return "", errors.Errorf("provided IP address is neither IPv4 nor IPv6: %s", ip)
}

func multiAddressBuilderWithID(ip net.IP, protocol internetProtocol, port uint, id peer.ID) (ma.Multiaddr, error) {
var multiaddrStr string

if id == "" {
return nil, errors.Errorf("empty peer id given: %s", id)
}
return ma.NewMultiaddr(fmt.Sprintf("/ip6/%s/%s/%d/p2p/%s", ipAddr, protocol, port, id.String()))

ipType, err := extractIpType(ip)
if err != nil {
return nil, errors.Wrap(err, "unable to determine IP type")
}

switch protocol {
case udp, tcp:
// Example with UDP: /ip4/1.2.3.4/udp/5678/p2p/16Uiu2HAkum7hhuMpWqFj3yNLcmQBGmThmqw2ohaCRThXQuKU9ohs
// Example with TCP: /ip4/1.2.3.4/tcp/5678/p2p/16Uiu2HAkum7hhuMpWqFj3yNLcmQBGmThmqw2ohaCRThXQuKU9ohs
multiaddrStr = fmt.Sprintf("/%s/%s/%s/%d/p2p/%s", ipType, ip, protocol, port, id)
case quic:
// Example: /ip4/1.2.3.4/udp/5678/quic-v1/p2p/16Uiu2HAkum7hhuMpWqFj3yNLcmQBGmThmqw2ohaCRThXQuKU9ohs
multiaddrStr = fmt.Sprintf("/%s/%s/udp/%d/quic-v1/p2p/%s", ipType, ip, port, id)
default:
return nil, errors.Errorf("unsupported protocol: %s", protocol)
}

return ma.NewMultiaddr(multiaddrStr)
}

// Adds a private key to the libp2p option if the option was provided.
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
"github.com/libp2p/go-libp2p"
|
||||
"github.com/libp2p/go-libp2p/core/crypto"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
mock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
@@ -88,30 +89,34 @@ func TestIPV6Support(t *testing.T) {
|
||||
lNode := enode.NewLocalNode(db, key)
|
||||
mockIPV6 := net.IP{0xff, 0x02, 0xAA, 0, 0x1F, 0, 0x2E, 0, 0, 0x36, 0x45, 0, 0, 0, 0, 0x02}
|
||||
lNode.Set(enr.IP(mockIPV6))
|
||||
ma, err := convertToSingleMultiAddr(lNode.Node())
|
||||
mas, err := convertToMultiAddrs(lNode.Node())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ipv6Exists := false
|
||||
for _, p := range ma.Protocols() {
|
||||
if p.Name == "ip4" {
|
||||
t.Error("Got ip4 address instead of ip6")
|
||||
|
||||
for _, ma := range mas {
|
||||
ipv6Exists := false
|
||||
for _, p := range ma.Protocols() {
|
||||
if p.Name == "ip4" {
|
||||
t.Error("Got ip4 address instead of ip6")
|
||||
}
|
||||
if p.Name == "ip6" {
|
||||
ipv6Exists = true
|
||||
}
|
||||
}
|
||||
if p.Name == "ip6" {
|
||||
ipv6Exists = true
|
||||
if !ipv6Exists {
|
||||
t.Error("Multiaddress did not have ipv6 protocol")
|
||||
}
|
||||
}
|
||||
if !ipv6Exists {
|
||||
t.Error("Multiaddress did not have ipv6 protocol")
|
||||
}
|
||||
}
|
||||
|
||||
func TestDefaultMultiplexers(t *testing.T) {
|
||||
var cfg libp2p.Config
|
||||
_ = cfg
|
||||
p2pCfg := &Config{
|
||||
TCPPort: 2000,
|
||||
UDPPort: 2000,
|
||||
TCPPort: 3000,
|
||||
QUICPort: 3000,
|
||||
StateNotifier: &mock.MockStateNotifier{},
|
||||
}
|
||||
svc := &Service{cfg: p2pCfg}
|
||||
@@ -127,5 +132,57 @@ func TestDefaultMultiplexers(t *testing.T) {
|
||||
|
||||
assert.Equal(t, protocol.ID("/yamux/1.0.0"), cfg.Muxers[0].ID)
|
||||
assert.Equal(t, protocol.ID("/mplex/6.7.0"), cfg.Muxers[1].ID)
|
||||
|
||||
}
|
||||
|
||||
func TestMultiAddressBuilderWithID(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
ip net.IP
|
||||
protocol internetProtocol
|
||||
port uint
|
||||
id string
|
||||
|
||||
expectedMultiaddrStr string
|
||||
}{
|
||||
{
|
||||
name: "UDP",
|
||||
ip: net.IPv4(192, 168, 0, 1),
|
||||
protocol: udp,
|
||||
port: 5678,
|
||||
id: "0025080212210204fb1ebb1aa467527d34306a4794a5171d6516405e720b909b7f816d63aef96a",
|
||||
|
||||
expectedMultiaddrStr: "/ip4/192.168.0.1/udp/5678/p2p/16Uiu2HAkum7hhuMpWqFj3yNLcmQBGmThmqw2ohaCRThXQuKU9ohs",
|
||||
},
|
||||
{
|
||||
name: "TCP",
|
||||
ip: net.IPv4(192, 168, 0, 1),
|
||||
protocol: tcp,
|
||||
port: 5678,
|
||||
id: "0025080212210204fb1ebb1aa467527d34306a4794a5171d6516405e720b909b7f816d63aef96a",
|
||||
|
||||
expectedMultiaddrStr: "/ip4/192.168.0.1/tcp/5678/p2p/16Uiu2HAkum7hhuMpWqFj3yNLcmQBGmThmqw2ohaCRThXQuKU9ohs",
|
||||
},
|
||||
{
|
||||
name: "QUIC",
|
||||
ip: net.IPv4(192, 168, 0, 1),
|
||||
protocol: quic,
|
||||
port: 5678,
|
||||
id: "0025080212210204fb1ebb1aa467527d34306a4794a5171d6516405e720b909b7f816d63aef96a",
|
||||
|
||||
expectedMultiaddrStr: "/ip4/192.168.0.1/udp/5678/quic-v1/p2p/16Uiu2HAkum7hhuMpWqFj3yNLcmQBGmThmqw2ohaCRThXQuKU9ohs",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range testCases {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
id, err := hex.DecodeString(tt.id)
|
||||
require.NoError(t, err)
|
||||
|
||||
actualMultiaddr, err := multiAddressBuilderWithID(tt.ip, tt.protocol, tt.port, peer.ID(id))
|
||||
require.NoError(t, err)
|
||||
|
||||
actualMultiaddrStr := actualMultiaddr.String()
|
||||
require.Equal(t, tt.expectedMultiaddrStr, actualMultiaddrStr)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -22,7 +22,7 @@ func TestGossipParameters(t *testing.T) {
|
||||
pms := pubsubGossipParam()
|
||||
assert.Equal(t, gossipSubMcacheLen, pms.HistoryLength, "gossipSubMcacheLen")
|
||||
assert.Equal(t, gossipSubMcacheGossip, pms.HistoryGossip, "gossipSubMcacheGossip")
|
||||
assert.Equal(t, gossipSubSeenTTL, int(pubsub.TimeCacheDuration.Milliseconds()/pms.HeartbeatInterval.Milliseconds()), "gossipSubSeenTtl")
|
||||
assert.Equal(t, gossipSubSeenTTL, int(pubsub.TimeCacheDuration.Seconds()), "gossipSubSeenTtl")
|
||||
}
|
||||
|
||||
func TestFanoutParameters(t *testing.T) {
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
// Package peers provides information about peers at the Ethereum consensus protocol level.
|
||||
//
|
||||
// "Protocol level" is the level above the network level, so this layer never sees or interacts with
|
||||
// (for example) hosts that are uncontactable due to being down, firewalled, etc. Instead, this works
|
||||
// (for example) hosts that are unreachable due to being down, firewalled, etc. Instead, this works
|
||||
// with peers that are contactable but may or may not be of the correct fork version, not currently
|
||||
// required due to the number of current connections, etc.
|
||||
//
|
||||
@@ -26,6 +26,7 @@ import (
|
||||
"context"
|
||||
"math"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
@@ -59,8 +60,8 @@ const (
|
||||
)
|
||||
|
||||
const (
// ColocationLimit restricts how many peer identities we can see from a single ip or ipv6 subnet.
ColocationLimit = 5
// CollocationLimit restricts how many peer identities we can see from a single ip or ipv6 subnet.
CollocationLimit = 5

// Additional buffer beyond current peer limit, from which we can store the relevant peer statuses.
maxLimitBuffer = 150
@@ -449,6 +450,32 @@ func (p *Status) InboundConnected() []peer.ID {
return peers
}

// InboundConnectedTCP returns the current batch of inbound peers that are connected using TCP.
func (p *Status) InboundConnectedTCP() []peer.ID {
p.store.RLock()
defer p.store.RUnlock()
peers := make([]peer.ID, 0)
for pid, peerData := range p.store.Peers() {
if peerData.ConnState == PeerConnected && peerData.Direction == network.DirInbound && strings.Contains(peerData.Address.String(), "tcp") {
peers = append(peers, pid)
}
}
return peers
}

// InboundConnectedQUIC returns the current batch of inbound peers that are connected using QUIC.
func (p *Status) InboundConnectedQUIC() []peer.ID {
p.store.RLock()
defer p.store.RUnlock()
peers := make([]peer.ID, 0)
for pid, peerData := range p.store.Peers() {
if peerData.ConnState == PeerConnected && peerData.Direction == network.DirInbound && strings.Contains(peerData.Address.String(), "quic") {
peers = append(peers, pid)
}
}
return peers
}

// Outbound returns the current batch of outbound peers.
func (p *Status) Outbound() []peer.ID {
p.store.RLock()
@@ -475,7 +502,33 @@ func (p *Status) OutboundConnected() []peer.ID {
return peers
}

// Active returns the peers that are connecting or connected.
// OutboundConnectedTCP returns the current batch of outbound peers that are connected using TCP.
func (p *Status) OutboundConnectedTCP() []peer.ID {
p.store.RLock()
defer p.store.RUnlock()
peers := make([]peer.ID, 0)
for pid, peerData := range p.store.Peers() {
if peerData.ConnState == PeerConnected && peerData.Direction == network.DirOutbound && strings.Contains(peerData.Address.String(), "tcp") {
peers = append(peers, pid)
}
}
return peers
}

// OutboundConnectedQUIC returns the current batch of outbound peers that are connected using QUIC.
func (p *Status) OutboundConnectedQUIC() []peer.ID {
p.store.RLock()
defer p.store.RUnlock()
peers := make([]peer.ID, 0)
for pid, peerData := range p.store.Peers() {
if peerData.ConnState == PeerConnected && peerData.Direction == network.DirOutbound && strings.Contains(peerData.Address.String(), "quic") {
peers = append(peers, pid)
}
}
return peers
}

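A small standalone sketch (not part of the diff) of the classification rule the four helpers above share: the transport is inferred purely from the textual form of the stored multiaddr.

package peers_test

import (
	"fmt"
	"strings"

	ma "github.com/multiformats/go-multiaddr"
)

// The TCP/QUIC split is keyed on substring matching against the multiaddr string.
func ExampleTransportClassification() {
	quicAddr, _ := ma.NewMultiaddr("/ip4/192.168.1.3/udp/13000/quic-v1")
	tcpAddr, _ := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/13000")

	fmt.Println(strings.Contains(quicAddr.String(), "quic")) // true
	fmt.Println(strings.Contains(tcpAddr.String(), "quic"))  // false
	fmt.Println(strings.Contains(tcpAddr.String(), "tcp"))   // true
}
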
// Active returns the peers that are active (connecting or connected).
|
||||
func (p *Status) Active() []peer.ID {
|
||||
p.store.RLock()
|
||||
defer p.store.RUnlock()
|
||||
@@ -514,7 +567,7 @@ func (p *Status) Disconnected() []peer.ID {
|
||||
return peers
|
||||
}
|
||||
|
||||
// Inactive returns the peers that are disconnecting or disconnected.
|
||||
// Inactive returns the peers that are inactive (disconnecting or disconnected).
|
||||
func (p *Status) Inactive() []peer.ID {
|
||||
p.store.RLock()
|
||||
defer p.store.RUnlock()
|
||||
@@ -548,7 +601,7 @@ func (p *Status) Prune() {
|
||||
p.store.Lock()
|
||||
defer p.store.Unlock()
|
||||
|
||||
// Default to old method if flag isnt enabled.
|
||||
// Default to old method if flag isn't enabled.
|
||||
if !features.Get().EnablePeerScorer {
|
||||
p.deprecatedPrune()
|
||||
return
|
||||
@@ -961,7 +1014,7 @@ func (p *Status) isfromBadIP(pid peer.ID) bool {
|
||||
return true
|
||||
}
|
||||
if val, ok := p.ipTracker[ip.String()]; ok {
|
||||
if val > ColocationLimit {
|
||||
if val > CollocationLimit {
|
||||
return true
|
||||
}
|
||||
}
|
||||
@@ -1012,7 +1065,7 @@ func (p *Status) tallyIPTracker() {
|
||||
}
|
||||
|
||||
func sameIP(firstAddr, secondAddr ma.Multiaddr) bool {
|
||||
// Exit early if we do get nil multiaddresses
|
||||
// Exit early if we do get nil multi-addresses
|
||||
if firstAddr == nil || secondAddr == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -565,7 +565,7 @@ func TestPeerIPTracker(t *testing.T) {
|
||||
|
||||
badIP := "211.227.218.116"
|
||||
var badPeers []peer.ID
|
||||
for i := 0; i < peers.ColocationLimit+10; i++ {
|
||||
for i := 0; i < peers.CollocationLimit+10; i++ {
|
||||
port := strconv.Itoa(3000 + i)
|
||||
addr, err := ma.NewMultiaddr("/ip4/" + badIP + "/tcp/" + port)
|
||||
if err != nil {
|
||||
@@ -1111,6 +1111,74 @@ func TestInbound(t *testing.T) {
|
||||
assert.Equal(t, inbound.String(), result[0].String())
|
||||
}
|
||||
|
||||
func TestInboundConnected(t *testing.T) {
|
||||
p := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{
|
||||
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
Threshold: 0,
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
addr, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/33333")
|
||||
require.NoError(t, err)
|
||||
inbound := createPeer(t, p, addr, network.DirInbound, peers.PeerConnected)
|
||||
createPeer(t, p, addr, network.DirInbound, peers.PeerConnecting)
|
||||
|
||||
result := p.InboundConnected()
|
||||
require.Equal(t, 1, len(result))
|
||||
assert.Equal(t, inbound.String(), result[0].String())
|
||||
}
|
||||
|
||||
func TestInboundConnectedTCP(t *testing.T) {
|
||||
p := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{
|
||||
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
Threshold: 0,
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
addrTCP, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/33333")
|
||||
require.NoError(t, err)
|
||||
|
||||
addrQUIC, err := ma.NewMultiaddr("/ip4/192.168.1.3/udp/13000/quic-v1")
|
||||
require.NoError(t, err)
|
||||
|
||||
inboundTCP := createPeer(t, p, addrTCP, network.DirInbound, peers.PeerConnected)
|
||||
createPeer(t, p, addrQUIC, network.DirInbound, peers.PeerConnected)
|
||||
|
||||
result := p.InboundConnectedTCP()
|
||||
require.Equal(t, 1, len(result))
|
||||
assert.Equal(t, inboundTCP.String(), result[0].String())
|
||||
}
|
||||
|
||||
func TestInboundConnectedQUIC(t *testing.T) {
|
||||
p := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{
|
||||
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
Threshold: 0,
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
addrQUIC, err := ma.NewMultiaddr("/ip4/192.168.1.3/udp/13000/quic-v1")
|
||||
require.NoError(t, err)
|
||||
|
||||
addrTCP, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/33333")
|
||||
require.NoError(t, err)
|
||||
|
||||
inboundQUIC := createPeer(t, p, addrQUIC, network.DirInbound, peers.PeerConnected)
|
||||
createPeer(t, p, addrTCP, network.DirInbound, peers.PeerConnected)
|
||||
|
||||
result := p.InboundConnectedQUIC()
|
||||
require.Equal(t, 1, len(result))
|
||||
assert.Equal(t, inboundQUIC.String(), result[0].String())
|
||||
}
|
||||
|
||||
func TestOutbound(t *testing.T) {
|
||||
p := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
@@ -1130,6 +1198,74 @@ func TestOutbound(t *testing.T) {
|
||||
assert.Equal(t, outbound.String(), result[0].String())
|
||||
}
|
||||
|
||||
func TestOutboundConnected(t *testing.T) {
|
||||
p := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{
|
||||
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
Threshold: 0,
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
addr, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/33333")
|
||||
require.NoError(t, err)
|
||||
inbound := createPeer(t, p, addr, network.DirOutbound, peers.PeerConnected)
|
||||
createPeer(t, p, addr, network.DirOutbound, peers.PeerConnecting)
|
||||
|
||||
result := p.OutboundConnected()
|
||||
require.Equal(t, 1, len(result))
|
||||
assert.Equal(t, inbound.String(), result[0].String())
|
||||
}
|
||||
|
||||
func TestOutboundConnectedTCP(t *testing.T) {
|
||||
p := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{
|
||||
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
Threshold: 0,
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
addrTCP, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/33333")
|
||||
require.NoError(t, err)
|
||||
|
||||
addrQUIC, err := ma.NewMultiaddr("/ip4/192.168.1.3/udp/13000/quic-v1")
|
||||
require.NoError(t, err)
|
||||
|
||||
outboundTCP := createPeer(t, p, addrTCP, network.DirOutbound, peers.PeerConnected)
|
||||
createPeer(t, p, addrQUIC, network.DirOutbound, peers.PeerConnected)
|
||||
|
||||
result := p.OutboundConnectedTCP()
|
||||
require.Equal(t, 1, len(result))
|
||||
assert.Equal(t, outboundTCP.String(), result[0].String())
|
||||
}
|
||||
|
||||
func TestOutboundConnectedQUIC(t *testing.T) {
|
||||
p := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{
|
||||
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
Threshold: 0,
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
addrQUIC, err := ma.NewMultiaddr("/ip4/192.168.1.3/udp/13000/quic-v1")
|
||||
require.NoError(t, err)
|
||||
|
||||
addrTCP, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/33333")
|
||||
require.NoError(t, err)
|
||||
|
||||
outboundQUIC := createPeer(t, p, addrQUIC, network.DirOutbound, peers.PeerConnected)
|
||||
createPeer(t, p, addrTCP, network.DirOutbound, peers.PeerConnected)
|
||||
|
||||
result := p.OutboundConnectedQUIC()
|
||||
require.Equal(t, 1, len(result))
|
||||
assert.Equal(t, outboundQUIC.String(), result[0].String())
|
||||
}
|
||||
|
||||
// addPeer is a helper to add a peer with a given connection state.
|
||||
func addPeer(t *testing.T, p *peers.Status, state peerdata.PeerConnectionState) peer.ID {
|
||||
// Set up some peers with different states
|
||||
|
||||
@@ -3,6 +3,7 @@ package p2p
|
||||
import (
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
@@ -25,7 +26,7 @@ const (
// gossip parameters
gossipSubMcacheLen = 6 // number of windows to retain full messages in cache for `IWANT` responses
gossipSubMcacheGossip = 3 // number of windows to gossip about
gossipSubSeenTTL = 550 // number of heartbeat intervals to retain message IDs
gossipSubSeenTTL = 768 // number of seconds to retain message IDs (2 epochs)

// fanout ttl
gossipSubFanoutTTL = 60000000000 // TTL for fanout maps for topics we are not subscribed to but have published to, in nano seconds

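A quick sanity check on the new value (not part of the diff), assuming mainnet parameters of 32 slots per epoch and 12 seconds per slot:

// Two epochs expressed in seconds, matching gossipSubSeenTTL above.
const (
	slotsPerEpoch      = 32 // mainnet SlotsPerEpoch (assumed)
	secondsPerSlot     = 12 // mainnet SecondsPerSlot (assumed)
	twoEpochsInSeconds = 2 * slotsPerEpoch * secondsPerSlot // 2 * 32 * 12 = 768
)
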
@@ -130,7 +131,7 @@ func (s *Service) peerInspector(peerMap map[peer.ID]*pubsub.PeerScoreSnapshot) {
|
||||
}
|
||||
}
|
||||
|
||||
// Creates a list of pubsub options to configure out router with.
// pubsubOptions creates a list of options to configure our router with.
func (s *Service) pubsubOptions() []pubsub.Option {
psOpts := []pubsub.Option{
pubsub.WithMessageSignaturePolicy(pubsub.StrictNoSign),
@@ -147,9 +148,35 @@ func (s *Service) pubsubOptions() []pubsub.Option {
pubsub.WithGossipSubParams(pubsubGossipParam()),
pubsub.WithRawTracer(gossipTracer{host: s.host}),
}

if len(s.cfg.StaticPeers) > 0 {
directPeersAddrInfos, err := parsePeersEnr(s.cfg.StaticPeers)
if err != nil {
log.WithError(err).Error("Could not add direct peer option")
return psOpts
}
psOpts = append(psOpts, pubsub.WithDirectPeers(directPeersAddrInfos))
}

return psOpts
}

// parsePeersEnr takes a list of raw ENRs and converts them into a list of AddrInfos.
func parsePeersEnr(peers []string) ([]peer.AddrInfo, error) {
addrs, err := PeersFromStringAddrs(peers)
if err != nil {
return nil, fmt.Errorf("Cannot convert peers raw ENRs into multiaddresses: %v", err)
}
if len(addrs) == 0 {
return nil, fmt.Errorf("Converting peers raw ENRs into multiaddresses resulted in an empty list")
}
directAddrInfos, err := peer.AddrInfosFromP2pAddrs(addrs...)
if err != nil {
return nil, fmt.Errorf("Cannot convert peers multiaddresses into AddrInfos: %v", err)
}
return directAddrInfos, nil
}

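A hypothetical helper (not part of the diff, written as if in the same file and reusing its imports) that isolates the data flow used above: raw ENR/multiaddr strings become AddrInfos, which become gossipsub direct peers.

// directPeerOptions sketches how static-peer strings feed gossipsub direct peering.
func directPeerOptions(staticPeers []string) []pubsub.Option {
	if len(staticPeers) == 0 {
		return nil
	}
	infos, err := parsePeersEnr(staticPeers)
	if err != nil {
		log.WithError(err).Error("Could not add direct peer option")
		return nil
	}
	return []pubsub.Option{pubsub.WithDirectPeers(infos)}
}
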
// creates a custom gossipsub parameter set.
|
||||
func pubsubGossipParam() pubsub.GossipSubParams {
|
||||
gParams := pubsub.DefaultGossipSubParams()
|
||||
@@ -165,7 +192,8 @@ func pubsubGossipParam() pubsub.GossipSubParams {
|
||||
// to configure our message id time-cache rather than instantiating
|
||||
// it with a router instance.
|
||||
func setPubSubParameters() {
|
||||
pubsub.TimeCacheDuration = 550 * gossipSubHeartbeatInterval
|
||||
seenTtl := 2 * time.Second * time.Duration(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
|
||||
pubsub.TimeCacheDuration = seenTtl
|
||||
}
|
||||
|
||||
// convert from libp2p's internal schema to a compatible prysm protobuf format.
|
||||
|
||||
@@ -124,31 +124,34 @@ func NewService(ctx context.Context, cfg *Config) (*Service, error) {
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to build p2p options")
|
||||
}
|
||||
|
||||
// Sets mplex timeouts
|
||||
configureMplex()
|
||||
h, err := libp2p.New(opts...)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to create p2p host")
|
||||
return nil, err
|
||||
return nil, errors.Wrapf(err, "failed to create p2p host")
|
||||
}
|
||||
|
||||
s.host = h
|
||||
|
||||
// Gossipsub registration is done before we add in any new peers
|
||||
// due to libp2p's gossipsub implementation not taking into
|
||||
// account previously added peers when creating the gossipsub
|
||||
// object.
|
||||
psOpts := s.pubsubOptions()
|
||||
|
||||
// Set the pubsub global parameters that we require.
|
||||
setPubSubParameters()
|
||||
|
||||
// Reinitialize them in the event we are running a custom config.
|
||||
attestationSubnetCount = params.BeaconConfig().AttestationSubnetCount
|
||||
syncCommsSubnetCount = params.BeaconConfig().SyncCommitteeSubnetCount
|
||||
|
||||
gs, err := pubsub.NewGossipSub(s.ctx, s.host, psOpts...)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to start pubsub")
|
||||
return nil, err
|
||||
return nil, errors.Wrapf(err, "failed to create p2p pubsub")
|
||||
}
|
||||
|
||||
s.pubsub = gs
|
||||
|
||||
s.peers = peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
@@ -213,7 +216,7 @@ func (s *Service) Start() {
|
||||
if len(s.cfg.StaticPeers) > 0 {
|
||||
addrs, err := PeersFromStringAddrs(s.cfg.StaticPeers)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not connect to static peer")
|
||||
log.WithError(err).Error("could not convert ENR to multiaddr")
|
||||
}
|
||||
// Set trusted peers for those that are provided as static addresses.
|
||||
pids := peerIdsFromMultiAddrs(addrs)
|
||||
@@ -232,11 +235,19 @@ func (s *Service) Start() {
|
||||
async.RunEvery(s.ctx, time.Duration(params.BeaconConfig().RespTimeout)*time.Second, s.updateMetrics)
|
||||
async.RunEvery(s.ctx, refreshRate, s.RefreshENR)
|
||||
async.RunEvery(s.ctx, 1*time.Minute, func() {
|
||||
inboundQUICCount := len(s.peers.InboundConnectedQUIC())
|
||||
inboundTCPCount := len(s.peers.InboundConnectedTCP())
|
||||
outboundQUICCount := len(s.peers.OutboundConnectedQUIC())
|
||||
outboundTCPCount := len(s.peers.OutboundConnectedTCP())
|
||||
total := inboundQUICCount + inboundTCPCount + outboundQUICCount + outboundTCPCount
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"inbound": len(s.peers.InboundConnected()),
|
||||
"outbound": len(s.peers.OutboundConnected()),
|
||||
"activePeers": len(s.peers.Active()),
|
||||
}).Info("Peer summary")
|
||||
"inboundQUIC": inboundQUICCount,
|
||||
"inboundTCP": inboundTCPCount,
|
||||
"outboundQUIC": outboundQUICCount,
|
||||
"outboundTCP": outboundTCPCount,
|
||||
"total": total,
|
||||
}).Info("Connected peers")
|
||||
})
|
||||
|
||||
multiAddrs := s.host.Network().ListenAddresses()
|
||||
@@ -244,9 +255,10 @@ func (s *Service) Start() {
|
||||
|
||||
p2pHostAddress := s.cfg.HostAddress
|
||||
p2pTCPPort := s.cfg.TCPPort
|
||||
p2pQUICPort := s.cfg.QUICPort
|
||||
|
||||
if p2pHostAddress != "" {
|
||||
logExternalIPAddr(s.host.ID(), p2pHostAddress, p2pTCPPort)
|
||||
logExternalIPAddr(s.host.ID(), p2pHostAddress, p2pTCPPort, p2pQUICPort)
|
||||
verifyConnectivity(p2pHostAddress, p2pTCPPort, "tcp")
|
||||
}
|
||||
|
||||
|
||||
@@ -102,8 +102,9 @@ func TestService_Start_OnlyStartsOnce(t *testing.T) {
|
||||
|
||||
cs := startup.NewClockSynchronizer()
|
||||
cfg := &Config{
|
||||
TCPPort: 2000,
|
||||
UDPPort: 2000,
|
||||
TCPPort: 3000,
|
||||
QUICPort: 3000,
|
||||
ClockWaiter: cs,
|
||||
}
|
||||
s, err := NewService(context.Background(), cfg)
|
||||
@@ -147,8 +148,9 @@ func TestService_Start_NoDiscoverFlag(t *testing.T) {
|
||||
|
||||
cs := startup.NewClockSynchronizer()
|
||||
cfg := &Config{
|
||||
TCPPort: 2000,
|
||||
UDPPort: 2000,
|
||||
TCPPort: 3000,
|
||||
QUICPort: 3000,
|
||||
StateNotifier: &mock.MockStateNotifier{},
|
||||
NoDiscovery: true, // <-- no s.dv5Listener is created
|
||||
ClockWaiter: cs,
|
||||
|
||||
@@ -93,6 +93,11 @@ func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string,
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if info == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
if err := s.connectWithPeer(ctx, *info); err != nil {
|
||||
|
||||
@@ -66,7 +66,7 @@ func TestStartDiscV5_FindPeersWithSubnet(t *testing.T) {
|
||||
genesisTime := time.Now()
|
||||
|
||||
bootNodeService := &Service{
|
||||
cfg: &Config{TCPPort: 2000, UDPPort: 3000},
|
||||
cfg: &Config{UDPPort: 2000, TCPPort: 3000, QUICPort: 3000},
|
||||
genesisTime: genesisTime,
|
||||
genesisValidatorsRoot: genesisValidatorsRoot,
|
||||
}
|
||||
@@ -89,8 +89,9 @@ func TestStartDiscV5_FindPeersWithSubnet(t *testing.T) {
|
||||
service, err := NewService(ctx, &Config{
|
||||
Discv5BootStrapAddrs: []string{bootNodeENR},
|
||||
MaxPeers: 30,
|
||||
TCPPort: uint(2000 + i),
|
||||
UDPPort: uint(3000 + i),
|
||||
UDPPort: uint(2000 + i),
|
||||
TCPPort: uint(3000 + i),
|
||||
QUICPort: uint(3000 + i),
|
||||
})
|
||||
|
||||
require.NoError(t, err)
|
||||
@@ -133,8 +134,9 @@ func TestStartDiscV5_FindPeersWithSubnet(t *testing.T) {
|
||||
cfg := &Config{
|
||||
Discv5BootStrapAddrs: []string{bootNodeENR},
|
||||
MaxPeers: 30,
|
||||
TCPPort: 2010,
|
||||
UDPPort: 3010,
|
||||
UDPPort: 2010,
|
||||
TCPPort: 3010,
|
||||
QUICPort: 3010,
|
||||
}
|
||||
|
||||
service, err := NewService(ctx, cfg)
|
||||
|
||||
@@ -50,7 +50,7 @@ func ensurePeerConnections(ctx context.Context, h host.Host, peers *peers.Status
|
||||
c := h.Network().ConnsToPeer(p.ID)
|
||||
if len(c) == 0 {
|
||||
if err := connectWithTimeout(ctx, h, p); err != nil {
|
||||
log.WithField("peer", p.ID).WithField("addrs", p.Addrs).WithError(err).Errorf("Failed to reconnect to peer")
|
||||
log.WithField("peer", p.ID).WithField("addrs", p.Addrs).WithError(err).Errorf("failed to reconnect to peer")
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
@@ -15,7 +15,9 @@ go_library(
|
||||
"//beacon-chain/core/altair:go_default_library",
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
"//beacon-chain/core/epoch/precompute:go_default_library",
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
"//beacon-chain/core/validators:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/rpc/eth/shared:go_default_library",
|
||||
"//beacon-chain/rpc/lookup:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
@@ -35,7 +37,10 @@ go_library(
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["handlers_test.go"],
|
||||
srcs = [
|
||||
"handlers_test.go",
|
||||
"service_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//api/server/structs:go_default_library",
|
||||
@@ -43,6 +48,8 @@ go_test(
|
||||
"//beacon-chain/core/altair:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
"//beacon-chain/db/testing:go_default_library",
|
||||
"//beacon-chain/rpc/testutil:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/stategen/mock:go_default_library",
|
||||
|
||||
@@ -18,6 +18,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/altair"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
|
||||
dbutil "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/testing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/testutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
mockstategen "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/stategen/mock"
|
||||
@@ -192,6 +193,7 @@ func BlockRewardTestSetup(t *testing.T, forkName string) (state.BeaconState, int
|
||||
}
|
||||
|
||||
func TestBlockRewards(t *testing.T) {
|
||||
db := dbutil.SetupDB(t)
|
||||
phase0block, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
|
||||
require.NoError(t, err)
|
||||
t.Run("phase 0", func(t *testing.T) {
|
||||
@@ -227,7 +229,10 @@ func TestBlockRewards(t *testing.T) {
|
||||
}},
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
FinalizationFetcher: mockChainService,
|
||||
BlockRewardFetcher: &BlockRewardService{Replayer: mockstategen.NewReplayerBuilder(mockstategen.WithMockState(st))},
|
||||
BlockRewardFetcher: &BlockRewardService{
|
||||
Replayer: mockstategen.NewReplayerBuilder(mockstategen.WithMockState(st)),
|
||||
DB: db,
|
||||
},
|
||||
}
|
||||
|
||||
url := "http://only.the.slot.number.at.the.end.is.important/2"
|
||||
@@ -260,7 +265,10 @@ func TestBlockRewards(t *testing.T) {
|
||||
}},
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
FinalizationFetcher: mockChainService,
|
||||
BlockRewardFetcher: &BlockRewardService{Replayer: mockstategen.NewReplayerBuilder(mockstategen.WithMockState(st))},
|
||||
BlockRewardFetcher: &BlockRewardService{
|
||||
Replayer: mockstategen.NewReplayerBuilder(mockstategen.WithMockState(st)),
|
||||
DB: db,
|
||||
},
|
||||
}
|
||||
|
||||
url := "http://only.the.slot.number.at.the.end.is.important/2"
|
||||
@@ -293,7 +301,10 @@ func TestBlockRewards(t *testing.T) {
|
||||
}},
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
FinalizationFetcher: mockChainService,
|
||||
BlockRewardFetcher: &BlockRewardService{Replayer: mockstategen.NewReplayerBuilder(mockstategen.WithMockState(st))},
|
||||
BlockRewardFetcher: &BlockRewardService{
|
||||
Replayer: mockstategen.NewReplayerBuilder(mockstategen.WithMockState(st)),
|
||||
DB: db,
|
||||
},
|
||||
}
|
||||
|
||||
url := "http://only.the.slot.number.at.the.end.is.important/2"
|
||||
@@ -326,7 +337,10 @@ func TestBlockRewards(t *testing.T) {
|
||||
}},
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
FinalizationFetcher: mockChainService,
|
||||
BlockRewardFetcher: &BlockRewardService{Replayer: mockstategen.NewReplayerBuilder(mockstategen.WithMockState(st))},
|
||||
BlockRewardFetcher: &BlockRewardService{
|
||||
Replayer: mockstategen.NewReplayerBuilder(mockstategen.WithMockState(st)),
|
||||
DB: db,
|
||||
},
|
||||
}
|
||||
|
||||
url := "http://only.the.slot.number.at.the.end.is.important/2"
|
||||
@@ -715,7 +729,9 @@ func TestSyncCommiteeRewards(t *testing.T) {
|
||||
}},
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
FinalizationFetcher: mockChainService,
|
||||
BlockRewardFetcher: &BlockRewardService{Replayer: mockstategen.NewReplayerBuilder(mockstategen.WithMockState(st))},
|
||||
BlockRewardFetcher: &BlockRewardService{
|
||||
Replayer: mockstategen.NewReplayerBuilder(mockstategen.WithMockState(st)),
|
||||
DB: dbutil.SetupDB(t)},
|
||||
}
|
||||
|
||||
t.Run("ok - filtered vals", func(t *testing.T) {
|
||||
|
||||
@@ -8,7 +8,9 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/api/server/structs"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/altair"
|
||||
coreblocks "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/validators"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state/stategen"
|
||||
consensusblocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
@@ -26,6 +28,7 @@ type BlockRewardsFetcher interface {
|
||||
// BlockRewardService implements BlockRewardsFetcher and can be declared to access the underlying functions
|
||||
type BlockRewardService struct {
|
||||
Replayer stategen.ReplayerBuilder
|
||||
DB db.HeadAccessDatabase
|
||||
}
|
||||
|
||||
// GetBlockRewardsData returns the BlockRewards object which is used for the BlockRewardsResponse and ProduceBlockV3.
|
||||
@@ -124,6 +127,22 @@ func (rs *BlockRewardService) GetStateForRewards(ctx context.Context, blk interf
|
||||
// We want to run several block processing functions that update the proposer's balance.
|
||||
// This will allow us to calculate proposer rewards for each operation (atts, slashings etc).
|
||||
// To do this, we replay the state up to the block's slot, but before processing the block.
|
||||
|
||||
// Try getting the state from the next slot cache first.
|
||||
_, prevSlotRoots, err := rs.DB.BlockRootsBySlot(ctx, slots.PrevSlot(blk.Slot()))
|
||||
if err != nil {
|
||||
return nil, &httputil.DefaultJsonError{
|
||||
Message: "Could not get roots for previous slot: " + err.Error(),
|
||||
Code: http.StatusInternalServerError,
|
||||
}
|
||||
}
|
||||
for _, r := range prevSlotRoots {
|
||||
s := transition.NextSlotState(r[:], blk.Slot())
|
||||
if s != nil {
|
||||
return s, nil
|
||||
}
|
||||
}
|
||||
|
||||
st, err := rs.Replayer.ReplayerForSlot(slots.PrevSlot(blk.Slot())).ReplayToSlot(ctx, blk.Slot())
|
||||
if err != nil {
|
||||
return nil, &httputil.DefaultJsonError{
|
||||
|
||||
46 beacon-chain/rpc/eth/rewards/service_test.go (new file)
@@ -0,0 +1,46 @@
|
||||
package rewards
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
|
||||
dbutil "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/testing"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
)
|
||||
|
||||
func TestGetStateForRewards_NextSlotCacheHit(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := dbutil.SetupDB(t)
|
||||
|
||||
st, err := util.NewBeaconStateDeneb()
|
||||
require.NoError(t, err)
|
||||
b := util.HydrateSignedBeaconBlockDeneb(util.NewBeaconBlockDeneb())
|
||||
parent, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.SaveBlock(ctx, parent))
|
||||
|
||||
r, err := parent.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, transition.UpdateNextSlotCache(ctx, r[:], st))
|
||||
|
||||
s := &BlockRewardService{
|
||||
Replayer: nil, // setting to nil because replayer must not be invoked
|
||||
DB: db,
|
||||
}
|
||||
b = util.HydrateSignedBeaconBlockDeneb(util.NewBeaconBlockDeneb())
|
||||
sbb, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
sbb.SetSlot(parent.Block().Slot() + 1)
|
||||
result, err := s.GetStateForRewards(ctx, sbb.Block())
|
||||
require.NoError(t, err)
|
||||
_, lcs := transition.LastCachedState()
|
||||
expected, err := lcs.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
actual, err := result.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, expected, actual)
|
||||
}
|
||||
@@ -4,10 +4,10 @@ go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"aggregator.go",
|
||||
"duties.go",
|
||||
"attester.go",
|
||||
"blocks.go",
|
||||
"construct_generic_block.go",
|
||||
"duties.go",
|
||||
"exit.go",
|
||||
"log.go",
|
||||
"proposer.go",
|
||||
@@ -179,10 +179,10 @@ go_test(
|
||||
timeout = "moderate",
|
||||
srcs = [
|
||||
"aggregator_test.go",
|
||||
"duties_test.go",
|
||||
"attester_test.go",
|
||||
"blocks_test.go",
|
||||
"construct_generic_block_test.go",
|
||||
"duties_test.go",
|
||||
"exit_test.go",
|
||||
"proposer_altair_test.go",
|
||||
"proposer_attestations_test.go",
|
||||
@@ -201,6 +201,7 @@ go_test(
|
||||
"status_mainnet_test.go",
|
||||
"status_test.go",
|
||||
"sync_committee_test.go",
|
||||
"unblinder_test.go",
|
||||
"validator_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
|
||||
@@ -27,11 +27,20 @@ import (
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// builderGetPayloadMissCount tracks the number of misses when validator tries to get a payload from builder
|
||||
var builderGetPayloadMissCount = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "builder_get_payload_miss_count",
|
||||
Help: "The number of get payload misses for validator requests to builder",
|
||||
})
|
||||
var (
|
||||
builderValueGweiGauge = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "builder_value_gwei",
|
||||
Help: "Builder payload value in gwei",
|
||||
})
|
||||
localValueGweiGauge = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "local_value_gwei",
|
||||
Help: "Local payload value in gwei",
|
||||
})
|
||||
builderGetPayloadMissCount = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "builder_get_payload_miss_count",
|
||||
Help: "The number of get payload misses for validator requests to builder",
|
||||
})
|
||||
)
|
||||
|
||||
// emptyTransactionsRoot represents the returned value of ssz.TransactionsRoot([][]byte{}) and
|
||||
// can be used as a constant to avoid recomputing this value in every call.
|
||||
@@ -92,6 +101,8 @@ func setExecutionData(ctx context.Context, blk interfaces.SignedBeaconBlock, loc
|
||||
"builderBoostFactor": builderBoostFactor,
|
||||
}).Warn("Proposer: both local boost and builder boost are using non default values")
|
||||
}
|
||||
builderValueGweiGauge.Set(float64(builderValueGwei))
|
||||
localValueGweiGauge.Set(float64(localValueGwei))
|
||||
|
||||
// If we can't get the builder value, just use local block.
|
||||
if higherValueBuilder && withdrawalsMatched { // Builder value is higher and withdrawals match.
|
||||
|
||||
@@ -13,18 +13,25 @@ import (
|
||||
)
|
||||
|
||||
func unblindBlobsSidecars(block interfaces.SignedBeaconBlock, bundle *enginev1.BlobsBundle) ([]*ethpb.BlobSidecar, error) {
if block.Version() < version.Deneb || bundle == nil {
if block.Version() < version.Deneb {
return nil, nil
}
header, err := block.Header()
if err != nil {
return nil, err
}
body := block.Block().Body()
blockCommitments, err := body.BlobKzgCommitments()
if err != nil {
return nil, err
}
if len(blockCommitments) == 0 {
return nil, nil
}
// Do not allow builders to provide no blob bundles for blocks which carry commitments.
if bundle == nil {
return nil, errors.New("no valid bundle provided")
}
header, err := block.Header()
if err != nil {
return nil, err
}

// Ensure there are equal counts of blobs/commitments/proofs.
if len(bundle.KzgCommitments) != len(bundle.Blobs) {

34 beacon-chain/rpc/prysm/v1alpha1/validator/unblinder_test.go (new file)
@@ -0,0 +1,34 @@
|
||||
package validator
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
consensusblocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/assert"
|
||||
)
|
||||
|
||||
func TestUnblinder_UnblindBlobSidecars_InvalidBundle(t *testing.T) {
|
||||
wBlock, err := consensusblocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlockDeneb{
|
||||
Block: ðpb.BeaconBlockDeneb{
|
||||
Body: ðpb.BeaconBlockBodyDeneb{},
|
||||
},
|
||||
Signature: nil,
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
_, err = unblindBlobsSidecars(wBlock, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
wBlock, err = consensusblocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlockDeneb{
|
||||
Block: ðpb.BeaconBlockDeneb{
|
||||
Body: ðpb.BeaconBlockBodyDeneb{
|
||||
BlobKzgCommitments: [][]byte{[]byte("a"), []byte("b")},
|
||||
},
|
||||
},
|
||||
Signature: nil,
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
_, err = unblindBlobsSidecars(wBlock, nil)
|
||||
assert.ErrorContains(t, "no valid bundle provided", err)
|
||||
|
||||
}
|
||||
@@ -215,7 +215,7 @@ func NewService(ctx context.Context, cfg *Config) *Service {
|
||||
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
|
||||
BlobStorage: s.cfg.BlobStorage,
|
||||
}
|
||||
rewardFetcher := &rewards.BlockRewardService{Replayer: ch}
|
||||
rewardFetcher := &rewards.BlockRewardService{Replayer: ch, DB: s.cfg.BeaconDB}
|
||||
coreService := &core.Service{
|
||||
HeadFetcher: s.cfg.HeadFetcher,
|
||||
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
|
||||
|
||||
@@ -19,6 +19,7 @@ go_library(
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/slasher",
|
||||
visibility = [
|
||||
"//beacon-chain:__subpackages__",
|
||||
"//cmd/prysmctl:__subpackages__",
|
||||
"//testing/slasher/simulator:__subpackages__",
|
||||
],
|
||||
deps = [
|
||||
@@ -27,6 +28,7 @@ go_library(
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/db/slasherkv:go_default_library",
|
||||
"//beacon-chain/operations/slashings:go_default_library",
|
||||
"//beacon-chain/slasher/types:go_default_library",
|
||||
"//beacon-chain/startup:go_default_library",
|
||||
@@ -45,6 +47,7 @@ go_library(
|
||||
"@com_github_prysmaticlabs_fastssz//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@io_opencensus_go//trace:go_default_library",
|
||||
"@org_golang_x_exp//maps:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
|
||||
@@ -208,8 +208,8 @@ func (m *MinSpanChunksSlice) CheckSlashable(
|
||||
}
|
||||
|
||||
if existingAttWrapper == nil {
|
||||
// This case should normally not happen. If this happen, it means we previously
|
||||
// recorded in our min/max DB an distance corresponding to an attestaiton, but WITHOUT
|
||||
// This case should normally not happen. If this happens, it means we previously
|
||||
// recorded in our min/max DB a distance corresponding to an attestation, but WITHOUT
|
||||
// recording the attestation itself. As a consequence, we say there is no surrounding vote,
|
||||
// but we log an error.
|
||||
fields := logrus.Fields{
|
||||
@@ -287,8 +287,8 @@ func (m *MaxSpanChunksSlice) CheckSlashable(
|
||||
}
|
||||
|
||||
if existingAttWrapper == nil {
|
||||
// This case should normally not happen. If this happen, it means we previously
|
||||
// recorded in our min/max DB an distance corresponding to an attestaiton, but WITHOUT
|
||||
// This case should normally not happen. If this happens, it means we previously
|
||||
// recorded in our min/max DB a distance corresponding to an attestation, but WITHOUT
|
||||
// recording the attestation itself. As a consequence, we say there is no surrounded vote,
|
||||
// but we log an error.
|
||||
fields := logrus.Fields{
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"go.opencensus.io/trace"
|
||||
"golang.org/x/exp/maps"
|
||||
)
|
||||
|
||||
// Takes in a list of indexed attestation wrappers and returns any
|
||||
@@ -131,7 +132,7 @@ func (s *Service) checkSurroundVotes(
|
||||
}
|
||||
|
||||
// Update the latest updated epoch for all validators involved to the current chunk.
|
||||
indexes := s.params.validatorIndexesInChunk(validatorChunkIndex)
|
||||
indexes := s.params.ValidatorIndexesInChunk(validatorChunkIndex)
|
||||
for _, index := range indexes {
|
||||
s.latestEpochUpdatedForValidator[index] = currentEpoch
|
||||
}
|
||||
@@ -272,44 +273,20 @@ func (s *Service) updatedChunkByChunkIndex(
|
||||
|
||||
// minFirstEpochToUpdate is set to the smallest first epoch to update for all validators in the chunk
|
||||
// corresponding to the `validatorChunkIndex`.
|
||||
var minFirstEpochToUpdate *primitives.Epoch
|
||||
var (
|
||||
minFirstEpochToUpdate *primitives.Epoch
|
||||
neededChunkIndexesMap map[uint64]bool
|
||||
|
||||
neededChunkIndexesMap := map[uint64]bool{}
|
||||
err error
|
||||
)
|
||||
validatorIndexes := s.params.ValidatorIndexesInChunk(validatorChunkIndex)
|
||||
|
||||
validatorIndexes := s.params.validatorIndexesInChunk(validatorChunkIndex)
|
||||
for _, validatorIndex := range validatorIndexes {
|
||||
// Retrieve the first epoch to write for the validator index.
|
||||
isAnEpochToUpdate, firstEpochToUpdate, err := s.firstEpochToUpdate(validatorIndex, currentEpoch)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get first epoch to write for validator index %d with current epoch %d", validatorIndex, currentEpoch)
|
||||
}
|
||||
|
||||
if !isAnEpochToUpdate {
|
||||
// If there is no epoch to write, skip.
|
||||
continue
|
||||
}
|
||||
|
||||
// If, for this validator index, the chunk corresponding to the first epoch to write
|
||||
// (and all following epochs until the current epoch) are already flagged as needed,
|
||||
// skip.
|
||||
if minFirstEpochToUpdate != nil && *minFirstEpochToUpdate <= firstEpochToUpdate {
|
||||
continue
|
||||
}
|
||||
|
||||
minFirstEpochToUpdate = &firstEpochToUpdate
|
||||
|
||||
// Add new needed chunk indexes to the map.
|
||||
for i := firstEpochToUpdate; i <= currentEpoch; i++ {
|
||||
chunkIndex := s.params.chunkIndex(i)
|
||||
neededChunkIndexesMap[chunkIndex] = true
|
||||
}
|
||||
if neededChunkIndexesMap, err = s.findNeededChunkIndexes(validatorIndexes, currentEpoch, minFirstEpochToUpdate); err != nil {
|
||||
return nil, errors.Wrap(err, "could not find the needed chunk indexed")
|
||||
}
|
||||
|
||||
// Get the list of needed chunk indexes.
|
||||
neededChunkIndexes := make([]uint64, 0, len(neededChunkIndexesMap))
|
||||
for chunkIndex := range neededChunkIndexesMap {
|
||||
neededChunkIndexes = append(neededChunkIndexes, chunkIndex)
|
||||
}
|
||||
// Transform the map of needed chunk indexes to a slice.
|
||||
neededChunkIndexes := maps.Keys(neededChunkIndexesMap)
|
||||
|
||||
// Retrieve needed chunks from the database.
|
||||
chunkByChunkIndex, err := s.loadChunksFromDisk(ctx, validatorChunkIndex, chunkKind, neededChunkIndexes)
|
||||
@@ -332,7 +309,7 @@ func (s *Service) updatedChunkByChunkIndex(
|
||||
epochToUpdate := firstEpochToUpdate
|
||||
|
||||
for epochToUpdate <= currentEpoch {
|
||||
// Get the chunk index for the ecpoh to write.
|
||||
// Get the chunk index for the epoch to write.
|
||||
chunkIndex := s.params.chunkIndex(epochToUpdate)
|
||||
|
||||
// Get the chunk corresponding to the chunk index from the `chunkByChunkIndex` map.
|
||||
@@ -363,6 +340,45 @@ func (s *Service) updatedChunkByChunkIndex(
|
||||
return chunkByChunkIndex, nil
|
||||
}
|
||||
|
||||
// findNeededChunkIndexes returns a map of the chunk indexes that need to be updated.
|
||||
// It loops over the validator indexes and finds the first epoch to update for each validator index.
|
||||
func (s *Service) findNeededChunkIndexes(
|
||||
validatorIndexes []primitives.ValidatorIndex,
|
||||
currentEpoch primitives.Epoch,
|
||||
minFirstEpochToUpdate *primitives.Epoch,
|
||||
) (map[uint64]bool, error) {
|
||||
neededChunkIndexesMap := map[uint64]bool{}
|
||||
|
||||
for _, validatorIndex := range validatorIndexes {
|
||||
// Retrieve the first epoch to write for the validator index.
|
||||
isAnEpochToUpdate, firstEpochToUpdate, err := s.firstEpochToUpdate(validatorIndex, currentEpoch)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get first epoch to write for validator index %d with current epoch %d", validatorIndex, currentEpoch)
|
||||
}
|
||||
|
||||
if !isAnEpochToUpdate {
|
||||
// If there is no epoch to write, skip.
|
||||
continue
|
||||
}
|
||||
|
||||
// If, for this validator index, the chunk corresponding to the first epoch to write
|
||||
// (and all following epochs until the current epoch) are already flagged as needed,
|
||||
// skip.
|
||||
if minFirstEpochToUpdate != nil && *minFirstEpochToUpdate <= firstEpochToUpdate {
|
||||
continue
|
||||
}
|
||||
|
||||
minFirstEpochToUpdate = &firstEpochToUpdate
|
||||
|
||||
// Add new needed chunk indexes to the map.
|
||||
for i := firstEpochToUpdate; i <= currentEpoch; i++ {
|
||||
chunkIndex := s.params.chunkIndex(i)
|
||||
neededChunkIndexesMap[chunkIndex] = true
|
||||
}
|
||||
}
|
||||
return neededChunkIndexesMap, nil
|
||||
}
|
||||
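For reference, the map-to-slice conversion in the refactored updatedChunkByChunkIndex uses golang.org/x/exp/maps instead of a manual loop. A minimal, self-contained sketch of the same pattern follows; the chunkIndex helper and its chunk-size/history-length values are illustrative stand-ins, not the package's own implementation.

package main

import (
	"fmt"

	"golang.org/x/exp/maps"
)

// chunkIndex is a stand-in for Parameters.chunkIndex: (epoch % historyLength) / chunkSize,
// here with an illustrative chunk size of 16 and history length of 4096.
func chunkIndex(epoch uint64) uint64 {
	return (epoch % 4096) / 16
}

func main() {
	needed := map[uint64]bool{}
	// Flag every chunk index touched between the first epoch to update and the current epoch.
	for e := uint64(100); e <= 140; e++ {
		needed[chunkIndex(e)] = true
	}
	// maps.Keys returns the keys in unspecified order, which is fine here because
	// the chunk indexes are only used as database lookup keys.
	neededChunkIndexes := maps.Keys(needed)
	fmt.Println(neededChunkIndexes)
}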
|
||||
// firstEpochToUpdate, given a validator index and the current epoch, returns a boolean indicating
|
||||
// if there is an epoch to write. If it is the case, it returns the first epoch to write.
|
||||
func (s *Service) firstEpochToUpdate(validatorIndex primitives.ValidatorIndex, currentEpoch primitives.Epoch) (bool, primitives.Epoch, error) {
|
||||
|
||||
@@ -1059,7 +1059,7 @@ func Test_updatedChunkByChunkIndex(t *testing.T) {
|
||||
// Initialize the slasher database.
|
||||
slasherDB := dbtest.SetupSlasherDB(t)
|
||||
|
||||
// Intialize the slasher service.
|
||||
// Initialize the slasher service.
|
||||
service := &Service{
|
||||
params: &Parameters{
|
||||
chunkSize: tt.chunkSize,
|
||||
@@ -1502,7 +1502,7 @@ func runAttestationsBenchmark(b *testing.B, s *Service, numAtts, numValidators u
|
||||
|
||||
func Benchmark_checkSurroundVotes(b *testing.B) {
|
||||
const (
|
||||
// Approximatively the number of Holesky active validators on 2024-02-16
|
||||
// Approximately the number of Holesky active validators on 2024-02-16
|
||||
// This number is both a multiple of 32 (the number of slots per epoch) and 256 (the number of validators per chunk)
|
||||
validatorsCount = 1_638_400
|
||||
slotsPerEpoch = 32
|
||||
@@ -1526,7 +1526,7 @@ func Benchmark_checkSurroundVotes(b *testing.B) {
|
||||
// So for 1_638_400 validators with 32 slots per epoch, we would have 48_000 attestation wrappers per slot.
|
||||
// With 256 validators per chunk, we would have only 188 modified chunks.
|
||||
//
|
||||
// In this benchmark, we use the worst case scenario where attestating validators are evenly splitted across all validators chunks.
|
||||
// In this benchmark, we use the worst case scenario where attesting validators are evenly split across all validators chunks.
|
||||
// We also suppose that only one chunk per validator chunk index is modified.
|
||||
// For one given validator index, multiple chunk indexes could be modified.
|
||||
//
|
||||
|
||||
@@ -2,8 +2,11 @@ package slasher
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/slasherkv"
|
||||
slashertypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/slasher/types"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
@@ -76,7 +79,7 @@ func (s *Service) filterAttestations(
|
||||
continue
|
||||
}
|
||||
|
||||
// If an attestations's target epoch is in the future, we defer processing for later.
|
||||
// If an attestation's target epoch is in the future, we defer processing for later.
|
||||
if attWrapper.IndexedAttestation.Data.Target.Epoch > currentEpoch {
|
||||
validInFuture = append(validInFuture, attWrapper)
|
||||
continue
|
||||
@@ -159,3 +162,93 @@ func isDoubleProposal(incomingSigningRoot, existingSigningRoot [32]byte) bool {
|
||||
}
|
||||
return incomingSigningRoot != existingSigningRoot
|
||||
}
|
||||
|
||||
type GetChunkFromDatabaseFilters struct {
|
||||
ChunkKind slashertypes.ChunkKind
|
||||
ValidatorIndex primitives.ValidatorIndex
|
||||
SourceEpoch primitives.Epoch
|
||||
IsDisplayAllValidatorsInChunk bool
|
||||
IsDisplayAllEpochsInChunk bool
|
||||
}
|
||||
|
||||
// GetChunkFromDatabase is a utility function that retrieves a chunk from the
|
||||
// database.
|
||||
func GetChunkFromDatabase(
|
||||
ctx context.Context,
|
||||
dbPath string,
|
||||
filters GetChunkFromDatabaseFilters,
|
||||
params *Parameters,
|
||||
) (lastEpochForValidatorIndex primitives.Epoch, chunkIndex, validatorChunkIndex uint64, chunk Chunker, err error) {
|
||||
// init store
|
||||
d, err := slasherkv.NewKVStore(ctx, dbPath)
|
||||
if err != nil {
|
||||
return lastEpochForValidatorIndex, chunkIndex, validatorChunkIndex, chunk, fmt.Errorf("could not open database at path %s: %w", dbPath, err)
|
||||
}
|
||||
defer closeDB(d)
|
||||
|
||||
// init service
|
||||
s := Service{
|
||||
params: params,
|
||||
serviceCfg: &ServiceConfig{
|
||||
Database: d,
|
||||
},
|
||||
}
|
||||
|
||||
// variables
|
||||
validatorIndex := filters.ValidatorIndex
|
||||
sourceEpoch := filters.SourceEpoch
|
||||
chunkKind := filters.ChunkKind
|
||||
validatorChunkIndex = s.params.validatorChunkIndex(validatorIndex)
|
||||
chunkIndex = s.params.chunkIndex(sourceEpoch)
|
||||
|
||||
// before getting the chunk, we need to verify if the requested epoch is in database
|
||||
lastEpochForValidator, err := s.serviceCfg.Database.LastEpochWrittenForValidators(ctx, []primitives.ValidatorIndex{validatorIndex})
|
||||
if err != nil {
|
||||
return lastEpochForValidatorIndex,
|
||||
chunkIndex,
|
||||
validatorChunkIndex,
|
||||
chunk,
|
||||
fmt.Errorf("could not get last epoch written for validator %d: %w", validatorIndex, err)
|
||||
}
|
||||
|
||||
if len(lastEpochForValidator) == 0 {
|
||||
return lastEpochForValidatorIndex,
|
||||
chunkIndex,
|
||||
validatorChunkIndex,
|
||||
chunk,
|
||||
fmt.Errorf("could not get information at epoch %d for validator %d: there's no record found in slasher database",
|
||||
sourceEpoch, validatorIndex,
|
||||
)
|
||||
}
|
||||
lastEpochForValidatorIndex = lastEpochForValidator[0].Epoch
|
||||
|
||||
// if the epoch requested is within the range, we can proceed to get the chunk, otherwise return error
|
||||
atBestSmallestEpoch := lastEpochForValidatorIndex.Sub(uint64(params.historyLength))
|
||||
if sourceEpoch < atBestSmallestEpoch || sourceEpoch > lastEpochForValidatorIndex {
|
||||
return lastEpochForValidatorIndex,
|
||||
chunkIndex,
|
||||
validatorChunkIndex,
|
||||
chunk,
|
||||
fmt.Errorf("requested epoch %d is outside the slasher history length %d, data can be provided within the epoch range [%d:%d] for validator %d",
|
||||
sourceEpoch, params.historyLength, atBestSmallestEpoch, lastEpochForValidatorIndex, validatorIndex,
|
||||
)
|
||||
}
|
||||
|
||||
// fetch chunk from DB
|
||||
chunk, err = s.getChunkFromDatabase(ctx, chunkKind, validatorChunkIndex, chunkIndex)
|
||||
if err != nil {
|
||||
return lastEpochForValidatorIndex,
|
||||
chunkIndex,
|
||||
validatorChunkIndex,
|
||||
chunk,
|
||||
fmt.Errorf("could not get chunk at index %d: %w", chunkIndex, err)
|
||||
}
|
||||
|
||||
return lastEpochForValidatorIndex, chunkIndex, validatorChunkIndex, chunk, nil
|
||||
}
|
||||
|
||||
func closeDB(d *slasherkv.Store) {
|
||||
if err := d.Close(); err != nil {
|
||||
log.WithError(err).Error("could not close database")
|
||||
}
|
||||
}
|
||||
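A minimal usage sketch of this exported helper; the database path and the validator/epoch values are illustrative only, and a populated slasher database is assumed to exist at that path.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/slasher"
	slashertypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/slasher/types"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)

func main() {
	filters := slasher.GetChunkFromDatabaseFilters{
		ChunkKind:      slashertypes.MinSpan,
		ValidatorIndex: primitives.ValidatorIndex(12345),
		SourceEpoch:    primitives.Epoch(1000),
	}
	// The path is illustrative: the directory must contain an existing slasher database.
	lastEpoch, chunkIndex, validatorChunkIndex, chunk, err := slasher.GetChunkFromDatabase(
		context.Background(), "/data/prysm/slasher", filters, slasher.DefaultParams(),
	)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(lastEpoch, chunkIndex, validatorChunkIndex, len(chunk.Chunk()))
}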
|
||||
@@ -16,6 +16,21 @@ type Parameters struct {
|
||||
historyLength primitives.Epoch // H - defines how many epochs we keep of min or max spans.
|
||||
}
|
||||
|
||||
// ChunkSize returns the chunk size.
|
||||
func (p *Parameters) ChunkSize() uint64 {
|
||||
return p.chunkSize
|
||||
}
|
||||
|
||||
// ValidatorChunkSize returns the validator chunk size.
|
||||
func (p *Parameters) ValidatorChunkSize() uint64 {
|
||||
return p.validatorChunkSize
|
||||
}
|
||||
|
||||
// HistoryLength returns the history length.
|
||||
func (p *Parameters) HistoryLength() primitives.Epoch {
|
||||
return p.historyLength
|
||||
}
|
||||
|
||||
// DefaultParams defines default values for slasher's important parameters, defined
|
||||
// based on optimization analysis for best and worst case scenarios for
|
||||
// slasher's performance.
|
||||
@@ -32,7 +47,15 @@ func DefaultParams() *Parameters {
|
||||
}
|
||||
}
|
||||
|
||||
// Validator min and max spans are split into chunks of length C = chunkSize.
|
||||
func NewParams(chunkSize, validatorChunkSize uint64, historyLength primitives.Epoch) *Parameters {
|
||||
return &Parameters{
|
||||
chunkSize: chunkSize,
|
||||
validatorChunkSize: validatorChunkSize,
|
||||
historyLength: historyLength,
|
||||
}
|
||||
}
|
||||
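A short sketch of the newly exported constructor and getters; the parameter values only mirror the defaults and are illustrative.

package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/slasher"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)

func main() {
	// Chunk size, validator chunk size and history length, in that order.
	p := slasher.NewParams(16, 256, primitives.Epoch(4096))
	fmt.Println(p.ChunkSize(), p.ValidatorChunkSize(), p.HistoryLength())
}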
|
||||
// ChunkIndex Validator min and max spans are split into chunks of length C = chunkSize.
|
||||
// That is, if we are keeping N epochs worth of attesting history, finding what
|
||||
// chunk a certain epoch, e, falls into can be computed as (e % N) / C. For example,
|
||||
// if we are keeping 6 epochs worth of data, and we have chunks of size 2, then epoch
|
||||
@@ -139,9 +162,9 @@ func (p *Parameters) flatSliceID(validatorChunkIndex, chunkIndex uint64) []byte
|
||||
return ssz.MarshalUint64(make([]byte, 0), uint64(width.Mul(validatorChunkIndex).Add(chunkIndex)))
|
||||
}
|
||||
|
||||
// Given a validator chunk index, we determine all of the validator
|
||||
// ValidatorIndexesInChunk, given a validator chunk index, determines all the validator
|
||||
// indices that will belong in that chunk.
|
||||
func (p *Parameters) validatorIndexesInChunk(validatorChunkIndex uint64) []primitives.ValidatorIndex {
|
||||
func (p *Parameters) ValidatorIndexesInChunk(validatorChunkIndex uint64) []primitives.ValidatorIndex {
|
||||
validatorIndices := make([]primitives.ValidatorIndex, 0)
|
||||
low := validatorChunkIndex * p.validatorChunkSize
|
||||
high := (validatorChunkIndex + 1) * p.validatorChunkSize
|
||||
|
||||
@@ -468,7 +468,7 @@ func TestParams_validatorIndicesInChunk(t *testing.T) {
|
||||
c := &Parameters{
|
||||
validatorChunkSize: tt.fields.validatorChunkSize,
|
||||
}
|
||||
if got := c.validatorIndexesInChunk(tt.validatorChunkIdx); !reflect.DeepEqual(got, tt.want) {
|
||||
if got := c.ValidatorIndexesInChunk(tt.validatorChunkIdx); !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("validatorIndicesInChunk() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
|
||||
@@ -161,7 +161,7 @@ func (s *Service) Stop() error {
|
||||
ctx, innerCancel := context.WithTimeout(context.Background(), shutdownTimeout)
|
||||
defer innerCancel()
|
||||
log.Info("Flushing last epoch written for each validator to disk, please wait")
|
||||
if err := s.serviceCfg.Database.SaveLastEpochsWrittenForValidators(
|
||||
if err := s.serviceCfg.Database.SaveLastEpochWrittenForValidators(
|
||||
ctx, s.latestEpochUpdatedForValidator,
|
||||
); err != nil {
|
||||
log.Error(err)
|
||||
|
||||
@@ -4,7 +4,10 @@ go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["types.go"],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/slasher/types",
|
||||
visibility = ["//beacon-chain:__subpackages__"],
|
||||
visibility = [
|
||||
"//beacon-chain:__subpackages__",
|
||||
"//cmd/prysmctl:__subpackages__",
|
||||
],
|
||||
deps = [
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
|
||||
@@ -14,6 +14,18 @@ const (
|
||||
MaxSpan
|
||||
)
|
||||
|
||||
// String returns the string representation of the chunk kind.
|
||||
func (c ChunkKind) String() string {
|
||||
switch c {
|
||||
case MinSpan:
|
||||
return "minspan"
|
||||
case MaxSpan:
|
||||
return "maxspan"
|
||||
default:
|
||||
return "unknown"
|
||||
}
|
||||
}
|
||||
|
||||
// IndexedAttestationWrapper contains an indexed attestation with its
|
||||
// data root to reduce duplicated computation.
|
||||
type IndexedAttestationWrapper struct {
|
||||
|
||||
@@ -12,14 +12,12 @@ go_library(
|
||||
"log.go",
|
||||
"round_robin.go",
|
||||
"service.go",
|
||||
"verification.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/sync/initial-sync",
|
||||
visibility = ["//beacon-chain:__subpackages__"],
|
||||
deps = [
|
||||
"//async/abool:go_default_library",
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||
"//beacon-chain/core/feed/block:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
@@ -41,7 +39,6 @@ go_library(
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//container/leaky-bucket:go_default_library",
|
||||
"//crypto/rand:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//math:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime:go_default_library",
|
||||
|
||||
@@ -312,7 +312,7 @@ func (f *blocksFetcher) handleRequest(ctx context.Context, start primitives.Slot
|
||||
|
||||
response.bwb, response.pid, response.err = f.fetchBlocksFromPeer(ctx, start, count, peers)
|
||||
if response.err == nil {
|
||||
bwb, err := f.fetchBlobsFromPeer(ctx, response.bwb, response.pid)
|
||||
bwb, err := f.fetchBlobsFromPeer(ctx, response.bwb, response.pid, peers)
|
||||
if err != nil {
|
||||
response.err = err
|
||||
}
|
||||
@@ -336,6 +336,11 @@ func (f *blocksFetcher) fetchBlocksFromPeer(
|
||||
Count: count,
|
||||
Step: 1,
|
||||
}
|
||||
bestPeers := f.hasSufficientBandwidth(peers, req.Count)
|
||||
// We append the best peers to the front so that higher capacity
|
||||
// peers are dialed first.
|
||||
peers = append(bestPeers, peers...)
|
||||
peers = dedupPeers(peers)
|
||||
for i := 0; i < len(peers); i++ {
|
||||
p := peers[i]
|
||||
blocks, err := f.requestBlocks(ctx, req, p)
|
||||
@@ -472,7 +477,7 @@ func missingCommitError(root [32]byte, slot primitives.Slot, missing [][]byte) e
|
||||
}
|
||||
|
||||
// fetchBlobsFromPeer fetches blobs from a single randomly selected peer.
|
||||
func (f *blocksFetcher) fetchBlobsFromPeer(ctx context.Context, bwb []blocks2.BlockWithROBlobs, pid peer.ID) ([]blocks2.BlockWithROBlobs, error) {
|
||||
func (f *blocksFetcher) fetchBlobsFromPeer(ctx context.Context, bwb []blocks2.BlockWithROBlobs, pid peer.ID, peers []peer.ID) ([]blocks2.BlockWithROBlobs, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "initialsync.fetchBlobsFromPeer")
|
||||
defer span.End()
|
||||
if slots.ToEpoch(f.clock.CurrentSlot()) < params.BeaconConfig().DenebForkEpoch {
|
||||
@@ -487,13 +492,30 @@ func (f *blocksFetcher) fetchBlobsFromPeer(ctx context.Context, bwb []blocks2.Bl
|
||||
if req == nil {
|
||||
return bwb, nil
|
||||
}
|
||||
// Request blobs from the same peer that gave us the blob batch.
|
||||
blobs, err := f.requestBlobs(ctx, req, pid)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not request blobs by range")
|
||||
peers = f.filterPeers(ctx, peers, peersPercentagePerRequest)
|
||||
// We dial the initial peer first to ensure that we get the desired set of blobs.
|
||||
wantedPeers := append([]peer.ID{pid}, peers...)
|
||||
bestPeers := f.hasSufficientBandwidth(wantedPeers, req.Count)
|
||||
// We append the best peers to the front so that higher capacity
|
||||
// peers are dialed first. If all of them fail, we fallback to the
|
||||
// initial peer we wanted to request blobs from.
|
||||
peers = append(bestPeers, pid)
|
||||
for i := 0; i < len(peers); i++ {
|
||||
p := peers[i]
|
||||
blobs, err := f.requestBlobs(ctx, req, p)
|
||||
if err != nil {
|
||||
log.WithField("peer", p).WithError(err).Debug("Could not request blobs by range from peer")
|
||||
continue
|
||||
}
|
||||
f.p2p.Peers().Scorers().BlockProviderScorer().Touch(p)
|
||||
robs, err := verifyAndPopulateBlobs(bwb, blobs, blobWindowStart)
|
||||
if err != nil {
|
||||
log.WithField("peer", p).WithError(err).Debug("Invalid BeaconBlobsByRange response")
|
||||
continue
|
||||
}
|
||||
return robs, err
|
||||
}
|
||||
f.p2p.Peers().Scorers().BlockProviderScorer().Touch(pid)
|
||||
return verifyAndPopulateBlobs(bwb, blobs, blobWindowStart)
|
||||
return nil, errNoPeersAvailable
|
||||
}
|
||||
|
||||
// requestBlocks is a wrapper for handling BeaconBlocksByRangeRequest requests/streams.
|
||||
@@ -606,6 +628,18 @@ func (f *blocksFetcher) waitForBandwidth(pid peer.ID, count uint64) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *blocksFetcher) hasSufficientBandwidth(peers []peer.ID, count uint64) []peer.ID {
|
||||
filteredPeers := []peer.ID{}
|
||||
for _, p := range peers {
|
||||
if uint64(f.rateLimiter.Remaining(p.String())) < count {
|
||||
continue
|
||||
}
|
||||
copiedP := p
|
||||
filteredPeers = append(filteredPeers, copiedP)
|
||||
}
|
||||
return filteredPeers
|
||||
}
|
||||
|
||||
// Determine how long it will take for us to have the required number of blocks allowed by our rate limiter.
|
||||
// We do this by calculating the duration till the rate limiter can request these blocks without exceeding
|
||||
// the provided bandwidth limits per peer.
|
||||
@@ -626,3 +660,18 @@ func timeToWait(wanted, rem, capacity int64, timeTillEmpty time.Duration) time.D
|
||||
expectedTime := int64(timeTillEmpty) * blocksNeeded / currentNumBlks
|
||||
return time.Duration(expectedTime)
|
||||
}
|
||||
|
||||
// deduplicates the provided peer list.
|
||||
func dedupPeers(peers []peer.ID) []peer.ID {
|
||||
newPeerList := make([]peer.ID, 0, len(peers))
|
||||
peerExists := make(map[peer.ID]bool)
|
||||
|
||||
for i := range peers {
|
||||
if peerExists[peers[i]] {
|
||||
continue
|
||||
}
|
||||
newPeerList = append(newPeerList, peers[i])
|
||||
peerExists[peers[i]] = true
|
||||
}
|
||||
return newPeerList
|
||||
}
|
||||
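A standalone sketch of the peer-prioritization pattern used above: peers with enough remaining rate-limiter capacity are prepended, then the combined list is deduplicated so each peer is dialed at most once. The capacity map below is a stand-in for the leaky-bucket rate limiter and the values are illustrative.

package main

import "fmt"

// remaining is a stand-in for f.rateLimiter.Remaining(pid).
var remaining = map[string]uint64{"a": 512, "b": 32, "c": 512}

func hasSufficientBandwidth(peers []string, count uint64) []string {
	filtered := []string{}
	for _, p := range peers {
		if remaining[p] < count {
			continue
		}
		filtered = append(filtered, p)
	}
	return filtered
}

func dedup(peers []string) []string {
	out := make([]string, 0, len(peers))
	seen := map[string]bool{}
	for _, p := range peers {
		if seen[p] {
			continue
		}
		out = append(out, p)
		seen[p] = true
	}
	return out
}

func main() {
	peers := []string{"a", "b", "c"}
	best := hasSufficientBandwidth(peers, 64)
	// Higher-capacity peers go to the front; duplicates are removed afterwards.
	ordered := dedup(append(best, peers...))
	fmt.Println(ordered) // [a c b]
}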
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
|
||||
libp2pcore "github.com/libp2p/go-libp2p/core"
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
mock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
|
||||
dbtest "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/testing"
|
||||
p2pm "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
|
||||
@@ -1166,3 +1167,22 @@ func TestBatchLimit(t *testing.T) {
|
||||
|
||||
assert.Equal(t, params.BeaconConfig().MaxRequestBlocksDeneb, uint64(maxBatchLimit()))
|
||||
}
|
||||
|
||||
func TestBlockFetcher_HasSufficientBandwidth(t *testing.T) {
|
||||
bf := newBlocksFetcher(context.Background(), &blocksFetcherConfig{})
|
||||
currCap := bf.rateLimiter.Capacity()
|
||||
wantedAmt := currCap - 100
|
||||
bf.rateLimiter.Add(peer.ID("a").String(), wantedAmt)
|
||||
bf.rateLimiter.Add(peer.ID("c").String(), wantedAmt)
|
||||
bf.rateLimiter.Add(peer.ID("f").String(), wantedAmt)
|
||||
bf.rateLimiter.Add(peer.ID("d").String(), wantedAmt)
|
||||
|
||||
receivedPeers := bf.hasSufficientBandwidth([]peer.ID{"a", "b", "c", "d", "e", "f"}, 110)
|
||||
for _, p := range receivedPeers {
|
||||
switch p {
|
||||
case "a", "c", "f", "d":
|
||||
t.Errorf("peer has exceeded capacity: %s", p)
|
||||
}
|
||||
}
|
||||
assert.Equal(t, 2, len(receivedPeers))
|
||||
}
|
||||
|
||||
@@ -280,7 +280,7 @@ func (f *blocksFetcher) findForkWithPeer(ctx context.Context, pid peer.ID, slot
|
||||
}
|
||||
// We need to fetch the blobs for the given alt-chain if any exist, so that we can try to verify and import
|
||||
// the blocks.
|
||||
bwb, err := f.fetchBlobsFromPeer(ctx, altBlocks, pid)
|
||||
bwb, err := f.fetchBlobsFromPeer(ctx, altBlocks, pid, []peer.ID{pid})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to retrieve blobs for blocks found in findForkWithPeer")
|
||||
}
|
||||
@@ -302,7 +302,7 @@ func (f *blocksFetcher) findAncestor(ctx context.Context, pid peer.ID, b interfa
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "received invalid blocks in findAncestor")
|
||||
}
|
||||
bwb, err = f.fetchBlobsFromPeer(ctx, bwb, pid)
|
||||
bwb, err = f.fetchBlobsFromPeer(ctx, bwb, pid, []peer.ID{pid})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to retrieve blobs for blocks found in findAncestor")
|
||||
}
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/das"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/sync"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
@@ -167,7 +168,7 @@ func (s *Service) processFetchedDataRegSync(
|
||||
if len(bwb) == 0 {
|
||||
return
|
||||
}
|
||||
bv := newBlobBatchVerifier(s.newBlobVerifier)
|
||||
bv := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.InitsyncSidecarRequirements)
|
||||
avs := das.NewLazilyPersistentStore(s.cfg.BlobStorage, bv)
|
||||
batchFields := logrus.Fields{
|
||||
"firstSlot": data.bwb[0].Block.Block().Slot(),
|
||||
@@ -326,7 +327,7 @@ func (s *Service) processBatchedBlocks(ctx context.Context, genesis time.Time,
|
||||
errParentDoesNotExist, first.Block().ParentRoot(), first.Block().Slot())
|
||||
}
|
||||
|
||||
bv := newBlobBatchVerifier(s.newBlobVerifier)
|
||||
bv := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.InitsyncSidecarRequirements)
|
||||
avs := das.NewLazilyPersistentStore(s.cfg.BlobStorage, bv)
|
||||
s.logBatchSyncStatus(genesis, first, len(bwb))
|
||||
for _, bb := range bwb {
|
||||
|
||||
@@ -340,7 +340,7 @@ func (s *Service) fetchOriginBlobs(pids []peer.ID) error {
|
||||
if len(sidecars) != len(req) {
|
||||
continue
|
||||
}
|
||||
bv := newBlobBatchVerifier(s.newBlobVerifier)
|
||||
bv := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.InitsyncSidecarRequirements)
|
||||
avs := das.NewLazilyPersistentStore(s.cfg.BlobStorage, bv)
|
||||
current := s.clock.CurrentSlot()
|
||||
if err := avs.Persist(current, sidecars...); err != nil {
|
||||
@@ -362,3 +362,9 @@ func shufflePeers(pids []peer.ID) {
|
||||
pids[i], pids[j] = pids[j], pids[i]
|
||||
})
|
||||
}
|
||||
|
||||
func newBlobVerifierFromInitializer(ini *verification.Initializer) verification.NewBlobVerifier {
|
||||
return func(b blocks.ROBlob, reqs []verification.Requirement) verification.BlobVerifier {
|
||||
return ini.NewBlobVerifier(b, reqs)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -150,6 +150,10 @@ var (
|
||||
Help: "Time to verify gossiped blob sidecars",
|
||||
},
|
||||
)
|
||||
pendingAttCount = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "gossip_pending_attestations_total",
|
||||
Help: "increased when receiving a new pending attestation",
|
||||
})
|
||||
|
||||
// Sync committee verification performance.
|
||||
syncMessagesForUnknownBlocks = promauto.NewCounter(
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"sync"
|
||||
@@ -157,7 +158,8 @@ func (s *Service) processAttestations(ctx context.Context, attestations []*ethpb
|
||||
|
||||
// This defines how pending attestations are saved in the map. The key is the
|
||||
// root of the missing block. The value is the list of pending attestations
|
||||
// that voted for that block root.
|
||||
// that voted for that block root. The caller of this function is responsible
|
||||
// for not sending repeated attestations to the pending queue.
|
||||
func (s *Service) savePendingAtt(att *ethpb.SignedAggregateAttestationAndProof) {
|
||||
root := bytesutil.ToBytes32(att.Message.Aggregate.Data.BeaconBlockRoot)
|
||||
|
||||
@@ -175,20 +177,37 @@ func (s *Service) savePendingAtt(att *ethpb.SignedAggregateAttestationAndProof)
|
||||
|
||||
_, ok := s.blkRootToPendingAtts[root]
|
||||
if !ok {
|
||||
pendingAttCount.Inc()
|
||||
s.blkRootToPendingAtts[root] = []*ethpb.SignedAggregateAttestationAndProof{att}
|
||||
return
|
||||
}
|
||||
|
||||
// Skip if the attestation from the same aggregator already exists in the pending queue.
|
||||
// Skip if the attestation from the same aggregator already exists in
|
||||
// the pending queue.
|
||||
for _, a := range s.blkRootToPendingAtts[root] {
|
||||
if a.Message.AggregatorIndex == att.Message.AggregatorIndex {
|
||||
if attsAreEqual(att, a) {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
pendingAttCount.Inc()
|
||||
s.blkRootToPendingAtts[root] = append(s.blkRootToPendingAtts[root], att)
|
||||
}
|
||||
|
||||
func attsAreEqual(a, b *ethpb.SignedAggregateAttestationAndProof) bool {
|
||||
if a.Signature != nil {
|
||||
return b.Signature != nil && a.Message.AggregatorIndex == b.Message.AggregatorIndex
|
||||
}
|
||||
if b.Signature != nil {
|
||||
return false
|
||||
}
|
||||
if a.Message.Aggregate.Data.Slot != b.Message.Aggregate.Data.Slot {
|
||||
return false
|
||||
}
|
||||
if a.Message.Aggregate.Data.CommitteeIndex != b.Message.Aggregate.Data.CommitteeIndex {
|
||||
return false
|
||||
}
|
||||
return bytes.Equal(a.Message.Aggregate.AggregationBits, b.Message.Aggregate.AggregationBits)
|
||||
}
|
||||
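A minimal illustration of the equality rule for unsigned aggregates: the same slot and committee but different aggregation bits are treated as distinct, so both attestations stay in the pending queue. Plain byte slices stand in for the bitlists here.

package main

import (
	"bytes"
	"fmt"
)

type att struct {
	slot, committee uint64
	aggregationBits []byte
}

// equal mirrors the unsigned branch of attsAreEqual: slot, committee and
// aggregation bits must all match.
func equal(a, b att) bool {
	return a.slot == b.slot &&
		a.committee == b.committee &&
		bytes.Equal(a.aggregationBits, b.aggregationBits)
}

func main() {
	a := att{slot: 3, committee: 1, aggregationBits: []byte{0b0011}}
	b := att{slot: 3, committee: 1, aggregationBits: []byte{0b0101}}
	fmt.Println(equal(a, b)) // false: different bits, so both are queued
}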
|
||||
// This validates the pending attestations in the queue are still valid.
|
||||
// If not valid, a node will remove it in the queue in place. The validity
|
||||
// check specifies the pending attestation could not fall one epoch behind
|
||||
|
||||
@@ -399,7 +399,7 @@ func TestValidatePendingAtts_CanPruneOldAtts(t *testing.T) {
|
||||
assert.Equal(t, 0, len(s.blkRootToPendingAtts), "Did not delete block keys")
|
||||
}
|
||||
|
||||
func TestValidatePendingAtts_NoDuplicatingAggregatorIndex(t *testing.T) {
|
||||
func TestValidatePendingAtts_NoDuplicatingAtts(t *testing.T) {
|
||||
s := &Service{
|
||||
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
|
||||
}
|
||||
@@ -420,7 +420,7 @@ func TestValidatePendingAtts_NoDuplicatingAggregatorIndex(t *testing.T) {
|
||||
Message: ðpb.AggregateAttestationAndProof{
|
||||
AggregatorIndex: 2,
|
||||
Aggregate: ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{Slot: 3, BeaconBlockRoot: r2[:]}}}})
|
||||
Data: ðpb.AttestationData{Slot: 2, BeaconBlockRoot: r2[:]}}}})
|
||||
|
||||
assert.Equal(t, 1, len(s.blkRootToPendingAtts[r1]), "Did not save pending atts")
|
||||
assert.Equal(t, 1, len(s.blkRootToPendingAtts[r2]), "Did not save pending atts")
|
||||
|
||||
@@ -646,7 +646,7 @@ func TestService_BatchRootRequest(t *testing.T) {
|
||||
b4Root, err := b4.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Send in duplicated roots to also test deduplicaton.
|
||||
// Send in duplicated roots to also test deduplication.
|
||||
sentRoots := p2ptypes.BeaconBlockByRootsReq{b2Root, b2Root, b3Root, b3Root, b4Root, b5Root}
|
||||
expectedRoots := p2ptypes.BeaconBlockByRootsReq{b2Root, b3Root, b4Root, b5Root}
|
||||
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
libp2pcore "github.com/libp2p/go-libp2p/core"
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
"github.com/multiformats/go-multiaddr"
|
||||
ssz "github.com/prysmaticlabs/fastssz"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
|
||||
p2ptypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/types"
|
||||
@@ -142,7 +143,30 @@ func (s *Service) registerRPC(baseTopic string, handle rpcHandler) {
|
||||
// it successfully writes a response. We don't blindly call
|
||||
// Close here because we may have only written a partial
|
||||
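For context, a small hypothetical helper (not part of this change) that maps a CLI string back to a ChunkKind, i.e. the inverse of String():

package main

import (
	"fmt"

	slashertypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/slasher/types"
)

// parseChunkKind is a hypothetical inverse of ChunkKind.String, useful for
// turning a flag value back into the enum.
func parseChunkKind(s string) (slashertypes.ChunkKind, error) {
	switch s {
	case "minspan":
		return slashertypes.MinSpan, nil
	case "maxspan":
		return slashertypes.MaxSpan, nil
	default:
		return 0, fmt.Errorf("unknown chunk kind %q", s)
	}
}

func main() {
	kind, err := parseChunkKind("maxspan")
	fmt.Println(kind, err) // maxspan <nil>
}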
// response.
|
||||
// About the special case for quic-v1, please see:
|
||||
// https://github.com/quic-go/quic-go/issues/3291
|
||||
defer func() {
|
||||
isQuic := false
|
||||
multiaddr.ForEach(stream.Conn().RemoteMultiaddr(), func(c multiaddr.Component) bool {
|
||||
pCode := c.Protocol().Code
|
||||
if pCode == multiaddr.P_QUIC || pCode == multiaddr.P_QUIC_V1 {
|
||||
isQuic = true
|
||||
return false
|
||||
}
|
||||
return true
|
||||
})
|
||||
|
||||
// We special-case QUIC connections because, unlike yamux where a reset after a successful close is a no-op, an abrupt
|
||||
// stream termination can lead to the remote peer dropping the inbound data. For that reason, we only reset streams
|
||||
// in the event there is an error while closing them.
|
||||
if isQuic {
|
||||
if err := stream.Close(); err != nil {
|
||||
_err := stream.Reset()
|
||||
_ = _err
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
_err := stream.Reset()
|
||||
_ = _err
|
||||
}()
|
||||
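A self-contained sketch of the QUIC detection step in isolation, using the go-multiaddr API the handler relies on; the address literal is illustrative.

package main

import (
	"fmt"

	"github.com/multiformats/go-multiaddr"
)

// isQuicAddr reports whether the multiaddr contains a quic or quic-v1 component.
func isQuicAddr(a multiaddr.Multiaddr) bool {
	quic := false
	multiaddr.ForEach(a, func(c multiaddr.Component) bool {
		code := c.Protocol().Code
		if code == multiaddr.P_QUIC || code == multiaddr.P_QUIC_V1 {
			quic = true
			return false // stop iterating
		}
		return true
	})
	return quic
}

func main() {
	// StringCast panics on an invalid multiaddr, which is acceptable for a sketch.
	addr := multiaddr.StringCast("/ip4/192.0.2.1/udp/13000/quic-v1")
	fmt.Println(isQuicAddr(addr)) // true
}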
|
||||
@@ -151,14 +151,14 @@ func (s *Service) sendAndSaveBlobSidecars(ctx context.Context, request types.Blo
|
||||
if len(sidecars) != len(request) {
|
||||
return fmt.Errorf("received %d blob sidecars, expected %d for RPC", len(sidecars), len(request))
|
||||
}
|
||||
bv := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.PendingQueueSidecarRequirements)
|
||||
for _, sidecar := range sidecars {
|
||||
if err := verify.BlobAlignsWithBlock(sidecar, RoBlock); err != nil {
|
||||
return err
|
||||
}
|
||||
log.WithFields(blobFields(sidecar)).Debug("Received blob sidecar RPC")
|
||||
}
|
||||
|
||||
vscs, err := verification.BlobSidecarSliceNoop(sidecars)
|
||||
vscs, err := bv.VerifiedROBlobs(ctx, RoBlock, sidecars)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -42,9 +42,10 @@ func (s *Service) goodbyeRPCHandler(_ context.Context, msg interface{}, stream l
|
||||
return fmt.Errorf("wrong message type for goodbye, got %T, wanted *uint64", msg)
|
||||
}
|
||||
if err := s.rateLimiter.validateRequest(stream, 1); err != nil {
|
||||
return err
|
||||
log.WithError(err).Debug("Goodbye message from rate-limited peer.")
|
||||
} else {
|
||||
s.rateLimiter.add(stream, 1)
|
||||
}
|
||||
s.rateLimiter.add(stream, 1)
|
||||
log := log.WithField("Reason", goodbyeMessage(*m))
|
||||
log.WithField("peer", stream.Conn().RemotePeer()).Trace("Peer has sent a goodbye message")
|
||||
s.cfg.p2p.Peers().SetNextValidTime(stream.Conn().RemotePeer(), goodByeBackoff(*m))
|
||||
|
||||
@@ -219,7 +219,7 @@ func TestSyncService_StopCleanly(t *testing.T) {
|
||||
require.NotEqual(t, 0, len(r.cfg.p2p.PubSub().GetTopics()))
|
||||
require.NotEqual(t, 0, len(r.cfg.p2p.Host().Mux().Protocols()))
|
||||
|
||||
// Both pubsub and rpc topcis should be unsubscribed.
|
||||
// Both pubsub and rpc topics should be unsubscribed.
|
||||
require.NoError(t, r.Stop())
|
||||
|
||||
// Sleep to allow pubsub topics to be deregistered.
|
||||
|
||||
@@ -3,6 +3,7 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"batch.go",
|
||||
"blob.go",
|
||||
"cache.go",
|
||||
"error.go",
|
||||
@@ -45,6 +46,7 @@ go_library(
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"batch_test.go",
|
||||
"blob_test.go",
|
||||
"cache_test.go",
|
||||
"initializer_test.go",
|
||||
@@ -69,5 +71,6 @@ go_test(
|
||||
"//testing/util:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_stretchr_testify//require:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -1,12 +1,10 @@
|
||||
package initialsync
|
||||
package verification
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/das"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
)
|
||||
@@ -20,21 +18,17 @@ var (
|
||||
ErrBatchBlockRootMismatch = errors.New("Sidecar block header root does not match signed block")
|
||||
)
|
||||
|
||||
func newBlobVerifierFromInitializer(ini *verification.Initializer) verification.NewBlobVerifier {
|
||||
return func(b blocks.ROBlob, reqs []verification.Requirement) verification.BlobVerifier {
|
||||
return ini.NewBlobVerifier(b, reqs)
|
||||
}
|
||||
}
|
||||
|
||||
func newBlobBatchVerifier(newVerifier verification.NewBlobVerifier) *BlobBatchVerifier {
|
||||
// NewBlobBatchVerifier initializes a blob batch verifier. It requires the caller to correctly specify
|
||||
// verification Requirements and to also pass in a NewBlobVerifier, which is a callback function that
|
||||
// returns a new BlobVerifier for handling a single blob in the batch.
|
||||
func NewBlobBatchVerifier(newVerifier NewBlobVerifier, reqs []Requirement) *BlobBatchVerifier {
|
||||
return &BlobBatchVerifier{
|
||||
verifyKzg: kzg.Verify,
|
||||
newVerifier: newVerifier,
|
||||
reqs: reqs,
|
||||
}
|
||||
}
|
||||
|
||||
type kzgVerifier func(b ...blocks.ROBlob) error
|
||||
|
||||
// BlobBatchVerifier solves problems that come from verifying batches of blobs from RPC.
|
||||
// First: we only update forkchoice after the entire batch has completed, so the n+1 elements in the batch
|
||||
// won't be in forkchoice yet.
|
||||
@@ -42,18 +36,17 @@ type kzgVerifier func(b ...blocks.ROBlob) error
|
||||
// method to BlobVerifier to verify the kzg commitments of all blob sidecars for a block together, then using the cached
|
||||
// result of the batch verification when verifying the individual blobs.
|
||||
type BlobBatchVerifier struct {
|
||||
verifyKzg kzgVerifier
|
||||
newVerifier verification.NewBlobVerifier
|
||||
verifyKzg roblobCommitmentVerifier
|
||||
newVerifier NewBlobVerifier
|
||||
reqs []Requirement
|
||||
}
|
||||
|
||||
var _ das.BlobBatchVerifier = &BlobBatchVerifier{}
|
||||
|
||||
// VerifiedROBlobs satisfies the das.BlobBatchVerifier interface, used by das.AvailabilityStore.
|
||||
func (batch *BlobBatchVerifier) VerifiedROBlobs(ctx context.Context, blk blocks.ROBlock, scs []blocks.ROBlob) ([]blocks.VerifiedROBlob, error) {
|
||||
if len(scs) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
// We assume the proposer was validated wrt the block in batch block processing before performing the DA check.
|
||||
|
||||
// We assume the proposer is validated wrt the block in batch block processing before performing the DA check.
|
||||
// So at this stage we just need to make sure the value being signed and signature bytes match the block.
|
||||
for i := range scs {
|
||||
if blk.Signature() != bytesutil.ToBytes96(scs[i].SignedBlockHeader.Signature) {
|
||||
@@ -71,7 +64,7 @@ func (batch *BlobBatchVerifier) VerifiedROBlobs(ctx context.Context, blk blocks.
|
||||
}
|
||||
vs := make([]blocks.VerifiedROBlob, len(scs))
|
||||
for i := range scs {
|
||||
vb, err := batch.verifyOneBlob(ctx, scs[i])
|
||||
vb, err := batch.verifyOneBlob(scs[i])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -80,13 +73,13 @@ func (batch *BlobBatchVerifier) VerifiedROBlobs(ctx context.Context, blk blocks.
|
||||
return vs, nil
|
||||
}
|
||||
|
||||
func (batch *BlobBatchVerifier) verifyOneBlob(ctx context.Context, sc blocks.ROBlob) (blocks.VerifiedROBlob, error) {
|
||||
func (batch *BlobBatchVerifier) verifyOneBlob(sc blocks.ROBlob) (blocks.VerifiedROBlob, error) {
|
||||
vb := blocks.VerifiedROBlob{}
|
||||
bv := batch.newVerifier(sc, verification.InitsyncSidecarRequirements)
|
||||
bv := batch.newVerifier(sc, batch.reqs)
|
||||
// We can satisfy the following 2 requirements immediately because VerifiedROBlobs always verifies commitments
|
||||
// and block signature for all blobs in the batch before calling verifyOneBlob.
|
||||
bv.SatisfyRequirement(verification.RequireSidecarKzgProofVerified)
|
||||
bv.SatisfyRequirement(verification.RequireValidProposerSignature)
|
||||
bv.SatisfyRequirement(RequireSidecarKzgProofVerified)
|
||||
bv.SatisfyRequirement(RequireValidProposerSignature)
|
||||
|
||||
if err := bv.BlobIndexInBounds(); err != nil {
|
||||
return vb, err
|
||||
189
beacon-chain/verification/batch_test.go
Normal file
@@ -0,0 +1,189 @@
|
||||
package verification
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestBatchVerifier(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
mockCV := func(err error) roblobCommitmentVerifier {
|
||||
return func(...blocks.ROBlob) error {
|
||||
return err
|
||||
}
|
||||
}
|
||||
var invCmtErr = errors.New("mock invalid commitment")
|
||||
type vbcbt func() (blocks.VerifiedROBlob, error)
|
||||
vbcb := func(bl blocks.ROBlob, err error) vbcbt {
|
||||
return func() (blocks.VerifiedROBlob, error) {
|
||||
return blocks.VerifiedROBlob{ROBlob: bl}, err
|
||||
}
|
||||
}
|
||||
cases := []struct {
|
||||
name string
|
||||
nv func() NewBlobVerifier
|
||||
cv roblobCommitmentVerifier
|
||||
bandb func(t *testing.T, n int) (blocks.ROBlock, []blocks.ROBlob)
|
||||
err error
|
||||
nblobs int
|
||||
reqs []Requirement
|
||||
}{
|
||||
{
|
||||
name: "no blobs",
|
||||
bandb: func(t *testing.T, nb int) (blocks.ROBlock, []blocks.ROBlob) {
|
||||
return util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 0, nb)
|
||||
},
|
||||
nv: func() NewBlobVerifier {
|
||||
return func(bl blocks.ROBlob, reqs []Requirement) BlobVerifier {
|
||||
return &MockBlobVerifier{cbVerifiedROBlob: vbcb(bl, nil)}
|
||||
}
|
||||
},
|
||||
nblobs: 0,
|
||||
},
|
||||
{
|
||||
name: "happy path",
|
||||
nv: func() NewBlobVerifier {
|
||||
return func(bl blocks.ROBlob, reqs []Requirement) BlobVerifier {
|
||||
return &MockBlobVerifier{cbVerifiedROBlob: vbcb(bl, nil)}
|
||||
}
|
||||
},
|
||||
bandb: func(t *testing.T, nb int) (blocks.ROBlock, []blocks.ROBlob) {
|
||||
return util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 0, nb)
|
||||
},
|
||||
nblobs: 3,
|
||||
},
|
||||
{
|
||||
name: "partial batch",
|
||||
nv: func() NewBlobVerifier {
|
||||
return func(bl blocks.ROBlob, reqs []Requirement) BlobVerifier {
|
||||
return &MockBlobVerifier{cbVerifiedROBlob: vbcb(bl, nil)}
|
||||
}
|
||||
},
|
||||
bandb: func(t *testing.T, nb int) (blocks.ROBlock, []blocks.ROBlob) {
|
||||
// Add extra blobs to the block that we won't return
|
||||
blk, blbs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 0, nb+3)
|
||||
return blk, blbs[0:3]
|
||||
},
|
||||
nblobs: 3,
|
||||
},
|
||||
{
|
||||
name: "invalid commitment",
|
||||
nv: func() NewBlobVerifier {
|
||||
return func(bl blocks.ROBlob, reqs []Requirement) BlobVerifier {
|
||||
return &MockBlobVerifier{cbVerifiedROBlob: func() (blocks.VerifiedROBlob, error) {
|
||||
t.Fatal("Batch verifier should stop before this point")
|
||||
return blocks.VerifiedROBlob{}, nil
|
||||
}}
|
||||
}
|
||||
},
|
||||
cv: mockCV(invCmtErr),
|
||||
bandb: func(t *testing.T, nb int) (blocks.ROBlock, []blocks.ROBlob) {
|
||||
return util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 0, nb)
|
||||
},
|
||||
err: invCmtErr,
|
||||
nblobs: 1,
|
||||
},
|
||||
{
|
||||
name: "signature mismatch",
|
||||
nv: func() NewBlobVerifier {
|
||||
return func(bl blocks.ROBlob, reqs []Requirement) BlobVerifier {
|
||||
return &MockBlobVerifier{cbVerifiedROBlob: func() (blocks.VerifiedROBlob, error) {
|
||||
t.Fatal("Batch verifier should stop before this point")
|
||||
return blocks.VerifiedROBlob{}, nil
|
||||
}}
|
||||
}
|
||||
},
|
||||
bandb: func(t *testing.T, nb int) (blocks.ROBlock, []blocks.ROBlob) {
|
||||
blk, blbs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 0, nb)
|
||||
blbs[0].SignedBlockHeader.Signature = []byte("wrong")
|
||||
return blk, blbs
|
||||
},
|
||||
err: ErrBatchSignatureMismatch,
|
||||
nblobs: 2,
|
||||
},
|
||||
{
|
||||
name: "root mismatch",
|
||||
nv: func() NewBlobVerifier {
|
||||
return func(bl blocks.ROBlob, reqs []Requirement) BlobVerifier {
|
||||
return &MockBlobVerifier{cbVerifiedROBlob: func() (blocks.VerifiedROBlob, error) {
|
||||
t.Fatal("Batch verifier should stop before this point")
|
||||
return blocks.VerifiedROBlob{}, nil
|
||||
}}
|
||||
}
|
||||
},
|
||||
bandb: func(t *testing.T, nb int) (blocks.ROBlock, []blocks.ROBlob) {
|
||||
blk, blbs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 0, nb)
|
||||
wr, err := blocks.NewROBlobWithRoot(blbs[0].BlobSidecar, bytesutil.ToBytes32([]byte("wrong")))
|
||||
require.NoError(t, err)
|
||||
blbs[0] = wr
|
||||
return blk, blbs
|
||||
},
|
||||
err: ErrBatchBlockRootMismatch,
|
||||
nblobs: 1,
|
||||
},
|
||||
{
|
||||
name: "idx oob",
|
||||
nv: func() NewBlobVerifier {
|
||||
return func(bl blocks.ROBlob, reqs []Requirement) BlobVerifier {
|
||||
return &MockBlobVerifier{
|
||||
ErrBlobIndexInBounds: ErrBlobIndexInvalid,
|
||||
cbVerifiedROBlob: func() (blocks.VerifiedROBlob, error) {
|
||||
t.Fatal("Batch verifier should stop before this point")
|
||||
return blocks.VerifiedROBlob{}, nil
|
||||
}}
|
||||
}
|
||||
},
|
||||
bandb: func(t *testing.T, nb int) (blocks.ROBlock, []blocks.ROBlob) {
|
||||
return util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 0, nb)
|
||||
},
|
||||
nblobs: 1,
|
||||
err: ErrBlobIndexInvalid,
|
||||
},
|
||||
{
|
||||
name: "inclusion proof invalid",
|
||||
nv: func() NewBlobVerifier {
|
||||
return func(bl blocks.ROBlob, reqs []Requirement) BlobVerifier {
|
||||
return &MockBlobVerifier{
|
||||
ErrSidecarInclusionProven: ErrSidecarInclusionProofInvalid,
|
||||
cbVerifiedROBlob: func() (blocks.VerifiedROBlob, error) {
|
||||
t.Fatal("Batch verifier should stop before this point")
|
||||
return blocks.VerifiedROBlob{}, nil
|
||||
}}
|
||||
}
|
||||
},
|
||||
bandb: func(t *testing.T, nb int) (blocks.ROBlock, []blocks.ROBlob) {
|
||||
return util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 0, nb)
|
||||
},
|
||||
nblobs: 1,
|
||||
err: ErrSidecarInclusionProofInvalid,
|
||||
},
|
||||
}
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
blk, blbs := c.bandb(t, c.nblobs)
|
||||
reqs := c.reqs
|
||||
if reqs == nil {
|
||||
reqs = InitsyncSidecarRequirements
|
||||
}
|
||||
bbv := NewBlobBatchVerifier(c.nv(), reqs)
|
||||
if c.cv == nil {
|
||||
bbv.verifyKzg = mockCV(nil)
|
||||
} else {
|
||||
bbv.verifyKzg = c.cv
|
||||
}
|
||||
vb, err := bbv.VerifiedROBlobs(ctx, blk, blbs)
|
||||
if c.err != nil {
|
||||
require.ErrorIs(t, err, c.err)
|
||||
return
|
||||
}
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, c.nblobs, len(vb))
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -70,6 +70,9 @@ var InitsyncSidecarRequirements = requirementList(GossipSidecarRequirements).exc
|
||||
// BackfillSidecarRequirements is the same as InitsyncSidecarRequirements.
|
||||
var BackfillSidecarRequirements = requirementList(InitsyncSidecarRequirements).excluding()
|
||||
|
||||
// PendingQueueSidecarRequirements is the same as InitsyncSidecarRequirements, used by the pending blocks queue.
|
||||
var PendingQueueSidecarRequirements = requirementList(InitsyncSidecarRequirements).excluding()
|
||||
|
||||
var (
|
||||
ErrBlobInvalid = errors.New("blob failed verification")
|
||||
// ErrBlobIndexInvalid means RequireBlobIndexInBounds failed.
|
||||
@@ -123,7 +126,7 @@ func (bv *ROBlobVerifier) VerifiedROBlob() (blocks.VerifiedROBlob, error) {
|
||||
// For example, when batch syncing, forkchoice is only updated at the end of the batch. So the checks that use
|
||||
// forkchoice, like descends from finalized or parent seen, would necessarily fail. Allowing the caller to
|
||||
// assert the requirement has been satisfied ensures we have an easy way to audit which piece of code is satisfying
|
||||
// a requireent outside of this package.
|
||||
// a requirement outside of this package.
|
||||
func (bv *ROBlobVerifier) SatisfyRequirement(req Requirement) {
|
||||
bv.recordResult(req, nil)
|
||||
}
|
||||
@@ -160,6 +163,7 @@ func (bv *ROBlobVerifier) NotFromFutureSlot() (err error) {
|
||||
earliestStart := bv.clock.SlotStart(bv.blob.Slot()).Add(-1 * params.BeaconConfig().MaximumGossipClockDisparityDuration())
|
||||
// If the system time is still before earliestStart, we consider the blob from a future slot and return an error.
|
||||
if bv.clock.Now().Before(earliestStart) {
|
||||
log.WithFields(logging.BlobFields(bv.blob)).Debug("sidecar slot is too far in the future")
|
||||
return ErrFromFutureSlot
|
||||
}
|
||||
return nil
|
||||
@@ -176,6 +180,7 @@ func (bv *ROBlobVerifier) SlotAboveFinalized() (err error) {
|
||||
return errors.Wrapf(ErrSlotNotAfterFinalized, "error computing epoch start slot for finalized checkpoint (%d) %s", fcp.Epoch, err.Error())
|
||||
}
|
||||
if bv.blob.Slot() <= fSlot {
|
||||
log.WithFields(logging.BlobFields(bv.blob)).Debug("sidecar slot is not after finalized checkpoint")
|
||||
return ErrSlotNotAfterFinalized
|
||||
}
|
||||
return nil
|
||||
@@ -225,6 +230,7 @@ func (bv *ROBlobVerifier) SidecarParentSeen(parentSeen func([32]byte) bool) (err
|
||||
if bv.fc.HasNode(bv.blob.ParentRoot()) {
|
||||
return nil
|
||||
}
|
||||
log.WithFields(logging.BlobFields(bv.blob)).Debug("parent root has not been seen")
|
||||
return ErrSidecarParentNotSeen
|
||||
}
|
||||
|
||||
@@ -233,6 +239,7 @@ func (bv *ROBlobVerifier) SidecarParentSeen(parentSeen func([32]byte) bool) (err
|
||||
func (bv *ROBlobVerifier) SidecarParentValid(badParent func([32]byte) bool) (err error) {
|
||||
defer bv.recordResult(RequireSidecarParentValid, &err)
|
||||
if badParent != nil && badParent(bv.blob.ParentRoot()) {
|
||||
log.WithFields(logging.BlobFields(bv.blob)).Debug("parent root is invalid")
|
||||
return ErrSidecarParentInvalid
|
||||
}
|
||||
return nil
|
||||
@@ -258,6 +265,7 @@ func (bv *ROBlobVerifier) SidecarParentSlotLower() (err error) {
|
||||
func (bv *ROBlobVerifier) SidecarDescendsFromFinalized() (err error) {
|
||||
defer bv.recordResult(RequireSidecarDescendsFromFinalized, &err)
|
||||
if !bv.fc.HasNode(bv.blob.ParentRoot()) {
|
||||
log.WithFields(logging.BlobFields(bv.blob)).Debug("parent root not in forkchoice")
|
||||
return ErrSidecarNotFinalizedDescendent
|
||||
}
|
||||
return nil
|
||||
|
||||
@@ -39,7 +39,7 @@ func TestResultList(t *testing.T) {
|
||||
func TestExportedBlobSanityCheck(t *testing.T) {
|
||||
// make sure all requirement lists contain the bare minimum checks
|
||||
sanity := []Requirement{RequireValidProposerSignature, RequireSidecarKzgProofVerified, RequireBlobIndexInBounds, RequireSidecarInclusionProven}
|
||||
reqs := [][]Requirement{GossipSidecarRequirements, SpectestSidecarRequirements, InitsyncSidecarRequirements, BackfillSidecarRequirements}
|
||||
reqs := [][]Requirement{GossipSidecarRequirements, SpectestSidecarRequirements, InitsyncSidecarRequirements, BackfillSidecarRequirements, PendingQueueSidecarRequirements}
|
||||
for i := range reqs {
|
||||
r := reqs[i]
|
||||
reqMap := make(map[Requirement]struct{})
|
||||
|
||||
@@ -29,6 +29,7 @@ var (
|
||||
Name: "local-block-value-boost",
|
||||
Usage: "A percentage boost for local block construction as a Uint64. This is used to prioritize local block construction over relay/builder block construction" +
|
||||
"Boost is an additional percentage to multiple local block value. Use builder block if: builder_bid_value * 100 > local_block_value * (local-block-value-boost + 100)",
|
||||
Value: 10,
|
||||
}
|
||||
// ExecutionEngineEndpoint provides an HTTP access endpoint to connect to an execution client on the execution layer
|
||||
ExecutionEngineEndpoint = &cli.StringFlag{
|
||||
|
||||
@@ -90,6 +90,7 @@ var appFlags = []cli.Flag{
|
||||
cmd.StaticPeers,
|
||||
cmd.RelayNode,
|
||||
cmd.P2PUDPPort,
|
||||
cmd.P2PQUICPort,
|
||||
cmd.P2PTCPPort,
|
||||
cmd.P2PIP,
|
||||
cmd.P2PHost,
|
||||
|
||||
@@ -9,7 +9,7 @@ var (
|
||||
backfillWorkerCountName = "backfill-worker-count"
|
||||
|
||||
// EnableExperimentalBackfill enables backfill for checkpoint synced nodes.
|
||||
// This flag will be removed onced backfill is enabled by default.
|
||||
// This flag will be removed once backfill is enabled by default.
|
||||
EnableExperimentalBackfill = &cli.BoolFlag{
|
||||
Name: "enable-experimental-backfill",
|
||||
Usage: "Backfill is still experimental at this time. " +
|
||||
|
||||
@@ -55,6 +55,7 @@ var appHelpFlagGroups = []flagGroup{
|
||||
cmd.BootstrapNode,
|
||||
cmd.RelayNode,
|
||||
cmd.P2PUDPPort,
|
||||
cmd.P2PQUICPort,
|
||||
cmd.P2PTCPPort,
|
||||
cmd.DataDirFlag,
|
||||
cmd.VerbosityFlag,
|
||||
|
||||
12
cmd/flags.go
@@ -112,13 +112,19 @@ var (
|
||||
// P2PUDPPort defines the port to be used by discv5.
|
||||
P2PUDPPort = &cli.IntFlag{
|
||||
Name: "p2p-udp-port",
|
||||
Usage: "The port used by discv5.",
|
||||
Usage: "The UDP port used by the discovery service discv5.",
|
||||
Value: 12000,
|
||||
}
|
||||
// P2PTCPPort defines the port to be used by libp2p.
|
||||
// P2PQUICPort defines the QUIC port to be used by libp2p.
|
||||
P2PQUICPort = &cli.IntFlag{
|
||||
Name: "p2p-quic-port",
|
||||
Usage: "The QUIC port used by libp2p.",
|
||||
Value: 13000,
|
||||
}
|
||||
// P2PTCPPort defines the TCP port to be used by libp2p.
|
||||
P2PTCPPort = &cli.IntFlag{
|
||||
Name: "p2p-tcp-port",
|
||||
Usage: "The port used by libp2p.",
|
||||
Usage: "The TCP port used by libp2p.",
|
||||
Value: 13000,
|
||||
}
|
||||
// P2PIP defines the local IP to be used by libp2p.
|
||||
|
||||
@@ -6,13 +6,18 @@ go_library(
|
||||
"buckets.go",
|
||||
"cmd.go",
|
||||
"query.go",
|
||||
"span.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/cmd/prysmctl/db",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//beacon-chain/db/kv:go_default_library",
|
||||
"//beacon-chain/slasher:go_default_library",
|
||||
"//beacon-chain/slasher/types:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_jedib0t_go_pretty_v6//table:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_urfave_cli_v2//:go_default_library",
|
||||
|
||||
@@ -9,6 +9,7 @@ var Commands = []*cli.Command{
|
||||
Subcommands: []*cli.Command{
|
||||
queryCmd,
|
||||
bucketsCmd,
|
||||
spanCmd,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
304
cmd/prysmctl/db/span.go
Normal file
@@ -0,0 +1,304 @@
|
||||
package db
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/jedib0t/go-pretty/v6/table"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/slasher"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/slasher/types"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
const DefaultChunkKind = types.MinSpan
|
||||
|
||||
var (
|
||||
f = struct {
|
||||
Path string
|
||||
ValidatorIndex uint64
|
||||
Epoch uint64
|
||||
ChunkKind string
|
||||
ChunkSize uint64
|
||||
ValidatorChunkSize uint64
|
||||
HistoryLength uint64
|
||||
IsDisplayAllValidatorsInChunk bool
|
||||
IsDisplayAllEpochsInChunk bool
|
||||
}{}
|
||||
|
||||
slasherDefaultParams = slasher.DefaultParams()
|
||||
)
|
||||
|
||||
var spanCmd = &cli.Command{
|
||||
Name: "slasher-span-display",
|
||||
Usage: "visualise values in db span bucket",
|
||||
Action: func(c *cli.Context) error {
|
||||
if err := spanAction(c); err != nil {
|
||||
return errors.Wrapf(err, "visualise values in db span bucket failed")
|
||||
}
|
||||
return nil
|
||||
},
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "db-path-directory",
|
||||
Usage: "path to directory containing slasher.db",
|
||||
Destination: &f.Path,
|
||||
Required: true,
|
||||
},
|
||||
&cli.Uint64Flag{
|
||||
Name: "validator-index",
|
||||
Usage: "filter by validator index",
|
||||
Destination: &f.ValidatorIndex,
|
||||
Required: true,
|
||||
},
|
||||
&cli.Uint64Flag{
|
||||
Name: "epoch",
|
||||
Usage: "filter by epoch",
|
||||
Destination: &f.Epoch,
|
||||
Required: true,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "chunk-kind",
|
||||
Usage: "chunk kind to query (maxspan|minspan)",
|
||||
Destination: &f.ChunkKind,
|
||||
Value: DefaultChunkKind.String(),
|
||||
DefaultText: DefaultChunkKind.String(),
|
||||
},
|
||||
&cli.Uint64Flag{
|
||||
Name: "chunk-size",
|
||||
Usage: "chunk size to query",
|
||||
Destination: &f.ChunkSize,
|
||||
DefaultText: fmt.Sprintf("%d", slasherDefaultParams.ChunkSize()),
|
||||
},
|
||||
&cli.Uint64Flag{
|
||||
Name: "validator-chunk-size",
|
||||
Usage: "validator chunk size to query",
|
||||
Destination: &f.ValidatorChunkSize,
|
||||
DefaultText: fmt.Sprintf("%d", slasherDefaultParams.ValidatorChunkSize()),
|
||||
},
|
||||
&cli.Uint64Flag{
|
||||
Name: "history-length",
|
||||
Usage: "history length to query",
|
||||
Destination: &f.HistoryLength,
|
||||
DefaultText: fmt.Sprintf("%d", slasherDefaultParams.HistoryLength()),
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "display-all-validators-in-chunk",
|
||||
Usage: "display all validators in chunk",
|
||||
Destination: &f.IsDisplayAllValidatorsInChunk,
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "display-all-epochs-in-chunk",
|
||||
Usage: "display all epochs in chunk",
|
||||
Destination: &f.IsDisplayAllEpochsInChunk,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
func spanAction(cliCtx *cli.Context) error {
|
||||
var (
|
||||
chunk slasher.Chunker
|
||||
validatorChunkIdx uint64
|
||||
lastEpochForValidatorIndex primitives.Epoch
|
||||
|
||||
err error
|
||||
)
|
||||
|
||||
// context
|
||||
ctx := cliCtx.Context
|
||||
|
||||
// variables
|
||||
chunkKind := getChunkKind()
|
||||
params := getSlasherParams()
|
||||
i := primitives.ValidatorIndex(f.ValidatorIndex)
|
||||
epoch := primitives.Epoch(f.Epoch)
|
||||
|
||||
// display configuration
|
||||
fmt.Printf("############################# SLASHER PARAMS ###############################\n")
|
||||
fmt.Printf("# Chunk Size: %d\n", params.ChunkSize())
|
||||
fmt.Printf("# Validator Chunk Size: %d\n", params.ValidatorChunkSize())
|
||||
fmt.Printf("# History Length: %d\n", params.HistoryLength())
|
||||
fmt.Printf("# DB: %s\n", f.Path)
|
||||
fmt.Printf("# Chunk Kind: %s\n", chunkKind)
|
||||
fmt.Printf("# Validator: %d\n", i)
|
||||
fmt.Printf("# Epoch: %d\n", epoch)
|
||||
fmt.Printf("############################################################################\n")
|
||||
|
||||
// fetch chunk in database
|
||||
if lastEpochForValidatorIndex, _, validatorChunkIdx, chunk, err = slasher.GetChunkFromDatabase(
|
||||
ctx,
|
||||
f.Path,
|
||||
slasher.GetChunkFromDatabaseFilters{
|
||||
ChunkKind: chunkKind,
|
||||
ValidatorIndex: i,
|
||||
SourceEpoch: epoch,
|
||||
IsDisplayAllValidatorsInChunk: f.IsDisplayAllValidatorsInChunk,
|
||||
IsDisplayAllEpochsInChunk: f.IsDisplayAllEpochsInChunk,
|
||||
},
|
||||
params,
|
||||
); err != nil {
|
||||
return errors.Wrapf(err, "could not get chunk from database")
|
||||
}
|
||||
|
||||
// fetch information related to chunk
|
||||
fmt.Printf("\n################################ CHUNK #####################################\n")
|
||||
firstValidator := params.ValidatorIndexesInChunk(validatorChunkIdx)[0]
|
||||
firstEpoch := epoch - (epoch.Mod(params.ChunkSize()))
|
||||
fmt.Printf("# First validator in chunk: %d\n", firstValidator)
|
||||
fmt.Printf("# First epoch in chunk: %d\n\n", firstEpoch)
|
||||
fmt.Printf("# Last epoch found in database for validator(%d): %d\n", i, lastEpochForValidatorIndex)
|
||||
fmt.Printf("############################################################################\n\n")
|
||||
|
||||
// init table
|
||||
tw := table.NewWriter()
|
||||
|
||||
minLowerBound := lastEpochForValidatorIndex.Sub(uint64(params.HistoryLength()))
|
||||
if f.IsDisplayAllValidatorsInChunk {
|
||||
if f.IsDisplayAllEpochsInChunk {
|
||||
// display all validators and epochs in chunk
|
||||
|
||||
// headers
|
||||
addEpochsHeader(tw, params.ChunkSize(), firstEpoch)
|
||||
|
||||
// rows
|
||||
b := chunk.Chunk()
|
||||
c := uint64(0)
|
||||
for z := uint64(0); z < uint64(len(b)); z += params.ChunkSize() {
|
||||
end := z + params.ChunkSize()
|
||||
if end > uint64(len(b)) {
|
||||
end = uint64(len(b))
|
||||
}
|
||||
subChunk := b[z:end]
|
||||
|
||||
row := make(table.Row, params.ChunkSize()+1)
|
||||
title := firstValidator + primitives.ValidatorIndex(c)
|
||||
row[0] = title
|
||||
for y, span := range subChunk {
|
||||
row[y+1] = getSpanOrNonApplicable(firstEpoch, y, minLowerBound, lastEpochForValidatorIndex, span)
|
||||
}
|
||||
tw.AppendRow(row)
|
||||
|
||||
c++
|
||||
}
|
||||
} else {
|
||||
// display all validators but only the requested epoch in chunk
|
||||
indexEpochInChunk := epoch - firstEpoch
|
||||
|
||||
// headers
|
||||
addEpochsHeader(tw, 1, firstEpoch)
|
||||
|
||||
// rows
|
||||
b := chunk.Chunk()
|
||||
c := uint64(0)
|
||||
for z := uint64(0); z < uint64(len(b)); z += params.ChunkSize() {
|
||||
end := z + params.ChunkSize()
|
||||
if end > uint64(len(b)) {
|
||||
end = uint64(len(b))
|
||||
}
|
||||
subChunk := b[z:end]
|
||||
|
||||
row := make(table.Row, 2)
|
||||
title := firstValidator + primitives.ValidatorIndex(c)
|
||||
row[0] = title
|
||||
row[1] = subChunk[indexEpochInChunk]
|
||||
tw.AppendRow(row)
|
||||
|
||||
c++
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if f.IsDisplayAllEpochsInChunk {
|
||||
// display only the requested validator with all epochs in chunk
|
||||
|
||||
// headers
|
||||
addEpochsHeader(tw, params.ChunkSize(), firstEpoch)
|
||||
|
||||
// rows
|
||||
b := chunk.Chunk()
|
||||
validatorFirstEpochIdx := uint64(i.Mod(params.ValidatorChunkSize())) * params.ChunkSize()
|
||||
subChunk := b[validatorFirstEpochIdx : validatorFirstEpochIdx+params.ChunkSize()]
|
||||
row := make(table.Row, params.ChunkSize()+1)
|
||||
title := i
|
||||
row[0] = title
|
||||
for y, span := range subChunk {
|
||||
row[y+1] = getSpanOrNonApplicable(firstEpoch, y, minLowerBound, lastEpochForValidatorIndex, span)
|
||||
}
|
||||
tw.AppendRow(row)
|
||||
} else {
|
||||
// display only the requested validator and epoch in chunk
|
||||
|
||||
// headers
|
||||
addEpochsHeader(tw, 1, epoch)
|
||||
|
||||
// rows
|
||||
b := chunk.Chunk()
|
||||
validatorFirstEpochIdx := uint64(i.Mod(params.ValidatorChunkSize())) * params.ChunkSize()
|
||||
subChunk := b[validatorFirstEpochIdx : validatorFirstEpochIdx+params.ChunkSize()]
|
||||
row := make(table.Row, 2)
|
||||
title := i
|
||||
row[0] = title
|
||||
indexEpochInChunk := epoch - firstEpoch
|
||||
row[1] = subChunk[indexEpochInChunk]
|
||||
tw.AppendRow(row)
|
||||
}
|
||||
}
|
||||
|
||||
// display table
|
||||
displayTable(tw)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// getSpanOrNonApplicable checks whether an epoch in the chunk no longer holds a valid value, due to the round-robin
// nature of 2D chunking, where an epoch gets overwritten by a later one, e.g. (params.historyLength + next_epoch) > params.historyLength.
// If the epoch is out of range, a "-" placeholder is returned instead of the span value.
func getSpanOrNonApplicable(firstEpoch primitives.Epoch, y int, minLowerBound primitives.Epoch, lastEpochForValidatorIndex primitives.Epoch, span uint16) string {
    if firstEpoch.Add(uint64(y)) < minLowerBound || firstEpoch.Add(uint64(y)) > lastEpochForValidatorIndex {
        return "-"
    }
    return fmt.Sprintf("%d", span)
}

func displayTable(tw table.Writer) {
    tw.AppendSeparator()
    fmt.Println(tw.Render())
}

func addEpochsHeader(tw table.Writer, nbEpoch uint64, firstEpoch primitives.Epoch) {
    header := table.Row{"Validator / Epoch"}
    for y := 0; uint64(y) < nbEpoch; y++ {
        header = append(header, firstEpoch+primitives.Epoch(y))
    }
    tw.AppendHeader(header)
}

func getChunkKind() types.ChunkKind {
    chunkKind := types.MinSpan
    if f.ChunkKind == "maxspan" {
        chunkKind = types.MaxSpan
    }
    return chunkKind
}

func getSlasherParams() *slasher.Parameters {
    var (
        chunkSize, validatorChunkSize uint64
        historyLength primitives.Epoch
    )
    if f.ChunkSize != 0 && f.ChunkSize != slasherDefaultParams.ChunkSize() {
        chunkSize = f.ChunkSize
    } else {
        chunkSize = slasherDefaultParams.ChunkSize()
    }
    if f.ValidatorChunkSize != 0 && f.ValidatorChunkSize != slasherDefaultParams.ValidatorChunkSize() {
        validatorChunkSize = f.ValidatorChunkSize
    } else {
        validatorChunkSize = slasherDefaultParams.ValidatorChunkSize()
    }
    if f.HistoryLength != 0 && f.HistoryLength != uint64(slasherDefaultParams.HistoryLength()) {
        historyLength = primitives.Epoch(f.HistoryLength)
    } else {
        historyLength = slasherDefaultParams.HistoryLength()
    }
    return slasher.NewParams(chunkSize, validatorChunkSize, historyLength)
}
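
To make the chunk indexing in spanAction easier to follow, here is a minimal, self-contained sketch of the same arithmetic with plain integers. The parameter values (chunk size 16, validator chunk size 256) and the sample indices are illustrative only; the real defaults come from slasher.DefaultParams() and are printed by the tool itself.

package main

import "fmt"

func main() {
    // Illustrative parameters, not necessarily the slasher defaults.
    const (
        chunkSize          = 16  // epochs per chunk row
        validatorChunkSize = 256 // validators per chunk
    )

    validatorIndex := uint64(1000)
    epoch := uint64(100)

    // Which validator chunk the validator falls into, and the first validator of that chunk.
    validatorChunkIdx := validatorIndex / validatorChunkSize
    firstValidatorInChunk := validatorChunkIdx * validatorChunkSize

    // First epoch covered by the chunk row containing `epoch`
    // (mirrors: firstEpoch := epoch - epoch.Mod(params.ChunkSize())).
    firstEpochInChunk := epoch - epoch%chunkSize

    // Offset of the validator's first cell inside the flat chunk slice
    // (mirrors: uint64(i.Mod(params.ValidatorChunkSize())) * params.ChunkSize()).
    validatorFirstEpochIdx := (validatorIndex % validatorChunkSize) * chunkSize

    fmt.Println("validator chunk index:", validatorChunkIdx)           // 3
    fmt.Println("first validator in chunk:", firstValidatorInChunk)    // 768
    fmt.Println("first epoch in chunk:", firstEpochInChunk)            // 96
    fmt.Println("offset in flat chunk slice:", validatorFirstEpochIdx) // 3712
}
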
@@ -43,6 +43,7 @@ go_library(
        "@com_github_libp2p_go_libp2p//core/peer:go_default_library",
        "@com_github_libp2p_go_libp2p//core/protocol:go_default_library",
        "@com_github_libp2p_go_libp2p//p2p/security/noise:go_default_library",
        "@com_github_libp2p_go_libp2p//p2p/transport/quic:go_default_library",
        "@com_github_libp2p_go_libp2p//p2p/transport/tcp:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_prysmaticlabs_fastssz//:go_default_library",

@@ -14,7 +14,8 @@ import (
    "github.com/libp2p/go-libp2p/core/peer"
    "github.com/libp2p/go-libp2p/core/protocol"
    "github.com/libp2p/go-libp2p/p2p/security/noise"
    "github.com/libp2p/go-libp2p/p2p/transport/tcp"
    libp2pquic "github.com/libp2p/go-libp2p/p2p/transport/quic"
    libp2ptcp "github.com/libp2p/go-libp2p/p2p/transport/tcp"
    "github.com/pkg/errors"
    ssz "github.com/prysmaticlabs/fastssz"
    "github.com/prysmaticlabs/go-bitfield"
@@ -43,7 +44,7 @@ type client struct {
    nodeClient pb.NodeClient
}

func newClient(beaconEndpoints []string, clientPort uint) (*client, error) {
func newClient(beaconEndpoints []string, tcpPort, quicPort uint) (*client, error) {
    ipAdd := ipAddr()
    priv, err := privKey()
    if err != nil {
@@ -53,15 +54,16 @@ func newClient(beaconEndpoints []string, clientPort uint) (*client, error) {
    if err != nil {
        return nil, errors.Wrap(err, "could not set up p2p metadata")
    }
    listen, err := p2p.MultiAddressBuilder(ipAdd.String(), clientPort)
    multiaddrs, err := p2p.MultiAddressBuilder(ipAdd, tcpPort, quicPort)
    if err != nil {
        return nil, errors.Wrap(err, "could not set up listening multiaddr")
    }
    options := []libp2p.Option{
        privKeyOption(priv),
        libp2p.ListenAddrs(listen),
        libp2p.ListenAddrs(multiaddrs...),
        libp2p.UserAgent(version.BuildData()),
        libp2p.Transport(tcp.NewTCPTransport),
        libp2p.Transport(libp2pquic.NewTransport),
        libp2p.Transport(libp2ptcp.NewTCPTransport),
    }
    options = append(options, libp2p.Security(noise.ID, noise.New))
    options = append(options, libp2p.Ping(false))

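For context, a minimal standalone sketch of a libp2p host that listens on both TCP and QUIC, mirroring the option set newClient builds above. This is not the prysmctl code itself: the listen addresses and port are illustrative, and the /quic-v1 multiaddr component assumes a recent go-libp2p release.

package main

import (
    "fmt"

    "github.com/libp2p/go-libp2p"
    "github.com/libp2p/go-libp2p/p2p/security/noise"
    libp2pquic "github.com/libp2p/go-libp2p/p2p/transport/quic"
    libp2ptcp "github.com/libp2p/go-libp2p/p2p/transport/tcp"
)

func main() {
    h, err := libp2p.New(
        // One listen address per transport: TCP, and QUIC over UDP.
        libp2p.ListenAddrStrings(
            "/ip4/0.0.0.0/tcp/13001",
            "/ip4/0.0.0.0/udp/13001/quic-v1",
        ),
        libp2p.Transport(libp2ptcp.NewTCPTransport),
        libp2p.Transport(libp2pquic.NewTransport),
        libp2p.Security(noise.ID, noise.New),
    )
    if err != nil {
        panic(err)
    }
    defer h.Close()
    fmt.Println("listening on:", h.Addrs())
}
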
@@ -22,11 +22,12 @@ import (
)

var requestBlobsFlags = struct {
    Peers string
    ClientPort uint
    APIEndpoints string
    StartSlot uint64
    Count uint64
    Peers string
    ClientPortTCP uint
    ClientPortQUIC uint
    APIEndpoints string
    StartSlot uint64
    Count uint64
}{}

var requestBlobsCmd = &cli.Command{
@@ -47,9 +48,16 @@ var requestBlobsCmd = &cli.Command{
            Value: "",
        },
        &cli.UintFlag{
            Name: "client-port",
            Usage: "port to use for the client as a libp2p host",
            Destination: &requestBlobsFlags.ClientPort,
            Name: "client-port-tcp",
            Aliases: []string{"client-port"},
            Usage: "TCP port to use for the client as a libp2p host",
            Destination: &requestBlobsFlags.ClientPortTCP,
            Value: 13001,
        },
        &cli.UintFlag{
            Name: "client-port-quic",
            Usage: "QUIC port to use for the client as a libp2p host",
            Destination: &requestBlobsFlags.ClientPortQUIC,
            Value: 13001,
        },
        &cli.StringFlag{
@@ -60,13 +68,13 @@ var requestBlobsCmd = &cli.Command{
        },
        &cli.Uint64Flag{
            Name: "start-slot",
            Usage: "start slot for blocks by range request. If unset, will use start_slot(current_epoch-1)",
            Usage: "start slot for blobs by range request. If unset, will use start_slot(current_epoch-1)",
            Destination: &requestBlobsFlags.StartSlot,
            Value: 0,
        },
        &cli.Uint64Flag{
            Name: "count",
            Usage: "number of blocks to request, (default 32)",
            Usage: "number of blobs to request, (default 32)",
            Destination: &requestBlobsFlags.Count,
            Value: 32,
        },
@@ -90,7 +98,7 @@ func cliActionRequestBlobs(cliCtx *cli.Context) error {
        allAPIEndpoints = strings.Split(requestBlobsFlags.APIEndpoints, ",")
    }
    var err error
    c, err := newClient(allAPIEndpoints, requestBlobsFlags.ClientPort)
    c, err := newClient(allAPIEndpoints, requestBlobsFlags.ClientPortTCP, requestBlobsFlags.ClientPortQUIC)
    if err != nil {
        return err
    }

@@ -23,12 +23,14 @@ import (
)

var requestBlocksFlags = struct {
    Peers string
    ClientPort uint
    APIEndpoints string
    StartSlot uint64
    Count uint64
    Step uint64
    Network string
    Peers string
    ClientPortTCP uint
    ClientPortQUIC uint
    APIEndpoints string
    StartSlot uint64
    Count uint64
    Step uint64
}{}

var requestBlocksCmd = &cli.Command{
@@ -42,6 +44,12 @@ var requestBlocksCmd = &cli.Command{
    },
    Flags: []cli.Flag{
        cmd.ChainConfigFileFlag,
        &cli.StringFlag{
            Name: "network",
            Usage: "network to run on (mainnet, sepolia, holesky)",
            Destination: &requestBlocksFlags.Network,
            Value: "mainnet",
        },
        &cli.StringFlag{
            Name: "peer-multiaddrs",
            Usage: "comma-separated, peer multiaddr(s) to connect to for p2p requests",
@@ -49,9 +57,16 @@ var requestBlocksCmd = &cli.Command{
            Value: "",
        },
        &cli.UintFlag{
            Name: "client-port",
            Usage: "port to use for the client as a libp2p host",
            Destination: &requestBlocksFlags.ClientPort,
            Name: "client-port-tcp",
            Aliases: []string{"client-port"},
            Usage: "TCP port to use for the client as a libp2p host",
            Destination: &requestBlocksFlags.ClientPortTCP,
            Value: 13001,
        },
        &cli.UintFlag{
            Name: "client-port-quic",
            Usage: "QUIC port to use for the client as a libp2p host",
            Destination: &requestBlocksFlags.ClientPortQUIC,
            Value: 13001,
        },
        &cli.StringFlag{
@@ -82,6 +97,21 @@ var requestBlocksCmd = &cli.Command{
}

func cliActionRequestBlocks(cliCtx *cli.Context) error {
    switch requestBlocksFlags.Network {
    case params.SepoliaName:
        if err := params.SetActive(params.SepoliaConfig()); err != nil {
            log.Fatal(err)
        }
    case params.HoleskyName:
        if err := params.SetActive(params.HoleskyConfig()); err != nil {
            log.Fatal(err)
        }
    case params.MainnetName:
        // Do nothing
    default:
        log.Fatalf("Unknown network provided: %s", requestBlocksFlags.Network)
    }

    if cliCtx.IsSet(cmd.ChainConfigFileFlag.Name) {
        chainConfigFileName := cliCtx.String(cmd.ChainConfigFileFlag.Name)
        if err := params.LoadChainConfigFile(chainConfigFileName, nil); err != nil {
@@ -98,7 +128,7 @@ func cliActionRequestBlocks(cliCtx *cli.Context) error {
        allAPIEndpoints = strings.Split(requestBlocksFlags.APIEndpoints, ",")
    }
    var err error
    c, err := newClient(allAPIEndpoints, requestBlocksFlags.ClientPort)
    c, err := newClient(allAPIEndpoints, requestBlocksFlags.ClientPortTCP, requestBlocksFlags.ClientPortQUIC)
    if err != nil {
        return err
    }

@@ -312,7 +312,7 @@ var (
        Usage: "(Work in progress): Enables the web portal for the validator client.",
        Value: false,
    }
    // SlashingProtectionExportDirFlag allows specifying the outpt directory
    // SlashingProtectionExportDirFlag allows specifying the output directory
    // for a validator's slashing protection history.
    SlashingProtectionExportDirFlag = &cli.StringFlag{
        Name: "slashing-protection-export-dir",

@@ -1,6 +1,7 @@
package main

import (
    "slices"
    "testing"

    "github.com/prysmaticlabs/prysm/v5/config/features"
@@ -34,10 +35,7 @@ func TestAllFlagsExistInHelp(t *testing.T) {
}

func doesFlagExist(flag cli.Flag, flags []cli.Flag) bool {
    for _, f := range flags {
        if f.String() == flag.String() {
            return true
        }
    }
    return false
    return slices.ContainsFunc(flags, func(f cli.Flag) bool {
        return f.String() == flag.String()
    })
}

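The refactor above swaps a hand-written loop for the slices package introduced in Go 1.21. A tiny standalone illustration of the same pattern, using plain strings rather than cli.Flag values:

package main

import (
    "fmt"
    "slices"
)

func main() {
    flags := []string{"--datadir", "--verbosity", "--network"}
    want := "--network"

    // slices.ContainsFunc reports whether any element satisfies the predicate,
    // exactly like the removed for-loop that returned true on the first match.
    found := slices.ContainsFunc(flags, func(f string) bool { return f == want })
    fmt.Println(found) // true
}
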
@@ -109,8 +109,8 @@ var (
    }
    enableDoppelGangerProtection = &cli.BoolFlag{
        Name: "enable-doppelganger",
        Usage: `Enables the validator to perform a doppelganger check.
        This is not "a foolproof method to find duplicate instances in the network.
        Usage: `Enables the validator to perform a doppelganger check.
        This is not a foolproof method to find duplicate instances in the network.
        Your validator will still be vulnerable if it is being run in unsafe configurations.`,
    }
    disableStakinContractCheck = &cli.BoolFlag{

@@ -7,6 +7,7 @@ import (
    "path"
    "path/filepath"
    "reflect"
    "slices"
    "sort"
    "strings"
    "testing"
@@ -401,10 +402,5 @@ func assertYamlFieldsMatch(t *testing.T, name string, fields []string, c1, c2 *p
}

func isPlaceholderField(field string) bool {
    for _, f := range placeholderFields {
        if f == field {
            return true
        }
    }
    return false
    return slices.Contains(placeholderFields, field)
}

Some files were not shown because too many files have changed in this diff.