Fixed spelling mistakes in comments (#13833)

Sammy Rosso
2024-04-01 13:12:20 +02:00
committed by GitHub
parent 38f208d70d
commit 2b4bb5d890
33 changed files with 55 additions and 55 deletions

View File

@@ -88,7 +88,7 @@ func TestToggle(t *testing.T) {
}
}
-func TestToogleMultipleTimes(t *testing.T) {
+func TestToggleMultipleTimes(t *testing.T) {
t.Parallel()
v := New()
@@ -101,16 +101,16 @@ func TestToogleMultipleTimes(t *testing.T) {
expected := i%2 != 0
if v.IsSet() != expected {
-t.Fatalf("AtomicBool.Toogle() doesn't work after %d calls, expected: %v, got %v", i, expected, v.IsSet())
+t.Fatalf("AtomicBool.Toggle() doesn't work after %d calls, expected: %v, got %v", i, expected, v.IsSet())
}
if pre == v.IsSet() {
-t.Fatalf("AtomicBool.Toogle() returned wrong value at the %dth calls, expected: %v, got %v", i, !v.IsSet(), pre)
+t.Fatalf("AtomicBool.Toggle() returned wrong value at the %dth calls, expected: %v, got %v", i, !v.IsSet(), pre)
}
}
}
-func TestToogleAfterOverflow(t *testing.T) {
+func TestToggleAfterOverflow(t *testing.T) {
t.Parallel()
var value int32 = math.MaxInt32
@@ -122,7 +122,7 @@ func TestToogleAfterOverflow(t *testing.T) {
v.Toggle()
expected := math.MaxInt32%2 == 0
if v.IsSet() != expected {
-t.Fatalf("AtomicBool.Toogle() doesn't work after overflow, expected: %v, got %v", expected, v.IsSet())
+t.Fatalf("AtomicBool.Toggle() doesn't work after overflow, expected: %v, got %v", expected, v.IsSet())
}
// make sure overflow happened
@@ -135,7 +135,7 @@ func TestToogleAfterOverflow(t *testing.T) {
v.Toggle()
expected = !expected
if v.IsSet() != expected {
-t.Fatalf("AtomicBool.Toogle() doesn't work after the second call after overflow, expected: %v, got %v", expected, v.IsSet())
+t.Fatalf("AtomicBool.Toggle() doesn't work after the second call after overflow, expected: %v, got %v", expected, v.IsSet())
}
}
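For readers unfamiliar with the type under test, here is a minimal, hypothetical sketch of how such an atomic boolean can be backed by an int32 counter whose parity encodes the value — which is why the hunks above exercise Toggle across the math.MaxInt32 overflow boundary. The struct and method bodies are assumptions for illustration, not the tested package's actual implementation.

package main

import (
	"fmt"
	"math"
	"sync/atomic"
)

// parityBool is a hypothetical atomic boolean: the counter's parity is the value.
type parityBool struct{ n int32 }

// Toggle flips the value by incrementing the counter; wrapping from
// math.MaxInt32 to math.MinInt32 still flips parity, so overflow is harmless.
func (b *parityBool) Toggle() { atomic.AddInt32(&b.n, 1) }

// IsSet reports true when the counter is odd.
func (b *parityBool) IsSet() bool { return atomic.LoadInt32(&b.n)%2 != 0 }

func main() {
	b := &parityBool{n: math.MaxInt32} // odd counter, so the value is set
	fmt.Println(b.IsSet())             // true
	b.Toggle()                         // wraps to math.MinInt32, an even counter
	fmt.Println(b.IsSet())             // false
}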

View File

@@ -63,7 +63,7 @@ func Scatter(inputLen int, sFunc func(int, int, *sync.RWMutex) (interface{}, err
return results, nil
}
-// calculateChunkSize calculates a suitable chunk size for the purposes of parallelisation.
+// calculateChunkSize calculates a suitable chunk size for the purposes of parallelization.
func calculateChunkSize(items int) int {
// Start with a simple even split
chunkSize := items / runtime.GOMAXPROCS(0)
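For context on the comment being fixed above: a minimal sketch of an even split across runtime.GOMAXPROCS(0) using ceiling division. The real calculateChunkSize applies further heuristics beyond this, so the body is illustrative only.

package main

import (
	"fmt"
	"runtime"
)

// chunkSize is an illustrative even split of items across available processors,
// rounded up so no items are left over when the division is uneven.
func chunkSize(items int) int {
	workers := runtime.GOMAXPROCS(0)
	if workers <= 0 || items <= workers {
		return 1
	}
	return (items + workers - 1) / workers // ceiling division
}

func main() {
	fmt.Println(chunkSize(1000)) // e.g. 125 on an 8-thread machine
}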

View File

@@ -60,7 +60,7 @@ func (s *Service) getFCUArgsEarlyBlock(cfg *postBlockProcessConfig, fcuArgs *fcu
// logNonCanonicalBlockReceived prints a message informing that the received
// block is not the head of the chain. It requires the caller holds a lock on
-// Foprkchoice.
+// Forkchoice.
func (s *Service) logNonCanonicalBlockReceived(blockRoot [32]byte, headRoot [32]byte) {
receivedWeight, err := s.cfg.ForkChoiceStore.Weight(blockRoot)
if err != nil {

View File

@@ -804,7 +804,7 @@ func TestFinalizedDeposits_ReturnsTrieCorrectly(t *testing.T) {
depositTrie, err := trie.GenerateTrieFromItems(trieItems, params.BeaconConfig().DepositContractTreeDepth)
assert.NoError(t, err)
-// Perform this in a non-sensical ordering
+// Perform this in a nonsensical ordering
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 10, [32]byte{}, 0))
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 2, [32]byte{}, 0))
require.NoError(t, dc.InsertFinalizedDeposits(context.Background(), 3, [32]byte{}, 0))

View File

@@ -784,7 +784,7 @@ func TestFinalizedDeposits_ReturnsTrieCorrectly(t *testing.T) {
depositTrie, err := trie.GenerateTrieFromItems(trieItems, params.BeaconConfig().DepositContractTreeDepth)
assert.NoError(t, err)
-// Perform this in a non-sensical ordering
+// Perform this in a nonsensical ordering
err = dc.InsertFinalizedDeposits(context.Background(), 1, [32]byte{}, 0)
require.NoError(t, err)
err = dc.InsertFinalizedDeposits(context.Background(), 2, [32]byte{}, 0)

View File

@@ -22,7 +22,7 @@ var balanceCache = cache.NewEffectiveBalanceCache()
// """
// Return the combined effective balance of the ``indices``.
// ``EFFECTIVE_BALANCE_INCREMENT`` Gwei minimum to avoid divisions by zero.
-// Math safe up to ~10B ETH, afterwhich this overflows uint64.
+// Math safe up to ~10B ETH, after which this overflows uint64.
// """
// return Gwei(max(EFFECTIVE_BALANCE_INCREMENT, sum([state.validators[index].effective_balance for index in indices])))
func TotalBalance(state state.ReadOnlyValidators, indices []primitives.ValidatorIndex) uint64 {
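The spec snippet quoted in this hunk reduces to a sum floored at EFFECTIVE_BALANCE_INCREMENT. A simplified sketch of that formula, using a plain slice in place of state.validators (names are hypothetical, not the repository's code):

package main

import "fmt"

// effectiveBalanceIncrement mirrors the spec's EFFECTIVE_BALANCE_INCREMENT: 10^9 Gwei.
const effectiveBalanceIncrement uint64 = 1_000_000_000

// totalBalance sums the selected effective balances and floors the result at the
// increment so callers can divide by it without hitting zero.
func totalBalance(effectiveBalances []uint64, indices []int) uint64 {
	var sum uint64
	for _, i := range indices {
		sum += effectiveBalances[i]
	}
	if sum < effectiveBalanceIncrement {
		return effectiveBalanceIncrement
	}
	return sum
}

func main() {
	balances := []uint64{32_000_000_000, 31_000_000_000}
	fmt.Println(totalBalance(balances, []int{0, 1})) // 63_000_000_000
}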

View File

@@ -59,7 +59,7 @@ func ComputeDomainAndSign(st state.ReadOnlyBeaconState, epoch primitives.Epoch,
return ComputeDomainAndSignWithoutState(st.Fork(), epoch, domain, st.GenesisValidatorsRoot(), obj, key)
}
-// ComputeDomainAndSignWithoutState offers the same functionalit as ComputeDomainAndSign without the need to provide a BeaconState.
+// ComputeDomainAndSignWithoutState offers the same functionality as ComputeDomainAndSign without the need to provide a BeaconState.
// This is particularly helpful for signing values in tests.
func ComputeDomainAndSignWithoutState(fork *ethpb.Fork, epoch primitives.Epoch, domain [4]byte, vr []byte, obj fssz.HashRoot, key bls.SecretKey) ([]byte, error) {
// EIP-7044: Beginning in Deneb, fix the fork version to Capella for signed exits.

View File

@@ -697,7 +697,7 @@ func decodeSlasherChunk(enc []byte) ([]uint16, error) {
}
// Encode attestation record to bytes.
-// The output encoded attestation record consists in the signing root concatened with the compressed attestation record.
+// The output encoded attestation record consists in the signing root concatenated with the compressed attestation record.
func encodeAttestationRecord(att *slashertypes.IndexedAttestationWrapper) ([]byte, error) {
if att == nil || att.IndexedAttestation == nil {
return []byte{}, errors.New("nil proposal record")
@@ -716,7 +716,7 @@ func encodeAttestationRecord(att *slashertypes.IndexedAttestationWrapper) ([]byt
}
// Decode attestation record from bytes.
-// The input encoded attestation record consists in the signing root concatened with the compressed attestation record.
+// The input encoded attestation record consists in the signing root concatenated with the compressed attestation record.
func decodeAttestationRecord(encoded []byte) (*slashertypes.IndexedAttestationWrapper, error) {
if len(encoded) < rootSize {
return nil, fmt.Errorf("wrong length for encoded attestation record, want minimum %d, got %d", rootSize, len(encoded))
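The fixed comments describe a simple layout: a fixed-size signing root followed by the compressed record. A hedged sketch of that layout — the codec (snappy here) and the helper names are assumptions for illustration, not the slasher package's actual API:

package main

import (
	"errors"
	"fmt"

	"github.com/golang/snappy"
)

const rootSize = 32

// encodeRecord concatenates the signing root with the compressed record bytes.
func encodeRecord(signingRoot [rootSize]byte, record []byte) []byte {
	return append(signingRoot[:], snappy.Encode(nil, record)...)
}

// decodeRecord splits the fixed-size root off the front and decompresses the rest.
func decodeRecord(encoded []byte) ([rootSize]byte, []byte, error) {
	var root [rootSize]byte
	if len(encoded) < rootSize {
		return root, nil, errors.New("encoded record shorter than signing root")
	}
	copy(root[:], encoded[:rootSize])
	record, err := snappy.Decode(nil, encoded[rootSize:])
	return root, record, err
}

func main() {
	root := [rootSize]byte{1, 2, 3}
	enc := encodeRecord(root, []byte("attestation bytes"))
	_, rec, err := decodeRecord(enc)
	fmt.Println(string(rec), err)
}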

View File

@@ -10,7 +10,7 @@ import (
)
// TestCleanup ensures that the cleanup function unregisters the prometheus.Collection
-// also tests the interchangability of the explicit prometheus Register/Unregister
+// also tests the interchangeability of the explicit prometheus Register/Unregister
// and the implicit methods within the collector implementation
func TestCleanup(t *testing.T) {
ctx := context.Background()
@@ -32,11 +32,11 @@ func TestCleanup(t *testing.T) {
assert.Equal(t, true, unregistered, "prometheus.Unregister failed to unregister PowchainCollector on final cleanup")
}
-// TestCancelation tests that canceling the context passed into
+// TestCancellation tests that canceling the context passed into
// NewPowchainCollector cleans everything up as expected. This
// does come at the cost of an extra channel cluttering up
// PowchainCollector, just for this test.
-func TestCancelation(t *testing.T) {
+func TestCancellation(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
pc, err := NewPowchainCollector(ctx)
assert.NoError(t, err, "Unexpected error calling NewPowchainCollector")

View File

@@ -1,7 +1,7 @@
// Package peers provides information about peers at the Ethereum consensus protocol level.
//
// "Protocol level" is the level above the network level, so this layer never sees or interacts with
-// (for example) hosts that are uncontactable due to being down, firewalled, etc. Instead, this works
+// (for example) hosts that are unreachable due to being down, firewalled, etc. Instead, this works
// with peers that are contactable but may or may not be of the correct fork version, not currently
// required due to the number of current connections, etc.
//
@@ -59,8 +59,8 @@ const (
)
const (
-// ColocationLimit restricts how many peer identities we can see from a single ip or ipv6 subnet.
-ColocationLimit = 5
+// CollocationLimit restricts how many peer identities we can see from a single ip or ipv6 subnet.
+CollocationLimit = 5
// Additional buffer beyond current peer limit, from which we can store the relevant peer statuses.
maxLimitBuffer = 150
@@ -548,7 +548,7 @@ func (p *Status) Prune() {
p.store.Lock()
defer p.store.Unlock()
-// Default to old method if flag isnt enabled.
+// Default to old method if flag isn't enabled.
if !features.Get().EnablePeerScorer {
p.deprecatedPrune()
return
@@ -961,7 +961,7 @@ func (p *Status) isfromBadIP(pid peer.ID) bool {
return true
}
if val, ok := p.ipTracker[ip.String()]; ok {
-if val > ColocationLimit {
+if val > CollocationLimit {
return true
}
}
@@ -1012,7 +1012,7 @@ func (p *Status) tallyIPTracker() {
}
func sameIP(firstAddr, secondAddr ma.Multiaddr) bool {
-// Exit early if we do get nil multiaddresses
+// Exit early if we do get nil multi-addresses
if firstAddr == nil || secondAddr == nil {
return false
}
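For readers following the Colocation→Collocation rename: a small illustrative sketch of the IP-tracking idea the constant guards. The names and structure are simplified stand-ins, not the p2p package's implementation.

package main

import "fmt"

// collocationLimit mirrors the constant in the hunk above: at most this many
// peer identities are admitted from one IP in this sketch.
const collocationLimit = 5

type ipTracker map[string]int

// allow admits a peer from ip unless the colocation limit is already reached.
func (t ipTracker) allow(ip string) bool {
	if t[ip] >= collocationLimit {
		return false
	}
	t[ip]++
	return true
}

func main() {
	t := ipTracker{}
	for i := 0; i < 7; i++ {
		fmt.Println(t.allow("211.227.218.116")) // true five times, then false
	}
}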

View File

@@ -565,7 +565,7 @@ func TestPeerIPTracker(t *testing.T) {
badIP := "211.227.218.116"
var badPeers []peer.ID
-for i := 0; i < peers.ColocationLimit+10; i++ {
+for i := 0; i < peers.CollocationLimit+10; i++ {
port := strconv.Itoa(3000 + i)
addr, err := ma.NewMultiaddr("/ip4/" + badIP + "/tcp/" + port)
if err != nil {

View File

@@ -208,8 +208,8 @@ func (m *MinSpanChunksSlice) CheckSlashable(
}
if existingAttWrapper == nil {
-// This case should normally not happen. If this happen, it means we previously
-// recorded in our min/max DB an distance corresponding to an attestaiton, but WITHOUT
+// This case should normally not happen. If this happens, it means we previously
+// recorded in our min/max DB a distance corresponding to an attestation, but WITHOUT
// recording the attestation itself. As a consequence, we say there is no surrounding vote,
// but we log an error.
fields := logrus.Fields{
@@ -287,8 +287,8 @@ func (m *MaxSpanChunksSlice) CheckSlashable(
}
if existingAttWrapper == nil {
-// This case should normally not happen. If this happen, it means we previously
-// recorded in our min/max DB an distance corresponding to an attestaiton, but WITHOUT
+// This case should normally not happen. If this happens, it means we previously
+// recorded in our min/max DB a distance corresponding to an attestation, but WITHOUT
// recording the attestation itself. As a consequence, we say there is no surrounded vote,
// but we log an error.
fields := logrus.Fields{

View File

@@ -1059,7 +1059,7 @@ func Test_updatedChunkByChunkIndex(t *testing.T) {
// Initialize the slasher database.
slasherDB := dbtest.SetupSlasherDB(t)
-// Intialize the slasher service.
+// Initialize the slasher service.
service := &Service{
params: &Parameters{
chunkSize: tt.chunkSize,
@@ -1502,7 +1502,7 @@ func runAttestationsBenchmark(b *testing.B, s *Service, numAtts, numValidators u
func Benchmark_checkSurroundVotes(b *testing.B) {
const (
-// Approximatively the number of Holesky active validators on 2024-02-16
+// Approximately the number of Holesky active validators on 2024-02-16
// This number is both a multiple of 32 (the number of slots per epoch) and 256 (the number of validators per chunk)
validatorsCount = 1_638_400
slotsPerEpoch = 32
@@ -1526,7 +1526,7 @@ func Benchmark_checkSurroundVotes(b *testing.B) {
// So for 1_638_400 validators with 32 slots per epoch, we would have 48_000 attestation wrappers per slot.
// With 256 validators per chunk, we would have only 188 modified chunks.
//
-// In this benchmark, we use the worst case scenario where attestating validators are evenly splitted across all validators chunks.
+// In this benchmark, we use the worst case scenario where attesting validators are evenly split across all validators chunks.
// We also suppose that only one chunk per validator chunk index is modified.
// For one given validator index, multiple chunk indexes could be modified.
//

View File

@@ -79,7 +79,7 @@ func (s *Service) filterAttestations(
continue
}
-// If an attestations's target epoch is in the future, we defer processing for later.
+// If an attestation's target epoch is in the future, we defer processing for later.
if attWrapper.IndexedAttestation.Data.Target.Epoch > currentEpoch {
validInFuture = append(validInFuture, attWrapper)
continue

View File

@@ -646,7 +646,7 @@ func TestService_BatchRootRequest(t *testing.T) {
b4Root, err := b4.Block.HashTreeRoot()
require.NoError(t, err)
-// Send in duplicated roots to also test deduplicaton.
+// Send in duplicated roots to also test deduplication.
sentRoots := p2ptypes.BeaconBlockByRootsReq{b2Root, b2Root, b3Root, b3Root, b4Root, b5Root}
expectedRoots := p2ptypes.BeaconBlockByRootsReq{b2Root, b3Root, b4Root, b5Root}
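The deduplication the fixed comment refers to can be pictured with a small sketch that collapses repeated 32-byte roots while preserving first-seen order; the request type is simplified to a plain slice for illustration.

package main

import "fmt"

// dedupRoots drops repeated roots, keeping the first occurrence of each.
func dedupRoots(roots [][32]byte) [][32]byte {
	seen := make(map[[32]byte]bool, len(roots))
	out := make([][32]byte, 0, len(roots))
	for _, r := range roots {
		if seen[r] {
			continue
		}
		seen[r] = true
		out = append(out, r)
	}
	return out
}

func main() {
	a, b := [32]byte{1}, [32]byte{2}
	fmt.Println(len(dedupRoots([][32]byte{a, a, b, b}))) // 2
}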

View File

@@ -219,7 +219,7 @@ func TestSyncService_StopCleanly(t *testing.T) {
require.NotEqual(t, 0, len(r.cfg.p2p.PubSub().GetTopics()))
require.NotEqual(t, 0, len(r.cfg.p2p.Host().Mux().Protocols()))
-// Both pubsub and rpc topcis should be unsubscribed.
+// Both pubsub and rpc topics should be unsubscribed.
require.NoError(t, r.Stop())
// Sleep to allow pubsub topics to be deregistered.

View File

@@ -126,7 +126,7 @@ func (bv *ROBlobVerifier) VerifiedROBlob() (blocks.VerifiedROBlob, error) {
// For example, when batch syncing, forkchoice is only updated at the end of the batch. So the checks that use
// forkchoice, like descends from finalized or parent seen, would necessarily fail. Allowing the caller to
// assert the requirement has been satisfied ensures we have an easy way to audit which piece of code is satisfying
-// a requireent outside of this package.
+// a requirement outside of this package.
func (bv *ROBlobVerifier) SatisfyRequirement(req Requirement) {
bv.recordResult(req, nil)
}

View File

@@ -9,7 +9,7 @@ var (
backfillWorkerCountName = "backfill-worker-count"
// EnableExperimentalBackfill enables backfill for checkpoint synced nodes.
-// This flag will be removed onced backfill is enabled by default.
+// This flag will be removed once backfill is enabled by default.
EnableExperimentalBackfill = &cli.BoolFlag{
Name: "enable-experimental-backfill",
Usage: "Backfill is still experimental at this time. " +

View File

@@ -312,7 +312,7 @@ var (
Usage: "(Work in progress): Enables the web portal for the validator client.",
Value: false,
}
-// SlashingProtectionExportDirFlag allows specifying the outpt directory
+// SlashingProtectionExportDirFlag allows specifying the output directory
// for a validator's slashing protection history.
SlashingProtectionExportDirFlag = &cli.StringFlag{
Name: "slashing-protection-export-dir",

View File

@@ -108,7 +108,7 @@ type Settings struct {
DefaultConfig *Option
}
-// ShouldBeSaved goes through checks to see if the value should be saveable
+// ShouldBeSaved goes through checks to see if the value should be savable
// Pseudocode: conditions for being saved into the database
// 1. settings are not nil
// 2. proposeconfig is not nil (this defines specific settings for each validator key), default config can be nil in this case and fall back to beacon node settings
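Only the first two saveability conditions are visible in this hunk; a hedged sketch of just those two checks, with simplified stand-in types, might read:

package main

import "fmt"

// Option and Settings are simplified stand-ins for the real types.
type Option struct{}

type Settings struct {
	ProposeConfig map[string]*Option // per-validator-key settings
	DefaultConfig *Option            // may be nil; beacon node settings are the fallback
}

// shouldBeSaved applies only the two conditions visible in the hunk above.
func shouldBeSaved(s *Settings) bool {
	if s == nil { // 1. settings are not nil
		return false
	}
	if s.ProposeConfig == nil { // 2. proposeconfig is not nil
		return false
	}
	return true
}

func main() {
	fmt.Println(shouldBeSaved(nil))                                            // false
	fmt.Println(shouldBeSaved(&Settings{ProposeConfig: map[string]*Option{}})) // true
}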

View File

@@ -24,7 +24,7 @@ type Node[T any] struct {
next *Node[T]
}
-// Copy returns a copy of the origina list.
+// Copy returns a copy of the original list.
func (l *List[T]) Copy() *List[T] {
if l == nil {
return nil
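The Copy method in this hunk duplicates a generic singly linked list. A self-contained sketch of that pattern follows; the field names and body are illustrative and may differ from the real type.

package main

import "fmt"

type Node[T any] struct {
	value T
	next  *Node[T]
}

type List[T any] struct {
	first *Node[T]
}

// Copy returns a new list whose nodes carry the same values as the original.
func (l *List[T]) Copy() *List[T] {
	if l == nil {
		return nil
	}
	out := &List[T]{}
	tail := &out.first // slot where the next copied node is attached
	for n := l.first; n != nil; n = n.next {
		*tail = &Node[T]{value: n.value}
		tail = &(*tail).next
	}
	return out
}

func main() {
	l := &List[int]{first: &Node[int]{value: 1, next: &Node[int]{value: 2}}}
	c := l.Copy()
	fmt.Println(c.first.value, c.first.next.value) // 1 2
}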

View File

@@ -10,7 +10,7 @@ import (
// During deepValueEqual, must keep track of checks that are
// in progress. The comparison algorithm assumes that all
-// checks in progress are true when it reencounters them.
+// checks in progress are true when it re-encounters them.
// Visited comparisons are stored in a map indexed by visit.
type visit struct {
a1 unsafe.Pointer // #nosec G103 -- Test use only
@@ -27,7 +27,7 @@ type visit struct {
// intricacies when determining equality of empty values.
//
// Tests for deep equality using reflected types. The map argument tracks
-// comparisons that have already been seen, which allows short circuiting on
+// comparisons that have already been seen, which allows short-circuiting on
// recursive types.
func deepValueEqual(v1, v2 reflect.Value, visited map[visit]bool, depth int) bool {
if !v1.IsValid() || !v2.IsValid() {
@@ -273,7 +273,7 @@ func deepValueBaseTypeEqual(v1, v2 reflect.Value) bool {
// In general DeepEqual is a recursive relaxation of Go's == operator.
// However, this idea is impossible to implement without some inconsistency.
// Specifically, it is possible for a value to be unequal to itself,
-// either because it is of func type (uncomparable in general)
+// either because it is of func type (incomparable in general)
// or because it is a floating-point NaN value (not equal to itself in floating-point comparison),
// or because it is an array, struct, or interface containing
// such a value.
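The visited-map technique these comments describe can be shown with a much smaller sketch: record pointer pairs already under comparison so recursive structures terminate instead of recursing forever. This is illustrative only and far simpler than the real deepValueEqual.

package main

import "fmt"

type node struct {
	val  int
	next *node
}

// visitPair identifies a comparison that is already in progress.
type visitPair struct{ a, b *node }

// equal walks two (possibly cyclic) lists; a pair already being compared is
// assumed equal, which is exactly the short-circuit the comments describe.
func equal(a, b *node, visited map[visitPair]bool) bool {
	if a == nil || b == nil {
		return a == b
	}
	p := visitPair{a, b}
	if visited[p] {
		return true
	}
	visited[p] = true
	return a.val == b.val && equal(a.next, b.next, visited)
}

func main() {
	x := &node{val: 1}
	x.next = x // cyclic
	y := &node{val: 1}
	y.next = y
	fmt.Println(equal(x, y, map[visitPair]bool{})) // true, and it terminates
}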

View File

@@ -1133,7 +1133,7 @@ fi
if [ "$ft_search" = "1" ];
then
-# detect bower comoponents location
+# detect bower components location
bower_components="bower_components"
bower_rc=$(cd "$git_root" && cat .bowerrc 2>/dev/null || echo "")
if [ "$bower_rc" != "" ];

View File

@@ -20,7 +20,7 @@ type APIMessage struct {
// CommonStats represent generic metrics that are expected on both
// beaconnode and validator metric types. This type is used for
-// marshaling metrics to the POST body sent to the metrics collcetor.
+// marshaling metrics to the POST body sent to the metrics collector.
// Note that some metrics are labeled NA because they are expected
// to be present with their zero-value when not supported by a client.
type CommonStats struct {
@@ -42,7 +42,7 @@ type CommonStats struct {
// BeaconNodeStats embeds CommonStats and represents metrics specific to
// the beacon-node process. This type is used to marshal metrics data
-// to the POST body sent to the metrics collcetor. To make the connection
+// to the POST body sent to the metrics collector. To make the connection
// to client-stats clear, BeaconNodeStats is also used by prometheus
// collection code introduced to support client-stats.
// Note that some metrics are labeled NA because they are expected
@@ -66,7 +66,7 @@ type BeaconNodeStats struct {
// ValidatorStats embeds CommonStats and represents metrics specific to
// the validator process. This type is used to marshal metrics data
-// to the POST body sent to the metrics collcetor.
+// to the POST body sent to the metrics collector.
// Note that some metrics are labeled NA because they are expected
// to be present with their zero-value when not supported by a client.
type ValidatorStats struct {

View File

@@ -127,7 +127,7 @@ func feeRecipientIsPresent(_ *types.EvaluationContext, conns ...*grpc.ClientConn
// In e2e we generate deterministic keys by validator index, and then use a slice of their public key bytes
// as the fee recipient, so that this will also be deterministic, so this test can statelessly verify it.
// These should be the only keys we see.
-// Otherwise something has changed in e2e and this test needs to be updated.
+// Otherwise, something has changed in e2e and this test needs to be updated.
_, knownKey := valkeys[pk]
if !knownKey {
log.WithField("pubkey", pk).

View File

@@ -245,7 +245,7 @@ func activatesDepositedValidators(ec *e2etypes.EvaluationContext, conns ...*grpc
}
}
-// Make sure every post-genesis deposit has been proecssed, resulting in a validator.
+// Make sure every post-genesis deposit has been processed, resulting in a validator.
if len(expected) > 0 {
return fmt.Errorf("missing %d validators for post-genesis deposits", len(expected))
}

View File

@@ -27,7 +27,7 @@ type doubleAttestationHelper struct {
committee []primitives.ValidatorIndex
}
-// Initializes helper with details needed to make a double attestation for testint purposes
+// Initializes helper with details needed to make a double attestation for testing purposes
// Populates the committee of that is responsible for the
func (h *doubleAttestationHelper) setup(ctx context.Context) error {
chainHead, err := h.beaconClient.GetChainHead(ctx, &emptypb.Empty{})

View File

@@ -208,7 +208,7 @@ func GenerateFullBlockCapella(
return &ethpb.SignedBeaconBlockCapella{Block: block, Signature: signature.Marshal()}, nil
}
-// GenerateBLSToExecutionChange generates a valid bls to exec changae for validator `val` and its private key `priv` with the given beacon state `st`.
+// GenerateBLSToExecutionChange generates a valid bls to exec change for validator `val` and its private key `priv` with the given beacon state `st`.
func GenerateBLSToExecutionChange(st state.BeaconState, priv bls.SecretKey, val primitives.ValidatorIndex) (*ethpb.SignedBLSToExecutionChange, error) {
cred := indexToHash(uint64(val))
pubkey := priv.PublicKey().Marshal()

View File

@@ -260,7 +260,7 @@ func SyncCommitteePeriodStartEpoch(e primitives.Epoch) (primitives.Epoch, error)
return primitives.Epoch(startEpoch), nil
}
-// SecondsSinceSlotStart returns the number of seconds transcurred since the
+// SecondsSinceSlotStart returns the number of seconds elapsed since the
// given slot start time
func SecondsSinceSlotStart(s primitives.Slot, genesisTime, timeStamp uint64) (uint64, error) {
if timeStamp < genesisTime+uint64(s)*params.BeaconConfig().SecondsPerSlot {
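As a hedged illustration of the comment being reworded above, assuming the slot start time is genesisTime + slot*SecondsPerSlot (12 seconds is used here only for the example; the real value comes from the beacon config):

package main

import (
	"errors"
	"fmt"
)

const secondsPerSlot uint64 = 12 // example value, not the configured one

// secondsSinceSlotStart returns timeStamp minus the slot's start time,
// rejecting timestamps that fall before the slot starts, as the guard above does.
func secondsSinceSlotStart(slot, genesisTime, timeStamp uint64) (uint64, error) {
	start := genesisTime + slot*secondsPerSlot
	if timeStamp < start {
		return 0, errors.New("timestamp precedes slot start")
	}
	return timeStamp - start, nil
}

func main() {
	secs, err := secondsSinceSlotStart(10, 1_600_000_000, 1_600_000_125)
	fmt.Println(secs, err) // 5 <nil>
}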

View File

@@ -19,7 +19,7 @@ var (
ErrEmptyMnemonic = errors.New("phrase cannot be empty")
)
-// WalletRecover uses a menmonic seed phrase to recover a wallet into the path provided.
+// WalletRecover uses a mnemonic seed phrase to recover a wallet into the path provided.
func (acm *CLIManager) WalletRecover(ctx context.Context) (*wallet.Wallet, error) {
// Ensure that the wallet directory does not contain a wallet already
dirExists, err := wallet.Exists(acm.walletDir)

View File

@@ -228,7 +228,7 @@ func TestCheckDoppelGanger_Nominal(t *testing.T) {
{Index: "22222", Validator: &structs.Validator{Pubkey: stringPubKey2}}, // not recent - duplicate on previous epoch
{Index: "33333", Validator: &structs.Validator{Pubkey: stringPubKey3}}, // not recent - duplicate on current epoch
{Index: "44444", Validator: &structs.Validator{Pubkey: stringPubKey4}}, // not recent - duplicate on both previous and current epoch
-// No "55555" sicee corresponding validator does not exist
+// No "55555" since corresponding validator does not exist
{Index: "66666", Validator: &structs.Validator{Pubkey: stringPubKey6}}, // not recent - not duplicate
},
},

View File

@@ -59,7 +59,7 @@ func TestDB_ConvertDatabase(t *testing.T) {
signingRootBytes = signingRoot[:]
}
-// Create database directoriy path.
+// Create database directory path.
datadir := t.TempDir()
// Run source DB preparation.

View File

@@ -60,7 +60,7 @@ func MockAttestingAndProposalHistories(pubkeys [][fieldparams.BLSPubkeyLength]by
gen := rand.NewGenerator()
for v := 0; v < numValidators; v++ {
latestTarget := primitives.Epoch(gen.Intn(int(params.BeaconConfig().WeakSubjectivityPeriod) / 1000))
-// If 0, we change the value to 1 as the we compute source by doing (target-1)
+// If 0, we change the value to 1 as we compute source by doing (target-1)
// to prevent any underflows in this setup helper.
if latestTarget == 0 {
latestTarget = 1