mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-09 13:28:01 -05:00)
Clean up: fix typos (#11165)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
@@ -2249,7 +2249,7 @@ func TestStore_NoViableHead_NewPayload_DoublyLinkedTree(t *testing.T) {
err = service.onBlock(ctx, wsb, root)
require.ErrorContains(t, "received an INVALID payload from execution engine", err)
// Check that forkchoice's head and store's headroot are the previous head (since the invalid block did
-// not finish importing and it was never imported to forkchoice). Cehck
+// not finish importing and it was never imported to forkchoice). Check
// also that the node is optimistic
require.Equal(t, firstInvalidRoot, service.ForkChoicer().CachedHeadRoot())
headRoot, err := service.HeadRoot(ctx)

@@ -2409,7 +2409,7 @@ func TestStore_NoViableHead_NewPayload_Protoarray(t *testing.T) {
err = service.onBlock(ctx, wsb, root)
require.ErrorContains(t, "received an INVALID payload from execution engine", err)
// Check that forkchoice's head and store's headroot are the previous head (since the invalid block did
-// not finish importing and it was never imported to forkchoice). Cehck
+// not finish importing and it was never imported to forkchoice). Check
// also that the node is optimistic
require.Equal(t, firstInvalidRoot, service.ForkChoicer().CachedHeadRoot())
headRoot, err := service.HeadRoot(ctx)

@@ -309,13 +309,13 @@ func Test_SyncRewards(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
-proposerReward, participarntReward, err := altair.SyncRewards(tt.activeBalance)
+proposerReward, participantReward, err := altair.SyncRewards(tt.activeBalance)
if (err != nil) && (tt.errString != "") {
require.ErrorContains(t, tt.errString, err)
return
}
require.Equal(t, tt.wantProposerReward, proposerReward)
-require.Equal(t, tt.wantParticipantReward, participarntReward)
+require.Equal(t, tt.wantParticipantReward, participantReward)
})
}
}

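The rename above touches the test for Altair's sync-committee reward math. For context, here is a minimal, self-contained Go sketch of that reward split as given in the Altair consensus spec, with mainnet constants assumed; `syncRewards` and `intSqrt` are illustrative helpers, not Prysm's API.

```go
package main

import "fmt"

const (
	effectiveBalanceIncrement = 1_000_000_000 // Gwei
	baseRewardFactor          = 64
	slotsPerEpoch             = 32
	syncCommitteeSize         = 512
	syncRewardWeight          = 2
	proposerWeight            = 8
	weightDenominator         = 64
)

// intSqrt is a simple integer square root (Newton's method), standing in
// for the spec's integer_squareroot.
func intSqrt(n uint64) uint64 {
	x := n
	y := (x + 1) / 2
	for y < x {
		x = y
		y = (x + n/x) / 2
	}
	return x
}

// syncRewards mirrors the spec's process_sync_aggregate reward derivation:
// a per-increment base reward scaled by the sync reward weight, split across
// the committee, with the proposer taking a weighted cut of each reward.
func syncRewards(activeBalance uint64) (proposerReward, participantReward uint64) {
	totalActiveIncrements := activeBalance / effectiveBalanceIncrement
	baseRewardPerIncrement := effectiveBalanceIncrement * baseRewardFactor / intSqrt(activeBalance)
	totalBaseRewards := baseRewardPerIncrement * totalActiveIncrements
	maxParticipantRewards := totalBaseRewards * syncRewardWeight / weightDenominator / slotsPerEpoch
	participantReward = maxParticipantRewards / syncCommitteeSize
	proposerReward = participantReward * proposerWeight / (weightDenominator - proposerWeight)
	return proposerReward, participantReward
}

func main() {
	// 100k validators at 32 ETH of effective balance.
	proposer, participant := syncRewards(32_000_000_000 * 100_000)
	fmt.Println("proposer:", proposer, "participant:", participant)
}
```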
@@ -68,11 +68,11 @@ func ProcessEpoch(ctx context.Context, state state.BeaconState) (state.BeaconSta
}

// Modified in Altair and Bellatrix.
-proportionalSlashingMultipler, err := state.ProportionalSlashingMultiplier()
+proportionalSlashingMultiplier, err := state.ProportionalSlashingMultiplier()
if err != nil {
return nil, err
}
-state, err = e.ProcessSlashings(state, proportionalSlashingMultipler)
+state, err = e.ProcessSlashings(state, proportionalSlashingMultiplier)
if err != nil {
return nil, err
}

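For context on why this value is fetched per fork: the proportional slashing multiplier scales the slashed-balance sum in the spec's process_slashings penalty (the multiplier is 1 in Phase 0, 2 in Altair, 3 in Bellatrix). A hedged sketch of that formula, not Prysm's implementation:

```go
package main

import "fmt"

const increment = 1_000_000_000 // EFFECTIVE_BALANCE_INCREMENT in Gwei

// slashingPenalty shows where the multiplier enters: it scales the total
// slashed balance, capped at the total active balance, before the
// per-validator proportional penalty is computed.
func slashingPenalty(effectiveBalance, totalSlashed, totalBalance, multiplier uint64) uint64 {
	adjusted := totalSlashed * multiplier
	if adjusted > totalBalance {
		adjusted = totalBalance
	}
	penaltyNumerator := effectiveBalance / increment * adjusted
	return penaltyNumerator / totalBalance * increment
}

func main() {
	// 32 ETH validator, 320k ETH slashed out of 10M ETH total, Bellatrix
	// multiplier of 3: roughly a 3 ETH penalty.
	fmt.Println(slashingPenalty(32*increment, 320_000*increment, 10_000_000*increment, 3))
}
```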
@@ -73,7 +73,7 @@ func ProcessRandaoNoVerify(
}
blockRandaoReveal := hash.Hash(randaoReveal)
if len(blockRandaoReveal) != len(latestMixSlice) {
-return nil, errors.New("blockRandaoReveal length doesnt match latestMixSlice length")
+return nil, errors.New("blockRandaoReveal length doesn't match latestMixSlice length")
}
for i, x := range blockRandaoReveal {
latestMixSlice[i] ^= x

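The loop being touched is the RANDAO mix update: the hash of the block's randao reveal is XORed byte-wise into the latest mix. A self-contained sketch of the same step, assuming sha256 as a stand-in for Prysm's hash.Hash (both yield 32 bytes, so the length check above passes):

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// mixInReveal XORs the hash of the randao reveal into the latest mix,
// mirroring the loop in ProcessRandaoNoVerify.
func mixInReveal(latestMix [32]byte, randaoReveal []byte) [32]byte {
	revealHash := sha256.Sum256(randaoReveal)
	for i, x := range revealHash {
		latestMix[i] ^= x
	}
	return latestMix
}

func main() {
	var mix [32]byte
	fmt.Printf("%x\n", mixInReveal(mix, []byte("reveal")))
}
```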
@@ -39,7 +39,7 @@ func TestUpdateValidator_InclusionOnlyCountsPrevEpoch(t *testing.T) {
record := &precompute.Validator{IsCurrentEpochAttester: true, IsCurrentEpochTargetAttester: true}
a := &ethpb.PendingAttestation{InclusionDelay: 1, ProposerIndex: 2}

-// Verify inclusion info doesnt get updated.
+// Verify inclusion info doesn't get updated.
vp = precompute.UpdateValidator(vp, record, []uint64{0}, a, 100)
wanted := &precompute.Validator{IsCurrentEpochAttester: true, IsCurrentEpochTargetAttester: true, InclusionSlot: e}
wantedVp := []*precompute.Validator{wanted}

@@ -310,7 +310,7 @@ func validateBlockConsistency(execBlock *pb.ExecutionBlock, jsonMap map[string]i
continue
}
if !reflect.DeepEqual(jsonVal, bVal) {
-return errors.Errorf("fields dont match, %v and %v are not equal for field %s", jsonVal, bVal, field.Name)
+return errors.Errorf("fields don't match, %v and %v are not equal for field %s", jsonVal, bVal, field.Name)
}
}
return nil

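The error message above comes from a reflection-based consistency check between an execution block struct and its JSON form. A stripped-down sketch of that pattern (keying by Go field name is a simplification; the real check handles JSON tags and skipped fields):

```go
package main

import (
	"fmt"
	"reflect"
)

// fieldsMatch walks a struct's exported fields and compares each one
// against the value stored under the matching key in a JSON map.
func fieldsMatch(v interface{}, jsonMap map[string]interface{}) error {
	rv := reflect.ValueOf(v)
	rt := rv.Type()
	for i := 0; i < rt.NumField(); i++ {
		field := rt.Field(i)
		jsonVal, ok := jsonMap[field.Name]
		if !ok {
			continue
		}
		if !reflect.DeepEqual(jsonVal, rv.Field(i).Interface()) {
			return fmt.Errorf("fields don't match, %v and %v are not equal for field %s",
				jsonVal, rv.Field(i).Interface(), field.Name)
		}
	}
	return nil
}

func main() {
	type block struct{ Number string }
	fmt.Println(fieldsMatch(block{Number: "0x1"}, map[string]interface{}{"Number": "0x2"}))
}
```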
@@ -158,8 +158,8 @@ func (s *Store) insert(ctx context.Context,
}
secondsIntoSlot := (timeNow - s.genesisTime) % params.BeaconConfig().SecondsPerSlot
currentSlot := slots.CurrentSlot(s.genesisTime)
-boostTreshold := params.BeaconConfig().SecondsPerSlot / params.BeaconConfig().IntervalsPerSlot
-if currentSlot == slot && secondsIntoSlot < boostTreshold {
+boostThreshold := params.BeaconConfig().SecondsPerSlot / params.BeaconConfig().IntervalsPerSlot
+if currentSlot == slot && secondsIntoSlot < boostThreshold {
s.proposerBoostLock.Lock()
s.proposerBoostRoot = root
s.proposerBoostLock.Unlock()

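Both boost-threshold hunks (here and below at line 515) implement the same check. A compact sketch of the arithmetic, assuming mainnet values (SecondsPerSlot=12, IntervalsPerSlot=3, so the threshold is 4 seconds):

```go
package main

import "fmt"

// earnsProposerBoost mirrors the check in Store.insert: only a block for
// the current slot that arrives within the first interval of that slot
// (before the boost threshold) earns the proposer boost.
func earnsProposerBoost(timeNow, genesisTime, secondsPerSlot, intervalsPerSlot, slot, currentSlot uint64) bool {
	secondsIntoSlot := (timeNow - genesisTime) % secondsPerSlot
	boostThreshold := secondsPerSlot / intervalsPerSlot
	return currentSlot == slot && secondsIntoSlot < boostThreshold
}

func main() {
	// Block for slot 10 arrives 3s into slot 10: boosted.
	fmt.Println(earnsProposerBoost(10*12+3, 0, 12, 3, 10, 10)) // true
	// Same block arrives 5s into the slot: past the 4s threshold.
	fmt.Println(earnsProposerBoost(10*12+5, 0, 12, 3, 10, 10)) // false
}
```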
@@ -515,8 +515,8 @@ func (s *Store) insert(ctx context.Context,
}
secondsIntoSlot := (timeNow - s.genesisTime) % params.BeaconConfig().SecondsPerSlot
currentSlot := slots.CurrentSlot(s.genesisTime)
-boostTreshold := params.BeaconConfig().SecondsPerSlot / params.BeaconConfig().IntervalsPerSlot
-if currentSlot == slot && secondsIntoSlot < boostTreshold {
+boostThreshold := params.BeaconConfig().SecondsPerSlot / params.BeaconConfig().IntervalsPerSlot
+if currentSlot == slot && secondsIntoSlot < boostThreshold {
s.proposerBoostLock.Lock()
s.proposerBoostRoot = root
s.proposerBoostLock.Unlock()

@@ -184,7 +184,7 @@ func (s *Service) processUnaggregatedAttestation(ctx context.Context, att *ethpb
}
}

-// processUnaggregatedAttestation logs when the beacon node observes an anngregated attestation from tracked validator.
+// processUnaggregatedAttestation logs when the beacon node observes an aggregated attestation from tracked validator.
func (s *Service) processAggregatedAttestation(ctx context.Context, att *ethpb.AggregateAttestationAndProof) {
s.Lock()
defer s.Unlock()

@@ -171,7 +171,7 @@ func (s *Service) logAggregatedPerformance() {
"AverageInclusionDistance": fmt.Sprintf("%.1f", percentDistance),
"TotalProposedBlocks": p.totalProposedCount,
"TotalAggregations": p.totalAggregations,
-"TotalSyncContributions": p.totalSyncComitteeContributions,
+"TotalSyncContributions": p.totalSyncCommitteeContributions,
}).Info("Aggregated performance since launch")
}
}

@@ -17,7 +17,7 @@ func (s *Service) processSyncCommitteeContribution(contribution *ethpb.SignedCon
defer s.Unlock()
if s.trackedIndex(idx) {
aggPerf := s.aggregatedPerformance[idx]
-aggPerf.totalSyncComitteeAggregations++
+aggPerf.totalSyncCommitteeAggregations++
s.aggregatedPerformance[idx] = aggPerf

log.WithField("ValidatorIndex", contribution.Message.AggregatorIndex).Info("Sync committee aggregation processed")

@@ -58,7 +58,7 @@ func (s *Service) processSyncAggregate(state state.BeaconState, blk interfaces.B
s.latestPerformance[validatorIdx] = latestPerf

aggPerf := s.aggregatedPerformance[validatorIdx]
-aggPerf.totalSyncComitteeContributions += uint64(contrib)
+aggPerf.totalSyncCommitteeContributions += uint64(contrib)
s.aggregatedPerformance[validatorIdx] = aggPerf

syncCommitteeContributionCounter.WithLabelValues(

@@ -41,18 +41,18 @@ type ValidatorLatestPerformance struct {
// ValidatorAggregatedPerformance keeps track of the accumulated performance of
// the tracked validator since start of monitor service.
type ValidatorAggregatedPerformance struct {
-startEpoch types.Epoch
-startBalance uint64
-totalAttestedCount uint64
-totalRequestedCount uint64
-totalDistance uint64
-totalCorrectSource uint64
-totalCorrectTarget uint64
-totalCorrectHead uint64
-totalProposedCount uint64
-totalAggregations uint64
-totalSyncComitteeContributions uint64
-totalSyncComitteeAggregations uint64
+startEpoch types.Epoch
+startBalance uint64
+totalAttestedCount uint64
+totalRequestedCount uint64
+totalDistance uint64
+totalCorrectSource uint64
+totalCorrectTarget uint64
+totalCorrectHead uint64
+totalProposedCount uint64
+totalAggregations uint64
+totalSyncCommitteeContributions uint64
+totalSyncCommitteeAggregations uint64
}

// ValidatorMonitorConfig contains the list of validator indices that the

@@ -66,17 +66,17 @@ func setupService(t *testing.T) *Service {
}
aggregatedPerformance := map[types.ValidatorIndex]ValidatorAggregatedPerformance{
1: {
-startEpoch: 0,
-startBalance: 31700000000,
-totalAttestedCount: 12,
-totalRequestedCount: 15,
-totalDistance: 14,
-totalCorrectHead: 8,
-totalCorrectSource: 11,
-totalCorrectTarget: 12,
-totalProposedCount: 1,
-totalSyncComitteeContributions: 0,
-totalSyncComitteeAggregations: 0,
+startEpoch: 0,
+startBalance: 31700000000,
+totalAttestedCount: 12,
+totalRequestedCount: 15,
+totalDistance: 14,
+totalCorrectHead: 8,
+totalCorrectSource: 11,
+totalCorrectTarget: 12,
+totalProposedCount: 1,
+totalSyncCommitteeContributions: 0,
+totalSyncCommitteeAggregations: 0,
},
2: {},
12: {},

@@ -332,7 +332,7 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {

// External peer subscribes to the topic.
topic += p.Encoding().ProtocolSuffix()
-// We dont use our internal subscribe method
+// We don't use our internal subscribe method
// due to using floodsub over here.
tpHandle, err := p2.JoinTopic(topic)
require.NoError(t, err)

@@ -740,7 +740,7 @@ func (p *Status) BestNonFinalized(minPeers int, ourHeadEpoch types.Epoch) (types
return targetEpoch, potentialPIDs
}

-// PeersToPrune selects the most sutiable inbound peers
+// PeersToPrune selects the most suitable inbound peers
// to disconnect the host peer from. As of this moment
// the pruning relies on simple heuristics such as
// bad response count. In the future scoring will be used

@@ -666,7 +666,7 @@ func TestConcurrentPeerLimitHolds(t *testing.T) {
},
},
})
-assert.Equal(t, true, uint64(p.MaxPeerLimit()) > p.ConnectedPeerLimit(), "max peer limit doesnt exceed connected peer limit")
+assert.Equal(t, true, uint64(p.MaxPeerLimit()) > p.ConnectedPeerLimit(), "max peer limit doesn't exceed connected peer limit")
}

func TestAtInboundPeerLimit(t *testing.T) {

@@ -30,7 +30,7 @@ import (
"google.golang.org/protobuf/proto"
)

-// We have to declare this again here to prevent a circular dependancy
+// We have to declare this again here to prevent a circular dependency
// with the main p2p package.
const metatadataV1Topic = "/eth2/beacon_chain/req/metadata/1"
const metatadataV2Topic = "/eth2/beacon_chain/req/metadata/2"

@@ -107,7 +107,7 @@ func (p *TestP2P) ReceiveRPC(topic string, msg proto.Message) {

castedMsg, ok := msg.(ssz.Marshaler)
if !ok {
-p.t.Fatalf("%T doesnt support ssz marshaler", msg)
+p.t.Fatalf("%T doesn't support ssz marshaler", msg)
}
n, err := p.Encoding().EncodeWithMaxLength(s, castedMsg)
if err != nil {

@@ -139,7 +139,7 @@ func (p *TestP2P) ReceivePubSub(topic string, msg proto.Message) {

castedMsg, ok := msg.(ssz.Marshaler)
if !ok {
-p.t.Fatalf("%T doesnt support ssz marshaler", msg)
+p.t.Fatalf("%T doesn't support ssz marshaler", msg)
}
buf := new(bytes.Buffer)
if _, err := p.Encoding().EncodeGossip(buf, castedMsg); err != nil {

@@ -305,7 +305,7 @@ func (p *TestP2P) AddDisconnectionHandler(f func(ctx context.Context, id peer.ID
func (p *TestP2P) Send(ctx context.Context, msg interface{}, topic string, pid peer.ID) (network.Stream, error) {
t := topic
if t == "" {
-return nil, fmt.Errorf("protocol doesnt exist for proto message: %v", msg)
+return nil, fmt.Errorf("protocol doesn't exist for proto message: %v", msg)
}
stream, err := p.BHost.NewStream(ctx, pid, core.ProtocolID(t+p.Encoding().ProtocolSuffix()))
if err != nil {

@@ -315,7 +315,7 @@ func (p *TestP2P) Send(ctx context.Context, msg interface{}, topic string, pid p
if topic != metatadataV1Topic && topic != metatadataV2Topic {
castedMsg, ok := msg.(ssz.Marshaler)
if !ok {
-p.t.Fatalf("%T doesnt support ssz marshaler", msg)
+p.t.Fatalf("%T doesn't support ssz marshaler", msg)
}
if _, err := p.Encoding().EncodeWithMaxLength(stream, castedMsg); err != nil {
_err := stream.Reset()

@@ -209,7 +209,7 @@ func TestAttestationDataAtSlot_HandlesFarAwayJustifiedEpoch(t *testing.T) {
// HistoricalRootsLimit = 8192
//
// More background: https://github.com/prysmaticlabs/prysm/issues/2153
-// This test breaks if it doesnt use mainnet config
+// This test breaks if it doesn't use mainnet config

// Ensure HistoricalRootsLimit matches scenario
params.SetupTestConfigCleanup(t)

@@ -91,7 +91,7 @@ func TestWaitForActivation_ContextClosed(t *testing.T) {
}

func TestWaitForActivation_ValidatorOriginallyExists(t *testing.T) {
-// This test breaks if it doesnt use mainnet config
+// This test breaks if it doesn't use mainnet config
params.SetupTestConfigCleanup(t)
params.OverrideBeaconConfig(params.MainnetConfig().Copy())
ctx := context.Background()

@@ -234,7 +234,7 @@ func TestValidatorStatus_Pending(t *testing.T) {
}

func TestValidatorStatus_Active(t *testing.T) {
-// This test breaks if it doesnt use mainnet config
+// This test breaks if it doesn't use mainnet config
params.SetupTestConfigCleanup(t)
params.OverrideBeaconConfig(params.MainnetConfig().Copy())
ctx := context.Background()

@@ -455,7 +455,7 @@ func TestParams_validatorIndicesInChunk(t *testing.T) {
want: []types.ValidatorIndex{6, 7, 8},
},
{
-name: "0 validator chunk size returs empty",
+name: "0 validator chunk size returns empty",
fields: &Parameters{
validatorChunkSize: 0,
},

@@ -96,7 +96,7 @@ func (s *Service) run() {

log.Info("Completed chain sync, starting slashing detection")

-// Get the latest eopch written for each validator from disk on startup.
+// Get the latest epoch written for each validator from disk on startup.
headState, err := s.serviceCfg.HeadStateFetcher.HeadState(s.ctx)
if err != nil {
log.WithError(err).Error("Failed to fetch head state")

@@ -8,7 +8,7 @@ import (
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
)

-// SyncCommitteeRoot computes the HashTreeRoot Merkleization of a commitee root.
+// SyncCommitteeRoot computes the HashTreeRoot Merkleization of a committee root.
// a SyncCommitteeRoot struct according to the eth2
// Simple Serialize specification.
func SyncCommitteeRoot(committee *ethpb.SyncCommittee) ([32]byte, error) {

@@ -1065,7 +1065,7 @@ func TestService_isBlockQueueable(t *testing.T) {
genesisTime := uint64(currentTime.Unix() - int64(params.BeaconConfig().SecondsPerSlot))
blockSlot := types.Slot(1)

-// slot time within MAXIMUM_GOSSIP_CLOCK_DISPARITY, so dont queue the block.
+// slot time within MAXIMUM_GOSSIP_CLOCK_DISPARITY, so don't queue the block.
receivedTime := currentTime.Add(-400 * time.Millisecond)
result := isBlockQueueable(genesisTime, blockSlot, receivedTime)
assert.Equal(t, false, result)

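The test asserts that a block received only 400ms before its slot starts is not queued. A sketch of the check it exercises, assuming the mainnet MAXIMUM_GOSSIP_CLOCK_DISPARITY of 500ms; not Prysm's implementation:

```go
package main

import (
	"fmt"
	"time"
)

// isBlockQueueable returns true when a block arrives earlier than its
// slot start minus the allowed clock disparity, meaning it should be
// held in a queue instead of processed immediately.
func isBlockQueueable(genesisTime time.Time, slot, secondsPerSlot uint64, receivedTime time.Time) bool {
	slotStart := genesisTime.Add(time.Duration(slot*secondsPerSlot) * time.Second)
	earliestAllowed := slotStart.Add(-500 * time.Millisecond)
	return receivedTime.Before(earliestAllowed)
}

func main() {
	genesis := time.Now().Add(-12 * time.Second) // slot 1 starts now
	// Received 400ms early: within the 500ms disparity, so not queued.
	fmt.Println(isBlockQueueable(genesis, 1, 12, time.Now().Add(-400*time.Millisecond))) // false
}
```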
@@ -17,7 +17,7 @@ import (
// ErrEmpty is returned for queues with no items
var ErrEmpty = errors.New("queue is empty")

-// ErrDuplicateItem is returned when the queue attmepts to push an item to a key that
+// ErrDuplicateItem is returned when the queue attempts to push an item to a key that
// already exists. The queue does not attempt to update, instead returns this
// error. If an Item needs to be updated or replaced, pop the item first.
var ErrDuplicateItem = errors.New("duplicate item")

@@ -95,7 +95,7 @@ message ProposerSlashing {
}

// Attestor slashings are proofs that a slashable offense has been committed by
-// attestating to two conflicting pieces of information by the same validator.
+// attesting to two conflicting pieces of information by the same validator.
message AttesterSlashing {
// First conflicting attestation.
IndexedAttestation attestation_1 = 1;

@@ -418,7 +418,7 @@ func BeaconStateBellatrixToProto(st state.BeaconState) (*ethpbv2.BeaconStateBell
if err != nil {
return nil, errors.Wrap(err, "could not get next sync committee")
}
-sourceLatestExecutionPaylodHeader, err := st.LatestExecutionPayloadHeader()
+sourceLatestExecutionPayloadHeader, err := st.LatestExecutionPayloadHeader()
if err != nil {
return nil, errors.Wrap(err, "could not get latest execution payload header")
}

@@ -478,20 +478,20 @@ func BeaconStateBellatrixToProto(st state.BeaconState) (*ethpbv2.BeaconStateBell
AggregatePubkey: bytesutil.SafeCopyBytes(sourceNextSyncCommittee.AggregatePubkey),
},
LatestExecutionPayloadHeader: &enginev1.ExecutionPayloadHeader{
-ParentHash: bytesutil.SafeCopyBytes(sourceLatestExecutionPaylodHeader.ParentHash),
-FeeRecipient: bytesutil.SafeCopyBytes(sourceLatestExecutionPaylodHeader.FeeRecipient),
-StateRoot: bytesutil.SafeCopyBytes(sourceLatestExecutionPaylodHeader.StateRoot),
-ReceiptsRoot: bytesutil.SafeCopyBytes(sourceLatestExecutionPaylodHeader.ReceiptsRoot),
-LogsBloom: bytesutil.SafeCopyBytes(sourceLatestExecutionPaylodHeader.LogsBloom),
-PrevRandao: bytesutil.SafeCopyBytes(sourceLatestExecutionPaylodHeader.PrevRandao),
-BlockNumber: sourceLatestExecutionPaylodHeader.BlockNumber,
-GasLimit: sourceLatestExecutionPaylodHeader.GasLimit,
-GasUsed: sourceLatestExecutionPaylodHeader.GasUsed,
-Timestamp: sourceLatestExecutionPaylodHeader.Timestamp,
-ExtraData: bytesutil.SafeCopyBytes(sourceLatestExecutionPaylodHeader.ExtraData),
-BaseFeePerGas: bytesutil.SafeCopyBytes(sourceLatestExecutionPaylodHeader.BaseFeePerGas),
-BlockHash: bytesutil.SafeCopyBytes(sourceLatestExecutionPaylodHeader.BlockHash),
-TransactionsRoot: bytesutil.SafeCopyBytes(sourceLatestExecutionPaylodHeader.TransactionsRoot),
+ParentHash: bytesutil.SafeCopyBytes(sourceLatestExecutionPayloadHeader.ParentHash),
+FeeRecipient: bytesutil.SafeCopyBytes(sourceLatestExecutionPayloadHeader.FeeRecipient),
+StateRoot: bytesutil.SafeCopyBytes(sourceLatestExecutionPayloadHeader.StateRoot),
+ReceiptsRoot: bytesutil.SafeCopyBytes(sourceLatestExecutionPayloadHeader.ReceiptsRoot),
+LogsBloom: bytesutil.SafeCopyBytes(sourceLatestExecutionPayloadHeader.LogsBloom),
+PrevRandao: bytesutil.SafeCopyBytes(sourceLatestExecutionPayloadHeader.PrevRandao),
+BlockNumber: sourceLatestExecutionPayloadHeader.BlockNumber,
+GasLimit: sourceLatestExecutionPayloadHeader.GasLimit,
+GasUsed: sourceLatestExecutionPayloadHeader.GasUsed,
+Timestamp: sourceLatestExecutionPayloadHeader.Timestamp,
+ExtraData: bytesutil.SafeCopyBytes(sourceLatestExecutionPayloadHeader.ExtraData),
+BaseFeePerGas: bytesutil.SafeCopyBytes(sourceLatestExecutionPayloadHeader.BaseFeePerGas),
+BlockHash: bytesutil.SafeCopyBytes(sourceLatestExecutionPayloadHeader.BlockHash),
+TransactionsRoot: bytesutil.SafeCopyBytes(sourceLatestExecutionPayloadHeader.TransactionsRoot),
},
}

@@ -203,7 +203,7 @@ message ProposerSlashing {
}

// Attestor slashings are proofs that a slashable offense has been committed by
-// attestating to two conflicting pieces of information by the same validator.
+// attesting to two conflicting pieces of information by the same validator.
message AttesterSlashing {
// First conflicting attestation.
IndexedAttestation attestation_1 = 1;

@@ -238,7 +238,7 @@ func (v *ValidatorNode) Start(ctx context.Context) error {
}
if v.config.UseWeb3RemoteSigner {
args = append(args, fmt.Sprintf("--%s=http://localhost:%d", flags.Web3SignerURLFlag.Name, Web3RemoteSignerPort))
-// Write the pubkeys as comma seperated hex strings with 0x prefix.
+// Write the pubkeys as comma separated hex strings with 0x prefix.
// See: https://docs.teku.consensys.net/en/latest/HowTo/External-Signer/Use-External-Signer/
args = append(args, fmt.Sprintf("--%s=%s", flags.Web3SignerPublicValidatorKeysFlag.Name, strings.Join(validatorHexPubKeys, ",")))
} else {

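The comment describes the argument format handed to the remote signer. A small sketch of building such a comma-separated, 0x-prefixed list; the flag name below is illustrative only, and 48 bytes is the BLS pubkey length:

```go
package main

import (
	"encoding/hex"
	"fmt"
	"strings"
)

// joinPubkeys hex-encodes each pubkey with a 0x prefix and joins the
// results with commas, matching the format the comment above describes.
func joinPubkeys(pubkeys [][48]byte) string {
	hexKeys := make([]string, len(pubkeys))
	for i, pk := range pubkeys {
		hexKeys[i] = "0x" + hex.EncodeToString(pk[:])
	}
	return strings.Join(hexKeys, ",")
}

func main() {
	var a, b [48]byte
	a[0], b[0] = 0xaa, 0xbb
	fmt.Println("--validators-external-signer-public-keys=" + joinPubkeys([][48]byte{a, b}))
}
```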
@@ -58,7 +58,7 @@ func e2eMinimal(t *testing.T, cfgo ...types.E2EConfigOpt) *testRunner {
ev.AllNodesHaveSameHead,
ev.ValidatorSyncParticipation,
ev.FeeRecipientIsPresent,
-//ev.TransactionsPresent, TODO: Renable Transaction evaluator once it tx pool issues are fixed.
+//ev.TransactionsPresent, TODO: Re-enable Transaction evaluator once it tx pool issues are fixed.
}
testConfig := &types.E2EConfig{
BeaconFlags: []string{

@@ -128,7 +128,7 @@ func e2eMainnet(t *testing.T, usePrysmSh, useMultiClient bool, cfgo ...types.E2E
ev.FinishedSyncing,
ev.AllNodesHaveSameHead,
ev.FeeRecipientIsPresent,
-//ev.TransactionsPresent, TODO: Renable Transaction evaluator once it tx pool issues are fixed.
+//ev.TransactionsPresent, TODO: Re-enable Transaction evaluator once it tx pool issues are fixed.
}
testConfig := &types.E2EConfig{
BeaconFlags: []string{

@@ -101,7 +101,7 @@ func generateMarshalledFullStateAndBlock() error {

conf := &util.BlockGenConfig{}
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
-// Small offset for the beacon state so we dont process a block on an epoch.
+// Small offset for the beacon state so we don't process a block on an epoch.
slotOffset := types.Slot(2)
block, err := util.GenerateFullBlock(beaconState, privs, conf, slotsPerEpoch+slotOffset)
if err != nil {

@@ -63,7 +63,7 @@ func (acm *AccountsCLIManager) Delete(ctx context.Context) error {
return nil
}

-// DeleteAccount permforms the deletion on the Keymanager.
+// DeleteAccount performs the deletion on the Keymanager.
func DeleteAccount(ctx context.Context, cfg *DeleteConfig) error {
if len(cfg.DeletePublicKeys) == 1 {
log.Info("Deleting account...")

@@ -20,7 +20,7 @@ import (
"github.com/urfave/cli/v2"
)

-// selectAccounts Ask user to select accounts via an interactive userprompt.
+// selectAccounts Ask user to select accounts via an interactive user prompt.
func selectAccounts(selectionPrompt string, pubKeys [][fieldparams.BLSPubkeyLength]byte) (filteredPubKeys []bls.PublicKey, err error) {
pubKeyStrings := make([]string, len(pubKeys))
for i, pk := range pubKeys {

@@ -202,7 +202,7 @@ func pruneProposalHistoryBySlot(valBucket *bolt.Bucket, newestSlot types.Slot) e
return errors.Wrapf(err, "could not prune epoch %d in proposal history", epoch)
}
} else {
-// If starting from the oldest, we dont find anything prunable, stop pruning.
+// If starting from the oldest, we don't find anything prunable, stop pruning.
break
}
}

@@ -165,7 +165,7 @@ func validateMetadata(ctx context.Context, validatorDB db.Database, interchangeJ
return nil
}
if !bytes.Equal(dbGvr, gvr[:]) {
-return errors.New("genesis validators root doesnt match the one that is stored in slashing protection db. " +
+return errors.New("genesis validators root doesn't match the one that is stored in slashing protection db. " +
"Please make sure you import the protection data that is relevant to the chain you are on")
}
return nil

@@ -281,7 +281,7 @@ func Test_validateMetadataGenesisValidatorsRoot(t *testing.T) {
require.NoError(t, validatorDB.SaveGenesisValidatorsRoot(ctx, tt.dbGenesisValidatorsRoot))
err := validateMetadata(ctx, validatorDB, tt.interchangeJSON)
if tt.wantErr {
-require.ErrorContains(t, "genesis validators root doesnt match the one that is stored", err)
+require.ErrorContains(t, "genesis validators root doesn't match the one that is stored", err)
} else {
require.NoError(t, err)
}