Mirror of https://github.com/OffchainLabs/prysm.git, synced 2026-01-09 23:48:06 -05:00
Fix a bunch of deepsource warnings (#11814)
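The change repeated throughout this diff is mechanical: zero-valued fixed-size arrays declared with an empty composite literal (`x := [32]byte{}`) are rewritten as plain `var` declarations (`var x [32]byte`), the form DeepSource's Go analyzer prefers (presumably a style rule about zero-value declarations; the exact rule ID is not part of this commit). Both forms produce the same zero value, as this minimal, self-contained sketch shows:

```go
package main

import (
	"bytes"
	"fmt"
)

func main() {
	// Both declarations yield an all-zero 32-byte array; the `var` form is
	// what the linter prefers over the empty composite literal.
	literal := [32]byte{}
	var declared [32]byte

	fmt.Println(bytes.Equal(literal[:], declared[:])) // true
}
```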
@@ -877,7 +877,7 @@ func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
 	}
 	service, err := NewService(ctx, opts...)
 	require.NoError(t, err)
-	genesisStateRoot := [32]byte{}
+	var genesisStateRoot [32]byte
 	genesisBlk := blocks.NewGenesisBlock(genesisStateRoot[:])
 	util.SaveBlock(t, ctx, beaconDB, genesisBlk)
 	genesisRoot, err := genesisBlk.Block.HashTreeRoot()

@@ -59,7 +59,7 @@ func TestStore_OnBlock(t *testing.T) {
 
 	service, err := NewService(ctx, opts...)
 	require.NoError(t, err)
-	genesisStateRoot := [32]byte{}
+	var genesisStateRoot [32]byte
 	genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
 	util.SaveBlock(t, ctx, beaconDB, genesis)
 	validGenesisRoot, err := genesis.Block.HashTreeRoot()

@@ -362,7 +362,7 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized(t *testing.T) {
 	require.NoError(t, err)
 	service.cfg.ForkChoiceStore = doublylinkedtree.New()
 
-	genesisStateRoot := [32]byte{}
+	var genesisStateRoot [32]byte
 	genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
 	util.SaveBlock(t, ctx, beaconDB, genesis)
 	validGenesisRoot, err := genesis.Block.HashTreeRoot()

@@ -421,7 +421,7 @@ func TestFillForkChoiceMissingBlocks_FinalizedSibling(t *testing.T) {
 	require.NoError(t, err)
 	service.cfg.ForkChoiceStore = doublylinkedtree.New()
 
-	genesisStateRoot := [32]byte{}
+	var genesisStateRoot [32]byte
 	genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
 	util.SaveBlock(t, ctx, beaconDB, genesis)
 	validGenesisRoot, err := genesis.Block.HashTreeRoot()

@@ -1004,7 +1004,7 @@ func TestInsertFinalizedDeposits(t *testing.T) {
 	assert.NoError(t, gs.SetEth1Data(&ethpb.Eth1Data{DepositCount: 10}))
 	assert.NoError(t, gs.SetEth1DepositIndex(8))
 	assert.NoError(t, service.cfg.StateGen.SaveState(ctx, [32]byte{'m', 'o', 'c', 'k'}, gs))
-	zeroSig := [96]byte{}
+	var zeroSig [96]byte
 	for i := uint64(0); i < uint64(4*params.BeaconConfig().SlotsPerEpoch); i++ {
 		root := []byte(strconv.Itoa(int(i)))
 		assert.NoError(t, depositCache.InsertDeposit(ctx, &ethpb.Deposit{Data: &ethpb.Deposit_Data{

@@ -1042,7 +1042,7 @@ func TestInsertFinalizedDeposits_MultipleFinalizedRoutines(t *testing.T) {
 	assert.NoError(t, gs2.SetEth1Data(&ethpb.Eth1Data{DepositCount: 15}))
 	assert.NoError(t, gs2.SetEth1DepositIndex(13))
 	assert.NoError(t, service.cfg.StateGen.SaveState(ctx, [32]byte{'m', 'o', 'c', 'k', '2'}, gs2))
-	zeroSig := [96]byte{}
+	var zeroSig [96]byte
 	for i := uint64(0); i < uint64(4*params.BeaconConfig().SlotsPerEpoch); i++ {
 		root := []byte(strconv.Itoa(int(i)))
 		assert.NoError(t, depositCache.InsertDeposit(ctx, &ethpb.Deposit{Data: &ethpb.Deposit_Data{
beacon-chain/cache/payload_id.go (vendored), 2 lines changed
@@ -57,7 +57,7 @@ func (f *ProposerPayloadIDsCache) SetProposerAndPayloadIDs(slot types.Slot, vId
 	ids, ok := f.slotToProposerAndPayloadIDs[k]
 	// Ok to overwrite if the slot is already set but the cached payload ID is not set.
 	// This combats the re-org case where payload assignment could change at the start of the epoch.
-	byte8 := [vIdLength]byte{}
+	var byte8 [vIdLength]byte
 	if !ok || (ok && bytes.Equal(ids[vIdLength:], byte8[:])) {
 		f.slotToProposerAndPayloadIDs[k] = bs
 	}
beacon-chain/cache/payload_id_test.go (vendored), 2 lines changed
@@ -9,7 +9,7 @@ import (
 
 func TestValidatorPayloadIDsCache_GetAndSaveValidatorPayloadIDs(t *testing.T) {
 	cache := NewProposerPayloadIDsCache()
-	r := [32]byte{}
+	var r [32]byte
 	i, p, ok := cache.GetProposerPayloadIDs(0, r)
 	require.Equal(t, false, ok)
 	require.Equal(t, types.ValidatorIndex(0), i)
@@ -256,7 +256,7 @@ func TestProcessAttestationNoVerify_SourceTargetHead(t *testing.T) {
 		},
 		AggregationBits: aggBits,
 	}
-	zeroSig := [96]byte{}
+	var zeroSig [96]byte
 	att.Signature = zeroSig[:]
 
 	ckp := beaconState.CurrentJustifiedCheckpoint()

@@ -64,7 +64,7 @@ func TestVerifyAttestationNoVerifySignature_IncorrectSourceEpoch(t *testing.T) {
 		AggregationBits: aggBits,
 	}
 
-	zeroSig := [96]byte{}
+	var zeroSig [96]byte
 	att.Signature = zeroSig[:]
 
 	err := beaconState.SetSlot(beaconState.Slot() + params.BeaconConfig().MinAttestationInclusionDelay)

@@ -113,7 +113,7 @@ func TestProcessAttestationsNoVerify_OK(t *testing.T) {
 		AggregationBits: aggBits,
 	}
 
-	zeroSig := [fieldparams.BLSSignatureLength]byte{}
+	var zeroSig [fieldparams.BLSSignatureLength]byte
 	att.Signature = zeroSig[:]
 
 	err := beaconState.SetSlot(beaconState.Slot() + params.BeaconConfig().MinAttestationInclusionDelay)

@@ -144,7 +144,7 @@ func TestVerifyAttestationNoVerifySignature_OK(t *testing.T) {
 		AggregationBits: aggBits,
 	}
 
-	zeroSig := [fieldparams.BLSSignatureLength]byte{}
+	var zeroSig [fieldparams.BLSSignatureLength]byte
 	att.Signature = zeroSig[:]
 
 	err := beaconState.SetSlot(beaconState.Slot() + params.BeaconConfig().MinAttestationInclusionDelay)

@@ -172,7 +172,7 @@ func TestVerifyAttestationNoVerifySignature_BadAttIdx(t *testing.T) {
 		},
 		AggregationBits: aggBits,
 	}
-	zeroSig := [fieldparams.BLSSignatureLength]byte{}
+	var zeroSig [fieldparams.BLSSignatureLength]byte
 	att.Signature = zeroSig[:]
 	require.NoError(t, beaconState.SetSlot(beaconState.Slot()+params.BeaconConfig().MinAttestationInclusionDelay))
 	ckp := beaconState.CurrentJustifiedCheckpoint()
@@ -55,9 +55,9 @@ func TestFuzzProcessBlockHeader_10000(t *testing.T) {
 func TestFuzzverifyDepositDataSigningRoot_10000(_ *testing.T) {
 	fuzzer := fuzz.NewWithSeed(0)
 	var ba []byte
-	pubkey := [fieldparams.BLSPubkeyLength]byte{}
-	sig := [96]byte{}
-	domain := [4]byte{}
+	var pubkey [fieldparams.BLSPubkeyLength]byte
+	var sig [96]byte
+	var domain [4]byte
 	var p []byte
 	var s []byte
 	var d []byte
@@ -281,7 +281,7 @@ func ProcessPayloadHeader(st state.BeaconState, header interfaces.ExecutionData)
 
 // GetBlockPayloadHash returns the hash of the execution payload of the block
 func GetBlockPayloadHash(blk interfaces.BeaconBlock) ([32]byte, error) {
-	payloadHash := [32]byte{}
+	var payloadHash [32]byte
 	if IsPreBellatrixVersion(blk.Version()) {
 		return payloadHash, nil
 	}

@@ -206,7 +206,7 @@ func ComputeDomain(domainType [DomainByteLength]byte, forkVersion, genesisValida
 	if genesisValidatorsRoot == nil {
 		genesisValidatorsRoot = params.BeaconConfig().ZeroHash[:]
 	}
-	forkBytes := [ForkVersionByteLength]byte{}
+	var forkBytes [ForkVersionByteLength]byte
 	copy(forkBytes[:], forkVersion)
 
 	forkDataRoot, err := computeForkDataRoot(forkBytes[:], genesisValidatorsRoot)
@@ -114,9 +114,9 @@ func TestSigningRoot_ComputeForkDigest(t *testing.T) {
 func TestFuzzverifySigningRoot_10000(_ *testing.T) {
 	fuzzer := fuzz.NewWithSeed(0)
 	st := &ethpb.BeaconState{}
-	pubkey := [fieldparams.BLSPubkeyLength]byte{}
-	sig := [96]byte{}
-	domain := [4]byte{}
+	var pubkey [fieldparams.BLSPubkeyLength]byte
+	var sig [96]byte
+	var domain [4]byte
 	var p []byte
 	var s []byte
 	var d []byte
@@ -212,7 +212,7 @@ func createFullBlockWithOperations(t *testing.T) (state.BeaconState,
 	err = beaconState.SetSlashings(make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector))
 	require.NoError(t, err)
 	cp := beaconState.CurrentJustifiedCheckpoint()
-	mockRoot := [32]byte{}
+	var mockRoot [32]byte
 	copy(mockRoot[:], "hello-world")
 	cp.Root = mockRoot[:]
 	require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(cp))

@@ -98,7 +98,7 @@ func TestStore_SaveBackfillBlockRoot(t *testing.T) {
 	_, err := db.BackfillBlockRoot(ctx)
 	require.ErrorIs(t, err, ErrNotFoundBackfillBlockRoot)
 
-	expected := [32]byte{}
+	var expected [32]byte
 	copy(expected[:], []byte{0x23})
 	err = db.SaveBackfillBlockRoot(ctx, expected)
 	require.NoError(t, err)
@@ -129,7 +129,7 @@ func (f *ForkChoice) InsertNode(ctx context.Context, state state.BeaconState, ro
 		return errNilBlockHeader
 	}
 	parentRoot := bytesutil.ToBytes32(bh.ParentRoot)
-	payloadHash := [32]byte{}
+	var payloadHash [32]byte
 	if state.Version() >= version.Bellatrix {
 		ph, err := state.LatestExecutionPayloadHeader()
 		if err != nil {

@@ -578,7 +578,7 @@ func TestStore_InsertOptimisticChain(t *testing.T) {
 	blks := make([]*forkchoicetypes.BlockAndCheckpoints, 0)
 	blk := util.NewBeaconBlock()
 	blk.Block.Slot = 1
-	pr := [32]byte{}
+	var pr [32]byte
 	blk.Block.ParentRoot = pr[:]
 	root, err := blk.Block.HashTreeRoot()
 	require.NoError(t, err)

@@ -414,7 +414,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
 func TestForkChoice_BoostProposerRoot(t *testing.T) {
 	ctx := context.Background()
 	root := [32]byte{'A'}
-	zeroHash := [32]byte{}
+	var zeroHash [32]byte
 
 	t.Run("does not boost block from different slot", func(t *testing.T) {
 		f := setup(0, 0)

@@ -360,7 +360,7 @@ func TestForkChoice_HighestReceivedBlockSlotRoot(t *testing.T) {
 func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {
 	f := setup(1, 1)
 	s := f.store
-	b := [32]byte{}
+	var b [32]byte
 
 	// Make sure it doesn't underflow
 	s.genesisTime = uint64(time.Now().Add(time.Duration(-1*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second).Unix())
@@ -103,7 +103,7 @@ func TestProcessUnaggregatedAttestationStateCached(t *testing.T) {
 	participation := []byte{0xff, 0xff, 0x01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
 	require.NoError(t, state.SetCurrentParticipationBits(participation))
 
-	root := [32]byte{}
+	var root [32]byte
 	copy(root[:], "hello-world")
 
 	att := &ethpb.Attestation{

@@ -175,7 +175,7 @@ func TestProcessAggregatedAttestationStateCached(t *testing.T) {
 	participation := []byte{0xff, 0xff, 0x01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
 	require.NoError(t, state.SetCurrentParticipationBits(participation))
 
-	root := [32]byte{}
+	var root [32]byte
 	copy(root[:], "hello-world")
 
 	att := &ethpb.AggregateAttestationAndProof{

@@ -170,7 +170,7 @@ func TestProcessProposedBlock(t *testing.T) {
 	hook := logTest.NewGlobal()
 	s := setupService(t)
 	beaconState, _ := util.DeterministicGenesisState(t, 256)
-	root := [32]byte{}
+	var root [32]byte
 	copy(root[:], "hello-world")
 	wb, err := blocks.NewBeaconBlock(tt.block)
 	require.NoError(t, err)
@@ -95,8 +95,8 @@ func TestNodeStart_Ok_registerDeterministicGenesisService(t *testing.T) {
 	genesisState, _, err := interop.GenerateGenesisState(context.Background(), 0, numValidators)
 	require.NoError(t, err, "Could not generate genesis beacon state")
 	for i := uint64(1); i < 2; i++ {
-		someRoot := [32]byte{}
-		someKey := [fieldparams.BLSPubkeyLength]byte{}
+		var someRoot [32]byte
+		var someKey [fieldparams.BLSPubkeyLength]byte
 		copy(someRoot[:], strconv.Itoa(int(i)))
 		copy(someKey[:], strconv.Itoa(int(i)))
 		genesisState.Validators = append(genesisState.Validators, &ethpb.Validator{
@@ -120,7 +120,7 @@ func TestBatchAttestations_Single(t *testing.T) {
 	priv, err := bls.RandKey()
 	require.NoError(t, err)
 	sig := priv.Sign([]byte("dummy_test_data"))
-	mockRoot := [32]byte{}
+	var mockRoot [32]byte
 	d := &ethpb.AttestationData{
 		BeaconBlockRoot: mockRoot[:],
 		Source: &ethpb.Checkpoint{Root: mockRoot[:]},

@@ -162,7 +162,7 @@ func TestAggregateAndSaveForkChoiceAtts_Single(t *testing.T) {
 	priv, err := bls.RandKey()
 	require.NoError(t, err)
 	sig := priv.Sign([]byte("dummy_test_data"))
-	mockRoot := [32]byte{}
+	var mockRoot [32]byte
 	d := &ethpb.AttestationData{
 		BeaconBlockRoot: mockRoot[:],
 		Source: &ethpb.Checkpoint{Root: mockRoot[:]},

@@ -186,7 +186,7 @@ func TestAggregateAndSaveForkChoiceAtts_Multiple(t *testing.T) {
 	priv, err := bls.RandKey()
 	require.NoError(t, err)
 	sig := priv.Sign([]byte("dummy_test_data"))
-	mockRoot := [32]byte{}
+	var mockRoot [32]byte
 	d := &ethpb.AttestationData{
 		BeaconBlockRoot: mockRoot[:],
 		Source: &ethpb.Checkpoint{Root: mockRoot[:]},
@@ -603,10 +603,10 @@ func TestTrimmedOrderedPeers(t *testing.T) {
 
 	expectedTarget := types.Epoch(2)
 	maxPeers := 3
-	mockroot2 := [32]byte{}
-	mockroot3 := [32]byte{}
-	mockroot4 := [32]byte{}
-	mockroot5 := [32]byte{}
+	var mockroot2 [32]byte
+	var mockroot3 [32]byte
+	var mockroot4 [32]byte
+	var mockroot5 [32]byte
 	copy(mockroot2[:], "two")
 	copy(mockroot3[:], "three")
 	copy(mockroot4[:], "four")
@@ -26,7 +26,7 @@ func TestService_CanSubscribe(t *testing.T) {
 	currentFork := [4]byte{0x01, 0x02, 0x03, 0x04}
 	validProtocolSuffix := "/" + encoder.ProtocolSuffixSSZSnappy
 	genesisTime := time.Now()
-	valRoot := [32]byte{}
+	var valRoot [32]byte
 	digest, err := forks.CreateForkDigest(genesisTime, valRoot[:])
 	assert.NoError(t, err)
 	type test struct {

@@ -216,7 +216,7 @@ func TestService_FilterIncomingSubscriptions(t *testing.T) {
 	params.SetupTestConfigCleanup(t)
 	validProtocolSuffix := "/" + encoder.ProtocolSuffixSSZSnappy
 	genesisTime := time.Now()
-	valRoot := [32]byte{}
+	var valRoot [32]byte
 	digest, err := forks.CreateForkDigest(genesisTime, valRoot[:])
 	assert.NoError(t, err)
 	type args struct {
@@ -88,7 +88,7 @@ func (vs *Server) GetAttesterDuties(ctx context.Context, req *ethpbv1.AttesterDu
 	duties := make([]*ethpbv1.AttesterDuty, 0, len(req.Index))
 	for _, index := range req.Index {
 		pubkey := s.PubkeyAtIndex(index)
-		zeroPubkey := [fieldparams.BLSPubkeyLength]byte{}
+		var zeroPubkey [fieldparams.BLSPubkeyLength]byte
 		if bytes.Equal(pubkey[:], zeroPubkey[:]) {
 			return nil, status.Errorf(codes.InvalidArgument, "Invalid validator index")
 		}

@@ -1118,7 +1118,7 @@ func syncCommitteeDuties(
 			ValidatorIndex: index,
 		}
 		valPubkey48 := st.PubkeyAtIndex(index)
-		zeroPubkey := [fieldparams.BLSPubkeyLength]byte{}
+		var zeroPubkey [fieldparams.BLSPubkeyLength]byte
 		if bytes.Equal(valPubkey48[:], zeroPubkey[:]) {
 			return nil, errInvalidValIndex
 		}
@@ -231,7 +231,7 @@ func generateAtt(state state.ReadOnlyBeaconState, index uint64, privKeys []bls.S
 	}
 
 	sigs := make([]bls.Signature, len(attestingIndices))
-	zeroSig := [96]byte{}
+	var zeroSig [96]byte
 	att.Signature = zeroSig[:]
 
 	for i, indice := range attestingIndices {

@@ -274,7 +274,7 @@ func generateUnaggregatedAtt(state state.ReadOnlyBeaconState, index uint64, priv
 	}
 
 	sigs := make([]bls.Signature, len(attestingIndices))
-	zeroSig := [96]byte{}
+	var zeroSig [96]byte
 	att.Signature = zeroSig[:]
 
 	for i, indice := range attestingIndices {
@@ -1751,7 +1751,7 @@ func TestProposer_FilterAttestation(t *testing.T) {
 	domain, err := signing.Domain(st.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, params.BeaconConfig().ZeroHash[:])
 	require.NoError(t, err)
 	sigs := make([]bls.Signature, len(attestingIndices))
-	zeroSig := [96]byte{}
+	var zeroSig [96]byte
 	atts[i].Signature = zeroSig[:]
 
 	for i, indice := range attestingIndices {
@@ -90,7 +90,7 @@ func TestGetSyncSubcommitteeIndex_Ok(t *testing.T) {
 			SyncCommitteeIndices: []types.CommitteeIndex{0},
 		},
 	}
-	pubKey := [fieldparams.BLSPubkeyLength]byte{}
+	var pubKey [fieldparams.BLSPubkeyLength]byte
 	// Request slot 0, should get the index 0 for validator 0.
 	res, err := server.GetSyncSubcommitteeIndex(context.Background(), &ethpb.SyncSubcommitteeIndexRequest{
 		PublicKey: pubKey[:], Slot: types.Slot(0),
@@ -857,7 +857,7 @@ func runAttestationsBenchmark(b *testing.B, s *Service, numAtts, numValidators u
 	for i := uint64(0); i < numAtts; i++ {
 		source := types.Epoch(i)
 		target := types.Epoch(i + 1)
-		signingRoot := [32]byte{}
+		var signingRoot [32]byte
 		copy(signingRoot[:], fmt.Sprintf("%d", i))
 		atts[i] = createAttestationWrapper(
 			b,
@@ -369,7 +369,7 @@ func handleBalanceSlice(val, indices []uint64, convertAll bool) ([][32]byte, err
 	// are compressed according to 4 values -> 1 chunk.
 	startIdx := idx / numOfElems
 	startGroup := startIdx * numOfElems
-	chunk := [32]byte{}
+	var chunk [32]byte
 	sizeOfElem := len(chunk) / iNumOfElems
 	for i, j := 0, startGroup; j < startGroup+numOfElems; i, j = i+sizeOfElem, j+1 {
 		wantedVal := uint64(0)
@@ -46,13 +46,13 @@ func TestBalancesSlice_CorrectRoots_All(t *testing.T) {
 	roots, err := handleBalanceSlice(balances, []uint64{}, true)
 	assert.NoError(t, err)
 
-	root1 := [32]byte{}
+	var root1 [32]byte
 	binary.LittleEndian.PutUint64(root1[:8], balances[0])
 	binary.LittleEndian.PutUint64(root1[8:16], balances[1])
 	binary.LittleEndian.PutUint64(root1[16:24], balances[2])
 	binary.LittleEndian.PutUint64(root1[24:32], balances[3])
 
-	root2 := [32]byte{}
+	var root2 [32]byte
 	binary.LittleEndian.PutUint64(root2[:8], balances[4])
 
 	assert.DeepEqual(t, roots, [][32]byte{root1, root2})

@@ -63,7 +63,7 @@ func TestBalancesSlice_CorrectRoots_Some(t *testing.T) {
 	roots, err := handleBalanceSlice(balances, []uint64{2, 3}, false)
 	assert.NoError(t, err)
 
-	root1 := [32]byte{}
+	var root1 [32]byte
 	binary.LittleEndian.PutUint64(root1[:8], balances[0])
 	binary.LittleEndian.PutUint64(root1[8:16], balances[1])
 	binary.LittleEndian.PutUint64(root1[16:24], balances[2])
@@ -42,7 +42,7 @@ func ComputeFieldRootsWithHasher(ctx context.Context, state *BeaconState) ([][]b
 	fieldRoots[nativetypes.GenesisTime.RealPosition()] = genesisRoot[:]
 
 	// Genesis validators root.
-	r := [32]byte{}
+	var r [32]byte
 	copy(r[:], state.genesisValidatorsRoot[:])
 	fieldRoots[nativetypes.GenesisValidatorsRoot.RealPosition()] = r[:]
 
@@ -22,8 +22,8 @@ func TestValidatorMap_DistinctCopy(t *testing.T) {
 	count := uint64(100)
 	vals := make([]*ethpb.Validator, 0, count)
 	for i := uint64(1); i < count; i++ {
-		someRoot := [32]byte{}
-		someKey := [fieldparams.BLSPubkeyLength]byte{}
+		var someRoot [32]byte
+		var someKey [fieldparams.BLSPubkeyLength]byte
 		copy(someRoot[:], strconv.Itoa(int(i)))
 		copy(someKey[:], strconv.Itoa(int(i)))
 		vals = append(vals, &ethpb.Validator{

@@ -50,8 +50,8 @@ func TestBeaconState_NoDeadlock_Phase0(t *testing.T) {
 	count := uint64(100)
 	vals := make([]*ethpb.Validator, 0, count)
 	for i := uint64(1); i < count; i++ {
-		someRoot := [32]byte{}
-		someKey := [fieldparams.BLSPubkeyLength]byte{}
+		var someRoot [32]byte
+		var someKey [fieldparams.BLSPubkeyLength]byte
 		copy(someRoot[:], strconv.Itoa(int(i)))
 		copy(someKey[:], strconv.Itoa(int(i)))
 		vals = append(vals, &ethpb.Validator{

@@ -106,8 +106,8 @@ func TestBeaconState_NoDeadlock_Altair(t *testing.T) {
 	count := uint64(100)
 	vals := make([]*ethpb.Validator, 0, count)
 	for i := uint64(1); i < count; i++ {
-		someRoot := [32]byte{}
-		someKey := [fieldparams.BLSPubkeyLength]byte{}
+		var someRoot [32]byte
+		var someKey [fieldparams.BLSPubkeyLength]byte
 		copy(someRoot[:], strconv.Itoa(int(i)))
 		copy(someKey[:], strconv.Itoa(int(i)))
 		vals = append(vals, &ethpb.Validator{

@@ -162,8 +162,8 @@ func TestBeaconState_NoDeadlock_Bellatrix(t *testing.T) {
 	count := uint64(100)
 	vals := make([]*ethpb.Validator, 0, count)
 	for i := uint64(1); i < count; i++ {
-		someRoot := [32]byte{}
-		someKey := [fieldparams.BLSPubkeyLength]byte{}
+		var someRoot [32]byte
+		var someKey [fieldparams.BLSPubkeyLength]byte
 		copy(someRoot[:], strconv.Itoa(int(i)))
 		copy(someKey[:], strconv.Itoa(int(i)))
 		vals = append(vals, &ethpb.Validator{

@@ -218,8 +218,8 @@ func TestBeaconState_NoDeadlock_Capella(t *testing.T) {
 	count := uint64(100)
 	vals := make([]*ethpb.Validator, 0, count)
 	for i := uint64(1); i < count; i++ {
-		someRoot := [32]byte{}
-		someKey := [fieldparams.BLSPubkeyLength]byte{}
+		var someRoot [32]byte
+		var someKey [fieldparams.BLSPubkeyLength]byte
 		copy(someRoot[:], strconv.Itoa(int(i)))
 		copy(someKey[:], strconv.Itoa(int(i)))
 		vals = append(vals, &ethpb.Validator{

@@ -275,8 +275,8 @@ func TestBeaconState_AppendBalanceWithTrie(t *testing.T) {
 	vals := make([]*ethpb.Validator, 0, count)
 	bals := make([]uint64, 0, count)
 	for i := uint64(1); i < count; i++ {
-		someRoot := [32]byte{}
-		someKey := [fieldparams.BLSPubkeyLength]byte{}
+		var someRoot [32]byte
+		var someKey [fieldparams.BLSPubkeyLength]byte
 		copy(someRoot[:], strconv.Itoa(int(i)))
 		copy(someKey[:], strconv.Itoa(int(i)))
 		vals = append(vals, &ethpb.Validator{
@@ -55,8 +55,8 @@ func setupGenesisState(tb testing.TB, count uint64) *ethpb.BeaconState {
 	genesisState, _, err := interop.GenerateGenesisState(context.Background(), 0, count)
 	require.NoError(tb, err, "Could not generate genesis beacon state")
 	for i := uint64(1); i < count; i++ {
-		someRoot := [32]byte{}
-		someKey := [fieldparams.BLSPubkeyLength]byte{}
+		var someRoot [32]byte
+		var someKey [fieldparams.BLSPubkeyLength]byte
 		copy(someRoot[:], strconv.Itoa(int(i)))
 		copy(someKey[:], strconv.Itoa(int(i)))
 		genesisState.Validators = append(genesisState.Validators, &ethpb.Validator{
@@ -573,7 +573,7 @@ func mockBlocks(n int, iter func(int, chan uint32)) []interfaces.SignedBeaconBlo
 	go iter(n, bchan)
 	mb := make([]interfaces.SignedBeaconBlock, 0)
 	for i := range bchan {
-		h := [32]byte{}
+		var h [32]byte
 		binary.LittleEndian.PutUint32(h[:], i)
 		b := &mock.SignedBeaconBlock{BeaconBlock: &mock.BeaconBlock{BeaconBlockBody: &mock.BeaconBlockBody{}, Htr: h}}
 		mb = append(mb, b)
@@ -36,7 +36,7 @@ func TestReplayBlocks_AllSkipSlots(t *testing.T) {
 	require.NoError(t, err)
 	require.NoError(t, beaconState.SetSlashings(make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector)))
 	cp := beaconState.CurrentJustifiedCheckpoint()
-	mockRoot := [32]byte{}
+	var mockRoot [32]byte
 	copy(mockRoot[:], "hello-world")
 	cp.Root = mockRoot[:]
 	require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(cp))

@@ -65,7 +65,7 @@ func TestReplayBlocks_SameSlot(t *testing.T) {
 	require.NoError(t, err)
 	require.NoError(t, beaconState.SetSlashings(make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector)))
 	cp := beaconState.CurrentJustifiedCheckpoint()
-	mockRoot := [32]byte{}
+	var mockRoot [32]byte
 	copy(mockRoot[:], "hello-world")
 	cp.Root = mockRoot[:]
 	require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(cp))

@@ -95,7 +95,7 @@ func TestReplayBlocks_LowerSlotBlock(t *testing.T) {
 	require.NoError(t, err)
 	require.NoError(t, beaconState.SetSlashings(make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector)))
 	cp := beaconState.CurrentJustifiedCheckpoint()
-	mockRoot := [32]byte{}
+	var mockRoot [32]byte
 	copy(mockRoot[:], "hello-world")
 	cp.Root = mockRoot[:]
 	require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(cp))
@@ -44,7 +44,7 @@ func packParticipationBits(bytes []byte) ([][32]byte, error) {
 		}
 		// We create chunks from the list of items based on the
 		// indices determined above.
-		chunk := [32]byte{}
+		var chunk [32]byte
 		copy(chunk[:], bytes[i:j])
 		chunks = append(chunks, chunk)
 	}
@@ -63,8 +63,8 @@ func setupGenesisState(tb testing.TB, count uint64) *ethpb.BeaconState {
 	genesisState, _, err := interop.GenerateGenesisState(context.Background(), 0, 1)
 	require.NoError(tb, err, "Could not generate genesis beacon state")
 	for i := uint64(1); i < count; i++ {
-		someRoot := [32]byte{}
-		someKey := [fieldparams.BLSPubkeyLength]byte{}
+		var someRoot [32]byte
+		var someKey [fieldparams.BLSPubkeyLength]byte
 		copy(someRoot[:], strconv.Itoa(int(i)))
 		copy(someKey[:], strconv.Itoa(int(i)))
 		genesisState.Validators = append(genesisState.Validators, &ethpb.Validator{
@@ -173,12 +173,12 @@ func recomputeRootFromLayer(idx int, layers [][]*[32]byte, chunks []*[32]byte,
 	// only its branch up the tree.
 	currentIndex := idx
 	// Allocate only once.
-	combinedChunks := [64]byte{}
+	var combinedChunks [64]byte
 	for i := 0; i < len(layers)-1; i++ {
 		isLeft := currentIndex%2 == 0
 		neighborIdx := currentIndex ^ 1
 
-		neighbor := [32]byte{}
+		var neighbor [32]byte
 		if layers[i] != nil && len(layers[i]) != 0 && neighborIdx < len(layers[i]) {
 			neighbor = *layers[i][neighborIdx]
 		}

@@ -224,8 +224,8 @@ func recomputeRootFromLayerVariable(idx int, item [32]byte, layers [][]*[32]byte
 	currentIndex := idx
 	root := item
 	// Allocate only once.
-	neighbor := [32]byte{}
-	combinedChunks := [64]byte{}
+	var neighbor [32]byte
+	var combinedChunks [64]byte
 
 	for i := 0; i < len(layers)-1; i++ {
 		isLeft := currentIndex%2 == 0
@@ -28,25 +28,25 @@ func ValidatorFieldRoots(hasher ssz.HashFn, validator *ethpb.Validator) ([][32]b
 	if validator != nil {
 		pubkey := bytesutil.ToBytes48(validator.PublicKey)
 		withdrawCreds := bytesutil.ToBytes32(validator.WithdrawalCredentials)
-		effectiveBalanceBuf := [32]byte{}
+		var effectiveBalanceBuf [32]byte
 		binary.LittleEndian.PutUint64(effectiveBalanceBuf[:8], validator.EffectiveBalance)
 		// Slashed.
-		slashBuf := [32]byte{}
+		var slashBuf [32]byte
 		if validator.Slashed {
 			slashBuf[0] = uint8(1)
 		} else {
 			slashBuf[0] = uint8(0)
 		}
-		activationEligibilityBuf := [32]byte{}
+		var activationEligibilityBuf [32]byte
 		binary.LittleEndian.PutUint64(activationEligibilityBuf[:8], uint64(validator.ActivationEligibilityEpoch))
 
-		activationBuf := [32]byte{}
+		var activationBuf [32]byte
 		binary.LittleEndian.PutUint64(activationBuf[:8], uint64(validator.ActivationEpoch))
 
-		exitBuf := [32]byte{}
+		var exitBuf [32]byte
 		binary.LittleEndian.PutUint64(exitBuf[:8], uint64(validator.ExitEpoch))
 
-		withdrawalBuf := [32]byte{}
+		var withdrawalBuf [32]byte
 		binary.LittleEndian.PutUint64(withdrawalBuf[:8], uint64(validator.WithdrawableEpoch))
 
 		// Public key.
@@ -91,7 +91,7 @@ func VerifyBeaconStateMarshalSSZNilState(t *testing.T, factory getState, clear c
 
 func VerifyBeaconStateValidatorByPubkey(t *testing.T, factory getState) {
 	keyCreator := func(input []byte) [fieldparams.BLSPubkeyLength]byte {
-		nKey := [fieldparams.BLSPubkeyLength]byte{}
+		var nKey [fieldparams.BLSPubkeyLength]byte
 		copy(nKey[:1], input)
 		return nKey
 	}
@@ -52,7 +52,7 @@ func TestRPCBeaconBlocksByRange_RPCHandlerReturnsBlocks(t *testing.T) {
 	}
 
 	// Populate the database with blocks that would match the request.
-	prevRoot := [32]byte{}
+	var prevRoot [32]byte
 	var err error
 	for i := req.StartSlot; i < req.StartSlot.Add(req.Count); i += types.Slot(1) {
 		blk := util.NewBeaconBlock()

@@ -111,7 +111,7 @@ func TestRPCBeaconBlocksByRange_ReturnCorrectNumberBack(t *testing.T) {
 		Count: 200,
 	}
 
-	genRoot := [32]byte{}
+	var genRoot [32]byte
 	// Populate the database with blocks that would match the request.
 	for i := req.StartSlot; i < req.StartSlot.Add(req.Step*req.Count); i += types.Slot(req.Step) {
 		blk := util.NewBeaconBlock()

@@ -219,7 +219,7 @@ func TestRPCBeaconBlocksByRange_ReconstructsPayloads(t *testing.T) {
 	header, err := blocks.PayloadToHeader(wrappedPayload)
 	require.NoError(t, err)
 
-	genRoot := [32]byte{}
+	var genRoot [32]byte
 	// Populate the database with blocks that would match the request.
 	for i := req.StartSlot; i < req.StartSlot.Add(req.Step*req.Count); i += types.Slot(req.Step) {
 		blk := util.NewBlindedBeaconBlockBellatrix()

@@ -297,7 +297,7 @@ func TestRPCBeaconBlocksByRange_RPCHandlerReturnsSortedBlocks(t *testing.T) {
 	endSlot := req.StartSlot.Add(req.Count - 1)
 	expectedRoots := make([][32]byte, req.Count)
 	// Populate the database with blocks that would match the request.
-	prevRoot := [32]byte{}
+	var prevRoot [32]byte
 	for i, j := req.StartSlot, 0; i <= endSlot; i++ {
 		blk := util.NewBeaconBlock()
 		blk.Block.Slot = i

@@ -359,7 +359,7 @@ func TestRPCBeaconBlocksByRange_ReturnsGenesisBlock(t *testing.T) {
 		Count: 4,
 	}
 
-	prevRoot := [32]byte{}
+	var prevRoot [32]byte
 	// Populate the database with blocks that would match the request.
 	for i := req.StartSlot; i < req.StartSlot.Add(req.Step*req.Count); i++ {
 		blk := util.NewBeaconBlock()

@@ -410,7 +410,7 @@ func TestRPCBeaconBlocksByRange_RPCHandlerRateLimitOverflow(t *testing.T) {
 	d := db.SetupDB(t)
 	saveBlocks := func(req *ethpb.BeaconBlocksByRangeRequest) {
 		// Populate the database with blocks that would match the request.
-		parentRoot := [32]byte{}
+		var parentRoot [32]byte
 		// Default to 1 to be inline with the spec.
 		req.Step = 1
 		for i := req.StartSlot; i < req.StartSlot.Add(req.Step*req.Count); i += types.Slot(req.Step) {
@@ -675,7 +675,7 @@ func TestRPCBeaconBlocksByRange_EnforceResponseInvariants(t *testing.T) {
 	hook := logTest.NewGlobal()
 	saveBlocks := func(req *ethpb.BeaconBlocksByRangeRequest) {
 		// Populate the database with blocks that would match the request.
-		parentRoot := [32]byte{}
+		var parentRoot [32]byte
 		for i := req.StartSlot; i < req.StartSlot.Add(req.Step*req.Count); i += types.Slot(req.Step) {
 			block := util.NewBeaconBlock()
 			block.Block.Slot = i

@@ -933,7 +933,7 @@ func TestRPCBeaconBlocksByRange_FilterBlocks(t *testing.T) {
 		hook.Reset()
 		err := sendRequest(p1, p2, r, req, func(blocks []*ethpb.SignedBeaconBlock) {
 			assert.Equal(t, uint64(2), uint64(len(blocks)))
-			prevRoot := [32]byte{}
+			var prevRoot [32]byte
 			for _, blk := range blocks {
 				if blk.Block.Slot < req.StartSlot || blk.Block.Slot >= req.StartSlot.Add(req.Count*req.Step) {
 					t.Errorf("Block slot is out of range: %d is not within [%d, %d)",

@@ -968,7 +968,7 @@ func TestRPCBeaconBlocksByRange_FilterBlocks(t *testing.T) {
 		hook.Reset()
 		err := sendRequest(p1, p2, r, req, func(blocks []*ethpb.SignedBeaconBlock) {
 			assert.Equal(t, uint64(65), uint64(len(blocks)))
-			prevRoot := [32]byte{}
+			var prevRoot [32]byte
 			for _, blk := range blocks {
 				if blk.Block.Slot < req.StartSlot || blk.Block.Slot >= req.StartSlot.Add(req.Count*req.Step) {
 					t.Errorf("Block slot is out of range: %d is not within [%d, %d)",

@@ -1009,7 +1009,7 @@ func TestRPCBeaconBlocksByRange_FilterBlocks(t *testing.T) {
 		hook.Reset()
 		err := sendRequest(p1, p2, r, req, func(blocks []*ethpb.SignedBeaconBlock) {
 			assert.Equal(t, uint64(64), uint64(len(blocks)))
-			prevRoot := [32]byte{}
+			var prevRoot [32]byte
 			for _, blk := range blocks {
 				if blk.Block.Slot < req.StartSlot || blk.Block.Slot >= 65 {
 					t.Errorf("Block slot is out of range: %d is not within [%d, 64)",

@@ -1051,7 +1051,7 @@ func TestRPCBeaconBlocksByRange_FilterBlocks(t *testing.T) {
 		hook.Reset()
 		err := sendRequest(p1, p2, r, req, func(blocks []*ethpb.SignedBeaconBlock) {
 			assert.Equal(t, uint64(64), uint64(len(blocks)))
-			prevRoot := [32]byte{}
+			var prevRoot [32]byte
 			for _, blk := range blocks {
 				if blk.Block.Slot < req.StartSlot || blk.Block.Slot >= 65 {
 					t.Errorf("Block slot is out of range: %d is not within [%d, 64)",

@@ -1081,10 +1081,10 @@ func TestRPCBeaconBlocksByRange_FilterBlocks_PreviousRoot(t *testing.T) {
 	}
 
 	// Populate the database with blocks that would match the request.
-	prevRoot := [32]byte{}
+	var prevRoot [32]byte
 	var err error
 	blks := []interfaces.SignedBeaconBlock{}
-	roots := [][32]byte{}
+	var roots [][32]byte
 	for i := req.StartSlot; i < req.StartSlot.Add(req.Count); i += types.Slot(1) {
 		blk := util.NewBeaconBlock()
 		blk.Block.Slot = i
@@ -1101,7 +1101,7 @@ func TestRPCBeaconBlocksByRange_FilterBlocks_PreviousRoot(t *testing.T) {
 	// Start service with 160 as allowed blocks capacity (and almost zero capacity recovery).
 	r := &Service{cfg: &config{p2p: p1, beaconDB: d, chain: &chainMock.ChainService{}}, rateLimiter: newRateLimiter(p1)}
 
-	initialRoot := [32]byte{}
+	var initialRoot [32]byte
 	ptrRt := &initialRoot
 	newBlks, err := r.filterBlocks(context.Background(), blks, roots, ptrRt, req.Step, req.StartSlot)
 	require.NoError(t, err)
@@ -107,7 +107,7 @@ func TestStatusRPCHandler_ConnectsOnGenesis(t *testing.T) {
 	p2 := p2ptest.NewTestP2P(t)
 	p1.Connect(p2)
 	assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
-	root := [32]byte{}
+	var root [32]byte
 
 	r := &Service{
 		cfg: &config{
@@ -37,7 +37,7 @@ func TestService_ValidateBlsToExecutionChange(t *testing.T) {
 		Genesis: time.Now(),
 		ValidatorsRoot: [32]byte{'A'},
 	}
-	emptySig := [96]byte{}
+	var emptySig [96]byte
 	type args struct {
 		ctx context.Context
 		pid peer.ID

@@ -40,7 +40,7 @@ func TestService_ValidateSyncCommitteeMessage(t *testing.T) {
 		Genesis: time.Now(),
 		ValidatorsRoot: [32]byte{'A'},
 	}
-	emptySig := [96]byte{}
+	var emptySig [96]byte
 	type args struct {
 		ctx context.Context
 		pid peer.ID

@@ -49,7 +49,7 @@ func TestService_ValidateSyncContributionAndProof(t *testing.T) {
 		Genesis: time.Now(),
 		ValidatorsRoot: [32]byte{'A'},
 	}
-	emptySig := [96]byte{}
+	var emptySig [96]byte
 	type args struct {
 		ctx context.Context
 		pid peer.ID

@@ -898,7 +898,7 @@ func TestValidateSyncContributionAndProof(t *testing.T) {
 	defaultTopic := p2p.SyncContributionAndProofSubnetTopicFormat
 	defaultTopic = fmt.Sprintf(defaultTopic, []byte{0xAB, 0x00, 0xCC, 0x9E})
 	defaultTopic = defaultTopic + "/" + encoder.ProtocolSuffixSSZSnappy
-	emptySig := [96]byte{}
+	var emptySig [96]byte
 	pid := peer.ID("random")
 	msg := &ethpb.SignedContributionAndProof{
 		Message: &ethpb.ContributionAndProof{
@@ -1036,7 +1036,7 @@ func fillUpBlocksAndState(ctx context.Context, t *testing.T, beaconDB db.Databas
|
|||||||
assert.NoError(t, beaconDB.SaveGenesisData(context.Background(), gs))
|
assert.NoError(t, beaconDB.SaveGenesisData(context.Background(), gs))
|
||||||
|
|
||||||
testState := gs.Copy()
|
testState := gs.Copy()
|
||||||
hRoot := [32]byte{}
|
var hRoot [32]byte
|
||||||
for i := types.Slot(1); i <= params.BeaconConfig().SlotsPerEpoch; i++ {
|
for i := types.Slot(1); i <= params.BeaconConfig().SlotsPerEpoch; i++ {
|
||||||
blk, err := util.GenerateFullBlockAltair(testState, keys, util.DefaultBlockGenConfig(), i)
|
blk, err := util.GenerateFullBlockAltair(testState, keys, util.DefaultBlockGenConfig(), i)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|||||||
@@ -10,7 +10,7 @@ import (
|
|||||||
|
|
||||||
func TestSSZUint64_Limit(t *testing.T) {
|
func TestSSZUint64_Limit(t *testing.T) {
|
||||||
sszType := types.SSZUint64(0)
|
sszType := types.SSZUint64(0)
|
||||||
serializedObj := [7]byte{}
|
var serializedObj [7]byte
|
||||||
err := sszType.UnmarshalSSZ(serializedObj[:])
|
err := sszType.UnmarshalSSZ(serializedObj[:])
|
||||||
if err == nil || !strings.Contains(err.Error(), "expected buffer of length") {
|
if err == nil || !strings.Contains(err.Error(), "expected buffer of length") {
|
||||||
t.Errorf("Expected Error = %s, got: %v", "expected buffer of length", err)
|
t.Errorf("Expected Error = %s, got: %v", "expected buffer of length", err)
|
||||||
|
|||||||
@@ -111,7 +111,7 @@ func (m *SparseMerkleTrie) Items() [][]byte {
|
|||||||
// Spec Definition:
|
// Spec Definition:
|
||||||
// sha256(concat(node, self.to_little_endian_64(self.deposit_count), slice(zero_bytes32, start=0, len=24)))
|
// sha256(concat(node, self.to_little_endian_64(self.deposit_count), slice(zero_bytes32, start=0, len=24)))
|
||||||
func (m *SparseMerkleTrie) HashTreeRoot() ([32]byte, error) {
|
func (m *SparseMerkleTrie) HashTreeRoot() ([32]byte, error) {
|
||||||
enc := [32]byte{}
|
var enc [32]byte
|
||||||
depositCount := uint64(len(m.originalItems))
|
depositCount := uint64(len(m.originalItems))
|
||||||
if len(m.originalItems) == 1 && bytes.Equal(m.originalItems[0], ZeroHashes[0][:]) {
|
if len(m.originalItems) == 1 && bytes.Equal(m.originalItems[0], ZeroHashes[0][:]) {
|
||||||
// Accounting for empty tries
|
// Accounting for empty tries
|
||||||
@@ -187,7 +187,7 @@ func (m *SparseMerkleTrie) MerkleProof(index int) ([][]byte, error) {
|
|||||||
proof[i] = ZeroHashes[i][:]
|
proof[i] = ZeroHashes[i][:]
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
enc := [32]byte{}
|
var enc [32]byte
|
||||||
binary.LittleEndian.PutUint64(enc[:], uint64(len(m.originalItems)))
|
binary.LittleEndian.PutUint64(enc[:], uint64(len(m.originalItems)))
|
||||||
proof[len(proof)-1] = enc[:]
|
proof[len(proof)-1] = enc[:]
|
||||||
return proof, nil
|
return proof, nil
|
||||||
|
|||||||
@@ -36,7 +36,7 @@ func TestDepositTrieRoot_OK(t *testing.T) {
|
|||||||
|
|
||||||
for i := 0; i < 100; i++ {
|
for i := 0; i < 100; i++ {
|
||||||
data := depositDataItems[i]
|
data := depositDataItems[i]
|
||||||
dataRoot := [32]byte{}
|
var dataRoot [32]byte
|
||||||
copy(dataRoot[:], depositDataRoots[i])
|
copy(dataRoot[:], depositDataRoots[i])
|
||||||
|
|
||||||
_, err := testAcc.Contract.Deposit(testAcc.TxOpts, data.PublicKey, data.WithdrawalCredentials, data.Signature, dataRoot)
|
_, err := testAcc.Contract.Deposit(testAcc.TxOpts, data.PublicKey, data.WithdrawalCredentials, data.Signature, dataRoot)
|
||||||
@@ -77,7 +77,7 @@ func TestDepositTrieRoot_Fail(t *testing.T) {
|
|||||||
|
|
||||||
for i := 0; i < 100; i++ {
|
for i := 0; i < 100; i++ {
|
||||||
data := depositDataItems[i]
|
data := depositDataItems[i]
|
||||||
dataRoot := [32]byte{}
|
var dataRoot [32]byte
|
||||||
copy(dataRoot[:], depositDataRoots[i])
|
copy(dataRoot[:], depositDataRoots[i])
|
||||||
|
|
||||||
_, err := testAcc.Contract.Deposit(testAcc.TxOpts, data.PublicKey, data.WithdrawalCredentials, data.Signature, dataRoot)
|
_, err := testAcc.Contract.Deposit(testAcc.TxOpts, data.PublicKey, data.WithdrawalCredentials, data.Signature, dataRoot)
|
||||||
|
|||||||
@@ -88,7 +88,7 @@ func TestSerialize(t *testing.T) {
|
|||||||
|
|
||||||
func TestZeroKey(t *testing.T) {
|
func TestZeroKey(t *testing.T) {
|
||||||
// Is Zero
|
// Is Zero
|
||||||
zKey := [32]byte{}
|
var zKey [32]byte
|
||||||
assert.Equal(t, true, blst.IsZero(zKey[:]))
|
assert.Equal(t, true, blst.IsZero(zKey[:]))
|
||||||
|
|
||||||
// Is Not Zero
|
// Is Not Zero
|
||||||
|
|||||||
@@ -703,7 +703,7 @@ func NewInvalidSignatureSet(t *testing.T, msgBody string, num int, throwErr bool
|
|||||||
}
|
}
|
||||||
|
|
||||||
func messageBytes(message string) [32]byte {
|
func messageBytes(message string) [32]byte {
|
||||||
bytes := [32]byte{}
|
var bytes [32]byte
|
||||||
copy(bytes[:], []byte(message))
|
copy(bytes[:], []byte(message))
|
||||||
return bytes
|
return bytes
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -63,7 +63,7 @@ func BitwiseMerkleize(hasher HashFn, chunks [][32]byte, count, limit uint64) ([3
|
|||||||
|
|
||||||
// PackByChunk a given byte array's final chunk with zeroes if needed.
|
// PackByChunk a given byte array's final chunk with zeroes if needed.
|
||||||
func PackByChunk(serializedItems [][]byte) ([][bytesPerChunk]byte, error) {
|
func PackByChunk(serializedItems [][]byte) ([][bytesPerChunk]byte, error) {
|
||||||
emptyChunk := [bytesPerChunk]byte{}
|
var emptyChunk [bytesPerChunk]byte
|
||||||
// If there are no items, we return an empty chunk.
|
// If there are no items, we return an empty chunk.
|
||||||
if len(serializedItems) == 0 {
|
if len(serializedItems) == 0 {
|
||||||
return [][bytesPerChunk]byte{emptyChunk}, nil
|
return [][bytesPerChunk]byte{emptyChunk}, nil
|
||||||
|
|||||||
@@ -86,7 +86,7 @@ func Merkleize(hasher Hasher, count, limit uint64, leaf func(i uint64) []byte) (
|
|||||||
tmp := make([][32]byte, limitDepth+1)
|
tmp := make([][32]byte, limitDepth+1)
|
||||||
|
|
||||||
j := uint8(0)
|
j := uint8(0)
|
||||||
hArr := [32]byte{}
|
var hArr [32]byte
|
||||||
h := hArr[:]
|
h := hArr[:]
|
||||||
|
|
||||||
merge := func(i uint64) {
|
merge := func(i uint64) {
|
||||||
@@ -151,7 +151,7 @@ func ConstructProof(hasher Hasher, count, limit uint64, leaf func(i uint64) []by
|
|||||||
tmp := make([][32]byte, limitDepth+1)
|
tmp := make([][32]byte, limitDepth+1)
|
||||||
|
|
||||||
j := uint8(0)
|
j := uint8(0)
|
||||||
hArr := [32]byte{}
|
var hArr [32]byte
|
||||||
h := hArr[:]
|
h := hArr[:]
|
||||||
|
|
||||||
merge := func(i uint64) {
|
merge := func(i uint64) {
|
||||||
|
|||||||
@@ -41,7 +41,7 @@ func TestMerkleizeLimitAndCountAreZero(t *testing.T) {
|
|||||||
leafIndexer := func(i uint64) []byte {
|
leafIndexer := func(i uint64) []byte {
|
||||||
return chunks[i]
|
return chunks[i]
|
||||||
}
|
}
|
||||||
expected := [32]byte{}
|
var expected [32]byte
|
||||||
result := ssz.Merkleize(hashFn, count, limit, leafIndexer)
|
result := ssz.Merkleize(hashFn, count, limit, leafIndexer)
|
||||||
assert.Equal(t, expected, result)
|
assert.Equal(t, expected, result)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -130,7 +130,7 @@ func NextForkData(currEpoch types.Epoch) ([4]byte, types.Epoch, error) {
|
|||||||
fSchedule := params.BeaconConfig().ForkVersionSchedule
|
fSchedule := params.BeaconConfig().ForkVersionSchedule
|
||||||
sortedForkVersions := SortedForkVersions(fSchedule)
|
sortedForkVersions := SortedForkVersions(fSchedule)
|
||||||
nextForkEpoch := types.Epoch(math.MaxUint64)
|
nextForkEpoch := types.Epoch(math.MaxUint64)
|
||||||
nextForkVersion := [4]byte{}
|
var nextForkVersion [4]byte
|
||||||
for _, forkVersion := range sortedForkVersions {
|
for _, forkVersion := range sortedForkVersions {
|
||||||
epoch, ok := fSchedule[forkVersion]
|
epoch, ok := fSchedule[forkVersion]
|
||||||
if !ok {
|
if !ok {
|
||||||
|
|||||||
@@ -132,7 +132,7 @@ func (e *ExecutionBlock) UnmarshalJSON(enc []byte) error {
|
|||||||
|
|
||||||
// UnmarshalJSON --
|
// UnmarshalJSON --
|
||||||
func (b *PayloadIDBytes) UnmarshalJSON(enc []byte) error {
|
func (b *PayloadIDBytes) UnmarshalJSON(enc []byte) error {
|
||||||
res := [8]byte{}
|
var res [8]byte
|
||||||
if err := hexutil.UnmarshalFixedJSON(reflect.TypeOf(b), enc, res[:]); err != nil {
|
if err := hexutil.UnmarshalFixedJSON(reflect.TypeOf(b), enc, res[:]); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -20,7 +20,7 @@ func (s *Simulator) generateBlockHeadersForSlot(
|
|||||||
slashings := make([]*ethpb.ProposerSlashing, 0)
|
slashings := make([]*ethpb.ProposerSlashing, 0)
|
||||||
proposer := rand.NewGenerator().Uint64() % s.srvConfig.Params.NumValidators
|
proposer := rand.NewGenerator().Uint64() % s.srvConfig.Params.NumValidators
|
||||||
|
|
||||||
parentRoot := [32]byte{}
|
var parentRoot [32]byte
|
||||||
beaconState, err := s.srvConfig.StateGen.StateByRoot(ctx, parentRoot)
|
beaconState, err := s.srvConfig.StateGen.StateByRoot(ctx, parentRoot)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
|
|||||||
@@ -26,7 +26,7 @@ func TestSubmitAggregateAndProof_GetDutiesRequestFailure(t *testing.T) {
|
|||||||
validator.duties = ðpb.DutiesResponse{Duties: []*ethpb.DutiesResponse_Duty{}}
|
validator.duties = ðpb.DutiesResponse{Duties: []*ethpb.DutiesResponse_Duty{}}
|
||||||
defer finish()
|
defer finish()
|
||||||
|
|
||||||
pubKey := [fieldparams.BLSPubkeyLength]byte{}
|
var pubKey [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
||||||
validator.SubmitAggregateAndProof(context.Background(), 0, pubKey)
|
validator.SubmitAggregateAndProof(context.Background(), 0, pubKey)
|
||||||
|
|
||||||
@@ -36,7 +36,7 @@ func TestSubmitAggregateAndProof_GetDutiesRequestFailure(t *testing.T) {
|
|||||||
func TestSubmitAggregateAndProof_SignFails(t *testing.T) {
|
func TestSubmitAggregateAndProof_SignFails(t *testing.T) {
|
||||||
validator, m, validatorKey, finish := setup(t)
|
validator, m, validatorKey, finish := setup(t)
|
||||||
defer finish()
|
defer finish()
|
||||||
pubKey := [fieldparams.BLSPubkeyLength]byte{}
|
var pubKey [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
||||||
validator.duties = ðpb.DutiesResponse{
|
validator.duties = ðpb.DutiesResponse{
|
||||||
Duties: []*ethpb.DutiesResponse_Duty{
|
Duties: []*ethpb.DutiesResponse_Duty{
|
||||||
@@ -75,7 +75,7 @@ func TestSubmitAggregateAndProof_SignFails(t *testing.T) {
|
|||||||
func TestSubmitAggregateAndProof_Ok(t *testing.T) {
|
func TestSubmitAggregateAndProof_Ok(t *testing.T) {
|
||||||
validator, m, validatorKey, finish := setup(t)
|
validator, m, validatorKey, finish := setup(t)
|
||||||
defer finish()
|
defer finish()
|
||||||
pubKey := [fieldparams.BLSPubkeyLength]byte{}
|
var pubKey [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
||||||
validator.duties = ðpb.DutiesResponse{
|
validator.duties = ðpb.DutiesResponse{
|
||||||
Duties: []*ethpb.DutiesResponse_Duty{
|
Duties: []*ethpb.DutiesResponse_Duty{
|
||||||
@@ -150,7 +150,7 @@ func TestAggregateAndProofSignature_CanSignValidSignature(t *testing.T) {
|
|||||||
validator, m, validatorKey, finish := setup(t)
|
validator, m, validatorKey, finish := setup(t)
|
||||||
defer finish()
|
defer finish()
|
||||||
|
|
||||||
pubKey := [fieldparams.BLSPubkeyLength]byte{}
|
var pubKey [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
||||||
m.validatorClient.EXPECT().DomainData(
|
m.validatorClient.EXPECT().DomainData(
|
||||||
gomock.Any(), // ctx
|
gomock.Any(), // ctx
|
||||||
|
|||||||
@@ -21,7 +21,7 @@ func Test_slashableAttestationCheck(t *testing.T) {
|
|||||||
defer reset()
|
defer reset()
|
||||||
validator, m, validatorKey, finish := setup(t)
|
validator, m, validatorKey, finish := setup(t)
|
||||||
defer finish()
|
defer finish()
|
||||||
pubKey := [fieldparams.BLSPubkeyLength]byte{}
|
var pubKey [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
||||||
att := ðpb.IndexedAttestation{
|
att := ðpb.IndexedAttestation{
|
||||||
AttestingIndices: []uint64{1, 2},
|
AttestingIndices: []uint64{1, 2},
|
||||||
@@ -69,7 +69,7 @@ func Test_slashableAttestationCheck_UpdatesLowestSignedEpochs(t *testing.T) {
|
|||||||
validator, m, validatorKey, finish := setup(t)
|
validator, m, validatorKey, finish := setup(t)
|
||||||
defer finish()
|
defer finish()
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
pubKey := [fieldparams.BLSPubkeyLength]byte{}
|
var pubKey [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
||||||
att := ðpb.IndexedAttestation{
|
att := ðpb.IndexedAttestation{
|
||||||
AttestingIndices: []uint64{1, 2},
|
AttestingIndices: []uint64{1, 2},
|
||||||
|
|||||||
@@ -36,7 +36,7 @@ func TestRequestAttestation_ValidatorDutiesRequestFailure(t *testing.T) {
|
|||||||
validator.duties = ðpb.DutiesResponse{Duties: []*ethpb.DutiesResponse_Duty{}}
|
validator.duties = ðpb.DutiesResponse{Duties: []*ethpb.DutiesResponse_Duty{}}
|
||||||
defer finish()
|
defer finish()
|
||||||
|
|
||||||
pubKey := [fieldparams.BLSPubkeyLength]byte{}
|
var pubKey [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
||||||
validator.SubmitAttestation(context.Background(), 30, pubKey)
|
validator.SubmitAttestation(context.Background(), 30, pubKey)
|
||||||
require.LogsContain(t, hook, "Could not fetch validator assignment")
|
require.LogsContain(t, hook, "Could not fetch validator assignment")
|
||||||
@@ -47,7 +47,7 @@ func TestAttestToBlockHead_SubmitAttestation_EmptyCommittee(t *testing.T) {
|
|||||||
|
|
||||||
validator, _, validatorKey, finish := setup(t)
|
validator, _, validatorKey, finish := setup(t)
|
||||||
defer finish()
|
defer finish()
|
||||||
pubKey := [fieldparams.BLSPubkeyLength]byte{}
|
var pubKey [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
||||||
validator.duties = ðpb.DutiesResponse{Duties: []*ethpb.DutiesResponse_Duty{
|
validator.duties = ðpb.DutiesResponse{Duties: []*ethpb.DutiesResponse_Duty{
|
||||||
{
|
{
|
||||||
@@ -89,7 +89,7 @@ func TestAttestToBlockHead_SubmitAttestation_RequestFailure(t *testing.T) {
|
|||||||
gomock.AssignableToTypeOf(ðpb.Attestation{}),
|
gomock.AssignableToTypeOf(ðpb.Attestation{}),
|
||||||
).Return(nil, errors.New("something went wrong"))
|
).Return(nil, errors.New("something went wrong"))
|
||||||
|
|
||||||
pubKey := [fieldparams.BLSPubkeyLength]byte{}
|
var pubKey [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
||||||
validator.SubmitAttestation(context.Background(), 30, pubKey)
|
validator.SubmitAttestation(context.Background(), 30, pubKey)
|
||||||
require.LogsContain(t, hook, "Could not submit attestation to beacon node")
|
require.LogsContain(t, hook, "Could not submit attestation to beacon node")
|
||||||
@@ -101,7 +101,7 @@ func TestAttestToBlockHead_AttestsCorrectly(t *testing.T) {
|
|||||||
hook := logTest.NewGlobal()
|
hook := logTest.NewGlobal()
|
||||||
validatorIndex := types.ValidatorIndex(7)
|
validatorIndex := types.ValidatorIndex(7)
|
||||||
committee := []types.ValidatorIndex{0, 3, 4, 2, validatorIndex, 6, 8, 9, 10}
|
committee := []types.ValidatorIndex{0, 3, 4, 2, validatorIndex, 6, 8, 9, 10}
|
||||||
pubKey := [fieldparams.BLSPubkeyLength]byte{}
|
var pubKey [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
||||||
validator.duties = ðpb.DutiesResponse{Duties: []*ethpb.DutiesResponse_Duty{
|
validator.duties = ðpb.DutiesResponse{Duties: []*ethpb.DutiesResponse_Duty{
|
||||||
{
|
{
|
||||||
@@ -174,7 +174,7 @@ func TestAttestToBlockHead_BlocksDoubleAtt(t *testing.T) {
|
|||||||
defer finish()
|
defer finish()
|
||||||
validatorIndex := types.ValidatorIndex(7)
|
validatorIndex := types.ValidatorIndex(7)
|
||||||
committee := []types.ValidatorIndex{0, 3, 4, 2, validatorIndex, 6, 8, 9, 10}
|
committee := []types.ValidatorIndex{0, 3, 4, 2, validatorIndex, 6, 8, 9, 10}
|
||||||
pubKey := [fieldparams.BLSPubkeyLength]byte{}
|
var pubKey [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
||||||
validator.duties = ðpb.DutiesResponse{Duties: []*ethpb.DutiesResponse_Duty{
|
validator.duties = ðpb.DutiesResponse{Duties: []*ethpb.DutiesResponse_Duty{
|
||||||
{
|
{
|
||||||
@@ -226,7 +226,7 @@ func TestAttestToBlockHead_BlocksSurroundAtt(t *testing.T) {
|
|||||||
defer finish()
|
defer finish()
|
||||||
validatorIndex := types.ValidatorIndex(7)
|
validatorIndex := types.ValidatorIndex(7)
|
||||||
committee := []types.ValidatorIndex{0, 3, 4, 2, validatorIndex, 6, 8, 9, 10}
|
committee := []types.ValidatorIndex{0, 3, 4, 2, validatorIndex, 6, 8, 9, 10}
|
||||||
pubKey := [fieldparams.BLSPubkeyLength]byte{}
|
var pubKey [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
||||||
validator.duties = ðpb.DutiesResponse{Duties: []*ethpb.DutiesResponse_Duty{
|
validator.duties = ðpb.DutiesResponse{Duties: []*ethpb.DutiesResponse_Duty{
|
||||||
{
|
{
|
||||||
@@ -277,7 +277,7 @@ func TestAttestToBlockHead_BlocksSurroundedAtt(t *testing.T) {
|
|||||||
validator, m, validatorKey, finish := setup(t)
|
validator, m, validatorKey, finish := setup(t)
|
||||||
defer finish()
|
defer finish()
|
||||||
validatorIndex := types.ValidatorIndex(7)
|
validatorIndex := types.ValidatorIndex(7)
|
||||||
pubKey := [fieldparams.BLSPubkeyLength]byte{}
|
var pubKey [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
||||||
committee := []types.ValidatorIndex{0, 3, 4, 2, validatorIndex, 6, 8, 9, 10}
|
committee := []types.ValidatorIndex{0, 3, 4, 2, validatorIndex, 6, 8, 9, 10}
|
||||||
validator.duties = ðpb.DutiesResponse{Duties: []*ethpb.DutiesResponse_Duty{
|
validator.duties = ðpb.DutiesResponse{Duties: []*ethpb.DutiesResponse_Duty{
|
||||||
@@ -331,7 +331,7 @@ func TestAttestToBlockHead_DoesNotAttestBeforeDelay(t *testing.T) {
|
|||||||
validator, m, validatorKey, finish := setup(t)
|
validator, m, validatorKey, finish := setup(t)
|
||||||
defer finish()
|
defer finish()
|
||||||
|
|
||||||
pubKey := [fieldparams.BLSPubkeyLength]byte{}
|
var pubKey [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
||||||
validator.genesisTime = uint64(prysmTime.Now().Unix())
|
validator.genesisTime = uint64(prysmTime.Now().Unix())
|
||||||
m.validatorClient.EXPECT().GetDuties(
|
m.validatorClient.EXPECT().GetDuties(
|
||||||
@@ -365,7 +365,7 @@ func TestAttestToBlockHead_DoesAttestAfterDelay(t *testing.T) {
|
|||||||
validator.genesisTime = uint64(prysmTime.Now().Unix())
|
validator.genesisTime = uint64(prysmTime.Now().Unix())
|
||||||
validatorIndex := types.ValidatorIndex(5)
|
validatorIndex := types.ValidatorIndex(5)
|
||||||
committee := []types.ValidatorIndex{0, 3, 4, 2, validatorIndex, 6, 8, 9, 10}
|
committee := []types.ValidatorIndex{0, 3, 4, 2, validatorIndex, 6, 8, 9, 10}
|
||||||
pubKey := [fieldparams.BLSPubkeyLength]byte{}
|
var pubKey [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
||||||
validator.duties = ðpb.DutiesResponse{Duties: []*ethpb.DutiesResponse_Duty{
|
validator.duties = ðpb.DutiesResponse{Duties: []*ethpb.DutiesResponse_Duty{
|
||||||
{
|
{
|
||||||
@@ -404,7 +404,7 @@ func TestAttestToBlockHead_CorrectBitfieldLength(t *testing.T) {
|
|||||||
defer finish()
|
defer finish()
|
||||||
validatorIndex := types.ValidatorIndex(2)
|
validatorIndex := types.ValidatorIndex(2)
|
||||||
committee := []types.ValidatorIndex{0, 3, 4, 2, validatorIndex, 6, 8, 9, 10}
|
committee := []types.ValidatorIndex{0, 3, 4, 2, validatorIndex, 6, 8, 9, 10}
|
||||||
pubKey := [fieldparams.BLSPubkeyLength]byte{}
|
var pubKey [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
||||||
validator.duties = ðpb.DutiesResponse{Duties: []*ethpb.DutiesResponse_Duty{
|
validator.duties = ðpb.DutiesResponse{Duties: []*ethpb.DutiesResponse_Duty{
|
||||||
{
|
{
|
||||||
|
|||||||
@@ -59,7 +59,7 @@ func (c *waitForActivationClient) Recv() (*ethpb.ValidatorActivationResponse, er
|
|||||||
stringRetrievedPubKeys := make(map[string]struct{})
|
stringRetrievedPubKeys := make(map[string]struct{})
|
||||||
|
|
||||||
// Contains all keys in targetPubKeys but not in retrievedPubKeys
|
// Contains all keys in targetPubKeys but not in retrievedPubKeys
|
||||||
missingPubKeys := [][]byte{}
|
var missingPubKeys [][]byte
|
||||||
|
|
||||||
statuses := []*ethpb.ValidatorActivationResponse_Status{}
|
statuses := []*ethpb.ValidatorActivationResponse_Status{}
|
||||||
|
|
||||||
|
|||||||
@@ -25,11 +25,11 @@ func TestValidator_HandleKeyReload(t *testing.T) {
|
|||||||
|
|
||||||
inactivePrivKey, err := bls.RandKey()
|
inactivePrivKey, err := bls.RandKey()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
inactivePubKey := [fieldparams.BLSPubkeyLength]byte{}
|
var inactivePubKey [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(inactivePubKey[:], inactivePrivKey.PublicKey().Marshal())
|
copy(inactivePubKey[:], inactivePrivKey.PublicKey().Marshal())
|
||||||
activePrivKey, err := bls.RandKey()
|
activePrivKey, err := bls.RandKey()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
activePubKey := [fieldparams.BLSPubkeyLength]byte{}
|
var activePubKey [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(activePubKey[:], activePrivKey.PublicKey().Marshal())
|
copy(activePubKey[:], activePrivKey.PublicKey().Marshal())
|
||||||
km := &mockKeymanager{
|
km := &mockKeymanager{
|
||||||
keysMap: map[[fieldparams.BLSPubkeyLength]byte]bls.SecretKey{
|
keysMap: map[[fieldparams.BLSPubkeyLength]byte]bls.SecretKey{
|
||||||
@@ -68,7 +68,7 @@ func TestValidator_HandleKeyReload(t *testing.T) {
|
|||||||
|
|
||||||
inactivePrivKey, err := bls.RandKey()
|
inactivePrivKey, err := bls.RandKey()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
inactivePubKey := [fieldparams.BLSPubkeyLength]byte{}
|
var inactivePubKey [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(inactivePubKey[:], inactivePrivKey.PublicKey().Marshal())
|
copy(inactivePubKey[:], inactivePrivKey.PublicKey().Marshal())
|
||||||
km := &mockKeymanager{
|
km := &mockKeymanager{
|
||||||
keysMap: map[[fieldparams.BLSPubkeyLength]byte]bls.SecretKey{
|
keysMap: map[[fieldparams.BLSPubkeyLength]byte]bls.SecretKey{
|
||||||
@@ -104,7 +104,7 @@ func TestValidator_HandleKeyReload(t *testing.T) {
|
|||||||
t.Run("error when getting status", func(t *testing.T) {
|
t.Run("error when getting status", func(t *testing.T) {
|
||||||
inactivePrivKey, err := bls.RandKey()
|
inactivePrivKey, err := bls.RandKey()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
inactivePubKey := [fieldparams.BLSPubkeyLength]byte{}
|
var inactivePubKey [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(inactivePubKey[:], inactivePrivKey.PublicKey().Marshal())
|
copy(inactivePubKey[:], inactivePrivKey.PublicKey().Marshal())
|
||||||
km := &mockKeymanager{
|
km := &mockKeymanager{
|
||||||
keysMap: map[[fieldparams.BLSPubkeyLength]byte]bls.SecretKey{
|
keysMap: map[[fieldparams.BLSPubkeyLength]byte]bls.SecretKey{
|
||||||
|
|||||||
@@ -21,7 +21,7 @@ func Test_slashableProposalCheck_PreventsLowerThanMinProposal(t *testing.T) {
|
|||||||
validator, _, validatorKey, finish := setup(t)
|
validator, _, validatorKey, finish := setup(t)
|
||||||
defer finish()
|
defer finish()
|
||||||
lowestSignedSlot := types.Slot(10)
|
lowestSignedSlot := types.Slot(10)
|
||||||
pubKeyBytes := [fieldparams.BLSPubkeyLength]byte{}
|
var pubKeyBytes [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(pubKeyBytes[:], validatorKey.PublicKey().Marshal())
|
copy(pubKeyBytes[:], validatorKey.PublicKey().Marshal())
|
||||||
|
|
||||||
// We save a proposal at the lowest signed slot in the DB.
|
// We save a proposal at the lowest signed slot in the DB.
|
||||||
@@ -102,7 +102,7 @@ func Test_slashableProposalCheck(t *testing.T) {
|
|||||||
Signature: params.BeaconConfig().EmptySignature[:],
|
Signature: params.BeaconConfig().EmptySignature[:],
|
||||||
})
|
})
|
||||||
|
|
||||||
pubKeyBytes := [fieldparams.BLSPubkeyLength]byte{}
|
var pubKeyBytes [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(pubKeyBytes[:], validatorKey.PublicKey().Marshal())
|
copy(pubKeyBytes[:], validatorKey.PublicKey().Marshal())
|
||||||
|
|
||||||
// We save a proposal at slot 1 as our lowest proposal.
|
// We save a proposal at slot 1 as our lowest proposal.
|
||||||
@@ -113,7 +113,7 @@ func Test_slashableProposalCheck(t *testing.T) {
|
|||||||
dummySigningRoot := [32]byte{1}
|
dummySigningRoot := [32]byte{1}
|
||||||
err = validator.db.SaveProposalHistoryForSlot(ctx, pubKeyBytes, 10, dummySigningRoot[:])
|
err = validator.db.SaveProposalHistoryForSlot(ctx, pubKeyBytes, 10, dummySigningRoot[:])
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
pubKey := [fieldparams.BLSPubkeyLength]byte{}
|
var pubKey [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
||||||
sBlock, err := blocks.NewSignedBeaconBlock(blk)
|
sBlock, err := blocks.NewSignedBeaconBlock(blk)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@@ -168,7 +168,7 @@ func Test_slashableProposalCheck_RemoteProtection(t *testing.T) {
|
|||||||
defer reset()
|
defer reset()
|
||||||
validator, m, validatorKey, finish := setup(t)
|
validator, m, validatorKey, finish := setup(t)
|
||||||
defer finish()
|
defer finish()
|
||||||
pubKey := [fieldparams.BLSPubkeyLength]byte{}
|
var pubKey [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
||||||
|
|
||||||
blk := util.NewBeaconBlock()
|
blk := util.NewBeaconBlock()
|
||||||
|
|||||||
@@ -66,7 +66,7 @@ func setup(t *testing.T) (*validator, *mocks, bls.SecretKey, func()) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func setupWithKey(t *testing.T, validatorKey bls.SecretKey) (*validator, *mocks, bls.SecretKey, func()) {
|
func setupWithKey(t *testing.T, validatorKey bls.SecretKey) (*validator, *mocks, bls.SecretKey, func()) {
|
||||||
pubKey := [fieldparams.BLSPubkeyLength]byte{}
|
var pubKey [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
||||||
valDB := testing2.SetupDB(t, [][fieldparams.BLSPubkeyLength]byte{pubKey})
|
valDB := testing2.SetupDB(t, [][fieldparams.BLSPubkeyLength]byte{pubKey})
|
||||||
ctrl := gomock.NewController(t)
|
ctrl := gomock.NewController(t)
|
||||||
@@ -103,7 +103,7 @@ func TestProposeBlock_DoesNotProposeGenesisBlock(t *testing.T) {
|
|||||||
hook := logTest.NewGlobal()
|
hook := logTest.NewGlobal()
|
||||||
validator, _, validatorKey, finish := setup(t)
|
validator, _, validatorKey, finish := setup(t)
|
||||||
defer finish()
|
defer finish()
|
||||||
pubKey := [fieldparams.BLSPubkeyLength]byte{}
|
var pubKey [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
||||||
validator.ProposeBlock(context.Background(), 0, pubKey)
|
validator.ProposeBlock(context.Background(), 0, pubKey)
|
||||||
|
|
||||||
@@ -114,7 +114,7 @@ func TestProposeBlock_DomainDataFailed(t *testing.T) {
|
|||||||
hook := logTest.NewGlobal()
|
hook := logTest.NewGlobal()
|
||||||
validator, m, validatorKey, finish := setup(t)
|
validator, m, validatorKey, finish := setup(t)
|
||||||
defer finish()
|
defer finish()
|
||||||
pubKey := [fieldparams.BLSPubkeyLength]byte{}
|
var pubKey [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
||||||
|
|
||||||
m.validatorClient.EXPECT().DomainData(
|
m.validatorClient.EXPECT().DomainData(
|
||||||
@@ -130,7 +130,7 @@ func TestProposeBlock_DomainDataIsNil(t *testing.T) {
|
|||||||
hook := logTest.NewGlobal()
|
hook := logTest.NewGlobal()
|
||||||
validator, m, validatorKey, finish := setup(t)
|
validator, m, validatorKey, finish := setup(t)
|
||||||
defer finish()
|
defer finish()
|
||||||
pubKey := [fieldparams.BLSPubkeyLength]byte{}
|
var pubKey [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
||||||
|
|
||||||
m.validatorClient.EXPECT().DomainData(
|
m.validatorClient.EXPECT().DomainData(
|
||||||
@@ -172,7 +172,7 @@ func TestProposeBlock_RequestBlockFailed(t *testing.T) {
|
|||||||
hook := logTest.NewGlobal()
|
hook := logTest.NewGlobal()
|
||||||
validator, m, validatorKey, finish := setup(t)
|
validator, m, validatorKey, finish := setup(t)
|
||||||
defer finish()
|
defer finish()
|
||||||
pubKey := [fieldparams.BLSPubkeyLength]byte{}
|
var pubKey [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
||||||
|
|
||||||
m.validatorClient.EXPECT().DomainData(
|
m.validatorClient.EXPECT().DomainData(
|
||||||
@@ -227,7 +227,7 @@ func TestProposeBlock_ProposeBlockFailed(t *testing.T) {
|
|||||||
hook := logTest.NewGlobal()
|
hook := logTest.NewGlobal()
|
||||||
validator, m, validatorKey, finish := setup(t)
|
validator, m, validatorKey, finish := setup(t)
|
||||||
defer finish()
|
defer finish()
|
||||||
pubKey := [fieldparams.BLSPubkeyLength]byte{}
|
var pubKey [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
||||||
|
|
||||||
m.validatorClient.EXPECT().DomainData(
|
m.validatorClient.EXPECT().DomainData(
|
||||||
@@ -258,7 +258,7 @@ func TestProposeBlock_ProposeBlockFailed(t *testing.T) {
|
|||||||
|
|
||||||
func TestProposeBlock_BlocksDoubleProposal(t *testing.T) {
|
func TestProposeBlock_BlocksDoubleProposal(t *testing.T) {
|
||||||
slot := params.BeaconConfig().SlotsPerEpoch.Mul(5).Add(2)
|
slot := params.BeaconConfig().SlotsPerEpoch.Mul(5).Add(2)
|
||||||
blockGraffiti := [32]byte{}
|
var blockGraffiti [32]byte
|
||||||
copy(blockGraffiti[:], "someothergraffiti")
|
copy(blockGraffiti[:], "someothergraffiti")
|
||||||
|
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
@@ -325,10 +325,10 @@ func TestProposeBlock_BlocksDoubleProposal(t *testing.T) {
|
|||||||
hook := logTest.NewGlobal()
|
hook := logTest.NewGlobal()
|
||||||
validator, m, validatorKey, finish := setup(t)
|
validator, m, validatorKey, finish := setup(t)
|
||||||
defer finish()
|
defer finish()
|
||||||
pubKey := [fieldparams.BLSPubkeyLength]byte{}
|
var pubKey [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
||||||
|
|
||||||
dummyRoot := [32]byte{}
|
var dummyRoot [32]byte
|
||||||
// Save a dummy proposal history at slot 0.
|
// Save a dummy proposal history at slot 0.
|
||||||
err := validator.db.SaveProposalHistoryForSlot(context.Background(), pubKey, 0, dummyRoot[:])
|
err := validator.db.SaveProposalHistoryForSlot(context.Background(), pubKey, 0, dummyRoot[:])
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@@ -371,10 +371,10 @@ func TestProposeBlock_BlocksDoubleProposal_After54KEpochs(t *testing.T) {
|
|||||||
hook := logTest.NewGlobal()
|
hook := logTest.NewGlobal()
|
||||||
validator, m, validatorKey, finish := setup(t)
|
validator, m, validatorKey, finish := setup(t)
|
||||||
defer finish()
|
defer finish()
|
||||||
pubKey := [fieldparams.BLSPubkeyLength]byte{}
|
var pubKey [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
||||||
|
|
||||||
dummyRoot := [32]byte{}
|
var dummyRoot [32]byte
|
||||||
// Save a dummy proposal history at slot 0.
|
// Save a dummy proposal history at slot 0.
|
||||||
err := validator.db.SaveProposalHistoryForSlot(context.Background(), pubKey, 0, dummyRoot[:])
|
err := validator.db.SaveProposalHistoryForSlot(context.Background(), pubKey, 0, dummyRoot[:])
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@@ -398,7 +398,7 @@ func TestProposeBlock_BlocksDoubleProposal_After54KEpochs(t *testing.T) {
|
|||||||
|
|
||||||
secondTestBlock := util.NewBeaconBlock()
|
secondTestBlock := util.NewBeaconBlock()
|
||||||
secondTestBlock.Block.Slot = farFuture
|
secondTestBlock.Block.Slot = farFuture
|
||||||
blockGraffiti := [32]byte{}
|
var blockGraffiti [32]byte
|
||||||
copy(blockGraffiti[:], "someothergraffiti")
|
copy(blockGraffiti[:], "someothergraffiti")
|
||||||
secondTestBlock.Block.Body.Graffiti = blockGraffiti[:]
|
secondTestBlock.Block.Body.Graffiti = blockGraffiti[:]
|
||||||
m.validatorClient.EXPECT().GetBeaconBlock(
|
m.validatorClient.EXPECT().GetBeaconBlock(
|
||||||
@@ -447,7 +447,7 @@ func TestProposeBlock_AllowsPastProposals(t *testing.T) {
|
|||||||
hook := logTest.NewGlobal()
|
hook := logTest.NewGlobal()
|
||||||
validator, m, validatorKey, finish := setup(t)
|
validator, m, validatorKey, finish := setup(t)
|
||||||
defer finish()
|
defer finish()
|
||||||
pubKey := [fieldparams.BLSPubkeyLength]byte{}
|
var pubKey [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
||||||
|
|
||||||
// Save a dummy proposal history at slot 0.
|
// Save a dummy proposal history at slot 0.
|
||||||
@@ -592,7 +592,7 @@ func testProposeBlock(t *testing.T, graffiti []byte) {
|
|||||||
hook := logTest.NewGlobal()
|
hook := logTest.NewGlobal()
|
||||||
validator, m, validatorKey, finish := setup(t)
|
validator, m, validatorKey, finish := setup(t)
|
||||||
defer finish()
|
defer finish()
|
||||||
pubKey := [fieldparams.BLSPubkeyLength]byte{}
|
var pubKey [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
||||||
|
|
||||||
validator.graffiti = graffiti
|
validator.graffiti = graffiti
|
||||||
|
|||||||
@@ -32,7 +32,7 @@ func TestSubmitSyncCommitteeMessage_ValidatorDutiesRequestFailure(t *testing.T)
|
|||||||
Root: bytesutil.PadTo([]byte{}, 32),
|
Root: bytesutil.PadTo([]byte{}, 32),
|
||||||
}, nil)
|
}, nil)
|
||||||
|
|
||||||
pubKey := [fieldparams.BLSPubkeyLength]byte{}
|
var pubKey [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
||||||
validator.SubmitSyncCommitteeMessage(context.Background(), 1, pubKey)
|
validator.SubmitSyncCommitteeMessage(context.Background(), 1, pubKey)
|
||||||
require.LogsContain(t, hook, "Could not fetch validator assignment")
|
require.LogsContain(t, hook, "Could not fetch validator assignment")
|
||||||
@@ -64,7 +64,7 @@ func TestSubmitSyncCommitteeMessage_BadDomainData(t *testing.T) {
|
|||||||
DomainData(gomock.Any(), gomock.Any()).
|
DomainData(gomock.Any(), gomock.Any()).
|
||||||
Return(nil, errors.New("uh oh"))
|
Return(nil, errors.New("uh oh"))
|
||||||
|
|
||||||
pubKey := [fieldparams.BLSPubkeyLength]byte{}
|
var pubKey [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
||||||
validator.SubmitSyncCommitteeMessage(context.Background(), 1, pubKey)
|
validator.SubmitSyncCommitteeMessage(context.Background(), 1, pubKey)
|
||||||
require.LogsContain(t, hook, "Could not get sync committee domain data")
|
require.LogsContain(t, hook, "Could not get sync committee domain data")
|
||||||
@@ -104,7 +104,7 @@ func TestSubmitSyncCommitteeMessage_CouldNotSubmit(t *testing.T) {
|
|||||||
gomock.AssignableToTypeOf(ðpb.SyncCommitteeMessage{}),
|
gomock.AssignableToTypeOf(ðpb.SyncCommitteeMessage{}),
|
||||||
).Return(&emptypb.Empty{}, errors.New("uh oh") /* error */)
|
).Return(&emptypb.Empty{}, errors.New("uh oh") /* error */)
|
||||||
|
|
||||||
pubKey := [fieldparams.BLSPubkeyLength]byte{}
|
var pubKey [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
||||||
validator.SubmitSyncCommitteeMessage(context.Background(), 1, pubKey)
|
validator.SubmitSyncCommitteeMessage(context.Background(), 1, pubKey)
|
||||||
|
|
||||||
@@ -148,7 +148,7 @@ func TestSubmitSyncCommitteeMessage_OK(t *testing.T) {
|
|||||||
generatedMsg = msg
|
generatedMsg = msg
|
||||||
}).Return(&emptypb.Empty{}, nil /* error */)
|
}).Return(&emptypb.Empty{}, nil /* error */)
|
||||||
|
|
||||||
pubKey := [fieldparams.BLSPubkeyLength]byte{}
|
var pubKey [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
||||||
validator.SubmitSyncCommitteeMessage(context.Background(), 1, pubKey)
|
validator.SubmitSyncCommitteeMessage(context.Background(), 1, pubKey)
|
||||||
|
|
||||||
@@ -164,7 +164,7 @@ func TestSubmitSignedContributionAndProof_ValidatorDutiesRequestFailure(t *testi
|
|||||||
validator.duties = ðpb.DutiesResponse{Duties: []*ethpb.DutiesResponse_Duty{}}
|
validator.duties = ðpb.DutiesResponse{Duties: []*ethpb.DutiesResponse_Duty{}}
|
||||||
defer finish()
|
defer finish()
|
||||||
|
|
||||||
pubKey := [fieldparams.BLSPubkeyLength]byte{}
|
var pubKey [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
||||||
validator.SubmitSignedContributionAndProof(context.Background(), 1, pubKey)
|
validator.SubmitSignedContributionAndProof(context.Background(), 1, pubKey)
|
||||||
require.LogsContain(t, hook, "Could not fetch validator assignment")
|
require.LogsContain(t, hook, "Could not fetch validator assignment")
|
||||||
@@ -184,7 +184,7 @@ func TestSubmitSignedContributionAndProof_GetSyncSubcommitteeIndexFailure(t *tes
|
|||||||
}}
|
}}
|
||||||
defer finish()
|
defer finish()
|
||||||
|
|
||||||
pubKey := [fieldparams.BLSPubkeyLength]byte{}
|
var pubKey [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
||||||
m.validatorClient.EXPECT().GetSyncSubcommitteeIndex(
|
m.validatorClient.EXPECT().GetSyncSubcommitteeIndex(
|
||||||
gomock.Any(), // ctx
|
gomock.Any(), // ctx
|
||||||
@@ -212,7 +212,7 @@ func TestSubmitSignedContributionAndProof_NothingToDo(t *testing.T) {
|
|||||||
}}
|
}}
|
||||||
defer finish()
|
defer finish()
|
||||||
|
|
||||||
pubKey := [fieldparams.BLSPubkeyLength]byte{}
|
var pubKey [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
||||||
m.validatorClient.EXPECT().GetSyncSubcommitteeIndex(
|
m.validatorClient.EXPECT().GetSyncSubcommitteeIndex(
|
||||||
gomock.Any(), // ctx
|
gomock.Any(), // ctx
|
||||||
@@ -240,7 +240,7 @@ func TestSubmitSignedContributionAndProof_BadDomain(t *testing.T) {
|
|||||||
}}
|
}}
|
||||||
defer finish()
|
defer finish()
|
||||||
|
|
||||||
pubKey := [fieldparams.BLSPubkeyLength]byte{}
|
var pubKey [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
||||||
m.validatorClient.EXPECT().GetSyncSubcommitteeIndex(
|
m.validatorClient.EXPECT().GetSyncSubcommitteeIndex(
|
||||||
gomock.Any(), // ctx
|
gomock.Any(), // ctx
|
||||||
@@ -282,7 +282,7 @@ func TestSubmitSignedContributionAndProof_CouldNotGetContribution(t *testing.T)
|
|||||||
}}
|
}}
|
||||||
defer finish()
|
defer finish()
|
||||||
|
|
||||||
pubKey := [fieldparams.BLSPubkeyLength]byte{}
|
var pubKey [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
||||||
m.validatorClient.EXPECT().GetSyncSubcommitteeIndex(
|
m.validatorClient.EXPECT().GetSyncSubcommitteeIndex(
|
||||||
gomock.Any(), // ctx
|
gomock.Any(), // ctx
|
||||||
@@ -332,7 +332,7 @@ func TestSubmitSignedContributionAndProof_CouldNotSubmitContribution(t *testing.
|
|||||||
}}
|
}}
|
||||||
defer finish()
|
defer finish()
|
||||||
|
|
||||||
pubKey := [fieldparams.BLSPubkeyLength]byte{}
|
var pubKey [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
||||||
m.validatorClient.EXPECT().GetSyncSubcommitteeIndex(
|
m.validatorClient.EXPECT().GetSyncSubcommitteeIndex(
|
||||||
gomock.Any(), // ctx
|
gomock.Any(), // ctx
|
||||||
@@ -410,7 +410,7 @@ func TestSubmitSignedContributionAndProof_Ok(t *testing.T) {
|
|||||||
}}
|
}}
|
||||||
defer finish()
|
defer finish()
|
||||||
|
|
||||||
pubKey := [fieldparams.BLSPubkeyLength]byte{}
|
var pubKey [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
copy(pubKey[:], validatorKey.PublicKey().Marshal())
|
||||||
m.validatorClient.EXPECT().GetSyncSubcommitteeIndex(
|
m.validatorClient.EXPECT().GetSyncSubcommitteeIndex(
|
||||||
gomock.Any(), // ctx
|
gomock.Any(), // ctx
|
||||||
|
|||||||
@@ -515,7 +515,7 @@ func buildDuplicateError(response []*ethpb.DoppelGangerResponse_ValidatorRespons
|
|||||||
duplicates := make([][]byte, 0)
|
duplicates := make([][]byte, 0)
|
||||||
for _, valRes := range response {
|
for _, valRes := range response {
|
||||||
if valRes.DuplicateExists {
|
if valRes.DuplicateExists {
|
||||||
copiedKey := [fieldparams.BLSPubkeyLength]byte{}
|
var copiedKey [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(copiedKey[:], valRes.PublicKey)
|
copy(copiedKey[:], valRes.PublicKey)
|
||||||
duplicates = append(duplicates, copiedKey[:])
|
duplicates = append(duplicates, copiedKey[:])
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -85,7 +85,7 @@ func (m *mockKeymanager) FetchValidatingPublicKeys(_ context.Context) ([][fieldp
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (m *mockKeymanager) Sign(_ context.Context, req *validatorpb.SignRequest) (bls.Signature, error) {
|
func (m *mockKeymanager) Sign(_ context.Context, req *validatorpb.SignRequest) (bls.Signature, error) {
|
||||||
pubKey := [fieldparams.BLSPubkeyLength]byte{}
|
var pubKey [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(pubKey[:], req.PublicKey)
|
copy(pubKey[:], req.PublicKey)
|
||||||
privKey, ok := m.keysMap[pubKey]
|
privKey, ok := m.keysMap[pubKey]
|
||||||
if !ok {
|
if !ok {
|
||||||
@@ -306,7 +306,7 @@ func TestWaitMultipleActivation_LogsActivationEpochOK(t *testing.T) {
|
|||||||
beaconClient := mock2.NewMockBeaconChainClient(ctrl)
|
beaconClient := mock2.NewMockBeaconChainClient(ctrl)
|
||||||
privKey, err := bls.RandKey()
|
privKey, err := bls.RandKey()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
pubKey := [fieldparams.BLSPubkeyLength]byte{}
|
var pubKey [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(pubKey[:], privKey.PublicKey().Marshal())
|
copy(pubKey[:], privKey.PublicKey().Marshal())
|
||||||
km := &mockKeymanager{
|
km := &mockKeymanager{
|
||||||
keysMap: map[[fieldparams.BLSPubkeyLength]byte]bls.SecretKey{
|
keysMap: map[[fieldparams.BLSPubkeyLength]byte]bls.SecretKey{
|
||||||
@@ -344,7 +344,7 @@ func TestWaitActivation_NotAllValidatorsActivatedOK(t *testing.T) {
|
|||||||
beaconClient := mock2.NewMockBeaconChainClient(ctrl)
|
beaconClient := mock2.NewMockBeaconChainClient(ctrl)
|
||||||
privKey, err := bls.RandKey()
|
privKey, err := bls.RandKey()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
pubKey := [fieldparams.BLSPubkeyLength]byte{}
|
var pubKey [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(pubKey[:], privKey.PublicKey().Marshal())
|
copy(pubKey[:], privKey.PublicKey().Marshal())
|
||||||
km := &mockKeymanager{
|
km := &mockKeymanager{
|
||||||
keysMap: map[[fieldparams.BLSPubkeyLength]byte]bls.SecretKey{
|
keysMap: map[[fieldparams.BLSPubkeyLength]byte]bls.SecretKey{
|
||||||
@@ -467,7 +467,7 @@ func TestUpdateDuties_ReturnsError(t *testing.T) {
|
|||||||
|
|
||||||
privKey, err := bls.RandKey()
|
privKey, err := bls.RandKey()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
pubKey := [fieldparams.BLSPubkeyLength]byte{}
|
var pubKey [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(pubKey[:], privKey.PublicKey().Marshal())
|
copy(pubKey[:], privKey.PublicKey().Marshal())
|
||||||
km := &mockKeymanager{
|
km := &mockKeymanager{
|
||||||
keysMap: map[[fieldparams.BLSPubkeyLength]byte]bls.SecretKey{
|
keysMap: map[[fieldparams.BLSPubkeyLength]byte]bls.SecretKey{
|
||||||
@@ -505,7 +505,7 @@ func TestUpdateDuties_OK(t *testing.T) {
|
|||||||
slot := params.BeaconConfig().SlotsPerEpoch
|
slot := params.BeaconConfig().SlotsPerEpoch
|
||||||
privKey, err := bls.RandKey()
|
privKey, err := bls.RandKey()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
pubKey := [fieldparams.BLSPubkeyLength]byte{}
|
var pubKey [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(pubKey[:], privKey.PublicKey().Marshal())
|
copy(pubKey[:], privKey.PublicKey().Marshal())
|
||||||
km := &mockKeymanager{
|
km := &mockKeymanager{
|
||||||
keysMap: map[[fieldparams.BLSPubkeyLength]byte]bls.SecretKey{
|
keysMap: map[[fieldparams.BLSPubkeyLength]byte]bls.SecretKey{
|
||||||
@@ -567,7 +567,7 @@ func TestUpdateDuties_OK_FilterBlacklistedPublicKeys(t *testing.T) {
|
|||||||
for i := 0; i < numValidators; i++ {
|
for i := 0; i < numValidators; i++ {
|
||||||
priv, err := bls.RandKey()
|
priv, err := bls.RandKey()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
pubKey := [fieldparams.BLSPubkeyLength]byte{}
|
var pubKey [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(pubKey[:], priv.PublicKey().Marshal())
|
copy(pubKey[:], priv.PublicKey().Marshal())
|
||||||
keysMap[pubKey] = priv
|
keysMap[pubKey] = priv
|
||||||
blacklistedPublicKeys[pubKey] = true
|
blacklistedPublicKeys[pubKey] = true
|
||||||
|
|||||||
@@ -34,7 +34,7 @@ func TestWaitActivation_ContextCanceled(t *testing.T) {
|
|||||||
beaconClient := mock.NewMockBeaconChainClient(ctrl)
|
beaconClient := mock.NewMockBeaconChainClient(ctrl)
|
||||||
privKey, err := bls.RandKey()
|
privKey, err := bls.RandKey()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
pubKey := [fieldparams.BLSPubkeyLength]byte{}
|
var pubKey [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(pubKey[:], privKey.PublicKey().Marshal())
|
copy(pubKey[:], privKey.PublicKey().Marshal())
|
||||||
km := &mockKeymanager{
|
km := &mockKeymanager{
|
||||||
keysMap: map[[fieldparams.BLSPubkeyLength]byte]bls.SecretKey{
|
keysMap: map[[fieldparams.BLSPubkeyLength]byte]bls.SecretKey{
|
||||||
@@ -70,7 +70,7 @@ func TestWaitActivation_StreamSetupFails_AttemptsToReconnect(t *testing.T) {
|
|||||||
beaconClient := mock.NewMockBeaconChainClient(ctrl)
|
beaconClient := mock.NewMockBeaconChainClient(ctrl)
|
||||||
privKey, err := bls.RandKey()
|
privKey, err := bls.RandKey()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
pubKey := [fieldparams.BLSPubkeyLength]byte{}
|
var pubKey [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(pubKey[:], privKey.PublicKey().Marshal())
|
copy(pubKey[:], privKey.PublicKey().Marshal())
|
||||||
km := &mockKeymanager{
|
km := &mockKeymanager{
|
||||||
keysMap: map[[fieldparams.BLSPubkeyLength]byte]bls.SecretKey{
|
keysMap: map[[fieldparams.BLSPubkeyLength]byte]bls.SecretKey{
|
||||||
@@ -103,7 +103,7 @@ func TestWaitForActivation_ReceiveErrorFromStream_AttemptsReconnection(t *testin
|
|||||||
beaconClient := mock.NewMockBeaconChainClient(ctrl)
|
beaconClient := mock.NewMockBeaconChainClient(ctrl)
|
||||||
privKey, err := bls.RandKey()
|
privKey, err := bls.RandKey()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
pubKey := [fieldparams.BLSPubkeyLength]byte{}
|
var pubKey [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(pubKey[:], privKey.PublicKey().Marshal())
|
copy(pubKey[:], privKey.PublicKey().Marshal())
|
||||||
km := &mockKeymanager{
|
km := &mockKeymanager{
|
||||||
keysMap: map[[fieldparams.BLSPubkeyLength]byte]bls.SecretKey{
|
keysMap: map[[fieldparams.BLSPubkeyLength]byte]bls.SecretKey{
|
||||||
@@ -141,7 +141,7 @@ func TestWaitActivation_LogsActivationEpochOK(t *testing.T) {
|
|||||||
beaconClient := mock.NewMockBeaconChainClient(ctrl)
|
beaconClient := mock.NewMockBeaconChainClient(ctrl)
|
||||||
privKey, err := bls.RandKey()
|
privKey, err := bls.RandKey()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
pubKey := [fieldparams.BLSPubkeyLength]byte{}
|
var pubKey [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(pubKey[:], privKey.PublicKey().Marshal())
|
copy(pubKey[:], privKey.PublicKey().Marshal())
|
||||||
km := &mockKeymanager{
|
km := &mockKeymanager{
|
||||||
keysMap: map[[fieldparams.BLSPubkeyLength]byte]bls.SecretKey{
|
keysMap: map[[fieldparams.BLSPubkeyLength]byte]bls.SecretKey{
|
||||||
@@ -179,7 +179,7 @@ func TestWaitForActivation_Exiting(t *testing.T) {
|
|||||||
beaconClient := mock.NewMockBeaconChainClient(ctrl)
|
beaconClient := mock.NewMockBeaconChainClient(ctrl)
|
||||||
privKey, err := bls.RandKey()
|
privKey, err := bls.RandKey()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
pubKey := [fieldparams.BLSPubkeyLength]byte{}
|
var pubKey [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(pubKey[:], privKey.PublicKey().Marshal())
|
copy(pubKey[:], privKey.PublicKey().Marshal())
|
||||||
km := &mockKeymanager{
|
km := &mockKeymanager{
|
||||||
keysMap: map[[fieldparams.BLSPubkeyLength]byte]bls.SecretKey{
|
keysMap: map[[fieldparams.BLSPubkeyLength]byte]bls.SecretKey{
|
||||||
@@ -222,7 +222,7 @@ func TestWaitForActivation_RefetchKeys(t *testing.T) {
|
|||||||
beaconClient := mock.NewMockBeaconChainClient(ctrl)
|
beaconClient := mock.NewMockBeaconChainClient(ctrl)
|
||||||
privKey, err := bls.RandKey()
|
privKey, err := bls.RandKey()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
pubKey := [fieldparams.BLSPubkeyLength]byte{}
|
var pubKey [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(pubKey[:], privKey.PublicKey().Marshal())
|
copy(pubKey[:], privKey.PublicKey().Marshal())
|
||||||
km := &mockKeymanager{
|
km := &mockKeymanager{
|
||||||
keysMap: map[[fieldparams.BLSPubkeyLength]byte]bls.SecretKey{
|
keysMap: map[[fieldparams.BLSPubkeyLength]byte]bls.SecretKey{
|
||||||
@@ -262,11 +262,11 @@ func TestWaitForActivation_AccountsChanged(t *testing.T) {
|
|||||||
t.Run("Imported keymanager", func(t *testing.T) {
|
t.Run("Imported keymanager", func(t *testing.T) {
|
||||||
inactivePrivKey, err := bls.RandKey()
|
inactivePrivKey, err := bls.RandKey()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
inactivePubKey := [fieldparams.BLSPubkeyLength]byte{}
|
var inactivePubKey [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(inactivePubKey[:], inactivePrivKey.PublicKey().Marshal())
|
copy(inactivePubKey[:], inactivePrivKey.PublicKey().Marshal())
|
||||||
activePrivKey, err := bls.RandKey()
|
activePrivKey, err := bls.RandKey()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
activePubKey := [fieldparams.BLSPubkeyLength]byte{}
|
var activePubKey [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(activePubKey[:], activePrivKey.PublicKey().Marshal())
|
copy(activePubKey[:], activePrivKey.PublicKey().Marshal())
|
||||||
km := &mockKeymanager{
|
km := &mockKeymanager{
|
||||||
keysMap: map[[fieldparams.BLSPubkeyLength]byte]bls.SecretKey{
|
keysMap: map[[fieldparams.BLSPubkeyLength]byte]bls.SecretKey{
|
||||||
@@ -327,12 +327,12 @@ func TestWaitForActivation_AccountsChanged(t *testing.T) {
|
|||||||
inactivePrivKey, err :=
|
inactivePrivKey, err :=
|
||||||
util.PrivateKeyFromSeedAndPath(seed, fmt.Sprintf(derived.ValidatingKeyDerivationPathTemplate, 0))
|
util.PrivateKeyFromSeedAndPath(seed, fmt.Sprintf(derived.ValidatingKeyDerivationPathTemplate, 0))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
inactivePubKey := [fieldparams.BLSPubkeyLength]byte{}
|
var inactivePubKey [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(inactivePubKey[:], inactivePrivKey.PublicKey().Marshal())
|
copy(inactivePubKey[:], inactivePrivKey.PublicKey().Marshal())
|
||||||
activePrivKey, err :=
|
activePrivKey, err :=
|
||||||
util.PrivateKeyFromSeedAndPath(seed, fmt.Sprintf(derived.ValidatingKeyDerivationPathTemplate, 1))
|
util.PrivateKeyFromSeedAndPath(seed, fmt.Sprintf(derived.ValidatingKeyDerivationPathTemplate, 1))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
activePubKey := [fieldparams.BLSPubkeyLength]byte{}
|
var activePubKey [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(activePubKey[:], activePrivKey.PublicKey().Marshal())
|
copy(activePubKey[:], activePrivKey.PublicKey().Marshal())
|
||||||
wallet := &walletMock.Wallet{
|
wallet := &walletMock.Wallet{
|
||||||
Files: make(map[string]map[string][]byte),
|
Files: make(map[string]map[string][]byte),
|
||||||
|
|||||||
@@ -19,7 +19,7 @@ func (s *Store) EIPImportBlacklistedPublicKeys(ctx context.Context) ([][fieldpar
|
|||||||
bucket := tx.Bucket(slashablePublicKeysBucket)
|
bucket := tx.Bucket(slashablePublicKeysBucket)
|
||||||
return bucket.ForEach(func(key []byte, _ []byte) error {
|
return bucket.ForEach(func(key []byte, _ []byte) error {
|
||||||
if key != nil {
|
if key != nil {
|
||||||
pubKeyBytes := [fieldparams.BLSPubkeyLength]byte{}
|
var pubKeyBytes [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(pubKeyBytes[:], key)
|
copy(pubKeyBytes[:], key)
|
||||||
publicKeys = append(publicKeys, pubKeyBytes)
|
publicKeys = append(publicKeys, pubKeyBytes)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -15,7 +15,7 @@ func TestStore_EIPBlacklistedPublicKeys(t *testing.T) {
|
|||||||
numValidators := 100
|
numValidators := 100
|
||||||
publicKeys := make([][fieldparams.BLSPubkeyLength]byte, numValidators)
|
publicKeys := make([][fieldparams.BLSPubkeyLength]byte, numValidators)
|
||||||
for i := 0; i < numValidators; i++ {
|
for i := 0; i < numValidators; i++ {
|
||||||
key := [fieldparams.BLSPubkeyLength]byte{}
|
var key [fieldparams.BLSPubkeyLength]byte
|
||||||
copy(key[:], fmt.Sprintf("%d", i))
|
copy(key[:], fmt.Sprintf("%d", i))
|
||||||
publicKeys[i] = key
|
publicKeys[i] = key
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -34,7 +34,7 @@ func (s *Store) ProposedPublicKeys(ctx context.Context) ([][fieldparams.BLSPubke
err = s.view(func(tx *bolt.Tx) error {
bucket := tx.Bucket(historicProposalsBucket)
return bucket.ForEach(func(key []byte, _ []byte) error {
-pubKeyBytes := [fieldparams.BLSPubkeyLength]byte{}
+var pubKeyBytes [fieldparams.BLSPubkeyLength]byte
copy(pubKeyBytes[:], key)
proposedPublicKeys = append(proposedPublicKeys, pubKeyBytes)
return nil
@@ -52,7 +52,7 @@ func (s *Store) ProposalHistoryForSlot(ctx context.Context, publicKey [fieldpara

var err error
var proposalExists bool
-signingRoot := [32]byte{}
+var signingRoot [32]byte
err = s.view(func(tx *bolt.Tx) error {
bucket := tx.Bucket(historicProposalsBucket)
valBucket := bucket.Bucket(publicKey[:])

@@ -192,7 +192,7 @@ func TestStore_ProposedPublicKeys(t *testing.T) {
assert.DeepEqual(t, make([][fieldparams.BLSPubkeyLength]byte, 0), keys)

pubKey := [fieldparams.BLSPubkeyLength]byte{1}
-dummyRoot := [32]byte{}
+var dummyRoot [32]byte
err = validatorDB.SaveProposalHistoryForSlot(ctx, pubKey, 1, dummyRoot[:])
require.NoError(t, err)

@@ -204,7 +204,7 @@ func TestStore_ProposedPublicKeys(t *testing.T) {
func TestStore_LowestSignedProposal(t *testing.T) {
ctx := context.Background()
pubkey := [fieldparams.BLSPubkeyLength]byte{3}
-dummySigningRoot := [32]byte{}
+var dummySigningRoot [32]byte
validatorDB := setupDB(t, [][fieldparams.BLSPubkeyLength]byte{pubkey})

_, exists, err := validatorDB.LowestSignedProposal(ctx, pubkey)
@@ -245,7 +245,7 @@ func TestStore_LowestSignedProposal(t *testing.T) {
func TestStore_HighestSignedProposal(t *testing.T) {
ctx := context.Background()
pubkey := [fieldparams.BLSPubkeyLength]byte{3}
-dummySigningRoot := [32]byte{}
+var dummySigningRoot [32]byte
validatorDB := setupDB(t, [][fieldparams.BLSPubkeyLength]byte{pubkey})

_, exists, err := validatorDB.HighestSignedProposal(ctx, pubkey)

@@ -103,7 +103,7 @@ func TestDerivedKeymanager_FetchValidatingPublicKeys(t *testing.T) {
for i := 0; i < numAccounts; i++ {
privKey, err := util.PrivateKeyFromSeedAndPath(derivedSeed, fmt.Sprintf(ValidatingKeyDerivationPathTemplate, i))
require.NoError(t, err)
-pubKey := [fieldparams.BLSPubkeyLength]byte{}
+var pubKey [fieldparams.BLSPubkeyLength]byte
copy(pubKey[:], privKey.PublicKey().Marshal())
wantedPubKeys[i] = pubKey
}
@@ -142,7 +142,7 @@ func TestDerivedKeymanager_FetchValidatingPrivateKeys(t *testing.T) {
for i := 0; i < numAccounts; i++ {
privKey, err := util.PrivateKeyFromSeedAndPath(derivedSeed, fmt.Sprintf(ValidatingKeyDerivationPathTemplate, i))
require.NoError(t, err)
-privKeyBytes := [32]byte{}
+var privKeyBytes [32]byte
copy(privKeyBytes[:], privKey.Marshal())
wantedPrivKeys[i] = privKeyBytes
}

@@ -68,7 +68,7 @@ func MockAttestingAndProposalHistories(pubkeys [][fieldparams.BLSPubkeyLength]by
historicalAtts := make([]*kv.AttestationRecord, 0)
proposals := make([]kv.Proposal, 0)
for i := types.Epoch(1); i < latestTarget; i++ {
-signingRoot := [32]byte{}
+var signingRoot [32]byte
signingRootStr := fmt.Sprintf("%d", i)
copy(signingRoot[:], signingRootStr)
historicalAtts = append(historicalAtts, &kv.AttestationRecord{
@@ -79,7 +79,7 @@ func MockAttestingAndProposalHistories(pubkeys [][fieldparams.BLSPubkeyLength]by
})
}
for i := types.Epoch(1); i <= latestTarget; i++ {
-signingRoot := [32]byte{}
+var signingRoot [32]byte
signingRootStr := fmt.Sprintf("%d", i)
copy(signingRoot[:], signingRootStr)
proposals = append(proposals, kv.Proposal{