Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-09 15:37:56 -05:00)
Applies assertion funcs to sync tests (#6603)
* applies assertion funcs to sync/initial-sync tests
* applies assertion funcs to sync/initial-sync tests
* gazelle
* Merge branch 'master' into sync-apply-testutils-assertions
* gazelle
* applies assertion funcs to sync/initial-sync tests
* applies assertion funcs to sync/initial-sync tests
* applies assertion funcs to sync/initial-sync tests
* applies assertion funcs to sync/initial-sync tests
* applies assertion funcs to sync/initial-sync tests
* Merge branch 'master' into sync-apply-testutils-assertions
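The change applied throughout the hunks below is mechanical: hand-rolled checks such as "if err != nil { t.Fatal(err) }" and "if got != want { t.Errorf(...) }" are collapsed into the shared testutil helpers. The sketch below is illustrative only and is not part of the commit; the package name, doWork, and TestAssertionHelpers are invented for the example, while the assert and require packages and their (t, expected, actual, message) argument order are taken from the diff itself.

package example_test // hypothetical package, for illustration only

import (
	"testing"

	"github.com/prysmaticlabs/prysm/shared/testutil/assert"
	"github.com/prysmaticlabs/prysm/shared/testutil/require"
)

// doWork stands in for any function under test; it is not part of Prysm.
func doWork() (int, error) {
	return 42, nil
}

func TestAssertionHelpers(t *testing.T) {
	// Before: each check was written out by hand.
	got, err := doWork()
	if err != nil {
		t.Fatal(err)
	}
	if got != 42 {
		t.Errorf("unexpected value, want: %v, got: %v", 42, got)
	}

	// After: the same intent, one line per check.
	got, err = doWork()
	require.NoError(t, err)                      // fatal on error, like t.Fatal
	assert.Equal(t, 42, got, "Unexpected value") // non-fatal on mismatch, like t.Errorf
}

Where a test asserted an expected error, the same hunks use assert.ErrorContains(t, wantSubstring, err), and assert.DeepEqual / require.DeepEqual replace reflect.DeepEqual comparisons.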
@@ -153,6 +153,8 @@ go_test(
"//shared/params:go_default_library",
"//shared/roughtime:go_default_library",
"//shared/testutil:go_default_library",
"//shared/testutil/assert:go_default_library",
"//shared/testutil/require:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
"@com_github_gogo_protobuf//proto:go_default_library",
"@com_github_hashicorp_golang_lru//:go_default_library",

@@ -6,6 +6,8 @@ import (

p2ptest "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
)

func TestRegularSync_generateErrorResponse(t *testing.T) {
@@ -13,23 +15,15 @@ func TestRegularSync_generateErrorResponse(t *testing.T) {
p2p: p2ptest.NewTestP2P(t),
}
data, err := r.generateErrorResponse(responseCodeServerError, "something bad happened")
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)

buf := bytes.NewBuffer(data)
b := make([]byte, 1)
if _, err := buf.Read(b); err != nil {
t.Fatal(err)
}
if b[0] != responseCodeServerError {
t.Errorf("The first byte was not the status code. Got %#x wanted %#x", b, responseCodeServerError)
}
assert.Equal(t, responseCodeServerError, b[0], "The first byte was not the status code")
msg := &pb.ErrorResponse{}
if err := r.p2p.Encoding().DecodeWithMaxLength(buf, msg); err != nil {
t.Fatal(err)
}
if string(msg.Message) != "something bad happened" {
t.Errorf("Received the wrong message: %v", msg)
}
require.NoError(t, r.p2p.Encoding().DecodeWithMaxLength(buf, msg))
assert.Equal(t, "something bad happened", string(msg.Message), "Received the wrong message")
}

@@ -77,6 +77,8 @@ go_test(
"//shared/roughtime:go_default_library",
"//shared/sliceutil:go_default_library",
"//shared/testutil:go_default_library",
"//shared/testutil/assert:go_default_library",
"//shared/testutil/require:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
"@com_github_kevinms_leakybucket_go//:go_default_library",
"@com_github_libp2p_go_libp2p_core//:go_default_library",
@@ -117,6 +119,8 @@ go_test(
"//shared/params:go_default_library",
"//shared/roughtime:go_default_library",
"//shared/sliceutil:go_default_library",
"//shared/testutil/assert:go_default_library",
"//shared/testutil/require:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
"@com_github_kevinms_leakybucket_go//:go_default_library",
"@com_github_libp2p_go_libp2p_core//:go_default_library",

@@ -3,7 +3,6 @@ package initialsync
import (
"context"
"fmt"
"reflect"
"sort"
"sync"
"testing"
@@ -25,6 +24,8 @@ import (
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/roughtime"
"github.com/prysmaticlabs/prysm/shared/sliceutil"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
"github.com/sirupsen/logrus"
)

@@ -43,9 +44,7 @@ func TestBlocksFetcher_InitStartStop(t *testing.T) {
|
||||
|
||||
t.Run("check for leaked goroutines", func(t *testing.T) {
|
||||
err := fetcher.start()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
fetcher.stop() // should block up until all resources are reclaimed
|
||||
select {
|
||||
case <-fetcher.requestResponses():
|
||||
@@ -55,9 +54,7 @@ func TestBlocksFetcher_InitStartStop(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("re-starting of stopped fetcher", func(t *testing.T) {
|
||||
if err := fetcher.start(); err == nil {
|
||||
t.Errorf("expected error not returned: %v", errFetcherCtxIsDone)
|
||||
}
|
||||
assert.ErrorContains(t, errFetcherCtxIsDone.Error(), fetcher.start())
|
||||
})
|
||||
|
||||
t.Run("multiple stopping attempts", func(t *testing.T) {
|
||||
@@ -67,10 +64,7 @@ func TestBlocksFetcher_InitStartStop(t *testing.T) {
|
||||
headFetcher: mc,
|
||||
p2p: p2p,
|
||||
})
|
||||
if err := fetcher.start(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
require.NoError(t, fetcher.start())
|
||||
fetcher.stop()
|
||||
fetcher.stop()
|
||||
})
|
||||
@@ -83,10 +77,7 @@ func TestBlocksFetcher_InitStartStop(t *testing.T) {
|
||||
headFetcher: mc,
|
||||
p2p: p2p,
|
||||
})
|
||||
if err := fetcher.start(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
require.NoError(t, fetcher.start())
|
||||
cancel()
|
||||
fetcher.stop()
|
||||
})
|
||||
@@ -260,18 +251,12 @@ func TestBlocksFetcher_RoundRobin(t *testing.T) {
|
||||
genesisRoot := cache.rootCache[0]
|
||||
cache.RUnlock()
|
||||
|
||||
err := beaconDB.SaveBlock(context.Background(), ð.SignedBeaconBlock{
|
||||
Block: ð.BeaconBlock{
|
||||
Slot: 0,
|
||||
}})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
err := beaconDB.SaveBlock(context.Background(), ð.SignedBeaconBlock{Block: ð.BeaconBlock{Slot: 0}})
|
||||
require.NoError(t, err)
|
||||
|
||||
st, err := stateTrie.InitializeFromProto(&p2ppb.BeaconState{})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
|
||||
mc := &mock.ChainService{
|
||||
State: st,
|
||||
Root: genesisRoot[:],
|
||||
@@ -279,17 +264,8 @@ func TestBlocksFetcher_RoundRobin(t *testing.T) {
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
fetcher := newBlocksFetcher(
|
||||
ctx,
|
||||
&blocksFetcherConfig{
|
||||
headFetcher: mc,
|
||||
p2p: p,
|
||||
})
|
||||
|
||||
err = fetcher.start()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{headFetcher: mc, p2p: p})
|
||||
require.NoError(t, fetcher.start())
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(len(tt.requests)) // how many block requests we are going to make
|
||||
@@ -330,16 +306,12 @@ func TestBlocksFetcher_RoundRobin(t *testing.T) {
|
||||
maxExpectedBlocks := uint64(0)
|
||||
for _, requestParams := range tt.requests {
|
||||
err = fetcher.scheduleRequest(context.Background(), requestParams.start, requestParams.count)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
assert.NoError(t, err)
|
||||
maxExpectedBlocks += requestParams.count
|
||||
}
|
||||
|
||||
blocks, err := processFetchedBlocks()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
assert.NoError(t, err)
|
||||
|
||||
sort.Slice(blocks, func(i, j int) bool {
|
||||
return blocks[i].Block.Slot < blocks[j].Block.Slot
|
||||
@@ -358,9 +330,7 @@ func TestBlocksFetcher_RoundRobin(t *testing.T) {
|
||||
if len(blocks) > int(maxExpectedBlocks) {
|
||||
t.Errorf("Too many blocks returned. Wanted %d got %d", maxExpectedBlocks, len(blocks))
|
||||
}
|
||||
if len(blocks) != len(tt.expectedBlockSlots) {
|
||||
t.Errorf("Processes wrong number of blocks. Wanted %d got %d", len(tt.expectedBlockSlots), len(blocks))
|
||||
}
|
||||
assert.Equal(t, len(tt.expectedBlockSlots), len(blocks), "Processes wrong number of blocks")
|
||||
var receivedBlockSlots []uint64
|
||||
for _, blk := range blocks {
|
||||
receivedBlockSlots = append(receivedBlockSlots, blk.Block.Slot)
|
||||
@@ -383,9 +353,7 @@ func TestBlocksFetcher_scheduleRequest(t *testing.T) {
|
||||
p2p: nil,
|
||||
})
|
||||
cancel()
|
||||
if err := fetcher.scheduleRequest(ctx, 1, blockBatchLimit); err == nil {
|
||||
t.Errorf("expected error: %v", errFetcherCtxIsDone)
|
||||
}
|
||||
assert.ErrorContains(t, "context canceled", fetcher.scheduleRequest(ctx, 1, blockBatchLimit))
|
||||
})
|
||||
}
|
||||
func TestBlocksFetcher_handleRequest(t *testing.T) {
|
||||
@@ -420,9 +388,7 @@ func TestBlocksFetcher_handleRequest(t *testing.T) {
|
||||
|
||||
cancel()
|
||||
response := fetcher.handleRequest(ctx, 1, blockBatchLimit)
|
||||
if response.err == nil {
|
||||
t.Errorf("expected error: %v", errFetcherCtxIsDone)
|
||||
}
|
||||
assert.ErrorContains(t, "context canceled", response.err)
|
||||
})
|
||||
|
||||
t.Run("receive blocks", func(t *testing.T) {
|
||||
@@ -510,20 +476,14 @@ func TestBlocksFetcher_requestBeaconBlocksByRange(t *testing.T) {
|
||||
Count: blockBatchLimit,
|
||||
}
|
||||
blocks, err := fetcher.requestBlocks(ctx, req, peers[0])
|
||||
if err != nil {
|
||||
t.Errorf("error: %v", err)
|
||||
}
|
||||
if uint64(len(blocks)) != blockBatchLimit {
|
||||
t.Errorf("incorrect number of blocks returned, expected: %v, got: %v", blockBatchLimit, len(blocks))
|
||||
}
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, blockBatchLimit, uint64(len(blocks)), "Incorrect number of blocks returned")
|
||||
|
||||
// Test context cancellation.
|
||||
ctx, cancel = context.WithCancel(context.Background())
|
||||
cancel()
|
||||
blocks, err = fetcher.requestBlocks(ctx, req, peers[0])
|
||||
if err == nil || err.Error() != "context canceled" {
|
||||
t.Errorf("expected context closed error, got: %v", err)
|
||||
}
|
||||
assert.ErrorContains(t, "context canceled", err)
|
||||
}
|
||||
|
||||
func TestBlocksFetcher_selectFailOverPeer(t *testing.T) {
|
||||
@@ -670,12 +630,8 @@ func TestBlocksFetcher_nonSkippedSlotAfter(t *testing.T) {
|
||||
for seekSlot, expectedSlot := range seekSlots {
|
||||
t.Run(fmt.Sprintf("range: %d (%d-%d)", expectedSlot-seekSlot, seekSlot, expectedSlot), func(t *testing.T) {
|
||||
slot, err := fetcher.nonSkippedSlotAfter(ctx, seekSlot)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if slot != expectedSlot {
|
||||
t.Errorf("unexpected slot, want: %v, got: %v", expectedSlot, slot)
|
||||
}
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, expectedSlot, slot, "Unexpected slot")
|
||||
})
|
||||
}
|
||||
|
||||
@@ -686,9 +642,7 @@ func TestBlocksFetcher_nonSkippedSlotAfter(t *testing.T) {
|
||||
var i int
|
||||
for i = 0; i < 100; i++ {
|
||||
slot, err := fetcher.nonSkippedSlotAfter(ctx, seekSlot)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
assert.NoError(t, err)
|
||||
if slot == expectedSlot {
|
||||
found = true
|
||||
break
|
||||
@@ -772,9 +726,7 @@ func TestBlocksFetcher_filterPeers(t *testing.T) {
|
||||
fetcher.rateLimiter.Add(pid.ID.String(), pid.usedCapacity)
|
||||
}
|
||||
got, err := fetcher.filterPeers(pids, tt.args.peersPercentage)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
// Re-arrange peers with the same remaining capacity, deterministically .
|
||||
// They are deliberately shuffled - so that on the same capacity any of
|
||||
// such peers can be selected. That's why they are sorted here.
|
||||
@@ -786,9 +738,7 @@ func TestBlocksFetcher_filterPeers(t *testing.T) {
|
||||
}
|
||||
return i < j
|
||||
})
|
||||
if !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("filterPeers() got = %#v, want %#v", got, tt.want)
|
||||
}
|
||||
assert.DeepEqual(t, tt.want, got)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -799,9 +749,7 @@ func TestBlocksFetcher_RequestBlocksRateLimitingLocks(t *testing.T) {
|
||||
p3 := p2pt.NewTestP2P(t)
|
||||
p1.Connect(p2)
|
||||
p1.Connect(p3)
|
||||
if len(p1.BHost.Network().Peers()) != 2 {
|
||||
t.Fatal("Expected peers to be connected")
|
||||
}
|
||||
require.Equal(t, 2, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
|
||||
req := &p2ppb.BeaconBlocksByRangeRequest{
|
||||
StartSlot: 100,
|
||||
Step: 1,
|
||||
@@ -811,9 +759,7 @@ func TestBlocksFetcher_RequestBlocksRateLimitingLocks(t *testing.T) {
|
||||
topic := p2pm.RPCBlocksByRangeTopic
|
||||
protocol := core.ProtocolID(topic + p2.Encoding().ProtocolSuffix())
|
||||
streamHandlerFn := func(stream network.Stream) {
|
||||
if err := stream.Close(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
assert.NoError(t, stream.Close())
|
||||
}
|
||||
p2.BHost.SetStreamHandler(protocol, streamHandlerFn)
|
||||
p3.BHost.SetStreamHandler(protocol, streamHandlerFn)
|
||||
@@ -851,9 +797,7 @@ func TestBlocksFetcher_RequestBlocksRateLimitingLocks(t *testing.T) {
|
||||
ch := make(chan struct{}, 1)
|
||||
go func() {
|
||||
_, err := fetcher.requestBlocks(ctx, req, p3.PeerID())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
assert.NoError(t, err)
|
||||
ch <- struct{}{}
|
||||
}()
|
||||
timer := time.NewTimer(2 * time.Second)
|
||||
@@ -992,9 +936,7 @@ func TestBlocksFetcher_removeStalePeerLocks(t *testing.T) {
|
||||
sort.SliceStable(peersOut2, func(i, j int) bool {
|
||||
return peersOut2[i].String() < peersOut2[j].String()
|
||||
})
|
||||
if !reflect.DeepEqual(peersOut1, peersOut2) {
|
||||
t.Errorf("unexpected peers map, want: %#v, got: %#v", peersOut1, peersOut2)
|
||||
}
|
||||
assert.DeepEqual(t, peersOut1, peersOut2, "Unexpected peers map")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/sliceutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
|
||||
)
|
||||
|
||||
func TestBlocksQueueInitStartStop(t *testing.T) {
|
||||
@@ -30,10 +31,7 @@ func TestBlocksQueueInitStartStop(t *testing.T) {
|
||||
headFetcher: mc,
|
||||
highestExpectedSlot: blockBatchLimit,
|
||||
})
|
||||
|
||||
if err := queue.stop(); err == nil {
|
||||
t.Errorf("expected error: %v", errQueueTakesTooLongToStop)
|
||||
}
|
||||
assert.ErrorContains(t, errQueueTakesTooLongToStop.Error(), queue.stop())
|
||||
})
|
||||
|
||||
t.Run("use default fetcher", func(t *testing.T) {
|
||||
@@ -43,9 +41,7 @@ func TestBlocksQueueInitStartStop(t *testing.T) {
|
||||
headFetcher: mc,
|
||||
highestExpectedSlot: blockBatchLimit,
|
||||
})
|
||||
if err := queue.start(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
assert.NoError(t, queue.start())
|
||||
})
|
||||
|
||||
t.Run("stop timeout", func(t *testing.T) {
|
||||
@@ -55,12 +51,8 @@ func TestBlocksQueueInitStartStop(t *testing.T) {
|
||||
headFetcher: mc,
|
||||
highestExpectedSlot: blockBatchLimit,
|
||||
})
|
||||
if err := queue.start(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if err := queue.stop(); err == nil {
|
||||
t.Errorf("expected error: %v", errQueueTakesTooLongToStop)
|
||||
}
|
||||
assert.NoError(t, queue.start())
|
||||
assert.ErrorContains(t, errQueueTakesTooLongToStop.Error(), queue.stop())
|
||||
})
|
||||
|
||||
t.Run("check for leaked goroutines", func(t *testing.T) {
|
||||
@@ -72,13 +64,9 @@ func TestBlocksQueueInitStartStop(t *testing.T) {
|
||||
highestExpectedSlot: blockBatchLimit,
|
||||
})
|
||||
|
||||
if err := queue.start(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
assert.NoError(t, queue.start())
|
||||
// Blocks up until all resources are reclaimed (or timeout is called)
|
||||
if err := queue.stop(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
assert.NoError(t, queue.stop())
|
||||
select {
|
||||
case <-queue.fetchedBlocks:
|
||||
default:
|
||||
@@ -99,15 +87,9 @@ func TestBlocksQueueInitStartStop(t *testing.T) {
|
||||
headFetcher: mc,
|
||||
highestExpectedSlot: blockBatchLimit,
|
||||
})
|
||||
if err := queue.start(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if err := queue.stop(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if err := queue.start(); err == nil {
|
||||
t.Errorf("expected error not returned: %v", errQueueCtxIsDone)
|
||||
}
|
||||
assert.NoError(t, queue.start())
|
||||
assert.NoError(t, queue.stop())
|
||||
assert.ErrorContains(t, errQueueCtxIsDone.Error(), queue.start())
|
||||
})
|
||||
|
||||
t.Run("multiple stopping attempts", func(t *testing.T) {
|
||||
@@ -118,16 +100,9 @@ func TestBlocksQueueInitStartStop(t *testing.T) {
|
||||
headFetcher: mc,
|
||||
highestExpectedSlot: blockBatchLimit,
|
||||
})
|
||||
if err := queue.start(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if err := queue.stop(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if err := queue.stop(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
assert.NoError(t, queue.start())
|
||||
assert.NoError(t, queue.stop())
|
||||
assert.NoError(t, queue.stop())
|
||||
})
|
||||
|
||||
t.Run("cancellation", func(t *testing.T) {
|
||||
@@ -137,14 +112,9 @@ func TestBlocksQueueInitStartStop(t *testing.T) {
|
||||
headFetcher: mc,
|
||||
highestExpectedSlot: blockBatchLimit,
|
||||
})
|
||||
if err := queue.start(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
assert.NoError(t, queue.start())
|
||||
cancel()
|
||||
if err := queue.stop(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
assert.NoError(t, queue.stop())
|
||||
})
|
||||
}
|
||||
|
||||
@@ -268,9 +238,7 @@ func TestBlocksQueueLoop(t *testing.T) {
|
||||
headFetcher: mc,
|
||||
highestExpectedSlot: tt.highestExpectedSlot,
|
||||
})
|
||||
if err := queue.start(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
assert.NoError(t, queue.start())
|
||||
processBlock := func(block *eth.SignedBeaconBlock) error {
|
||||
if !beaconDB.HasBlock(ctx, bytesutil.ToBytes32(block.Block.ParentRoot)) {
|
||||
return fmt.Errorf("beacon node doesn't have a block in db with root %#x", block.Block.ParentRoot)
|
||||
@@ -296,22 +264,19 @@ func TestBlocksQueueLoop(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
if err := queue.stop(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
assert.NoError(t, queue.stop())
|
||||
|
||||
if queue.headFetcher.HeadSlot() < tt.highestExpectedSlot {
|
||||
t.Errorf("Not enough slots synced, want: %v, got: %v",
|
||||
len(tt.expectedBlockSlots), queue.headFetcher.HeadSlot())
|
||||
}
|
||||
if len(blocks) != len(tt.expectedBlockSlots) {
|
||||
t.Errorf("Processes wrong number of blocks. Wanted %d got %d", len(tt.expectedBlockSlots), len(blocks))
|
||||
}
|
||||
assert.Equal(t, len(tt.expectedBlockSlots), len(blocks), "Processes wrong number of blocks")
|
||||
var receivedBlockSlots []uint64
|
||||
for _, blk := range blocks {
|
||||
receivedBlockSlots = append(receivedBlockSlots, blk.Block.Slot)
|
||||
}
|
||||
if missing := sliceutil.NotUint64(sliceutil.IntersectionUint64(tt.expectedBlockSlots, receivedBlockSlots), tt.expectedBlockSlots); len(missing) > 0 {
|
||||
missing := sliceutil.NotUint64(sliceutil.IntersectionUint64(tt.expectedBlockSlots, receivedBlockSlots), tt.expectedBlockSlots)
|
||||
if len(missing) > 0 {
|
||||
t.Errorf("Missing blocks at slots %v", missing)
|
||||
}
|
||||
})
|
||||
|
||||
@@ -2,6 +2,8 @@ package initialsync
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
)
|
||||
|
||||
func BenchmarkStateMachine_trigger(b *testing.B) {
|
||||
@@ -32,8 +34,6 @@ func BenchmarkStateMachine_trigger(b *testing.B) {
|
||||
count: 32,
|
||||
}
|
||||
err := sm.machines[64].trigger(eventTick, data)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
require.NoError(b, err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,8 +3,10 @@ package initialsync
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
)
|
||||
|
||||
func TestStateMachineManager_String(t *testing.T) {
|
||||
@@ -35,27 +37,19 @@ func TestStateMachineManager_String(t *testing.T) {
|
||||
smm := &stateMachineManager{
|
||||
machines: tt.machines,
|
||||
}
|
||||
if got := smm.String(); got != tt.want {
|
||||
t.Errorf("unexpected output, got: %v, want: %v", got, tt.want)
|
||||
}
|
||||
assert.Equal(t, tt.want, smm.String())
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestStateMachine_StateIDString(t *testing.T) {
|
||||
stateIDs := []stateID{stateNew, stateScheduled, stateDataParsed, stateSkipped, stateSent}
|
||||
want := "[new scheduled dataParsed skipped sent]"
|
||||
if got := fmt.Sprintf("%v", stateIDs); got != want {
|
||||
t.Errorf("unexpected output, got: %q, want: %q", got, want)
|
||||
}
|
||||
assert.Equal(t, "[new scheduled dataParsed skipped sent]", fmt.Sprintf("%v", stateIDs))
|
||||
}
|
||||
|
||||
func TestStateMachine_EventIDString(t *testing.T) {
|
||||
eventIDs := []eventID{eventTick, eventDataReceived}
|
||||
want := "[tick dataReceived]"
|
||||
if got := fmt.Sprintf("%v", eventIDs); got != want {
|
||||
t.Errorf("unexpected output, got: %q, want: %q", got, want)
|
||||
}
|
||||
assert.Equal(t, "[tick dataReceived]", fmt.Sprintf("%v", eventIDs))
|
||||
}
|
||||
|
||||
func TestStateMachineManager_addEventHandler(t *testing.T) {
|
||||
@@ -64,47 +58,29 @@ func TestStateMachineManager_addEventHandler(t *testing.T) {
|
||||
smm.addEventHandler(eventTick, stateNew, func(m *stateMachine, i interface{}) (id stateID, err error) {
|
||||
return stateScheduled, nil
|
||||
})
|
||||
if len(smm.handlers[stateNew]) != 1 {
|
||||
t.Errorf("unexpected size, got: %v, want: %v", len(smm.handlers[stateNew]), 1)
|
||||
}
|
||||
assert.Equal(t, 1, len(smm.handlers[stateNew]), "Unexpected size")
|
||||
state, err := smm.handlers[stateNew][eventTick](nil, nil)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if state != stateScheduled {
|
||||
t.Errorf("unexpected state, got: %v, want: %v", state, stateScheduled)
|
||||
}
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, stateScheduled, state, "Unexpected state")
|
||||
|
||||
// Add second handler to the same event
|
||||
smm.addEventHandler(eventTick, stateSent, func(m *stateMachine, i interface{}) (id stateID, err error) {
|
||||
return stateDataParsed, nil
|
||||
})
|
||||
if len(smm.handlers[stateSent]) != 1 {
|
||||
t.Errorf("unexpected size, got: %v, want: %v", len(smm.handlers[stateSent]), 1)
|
||||
}
|
||||
assert.Equal(t, 1, len(smm.handlers[stateSent]), "Unexpected size")
|
||||
state, err = smm.handlers[stateSent][eventTick](nil, nil)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if state != stateDataParsed {
|
||||
t.Errorf("unexpected state, got: %v, want: %v", state, stateScheduled)
|
||||
}
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, stateDataParsed, state, "Unexpected state")
|
||||
|
||||
// Add another handler to existing event/state pair. Should have no effect.
|
||||
smm.addEventHandler(eventTick, stateSent, func(m *stateMachine, i interface{}) (id stateID, err error) {
|
||||
return stateSkipped, nil
|
||||
})
|
||||
if len(smm.handlers[stateSent]) != 1 {
|
||||
t.Errorf("unexpected size, got: %v, want: %v", len(smm.handlers[stateSent]), 1)
|
||||
}
|
||||
assert.Equal(t, 1, len(smm.handlers[stateSent]), "Unexpected size")
|
||||
state, err = smm.handlers[stateSent][eventTick](nil, nil)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
assert.NoError(t, err)
|
||||
// No effect, previous handler worked.
|
||||
if state != stateDataParsed {
|
||||
t.Errorf("unexpected state, got: %v, want: %v", state, stateScheduled)
|
||||
}
|
||||
assert.Equal(t, stateDataParsed, state, "Unexpected state")
|
||||
}
|
||||
|
||||
func TestStateMachine_trigger(t *testing.T) {
|
||||
@@ -242,28 +218,20 @@ func TestStateMachineManager_QueueLoop(t *testing.T) {
|
||||
|
||||
return stateScheduled, nil
|
||||
})
|
||||
if len(smm.handlers) != 4 {
|
||||
t.Errorf("unexpected number of state events, want: %v, got: %v", 4, len(smm.handlers))
|
||||
}
|
||||
assert.Equal(t, 4, len(smm.handlers), "Unexpected number of state events")
|
||||
smm.addStateMachine(64)
|
||||
smm.addStateMachine(512)
|
||||
|
||||
assertState := func(startBlock uint64, state stateID) {
|
||||
fsm, ok := smm.findStateMachine(startBlock)
|
||||
if !ok {
|
||||
t.Fatalf("state machine not found: %v", startBlock)
|
||||
}
|
||||
if fsm.state != state {
|
||||
t.Errorf("unexpected state machine state, want: %v, got: %v", state, fsm.state)
|
||||
}
|
||||
require.Equal(t, true, ok, "State machine not found")
|
||||
assert.Equal(t, state, fsm.state, "Unexpected state machine state")
|
||||
}
|
||||
|
||||
triggerTickEvent := func() {
|
||||
for _, fsm := range smm.machines {
|
||||
data := 42
|
||||
if err := fsm.trigger(eventTick, data); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
assert.NoError(t, fsm.trigger(eventTick, data))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -292,13 +260,9 @@ func TestStateMachineManager_removeStateMachine(t *testing.T) {
|
||||
if _, ok := smm.findStateMachine(64); !ok {
|
||||
t.Error("expected machine not found")
|
||||
}
|
||||
expectedError := fmt.Errorf("state for machine %v is not found", 65)
|
||||
if err := smm.removeStateMachine(65); err == nil || err.Error() != expectedError.Error() {
|
||||
t.Errorf("expected error (%v), got: %v", expectedError, err)
|
||||
}
|
||||
if err := smm.removeStateMachine(64); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
expectedError := fmt.Sprintf("state for machine %v is not found", 65)
|
||||
assert.ErrorContains(t, expectedError, smm.removeStateMachine(65))
|
||||
assert.NoError(t, smm.removeStateMachine(64))
|
||||
if _, ok := smm.findStateMachine(64); ok {
|
||||
t.Error("unexpected machine found")
|
||||
}
|
||||
@@ -310,24 +274,13 @@ func TestStateMachineManager_removeAllStateMachines(t *testing.T) {
|
||||
smm.addStateMachine(128)
|
||||
smm.addStateMachine(196)
|
||||
keys := []uint64{64, 128, 196}
|
||||
if !reflect.DeepEqual(keys, smm.keys) {
|
||||
t.Errorf("keys not sorted, want: %v, got: %v", keys, smm.keys)
|
||||
}
|
||||
if len(smm.machines) != 3 {
|
||||
t.Errorf("unexpected list size: %v", len(smm.machines))
|
||||
}
|
||||
|
||||
if err := smm.removeAllStateMachines(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
assert.DeepEqual(t, smm.keys, keys, "Keys not sorted")
|
||||
assert.Equal(t, 3, len(smm.machines), "Unexpected list size")
|
||||
assert.NoError(t, smm.removeAllStateMachines())
|
||||
|
||||
keys = []uint64{}
|
||||
if !reflect.DeepEqual(keys, smm.keys) {
|
||||
t.Errorf("unexpected keys, want: %v, got: %v", keys, smm.keys)
|
||||
}
|
||||
if len(smm.machines) != 0 {
|
||||
t.Error("expected empty list")
|
||||
}
|
||||
assert.DeepEqual(t, smm.keys, keys, "Unexpected keys")
|
||||
assert.Equal(t, 0, len(smm.machines), "Expected empty list")
|
||||
}
|
||||
|
||||
func TestStateMachineManager_findStateMachine(t *testing.T) {
|
||||
@@ -350,36 +303,23 @@ func TestStateMachineManager_findStateMachine(t *testing.T) {
|
||||
t.Errorf("unexpected start block: %v, want: %v", fsm.start, 512)
|
||||
}
|
||||
keys := []uint64{64, 128, 196, 256, 512}
|
||||
if !reflect.DeepEqual(keys, smm.keys) {
|
||||
t.Errorf("keys not sorted, want: %v, got: %v", keys, smm.keys)
|
||||
}
|
||||
assert.DeepEqual(t, smm.keys, keys, "Keys not sorted")
|
||||
}
|
||||
|
||||
func TestStateMachineManager_highestStartBlock(t *testing.T) {
|
||||
smm := newStateMachineManager()
|
||||
if _, err := smm.highestStartBlock(); err == nil {
|
||||
t.Error("expected error")
|
||||
}
|
||||
_, err := smm.highestStartBlock()
|
||||
assert.ErrorContains(t, "no state machine exist", err)
|
||||
smm.addStateMachine(64)
|
||||
smm.addStateMachine(128)
|
||||
smm.addStateMachine(196)
|
||||
start, err := smm.highestStartBlock()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if start != 196 {
|
||||
t.Errorf("incorrect highest start block: %v, want: %v", start, 196)
|
||||
}
|
||||
if err := smm.removeStateMachine(196); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(196), start, "Incorrect highest start block")
|
||||
assert.NoError(t, smm.removeStateMachine(196))
|
||||
start, err = smm.highestStartBlock()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if start != 128 {
|
||||
t.Errorf("incorrect highest start block: %v, want: %v", start, 128)
|
||||
}
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(128), start, "Incorrect highest start block")
|
||||
}
|
||||
|
||||
func TestStateMachineManager_allMachinesInState(t *testing.T) {
|
||||
@@ -469,14 +409,10 @@ func TestStateMachineManager_allMachinesInState(t *testing.T) {
|
||||
|
||||
func TestStateMachine_isFirstLast(t *testing.T) {
|
||||
checkFirst := func(m *stateMachine, want bool) {
|
||||
if m.isFirst() != want {
|
||||
t.Errorf("isFirst() returned unexpected value, want: %v, got: %v", want, m.start)
|
||||
}
|
||||
assert.Equal(t, want, m.isFirst(), "isFirst() returned unexpected value")
|
||||
}
|
||||
checkLast := func(m *stateMachine, want bool) {
|
||||
if m.isLast() != want {
|
||||
t.Errorf("isLast(%v) returned unexpected value, want: %v, got: %v", m.start, want, m.start)
|
||||
}
|
||||
assert.Equal(t, want, m.isLast(), "isLast() returned unexpected value")
|
||||
}
|
||||
smm := newStateMachineManager()
|
||||
m1 := smm.addStateMachine(64)
|
||||
@@ -522,7 +458,5 @@ func TestStateMachine_isFirstLast(t *testing.T) {
|
||||
checkLast(m5, false)
|
||||
|
||||
keys := []uint64{32, 64, 128, 196, 512}
|
||||
if !reflect.DeepEqual(keys, smm.keys) {
|
||||
t.Errorf("keys not sorted, want: %v, got: %v", keys, smm.keys)
|
||||
}
|
||||
assert.DeepEqual(t, smm.keys, keys, "Keys not sorted")
|
||||
}
|
||||
|
||||
@@ -5,7 +5,6 @@ import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"reflect"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -29,6 +28,8 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/roughtime"
|
||||
"github.com/prysmaticlabs/prysm/shared/sliceutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
@@ -78,18 +79,11 @@ func initializeTestServices(t *testing.T, blocks []uint64, peers []*peerData) (*
|
||||
genesisRoot := cache.rootCache[0]
|
||||
cache.RUnlock()
|
||||
|
||||
err := beaconDB.SaveBlock(context.Background(), ð.SignedBeaconBlock{
|
||||
Block: ð.BeaconBlock{
|
||||
Slot: 0,
|
||||
}})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
err := beaconDB.SaveBlock(context.Background(), ð.SignedBeaconBlock{Block: ð.BeaconBlock{Slot: 0}})
|
||||
require.NoError(t, err)
|
||||
|
||||
st, err := stateTrie.InitializeFromProto(&p2ppb.BeaconState{})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
|
||||
return &mock.ChainService{
|
||||
State: st,
|
||||
@@ -107,9 +101,7 @@ func makeGenesisTime(currentSlot uint64) time.Time {
|
||||
func TestMakeGenesisTime(t *testing.T) {
|
||||
currentSlot := uint64(64)
|
||||
gt := makeGenesisTime(currentSlot)
|
||||
if helpers.SlotsSince(gt) != currentSlot {
|
||||
t.Fatalf("Wanted %d, got %d", currentSlot, helpers.SlotsSince(gt))
|
||||
}
|
||||
require.Equal(t, currentSlot, helpers.SlotsSince(gt))
|
||||
}
|
||||
|
||||
// helper function for sequences of block slots
|
||||
@@ -135,9 +127,7 @@ func (c *testCache) initializeRootCache(reqSlots []uint64, t *testing.T) {
|
||||
Slot: 0,
|
||||
}
|
||||
genesisRoot, err := stateutil.BlockRoot(genesisBlock)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
c.rootCache[0] = genesisRoot
|
||||
parentRoot := genesisRoot
|
||||
for _, slot := range reqSlots {
|
||||
@@ -146,9 +136,7 @@ func (c *testCache) initializeRootCache(reqSlots []uint64, t *testing.T) {
|
||||
ParentRoot: parentRoot[:],
|
||||
}
|
||||
parentRoot, err = stateutil.BlockRoot(currentBlock)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
c.rootCache[slot] = parentRoot
|
||||
c.parentSlotCache[slot] = parentSlot
|
||||
parentSlot = slot
|
||||
@@ -159,9 +147,7 @@ func (c *testCache) initializeRootCache(reqSlots []uint64, t *testing.T) {
|
||||
func TestMakeSequence(t *testing.T) {
|
||||
got := makeSequence(3, 5)
|
||||
want := []uint64{3, 4, 5}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Fatalf("Wanted %v, got %v", want, got)
|
||||
}
|
||||
require.DeepEqual(t, want, got)
|
||||
}
|
||||
|
||||
// Connect peers with local host. This method sets up peer statuses and the appropriate handlers
|
||||
@@ -183,9 +169,7 @@ func connectPeers(t *testing.T, host *p2pt.TestP2P, data []*peerData, peerStatus
|
||||
}()
|
||||
|
||||
req := &p2ppb.BeaconBlocksByRangeRequest{}
|
||||
if err := peer.Encoding().DecodeWithMaxLength(stream, req); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
assert.NoError(t, peer.Encoding().DecodeWithMaxLength(stream, req))
|
||||
|
||||
requestedBlocks := makeSequence(req.StartSlot, req.StartSlot+(req.Count*req.Step))
|
||||
|
||||
@@ -224,9 +208,7 @@ func connectPeers(t *testing.T, host *p2pt.TestP2P, data []*peerData, peerStatus
|
||||
}
|
||||
ret = append(ret, blk)
|
||||
currRoot, err := stateutil.BlockRoot(blk.Block)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
logrus.Tracef("block with slot %d , signing root %#x and parent root %#x", slot, currRoot, parentRoot)
|
||||
}
|
||||
|
||||
@@ -235,9 +217,7 @@ func connectPeers(t *testing.T, host *p2pt.TestP2P, data []*peerData, peerStatus
|
||||
}
|
||||
|
||||
for i := 0; i < len(ret); i++ {
|
||||
if err := beaconsync.WriteChunk(stream, peer.Encoding(), ret[i]); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
assert.NoError(t, beaconsync.WriteChunk(stream, peer.Encoding(), ret[i]))
|
||||
}
|
||||
})
|
||||
|
||||
|
||||
@@ -3,7 +3,6 @@ package initialsync
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
eth "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
@@ -16,6 +15,8 @@ import (
|
||||
p2ppb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/sliceutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
)
|
||||
|
||||
func TestConstants(t *testing.T) {
|
||||
@@ -239,18 +240,11 @@ func TestService_roundRobinSync(t *testing.T) {
|
||||
genesisRoot := cache.rootCache[0]
|
||||
cache.RUnlock()
|
||||
|
||||
err := beaconDB.SaveBlock(context.Background(), ð.SignedBeaconBlock{
|
||||
Block: ð.BeaconBlock{
|
||||
Slot: 0,
|
||||
}})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
err := beaconDB.SaveBlock(context.Background(), ð.SignedBeaconBlock{Block: ð.BeaconBlock{Slot: 0}})
|
||||
require.NoError(t, err)
|
||||
|
||||
st, err := stateTrie.InitializeFromProto(&p2ppb.BeaconState{})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
mc := &mock.ChainService{
|
||||
State: st,
|
||||
Root: genesisRoot[:],
|
||||
@@ -263,20 +257,17 @@ func TestService_roundRobinSync(t *testing.T) {
|
||||
synced: false,
|
||||
chainStarted: true,
|
||||
}
|
||||
if err := s.roundRobinSync(makeGenesisTime(tt.currentSlot)); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
assert.NoError(t, s.roundRobinSync(makeGenesisTime(tt.currentSlot)))
|
||||
if s.chain.HeadSlot() != tt.currentSlot {
|
||||
t.Errorf("Head slot (%d) is not currentSlot (%d)", s.chain.HeadSlot(), tt.currentSlot)
|
||||
}
|
||||
if len(mc.BlocksReceived) != len(tt.expectedBlockSlots) {
|
||||
t.Errorf("Processes wrong number of blocks. Wanted %d got %d", len(tt.expectedBlockSlots), len(mc.BlocksReceived))
|
||||
}
|
||||
assert.Equal(t, len(tt.expectedBlockSlots), len(mc.BlocksReceived), "Processes wrong number of blocks")
|
||||
var receivedBlockSlots []uint64
|
||||
for _, blk := range mc.BlocksReceived {
|
||||
receivedBlockSlots = append(receivedBlockSlots, blk.Block.Slot)
|
||||
}
|
||||
if missing := sliceutil.NotUint64(sliceutil.IntersectionUint64(tt.expectedBlockSlots, receivedBlockSlots), tt.expectedBlockSlots); len(missing) > 0 {
|
||||
missing := sliceutil.NotUint64(sliceutil.IntersectionUint64(tt.expectedBlockSlots, receivedBlockSlots), tt.expectedBlockSlots)
|
||||
if len(missing) > 0 {
|
||||
t.Errorf("Missing blocks at slots %v", missing)
|
||||
}
|
||||
})
|
||||
@@ -289,17 +280,11 @@ func TestService_processBlock(t *testing.T) {
|
||||
Slot: 0,
|
||||
}
|
||||
genesisBlkRoot, err := stateutil.BlockRoot(genesisBlk)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
err = beaconDB.SaveBlock(context.Background(), ð.SignedBeaconBlock{Block: genesisBlk})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
st, err := stateTrie.InitializeFromProto(&p2ppb.BeaconState{})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
s := NewInitialSync(&Config{
|
||||
P2P: p2pt.NewTestP2P(t),
|
||||
DB: beaconDB,
|
||||
@@ -320,9 +305,7 @@ func TestService_processBlock(t *testing.T) {
|
||||
},
|
||||
}
|
||||
blk1Root, err := stateutil.BlockRoot(blk1.Block)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
blk2 := ð.SignedBeaconBlock{
|
||||
Block: ð.BeaconBlock{
|
||||
Slot: 2,
|
||||
@@ -333,14 +316,10 @@ func TestService_processBlock(t *testing.T) {
|
||||
// Process block normally.
|
||||
err = s.processBlock(ctx, genesis, blk1, func(
|
||||
ctx context.Context, block *eth.SignedBeaconBlock, blockRoot [32]byte) error {
|
||||
if err := s.chain.ReceiveBlock(ctx, block, blockRoot); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
assert.NoError(t, s.chain.ReceiveBlock(ctx, block, blockRoot))
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Duplicate processing should trigger error.
|
||||
err = s.processBlock(ctx, genesis, blk1, func(
|
||||
@@ -348,25 +327,16 @@ func TestService_processBlock(t *testing.T) {
|
||||
return nil
|
||||
})
|
||||
expectedErr := fmt.Errorf("slot %d already processed", blk1.Block.Slot)
|
||||
if err == nil || err.Error() != expectedErr.Error() {
|
||||
t.Errorf("Expected error not thrown, want: %v, got: %v", expectedErr, err)
|
||||
}
|
||||
assert.ErrorContains(t, expectedErr.Error(), err)
|
||||
|
||||
// Continue normal processing, should proceed w/o errors.
|
||||
err = s.processBlock(ctx, genesis, blk2, func(
|
||||
ctx context.Context, block *eth.SignedBeaconBlock, blockRoot [32]byte) error {
|
||||
if err := s.chain.ReceiveBlock(ctx, block, blockRoot); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
assert.NoError(t, s.chain.ReceiveBlock(ctx, block, blockRoot))
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if s.chain.HeadSlot() != 2 {
|
||||
t.Errorf("Unexpected head slot, want: %d, got: %d", 2, s.chain.HeadSlot())
|
||||
}
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(2), s.chain.HeadSlot(), "Unexpected head slot")
|
||||
})
|
||||
}
|
||||
|
||||
@@ -376,17 +346,11 @@ func TestService_processBlockBatch(t *testing.T) {
|
||||
Slot: 0,
|
||||
}
|
||||
genesisBlkRoot, err := stateutil.BlockRoot(genesisBlk)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
err = beaconDB.SaveBlock(context.Background(), ð.SignedBeaconBlock{Block: genesisBlk})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
st, err := stateTrie.InitializeFromProto(&p2ppb.BeaconState{})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
s := NewInitialSync(&Config{
|
||||
P2P: p2pt.NewTestP2P(t),
|
||||
DB: beaconDB,
|
||||
@@ -411,13 +375,9 @@ func TestService_processBlockBatch(t *testing.T) {
|
||||
},
|
||||
}
|
||||
blk1Root, err := stateutil.BlockRoot(blk1.Block)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
err = beaconDB.SaveBlock(context.Background(), blk1)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
batch = append(batch, blk1)
|
||||
currBlockRoot = blk1Root
|
||||
}
|
||||
@@ -432,13 +392,9 @@ func TestService_processBlockBatch(t *testing.T) {
|
||||
},
|
||||
}
|
||||
blk1Root, err := stateutil.BlockRoot(blk1.Block)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
err = beaconDB.SaveBlock(context.Background(), blk1)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
batch2 = append(batch2, blk1)
|
||||
currBlockRoot = blk1Root
|
||||
}
|
||||
@@ -446,27 +402,20 @@ func TestService_processBlockBatch(t *testing.T) {
|
||||
// Process block normally.
|
||||
err = s.processBatchedBlocks(ctx, genesis, batch, func(
|
||||
ctx context.Context, blks []*eth.SignedBeaconBlock, blockRoots [][32]byte) error {
|
||||
if err := s.chain.ReceiveBlockBatch(ctx, blks, blockRoots); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
assert.NoError(t, s.chain.ReceiveBlockBatch(ctx, blks, blockRoots))
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Duplicate processing should trigger error.
|
||||
err = s.processBatchedBlocks(ctx, genesis, batch, func(
|
||||
ctx context.Context, blocks []*eth.SignedBeaconBlock, blockRoots [][32]byte) error {
|
||||
return nil
|
||||
})
|
||||
expectedErr := fmt.Errorf("no good blocks in batch")
|
||||
if err == nil || err.Error() != expectedErr.Error() {
|
||||
t.Errorf("Expected error not thrown, want: %v, got: %v", expectedErr, err)
|
||||
}
|
||||
|
||||
badBatch2 := []*eth.SignedBeaconBlock{}
|
||||
expectedErr := fmt.Sprintf("no good blocks in batch")
|
||||
assert.ErrorContains(t, expectedErr, err)
|
||||
|
||||
var badBatch2 []*eth.SignedBeaconBlock
|
||||
for i, b := range batch2 {
|
||||
// create a non-linear batch
|
||||
if i%3 == 0 && i != 0 {
|
||||
@@ -481,24 +430,15 @@ func TestService_processBlockBatch(t *testing.T) {
|
||||
return nil
|
||||
})
|
||||
expectedSubErr := "expected linear block list"
|
||||
if err == nil || !strings.Contains(err.Error(), expectedSubErr) {
|
||||
t.Errorf("Expected error not thrown, wanted error to include: %v, got: %v", expectedSubErr, err)
|
||||
}
|
||||
assert.ErrorContains(t, expectedSubErr, err)
|
||||
|
||||
// Continue normal processing, should proceed w/o errors.
|
||||
err = s.processBatchedBlocks(ctx, genesis, batch2, func(
|
||||
ctx context.Context, blks []*eth.SignedBeaconBlock, blockRoots [][32]byte) error {
|
||||
if err := s.chain.ReceiveBlockBatch(ctx, blks, blockRoots); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
assert.NoError(t, s.chain.ReceiveBlockBatch(ctx, blks, blockRoots))
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if s.chain.HeadSlot() != 19 {
|
||||
t.Errorf("Unexpected head slot, want: %d, got: %d", 2, s.chain.HeadSlot())
|
||||
}
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(19), s.chain.HeadSlot(), "Unexpected head slot")
|
||||
})
|
||||
}
|
||||
|
||||
@@ -2,7 +2,6 @@ package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -26,6 +25,8 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/roughtime"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
|
||||
@@ -35,9 +36,7 @@ func TestProcessPendingAtts_NoBlockRequestBlock(t *testing.T) {
|
||||
p1 := p2ptest.NewTestP2P(t)
|
||||
p2 := p2ptest.NewTestP2P(t)
|
||||
p1.Connect(p2)
|
||||
if len(p1.BHost.Network().Peers()) != 1 {
|
||||
t.Error("Expected peers to be connected")
|
||||
}
|
||||
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
|
||||
p1.Peers().Add(new(enr.Record), p2.PeerID(), nil, network.DirOutbound)
|
||||
p1.Peers().SetConnectionState(p2.PeerID(), peers.PeerConnected)
|
||||
p1.Peers().SetChainState(p2.PeerID(), &pb.Status{})
|
||||
@@ -52,10 +51,7 @@ func TestProcessPendingAtts_NoBlockRequestBlock(t *testing.T) {
|
||||
|
||||
a := ðpb.AggregateAttestationAndProof{Aggregate: ðpb.Attestation{Data: ðpb.AttestationData{Target: ðpb.Checkpoint{}}}}
|
||||
r.blkRootToPendingAtts[[32]byte{'A'}] = []*ethpb.SignedAggregateAttestationAndProof{{Message: a}}
|
||||
if err := r.processPendingAtts(context.Background()); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
require.NoError(t, r.processPendingAtts(context.Background()))
|
||||
testutil.AssertLogsContain(t, hook, "Requesting block for pending attestation")
|
||||
}
|
||||
|
||||
@@ -84,32 +80,17 @@ func TestProcessPendingAtts_HasBlockSaveUnAggregatedAtt(t *testing.T) {
|
||||
|
||||
b := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{}}
|
||||
r32, err := stateutil.BlockRoot(b.Block)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
s := testutil.NewBeaconState()
|
||||
if err := r.db.SaveBlock(context.Background(), b); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := r.db.SaveState(context.Background(), s, r32); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, r.db.SaveBlock(context.Background(), b))
|
||||
require.NoError(t, r.db.SaveState(context.Background(), s, r32))
|
||||
|
||||
r.blkRootToPendingAtts[r32] = []*ethpb.SignedAggregateAttestationAndProof{{Message: a}}
|
||||
if err := r.processPendingAtts(context.Background()); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if len(r.attPool.UnaggregatedAttestations()) != 1 {
|
||||
t.Error("Did not save unaggregated att")
|
||||
}
|
||||
if !reflect.DeepEqual(r.attPool.UnaggregatedAttestations()[0], a.Aggregate) {
|
||||
t.Error("Incorrect saved att")
|
||||
}
|
||||
if len(r.attPool.AggregatedAttestations()) != 0 {
|
||||
t.Error("Did save aggregated att")
|
||||
}
|
||||
require.NoError(t, r.processPendingAtts(context.Background()))
|
||||
|
||||
assert.Equal(t, 1, len(r.attPool.UnaggregatedAttestations()), "Did not save unaggregated att")
|
||||
assert.DeepEqual(t, a.Aggregate, r.attPool.UnaggregatedAttestations()[0], "Incorrect saved att")
|
||||
assert.Equal(t, 0, len(r.attPool.AggregatedAttestations()), "Did save aggregated att")
|
||||
testutil.AssertLogsContain(t, hook, "Verified and saved pending attestations to pool")
|
||||
}
|
||||
|
||||
@@ -122,13 +103,9 @@ func TestProcessPendingAtts_HasBlockSaveAggregatedAtt(t *testing.T) {
|
||||
beaconState, privKeys := testutil.DeterministicGenesisState(t, validators)
|
||||
|
||||
sb := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{}}
|
||||
if err := db.SaveBlock(context.Background(), sb); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, db.SaveBlock(context.Background(), sb))
|
||||
root, err := stateutil.BlockRoot(sb.Block)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
|
||||
aggBits := bitfield.NewBitlist(3)
|
||||
aggBits.SetBitAt(0, true)
|
||||
@@ -143,21 +120,13 @@ func TestProcessPendingAtts_HasBlockSaveAggregatedAtt(t *testing.T) {
|
||||
}
|
||||
|
||||
committee, err := helpers.BeaconCommitteeFromState(beaconState, att.Data.Slot, att.Data.CommitteeIndex)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
assert.NoError(t, err)
|
||||
attestingIndices := attestationutil.AttestingIndices(att.AggregationBits, committee)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
assert.NoError(t, err)
|
||||
attesterDomain, err := helpers.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorRoot())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
hashTreeRoot, err := helpers.ComputeSigningRoot(att.Data, attesterDomain)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
assert.NoError(t, err)
|
||||
sigs := make([]bls.Signature, len(attestingIndices))
|
||||
for i, indice := range attestingIndices {
|
||||
sig := privKeys[indice].Sign(hashTreeRoot[:])
|
||||
@@ -166,13 +135,9 @@ func TestProcessPendingAtts_HasBlockSaveAggregatedAtt(t *testing.T) {
|
||||
att.Signature = bls.AggregateSignatures(sigs).Marshal()[:]
|
||||
|
||||
selectionDomain, err := helpers.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainSelectionProof, beaconState.GenesisValidatorRoot())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
slotRoot, err := helpers.ComputeSigningRoot(att.Data.Slot, selectionDomain)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
// Arbitrary aggregator index for testing purposes.
|
||||
aggregatorIndex := committee[0]
|
||||
sig := privKeys[aggregatorIndex].Sign(slotRoot[:])
|
||||
@@ -182,18 +147,12 @@ func TestProcessPendingAtts_HasBlockSaveAggregatedAtt(t *testing.T) {
|
||||
AggregatorIndex: aggregatorIndex,
|
||||
}
|
||||
attesterDomain, err = helpers.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainAggregateAndProof, beaconState.GenesisValidatorRoot())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
signingRoot, err := helpers.ComputeSigningRoot(aggregateAndProof, attesterDomain)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
assert.NoError(t, err)
|
||||
aggreSig := privKeys[aggregatorIndex].Sign(signingRoot[:]).Marshal()
|
||||
|
||||
if err := beaconState.SetGenesisTime(uint64(time.Now().Unix())); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, beaconState.SetGenesisTime(uint64(time.Now().Unix())))
|
||||
|
||||
r := &Service{
|
||||
p2p: p1,
|
||||
@@ -210,32 +169,17 @@ func TestProcessPendingAtts_HasBlockSaveAggregatedAtt(t *testing.T) {
|
||||
|
||||
sb = ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{}}
|
||||
r32, err := stateutil.BlockRoot(sb.Block)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := r.db.SaveBlock(context.Background(), sb); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, r.db.SaveBlock(context.Background(), sb))
|
||||
s := testutil.NewBeaconState()
|
||||
if err := r.db.SaveState(context.Background(), s, r32); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, r.db.SaveState(context.Background(), s, r32))
|
||||
|
||||
r.blkRootToPendingAtts[r32] = []*ethpb.SignedAggregateAttestationAndProof{{Message: aggregateAndProof, Signature: aggreSig}}
|
||||
if err := r.processPendingAtts(context.Background()); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if len(r.attPool.AggregatedAttestations()) != 1 {
|
||||
t.Fatal("Did not save aggregated att")
|
||||
}
|
||||
if !reflect.DeepEqual(r.attPool.AggregatedAttestations()[0], att) {
|
||||
t.Error("Incorrect saved att")
|
||||
}
|
||||
if len(r.attPool.UnaggregatedAttestations()) != 0 {
|
||||
t.Error("Did save unaggregated att")
|
||||
}
|
||||
require.NoError(t, r.processPendingAtts(context.Background()))
|
||||
|
||||
assert.Equal(t, 1, len(r.attPool.AggregatedAttestations()), "Did not save aggregated att")
|
||||
assert.DeepEqual(t, att, r.attPool.AggregatedAttestations()[0], "Incorrect saved att")
|
||||
assert.Equal(t, 0, len(r.attPool.UnaggregatedAttestations()), "Did save unaggregated att")
|
||||
testutil.AssertLogsContain(t, hook, "Verified and saved pending attestations to pool")
|
||||
}
|
||||
|
||||
@@ -264,42 +208,22 @@ func TestValidatePendingAtts_CanPruneOldAtts(t *testing.T) {
|
||||
Data: ðpb.AttestationData{Slot: uint64(i), BeaconBlockRoot: r3[:]}}}})
|
||||
}
|
||||
|
||||
if len(s.blkRootToPendingAtts[r1]) != 100 {
|
||||
t.Error("Did not save pending atts")
|
||||
}
|
||||
if len(s.blkRootToPendingAtts[r2]) != 100 {
|
||||
t.Error("Did not save pending atts")
|
||||
}
|
||||
if len(s.blkRootToPendingAtts[r3]) != 100 {
|
||||
t.Error("Did not save pending atts")
|
||||
}
|
||||
assert.Equal(t, 100, len(s.blkRootToPendingAtts[r1]), "Did not save pending atts")
|
||||
assert.Equal(t, 100, len(s.blkRootToPendingAtts[r2]), "Did not save pending atts")
|
||||
assert.Equal(t, 100, len(s.blkRootToPendingAtts[r3]), "Did not save pending atts")
|
||||
|
||||
// Set current slot to 50, it should prune 19 attestations. (50 - 31)
|
||||
s.validatePendingAtts(context.Background(), 50)
|
||||
if len(s.blkRootToPendingAtts[r1]) != 81 {
|
||||
t.Error("Did not delete pending atts")
|
||||
}
|
||||
if len(s.blkRootToPendingAtts[r2]) != 81 {
|
||||
t.Error("Did not delete pending atts")
|
||||
}
|
||||
if len(s.blkRootToPendingAtts[r3]) != 81 {
|
||||
t.Error("Did not delete pending atts")
|
||||
}
|
||||
assert.Equal(t, 81, len(s.blkRootToPendingAtts[r1]), "Did not delete pending atts")
|
||||
assert.Equal(t, 81, len(s.blkRootToPendingAtts[r2]), "Did not delete pending atts")
|
||||
assert.Equal(t, 81, len(s.blkRootToPendingAtts[r3]), "Did not delete pending atts")
|
||||
|
||||
// Set current slot to 100 + slot_duration, it should prune all the attestations.
|
||||
s.validatePendingAtts(context.Background(), 100+params.BeaconConfig().SlotsPerEpoch)
|
||||
if len(s.blkRootToPendingAtts[r1]) != 0 {
|
||||
t.Error("Did not delete pending atts")
|
||||
}
|
||||
if len(s.blkRootToPendingAtts[r2]) != 0 {
|
||||
t.Error("Did not delete pending atts")
|
||||
}
|
||||
if len(s.blkRootToPendingAtts[r3]) != 0 {
|
||||
t.Error("Did not delete pending atts")
|
||||
}
|
||||
assert.Equal(t, 0, len(s.blkRootToPendingAtts[r1]), "Did not delete pending atts")
|
||||
assert.Equal(t, 0, len(s.blkRootToPendingAtts[r2]), "Did not delete pending atts")
|
||||
assert.Equal(t, 0, len(s.blkRootToPendingAtts[r3]), "Did not delete pending atts")
|
||||
|
||||
// Verify the keys are deleted.
|
||||
if len(s.blkRootToPendingAtts) != 0 {
|
||||
t.Error("Did not delete block keys")
|
||||
}
|
||||
assert.Equal(t, 0, len(s.blkRootToPendingAtts), "Did not delete block keys")
|
||||
}
|
||||
|
||||
@@ -3,7 +3,6 @@ package sync
|
||||
import (
|
||||
"context"
|
||||
"math"
|
||||
"reflect"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
@@ -18,6 +17,8 @@ import (
|
||||
p2ptest "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
)
|
||||
|
||||
// /- b1 - b2
|
||||
@@ -41,58 +42,34 @@ func TestRegularSyncBeaconBlockSubscriber_ProcessPendingBlocks1(t *testing.T) {
|
||||
}
|
||||
|
||||
b0 := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{}}
|
||||
if err := r.db.SaveBlock(context.Background(), b0); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, r.db.SaveBlock(context.Background(), b0))
|
||||
b0Root, err := stateutil.BlockRoot(b0.Block)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
b3 := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: 3, ParentRoot: b0Root[:]}}
|
||||
if err := r.db.SaveBlock(context.Background(), b3); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, r.db.SaveBlock(context.Background(), b3))
|
||||
// Incomplete block link
|
||||
b1 := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: 1, ParentRoot: b0Root[:]}}
|
||||
b1Root, err := stateutil.BlockRoot(b1.Block)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
b2 := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: 2, ParentRoot: b1Root[:]}}
|
||||
b2Root, err := stateutil.BlockRoot(b1.Block)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
|
||||
// Add b2 to the cache
|
||||
r.slotToPendingBlocks[b2.Block.Slot] = b2
|
||||
r.seenPendingBlocks[b2Root] = true
|
||||
|
||||
if err := r.processPendingBlocks(context.Background()); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(r.slotToPendingBlocks) != 1 {
|
||||
t.Errorf("Incorrect size for slot to pending blocks cache: got %d", len(r.slotToPendingBlocks))
|
||||
}
|
||||
if len(r.seenPendingBlocks) != 1 {
|
||||
t.Errorf("Incorrect size for seen pending block: got %d", len(r.seenPendingBlocks))
|
||||
}
|
||||
require.NoError(t, r.processPendingBlocks(context.Background()))
|
||||
assert.Equal(t, 1, len(r.slotToPendingBlocks), "Incorrect size for slot to pending blocks cache")
|
||||
assert.Equal(t, 1, len(r.seenPendingBlocks), "Incorrect size for seen pending block")
|
||||
|
||||
// Add b1 to the cache
|
||||
r.slotToPendingBlocks[b1.Block.Slot] = b1
|
||||
r.seenPendingBlocks[b1Root] = true
|
||||
if err := r.db.SaveBlock(context.Background(), b1); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := r.processPendingBlocks(context.Background()); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(r.slotToPendingBlocks) != 0 {
|
||||
t.Errorf("Incorrect size for slot to pending blocks cache: got %d", len(r.slotToPendingBlocks))
|
||||
}
|
||||
if len(r.seenPendingBlocks) != 0 {
|
||||
t.Errorf("Incorrect size for seen pending block: got %d", len(r.seenPendingBlocks))
|
||||
}
|
||||
require.NoError(t, r.db.SaveBlock(context.Background(), b1))
|
||||
require.NoError(t, r.processPendingBlocks(context.Background()))
|
||||
assert.Equal(t, 0, len(r.slotToPendingBlocks), "Incorrect size for slot to pending blocks cache")
|
||||
assert.Equal(t, 0, len(r.seenPendingBlocks), "Incorrect size for seen pending block")
|
||||
}
|
||||
|
||||
// /- b1 - b2 - b5
|
||||
@@ -104,18 +81,14 @@ func TestRegularSyncBeaconBlockSubscriber_ProcessPendingBlocks2(t *testing.T) {
|
||||
p1 := p2ptest.NewTestP2P(t)
|
||||
p2 := p2ptest.NewTestP2P(t)
|
||||
p1.Connect(p2)
|
||||
if len(p1.BHost.Network().Peers()) != 1 {
|
||||
t.Error("Expected peers to be connected")
|
||||
}
|
||||
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
|
||||
pcl := protocol.ID("/eth2/beacon_chain/req/hello/1/ssz_snappy")
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
|
||||
defer wg.Done()
|
||||
code, errMsg, err := ReadStatusCode(stream, p1.Encoding())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
assert.NoError(t, err)
|
||||
if code == 0 {
|
||||
t.Error("Expected a non-zero code")
|
||||
}
|
||||
@@ -141,92 +114,53 @@ func TestRegularSyncBeaconBlockSubscriber_ProcessPendingBlocks2(t *testing.T) {
|
||||
p1.Peers().SetChainState(p2.PeerID(), &pb.Status{})
|
||||
|
||||
b0 := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{}}
|
||||
if err := r.db.SaveBlock(context.Background(), b0); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, r.db.SaveBlock(context.Background(), b0))
|
||||
b0Root, err := stateutil.BlockRoot(b0.Block)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
b1 := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: 1, ParentRoot: b0Root[:]}}
|
||||
if err := r.db.SaveBlock(context.Background(), b1); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, r.db.SaveBlock(context.Background(), b1))
|
||||
b1Root, err := stateutil.BlockRoot(b1.Block)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
|
||||
// Incomplete block links
|
||||
b2 := ðpb.BeaconBlock{Slot: 2, ParentRoot: b1Root[:]}
|
||||
b2Root, err := ssz.HashTreeRoot(b2)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
b5 := ðpb.BeaconBlock{Slot: 5, ParentRoot: b2Root[:]}
|
||||
b5Root, err := ssz.HashTreeRoot(b5)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
b3 := ðpb.BeaconBlock{Slot: 3, ParentRoot: b0Root[:]}
|
||||
b3Root, err := ssz.HashTreeRoot(b3)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
b4 := ðpb.BeaconBlock{Slot: 4, ParentRoot: b3Root[:]}
|
||||
b4Root, err := ssz.HashTreeRoot(b4)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
|
||||
r.slotToPendingBlocks[b4.Slot] = ðpb.SignedBeaconBlock{Block: b4}
|
||||
r.seenPendingBlocks[b4Root] = true
|
||||
r.slotToPendingBlocks[b5.Slot] = ðpb.SignedBeaconBlock{Block: b5}
|
||||
r.seenPendingBlocks[b5Root] = true
|
||||
|
||||
if err := r.processPendingBlocks(context.Background()); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(r.slotToPendingBlocks) != 2 {
|
||||
t.Errorf("Incorrect size for slot to pending blocks cache: got %d", len(r.slotToPendingBlocks))
|
||||
}
|
||||
if len(r.seenPendingBlocks) != 2 {
|
||||
t.Errorf("Incorrect size for seen pending block: got %d", len(r.seenPendingBlocks))
|
||||
}
|
||||
require.NoError(t, r.processPendingBlocks(context.Background()))
|
||||
assert.Equal(t, 2, len(r.slotToPendingBlocks), "Incorrect size for slot to pending blocks cache")
|
||||
assert.Equal(t, 2, len(r.seenPendingBlocks), "Incorrect size for seen pending block")
|
||||
|
||||
// Add b3 to the cache
|
||||
r.slotToPendingBlocks[b3.Slot] = ðpb.SignedBeaconBlock{Block: b3}
|
||||
r.seenPendingBlocks[b3Root] = true
|
||||
if err := r.db.SaveBlock(context.Background(), ðpb.SignedBeaconBlock{Block: b3}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := r.processPendingBlocks(context.Background()); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(r.slotToPendingBlocks) != 1 {
|
||||
t.Errorf("Incorrect size for slot to pending blocks cache: got %d", len(r.slotToPendingBlocks))
|
||||
}
|
||||
if len(r.seenPendingBlocks) != 1 {
|
||||
t.Errorf("Incorrect size for seen pending block: got %d", len(r.seenPendingBlocks))
|
||||
}
|
||||
require.NoError(t, r.db.SaveBlock(context.Background(), ðpb.SignedBeaconBlock{Block: b3}))
|
||||
require.NoError(t, r.processPendingBlocks(context.Background()))
|
||||
assert.Equal(t, 1, len(r.slotToPendingBlocks), "Incorrect size for slot to pending blocks cache")
|
||||
assert.Equal(t, 1, len(r.seenPendingBlocks), "Incorrect size for seen pending block")
|
||||
|
||||
// Add b2 to the cache
|
||||
r.slotToPendingBlocks[b2.Slot] = ðpb.SignedBeaconBlock{Block: b2}
|
||||
r.seenPendingBlocks[b2Root] = true
|
||||
|
||||
if err := r.db.SaveBlock(context.Background(), ðpb.SignedBeaconBlock{Block: b2}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := r.processPendingBlocks(context.Background()); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(r.slotToPendingBlocks) != 0 {
|
||||
t.Errorf("Incorrect size for slot to pending blocks cache: got %d", len(r.slotToPendingBlocks))
|
||||
}
|
||||
t.Log(r.seenPendingBlocks)
|
||||
if len(r.seenPendingBlocks) != 0 {
|
||||
t.Errorf("Incorrect size for seen pending block: got %d", len(r.seenPendingBlocks))
|
||||
}
|
||||
require.NoError(t, r.db.SaveBlock(context.Background(), ðpb.SignedBeaconBlock{Block: b2}))
|
||||
require.NoError(t, r.processPendingBlocks(context.Background()))
|
||||
assert.Equal(t, 0, len(r.slotToPendingBlocks), "Incorrect size for slot to pending blocks cache")
|
||||
assert.Equal(t, 0, len(r.seenPendingBlocks), "Incorrect size for seen pending block")
|
||||
}
|
||||
|
||||
func TestRegularSyncBeaconBlockSubscriber_PruneOldPendingBlocks(t *testing.T) {
|
||||
@@ -234,9 +168,7 @@ func TestRegularSyncBeaconBlockSubscriber_PruneOldPendingBlocks(t *testing.T) {
|
||||
p1 := p2ptest.NewTestP2P(t)
|
||||
p2 := p2ptest.NewTestP2P(t)
|
||||
p1.Connect(p2)
|
||||
if len(p1.BHost.Network().Peers()) != 1 {
|
||||
t.Error("Expected peers to be connected")
|
||||
}
|
||||
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
|
||||
|
||||
r := &Service{
|
||||
p2p: p1,
|
||||
@@ -254,43 +186,27 @@ func TestRegularSyncBeaconBlockSubscriber_PruneOldPendingBlocks(t *testing.T) {
|
||||
p1.Peers().SetChainState(p1.PeerID(), &pb.Status{})
|
||||
|
||||
b0 := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{}}
|
||||
if err := r.db.SaveBlock(context.Background(), b0); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, r.db.SaveBlock(context.Background(), b0))
|
||||
b0Root, err := stateutil.BlockRoot(b0.Block)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
b1 := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: 1, ParentRoot: b0Root[:]}}
|
||||
if err := r.db.SaveBlock(context.Background(), b1); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, r.db.SaveBlock(context.Background(), b1))
|
||||
b1Root, err := stateutil.BlockRoot(b1.Block)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
|
||||
// Incomplete block links
|
||||
b2 := ðpb.BeaconBlock{Slot: 2, ParentRoot: b1Root[:]}
|
||||
b2Root, err := ssz.HashTreeRoot(b2)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
b5 := ðpb.BeaconBlock{Slot: 5, ParentRoot: b2Root[:]}
|
||||
b5Root, err := ssz.HashTreeRoot(b5)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
b3 := ðpb.BeaconBlock{Slot: 3, ParentRoot: b0Root[:]}
|
||||
b3Root, err := ssz.HashTreeRoot(b3)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
b4 := ðpb.BeaconBlock{Slot: 4, ParentRoot: b3Root[:]}
|
||||
b4Root, err := ssz.HashTreeRoot(b4)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
|
||||
r.slotToPendingBlocks[b2.Slot] = ðpb.SignedBeaconBlock{Block: b2}
|
||||
r.seenPendingBlocks[b2Root] = true
|
||||
@@ -301,15 +217,9 @@ func TestRegularSyncBeaconBlockSubscriber_PruneOldPendingBlocks(t *testing.T) {
|
||||
r.slotToPendingBlocks[b5.Slot] = ðpb.SignedBeaconBlock{Block: b5}
|
||||
r.seenPendingBlocks[b5Root] = true
|
||||
|
||||
if err := r.processPendingBlocks(context.Background()); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(r.slotToPendingBlocks) != 0 {
|
||||
t.Errorf("Incorrect size for slot to pending blocks cache: got %d", len(r.slotToPendingBlocks))
|
||||
}
|
||||
if len(r.seenPendingBlocks) != 0 {
|
||||
t.Errorf("Incorrect size for seen pending block: got %d", len(r.seenPendingBlocks))
|
||||
}
|
||||
require.NoError(t, r.processPendingBlocks(context.Background()))
|
||||
assert.Equal(t, 0, len(r.slotToPendingBlocks), "Incorrect size for slot to pending blocks cache")
|
||||
assert.Equal(t, 0, len(r.seenPendingBlocks), "Incorrect size for seen pending block")
|
||||
}
|
||||
|
||||
func TestService_sortedPendingSlots(t *testing.T) {
|
||||
@@ -324,8 +234,5 @@ func TestService_sortedPendingSlots(t *testing.T) {
|
||||
r.slotToPendingBlocks[lastSlot-2] = ðpb.SignedBeaconBlock{}
|
||||
|
||||
want := []uint64{lastSlot - 5, lastSlot - 3, lastSlot - 2, lastSlot}
|
||||
got := r.sortedPendingSlots()
|
||||
if !reflect.DeepEqual(want, got) {
|
||||
t.Errorf("unexpected pending slots list, want: %v, got: %v", want, got)
|
||||
}
|
||||
assert.DeepEqual(t, want, r.sortedPendingSlots(), "Unexpected pending slots list")
|
||||
}
|
||||
|
||||
@@ -17,6 +17,8 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
|
||||
@@ -24,9 +26,7 @@ func TestRPCBeaconBlocksByRange_RPCHandlerReturnsBlocks(t *testing.T) {
|
||||
p1 := p2ptest.NewTestP2P(t)
|
||||
p2 := p2ptest.NewTestP2P(t)
|
||||
p1.Connect(p2)
|
||||
if len(p1.BHost.Network().Peers()) != 1 {
|
||||
t.Error("Expected peers to be connected")
|
||||
}
|
||||
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
|
||||
d, _ := db.SetupDB(t)
|
||||
|
||||
req := &pb.BeaconBlocksByRangeRequest{
|
||||
@@ -37,9 +37,7 @@ func TestRPCBeaconBlocksByRange_RPCHandlerReturnsBlocks(t *testing.T) {
|
||||
|
||||
// Populate the database with blocks that would match the request.
|
||||
for i := req.StartSlot; i < req.StartSlot+(req.Step*req.Count); i += req.Step {
|
||||
if err := d.SaveBlock(context.Background(), ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: i}}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, d.SaveBlock(context.Background(), ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: i}}))
|
||||
}
|
||||
|
||||
// Start service with 160 as allowed blocks capacity (and almost zero capacity recovery).
|
||||
@@ -54,9 +52,7 @@ func TestRPCBeaconBlocksByRange_RPCHandlerReturnsBlocks(t *testing.T) {
|
||||
for i := req.StartSlot; i < req.StartSlot+req.Count*req.Step; i += req.Step {
|
||||
expectSuccess(t, r, stream)
|
||||
res := ðpb.SignedBeaconBlock{}
|
||||
if err := r.p2p.Encoding().DecodeWithMaxLength(stream, res); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
assert.NoError(t, r.p2p.Encoding().DecodeWithMaxLength(stream, res))
|
||||
if (res.Block.Slot-req.StartSlot)%req.Step != 0 {
|
||||
t.Errorf("Received unexpected block slot %d", res.Block.Slot)
|
||||
}
|
||||
@@ -64,21 +60,15 @@ func TestRPCBeaconBlocksByRange_RPCHandlerReturnsBlocks(t *testing.T) {
|
||||
})
|
||||
|
||||
stream1, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
|
||||
err = r.beaconBlocksByRangeRPCHandler(context.Background(), req, stream1)
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error: %v", err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
|
||||
// Make sure that rate limiter doesn't limit capacity exceedingly.
|
||||
remainingCapacity := r.blocksRateLimiter.Remaining(p2.PeerID().String())
|
||||
expectedCapacity := int64(req.Count*10 - req.Count)
|
||||
if remainingCapacity != expectedCapacity {
|
||||
t.Fatalf("Unexpected rate limiting capacity, expected: %v, got: %v", expectedCapacity, remainingCapacity)
|
||||
}
|
||||
require.Equal(t, expectedCapacity, remainingCapacity, "Unexpected rate limiting capacity")
|
||||
|
||||
if testutil.WaitTimeout(&wg, 1*time.Second) {
|
||||
t.Fatal("Did not receive stream within 1 sec")
|
||||
@@ -89,9 +79,7 @@ func TestRPCBeaconBlocksByRange_RPCHandlerReturnsSortedBlocks(t *testing.T) {
|
||||
p1 := p2ptest.NewTestP2P(t)
|
||||
p2 := p2ptest.NewTestP2P(t)
|
||||
p1.Connect(p2)
|
||||
if len(p1.BHost.Network().Peers()) != 1 {
|
||||
t.Error("Expected peers to be connected")
|
||||
}
|
||||
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
|
||||
d, _ := db.SetupDB(t)
|
||||
|
||||
req := &pb.BeaconBlocksByRangeRequest{
|
||||
@@ -103,9 +91,7 @@ func TestRPCBeaconBlocksByRange_RPCHandlerReturnsSortedBlocks(t *testing.T) {
|
||||
endSlot := req.StartSlot + (req.Step * (req.Count - 1))
|
||||
// Populate the database with blocks that would match the request.
|
||||
for i := endSlot; i >= req.StartSlot; i -= req.Step {
|
||||
if err := d.SaveBlock(context.Background(), ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: i}}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, d.SaveBlock(context.Background(), ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: i}}))
|
||||
}
|
||||
|
||||
// Start service with 160 as allowed blocks capacity (and almost zero capacity recovery).
|
||||
@@ -121,9 +107,7 @@ func TestRPCBeaconBlocksByRange_RPCHandlerReturnsSortedBlocks(t *testing.T) {
|
||||
for i := req.StartSlot; i < req.StartSlot+req.Count*req.Step; i += req.Step {
|
||||
expectSuccess(t, r, stream)
|
||||
res := ðpb.SignedBeaconBlock{}
|
||||
if err := r.p2p.Encoding().DecodeWithMaxLength(stream, res); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
assert.NoError(t, r.p2p.Encoding().DecodeWithMaxLength(stream, res))
|
||||
if res.Block.Slot < prevSlot {
|
||||
t.Errorf("Received block is unsorted with slot %d lower than previous slot %d", res.Block.Slot, prevSlot)
|
||||
}
|
||||
@@ -132,14 +116,8 @@ func TestRPCBeaconBlocksByRange_RPCHandlerReturnsSortedBlocks(t *testing.T) {
|
||||
})
|
||||
|
||||
stream1, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
err = r.beaconBlocksByRangeRPCHandler(context.Background(), req, stream1)
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error: %v", err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, r.beaconBlocksByRangeRPCHandler(context.Background(), req, stream1))
|
||||
|
||||
if testutil.WaitTimeout(&wg, 1*time.Second) {
|
||||
t.Fatal("Did not receive stream within 1 sec")
|
||||
@@ -150,9 +128,7 @@ func TestRPCBeaconBlocksByRange_ReturnsGenesisBlock(t *testing.T) {
|
||||
p1 := p2ptest.NewTestP2P(t)
|
||||
p2 := p2ptest.NewTestP2P(t)
|
||||
p1.Connect(p2)
|
||||
if len(p1.BHost.Network().Peers()) != 1 {
|
||||
t.Error("Expected peers to be connected")
|
||||
}
|
||||
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
|
||||
d, _ := db.SetupDB(t)
|
||||
|
||||
req := &pb.BeaconBlocksByRangeRequest{
|
||||
@@ -166,16 +142,10 @@ func TestRPCBeaconBlocksByRange_ReturnsGenesisBlock(t *testing.T) {
|
||||
// Save genesis block
|
||||
if i == 0 {
|
||||
rt, err := stateutil.BlockRoot(ðpb.BeaconBlock{Slot: i})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := d.SaveGenesisBlockRoot(context.Background(), rt); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
if err := d.SaveBlock(context.Background(), ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: i}}); err != nil {
|
||||
t.Fatal(err)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, d.SaveGenesisBlockRoot(context.Background(), rt))
|
||||
}
|
||||
require.NoError(t, d.SaveBlock(context.Background(), ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: i}}))
|
||||
}
|
||||
|
||||
r := &Service{p2p: p1, db: d, blocksRateLimiter: leakybucket.NewCollector(10000, 10000, false), chain: &chainMock.ChainService{}}
|
||||
@@ -188,30 +158,18 @@ func TestRPCBeaconBlocksByRange_ReturnsGenesisBlock(t *testing.T) {
|
||||
// check for genesis block
|
||||
expectSuccess(t, r, stream)
|
||||
res := ðpb.SignedBeaconBlock{}
|
||||
if err := r.p2p.Encoding().DecodeWithMaxLength(stream, res); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if res.Block.Slot != 0 {
|
||||
t.Fatal("genesis block was not returned")
|
||||
}
|
||||
assert.NoError(t, r.p2p.Encoding().DecodeWithMaxLength(stream, res))
|
||||
assert.Equal(t, uint64(0), res.Block.Slot, "genesis block was not returned")
|
||||
for i := req.StartSlot + req.Step; i < req.Count*req.Step; i += req.Step {
|
||||
expectSuccess(t, r, stream)
|
||||
res := ðpb.SignedBeaconBlock{}
|
||||
if err := r.p2p.Encoding().DecodeWithMaxLength(stream, res); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
assert.NoError(t, r.p2p.Encoding().DecodeWithMaxLength(stream, res))
|
||||
}
|
||||
})
|
||||
|
||||
stream1, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
err = r.beaconBlocksByRangeRPCHandler(context.Background(), req, stream1)
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error: %v", err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, r.beaconBlocksByRangeRPCHandler(context.Background(), req, stream1))
|
||||
|
||||
if testutil.WaitTimeout(&wg, 1*time.Second) {
|
||||
t.Fatal("Did not receive stream within 1 sec")
|
||||
@@ -225,9 +183,7 @@ func TestRPCBeaconBlocksByRange_RPCHandlerRateLimitOverflow(t *testing.T) {
|
||||
// Populate the database with blocks that would match the request.
|
||||
for i := req.StartSlot; i < req.StartSlot+(req.Step*req.Count); i += req.Step {
|
||||
block := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: i}}
|
||||
if err := d.SaveBlock(context.Background(), block); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, d.SaveBlock(context.Background(), block))
|
||||
}
|
||||
}
|
||||
sendRequest := func(p1, p2 *p2ptest.TestP2P, r *Service,
|
||||
@@ -243,18 +199,14 @@ func TestRPCBeaconBlocksByRange_RPCHandlerRateLimitOverflow(t *testing.T) {
|
||||
for i := req.StartSlot; i < req.StartSlot+req.Count*req.Step; i += req.Step {
|
||||
expectSuccess(t, r, stream)
|
||||
res := ðpb.SignedBeaconBlock{}
|
||||
if err := r.p2p.Encoding().DecodeWithMaxLength(stream, res); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
assert.NoError(t, r.p2p.Encoding().DecodeWithMaxLength(stream, res))
|
||||
if (res.Block.Slot-req.StartSlot)%req.Step != 0 {
|
||||
t.Errorf("Received unexpected block slot %d", res.Block.Slot)
|
||||
}
|
||||
}
|
||||
})
|
||||
stream, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
if err = r.beaconBlocksByRangeRPCHandler(context.Background(), req, stream); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -268,9 +220,7 @@ func TestRPCBeaconBlocksByRange_RPCHandlerRateLimitOverflow(t *testing.T) {
|
||||
p1 := p2ptest.NewTestP2P(t)
|
||||
p2 := p2ptest.NewTestP2P(t)
|
||||
p1.Connect(p2)
|
||||
if len(p1.BHost.Network().Peers()) != 1 {
|
||||
t.Error("Expected peers to be connected")
|
||||
}
|
||||
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
|
||||
|
||||
capacity := int64(flags.Get().BlockBatchLimit * 3)
|
||||
r := &Service{p2p: p1, db: d, blocksRateLimiter: leakybucket.NewCollector(0.000001, capacity, false), chain: &chainMock.ChainService{}}
|
||||
@@ -283,25 +233,19 @@ func TestRPCBeaconBlocksByRange_RPCHandlerRateLimitOverflow(t *testing.T) {
|
||||
saveBlocks(req)
|
||||
|
||||
hook.Reset()
|
||||
if err := sendRequest(p1, p2, r, req, true); err != nil {
|
||||
t.Errorf("Unexpected error: %v", err)
|
||||
}
|
||||
assert.NoError(t, sendRequest(p1, p2, r, req, true))
|
||||
testutil.AssertLogsDoNotContain(t, hook, "Disconnecting bad peer")
|
||||
|
||||
remainingCapacity := r.blocksRateLimiter.Remaining(p2.PeerID().String())
|
||||
expectedCapacity := int64(0) // Whole capacity is used, but no overflow.
|
||||
if remainingCapacity != expectedCapacity {
|
||||
t.Fatalf("Unexpected rate limiting capacity, expected: %v, got: %v", expectedCapacity, remainingCapacity)
|
||||
}
|
||||
assert.Equal(t, expectedCapacity, remainingCapacity, "Unexpected rate limiting capacity")
|
||||
})
|
||||
|
||||
t.Run("high request count param and overflow", func(t *testing.T) {
|
||||
p1 := p2ptest.NewTestP2P(t)
|
||||
p2 := p2ptest.NewTestP2P(t)
|
||||
p1.Connect(p2)
|
||||
if len(p1.BHost.Network().Peers()) != 1 {
|
||||
t.Error("Expected peers to be connected")
|
||||
}
|
||||
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
|
||||
|
||||
capacity := int64(flags.Get().BlockBatchLimit * 3)
|
||||
r := &Service{p2p: p1, db: d, blocksRateLimiter: leakybucket.NewCollector(0.000001, capacity, false), chain: &chainMock.ChainService{}}
|
||||
@@ -316,27 +260,21 @@ func TestRPCBeaconBlocksByRange_RPCHandlerRateLimitOverflow(t *testing.T) {
|
||||
hook.Reset()
|
||||
for i := 0; i < p2.Peers().MaxBadResponses(); i++ {
|
||||
err := sendRequest(p1, p2, r, req, false)
|
||||
if err == nil || err.Error() != rateLimitedError {
|
||||
t.Errorf("Expected error not thrown, want: %v, got: %v", rateLimitedError, err)
|
||||
}
|
||||
assert.ErrorContains(t, rateLimitedError, err)
|
||||
}
|
||||
// Make sure that we were blocked indeed.
|
||||
testutil.AssertLogsContain(t, hook, "Disconnecting bad peer")
|
||||
|
||||
remainingCapacity := r.blocksRateLimiter.Remaining(p2.PeerID().String())
|
||||
expectedCapacity := int64(0) // Whole capacity is used.
|
||||
if remainingCapacity != expectedCapacity {
|
||||
t.Fatalf("Unexpected rate limiting capacity, expected: %v, got: %v", expectedCapacity, remainingCapacity)
|
||||
}
|
||||
assert.Equal(t, expectedCapacity, remainingCapacity, "Unexpected rate limiting capacity")
|
||||
})
|
||||
|
||||
t.Run("many requests with count set to max blocks per second", func(t *testing.T) {
|
||||
p1 := p2ptest.NewTestP2P(t)
|
||||
p2 := p2ptest.NewTestP2P(t)
|
||||
p1.Connect(p2)
|
||||
if len(p1.BHost.Network().Peers()) != 1 {
|
||||
t.Error("Expected peers to be connected")
|
||||
}
|
||||
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
|
||||
|
||||
capacity := int64(flags.Get().BlockBatchLimit * flags.Get().BlockBatchLimitBurstFactor)
|
||||
r := &Service{p2p: p1, db: d, blocksRateLimiter: leakybucket.NewCollector(0.000001, capacity, false), chain: &chainMock.ChainService{}}
|
||||
@@ -350,9 +288,7 @@ func TestRPCBeaconBlocksByRange_RPCHandlerRateLimitOverflow(t *testing.T) {
|
||||
|
||||
hook.Reset()
|
||||
for i := 0; i < flags.Get().BlockBatchLimitBurstFactor; i++ {
|
||||
if err := sendRequest(p1, p2, r, req, true); err != nil {
|
||||
t.Errorf("Unexpected error: %v", err)
|
||||
}
|
||||
assert.NoError(t, sendRequest(p1, p2, r, req, true))
|
||||
}
|
||||
testutil.AssertLogsDoNotContain(t, hook, "Disconnecting bad peer")
|
||||
|
||||
@@ -360,16 +296,12 @@ func TestRPCBeaconBlocksByRange_RPCHandlerRateLimitOverflow(t *testing.T) {
|
||||
hook.Reset()
|
||||
for i := 0; i < p2.Peers().MaxBadResponses(); i++ {
|
||||
err := sendRequest(p1, p2, r, req, false)
|
||||
if err == nil || err.Error() != rateLimitedError {
|
||||
t.Errorf("Expected error not thrown, want: %v, got: %v", rateLimitedError, err)
|
||||
}
|
||||
assert.ErrorContains(t, rateLimitedError, err)
|
||||
}
|
||||
testutil.AssertLogsContain(t, hook, "Disconnecting bad peer")
|
||||
|
||||
remainingCapacity := r.blocksRateLimiter.Remaining(p2.PeerID().String())
|
||||
expectedCapacity := int64(0) // Whole capacity is used.
|
||||
if remainingCapacity != expectedCapacity {
|
||||
t.Fatalf("Unexpected rate limiting capacity, expected: %v, got: %v", expectedCapacity, remainingCapacity)
|
||||
}
|
||||
assert.Equal(t, expectedCapacity, remainingCapacity, "Unexpected rate limiting capacity")
|
||||
})
|
||||
}
|
||||
|
||||
@@ -22,15 +22,15 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
)
|
||||
|
||||
func TestRecentBeaconBlocksRPCHandler_ReturnsBlocks(t *testing.T) {
|
||||
p1 := p2ptest.NewTestP2P(t)
|
||||
p2 := p2ptest.NewTestP2P(t)
|
||||
p1.Connect(p2)
|
||||
if len(p1.BHost.Network().Peers()) != 1 {
|
||||
t.Error("Expected peers to be connected")
|
||||
}
|
||||
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
|
||||
d, _ := db.SetupDB(t)
|
||||
|
||||
var blkRoots [][32]byte
|
||||
@@ -40,12 +40,8 @@ func TestRecentBeaconBlocksRPCHandler_ReturnsBlocks(t *testing.T) {
|
||||
Slot: uint64(i),
|
||||
}
|
||||
root, err := ssz.HashTreeRoot(blk)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := d.SaveBlock(context.Background(), ðpb.SignedBeaconBlock{Block: blk}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, d.SaveBlock(context.Background(), ðpb.SignedBeaconBlock{Block: blk}))
|
||||
blkRoots = append(blkRoots, root)
|
||||
}
|
||||
|
||||
@@ -59,9 +55,7 @@ func TestRecentBeaconBlocksRPCHandler_ReturnsBlocks(t *testing.T) {
|
||||
for i := range blkRoots {
|
||||
expectSuccess(t, r, stream)
|
||||
res := ðpb.SignedBeaconBlock{}
|
||||
if err := r.p2p.Encoding().DecodeWithMaxLength(stream, &res); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
assert.NoError(t, r.p2p.Encoding().DecodeWithMaxLength(stream, &res))
|
||||
if res.Block.Slot != uint64(i+1) {
|
||||
t.Errorf("Received unexpected block slot %d but wanted %d", res.Block.Slot, i+1)
|
||||
}
|
||||
@@ -69,13 +63,9 @@ func TestRecentBeaconBlocksRPCHandler_ReturnsBlocks(t *testing.T) {
|
||||
})
|
||||
|
||||
stream1, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
err = r.beaconBlocksRootRPCHandler(context.Background(), blkRoots, stream1)
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error: %v", err)
|
||||
}
|
||||
assert.NoError(t, err)
|
||||
|
||||
if testutil.WaitTimeout(&wg, 1*time.Second) {
|
||||
t.Fatal("Did not receive stream within 1 sec")
|
||||
@@ -91,23 +81,13 @@ func TestRecentBeaconBlocks_RPCRequestSent(t *testing.T) {
|
||||
blockB := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: 40}}
|
||||
// Set up a head state with data we expect.
|
||||
blockARoot, err := stateutil.BlockRoot(blockA.Block)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
blockBRoot, err := stateutil.BlockRoot(blockB.Block)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
genesisState, err := state.GenesisBeaconState(nil, 0, ðpb.Eth1Data{})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := genesisState.SetSlot(111); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := genesisState.UpdateBlockRootAtIndex(111%params.BeaconConfig().SlotsPerHistoricalRoot, blockARoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, genesisState.SetSlot(111))
|
||||
require.NoError(t, genesisState.UpdateBlockRootAtIndex(111%params.BeaconConfig().SlotsPerHistoricalRoot, blockARoot))
|
||||
finalizedCheckpt := ðpb.Checkpoint{
|
||||
Epoch: 5,
|
||||
Root: blockBRoot[:],
|
||||
@@ -135,28 +115,19 @@ func TestRecentBeaconBlocks_RPCRequestSent(t *testing.T) {
|
||||
p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
|
||||
defer wg.Done()
|
||||
out := [][32]byte{}
|
||||
if err := p2.Encoding().DecodeWithMaxLength(stream, &out); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(out, expectedRoots) {
|
||||
t.Fatalf("Did not receive expected message. Got %+v wanted %+v", out, expectedRoots)
|
||||
}
|
||||
assert.NoError(t, p2.Encoding().DecodeWithMaxLength(stream, &out))
|
||||
assert.DeepEqual(t, expectedRoots, out, "Did not receive expected message")
|
||||
response := []*ethpb.SignedBeaconBlock{blockB, blockA}
|
||||
for _, blk := range response {
|
||||
if _, err := stream.Write([]byte{responseCodeSuccess}); err != nil {
|
||||
t.Fatalf("Failed to write to stream: %v", err)
|
||||
}
|
||||
_, err := p2.Encoding().EncodeWithMaxLength(stream, blk)
|
||||
if err != nil {
|
||||
t.Errorf("Could not send response back: %v ", err)
|
||||
}
|
||||
_, err := stream.Write([]byte{responseCodeSuccess})
|
||||
assert.NoError(t, err, "Failed to write to stream")
|
||||
_, err = p2.Encoding().EncodeWithMaxLength(stream, blk)
|
||||
assert.NoError(t, err, "Could not send response back")
|
||||
}
|
||||
})
|
||||
|
||||
p1.Connect(p2)
|
||||
if err := r.sendRecentBeaconBlocksRequest(context.Background(), expectedRoots, p2.PeerID()); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, r.sendRecentBeaconBlocksRequest(context.Background(), expectedRoots, p2.PeerID()))
|
||||
|
||||
if testutil.WaitTimeout(&wg, 1*time.Second) {
|
||||
t.Fatal("Did not receive stream within 1 sec")
|
||||
@@ -176,18 +147,12 @@ func TestSSZCompatibility(t *testing.T) {
|
||||
list := testList{rootA, rootB, rootC}
|
||||
writer := bytes.NewBuffer([]byte{})
|
||||
sszType, err := types.SSZFactory(reflect.TypeOf(list))
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
assert.NoError(t, err)
|
||||
n, err := zssz.Encode(writer, list, sszType)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
assert.NoError(t, err)
|
||||
encodedPart := writer.Bytes()[:n]
|
||||
fastSSZ, err := ssz.Marshal(list)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
assert.NoError(t, err)
|
||||
if !bytes.Equal(fastSSZ, encodedPart) {
|
||||
t.Errorf("Wanted the same result as ZSSZ of %#x but got %#X", encodedPart, fastSSZ)
|
||||
}
|
||||
|
||||
@@ -11,15 +11,15 @@ import (
|
||||
db "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
p2ptest "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
)
|
||||
|
||||
func TestGoodByeRPCHandler_Disconnects_With_Peer(t *testing.T) {
|
||||
p1 := p2ptest.NewTestP2P(t)
|
||||
p2 := p2ptest.NewTestP2P(t)
|
||||
p1.Connect(p2)
|
||||
if len(p1.BHost.Network().Peers()) != 1 {
|
||||
t.Error("Expected peers to be connected")
|
||||
}
|
||||
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
|
||||
|
||||
// Set up a head state in the database with data we expect.
|
||||
d, _ := db.SetupDB(t)
|
||||
@@ -37,15 +37,10 @@ func TestGoodByeRPCHandler_Disconnects_With_Peer(t *testing.T) {
|
||||
expectResetStream(t, r, stream)
|
||||
})
|
||||
stream1, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
failureCode := codeClientShutdown
|
||||
|
||||
err = r.goodbyeRPCHandler(context.Background(), &failureCode, stream1)
|
||||
if err != nil {
|
||||
t.Errorf("Unxpected error: %v", err)
|
||||
}
|
||||
assert.NoError(t, r.goodbyeRPCHandler(context.Background(), &failureCode, stream1))
|
||||
|
||||
if testutil.WaitTimeout(&wg, 1*time.Second) {
|
||||
t.Fatal("Did not receive stream within 1 sec")
|
||||
@@ -61,9 +56,7 @@ func TestSendGoodbye_SendsMessage(t *testing.T) {
|
||||
p1 := p2ptest.NewTestP2P(t)
|
||||
p2 := p2ptest.NewTestP2P(t)
|
||||
p1.Connect(p2)
|
||||
if len(p1.BHost.Network().Peers()) != 1 {
|
||||
t.Error("Expected peers to be connected")
|
||||
}
|
||||
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
|
||||
|
||||
// Set up a head state in the database with data we expect.
|
||||
d, _ := db.SetupDB(t)
|
||||
@@ -80,19 +73,12 @@ func TestSendGoodbye_SendsMessage(t *testing.T) {
|
||||
p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
|
||||
defer wg.Done()
|
||||
out := new(uint64)
|
||||
if err := r.p2p.Encoding().DecodeWithMaxLength(stream, out); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if *out != failureCode {
|
||||
t.Fatalf("Wanted goodbye code of %d but got %d", failureCode, *out)
|
||||
}
|
||||
|
||||
assert.NoError(t, r.p2p.Encoding().DecodeWithMaxLength(stream, out))
|
||||
assert.Equal(t, failureCode, *out)
|
||||
})
|
||||
|
||||
err := r.sendGoodByeMessage(context.Background(), failureCode, p2.BHost.ID())
|
||||
if err != nil {
|
||||
t.Errorf("Unxpected error: %v", err)
|
||||
}
|
||||
assert.NoError(t, err)
|
||||
|
||||
if testutil.WaitTimeout(&wg, 1*time.Second) {
|
||||
t.Fatal("Did not receive stream within 1 sec")
|
||||
@@ -108,9 +94,7 @@ func TestSendGoodbye_DisconnectWithPeer(t *testing.T) {
|
||||
p1 := p2ptest.NewTestP2P(t)
|
||||
p2 := p2ptest.NewTestP2P(t)
|
||||
p1.Connect(p2)
|
||||
if len(p1.BHost.Network().Peers()) != 1 {
|
||||
t.Error("Expected peers to be connected")
|
||||
}
|
||||
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
|
||||
|
||||
// Set up a head state in the database with data we expect.
|
||||
d, _ := db.SetupDB(t)
|
||||
@@ -127,19 +111,11 @@ func TestSendGoodbye_DisconnectWithPeer(t *testing.T) {
|
||||
p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
|
||||
defer wg.Done()
|
||||
out := new(uint64)
|
||||
if err := r.p2p.Encoding().DecodeWithMaxLength(stream, out); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if *out != failureCode {
|
||||
t.Fatalf("Wanted goodbye code of %d but got %d", failureCode, *out)
|
||||
}
|
||||
|
||||
assert.NoError(t, r.p2p.Encoding().DecodeWithMaxLength(stream, out))
|
||||
assert.Equal(t, failureCode, *out)
|
||||
})
|
||||
|
||||
err := r.sendGoodByeAndDisconnect(context.Background(), failureCode, p2.BHost.ID())
|
||||
if err != nil {
|
||||
t.Errorf("Unxpected error: %v", err)
|
||||
}
|
||||
assert.NoError(t, r.sendGoodByeAndDisconnect(context.Background(), failureCode, p2.BHost.ID()))
|
||||
conns := p1.BHost.Network().ConnsToPeer(p2.BHost.ID())
|
||||
if len(conns) > 0 {
|
||||
t.Error("Peer is still not disconnected despite sending a goodbye message")
|
||||
|
||||
@@ -14,15 +14,15 @@ import (
|
||||
p2ptest "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
)
|
||||
|
||||
func TestMetaDataRPCHandler_ReceivesMetadata(t *testing.T) {
|
||||
p1 := p2ptest.NewTestP2P(t)
|
||||
p2 := p2ptest.NewTestP2P(t)
|
||||
p1.Connect(p2)
|
||||
if len(p1.BHost.Network().Peers()) != 1 {
|
||||
t.Error("Expected peers to be connected")
|
||||
}
|
||||
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
|
||||
bitfield := [8]byte{'A', 'B'}
|
||||
p1.LocalMetadata = &pb.MetaData{
|
||||
SeqNumber: 2,
|
||||
@@ -44,22 +44,13 @@ func TestMetaDataRPCHandler_ReceivesMetadata(t *testing.T) {
|
||||
defer wg.Done()
|
||||
expectSuccess(t, r, stream)
|
||||
out := new(pb.MetaData)
|
||||
if err := r.p2p.Encoding().DecodeWithMaxLength(stream, out); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !ssz.DeepEqual(p1.LocalMetadata, out) {
|
||||
t.Fatalf("Metadata unequal, received %v but wanted %v", out, p1.LocalMetadata)
|
||||
}
|
||||
assert.NoError(t, r.p2p.Encoding().DecodeWithMaxLength(stream, out))
|
||||
assert.DeepEqual(t, p1.LocalMetadata, out, "Metadata unequal")
|
||||
})
|
||||
stream1, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
|
||||
err = r.metaDataHandler(context.Background(), new(interface{}), stream1)
|
||||
if err != nil {
|
||||
t.Errorf("Unxpected error: %v", err)
|
||||
}
|
||||
assert.NoError(t, r.metaDataHandler(context.Background(), new(interface{}), stream1))
|
||||
|
||||
if testutil.WaitTimeout(&wg, 1*time.Second) {
|
||||
t.Fatal("Did not receive stream within 1 sec")
|
||||
@@ -75,9 +66,7 @@ func TestMetadataRPCHandler_SendsMetadata(t *testing.T) {
|
||||
p1 := p2ptest.NewTestP2P(t)
|
||||
p2 := p2ptest.NewTestP2P(t)
|
||||
p1.Connect(p2)
|
||||
if len(p1.BHost.Network().Peers()) != 1 {
|
||||
t.Error("Expected peers to be connected")
|
||||
}
|
||||
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
|
||||
bitfield := [8]byte{'A', 'B'}
|
||||
p2.LocalMetadata = &pb.MetaData{
|
||||
SeqNumber: 2,
|
||||
@@ -102,17 +91,11 @@ func TestMetadataRPCHandler_SendsMetadata(t *testing.T) {
|
||||
wg.Add(1)
|
||||
p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
|
||||
defer wg.Done()
|
||||
|
||||
err := r2.metaDataHandler(context.Background(), new(interface{}), stream)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
assert.NoError(t, r2.metaDataHandler(context.Background(), new(interface{}), stream))
|
||||
})
|
||||
|
||||
metadata, err := r.sendMetaDataRequest(context.Background(), p2.BHost.ID())
|
||||
if err != nil {
|
||||
t.Errorf("Unxpected error: %v", err)
|
||||
}
|
||||
assert.NoError(t, err)
|
||||
|
||||
if !ssz.DeepEqual(metadata, p2.LocalMetadata) {
|
||||
t.Fatalf("Metadata unequal, received %v but wanted %v", metadata, p2.LocalMetadata)
|
||||
|
||||
@@ -13,15 +13,15 @@ import (
|
||||
p2ptest "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
)
|
||||
|
||||
func TestPingRPCHandler_ReceivesPing(t *testing.T) {
|
||||
p1 := p2ptest.NewTestP2P(t)
|
||||
p2 := p2ptest.NewTestP2P(t)
|
||||
p1.Connect(p2)
|
||||
if len(p1.BHost.Network().Peers()) != 1 {
|
||||
t.Error("Expected peers to be connected")
|
||||
}
|
||||
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
|
||||
p1.LocalMetadata = &pb.MetaData{
|
||||
SeqNumber: 2,
|
||||
Attnets: []byte{'A', 'B'},
|
||||
@@ -50,23 +50,14 @@ func TestPingRPCHandler_ReceivesPing(t *testing.T) {
|
||||
defer wg.Done()
|
||||
expectSuccess(t, r, stream)
|
||||
out := new(uint64)
|
||||
if err := r.p2p.Encoding().DecodeWithMaxLength(stream, out); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if *out != 2 {
|
||||
t.Fatalf("Wanted 2 but got %d as our sequence number", *out)
|
||||
}
|
||||
assert.NoError(t, r.p2p.Encoding().DecodeWithMaxLength(stream, out))
|
||||
assert.Equal(t, uint64(2), *out)
|
||||
})
|
||||
stream1, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
seqNumber := uint64(1)
|
||||
|
||||
err = r.pingHandler(context.Background(), &seqNumber, stream1)
|
||||
if err != nil {
|
||||
t.Errorf("Unxpected error: %v", err)
|
||||
}
|
||||
assert.NoError(t, r.pingHandler(context.Background(), &seqNumber, stream1))
|
||||
|
||||
if testutil.WaitTimeout(&wg, 1*time.Second) {
|
||||
t.Fatal("Did not receive stream within 1 sec")
|
||||
@@ -82,9 +73,7 @@ func TestPingRPCHandler_SendsPing(t *testing.T) {
|
||||
p1 := p2ptest.NewTestP2P(t)
|
||||
p2 := p2ptest.NewTestP2P(t)
|
||||
p1.Connect(p2)
|
||||
if len(p1.BHost.Network().Peers()) != 1 {
|
||||
t.Error("Expected peers to be connected")
|
||||
}
|
||||
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
|
||||
p1.LocalMetadata = &pb.MetaData{
|
||||
SeqNumber: 2,
|
||||
Attnets: []byte{'A', 'B'},
|
||||
@@ -119,22 +108,12 @@ func TestPingRPCHandler_SendsPing(t *testing.T) {
|
||||
p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
|
||||
defer wg.Done()
|
||||
out := new(uint64)
|
||||
if err := r2.p2p.Encoding().DecodeWithMaxLength(stream, out); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if *out != 2 {
|
||||
t.Fatalf("Wanted 2 but got %d as our sequence number", *out)
|
||||
}
|
||||
err := r2.pingHandler(context.Background(), out, stream)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
assert.NoError(t, r2.p2p.Encoding().DecodeWithMaxLength(stream, out))
|
||||
assert.Equal(t, uint64(2), *out)
|
||||
assert.NoError(t, r2.pingHandler(context.Background(), out, stream))
|
||||
})
|
||||
|
||||
err := r.sendPingRequest(context.Background(), p2.BHost.ID())
|
||||
if err != nil {
|
||||
t.Errorf("Unxpected error: %v", err)
|
||||
}
|
||||
assert.NoError(t, r.sendPingRequest(context.Background(), p2.BHost.ID()))
|
||||
|
||||
if testutil.WaitTimeout(&wg, 1*time.Second) {
|
||||
t.Fatal("Did not receive stream within 1 sec")
|
||||
|
||||
@@ -24,15 +24,15 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/roughtime"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
)
|
||||
|
||||
func TestStatusRPCHandler_Disconnects_OnForkVersionMismatch(t *testing.T) {
|
||||
p1 := p2ptest.NewTestP2P(t)
|
||||
p2 := p2ptest.NewTestP2P(t)
|
||||
p1.Connect(p2)
|
||||
if len(p1.BHost.Network().Peers()) != 1 {
|
||||
t.Error("Expected peers to be connected")
|
||||
}
|
||||
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
|
||||
root := [32]byte{'C'}
|
||||
|
||||
r := &Service{p2p: p1,
|
||||
@@ -56,9 +56,7 @@ func TestStatusRPCHandler_Disconnects_OnForkVersionMismatch(t *testing.T) {
|
||||
defer wg.Done()
|
||||
expectSuccess(t, r, stream)
|
||||
out := &pb.Status{}
|
||||
if err := r.p2p.Encoding().DecodeWithMaxLength(stream, out); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
assert.NoError(t, r.p2p.Encoding().DecodeWithMaxLength(stream, out))
|
||||
if !bytes.Equal(out.FinalizedRoot, root[:]) {
|
||||
t.Errorf("Expected finalized root of %#x but got %#x", root, out.FinalizedRoot)
|
||||
}
|
||||
@@ -70,24 +68,13 @@ func TestStatusRPCHandler_Disconnects_OnForkVersionMismatch(t *testing.T) {
|
||||
p2.BHost.SetStreamHandler(pcl2, func(stream network.Stream) {
|
||||
defer wg2.Done()
|
||||
msg := new(uint64)
|
||||
if err := r.p2p.Encoding().DecodeWithMaxLength(stream, msg); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if *msg != codeWrongNetwork {
|
||||
t.Errorf("Wrong goodbye code: %d", *msg)
|
||||
}
|
||||
|
||||
assert.NoError(t, r.p2p.Encoding().DecodeWithMaxLength(stream, msg))
|
||||
assert.Equal(t, codeWrongNetwork, *msg)
|
||||
})
|
||||
|
||||
stream1, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
err = r.statusRPCHandler(context.Background(), &pb.Status{ForkDigest: []byte("fake")}, stream1)
|
||||
if err != nil {
|
||||
t.Errorf("Expected no error but got %v", err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
assert.NoError(t, r.statusRPCHandler(context.Background(), &pb.Status{ForkDigest: []byte("fake")}, stream1))
|
||||
|
||||
if testutil.WaitTimeout(&wg, 1*time.Second) {
|
||||
t.Fatal("Did not receive stream within 1 sec")
|
||||
@@ -96,18 +83,14 @@ func TestStatusRPCHandler_Disconnects_OnForkVersionMismatch(t *testing.T) {
|
||||
t.Fatal("Did not receive stream within 1 sec")
|
||||
}
|
||||
|
||||
if len(p1.BHost.Network().Peers()) != 0 {
|
||||
t.Error("handler did not disconnect peer")
|
||||
}
|
||||
assert.Equal(t, 0, len(p1.BHost.Network().Peers()), "handler did not disconnect peer")
|
||||
}
|
||||
|
||||
func TestStatusRPCHandler_ConnectsOnGenesis(t *testing.T) {
|
||||
p1 := p2ptest.NewTestP2P(t)
|
||||
p2 := p2ptest.NewTestP2P(t)
|
||||
p1.Connect(p2)
|
||||
if len(p1.BHost.Network().Peers()) != 1 {
|
||||
t.Error("Expected peers to be connected")
|
||||
}
|
||||
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
|
||||
root := [32]byte{}
|
||||
|
||||
r := &Service{p2p: p1,
|
||||
@@ -131,72 +114,46 @@ func TestStatusRPCHandler_ConnectsOnGenesis(t *testing.T) {
|
||||
defer wg.Done()
|
||||
expectSuccess(t, r, stream)
|
||||
out := &pb.Status{}
|
||||
if err := r.p2p.Encoding().DecodeWithMaxLength(stream, out); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
assert.NoError(t, r.p2p.Encoding().DecodeWithMaxLength(stream, out))
|
||||
if !bytes.Equal(out.FinalizedRoot, root[:]) {
|
||||
t.Errorf("Expected finalized root of %#x but got %#x", root, out.FinalizedRoot)
|
||||
}
|
||||
})
|
||||
|
||||
stream1, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
digest, err := r.forkDigest()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
|
||||
err = r.statusRPCHandler(context.Background(), &pb.Status{ForkDigest: digest[:], FinalizedRoot: params.BeaconConfig().ZeroHash[:]}, stream1)
|
||||
if err != nil {
|
||||
t.Errorf("Expected no error but got %v", err)
|
||||
}
|
||||
assert.NoError(t, err)
|
||||
|
||||
if testutil.WaitTimeout(&wg, 1*time.Second) {
|
||||
t.Fatal("Did not receive stream within 1 sec")
|
||||
}
|
||||
|
||||
if len(p1.BHost.Network().Peers()) != 1 {
|
||||
t.Error("handler disconnected with peer")
|
||||
}
|
||||
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Handler disconnected with peer")
|
||||
}
|
||||
|
||||
func TestStatusRPCHandler_ReturnsHelloMessage(t *testing.T) {
|
||||
p1 := p2ptest.NewTestP2P(t)
|
||||
p2 := p2ptest.NewTestP2P(t)
|
||||
p1.Connect(p2)
|
||||
if len(p1.BHost.Network().Peers()) != 1 {
|
||||
t.Error("Expected peers to be connected")
|
||||
}
|
||||
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
|
||||
db, _ := testingDB.SetupDB(t)
|
||||
|
||||
// Set up a head state with data we expect.
|
||||
headRoot, err := ssz.HashTreeRoot(ðpb.BeaconBlock{Slot: 111})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
blkSlot := 3 * params.BeaconConfig().SlotsPerEpoch
|
||||
finalizedRoot, err := ssz.HashTreeRoot(ðpb.BeaconBlock{Slot: blkSlot})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
genesisState, err := state.GenesisBeaconState(nil, 0, ðpb.Eth1Data{})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := genesisState.SetSlot(111); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := genesisState.UpdateBlockRootAtIndex(111%params.BeaconConfig().SlotsPerHistoricalRoot, headRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := db.SaveBlock(context.Background(), ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: blkSlot}}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := db.SaveGenesisBlockRoot(context.Background(), finalizedRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, genesisState.SetSlot(111))
|
||||
require.NoError(t, genesisState.UpdateBlockRootAtIndex(111%params.BeaconConfig().SlotsPerHistoricalRoot, headRoot))
|
||||
require.NoError(t, db.SaveBlock(context.Background(), ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: blkSlot}}))
|
||||
require.NoError(t, db.SaveGenesisBlockRoot(context.Background(), finalizedRoot))
|
||||
finalizedCheckpt := ðpb.Checkpoint{
|
||||
Epoch: 3,
|
||||
Root: finalizedRoot[:],
|
||||
@@ -220,9 +177,7 @@ func TestStatusRPCHandler_ReturnsHelloMessage(t *testing.T) {
|
||||
db: db,
|
||||
}
|
||||
digest, err := r.forkDigest()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
|
||||
// Setup streams
|
||||
pcl := protocol.ID("/testing")
|
||||
@@ -232,9 +187,7 @@ func TestStatusRPCHandler_ReturnsHelloMessage(t *testing.T) {
|
||||
defer wg.Done()
|
||||
expectSuccess(t, r, stream)
|
||||
out := &pb.Status{}
|
||||
if err := r.p2p.Encoding().DecodeWithMaxLength(stream, out); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
assert.NoError(t, r.p2p.Encoding().DecodeWithMaxLength(stream, out))
|
||||
expected := &pb.Status{
|
||||
ForkDigest: digest[:],
|
||||
HeadSlot: genesisState.Slot(),
|
||||
@@ -247,18 +200,14 @@ func TestStatusRPCHandler_ReturnsHelloMessage(t *testing.T) {
|
||||
}
|
||||
})
|
||||
stream1, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
|
||||
err = r.statusRPCHandler(context.Background(), &pb.Status{
|
||||
ForkDigest: digest[:],
|
||||
FinalizedRoot: finalizedRoot[:],
|
||||
FinalizedEpoch: 3,
|
||||
}, stream1)
|
||||
if err != nil {
|
||||
t.Errorf("Unxpected error: %v", err)
|
||||
}
|
||||
assert.NoError(t, err)
|
||||
|
||||
if testutil.WaitTimeout(&wg, 1*time.Second) {
|
||||
t.Fatal("Did not receive stream within 1 sec")
|
||||
@@ -285,20 +234,12 @@ func TestHandshakeHandlers_Roundtrip(t *testing.T) {
|
||||
st, err := stateTrie.InitializeFromProto(&pb.BeaconState{
|
||||
Slot: 5,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
blk := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: 0}}
|
||||
if err := db.SaveBlock(context.Background(), blk); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, db.SaveBlock(context.Background(), blk))
|
||||
finalizedRoot, err := ssz.HashTreeRoot(blk.Block)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := db.SaveGenesisBlockRoot(context.Background(), finalizedRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.SaveGenesisBlockRoot(context.Background(), finalizedRoot))
|
||||
r := &Service{
|
||||
p2p: p1,
|
||||
chain: &mock.ChainService{
|
||||
@@ -315,9 +256,7 @@ func TestHandshakeHandlers_Roundtrip(t *testing.T) {
|
||||
ctx: context.Background(),
|
||||
}
|
||||
p1.Digest, err = r.forkDigest()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
|
||||
r2 := &Service{
|
||||
chain: &mock.ChainService{
|
||||
@@ -326,9 +265,7 @@ func TestHandshakeHandlers_Roundtrip(t *testing.T) {
|
||||
p2p: p2,
|
||||
}
|
||||
p2.Digest, err = r.forkDigest()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
|
||||
r.Start()
|
||||
|
||||
@@ -339,20 +276,14 @@ func TestHandshakeHandlers_Roundtrip(t *testing.T) {
|
||||
p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
|
||||
defer wg.Done()
|
||||
out := &pb.Status{}
|
||||
if err := r.p2p.Encoding().DecodeWithMaxLength(stream, out); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
assert.NoError(t, r.p2p.Encoding().DecodeWithMaxLength(stream, out))
|
||||
log.WithField("status", out).Warn("received status")
|
||||
resp := &pb.Status{HeadSlot: 100, ForkDigest: p2.Digest[:],
|
||||
FinalizedRoot: finalizedRoot[:], FinalizedEpoch: 0}
|
||||
|
||||
if _, err := stream.Write([]byte{responseCodeSuccess}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
_, err := r.p2p.Encoding().EncodeWithMaxLength(stream, resp)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
_, err := stream.Write([]byte{responseCodeSuccess})
|
||||
assert.NoError(t, err)
|
||||
_, err = r.p2p.Encoding().EncodeWithMaxLength(stream, resp)
|
||||
assert.NoError(t, err)
|
||||
log.WithField("status", out).Warn("sending status")
|
||||
if err := stream.Close(); err != nil {
|
||||
t.Log(err)
|
||||
@@ -365,19 +296,10 @@ func TestHandshakeHandlers_Roundtrip(t *testing.T) {
|
||||
p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
|
||||
defer wg2.Done()
|
||||
out := new(uint64)
|
||||
if err := r.p2p.Encoding().DecodeWithMaxLength(stream, out); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if *out != 2 {
|
||||
t.Fatalf("Wanted 2 but got %d as our sequence number", *out)
|
||||
}
|
||||
err := r2.pingHandler(context.Background(), out, stream)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := stream.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
assert.NoError(t, r.p2p.Encoding().DecodeWithMaxLength(stream, out))
|
||||
assert.Equal(t, uint64(2), *out)
|
||||
assert.NoError(t, r2.pingHandler(context.Background(), out, stream))
|
||||
assert.NoError(t, stream.Close())
|
||||
})
|
||||
|
||||
numInactive1 := len(p1.Peers().Inactive())
|
||||
@@ -404,16 +326,10 @@ func TestHandshakeHandlers_Roundtrip(t *testing.T) {
|
||||
numInactive2 := len(p1.Peers().Inactive())
|
||||
numActive2 := len(p1.Peers().Active())
|
||||
|
||||
if numInactive2 != numInactive1 {
|
||||
t.Errorf("Number of inactive peers changed unexpectedly: was %d, now %d", numInactive1, numInactive2)
|
||||
}
|
||||
if numActive2 != numActive1+1 {
|
||||
t.Errorf("Number of active peers unexpected: wanted %d, found %d", numActive1+1, numActive2)
|
||||
}
|
||||
assert.Equal(t, numInactive1, numInactive1, "Number of inactive peers changed unexpectedly")
|
||||
assert.Equal(t, numActive1+1, numActive2, "Number of active peers unexpected")

if err := p2.Disconnect(p1.PeerID()); err != nil {
t.Fatal(err)
}
require.NoError(t, p2.Disconnect(p1.PeerID()))
p1.Peers().SetConnectionState(p2.PeerID(), peers.PeerDisconnected)

// Wait for disconnect event to trigger.
@@ -421,12 +337,8 @@ func TestHandshakeHandlers_Roundtrip(t *testing.T) {

numInactive3 := len(p1.Peers().Inactive())
numActive3 := len(p1.Peers().Active())
if numInactive3 != numInactive2+1 {
t.Errorf("Number of inactive peers unexpected: wanted %d, found %d", numInactive2+1, numInactive3)
}
if numActive3 != numActive2-1 {
t.Errorf("Number of active peers unexpected: wanted %d, found %d", numActive2-1, numActive3)
}
assert.Equal(t, numInactive2+1, numInactive3, "Number of inactive peers unexpected")
assert.Equal(t, numActive2-1, numActive3, "Number of active peers unexpected")
}

func TestStatusRPCRequest_RequestSent(t *testing.T) {
@@ -435,23 +347,13 @@ func TestStatusRPCRequest_RequestSent(t *testing.T) {

// Set up a head state with data we expect.
headRoot, err := ssz.HashTreeRoot(&ethpb.BeaconBlock{Slot: 111})
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
finalizedRoot, err := ssz.HashTreeRoot(&ethpb.BeaconBlock{Slot: 40})
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
genesisState, err := state.GenesisBeaconState(nil, 0, &ethpb.Eth1Data{})
if err != nil {
t.Fatal(err)
}
if err := genesisState.SetSlot(111); err != nil {
t.Fatal(err)
}
if err := genesisState.UpdateBlockRootAtIndex(111%params.BeaconConfig().SlotsPerHistoricalRoot, headRoot); err != nil {
t.Fatal(err)
}
require.NoError(t, err)
require.NoError(t, genesisState.SetSlot(111))
require.NoError(t, genesisState.UpdateBlockRootAtIndex(111%params.BeaconConfig().SlotsPerHistoricalRoot, headRoot))
finalizedCheckpt := &ethpb.Checkpoint{
Epoch: 5,
Root: finalizedRoot[:],
@@ -480,13 +382,9 @@ func TestStatusRPCRequest_RequestSent(t *testing.T) {
p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
defer wg.Done()
out := &pb.Status{}
if err := r.p2p.Encoding().DecodeWithMaxLength(stream, out); err != nil {
t.Fatal(err)
}
assert.NoError(t, r.p2p.Encoding().DecodeWithMaxLength(stream, out))
digest, err := r.forkDigest()
if err != nil {
t.Fatal(err)
}
assert.NoError(t, err)
expected := &pb.Status{
ForkDigest: digest[:],
HeadSlot: genesisState.Slot(),
@@ -506,9 +404,7 @@ func TestStatusRPCRequest_RequestSent(t *testing.T) {
t.Fatal("Did not receive stream within 1 sec")
}

if len(p1.BHost.Network().Peers()) != 1 {
t.Error("Expected peers to continue being connected")
}
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to continue being connected")
}

func TestStatusRPCRequest_FinalizedBlockExists(t *testing.T) {
@@ -518,30 +414,16 @@ func TestStatusRPCRequest_FinalizedBlockExists(t *testing.T) {

// Set up a head state with data we expect.
headRoot, err := ssz.HashTreeRoot(&ethpb.BeaconBlock{Slot: 111})
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
blkSlot := 3 * params.BeaconConfig().SlotsPerEpoch
finalizedRoot, err := ssz.HashTreeRoot(&ethpb.BeaconBlock{Slot: blkSlot})
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
genesisState, err := state.GenesisBeaconState(nil, 0, &ethpb.Eth1Data{})
if err != nil {
t.Fatal(err)
}
if err := genesisState.SetSlot(111); err != nil {
t.Fatal(err)
}
if err := genesisState.UpdateBlockRootAtIndex(111%params.BeaconConfig().SlotsPerHistoricalRoot, headRoot); err != nil {
t.Fatal(err)
}
if err := db.SaveBlock(context.Background(), &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: blkSlot}}); err != nil {
t.Fatal(err)
}
if err := db.SaveGenesisBlockRoot(context.Background(), finalizedRoot); err != nil {
t.Fatal(err)
}
require.NoError(t, err)
require.NoError(t, genesisState.SetSlot(111))
require.NoError(t, genesisState.UpdateBlockRootAtIndex(111%params.BeaconConfig().SlotsPerHistoricalRoot, headRoot))
require.NoError(t, db.SaveBlock(context.Background(), &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: blkSlot}}))
require.NoError(t, db.SaveGenesisBlockRoot(context.Background(), finalizedRoot))
finalizedCheckpt := &ethpb.Checkpoint{
Epoch: 3,
Root: finalizedRoot[:],
@@ -588,13 +470,8 @@ func TestStatusRPCRequest_FinalizedBlockExists(t *testing.T) {
p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
defer wg.Done()
out := &pb.Status{}
if err := r.p2p.Encoding().DecodeWithMaxLength(stream, out); err != nil {
t.Fatal(err)
}
err := r2.validateStatusMessage(context.Background(), out)
if err != nil {
t.Fatal(err)
}
assert.NoError(t, r.p2p.Encoding().DecodeWithMaxLength(stream, out))
assert.NoError(t, r2.validateStatusMessage(context.Background(), out))
})

p1.AddConnectionHandler(r.sendRPCStatusRequest)
@@ -604,9 +481,7 @@ func TestStatusRPCRequest_FinalizedBlockExists(t *testing.T) {
t.Fatal("Did not receive stream within 1 sec")
}

if len(p1.BHost.Network().Peers()) != 1 {
t.Error("Expected peers to continue being connected")
}
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to continue being connected")
}

func TestStatusRPCRequest_BadPeerHandshake(t *testing.T) {
@@ -615,23 +490,13 @@ func TestStatusRPCRequest_BadPeerHandshake(t *testing.T) {

// Set up a head state with data we expect.
headRoot, err := ssz.HashTreeRoot(&ethpb.BeaconBlock{Slot: 111})
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
finalizedRoot, err := ssz.HashTreeRoot(&ethpb.BeaconBlock{Slot: 40})
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
genesisState, err := state.GenesisBeaconState(nil, 0, &ethpb.Eth1Data{})
if err != nil {
t.Fatal(err)
}
if err := genesisState.SetSlot(111); err != nil {
t.Fatal(err)
}
if err := genesisState.UpdateBlockRootAtIndex(111%params.BeaconConfig().SlotsPerHistoricalRoot, headRoot); err != nil {
t.Fatal(err)
}
require.NoError(t, err)
require.NoError(t, genesisState.SetSlot(111))
require.NoError(t, genesisState.UpdateBlockRootAtIndex(111%params.BeaconConfig().SlotsPerHistoricalRoot, headRoot))
finalizedCheckpt := &ethpb.Checkpoint{
Epoch: 5,
Root: finalizedRoot[:],
@@ -662,9 +527,7 @@ func TestStatusRPCRequest_BadPeerHandshake(t *testing.T) {
p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
defer wg.Done()
out := &pb.Status{}
if err := r.p2p.Encoding().DecodeWithMaxLength(stream, out); err != nil {
t.Fatal(err)
}
assert.NoError(t, r.p2p.Encoding().DecodeWithMaxLength(stream, out))
expected := &pb.Status{
ForkDigest: []byte{1, 1, 1, 1},
HeadSlot: genesisState.Slot(),
@@ -676,9 +539,7 @@ func TestStatusRPCRequest_BadPeerHandshake(t *testing.T) {
log.WithError(err).Error("Failed to write to stream")
}
_, err := r.p2p.Encoding().EncodeWithMaxLength(stream, expected)
if err != nil {
t.Errorf("Could not send status: %v", err)
}
assert.NoError(t, err)
})

p1.Connect(p2)
@@ -689,43 +550,25 @@ func TestStatusRPCRequest_BadPeerHandshake(t *testing.T) {
time.Sleep(100 * time.Millisecond)

connectionState, err := p1.Peers().ConnectionState(p2.PeerID())
if err != nil {
t.Fatal("Failed to obtain peer connection state")
}
if connectionState != peers.PeerDisconnected {
t.Error("Expected peer to be disconnected")
}
require.NoError(t, err, "Failed to obtain peer connection state")
assert.Equal(t, peers.PeerDisconnected, connectionState, "Expected peer to be disconnected")

badResponses, err := p1.Peers().BadResponses(p2.PeerID())
if err != nil {
t.Fatal("Failed to obtain peer connection state")
}
if badResponses != 1 {
t.Errorf("Bad response was not bumped to one, instead it is %d", badResponses)
}
require.NoError(t, err, "Failed to obtain peer connection state")
assert.Equal(t, 1, badResponses, "Bad response was not bumped to one")
}

func TestStatusRPC_ValidGenesisMessage(t *testing.T) {
// Set up a head state with data we expect.
headRoot, err := ssz.HashTreeRoot(&ethpb.BeaconBlock{Slot: 111})
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
blkSlot := 3 * params.BeaconConfig().SlotsPerEpoch
finalizedRoot, err := ssz.HashTreeRoot(&ethpb.BeaconBlock{Slot: blkSlot})
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
genesisState, err := state.GenesisBeaconState(nil, 0, &ethpb.Eth1Data{})
if err != nil {
t.Fatal(err)
}
if err := genesisState.SetSlot(111); err != nil {
t.Fatal(err)
}
if err := genesisState.UpdateBlockRootAtIndex(111%params.BeaconConfig().SlotsPerHistoricalRoot, headRoot); err != nil {
t.Fatal(err)
}
require.NoError(t, err)
require.NoError(t, genesisState.SetSlot(111))
require.NoError(t, genesisState.UpdateBlockRootAtIndex(111%params.BeaconConfig().SlotsPerHistoricalRoot, headRoot))
finalizedCheckpt := &ethpb.Checkpoint{
Epoch: 5,
Root: finalizedRoot[:],
@@ -745,9 +588,7 @@ func TestStatusRPC_ValidGenesisMessage(t *testing.T) {
ctx: context.Background(),
}
digest, err := r.forkDigest()
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
// There should be no error for a status message
// with a genesis checkpoint.
err = r.validateStatusMessage(r.ctx, &pb.Status{
@@ -757,9 +598,7 @@ func TestStatusRPC_ValidGenesisMessage(t *testing.T) {
HeadRoot: headRoot[:],
HeadSlot: 111,
})
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
}

func TestShouldResync(t *testing.T) {
@@ -813,12 +652,8 @@ func TestShouldResync(t *testing.T) {
}
for _, tt := range tests {
headState, err := state.GenesisBeaconState(nil, 0, &ethpb.Eth1Data{})
if err != nil {
t.Fatal(err)
}
if err := headState.SetSlot(tt.args.headSlot); err != nil {
t.Fatal(err)
}
require.NoError(t, err)
require.NoError(t, headState.SetSlot(tt.args.headSlot))
r := &Service{
chain: &mock.ChainService{
State: headState,

@@ -16,13 +16,13 @@ import (
"github.com/prysmaticlabs/prysm/shared/bls"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/testutil"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
)

func TestService_StatusZeroEpoch(t *testing.T) {
bState, err := stateTrie.InitializeFromProto(&pb.BeaconState{Slot: 0})
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
r := &Service{
p2p: p2ptest.NewTestP2P(t),
initialSync: new(mockSync.Sync),
@@ -33,10 +33,7 @@ func TestService_StatusZeroEpoch(t *testing.T) {
}
r.chainStarted = true

err = r.Status()
if err != nil {
t.Errorf("Wanted non failing status but got: %v", err)
}
assert.NoError(t, r.Status(), "Wanted non failing status")
}

func TestSyncHandlers_WaitToSync(t *testing.T) {
@@ -68,9 +65,7 @@ func TestSyncHandlers_WaitToSync(t *testing.T) {
b := []byte("sk")
b32 := bytesutil.ToBytes32(b)
sk, err := bls.SecretKeyFromBytes(b32[:])
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)

msg := &ethpb.SignedBeaconBlock{
Block: &ethpb.BeaconBlock{
@@ -81,8 +76,5 @@ func TestSyncHandlers_WaitToSync(t *testing.T) {
p2p.ReceivePubSub(topic, msg)
// wait for chainstart to be sent
time.Sleep(400 * time.Millisecond)
if !r.chainStarted {
t.Fatal("Did not receive chain start event.")
}

require.Equal(t, true, r.chainStarted, "Did not receive chain start event.")
}

@@ -2,7 +2,6 @@ package sync

import (
"context"
"reflect"
"testing"

lru "github.com/hashicorp/golang-lru"
@@ -10,13 +9,13 @@ import (
"github.com/prysmaticlabs/go-bitfield"
mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
)

func TestBeaconAggregateProofSubscriber_CanSaveAggregatedAttestation(t *testing.T) {
c, err := lru.New(10)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
r := &Service{
attPool: attestations.NewPool(),
seenAttestationCache: c,
@@ -24,20 +23,13 @@ func TestBeaconAggregateProofSubscriber_CanSaveAggregatedAttestation(t *testing.
}

a := &ethpb.SignedAggregateAttestationAndProof{Message: &ethpb.AggregateAttestationAndProof{Aggregate: &ethpb.Attestation{Data: &ethpb.AttestationData{Target: &ethpb.Checkpoint{}}, AggregationBits: bitfield.Bitlist{0x07}}, AggregatorIndex: 100}}
if err := r.beaconAggregateProofSubscriber(context.Background(), a); err != nil {
t.Fatal(err)
}

if !reflect.DeepEqual(r.attPool.AggregatedAttestations(), []*ethpb.Attestation{a.Message.Aggregate}) {
t.Error("Did not save aggregated attestation")
}
require.NoError(t, r.beaconAggregateProofSubscriber(context.Background(), a))
assert.DeepEqual(t, []*ethpb.Attestation{a.Message.Aggregate}, r.attPool.AggregatedAttestations(), "Did not save aggregated attestation")
}

func TestBeaconAggregateProofSubscriber_CanSaveUnaggregatedAttestation(t *testing.T) {
c, err := lru.New(10)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
r := &Service{
attPool: attestations.NewPool(),
seenAttestationCache: c,
@@ -45,11 +37,6 @@ func TestBeaconAggregateProofSubscriber_CanSaveUnaggregatedAttestation(t *testin
}

a := &ethpb.SignedAggregateAttestationAndProof{Message: &ethpb.AggregateAttestationAndProof{Aggregate: &ethpb.Attestation{Data: &ethpb.AttestationData{Target: &ethpb.Checkpoint{}}, AggregationBits: bitfield.Bitlist{0x03}}, AggregatorIndex: 100}}
if err := r.beaconAggregateProofSubscriber(context.Background(), a); err != nil {
t.Fatal(err)
}

if !reflect.DeepEqual(r.attPool.UnaggregatedAttestations(), []*ethpb.Attestation{a.Message.Aggregate}) {
t.Error("Did not save unaggregated attestation")
}
require.NoError(t, r.beaconAggregateProofSubscriber(context.Background(), a))
assert.DeepEqual(t, []*ethpb.Attestation{a.Message.Aggregate}, r.attPool.UnaggregatedAttestations(), "Did not save unaggregated attestation")
}

@@ -21,6 +21,7 @@ import (
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
)

func TestService_committeeIndexBeaconAttestationSubscriber_ValidMessage(t *testing.T) {
@@ -33,30 +34,18 @@ func TestService_committeeIndexBeaconAttestationSubscriber_ValidMessage(t *testi
ctx := context.Background()
db, _ := dbtest.SetupDB(t)
s, sKeys := testutil.DeterministicGenesisState(t, 64 /*validators*/)
if err := s.SetGenesisTime(uint64(time.Now().Unix())); err != nil {
t.Fatal(err)
}
require.NoError(t, s.SetGenesisTime(uint64(time.Now().Unix())))
blk, err := testutil.GenerateFullBlock(s, sKeys, nil, 1)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
root, err := stateutil.BlockRoot(blk.Block)
if err != nil {
t.Fatal(err)
}
if err := db.SaveBlock(ctx, blk); err != nil {
t.Fatal(err)
}
require.NoError(t, err)
require.NoError(t, db.SaveBlock(ctx, blk))

savedState := testutil.NewBeaconState()
if err := db.SaveState(context.Background(), savedState, root); err != nil {
t.Fatal(err)
}
require.NoError(t, db.SaveState(context.Background(), savedState, root))

c, err := lru.New(10)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
r := &Service{
attPool: attestations.NewPool(),
chain: &mock.ChainService{
@@ -76,9 +65,7 @@ func TestService_committeeIndexBeaconAttestationSubscriber_ValidMessage(t *testi
stateSummaryCache: cache.NewStateSummaryCache(),
}
p.Digest, err = r.forkDigest()
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
r.registerSubscribers()
r.stateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.Initialized,
@@ -96,13 +83,9 @@ func TestService_committeeIndexBeaconAttestationSubscriber_ValidMessage(t *testi
AggregationBits: bitfield.Bitlist{0b0101},
}
domain, err := helpers.Domain(s.Fork(), att.Data.Target.Epoch, params.BeaconConfig().DomainBeaconAttester, s.GenesisValidatorRoot())
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
attRoot, err := helpers.ComputeSigningRoot(att.Data, domain)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
att.Signature = sKeys[16].Sign(attRoot[:]).Marshal()

p.ReceivePubSub("/eth2/%x/beacon_attestation_0", att)

@@ -2,7 +2,6 @@ package sync

import (
"context"
"reflect"
"testing"

"github.com/gogo/protobuf/proto"
@@ -12,6 +11,8 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
dbtest "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
)

func TestDeleteAttsInPool(t *testing.T) {
@@ -23,28 +24,16 @@ func TestDeleteAttsInPool(t *testing.T) {
att2 := &ethpb.Attestation{AggregationBits: bitfield.Bitlist{0b1110}, Data: data}
att3 := &ethpb.Attestation{AggregationBits: bitfield.Bitlist{0b1011}, Data: data}
att4 := &ethpb.Attestation{AggregationBits: bitfield.Bitlist{0b1001}, Data: data}
if err := r.attPool.SaveAggregatedAttestation(att1); err != nil {
t.Fatal(err)
}
if err := r.attPool.SaveAggregatedAttestation(att2); err != nil {
t.Fatal(err)
}
if err := r.attPool.SaveAggregatedAttestation(att3); err != nil {
t.Fatal(err)
}
if err := r.attPool.SaveUnaggregatedAttestation(att4); err != nil {
t.Fatal(err)
}
require.NoError(t, r.attPool.SaveAggregatedAttestation(att1))
require.NoError(t, r.attPool.SaveAggregatedAttestation(att2))
require.NoError(t, r.attPool.SaveAggregatedAttestation(att3))
require.NoError(t, r.attPool.SaveUnaggregatedAttestation(att4))

// Seen 1, 3 and 4 in block.
if err := r.deleteAttsInPool([]*ethpb.Attestation{att1, att3, att4}); err != nil {
t.Fatal(err)
}
require.NoError(t, r.deleteAttsInPool([]*ethpb.Attestation{att1, att3, att4}))

// Only 2 should remain.
if !reflect.DeepEqual(r.attPool.AggregatedAttestations(), []*ethpb.Attestation{att2}) {
t.Error("Did not get wanted attestation from pool")
}
assert.DeepEqual(t, []*ethpb.Attestation{att2}, r.attPool.AggregatedAttestations(), "Did not get wanted attestations")
}

func TestService_beaconBlockSubscriber(t *testing.T) {
@@ -100,19 +89,13 @@ func TestService_beaconBlockSubscriber(t *testing.T) {
},
attPool: attestations.NewPool(),
}
if err := s.initCaches(); err != nil {
t.Error(err)
}
assert.NoError(t, s.initCaches())
// Set up attestation pool.
for _, att := range pooledAttestations {
if helpers.IsAggregated(att) {
if err := s.attPool.SaveAggregatedAttestation(att); err != nil {
t.Error(err)
}
assert.NoError(t, s.attPool.SaveAggregatedAttestation(att))
} else {
if err := s.attPool.SaveUnaggregatedAttestation(att); err != nil {
t.Error(err)
}
assert.NoError(t, s.attPool.SaveUnaggregatedAttestation(att))
}
}
// Perform method under test call.

@@ -21,6 +21,8 @@ import (
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
logTest "github.com/sirupsen/logrus/hooks/test"
)

@@ -37,18 +39,14 @@ func TestSubscribe_ReceivesValidMessage(t *testing.T) {
}
var err error
p2p.Digest, err = r.forkDigest()
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
topic := "/eth2/%x/voluntary_exit"
var wg sync.WaitGroup
wg.Add(1)

r.subscribe(topic, r.noopValidator, func(_ context.Context, msg proto.Message) error {
m, ok := msg.(*pb.SignedVoluntaryExit)
if !ok {
t.Error("Object is not of type *pb.SignedVoluntaryExit")
}
assert.Equal(t, true, ok, "Object is not of type *pb.SignedVoluntaryExit")
if m.Exit == nil || m.Exit.Epoch != 55 {
t.Errorf("Unexpected incoming message: %+v", m)
}
@@ -73,9 +71,7 @@ func TestSubscribe_ReceivesAttesterSlashing(t *testing.T) {
ValidatorsRoot: [32]byte{'A'},
}
c, err := lru.New(10)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
r := Service{
ctx: ctx,
p2p: p2p,
@@ -91,9 +87,7 @@ func TestSubscribe_ReceivesAttesterSlashing(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.OverrideBeaconConfig(params.MainnetConfig())
r.subscribe(topic, r.noopValidator, func(ctx context.Context, msg proto.Message) error {
if err := r.attesterSlashingSubscriber(ctx, msg); err != nil {
t.Fatal(err)
}
require.NoError(t, r.attesterSlashingSubscriber(ctx, msg))
wg.Done()
return nil
})
@@ -105,26 +99,18 @@ func TestSubscribe_ReceivesAttesterSlashing(t *testing.T) {
privKeys[1],
1, /* validator index */
)
if err != nil {
t.Fatalf("Error generating attester slashing")
}
require.NoError(t, err, "Error generating attester slashing")
err = r.db.SaveState(ctx, beaconState, bytesutil.ToBytes32(attesterSlashing.Attestation_1.Data.BeaconBlockRoot))
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
p2p.Digest, err = r.forkDigest()
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
p2p.ReceivePubSub(topic, attesterSlashing)

if testutil.WaitTimeout(&wg, time.Second) {
t.Fatal("Did not receive PubSub in 1 second")
}
as := r.slashingPool.PendingAttesterSlashings(ctx, beaconState)
if len(as) != 1 {
t.Errorf("Expected attester slashing: %v to be added to slashing pool. got: %v", attesterSlashing, as[0])
}
assert.Equal(t, 1, len(as), "Expected attester slashing")
}

func TestSubscribe_ReceivesProposerSlashing(t *testing.T) {
@@ -136,9 +122,7 @@ func TestSubscribe_ReceivesProposerSlashing(t *testing.T) {
}
d, _ := db.SetupDB(t)
c, err := lru.New(10)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
r := Service{
ctx: ctx,
p2p: p2p,
@@ -154,9 +138,7 @@ func TestSubscribe_ReceivesProposerSlashing(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.OverrideBeaconConfig(params.MainnetConfig())
r.subscribe(topic, r.noopValidator, func(ctx context.Context, msg proto.Message) error {
if err := r.proposerSlashingSubscriber(ctx, msg); err != nil {
t.Fatal(err)
}
require.NoError(t, r.proposerSlashingSubscriber(ctx, msg))
wg.Done()
return nil
})
@@ -168,22 +150,16 @@ func TestSubscribe_ReceivesProposerSlashing(t *testing.T) {
privKeys[1],
1, /* validator index */
)
if err != nil {
t.Fatalf("Error generating proposer slashing")
}
require.NoError(t, err, "Error generating proposer slashing")
p2p.Digest, err = r.forkDigest()
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
p2p.ReceivePubSub(topic, proposerSlashing)

if testutil.WaitTimeout(&wg, time.Second) {
t.Fatal("Did not receive PubSub in 1 second")
}
ps := r.slashingPool.PendingProposerSlashings(ctx, beaconState)
if len(ps) != 1 {
t.Errorf("Expected proposer slashing: %v to be added to slashing pool. got: %v", proposerSlashing, ps)
}
assert.Equal(t, 1, len(ps), "Expected proposer slashing")
}

func TestSubscribe_HandlesPanic(t *testing.T) {
@@ -198,9 +174,7 @@ func TestSubscribe_HandlesPanic(t *testing.T) {
}
var err error
p.Digest, err = r.forkDigest()
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)

topic := p2p.GossipTypeMapping[reflect.TypeOf(&pb.SignedVoluntaryExit{})]
var wg sync.WaitGroup
@@ -230,33 +204,22 @@ func TestRevalidateSubscription_CorrectlyFormatsTopic(t *testing.T) {
p2p: p,
}
digest, err := r.forkDigest()
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
subscriptions := make(map[uint64]*pubsub.Subscription, params.BeaconConfig().MaxCommitteesPerSlot)

defaultTopic := "/eth2/testing/%#x/committee%d"
|
||||
// committee index 1
|
||||
fullTopic := fmt.Sprintf(defaultTopic, digest, 1) + r.p2p.Encoding().ProtocolSuffix()
|
||||
err = r.p2p.PubSub().RegisterTopicValidator(fullTopic, r.noopValidator)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, r.p2p.PubSub().RegisterTopicValidator(fullTopic, r.noopValidator))
|
||||
subscriptions[1], err = r.p2p.SubscribeToTopic(fullTopic)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
|
||||
// committee index 2
|
||||
fullTopic = fmt.Sprintf(defaultTopic, digest, 2) + r.p2p.Encoding().ProtocolSuffix()
|
||||
err = r.p2p.PubSub().RegisterTopicValidator(fullTopic, r.noopValidator)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
subscriptions[2], err = r.p2p.SubscribeToTopic(fullTopic)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
|
||||
r.reValidateSubscriptions(subscriptions, []uint64{2}, defaultTopic, digest)
|
||||
testutil.AssertLogsDoNotContain(t, hook, "Failed to unregister topic validator")
|
||||
|
||||
@@ -4,7 +4,6 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -28,6 +27,8 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/shared/featureconfig"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
)
|
||||
|
||||
func TestVerifyIndexInCommittee_CanVerify(t *testing.T) {
|
||||
@@ -37,9 +38,7 @@ func TestVerifyIndexInCommittee_CanVerify(t *testing.T) {
|
||||
|
||||
validators := uint64(64)
|
||||
s, _ := testutil.DeterministicGenesisState(t, validators)
|
||||
if err := s.SetSlot(params.BeaconConfig().SlotsPerEpoch); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, s.SetSlot(params.BeaconConfig().SlotsPerEpoch))
|
||||
|
||||
bf := []byte{0xff}
|
||||
att := ðpb.Attestation{Data: ðpb.AttestationData{
|
||||
@@ -47,22 +46,13 @@ func TestVerifyIndexInCommittee_CanVerify(t *testing.T) {
|
||||
AggregationBits: bf}
|
||||
|
||||
committee, err := helpers.BeaconCommitteeFromState(s, att.Data.Slot, att.Data.CommitteeIndex)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
assert.NoError(t, err)
|
||||
indices := attestationutil.AttestingIndices(att.AggregationBits, committee)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := validateIndexInCommittee(ctx, s, att, indices[0]); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, validateIndexInCommittee(ctx, s, att, indices[0]))
|
||||
|
||||
wanted := "validator index 1000 is not within the committee"
|
||||
if err := validateIndexInCommittee(ctx, s, att, 1000); err == nil || !strings.Contains(err.Error(), wanted) {
|
||||
t.Error("Did not receive wanted error")
|
||||
}
|
||||
assert.ErrorContains(t, wanted, validateIndexInCommittee(ctx, s, att, 1000))
|
||||
}
|
||||
|
||||
func TestVerifyIndexInCommittee_ExistsInBeaconCommittee(t *testing.T) {
|
||||
@@ -72,9 +62,7 @@ func TestVerifyIndexInCommittee_ExistsInBeaconCommittee(t *testing.T) {
|
||||
|
||||
validators := uint64(64)
|
||||
s, _ := testutil.DeterministicGenesisState(t, validators)
|
||||
if err := s.SetSlot(params.BeaconConfig().SlotsPerEpoch); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, s.SetSlot(params.BeaconConfig().SlotsPerEpoch))
|
||||
|
||||
bf := []byte{0xff}
|
||||
att := ðpb.Attestation{Data: ðpb.AttestationData{
|
||||
@@ -82,18 +70,12 @@ func TestVerifyIndexInCommittee_ExistsInBeaconCommittee(t *testing.T) {
|
||||
AggregationBits: bf}
|
||||
|
||||
committee, err := helpers.BeaconCommitteeFromState(s, att.Data.Slot, att.Data.CommitteeIndex)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
|
||||
if err := validateIndexInCommittee(ctx, s, att, committee[0]); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, validateIndexInCommittee(ctx, s, att, committee[0]))
|
||||
|
||||
wanted := "validator index 1000 is not within the committee"
|
||||
if err := validateIndexInCommittee(ctx, s, att, 1000); err == nil || !strings.Contains(err.Error(), wanted) {
|
||||
t.Error("Did not receive wanted error")
|
||||
}
|
||||
assert.ErrorContains(t, wanted, validateIndexInCommittee(ctx, s, att, 1000))
|
||||
}
|
||||
|
||||
func TestVerifySelection_NotAnAggregator(t *testing.T) {
|
||||
@@ -107,9 +89,7 @@ func TestVerifySelection_NotAnAggregator(t *testing.T) {
|
||||
data := ðpb.AttestationData{}
|
||||
|
||||
wanted := "validator is not an aggregator for slot"
|
||||
if err := validateSelection(ctx, beaconState, data, 0, sig.Marshal()); err == nil || !strings.Contains(err.Error(), wanted) {
|
||||
t.Error("Did not receive wanted error")
|
||||
}
|
||||
assert.ErrorContains(t, wanted, validateSelection(ctx, beaconState, data, 0, sig.Marshal()))
|
||||
}
|
||||
|
||||
func TestVerifySelection_BadSignature(t *testing.T) {
|
||||
@@ -121,9 +101,7 @@ func TestVerifySelection_BadSignature(t *testing.T) {
|
||||
data := ðpb.AttestationData{}
|
||||
|
||||
wanted := "could not validate slot signature"
|
||||
if err := validateSelection(ctx, beaconState, data, 0, sig.Marshal()); err == nil || !strings.Contains(err.Error(), wanted) {
|
||||
t.Error("Did not receive wanted error")
|
||||
}
|
||||
assert.ErrorContains(t, wanted, validateSelection(ctx, beaconState, data, 0, sig.Marshal()))
|
||||
}
|
||||
|
||||
func TestVerifySelection_CanVerify(t *testing.T) {
|
||||
@@ -133,18 +111,11 @@ func TestVerifySelection_CanVerify(t *testing.T) {
|
||||
|
||||
data := ðpb.AttestationData{}
|
||||
domain, err := helpers.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainSelectionProof, beaconState.GenesisValidatorRoot())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
slotRoot, err := helpers.ComputeSigningRoot(data.Slot, domain)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
sig := privKeys[0].Sign(slotRoot[:])
|
||||
|
||||
if err := validateSelection(ctx, beaconState, data, 0, sig.Marshal()); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, validateSelection(ctx, beaconState, data, 0, sig.Marshal()))
|
||||
}
|
||||
|
||||
func TestValidateAggregateAndProof_NoBlock(t *testing.T) {
@@ -166,9 +137,7 @@ func TestValidateAggregateAndProof_NoBlock(t *testing.T) {
signedAggregateAndProof := &ethpb.SignedAggregateAttestationAndProof{Message: aggregateAndProof}

c, err := lru.New(10)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
r := &Service{
p2p: p,
db: db,
@@ -181,9 +150,8 @@ func TestValidateAggregateAndProof_NoBlock(t *testing.T) {
}

buf := new(bytes.Buffer)
if _, err := p.Encoding().EncodeGossip(buf, signedAggregateAndProof); err != nil {
t.Fatal(err)
}
_, err = p.Encoding().EncodeGossip(buf, signedAggregateAndProof)
require.NoError(t, err)

msg := &pubsub.Message{
Message: &pubsubpb.Message{
@@ -207,17 +175,11 @@ func TestValidateAggregateAndProof_NotWithinSlotRange(t *testing.T) {
beaconState, _ := testutil.DeterministicGenesisState(t, validators)

b := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{}}
if err := db.SaveBlock(context.Background(), b); err != nil {
t.Fatal(err)
}
require.NoError(t, db.SaveBlock(context.Background(), b))
root, err := stateutil.BlockRoot(b.Block)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
s := testutil.NewBeaconState()
if err := db.SaveState(context.Background(), s, root); err != nil {
t.Fatal(err)
}
require.NoError(t, db.SaveState(context.Background(), s, root))

aggBits := bitfield.NewBitlist(3)
aggBits.SetBitAt(0, true)
@@ -236,14 +198,10 @@ func TestValidateAggregateAndProof_NotWithinSlotRange(t *testing.T) {
}
signedAggregateAndProof := &ethpb.SignedAggregateAttestationAndProof{Message: aggregateAndProof}

if err := beaconState.SetGenesisTime(uint64(time.Now().Unix())); err != nil {
t.Fatal(err)
}
require.NoError(t, beaconState.SetGenesisTime(uint64(time.Now().Unix())))

c, err := lru.New(10)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
r := &Service{
p2p: p,
db: db,
@@ -256,9 +214,8 @@ func TestValidateAggregateAndProof_NotWithinSlotRange(t *testing.T) {
}

buf := new(bytes.Buffer)
if _, err := p.Encoding().EncodeGossip(buf, signedAggregateAndProof); err != nil {
t.Fatal(err)
}
_, err = p.Encoding().EncodeGossip(buf, signedAggregateAndProof)
require.NoError(t, err)

msg := &pubsub.Message{
Message: &pubsubpb.Message{
@@ -276,9 +233,8 @@ func TestValidateAggregateAndProof_NotWithinSlotRange(t *testing.T) {
att.Data.Slot = 1<<32 - 1

buf = new(bytes.Buffer)
if _, err := p.Encoding().EncodeGossip(buf, signedAggregateAndProof); err != nil {
t.Fatal(err)
}
_, err = p.Encoding().EncodeGossip(buf, signedAggregateAndProof)
require.NoError(t, err)

msg = &pubsub.Message{
Message: &pubsubpb.Message{
@@ -301,13 +257,9 @@ func TestValidateAggregateAndProof_ExistedInPool(t *testing.T) {
beaconState, _ := testutil.DeterministicGenesisState(t, validators)

b := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{}}
if err := db.SaveBlock(context.Background(), b); err != nil {
t.Fatal(err)
}
require.NoError(t, db.SaveBlock(context.Background(), b))
root, err := stateutil.BlockRoot(b.Block)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)

aggBits := bitfield.NewBitlist(3)
aggBits.SetBitAt(0, true)
@@ -326,13 +278,9 @@ func TestValidateAggregateAndProof_ExistedInPool(t *testing.T) {
}
signedAggregateAndProof := &ethpb.SignedAggregateAttestationAndProof{Message: aggregateAndProof}

if err := beaconState.SetGenesisTime(uint64(time.Now().Unix())); err != nil {
t.Fatal(err)
}
require.NoError(t, beaconState.SetGenesisTime(uint64(time.Now().Unix())))
c, err := lru.New(10)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
r := &Service{
attPool: attestations.NewPool(),
p2p: p,
@@ -345,9 +293,8 @@ func TestValidateAggregateAndProof_ExistedInPool(t *testing.T) {
}

buf := new(bytes.Buffer)
if _, err := p.Encoding().EncodeGossip(buf, signedAggregateAndProof); err != nil {
t.Fatal(err)
}
_, err = p.Encoding().EncodeGossip(buf, signedAggregateAndProof)
require.NoError(t, err)

msg := &pubsub.Message{
Message: &pubsubpb.Message{
@@ -358,9 +305,7 @@ func TestValidateAggregateAndProof_ExistedInPool(t *testing.T) {
},
}

if err := r.attPool.SaveBlockAttestation(att); err != nil {
t.Fatal(err)
}
require.NoError(t, r.attPool.SaveBlockAttestation(att))
if r.validateAggregateAndProof(context.Background(), "", msg) == pubsub.ValidationAccept {
t.Error("Expected validate to fail")
}
@@ -377,17 +322,11 @@ func TestValidateAggregateAndProofWithNewStateMgmt_CanValidate(t *testing.T) {
beaconState, privKeys := testutil.DeterministicGenesisState(t, validators)

b := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{}}
if err := db.SaveBlock(context.Background(), b); err != nil {
t.Fatal(err)
}
require.NoError(t, db.SaveBlock(context.Background(), b))
root, err := stateutil.BlockRoot(b.Block)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
s := testutil.NewBeaconState()
if err := db.SaveState(context.Background(), s, root); err != nil {
t.Fatal(err)
}
require.NoError(t, db.SaveState(context.Background(), s, root))

aggBits := bitfield.NewBitlist(3)
aggBits.SetBitAt(0, true)
@@ -401,21 +340,13 @@ func TestValidateAggregateAndProofWithNewStateMgmt_CanValidate(t *testing.T) {
}

committee, err := helpers.BeaconCommitteeFromState(beaconState, att.Data.Slot, att.Data.CommitteeIndex)
if err != nil {
t.Error(err)
}
assert.NoError(t, err)
attestingIndices := attestationutil.AttestingIndices(att.AggregationBits, committee)
if err != nil {
t.Error(err)
}
assert.NoError(t, err)
attesterDomain, err := helpers.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorRoot())
if err != nil {
t.Fatal(err)
}
assert.NoError(t, err)
hashTreeRoot, err := helpers.ComputeSigningRoot(att.Data, attesterDomain)
if err != nil {
t.Error(err)
}
assert.NoError(t, err)
sigs := make([]bls.Signature, len(attestingIndices))
for i, indice := range attestingIndices {
sig := privKeys[indice].Sign(hashTreeRoot[:])
@@ -424,13 +355,9 @@ func TestValidateAggregateAndProofWithNewStateMgmt_CanValidate(t *testing.T) {
att.Signature = bls.AggregateSignatures(sigs).Marshal()[:]

selectionDomain, err := helpers.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainSelectionProof, beaconState.GenesisValidatorRoot())
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
slotRoot, err := helpers.ComputeSigningRoot(att.Data.Slot, selectionDomain)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)

sig := privKeys[22].Sign(slotRoot[:])
aggregateAndProof := &ethpb.AggregateAttestationAndProof{
@@ -441,23 +368,15 @@ func TestValidateAggregateAndProofWithNewStateMgmt_CanValidate(t *testing.T) {
signedAggregateAndProof := &ethpb.SignedAggregateAttestationAndProof{Message: aggregateAndProof}

attesterDomain, err = helpers.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainAggregateAndProof, beaconState.GenesisValidatorRoot())
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
signingRoot, err := helpers.ComputeSigningRoot(signedAggregateAndProof.Message, attesterDomain)
if err != nil {
t.Error(err)
}
require.NoError(t, err)
aggreSig := privKeys[22].Sign(signingRoot[:]).Marshal()
signedAggregateAndProof.Signature = aggreSig[:]

if err := beaconState.SetGenesisTime(uint64(time.Now().Unix())); err != nil {
t.Fatal(err)
}
require.NoError(t, beaconState.SetGenesisTime(uint64(time.Now().Unix())))
c, err := lru.New(10)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
r := &Service{
p2p: p,
db: db,
@@ -474,9 +393,8 @@ func TestValidateAggregateAndProofWithNewStateMgmt_CanValidate(t *testing.T) {
}

buf := new(bytes.Buffer)
if _, err := p.Encoding().EncodeGossip(buf, signedAggregateAndProof); err != nil {
t.Fatal(err)
}
_, err = p.Encoding().EncodeGossip(buf, signedAggregateAndProof)
require.NoError(t, err)

msg := &pubsub.Message{
Message: &pubsubpb.Message{
@@ -487,13 +405,8 @@ func TestValidateAggregateAndProofWithNewStateMgmt_CanValidate(t *testing.T) {
},
}

if r.validateAggregateAndProof(context.Background(), "", msg) != pubsub.ValidationAccept {
t.Fatal("Validated status is false")
}

if msg.ValidatorData == nil {
t.Error("Did not set validator data")
}
assert.Equal(t, pubsub.ValidationAccept, r.validateAggregateAndProof(context.Background(), "", msg), "Validated status is false")
assert.NotNil(t, msg.ValidatorData, "Did not set validator data")
}

func TestVerifyIndexInCommittee_SeenAggregatorEpoch(t *testing.T) {
@@ -504,17 +417,11 @@ func TestVerifyIndexInCommittee_SeenAggregatorEpoch(t *testing.T) {
beaconState, privKeys := testutil.DeterministicGenesisState(t, validators)

b := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{}}
if err := db.SaveBlock(context.Background(), b); err != nil {
t.Fatal(err)
}
require.NoError(t, db.SaveBlock(context.Background(), b))
root, err := stateutil.BlockRoot(b.Block)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
s := testutil.NewBeaconState()
if err := db.SaveState(context.Background(), s, root); err != nil {
t.Fatal(err)
}
require.NoError(t, db.SaveState(context.Background(), s, root))

aggBits := bitfield.NewBitlist(3)
aggBits.SetBitAt(0, true)
@@ -528,18 +435,12 @@ func TestVerifyIndexInCommittee_SeenAggregatorEpoch(t *testing.T) {
}

committee, err := helpers.BeaconCommitteeFromState(beaconState, att.Data.Slot, att.Data.CommitteeIndex)
if err != nil {
t.Error(err)
}
require.NoError(t, err)
attestingIndices := attestationutil.AttestingIndices(att.AggregationBits, committee)
attesterDomain, err := helpers.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorRoot())
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
hashTreeRoot, err := helpers.ComputeSigningRoot(att.Data, attesterDomain)
if err != nil {
t.Error(err)
}
assert.NoError(t, err)
sigs := make([]bls.Signature, len(attestingIndices))
for i, indice := range attestingIndices {
sig := privKeys[indice].Sign(hashTreeRoot[:])
@@ -548,13 +449,9 @@ func TestVerifyIndexInCommittee_SeenAggregatorEpoch(t *testing.T) {
att.Signature = bls.AggregateSignatures(sigs).Marshal()[:]

selectionDomain, err := helpers.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainSelectionProof, beaconState.GenesisValidatorRoot())
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
slotRoot, err := helpers.ComputeSigningRoot(att.Data.Slot, selectionDomain)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)

sig := privKeys[22].Sign(slotRoot[:])
aggregateAndProof := &ethpb.AggregateAttestationAndProof{
@@ -565,24 +462,16 @@ func TestVerifyIndexInCommittee_SeenAggregatorEpoch(t *testing.T) {
signedAggregateAndProof := &ethpb.SignedAggregateAttestationAndProof{Message: aggregateAndProof}

attesterDomain, err = helpers.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainAggregateAndProof, beaconState.GenesisValidatorRoot())
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
signingRoot, err := helpers.ComputeSigningRoot(signedAggregateAndProof.Message, attesterDomain)
if err != nil {
t.Error(err)
}
assert.NoError(t, err)
aggreSig := privKeys[22].Sign(signingRoot[:]).Marshal()
signedAggregateAndProof.Signature = aggreSig[:]

if err := beaconState.SetGenesisTime(uint64(time.Now().Unix())); err != nil {
t.Fatal(err)
}
require.NoError(t, beaconState.SetGenesisTime(uint64(time.Now().Unix())))

c, err := lru.New(10)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
r := &Service{
p2p: p,
db: db,
@@ -601,9 +490,8 @@ func TestVerifyIndexInCommittee_SeenAggregatorEpoch(t *testing.T) {
}

buf := new(bytes.Buffer)
if _, err := p.Encoding().EncodeGossip(buf, signedAggregateAndProof); err != nil {
t.Fatal(err)
}
_, err = p.Encoding().EncodeGossip(buf, signedAggregateAndProof)
require.NoError(t, err)

msg := &pubsub.Message{
Message: &pubsubpb.Message{
@@ -614,16 +502,13 @@ func TestVerifyIndexInCommittee_SeenAggregatorEpoch(t *testing.T) {
},
}

if r.validateAggregateAndProof(context.Background(), "", msg) != pubsub.ValidationAccept {
t.Fatal("Validated status is false")
}
require.Equal(t, pubsub.ValidationAccept, r.validateAggregateAndProof(context.Background(), "", msg), "Validated status is false")

// Should fail with another attestation in the same epoch.
signedAggregateAndProof.Message.Aggregate.Data.Slot++
buf = new(bytes.Buffer)
if _, err := p.Encoding().EncodeGossip(buf, signedAggregateAndProof); err != nil {
t.Fatal(err)
}
_, err = p.Encoding().EncodeGossip(buf, signedAggregateAndProof)
require.NoError(t, err)
msg = &pubsub.Message{
Message: &pubsubpb.Message{
Data: buf.Bytes(),

@@ -21,6 +21,8 @@ import (
"github.com/prysmaticlabs/prysm/shared/bls"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
)

func setupValidAttesterSlashing(t *testing.T) (*ethpb.AttesterSlashing, *stateTrie.BeaconState) {
@@ -29,9 +31,7 @@ func setupValidAttesterSlashing(t *testing.T) (*ethpb.AttesterSlashing, *stateTr
for _, vv := range vals {
vv.WithdrawableEpoch = 1 * params.BeaconConfig().SlotsPerEpoch
}
if err := state.SetValidators(vals); err != nil {
t.Fatal(err)
}
require.NoError(t, state.SetValidators(vals))

att1 := &ethpb.IndexedAttestation{
Data: &ethpb.AttestationData{
@@ -41,13 +41,9 @@ func setupValidAttesterSlashing(t *testing.T) (*ethpb.AttesterSlashing, *stateTr
AttestingIndices: []uint64{0, 1},
}
domain, err := helpers.Domain(state.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, state.GenesisValidatorRoot())
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
hashTreeRoot, err := helpers.ComputeSigningRoot(att1.Data, domain)
if err != nil {
t.Error(err)
}
assert.NoError(t, err)
sig0 := privKeys[0].Sign(hashTreeRoot[:])
sig1 := privKeys[1].Sign(hashTreeRoot[:])
aggregateSig := bls.AggregateSignatures([]bls.Signature{sig0, sig1})
@@ -61,9 +57,7 @@ func setupValidAttesterSlashing(t *testing.T) (*ethpb.AttesterSlashing, *stateTr
AttestingIndices: []uint64{0, 1},
}
hashTreeRoot, err = helpers.ComputeSigningRoot(att2.Data, domain)
if err != nil {
t.Error(err)
}
assert.NoError(t, err)
sig0 = privKeys[0].Sign(hashTreeRoot[:])
sig1 = privKeys[1].Sign(hashTreeRoot[:])
aggregateSig = bls.AggregateSignatures([]bls.Signature{sig0, sig1})
@@ -75,14 +69,11 @@ func setupValidAttesterSlashing(t *testing.T) (*ethpb.AttesterSlashing, *stateTr
}

currentSlot := 2 * params.BeaconConfig().SlotsPerEpoch
if err := state.SetSlot(currentSlot); err != nil {
t.Fatal(err)
}
require.NoError(t, state.SetSlot(currentSlot))

b := make([]byte, 32)
if _, err := rand.Read(b); err != nil {
t.Fatal(err)
}
_, err = rand.Read(b)
require.NoError(t, err)

return slashing, state
}
@@ -94,9 +85,7 @@ func TestValidateAttesterSlashing_ValidSlashing(t *testing.T) {
slashing, s := setupValidAttesterSlashing(t)

c, err := lru.New(10)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
r := &Service{
p2p: p,
chain: &mock.ChainService{State: s},
@@ -105,9 +94,8 @@ func TestValidateAttesterSlashing_ValidSlashing(t *testing.T) {
}

buf := new(bytes.Buffer)
if _, err := p.Encoding().EncodeGossip(buf, slashing); err != nil {
t.Fatal(err)
}
_, err = p.Encoding().EncodeGossip(buf, slashing)
require.NoError(t, err)

msg := &pubsub.Message{
Message: &pubsubpb.Message{
@@ -119,13 +107,8 @@ func TestValidateAttesterSlashing_ValidSlashing(t *testing.T) {
}
valid := r.validateAttesterSlashing(ctx, "foobar", msg) == pubsub.ValidationAccept

if !valid {
t.Error("Failed Validation")
}

if msg.ValidatorData == nil {
t.Error("Decoded message was not set on the message validator data")
}
assert.Equal(t, true, valid, "Failed Validation")
assert.NotNil(t, msg.ValidatorData, "Decoded message was not set on the message validator data")
}

func TestValidateAttesterSlashing_ContextTimeout(t *testing.T) {
@@ -138,9 +121,7 @@ func TestValidateAttesterSlashing_ContextTimeout(t *testing.T) {
defer cancel()

c, err := lru.New(10)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
r := &Service{
p2p: p,
chain: &mock.ChainService{State: state},
@@ -149,9 +130,8 @@ func TestValidateAttesterSlashing_ContextTimeout(t *testing.T) {
}

buf := new(bytes.Buffer)
if _, err := p.Encoding().EncodeGossip(buf, slashing); err != nil {
t.Fatal(err)
}
_, err = p.Encoding().EncodeGossip(buf, slashing)
require.NoError(t, err)

msg := &pubsub.Message{
Message: &pubsubpb.Message{
@@ -162,10 +142,7 @@ func TestValidateAttesterSlashing_ContextTimeout(t *testing.T) {
},
}
valid := r.validateAttesterSlashing(ctx, "", msg) == pubsub.ValidationAccept

if valid {
t.Error("slashing from the far distant future should have timed out and returned false")
}
assert.Equal(t, false, valid, "slashing from the far distant future should have timed out and returned false")
}

func TestValidateAttesterSlashing_Syncing(t *testing.T) {
@@ -181,9 +158,8 @@ func TestValidateAttesterSlashing_Syncing(t *testing.T) {
}

buf := new(bytes.Buffer)
if _, err := p.Encoding().EncodeGossip(buf, slashing); err != nil {
t.Fatal(err)
}
_, err := p.Encoding().EncodeGossip(buf, slashing)
require.NoError(t, err)
msg := &pubsub.Message{
Message: &pubsubpb.Message{
Data: buf.Bytes(),
@@ -193,7 +169,5 @@ func TestValidateAttesterSlashing_Syncing(t *testing.T) {
},
}
valid := r.validateAttesterSlashing(ctx, "", msg) == pubsub.ValidationAccept
if valid {
t.Error("Passed validation")
}
assert.Equal(t, false, valid, "Passed validation")
}

@@ -22,6 +22,7 @@ import (
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
)

func TestService_validateCommitteeIndexBeaconAttestation(t *testing.T) {
@@ -38,9 +39,7 @@ func TestService_validateCommitteeIndexBeaconAttestation(t *testing.T) {
}

c, err := lru.New(10)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
s := &Service{
initialSync: &mockSync.Sync{IsSyncing: false},
p2p: p,
@@ -51,32 +50,22 @@ func TestService_validateCommitteeIndexBeaconAttestation(t *testing.T) {
stateSummaryCache: cache.NewStateSummaryCache(),
}
digest, err := s.forkDigest()
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)

blk := &ethpb.SignedBeaconBlock{
Block: &ethpb.BeaconBlock{
Slot: 1,
},
}
if err := db.SaveBlock(ctx, blk); err != nil {
t.Fatal(err)
}
require.NoError(t, db.SaveBlock(ctx, blk))

validBlockRoot, err := stateutil.BlockRoot(blk.Block)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)

validators := uint64(64)
savedState, keys := testutil.DeterministicGenesisState(t, validators)
if err := savedState.SetSlot(1); err != nil {
t.Fatal(err)
}
if err := db.SaveState(context.Background(), savedState, validBlockRoot); err != nil {
t.Fatal(err)
}
require.NoError(t, savedState.SetSlot(1))
require.NoError(t, db.SaveState(context.Background(), savedState, validBlockRoot))
chain.State = savedState

tests := []struct {
@@ -186,17 +175,11 @@ func TestService_validateCommitteeIndexBeaconAttestation(t *testing.T) {
chain.ValidAttestation = tt.validAttestationSignature
if tt.validAttestationSignature {
com, err := helpers.BeaconCommitteeFromState(savedState, tt.msg.Data.Slot, tt.msg.Data.CommitteeIndex)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
domain, err := helpers.Domain(savedState.Fork(), tt.msg.Data.Target.Epoch, params.BeaconConfig().DomainBeaconAttester, savedState.GenesisValidatorRoot())
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
attRoot, err := helpers.ComputeSigningRoot(tt.msg.Data, domain)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
for i := 0; ; i++ {
if tt.msg.AggregationBits.BitAt(uint64(i)) {
tt.msg.Signature = keys[com[i]].Sign(attRoot[:]).Marshal()
@@ -206,9 +189,7 @@ func TestService_validateCommitteeIndexBeaconAttestation(t *testing.T) {
}
buf := new(bytes.Buffer)
_, err := p.Encoding().EncodeGossip(buf, tt.msg)
if err != nil {
t.Error(err)
}
require.NoError(t, err)
m := &pubsub.Message{
Message: &pubsubpb.Message{
Data: buf.Bytes(),

@@ -28,6 +28,8 @@ import (
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
logTest "github.com/sirupsen/logrus/hooks/test"
)

@@ -49,9 +51,7 @@ func TestValidateBeaconBlockPubSub_InvalidSignature(t *testing.T) {
p := p2ptest.NewTestP2P(t)

c, err := lru.New(10)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
chainService := &mock.ChainService{Genesis: time.Now(),
FinalizedCheckPoint: &ethpb.Checkpoint{
Epoch: 0,
@@ -66,9 +66,8 @@ func TestValidateBeaconBlockPubSub_InvalidSignature(t *testing.T) {
}

buf := new(bytes.Buffer)
if _, err := p.Encoding().EncodeGossip(buf, msg); err != nil {
t.Fatal(err)
}
_, err = p.Encoding().EncodeGossip(buf, msg)
require.NoError(t, err)
m := &pubsub.Message{
Message: &pubsubpb.Message{
Data: buf.Bytes(),
@@ -78,10 +77,7 @@ func TestValidateBeaconBlockPubSub_InvalidSignature(t *testing.T) {
},
}
result := r.validateBeaconBlockPubSub(ctx, "", m) == pubsub.ValidationAccept

if result {
t.Error("Expected false result, got true")
}
assert.Equal(t, false, result)
}

func TestValidateBeaconBlockPubSub_BlockAlreadyPresentInDB(t *testing.T) {
|
||||
@@ -95,14 +91,10 @@ func TestValidateBeaconBlockPubSub_BlockAlreadyPresentInDB(t *testing.T) {
|
||||
ParentRoot: testutil.Random32Bytes(t),
|
||||
},
|
||||
}
|
||||
if err := db.SaveBlock(context.Background(), msg); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, db.SaveBlock(context.Background(), msg))
|
||||
|
||||
c, err := lru.New(10)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
chainService := &mock.ChainService{Genesis: time.Now()}
|
||||
r := &Service{
|
||||
db: db,
|
||||
@@ -115,9 +107,8 @@ func TestValidateBeaconBlockPubSub_BlockAlreadyPresentInDB(t *testing.T) {
|
||||
}
|
||||
|
||||
buf := new(bytes.Buffer)
|
||||
if _, err := p.Encoding().EncodeGossip(buf, msg); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
_, err = p.Encoding().EncodeGossip(buf, msg)
|
||||
require.NoError(t, err)
|
||||
|
||||
m := &pubsub.Message{
|
||||
Message: &pubsubpb.Message{
|
||||
@@ -128,9 +119,7 @@ func TestValidateBeaconBlockPubSub_BlockAlreadyPresentInDB(t *testing.T) {
|
||||
},
|
||||
}
|
||||
result := r.validateBeaconBlockPubSub(ctx, "", m) == pubsub.ValidationAccept
|
||||
if result {
|
||||
t.Error("Expected false result, got true")
|
||||
}
|
||||
assert.Equal(t, false, result)
|
||||
}
|
||||
|
||||
func TestValidateBeaconBlockPubSub_ValidProposerSignature(t *testing.T) {
|
||||
@@ -144,26 +133,15 @@ func TestValidateBeaconBlockPubSub_ValidProposerSignature(t *testing.T) {
|
||||
Slot: 0,
|
||||
},
|
||||
}
|
||||
if err := db.SaveBlock(ctx, parentBlock); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, db.SaveBlock(ctx, parentBlock))
|
||||
bRoot, err := stateutil.BlockRoot(parentBlock.Block)
|
||||
if err := db.SaveState(ctx, beaconState, bRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := db.SaveStateSummary(ctx, &pb.StateSummary{
|
||||
Root: bRoot[:],
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.SaveState(ctx, beaconState, bRoot))
|
||||
require.NoError(t, db.SaveStateSummary(ctx, &pb.StateSummary{Root: bRoot[:]}))
|
||||
copied := beaconState.Copy()
|
||||
if err := copied.SetSlot(1); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, copied.SetSlot(1))
|
||||
proposerIdx, err := helpers.BeaconProposerIndex(copied)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
msg := ðpb.SignedBeaconBlock{
|
||||
Block: ðpb.BeaconBlock{
|
||||
ProposerIndex: proposerIdx,
|
||||
@@ -173,20 +151,14 @@ func TestValidateBeaconBlockPubSub_ValidProposerSignature(t *testing.T) {
|
||||
}
|
||||
|
||||
domain, err := helpers.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorRoot())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
signingRoot, err := helpers.ComputeSigningRoot(msg.Block, domain)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
blockSig := privKeys[proposerIdx].Sign(signingRoot[:]).Marshal()
|
||||
msg.Signature = blockSig[:]
|
||||
|
||||
c, err := lru.New(10)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
stateGen := stategen.New(db, stateSummaryCache)
|
||||
chainService := &mock.ChainService{Genesis: time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot), 0),
|
||||
State: beaconState,
|
||||
@@ -206,9 +178,8 @@ func TestValidateBeaconBlockPubSub_ValidProposerSignature(t *testing.T) {
|
||||
stateGen: stateGen,
|
||||
}
|
||||
buf := new(bytes.Buffer)
|
||||
if _, err := p.Encoding().EncodeGossip(buf, msg); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
_, err = p.Encoding().EncodeGossip(buf, msg)
|
||||
require.NoError(t, err)
|
||||
m := &pubsub.Message{
|
||||
Message: &pubsubpb.Message{
|
||||
Data: buf.Bytes(),
|
||||
@@ -218,13 +189,8 @@ func TestValidateBeaconBlockPubSub_ValidProposerSignature(t *testing.T) {
|
||||
},
|
||||
}
|
||||
result := r.validateBeaconBlockPubSub(ctx, "", m) == pubsub.ValidationAccept
|
||||
if !result {
|
||||
t.Error("Expected true result, got false")
|
||||
}
|
||||
|
||||
if m.ValidatorData == nil {
|
||||
t.Error("Decoded message was not set on the message validator data")
|
||||
}
|
||||
assert.Equal(t, true, result)
|
||||
assert.NotNil(t, m.ValidatorData, "Decoded message was not set on the message validator data")
|
||||
}
|
||||
|
||||
func TestValidateBeaconBlockPubSub_AdvanceEpochsForState(t *testing.T) {
@@ -238,29 +204,18 @@ func TestValidateBeaconBlockPubSub_AdvanceEpochsForState(t *testing.T) {
Slot: 0,
},
}
if err := db.SaveBlock(ctx, parentBlock); err != nil {
t.Fatal(err)
}
require.NoError(t, db.SaveBlock(ctx, parentBlock))
bRoot, err := stateutil.BlockRoot(parentBlock.Block)
if err := db.SaveState(ctx, beaconState, bRoot); err != nil {
t.Fatal(err)
}
if err := db.SaveStateSummary(ctx, &pb.StateSummary{
Root: bRoot[:],
}); err != nil {
t.Fatal(err)
}
require.NoError(t, err)
require.NoError(t, db.SaveState(ctx, beaconState, bRoot))
require.NoError(t, db.SaveStateSummary(ctx, &pb.StateSummary{Root: bRoot[:]}))
copied := beaconState.Copy()
// The next block is at least 2 epochs ahead to induce shuffling and a new seed.
blkSlot := params.BeaconConfig().SlotsPerEpoch * 2
copied, err = state.ProcessSlots(context.Background(), copied, blkSlot)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
proposerIdx, err := helpers.BeaconProposerIndex(copied)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
msg := &ethpb.SignedBeaconBlock{
Block: &ethpb.BeaconBlock{
ProposerIndex: proposerIdx,
@@ -270,20 +225,14 @@ func TestValidateBeaconBlockPubSub_AdvanceEpochsForState(t *testing.T) {
}

domain, err := helpers.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorRoot())
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
signingRoot, err := helpers.ComputeSigningRoot(msg.Block, domain)
if err != nil {
t.Error(err)
}
require.NoError(t, err)
blockSig := privKeys[proposerIdx].Sign(signingRoot[:]).Marshal()
msg.Signature = blockSig[:]

c, err := lru.New(10)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
stateGen := stategen.New(db, stateSummaryCache)
chainService := &mock.ChainService{Genesis: time.Unix(time.Now().Unix()-int64(blkSlot*params.BeaconConfig().SecondsPerSlot), 0),
State: beaconState,
@@ -303,9 +252,8 @@ func TestValidateBeaconBlockPubSub_AdvanceEpochsForState(t *testing.T) {
stateGen: stateGen,
}
buf := new(bytes.Buffer)
if _, err := p.Encoding().EncodeGossip(buf, msg); err != nil {
t.Fatal(err)
}
_, err = p.Encoding().EncodeGossip(buf, msg)
require.NoError(t, err)
m := &pubsub.Message{
Message: &pubsubpb.Message{
Data: buf.Bytes(),
@@ -315,13 +263,8 @@ func TestValidateBeaconBlockPubSub_AdvanceEpochsForState(t *testing.T) {
},
}
result := r.validateBeaconBlockPubSub(ctx, "", m) == pubsub.ValidationAccept
if !result {
t.Error("Expected true result, got false")
}

if m.ValidatorData == nil {
t.Error("Decoded message was not set on the message validator data")
}
assert.Equal(t, true, result)
assert.NotNil(t, m.ValidatorData, "Decoded message was not set on the message validator data")
}

func TestValidateBeaconBlockPubSub_Syncing(t *testing.T) {
@@ -331,9 +274,7 @@ func TestValidateBeaconBlockPubSub_Syncing(t *testing.T) {
b := []byte("sk")
b32 := bytesutil.ToBytes32(b)
sk, err := bls.SecretKeyFromBytes(b32[:])
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
msg := &ethpb.SignedBeaconBlock{
Block: &ethpb.BeaconBlock{
ParentRoot: testutil.Random32Bytes(t),
@@ -354,9 +295,8 @@ func TestValidateBeaconBlockPubSub_Syncing(t *testing.T) {
}

buf := new(bytes.Buffer)
if _, err := p.Encoding().EncodeGossip(buf, msg); err != nil {
t.Fatal(err)
}
_, err = p.Encoding().EncodeGossip(buf, msg)
require.NoError(t, err)
m := &pubsub.Message{
Message: &pubsubpb.Message{
Data: buf.Bytes(),
@@ -366,9 +306,7 @@ func TestValidateBeaconBlockPubSub_Syncing(t *testing.T) {
},
}
result := r.validateBeaconBlockPubSub(ctx, "", m) == pubsub.ValidationAccept
if result {
t.Error("Expected false result, got true")
}
assert.Equal(t, false, result)
}

func TestValidateBeaconBlockPubSub_RejectBlocksFromFuture(t *testing.T) {
@@ -378,9 +316,7 @@ func TestValidateBeaconBlockPubSub_RejectBlocksFromFuture(t *testing.T) {
b := []byte("sk")
b32 := bytesutil.ToBytes32(b)
sk, err := bls.SecretKeyFromBytes(b32[:])
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
msg := &ethpb.SignedBeaconBlock{
Block: &ethpb.BeaconBlock{
ParentRoot: testutil.Random32Bytes(t),
@@ -390,9 +326,7 @@ func TestValidateBeaconBlockPubSub_RejectBlocksFromFuture(t *testing.T) {
}

c, err := lru.New(10)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
chainService := &mock.ChainService{Genesis: time.Now()}
r := &Service{
p2p: p,
@@ -406,9 +340,8 @@ func TestValidateBeaconBlockPubSub_RejectBlocksFromFuture(t *testing.T) {
}

buf := new(bytes.Buffer)
if _, err := p.Encoding().EncodeGossip(buf, msg); err != nil {
t.Fatal(err)
}
_, err = p.Encoding().EncodeGossip(buf, msg)
require.NoError(t, err)
m := &pubsub.Message{
Message: &pubsubpb.Message{
Data: buf.Bytes(),
@@ -418,9 +351,7 @@ func TestValidateBeaconBlockPubSub_RejectBlocksFromFuture(t *testing.T) {
},
}
result := r.validateBeaconBlockPubSub(ctx, "", m) == pubsub.ValidationAccept
if result {
t.Error("Expected false result, got true")
}
assert.Equal(t, false, result)
}

func TestValidateBeaconBlockPubSub_RejectBlocksFromThePast(t *testing.T) {
@@ -430,9 +361,7 @@ func TestValidateBeaconBlockPubSub_RejectBlocksFromThePast(t *testing.T) {
p := p2ptest.NewTestP2P(t)
ctx := context.Background()
sk, err := bls.SecretKeyFromBytes(b32[:])
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
msg := &ethpb.SignedBeaconBlock{
Block: &ethpb.BeaconBlock{
ParentRoot: testutil.Random32Bytes(t),
@@ -443,9 +372,7 @@ func TestValidateBeaconBlockPubSub_RejectBlocksFromThePast(t *testing.T) {

genesisTime := time.Now()
c, err := lru.New(10)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
chainService := &mock.ChainService{
Genesis: time.Unix(genesisTime.Unix()-1000, 0),
FinalizedCheckPoint: &ethpb.Checkpoint{
@@ -461,9 +388,8 @@ func TestValidateBeaconBlockPubSub_RejectBlocksFromThePast(t *testing.T) {
}

buf := new(bytes.Buffer)
if _, err := p.Encoding().EncodeGossip(buf, msg); err != nil {
t.Fatal(err)
}
_, err = p.Encoding().EncodeGossip(buf, msg)
require.NoError(t, err)
m := &pubsub.Message{
Message: &pubsubpb.Message{
Data: buf.Bytes(),
@@ -473,10 +399,7 @@ func TestValidateBeaconBlockPubSub_RejectBlocksFromThePast(t *testing.T) {
},
}
result := r.validateBeaconBlockPubSub(ctx, "", m) == pubsub.ValidationAccept

if result {
t.Error("Expected false result, got true")
}
assert.Equal(t, false, result)
}

func TestValidateBeaconBlockPubSub_SeenProposerSlot(t *testing.T) {
@@ -490,20 +413,12 @@ func TestValidateBeaconBlockPubSub_SeenProposerSlot(t *testing.T) {
Slot: 0,
},
}
if err := db.SaveBlock(ctx, parentBlock); err != nil {
t.Fatal(err)
}
require.NoError(t, db.SaveBlock(ctx, parentBlock))
bRoot, err := stateutil.BlockRoot(parentBlock.Block)
if err := db.SaveState(ctx, beaconState, bRoot); err != nil {
t.Fatal(err)
}
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
require.NoError(t, db.SaveState(ctx, beaconState, bRoot))
proposerIdx, err := helpers.BeaconProposerIndex(beaconState)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)

msg := &ethpb.SignedBeaconBlock{
Block: &ethpb.BeaconBlock{
@@ -514,20 +429,14 @@ func TestValidateBeaconBlockPubSub_SeenProposerSlot(t *testing.T) {
}

domain, err := helpers.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorRoot())
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
signingRoot, err := helpers.ComputeSigningRoot(msg.Block, domain)
if err != nil {
t.Error(err)
}
require.NoError(t, err)
blockSig := privKeys[proposerIdx].Sign(signingRoot[:]).Marshal()
msg.Signature = blockSig[:]

c, err := lru.New(10)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
chainService := &mock.ChainService{Genesis: time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot), 0),
State: beaconState,
FinalizedCheckPoint: &ethpb.Checkpoint{
@@ -546,9 +455,8 @@ func TestValidateBeaconBlockPubSub_SeenProposerSlot(t *testing.T) {
}

buf := new(bytes.Buffer)
if _, err := p.Encoding().EncodeGossip(buf, msg); err != nil {
t.Fatal(err)
}
_, err = p.Encoding().EncodeGossip(buf, msg)
require.NoError(t, err)
m := &pubsub.Message{
Message: &pubsubpb.Message{
Data: buf.Bytes(),
@@ -560,9 +468,7 @@ func TestValidateBeaconBlockPubSub_SeenProposerSlot(t *testing.T) {
r.setSeenBlockIndexSlot(msg.Block.Slot, msg.Block.ProposerIndex)
time.Sleep(10 * time.Millisecond) // Wait for cached value to pass through buffers.
result := r.validateBeaconBlockPubSub(ctx, "", m) == pubsub.ValidationAccept
if result {
t.Error("Expected false result, got true")
}
assert.Equal(t, false, result)
}

func TestValidateBeaconBlockPubSub_FilterByFinalizedEpoch(t *testing.T) {
@@ -571,21 +477,15 @@ func TestValidateBeaconBlockPubSub_FilterByFinalizedEpoch(t *testing.T) {
p := p2ptest.NewTestP2P(t)

parent := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{}}
if err := db.SaveBlock(context.Background(), parent); err != nil {
t.Fatal(err)
}
require.NoError(t, db.SaveBlock(context.Background(), parent))
parentRoot, err := stateutil.BlockRoot(parent.Block)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
chain := &mock.ChainService{Genesis: time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot), 0),
FinalizedCheckPoint: &ethpb.Checkpoint{
Epoch: 1,
}}
c, err := lru.New(10)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
r := &Service{
db: db,
p2p: p,
@@ -600,9 +500,8 @@ func TestValidateBeaconBlockPubSub_FilterByFinalizedEpoch(t *testing.T) {
Block: &ethpb.BeaconBlock{Slot: 1, ParentRoot: parentRoot[:], Body: &ethpb.BeaconBlockBody{}},
}
buf := new(bytes.Buffer)
if _, err := p.Encoding().EncodeGossip(buf, b); err != nil {
t.Fatal(err)
}
_, err = p.Encoding().EncodeGossip(buf, b)
require.NoError(t, err)
m := &pubsub.Message{
Message: &pubsubpb.Message{
Data: buf.Bytes(),
@@ -618,9 +517,8 @@ func TestValidateBeaconBlockPubSub_FilterByFinalizedEpoch(t *testing.T) {
hook.Reset()
b.Block.Slot = params.BeaconConfig().SlotsPerEpoch
buf = new(bytes.Buffer)
if _, err := p.Encoding().EncodeGossip(buf, b); err != nil {
t.Fatal(err)
}
_, err = p.Encoding().EncodeGossip(buf, b)
require.NoError(t, err)
m = &pubsub.Message{
Message: &pubsubpb.Message{
Data: buf.Bytes(),
@@ -646,26 +544,15 @@ func TestValidateBeaconBlockPubSub_ParentNotFinalizedDescendant(t *testing.T) {
Slot: 0,
},
}
if err := db.SaveBlock(ctx, parentBlock); err != nil {
t.Fatal(err)
}
require.NoError(t, db.SaveBlock(ctx, parentBlock))
bRoot, err := stateutil.BlockRoot(parentBlock.Block)
if err := db.SaveState(ctx, beaconState, bRoot); err != nil {
t.Fatal(err)
}
if err := db.SaveStateSummary(ctx, &pb.StateSummary{
Root: bRoot[:],
}); err != nil {
t.Fatal(err)
}
require.NoError(t, err)
require.NoError(t, db.SaveState(ctx, beaconState, bRoot))
require.NoError(t, db.SaveStateSummary(ctx, &pb.StateSummary{Root: bRoot[:]}))
copied := beaconState.Copy()
if err := copied.SetSlot(1); err != nil {
t.Fatal(err)
}
require.NoError(t, copied.SetSlot(1))
proposerIdx, err := helpers.BeaconProposerIndex(copied)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
msg := &ethpb.SignedBeaconBlock{
Block: &ethpb.BeaconBlock{
ProposerIndex: proposerIdx,
@@ -675,20 +562,14 @@ func TestValidateBeaconBlockPubSub_ParentNotFinalizedDescendant(t *testing.T) {
}

domain, err := helpers.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorRoot())
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
signingRoot, err := helpers.ComputeSigningRoot(msg.Block, domain)
if err != nil {
t.Error(err)
}
require.NoError(t, err)
blockSig := privKeys[proposerIdx].Sign(signingRoot[:]).Marshal()
msg.Signature = blockSig[:]

c, err := lru.New(10)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
stateGen := stategen.New(db, stateSummaryCache)
chainService := &mock.ChainService{Genesis: time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot), 0),
State: beaconState,
@@ -710,9 +591,8 @@ func TestValidateBeaconBlockPubSub_ParentNotFinalizedDescendant(t *testing.T) {
stateGen: stateGen,
}
buf := new(bytes.Buffer)
if _, err := p.Encoding().EncodeGossip(buf, msg); err != nil {
t.Fatal(err)
}
_, err = p.Encoding().EncodeGossip(buf, msg)
require.NoError(t, err)
m := &pubsub.Message{
Message: &pubsubpb.Message{
Data: buf.Bytes(),
@@ -721,8 +601,6 @@ func TestValidateBeaconBlockPubSub_ParentNotFinalizedDescendant(t *testing.T) {
},
},
}
if res := r.validateBeaconBlockPubSub(ctx, "", m); res != pubsub.ValidationReject {
t.Error("Wrong validation result returned")
}
assert.Equal(t, pubsub.ValidationReject, r.validateBeaconBlockPubSub(ctx, "", m), "Wrong validation result returned")
testutil.AssertLogsContain(t, hook, "not part of finalized chain")
}

@@ -22,6 +22,8 @@ import (
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/bls"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
)

func setupValidProposerSlashing(t *testing.T) (*ethpb.ProposerSlashing, *stateTrie.BeaconState) {
@@ -57,9 +59,7 @@ func setupValidProposerSlashing(t *testing.T) (*ethpb.ProposerSlashing, *stateTr
BlockRoots: make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot),
LatestBlockHeader: &ethpb.BeaconBlockHeader{},
})
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)

domain, err := helpers.Domain(
state.Fork(),
@@ -67,9 +67,7 @@ func setupValidProposerSlashing(t *testing.T) (*ethpb.ProposerSlashing, *stateTr
params.BeaconConfig().DomainBeaconProposer,
state.GenesisValidatorRoot(),
)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
privKey := bls.RandKey()

someRoot := [32]byte{1, 2, 3}
@@ -84,9 +82,7 @@ func setupValidProposerSlashing(t *testing.T) (*ethpb.ProposerSlashing, *stateTr
},
}
signingRoot, err := helpers.ComputeSigningRoot(header1.Header, domain)
if err != nil {
t.Errorf("Could not get signing root of beacon block header: %v", err)
}
assert.NoError(t, err, "Could not get signing root of beacon block header")
header1.Signature = privKey.Sign(signingRoot[:]).Marshal()[:]

header2 := &ethpb.SignedBeaconBlockHeader{
@@ -99,9 +95,7 @@ func setupValidProposerSlashing(t *testing.T) (*ethpb.ProposerSlashing, *stateTr
},
}
signingRoot, err = helpers.ComputeSigningRoot(header2.Header, domain)
if err != nil {
t.Errorf("Could not get signing root of beacon block header: %v", err)
}
assert.NoError(t, err, "Could not get signing root of beacon block header")
header2.Signature = privKey.Sign(signingRoot[:]).Marshal()[:]

slashing := &ethpb.ProposerSlashing{
@@ -109,18 +103,13 @@ func setupValidProposerSlashing(t *testing.T) (*ethpb.ProposerSlashing, *stateTr
Header_2: header2,
}
val, err := state.ValidatorAtIndex(1)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
val.PublicKey = privKey.PublicKey().Marshal()[:]
if err := state.UpdateValidatorAtIndex(1, val); err != nil {
t.Fatal(err)
}
require.NoError(t, state.UpdateValidatorAtIndex(1, val))

b := make([]byte, 32)
if _, err := rand.Read(b); err != nil {
t.Fatal(err)
}
_, err = rand.Read(b)
require.NoError(t, err)

return slashing, state
}
@@ -132,9 +121,7 @@ func TestValidateProposerSlashing_ValidSlashing(t *testing.T) {
slashing, s := setupValidProposerSlashing(t)

c, err := lru.New(10)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
r := &Service{
p2p: p,
chain: &mock.ChainService{State: s},
@@ -143,9 +130,8 @@ func TestValidateProposerSlashing_ValidSlashing(t *testing.T) {
}

buf := new(bytes.Buffer)
if _, err := p.Encoding().EncodeGossip(buf, slashing); err != nil {
t.Fatal(err)
}
_, err = p.Encoding().EncodeGossip(buf, slashing)
require.NoError(t, err)
m := &pubsub.Message{
Message: &pubsubpb.Message{
Data: buf.Bytes(),
@@ -156,13 +142,8 @@ func TestValidateProposerSlashing_ValidSlashing(t *testing.T) {
}

valid := r.validateProposerSlashing(ctx, "", m) == pubsub.ValidationAccept
if !valid {
t.Error("Failed validation")
}

if m.ValidatorData == nil {
t.Error("Decoded message was not set on the message validator data")
}
assert.Equal(t, true, valid, "Failed validation")
assert.NotNil(t, m.ValidatorData, "Decoded message was not set on the message validator data")
}

func TestValidateProposerSlashing_ContextTimeout(t *testing.T) {
@@ -171,20 +152,14 @@ func TestValidateProposerSlashing_ContextTimeout(t *testing.T) {
slashing, state := setupValidProposerSlashing(t)
slashing.Header_1.Header.Slot = 100000000
err := state.SetJustificationBits(bitfield.Bitvector4{0x0F}) // 0b1111
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
err = state.SetPreviousJustifiedCheckpoint(&ethpb.Checkpoint{Epoch: 0, Root: []byte{}})
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
defer cancel()

c, err := lru.New(10)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
r := &Service{
p2p: p,
chain: &mock.ChainService{State: state},
@@ -193,9 +168,8 @@ func TestValidateProposerSlashing_ContextTimeout(t *testing.T) {
}

buf := new(bytes.Buffer)
if _, err := p.Encoding().EncodeGossip(buf, slashing); err != nil {
t.Fatal(err)
}
_, err = p.Encoding().EncodeGossip(buf, slashing)
require.NoError(t, err)
m := &pubsub.Message{
Message: &pubsubpb.Message{
Data: buf.Bytes(),
@@ -205,9 +179,7 @@ func TestValidateProposerSlashing_ContextTimeout(t *testing.T) {
},
}
valid := r.validateProposerSlashing(ctx, "", m) == pubsub.ValidationAccept
if valid {
t.Error("slashing from the far distant future should have timed out and returned false")
}
assert.Equal(t, false, valid, "Slashing from the far distant future should have timed out and returned false")
}

func TestValidateProposerSlashing_Syncing(t *testing.T) {
@@ -223,9 +195,8 @@ func TestValidateProposerSlashing_Syncing(t *testing.T) {
}

buf := new(bytes.Buffer)
if _, err := p.Encoding().EncodeGossip(buf, slashing); err != nil {
t.Fatal(err)
}
_, err := p.Encoding().EncodeGossip(buf, slashing)
require.NoError(t, err)
m := &pubsub.Message{
Message: &pubsubpb.Message{
Data: buf.Bytes(),
@@ -235,7 +206,5 @@ func TestValidateProposerSlashing_Syncing(t *testing.T) {
},
}
valid := r.validateProposerSlashing(ctx, "", m) == pubsub.ValidationAccept
if valid {
t.Error("Did not fail validation")
}
assert.Equal(t, false, valid, "Did not fail validation")
}

@@ -20,6 +20,8 @@ import (
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/bls"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
)

func setupValidExit(t *testing.T) (*ethpb.SignedVoluntaryExit, *stateTrie.BeaconState) {
@@ -43,40 +45,26 @@ func setupValidExit(t *testing.T) (*ethpb.SignedVoluntaryExit, *stateTrie.Beacon
},
Slot: params.BeaconConfig().SlotsPerEpoch * 5,
})
if err != nil {
t.Fatal(err)
}
if err := state.SetSlot(
state.Slot() + (params.BeaconConfig().ShardCommitteePeriod * params.BeaconConfig().SlotsPerEpoch),
); err != nil {
t.Fatal(err)
}
require.NoError(t, err)
err = state.SetSlot(state.Slot() + (params.BeaconConfig().ShardCommitteePeriod * params.BeaconConfig().SlotsPerEpoch))
require.NoError(t, err)
domain, err := helpers.Domain(state.Fork(), helpers.CurrentEpoch(state), params.BeaconConfig().DomainVoluntaryExit, state.GenesisValidatorRoot())
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
signingRoot, err := helpers.ComputeSigningRoot(exit.Exit, domain)
if err != nil {
t.Error(err)
}
require.NoError(t, err)
priv := bls.RandKey()

sig := priv.Sign(signingRoot[:])
exit.Signature = sig.Marshal()

val, err := state.ValidatorAtIndex(0)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
val.PublicKey = priv.PublicKey().Marshal()[:]
if err := state.UpdateValidatorAtIndex(0, val); err != nil {
t.Fatal(err)
}
require.NoError(t, state.UpdateValidatorAtIndex(0, val))

b := make([]byte, 32)
if _, err := rand.Read(b); err != nil {
t.Fatal(err)
}
_, err = rand.Read(b)
require.NoError(t, err)

return exit, state
}
@@ -88,9 +76,7 @@ func TestValidateVoluntaryExit_ValidExit(t *testing.T) {
exit, s := setupValidExit(t)

c, err := lru.New(10)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
r := &Service{
p2p: p,
chain: &mock.ChainService{
@@ -101,9 +87,8 @@ func TestValidateVoluntaryExit_ValidExit(t *testing.T) {
}

buf := new(bytes.Buffer)
if _, err := p.Encoding().EncodeGossip(buf, exit); err != nil {
t.Fatal(err)
}
_, err = p.Encoding().EncodeGossip(buf, exit)
require.NoError(t, err)
m := &pubsub.Message{
Message: &pubsubpb.Message{
Data: buf.Bytes(),
@@ -113,13 +98,8 @@ func TestValidateVoluntaryExit_ValidExit(t *testing.T) {
},
}
valid := r.validateVoluntaryExit(ctx, "", m) == pubsub.ValidationAccept
if !valid {
t.Error("Failed validation")
}

if m.ValidatorData == nil {
t.Error("Decoded message was not set on the message validator data")
}
assert.Equal(t, true, valid, "Failed validation")
assert.NotNil(t, m.ValidatorData, "Decoded message was not set on the message validator data")
}

func TestValidateVoluntaryExit_ValidExit_Syncing(t *testing.T) {
@@ -136,9 +116,8 @@ func TestValidateVoluntaryExit_ValidExit_Syncing(t *testing.T) {
initialSync: &mockSync.Sync{IsSyncing: true},
}
buf := new(bytes.Buffer)
if _, err := p.Encoding().EncodeGossip(buf, exit); err != nil {
t.Fatal(err)
}
_, err := p.Encoding().EncodeGossip(buf, exit)
require.NoError(t, err)
m := &pubsub.Message{
Message: &pubsubpb.Message{
Data: buf.Bytes(),
@@ -148,7 +127,5 @@ func TestValidateVoluntaryExit_ValidExit_Syncing(t *testing.T) {
},
}
valid := r.validateVoluntaryExit(ctx, "", m) == pubsub.ValidationAccept
if valid {
t.Error("Validation should have failed")
}
assert.Equal(t, false, valid, "Validation should have failed")
}