Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-09 13:28:01 -05:00)

Compare commits: 361 commits, ba2333069a ... v2.0.0-rc.
.bazelignore (new file)
@@ -5,7 +5,7 @@ import (
    "testing"

    types "github.com/prysmaticlabs/eth2-types"
-   altair "github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
+   "github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
    "github.com/prysmaticlabs/prysm/beacon-chain/state"
    "github.com/prysmaticlabs/prysm/shared/params"
    "github.com/prysmaticlabs/prysm/shared/testutil"

@@ -58,6 +58,7 @@ go_test(
    name = "go_default_test",
    size = "small",
    srcs = [
+       "altair_transition_no_verify_sig_test.go",
        "benchmarks_test.go",
        "skip_slot_cache_test.go",
        "state_fuzz_test.go",
@@ -76,6 +77,7 @@ go_test(
        "//beacon-chain/core/altair:go_default_library",
        "//beacon-chain/core/blocks:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
+       "//beacon-chain/p2p/types:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//beacon-chain/state/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
@@ -84,6 +86,7 @@ go_test(
        "//shared/benchutil:go_default_library",
        "//shared/bls:go_default_library",
        "//shared/bytesutil:go_default_library",
+       "//shared/copyutil:go_default_library",
        "//shared/hashutil:go_default_library",
        "//shared/params:go_default_library",
        "//shared/testutil:go_default_library",

beacon-chain/core/state/altair_transition_no_verify_sig_test.go (new file, 221 lines)
@@ -0,0 +1,221 @@
package state_test

import (
    "context"
    "testing"

    "github.com/prysmaticlabs/go-bitfield"
    "github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
    "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
    core "github.com/prysmaticlabs/prysm/beacon-chain/core/state"
    p2pType "github.com/prysmaticlabs/prysm/beacon-chain/p2p/types"
    "github.com/prysmaticlabs/prysm/beacon-chain/state"
    ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
    "github.com/prysmaticlabs/prysm/shared/bls"
    "github.com/prysmaticlabs/prysm/shared/bytesutil"
    "github.com/prysmaticlabs/prysm/shared/copyutil"
    "github.com/prysmaticlabs/prysm/shared/params"
    "github.com/prysmaticlabs/prysm/shared/testutil"
    "github.com/prysmaticlabs/prysm/shared/testutil/assert"
    "github.com/prysmaticlabs/prysm/shared/testutil/require"
)

func TestExecuteAltairStateTransitionNoVerify_FullProcess(t *testing.T) {
    beaconState, privKeys := testutil.DeterministicGenesisStateAltair(t, 100)

    syncCommittee, err := altair.NextSyncCommittee(context.Background(), beaconState)
    require.NoError(t, err)
    require.NoError(t, beaconState.SetCurrentSyncCommittee(syncCommittee))

    eth1Data := &ethpb.Eth1Data{
        DepositCount: 100,
        DepositRoot:  bytesutil.PadTo([]byte{2}, 32),
        BlockHash:    make([]byte, 32),
    }
    require.NoError(t, beaconState.SetSlot(params.BeaconConfig().SlotsPerEpoch-1))
    e := beaconState.Eth1Data()
    e.DepositCount = 100
    require.NoError(t, beaconState.SetEth1Data(e))
    bh := beaconState.LatestBlockHeader()
    bh.Slot = beaconState.Slot()
    require.NoError(t, beaconState.SetLatestBlockHeader(bh))
    require.NoError(t, beaconState.SetEth1DataVotes([]*ethpb.Eth1Data{eth1Data}))

    require.NoError(t, beaconState.SetSlot(beaconState.Slot()+1))
    epoch := helpers.CurrentEpoch(beaconState)
    randaoReveal, err := testutil.RandaoReveal(beaconState, epoch, privKeys)
    require.NoError(t, err)
    require.NoError(t, beaconState.SetSlot(beaconState.Slot()-1))

    nextSlotState, err := core.ProcessSlots(context.Background(), beaconState.Copy(), beaconState.Slot()+1)
    require.NoError(t, err)
    parentRoot, err := nextSlotState.LatestBlockHeader().HashTreeRoot()
    require.NoError(t, err)
    proposerIdx, err := helpers.BeaconProposerIndex(nextSlotState)
    require.NoError(t, err)
    block := testutil.NewBeaconBlockAltair()
    block.Block.ProposerIndex = proposerIdx
    block.Block.Slot = beaconState.Slot() + 1
    block.Block.ParentRoot = parentRoot[:]
    block.Block.Body.RandaoReveal = randaoReveal
    block.Block.Body.Eth1Data = eth1Data

    syncBits := bitfield.NewBitvector512()
    for i := range syncBits {
        syncBits[i] = 0xff
    }
    indices, err := altair.NextSyncCommitteeIndices(context.Background(), beaconState)
    require.NoError(t, err)
    h := copyutil.CopyBeaconBlockHeader(beaconState.LatestBlockHeader())
    prevStateRoot, err := beaconState.HashTreeRoot(context.Background())
    require.NoError(t, err)
    h.StateRoot = prevStateRoot[:]
    pbr, err := h.HashTreeRoot()
    require.NoError(t, err)
    syncSigs := make([]bls.Signature, len(indices))
    for i, indice := range indices {
        b := p2pType.SSZBytes(pbr[:])
        sb, err := helpers.ComputeDomainAndSign(beaconState, helpers.CurrentEpoch(beaconState), &b, params.BeaconConfig().DomainSyncCommittee, privKeys[indice])
        require.NoError(t, err)
        sig, err := bls.SignatureFromBytes(sb)
        require.NoError(t, err)
        syncSigs[i] = sig
    }
    aggregatedSig := bls.AggregateSignatures(syncSigs).Marshal()
    syncAggregate := &ethpb.SyncAggregate{
        SyncCommitteeBits:      syncBits,
        SyncCommitteeSignature: aggregatedSig,
    }
    block.Block.Body.SyncAggregate = syncAggregate
    wsb, err := wrapper.WrappedAltairSignedBeaconBlock(block)
    require.NoError(t, err)
    stateRoot, err := core.CalculateStateRoot(context.Background(), beaconState, wsb)
    require.NoError(t, err)
    block.Block.StateRoot = stateRoot[:]

    c := beaconState.Copy()
    sig, err := testutil.BlockSignatureAltair(c, block.Block, privKeys)
    require.NoError(t, err)
    block.Signature = sig.Marshal()

    wsb, err = wrapper.WrappedAltairSignedBeaconBlock(block)
    require.NoError(t, err)
    set, _, err := core.ExecuteStateTransitionNoVerifyAnySig(context.Background(), beaconState, wsb)
    require.NoError(t, err)
    verified, err := set.Verify()
    require.NoError(t, err)
    require.Equal(t, true, verified, "Could not verify signature set")
}

func TestExecuteAltairStateTransitionNoVerifySignature_CouldNotVerifyStateRoot(t *testing.T) {
    beaconState, privKeys := testutil.DeterministicGenesisStateAltair(t, 100)

    syncCommittee, err := altair.NextSyncCommittee(context.Background(), beaconState)
    require.NoError(t, err)
    require.NoError(t, beaconState.SetCurrentSyncCommittee(syncCommittee))

    eth1Data := &ethpb.Eth1Data{
        DepositCount: 100,
        DepositRoot:  bytesutil.PadTo([]byte{2}, 32),
        BlockHash:    make([]byte, 32),
    }
    require.NoError(t, beaconState.SetSlot(params.BeaconConfig().SlotsPerEpoch-1))
    e := beaconState.Eth1Data()
    e.DepositCount = 100
    require.NoError(t, beaconState.SetEth1Data(e))
    bh := beaconState.LatestBlockHeader()
    bh.Slot = beaconState.Slot()
    require.NoError(t, beaconState.SetLatestBlockHeader(bh))
    require.NoError(t, beaconState.SetEth1DataVotes([]*ethpb.Eth1Data{eth1Data}))

    require.NoError(t, beaconState.SetSlot(beaconState.Slot()+1))
    epoch := helpers.CurrentEpoch(beaconState)
    randaoReveal, err := testutil.RandaoReveal(beaconState, epoch, privKeys)
    require.NoError(t, err)
    require.NoError(t, beaconState.SetSlot(beaconState.Slot()-1))

    nextSlotState, err := core.ProcessSlots(context.Background(), beaconState.Copy(), beaconState.Slot()+1)
    require.NoError(t, err)
    parentRoot, err := nextSlotState.LatestBlockHeader().HashTreeRoot()
    require.NoError(t, err)
    proposerIdx, err := helpers.BeaconProposerIndex(nextSlotState)
    require.NoError(t, err)
    block := testutil.NewBeaconBlockAltair()
    block.Block.ProposerIndex = proposerIdx
    block.Block.Slot = beaconState.Slot() + 1
    block.Block.ParentRoot = parentRoot[:]
    block.Block.Body.RandaoReveal = randaoReveal
    block.Block.Body.Eth1Data = eth1Data

    syncBits := bitfield.NewBitvector512()
    for i := range syncBits {
        syncBits[i] = 0xff
    }
    indices, err := altair.NextSyncCommitteeIndices(context.Background(), beaconState)
    require.NoError(t, err)
    h := copyutil.CopyBeaconBlockHeader(beaconState.LatestBlockHeader())
    prevStateRoot, err := beaconState.HashTreeRoot(context.Background())
    require.NoError(t, err)
    h.StateRoot = prevStateRoot[:]
    pbr, err := h.HashTreeRoot()
    require.NoError(t, err)
    syncSigs := make([]bls.Signature, len(indices))
    for i, indice := range indices {
        b := p2pType.SSZBytes(pbr[:])
        sb, err := helpers.ComputeDomainAndSign(beaconState, helpers.CurrentEpoch(beaconState), &b, params.BeaconConfig().DomainSyncCommittee, privKeys[indice])
        require.NoError(t, err)
        sig, err := bls.SignatureFromBytes(sb)
        require.NoError(t, err)
        syncSigs[i] = sig
    }
    aggregatedSig := bls.AggregateSignatures(syncSigs).Marshal()
    syncAggregate := &ethpb.SyncAggregate{
        SyncCommitteeBits:      syncBits,
        SyncCommitteeSignature: aggregatedSig,
    }
    block.Block.Body.SyncAggregate = syncAggregate

    wsb, err := wrapper.WrappedAltairSignedBeaconBlock(block)
    require.NoError(t, err)
    stateRoot, err := core.CalculateStateRoot(context.Background(), beaconState, wsb)
    require.NoError(t, err)
    block.Block.StateRoot = stateRoot[:]

    c := beaconState.Copy()
    sig, err := testutil.BlockSignatureAltair(c, block.Block, privKeys)
    require.NoError(t, err)
    block.Signature = sig.Marshal()

    block.Block.StateRoot = bytesutil.PadTo([]byte{'a'}, 32)
    wsb, err = wrapper.WrappedAltairSignedBeaconBlock(block)
    require.NoError(t, err)
    _, _, err = core.ExecuteStateTransitionNoVerifyAnySig(context.Background(), beaconState, wsb)
    require.ErrorContains(t, "could not validate state root", err)
}

func TestExecuteStateTransitionNoVerifyAnySig_PassesProcessingConditions(t *testing.T) {
    beaconState, block := createFullAltairBlockWithOperations(t)
    wsb, err := wrapper.WrappedAltairSignedBeaconBlock(block)
    require.NoError(t, err)
    set, _, err := core.ExecuteStateTransitionNoVerifyAnySig(context.Background(), beaconState, wsb)
    require.NoError(t, err)
    // Test Signature set verifies.
    verified, err := set.Verify()
    require.NoError(t, err)
    require.Equal(t, true, verified, "Could not verify signature set")
}

func createFullAltairBlockWithOperations(t *testing.T) (state.BeaconStateAltair,
    *ethpb.SignedBeaconBlockAltair) {
    beaconState, privKeys := testutil.DeterministicGenesisStateAltair(t, 32)
    sCom, err := altair.NextSyncCommittee(context.Background(), beaconState)
    assert.NoError(t, err)
    assert.NoError(t, beaconState.SetCurrentSyncCommittee(sCom))
    tState := beaconState.Copy()
    blk, err := testutil.GenerateFullBlockAltair(tState, privKeys,
        &testutil.BlockGenConfig{NumAttestations: 1, NumVoluntaryExits: 0, NumDeposits: 0}, 1)
    require.NoError(t, err)

    return beaconState, blk
}

@@ -4,10 +4,8 @@ import (
    "context"
    "testing"

    "github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
    "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
    "github.com/prysmaticlabs/prysm/beacon-chain/core/state"
    stateAltair "github.com/prysmaticlabs/prysm/beacon-chain/state"
    ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
    "github.com/prysmaticlabs/prysm/shared/bytesutil"

@@ -160,17 +158,3 @@ func TestCalculateStateRootAltair_OK(t *testing.T) {
    require.NoError(t, err)
    require.DeepNotEqual(t, params.BeaconConfig().ZeroHash, r)
}
-
-func createFullAltairBlockWithOperations(t *testing.T) (stateAltair.BeaconStateAltair,
-   *ethpb.SignedBeaconBlockAltair) {
-   beaconState, privKeys := testutil.DeterministicGenesisStateAltair(t, 32)
-   sCom, err := altair.NextSyncCommittee(context.Background(), beaconState)
-   assert.NoError(t, err)
-   assert.NoError(t, beaconState.SetCurrentSyncCommittee(sCom))
-   tState := beaconState.Copy()
-   blk, err := testutil.GenerateFullBlockAltair(tState, privKeys,
-       &testutil.BlockGenConfig{NumAttestations: 1, NumVoluntaryExits: 0, NumDeposits: 0}, 1)
-   require.NoError(t, err)
-
-   return beaconState, blk
-}

@@ -26,6 +26,7 @@ go_library(
        "//beacon-chain/node/registration:go_default_library",
        "//beacon-chain/operations/attestations:go_default_library",
        "//beacon-chain/operations/slashings:go_default_library",
+       "//beacon-chain/operations/synccommittee:go_default_library",
        "//beacon-chain/operations/voluntaryexits:go_default_library",
        "//beacon-chain/p2p:go_default_library",
        "//beacon-chain/powchain:go_default_library",

@@ -28,6 +28,7 @@ import (
    "github.com/prysmaticlabs/prysm/beacon-chain/node/registration"
    "github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
    "github.com/prysmaticlabs/prysm/beacon-chain/operations/slashings"
+   "github.com/prysmaticlabs/prysm/beacon-chain/operations/synccommittee"
    "github.com/prysmaticlabs/prysm/beacon-chain/operations/voluntaryexits"
    "github.com/prysmaticlabs/prysm/beacon-chain/p2p"
    "github.com/prysmaticlabs/prysm/beacon-chain/powchain"
@@ -59,23 +60,24 @@ const testSkipPowFlag = "test-skip-pow"
// full PoS node. It handles the lifecycle of the entire system and registers
// services to a service registry.
type BeaconNode struct {
-   cliCtx          *cli.Context
-   ctx             context.Context
-   cancel          context.CancelFunc
-   services        *shared.ServiceRegistry
-   lock            sync.RWMutex
-   stop            chan struct{} // Channel to wait for termination notifications.
-   db              db.Database
-   attestationPool attestations.Pool
-   exitPool        voluntaryexits.PoolManager
-   slashingsPool   slashings.PoolManager
-   depositCache    *depositcache.DepositCache
-   stateFeed       *event.Feed
-   blockFeed       *event.Feed
-   opFeed          *event.Feed
-   forkChoiceStore forkchoice.ForkChoicer
-   stateGen        *stategen.State
-   collector       *bcnodeCollector
+   cliCtx            *cli.Context
+   ctx               context.Context
+   cancel            context.CancelFunc
+   services          *shared.ServiceRegistry
+   lock              sync.RWMutex
+   stop              chan struct{} // Channel to wait for termination notifications.
+   db                db.Database
+   attestationPool   attestations.Pool
+   exitPool          voluntaryexits.PoolManager
+   slashingsPool     slashings.PoolManager
+   syncCommitteePool synccommittee.Pool
+   depositCache      *depositcache.DepositCache
+   stateFeed         *event.Feed
+   blockFeed         *event.Feed
+   opFeed            *event.Feed
+   forkChoiceStore   forkchoice.ForkChoicer
+   stateGen          *stategen.State
+   collector         *bcnodeCollector
}

// New creates a new node instance, sets up configuration options, and registers

@@ -95,21 +97,25 @@ func New(cliCtx *cli.Context) (*BeaconNode, error) {
    configureNetwork(cliCtx)
    configureInteropConfig(cliCtx)

    // Initializes any forks here.
    params.BeaconConfig().InitializeForkSchedule()

    registry := shared.NewServiceRegistry()

    ctx, cancel := context.WithCancel(cliCtx.Context)
    beacon := &BeaconNode{
-       cliCtx:          cliCtx,
-       ctx:             ctx,
-       cancel:          cancel,
-       services:        registry,
-       stop:            make(chan struct{}),
-       stateFeed:       new(event.Feed),
-       blockFeed:       new(event.Feed),
-       opFeed:          new(event.Feed),
-       attestationPool: attestations.NewPool(),
-       exitPool:        voluntaryexits.NewPool(),
-       slashingsPool:   slashings.NewPool(),
+       cliCtx:            cliCtx,
+       ctx:               ctx,
+       cancel:            cancel,
+       services:          registry,
+       stop:              make(chan struct{}),
+       stateFeed:         new(event.Feed),
+       blockFeed:         new(event.Feed),
+       opFeed:            new(event.Feed),
+       attestationPool:   attestations.NewPool(),
+       exitPool:          voluntaryexits.NewPool(),
+       slashingsPool:     slashings.NewPool(),
+       syncCommitteePool: synccommittee.NewPool(),
    }

    depositAddress, err := registration.DepositContractAddress()

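New above wires each pool into the node struct, and the registered services later receive them through config structs. The service-registry pattern the struct comment refers to can be sketched as follows; this is a hedged illustration of the idea, not Prysm's actual shared.ServiceRegistry implementation:

package main

import "fmt"

// Service is the minimal lifecycle contract a registry needs.
type Service interface {
    Start()
    Stop() error
}

// ServiceRegistry starts services in registration order and stops them
// in reverse order, the usual convention for dependency safety.
type ServiceRegistry struct {
    services []Service
}

func (r *ServiceRegistry) RegisterService(s Service) {
    r.services = append(r.services, s)
}

func (r *ServiceRegistry) StartAll() {
    for _, s := range r.services {
        s.Start()
    }
}

func (r *ServiceRegistry) StopAll() {
    for i := len(r.services) - 1; i >= 0; i-- {
        if err := r.services[i].Stop(); err != nil {
            fmt.Println("error stopping service:", err)
        }
    }
}

// noopService is a hypothetical stand-in for p2p, sync, rpc, etc.
type noopService struct{ name string }

func (n *noopService) Start()      { fmt.Println("started", n.name) }
func (n *noopService) Stop() error { fmt.Println("stopped", n.name); return nil }

func main() {
    r := &ServiceRegistry{}
    r.RegisterService(&noopService{name: "p2p"})
    r.RegisterService(&noopService{name: "sync"})
    r.StartAll()
    r.StopAll() // stops sync before p2p
}
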
@@ -500,6 +506,7 @@ func (b *BeaconNode) registerSyncService() error {
        AttPool:       b.attestationPool,
        ExitPool:      b.exitPool,
        SlashingPool:  b.slashingsPool,
+       SyncCommsPool: b.syncCommitteePool,
        StateGen:      b.stateGen,
    })

@@ -588,6 +595,7 @@ func (b *BeaconNode) registerRPCService() error {
        AttestationsPool:        b.attestationPool,
        ExitPool:                b.exitPool,
        SlashingsPool:           b.slashingsPool,
+       SyncCommitteeObjectPool: b.syncCommitteePool,
        POWChainService:         web3Service,
        ChainStartFetcher:       chainStartFetcher,
        MockEth1Votes:           mockEth1DataVotes,

@@ -2,20 +2,22 @@ package encoder

import (
    "io"

+   ssz "github.com/ferranbt/fastssz"
)

// NetworkEncoding represents an encoder compatible with Ethereum consensus p2p.
type NetworkEncoding interface {
    // DecodeGossip to the provided gossip message. The interface must be a pointer to the decoding destination.
-   DecodeGossip([]byte, interface{}) error
+   DecodeGossip([]byte, ssz.Unmarshaler) error
    // DecodeWithMaxLength a bytes from a reader with a varint length prefix. The interface must be a pointer to the
    // decoding destination. The length of the message should not be more than the provided limit.
-   DecodeWithMaxLength(io.Reader, interface{}) error
+   DecodeWithMaxLength(io.Reader, ssz.Unmarshaler) error
    // EncodeGossip an arbitrary gossip message to the provided writer. The interface must be a pointer object to encode.
-   EncodeGossip(io.Writer, interface{}) (int, error)
+   EncodeGossip(io.Writer, ssz.Marshaler) (int, error)
    // EncodeWithMaxLength an arbitrary message to the provided writer with a varint length prefix. The interface must be
    // a pointer object to encode. The encoded message should not be bigger than the provided limit.
-   EncodeWithMaxLength(io.Writer, interface{}) (int, error)
+   EncodeWithMaxLength(io.Writer, ssz.Marshaler) (int, error)
    // ProtocolSuffix returns the last part of the protocol ID to indicate the encoding scheme.
    ProtocolSuffix() string
}

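The interface change above replaces interface{} with fastssz's Marshaler and Unmarshaler, which turns the old runtime "non-supported type: %T" failure into a compile-time error. A minimal sketch of the effect; the fork type here is a hypothetical stand-in for a fastssz-generated message such as ethpb.Fork:

package main

import (
    "bytes"
    "fmt"

    ssz "github.com/ferranbt/fastssz"
)

// fork is a stand-in for a generated SSZ type implementing ssz.Marshaler.
type fork struct{ epoch uint64 }

func (f *fork) MarshalSSZ() ([]byte, error) { return f.MarshalSSZTo(nil) }
func (f *fork) MarshalSSZTo(dst []byte) ([]byte, error) {
    b := make([]byte, 8)
    for i := 0; i < 8; i++ {
        b[i] = byte(f.epoch >> (8 * i)) // little-endian, as SSZ requires
    }
    return append(dst, b...), nil
}
func (f *fork) SizeSSZ() int { return 8 }

// encodeTyped only accepts ssz.Marshaler, so a non-SSZ argument is a
// compile error rather than a runtime type-switch failure.
func encodeTyped(w *bytes.Buffer, msg ssz.Marshaler) (int, error) {
    b, err := msg.MarshalSSZ()
    if err != nil {
        return 0, err
    }
    return w.Write(b)
}

func main() {
    buf := new(bytes.Buffer)
    n, err := encodeTyped(buf, &fork{epoch: 9001})
    fmt.Println(n, err)
    // encodeTyped(buf, "not ssz") // would not compile
}
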
@@ -33,15 +33,12 @@ type SszNetworkEncoder struct{}
// ProtocolSuffixSSZSnappy is the last part of the topic string to identify the encoding protocol.
const ProtocolSuffixSSZSnappy = "ssz_snappy"

-func (e SszNetworkEncoder) doEncode(msg interface{}) ([]byte, error) {
-   if v, ok := msg.(fastssz.Marshaler); ok {
-       return v.MarshalSSZ()
-   }
-   return nil, errors.Errorf("non-supported type: %T", msg)
+func (e SszNetworkEncoder) doEncode(msg fastssz.Marshaler) ([]byte, error) {
+   return msg.MarshalSSZ()
}

// EncodeGossip the proto gossip message to the io.Writer.
-func (e SszNetworkEncoder) EncodeGossip(w io.Writer, msg interface{}) (int, error) {
+func (e SszNetworkEncoder) EncodeGossip(w io.Writer, msg fastssz.Marshaler) (int, error) {
    if msg == nil {
        return 0, nil
    }
@@ -58,7 +55,7 @@ func (e SszNetworkEncoder) EncodeGossip(w io.Writer, msg interface{}) (int, erro

// EncodeWithMaxLength the proto message to the io.Writer. This encoding prefixes the byte slice with a protobuf varint
// to indicate the size of the message. This checks that the encoded message isn't larger than the provided max limit.
-func (e SszNetworkEncoder) EncodeWithMaxLength(w io.Writer, msg interface{}) (int, error) {
+func (e SszNetworkEncoder) EncodeWithMaxLength(w io.Writer, msg fastssz.Marshaler) (int, error) {
    if msg == nil {
        return 0, nil
    }
@@ -81,15 +78,12 @@ func (e SszNetworkEncoder) EncodeWithMaxLength(w io.Writer, msg interface{}) (in
    return writeSnappyBuffer(w, b)
}

-func (e SszNetworkEncoder) doDecode(b []byte, to interface{}) error {
-   if v, ok := to.(fastssz.Unmarshaler); ok {
-       return v.UnmarshalSSZ(b)
-   }
-   return errors.Errorf("non-supported type: %T", to)
+func (e SszNetworkEncoder) doDecode(b []byte, to fastssz.Unmarshaler) error {
+   return to.UnmarshalSSZ(b)
}

// DecodeGossip decodes the bytes to the protobuf gossip message provided.
-func (e SszNetworkEncoder) DecodeGossip(b []byte, to interface{}) error {
+func (e SszNetworkEncoder) DecodeGossip(b []byte, to fastssz.Unmarshaler) error {
    b, err := DecodeSnappy(b, MaxGossipSize)
    if err != nil {
        return err
@@ -115,7 +109,7 @@ func DecodeSnappy(msg []byte, maxSize uint64) ([]byte, error) {

// DecodeWithMaxLength the bytes from io.Reader to the protobuf message provided.
// This checks that the decoded message isn't larger than the provided max limit.
-func (e SszNetworkEncoder) DecodeWithMaxLength(r io.Reader, to interface{}) error {
+func (e SszNetworkEncoder) DecodeWithMaxLength(r io.Reader, to fastssz.Unmarshaler) error {
    msgLen, err := readVarint(r)
    if err != nil {
        return err

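The doc comments above describe the ssz_snappy framing: a protobuf-style uvarint of the uncompressed message size followed by snappy-compressed bytes. A self-contained sketch of that varint framing, assuming github.com/golang/snappy; it uses snappy's simpler block format for illustration, whereas the encoder's length-prefixed path goes through snappy's stream format via the internal writeSnappyBuffer and readVarint helpers:

package main

import (
    "bytes"
    "encoding/binary"
    "fmt"

    "github.com/golang/snappy"
)

// frame prefixes the payload with a uvarint of its uncompressed length,
// then appends the snappy-compressed bytes.
func frame(payload []byte) []byte {
    buf := make([]byte, binary.MaxVarintLen64)
    n := binary.PutUvarint(buf, uint64(len(payload)))
    return append(buf[:n], snappy.Encode(nil, payload)...)
}

// unframe reads the uvarint length, enforces a max size before doing any
// decompression work, and then decompresses.
func unframe(b []byte, maxSize uint64) ([]byte, error) {
    size, n := binary.Uvarint(b)
    if n <= 0 {
        return nil, fmt.Errorf("bad varint prefix")
    }
    if size > maxSize {
        return nil, fmt.Errorf("message size %d goes over the provided max limit of %d", size, maxSize)
    }
    return snappy.Decode(nil, b[n:])
}

func main() {
    msg := []byte("hello gossip")
    out, err := unframe(frame(msg), 1<<20)
    fmt.Println(string(out), err, bytes.Equal(out, msg))
}
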
@@ -10,7 +10,7 @@ import (

    gogo "github.com/gogo/protobuf/proto"
    "github.com/prysmaticlabs/prysm/beacon-chain/p2p/encoder"
-   statepb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
+   ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/shared/params"
    "github.com/prysmaticlabs/prysm/shared/testutil"
    "github.com/prysmaticlabs/prysm/shared/testutil/assert"
@@ -26,7 +26,7 @@ func TestSszNetworkEncoder_RoundTrip(t *testing.T) {

func TestSszNetworkEncoder_FailsSnappyLength(t *testing.T) {
    e := &encoder.SszNetworkEncoder{}
-   att := &statepb.Fork{}
+   att := &ethpb.Fork{}
    data := make([]byte, 32)
    binary.PutUvarint(data, encoder.MaxGossipSize+32)
    err := e.DecodeGossip(data, att)
@@ -35,14 +35,14 @@ func TestSszNetworkEncoder_FailsSnappyLength(t *testing.T) {

func testRoundTripWithLength(t *testing.T, e *encoder.SszNetworkEncoder) {
    buf := new(bytes.Buffer)
-   msg := &statepb.Fork{
+   msg := &ethpb.Fork{
        PreviousVersion: []byte("fooo"),
        CurrentVersion:  []byte("barr"),
        Epoch:           9001,
    }
    _, err := e.EncodeWithMaxLength(buf, msg)
    require.NoError(t, err)
-   decoded := &statepb.Fork{}
+   decoded := &ethpb.Fork{}
    require.NoError(t, e.DecodeWithMaxLength(buf, decoded))
    if !proto.Equal(decoded, msg) {
        t.Logf("decoded=%+v\n", decoded)
@@ -52,14 +52,14 @@ func testRoundTripWithLength(t *testing.T, e *encoder.SszNetworkEncoder) {

func testRoundTripWithGossip(t *testing.T, e *encoder.SszNetworkEncoder) {
    buf := new(bytes.Buffer)
-   msg := &statepb.Fork{
+   msg := &ethpb.Fork{
        PreviousVersion: []byte("fooo"),
        CurrentVersion:  []byte("barr"),
        Epoch:           9001,
    }
    _, err := e.EncodeGossip(buf, msg)
    require.NoError(t, err)
-   decoded := &statepb.Fork{}
+   decoded := &ethpb.Fork{}
    require.NoError(t, e.DecodeGossip(buf.Bytes(), decoded))
    if !proto.Equal(decoded, msg) {
        t.Logf("decoded=%+v\n", decoded)
@@ -69,7 +69,7 @@ func testRoundTripWithGossip(t *testing.T, e *encoder.SszNetworkEncoder) {

func TestSszNetworkEncoder_EncodeWithMaxLength(t *testing.T) {
    buf := new(bytes.Buffer)
-   msg := &statepb.Fork{
+   msg := &ethpb.Fork{
        PreviousVersion: []byte("fooo"),
        CurrentVersion:  []byte("barr"),
        Epoch:           9001,
@@ -86,7 +86,7 @@ func TestSszNetworkEncoder_EncodeWithMaxLength(t *testing.T) {

func TestSszNetworkEncoder_DecodeWithMaxLength(t *testing.T) {
    buf := new(bytes.Buffer)
-   msg := &statepb.Fork{
+   msg := &ethpb.Fork{
        PreviousVersion: []byte("fooo"),
        CurrentVersion:  []byte("barr"),
        Epoch:           4242,
@@ -99,7 +99,7 @@ func TestSszNetworkEncoder_DecodeWithMaxLength(t *testing.T) {
    params.OverrideBeaconNetworkConfig(c)
    _, err := e.EncodeGossip(buf, msg)
    require.NoError(t, err)
-   decoded := &statepb.Fork{}
+   decoded := &ethpb.Fork{}
    err = e.DecodeWithMaxLength(buf, decoded)
    wanted := fmt.Sprintf("goes over the provided max limit of %d", maxChunkSize)
    assert.ErrorContains(t, wanted, err)
@@ -115,13 +115,13 @@ func TestSszNetworkEncoder_DecodeWithMultipleFrames(t *testing.T) {
    maxChunkSize := uint64(1 << 22)
    c.MaxChunkSize = maxChunkSize
    params.OverrideBeaconNetworkConfig(c)
-   _, err := e.EncodeWithMaxLength(buf, st.InnerStateUnsafe())
+   _, err := e.EncodeWithMaxLength(buf, st.InnerStateUnsafe().(*ethpb.BeaconState))
    require.NoError(t, err)
    // Max snappy block size
    if buf.Len() <= 76490 {
        t.Errorf("buffer smaller than expected, wanted > %d but got %d", 76490, buf.Len())
    }
-   decoded := new(statepb.BeaconState)
+   decoded := new(ethpb.BeaconState)
    err = e.DecodeWithMaxLength(buf, decoded)
    assert.NoError(t, err)
}
@@ -144,7 +144,7 @@ func TestSszNetworkEncoder_MaxInt64(t *testing.T) {
func TestSszNetworkEncoder_DecodeWithBadSnappyStream(t *testing.T) {
    st := newBadSnappyStream()
    e := &encoder.SszNetworkEncoder{}
-   decoded := new(statepb.Fork)
+   decoded := new(ethpb.Fork)
    err := e.DecodeWithMaxLength(st, decoded)
    assert.ErrorContains(t, io.EOF.Error(), err)
}

@@ -18,6 +18,7 @@ go_library(
        "//beacon-chain/db:go_default_library",
        "//beacon-chain/operations/attestations:go_default_library",
        "//beacon-chain/operations/slashings:go_default_library",
+       "//beacon-chain/operations/synccommittee:go_default_library",
        "//beacon-chain/operations/voluntaryexits:go_default_library",
        "//beacon-chain/p2p:go_default_library",
        "//beacon-chain/powchain:go_default_library",

@@ -34,7 +34,7 @@ type singleAttestationVerificationFailure struct {
// ListPoolAttestations retrieves attestations known by the node but
// not necessarily incorporated into any block. Allows filtering by committee index or slot.
func (bs *Server) ListPoolAttestations(ctx context.Context, req *ethpb.AttestationsPoolRequest) (*ethpb.AttestationsPoolResponse, error) {
-   ctx, span := trace.StartSpan(ctx, "beaconv1.ListPoolAttestations")
+   ctx, span := trace.StartSpan(ctx, "beacon.ListPoolAttestations")
    defer span.End()

    attestations := bs.AttestationsPool.AggregatedAttestations()
@@ -70,7 +70,7 @@ func (bs *Server) ListPoolAttestations(ctx context.Context, req *ethpb.Attestati
// SubmitAttestations submits Attestation object to node. If attestation passes all validation
// constraints, node MUST publish attestation on appropriate subnet.
func (bs *Server) SubmitAttestations(ctx context.Context, req *ethpb.SubmitAttestationsRequest) (*emptypb.Empty, error) {
-   ctx, span := trace.StartSpan(ctx, "beaconv1.SubmitAttestation")
+   ctx, span := trace.StartSpan(ctx, "beacon.SubmitAttestation")
    defer span.End()

    var validAttestations []*eth.Attestation
@@ -145,7 +145,7 @@ func (bs *Server) SubmitAttestations(ctx context.Context, req *ethpb.SubmitAttes
// ListPoolAttesterSlashings retrieves attester slashings known by the node but
// not necessarily incorporated into any block.
func (bs *Server) ListPoolAttesterSlashings(ctx context.Context, req *emptypb.Empty) (*ethpb.AttesterSlashingsPoolResponse, error) {
-   ctx, span := trace.StartSpan(ctx, "beaconv1.ListPoolAttesterSlashings")
+   ctx, span := trace.StartSpan(ctx, "beacon.ListPoolAttesterSlashings")
    defer span.End()

    headState, err := bs.ChainInfoFetcher.HeadState(ctx)
@@ -167,7 +167,7 @@ func (bs *Server) ListPoolAttesterSlashings(ctx context.Context, req *emptypb.Em
// SubmitAttesterSlashing submits AttesterSlashing object to node's pool and
// if passes validation node MUST broadcast it to network.
func (bs *Server) SubmitAttesterSlashing(ctx context.Context, req *ethpb.AttesterSlashing) (*emptypb.Empty, error) {
-   ctx, span := trace.StartSpan(ctx, "beaconv1.SubmitAttesterSlashing")
+   ctx, span := trace.StartSpan(ctx, "beacon.SubmitAttesterSlashing")
    defer span.End()

    headState, err := bs.ChainInfoFetcher.HeadState(ctx)
@@ -197,7 +197,7 @@ func (bs *Server) SubmitAttesterSlashing(ctx context.Context, req *ethpb.Atteste
// ListPoolProposerSlashings retrieves proposer slashings known by the node
// but not necessarily incorporated into any block.
func (bs *Server) ListPoolProposerSlashings(ctx context.Context, req *emptypb.Empty) (*ethpb.ProposerSlashingPoolResponse, error) {
-   ctx, span := trace.StartSpan(ctx, "beaconv1.ListPoolProposerSlashings")
+   ctx, span := trace.StartSpan(ctx, "beacon.ListPoolProposerSlashings")
    defer span.End()

    headState, err := bs.ChainInfoFetcher.HeadState(ctx)
@@ -219,7 +219,7 @@ func (bs *Server) ListPoolProposerSlashings(ctx context.Context, req *emptypb.Em
// SubmitProposerSlashing submits AttesterSlashing object to node's pool and if
// passes validation node MUST broadcast it to network.
func (bs *Server) SubmitProposerSlashing(ctx context.Context, req *ethpb.ProposerSlashing) (*emptypb.Empty, error) {
-   ctx, span := trace.StartSpan(ctx, "beaconv1.SubmitProposerSlashing")
+   ctx, span := trace.StartSpan(ctx, "beacon.SubmitProposerSlashing")
    defer span.End()

    headState, err := bs.ChainInfoFetcher.HeadState(ctx)
@@ -249,7 +249,7 @@ func (bs *Server) SubmitProposerSlashing(ctx context.Context, req *ethpb.Propose
// ListPoolVoluntaryExits retrieves voluntary exits known by the node but
// not necessarily incorporated into any block.
func (bs *Server) ListPoolVoluntaryExits(ctx context.Context, req *emptypb.Empty) (*ethpb.VoluntaryExitsPoolResponse, error) {
-   ctx, span := trace.StartSpan(ctx, "beaconv1.ListPoolVoluntaryExits")
+   ctx, span := trace.StartSpan(ctx, "beacon.ListPoolVoluntaryExits")
    defer span.End()

    headState, err := bs.ChainInfoFetcher.HeadState(ctx)
@@ -272,7 +272,7 @@ func (bs *Server) ListPoolVoluntaryExits(ctx context.Context, req *emptypb.Empty
// SubmitVoluntaryExit submits SignedVoluntaryExit object to node's pool
// and if passes validation node MUST broadcast it to network.
func (bs *Server) SubmitVoluntaryExit(ctx context.Context, req *ethpb.SignedVoluntaryExit) (*emptypb.Empty, error) {
-   ctx, span := trace.StartSpan(ctx, "beaconv1.SubmitVoluntaryExit")
+   ctx, span := trace.StartSpan(ctx, "beacon.SubmitVoluntaryExit")
    defer span.End()

    headState, err := bs.ChainInfoFetcher.HeadState(ctx)

@@ -37,7 +37,7 @@ var (

// GetIdentity retrieves data about the node's network presence.
func (ns *Server) GetIdentity(ctx context.Context, _ *emptypb.Empty) (*ethpb.IdentityResponse, error) {
-   ctx, span := trace.StartSpan(ctx, "nodeV1.GetIdentity")
+   ctx, span := trace.StartSpan(ctx, "nodev1V1.GetIdentity")
    defer span.End()

    peerId := ns.PeerManager.PeerID().Pretty()

@@ -19,6 +19,7 @@ go_library(
    deps = [
        "//beacon-chain/blockchain:go_default_library",
        "//beacon-chain/cache/depositcache:go_default_library",
+       "//beacon-chain/core/altair:go_default_library",
        "//beacon-chain/core/blocks:go_default_library",
        "//beacon-chain/core/epoch/precompute:go_default_library",
        "//beacon-chain/core/feed:go_default_library",
@@ -49,6 +50,7 @@ go_library(
        "//shared/params:go_default_library",
        "//shared/sliceutil:go_default_library",
        "//shared/slotutil:go_default_library",
+       "//shared/version:go_default_library",
        "@com_github_patrickmn_go_cache//:go_default_library",
        "@com_github_prometheus_client_golang//prometheus:go_default_library",
        "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",

@@ -11,16 +11,26 @@ import (
    "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
    "github.com/prysmaticlabs/prysm/beacon-chain/db/filters"
    ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
+   "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
    "github.com/prysmaticlabs/prysm/shared/bytesutil"
    "github.com/prysmaticlabs/prysm/shared/cmd"
    "github.com/prysmaticlabs/prysm/shared/event"
    "github.com/prysmaticlabs/prysm/shared/pagination"
    "github.com/prysmaticlabs/prysm/shared/params"
+   "github.com/prysmaticlabs/prysm/shared/version"
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"
    "google.golang.org/protobuf/types/known/emptypb"
)

+// BlockContainer represents an instance of
+// block along with its relevant metadata.
+type BlockContainer struct {
+   Blk         block.SignedBeaconBlock
+   Root        [32]byte
+   IsCanonical bool
+}

// ListBlocks retrieves blocks by root, slot, or epoch.
//
// The server may return multiple blocks in the case that a slot or epoch is
@@ -37,166 +47,319 @@ func (bs *Server) ListBlocks(
|
||||
|
||||
switch q := req.QueryFilter.(type) {
|
||||
case *ethpb.ListBlocksRequest_Epoch:
|
||||
blks, _, err := bs.BeaconDB.Blocks(ctx, filters.NewFilter().SetStartEpoch(q.Epoch).SetEndEpoch(q.Epoch))
|
||||
ctrs, numBlks, nextPageToken, err := bs.ListBlocksForEpoch(ctx, req, q)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get blocks: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
numBlks := len(blks)
|
||||
if numBlks == 0 {
|
||||
return ðpb.ListBlocksResponse{
|
||||
BlockContainers: make([]*ethpb.BeaconBlockContainer, 0),
|
||||
TotalSize: 0,
|
||||
NextPageToken: strconv.Itoa(0),
|
||||
}, nil
|
||||
}
|
||||
|
||||
start, end, nextPageToken, err := pagination.StartAndEndPage(req.PageToken, int(req.PageSize), numBlks)
|
||||
blkContainers, err := convertToProto(ctrs)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not paginate blocks: %v", err)
|
||||
}
|
||||
|
||||
returnedBlks := blks[start:end]
|
||||
containers := make([]*ethpb.BeaconBlockContainer, len(returnedBlks))
|
||||
for i, b := range returnedBlks {
|
||||
root, err := b.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
canonical, err := bs.CanonicalFetcher.IsCanonical(ctx, root)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not determine if block is canonical: %v", err)
|
||||
}
|
||||
phBlk, err := b.PbPhase0Block()
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get phase 0 block: %v", err)
|
||||
}
|
||||
containers[i] = ðpb.BeaconBlockContainer{
|
||||
Block: phBlk,
|
||||
BlockRoot: root[:],
|
||||
Canonical: canonical,
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return ðpb.ListBlocksResponse{
|
||||
BlockContainers: containers,
|
||||
BlockContainers: blkContainers,
|
||||
TotalSize: int32(numBlks),
|
||||
NextPageToken: nextPageToken,
|
||||
}, nil
|
||||
case *ethpb.ListBlocksRequest_Root:
|
||||
blk, err := bs.BeaconDB.Block(ctx, bytesutil.ToBytes32(q.Root))
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not retrieve block: %v", err)
|
||||
}
|
||||
if blk == nil || blk.IsNil() {
|
||||
return ðpb.ListBlocksResponse{
|
||||
BlockContainers: make([]*ethpb.BeaconBlockContainer, 0),
|
||||
TotalSize: 0,
|
||||
NextPageToken: strconv.Itoa(0),
|
||||
}, nil
|
||||
}
|
||||
root, err := blk.Block().HashTreeRoot()
|
||||
ctrs, numBlks, nextPageToken, err := bs.ListBlocksForRoot(ctx, req, q)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
canonical, err := bs.CanonicalFetcher.IsCanonical(ctx, root)
|
||||
blkContainers, err := convertToProto(ctrs)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not determine if block is canonical: %v", err)
|
||||
}
|
||||
phBlk, err := blk.PbPhase0Block()
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not determine if block is phase 0 block: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return ðpb.ListBlocksResponse{
|
||||
BlockContainers: []*ethpb.BeaconBlockContainer{{
|
||||
Block: phBlk,
|
||||
BlockRoot: root[:],
|
||||
Canonical: canonical},
|
||||
},
|
||||
TotalSize: 1,
|
||||
BlockContainers: blkContainers,
|
||||
TotalSize: int32(numBlks),
|
||||
NextPageToken: nextPageToken,
|
||||
}, nil
|
||||
|
||||
case *ethpb.ListBlocksRequest_Slot:
|
||||
hasBlocks, blks, err := bs.BeaconDB.BlocksBySlot(ctx, q.Slot)
|
||||
ctrs, numBlks, nextPageToken, err := bs.ListBlocksForSlot(ctx, req, q)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not retrieve blocks for slot %d: %v", q.Slot, err)
|
||||
return nil, err
|
||||
}
|
||||
if !hasBlocks {
|
||||
return ðpb.ListBlocksResponse{
|
||||
BlockContainers: make([]*ethpb.BeaconBlockContainer, 0),
|
||||
TotalSize: 0,
|
||||
NextPageToken: strconv.Itoa(0),
|
||||
}, nil
|
||||
}
|
||||
|
||||
numBlks := len(blks)
|
||||
|
||||
start, end, nextPageToken, err := pagination.StartAndEndPage(req.PageToken, int(req.PageSize), numBlks)
|
||||
blkContainers, err := convertToProto(ctrs)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not paginate blocks: %v", err)
|
||||
}
|
||||
|
||||
returnedBlks := blks[start:end]
|
||||
containers := make([]*ethpb.BeaconBlockContainer, len(returnedBlks))
|
||||
for i, b := range returnedBlks {
|
||||
root, err := b.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
canonical, err := bs.CanonicalFetcher.IsCanonical(ctx, root)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not determine if block is canonical: %v", err)
|
||||
}
|
||||
phBlk, err := b.PbPhase0Block()
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not determine if block is phase 0 block: %v", err)
|
||||
}
|
||||
containers[i] = ðpb.BeaconBlockContainer{
|
||||
Block: phBlk,
|
||||
BlockRoot: root[:],
|
||||
Canonical: canonical,
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return ðpb.ListBlocksResponse{
|
||||
BlockContainers: containers,
|
||||
BlockContainers: blkContainers,
|
||||
TotalSize: int32(numBlks),
|
||||
NextPageToken: nextPageToken,
|
||||
}, nil
|
||||
case *ethpb.ListBlocksRequest_Genesis:
|
||||
genBlk, err := bs.BeaconDB.GenesisBlock(ctx)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not retrieve blocks for genesis slot: %v", err)
|
||||
}
|
||||
if genBlk == nil || genBlk.IsNil() {
|
||||
return nil, status.Error(codes.Internal, "Could not find genesis block")
|
||||
}
|
||||
root, err := genBlk.Block().HashTreeRoot()
|
||||
ctrs, numBlks, nextPageToken, err := bs.ListBlocksForGenesis(ctx, req, q)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
phBlk, err := genBlk.PbPhase0Block()
|
||||
blkContainers, err := convertToProto(ctrs)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not determine if block is phase 0 block: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
containers := []*ethpb.BeaconBlockContainer{
|
||||
{
|
||||
Block: phBlk,
|
||||
BlockRoot: root[:],
|
||||
Canonical: true,
|
||||
},
|
||||
}
|
||||
|
||||
return ðpb.ListBlocksResponse{
|
||||
BlockContainers: containers,
|
||||
TotalSize: int32(1),
|
||||
NextPageToken: strconv.Itoa(0),
|
||||
BlockContainers: blkContainers,
|
||||
TotalSize: int32(numBlks),
|
||||
NextPageToken: nextPageToken,
|
||||
}, nil
|
||||
}
|
||||
|
||||
return nil, status.Error(codes.InvalidArgument, "Must specify a filter criteria for fetching blocks")
|
||||
}
|
||||
|
||||
// ListBlocksAltair retrieves blocks by root, slot, or epoch.
|
||||
//
|
||||
// The server may return multiple blocks in the case that a slot or epoch is
|
||||
// provided as the filter criteria. The server may return an empty list when
|
||||
// no blocks in their database match the filter criteria. This RPC should
|
||||
// not return NOT_FOUND. Only one filter criteria should be used.
|
||||
func (bs *Server) ListBlocksAltair(
|
||||
ctx context.Context, req *ethpb.ListBlocksRequest,
|
||||
) (*ethpb.ListBlocksResponseAltair, error) {
|
||||
if int(req.PageSize) > cmd.Get().MaxRPCPageSize {
|
||||
return nil, status.Errorf(codes.InvalidArgument, "Requested page size %d can not be greater than max size %d",
|
||||
req.PageSize, cmd.Get().MaxRPCPageSize)
|
||||
}
|
||||
|
||||
switch q := req.QueryFilter.(type) {
|
||||
case *ethpb.ListBlocksRequest_Epoch:
|
||||
ctrs, numBlks, nextPageToken, err := bs.ListBlocksForEpoch(ctx, req, q)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
altCtrs, err := convertFromV1Containers(ctrs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ðpb.ListBlocksResponseAltair{
|
||||
BlockContainers: altCtrs,
|
||||
TotalSize: int32(numBlks),
|
||||
NextPageToken: nextPageToken,
|
||||
}, nil
|
||||
case *ethpb.ListBlocksRequest_Root:
|
||||
ctrs, numBlks, nextPageToken, err := bs.ListBlocksForRoot(ctx, req, q)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
altCtrs, err := convertFromV1Containers(ctrs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ðpb.ListBlocksResponseAltair{
|
||||
BlockContainers: altCtrs,
|
||||
TotalSize: int32(numBlks),
|
||||
NextPageToken: nextPageToken,
|
||||
}, nil
|
||||
|
||||
case *ethpb.ListBlocksRequest_Slot:
|
||||
ctrs, numBlks, nextPageToken, err := bs.ListBlocksForSlot(ctx, req, q)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
altCtrs, err := convertFromV1Containers(ctrs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ðpb.ListBlocksResponseAltair{
|
||||
BlockContainers: altCtrs,
|
||||
TotalSize: int32(numBlks),
|
||||
NextPageToken: nextPageToken,
|
||||
}, nil
|
||||
case *ethpb.ListBlocksRequest_Genesis:
|
||||
ctrs, numBlks, nextPageToken, err := bs.ListBlocksForGenesis(ctx, req, q)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
altCtrs, err := convertFromV1Containers(ctrs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ðpb.ListBlocksResponseAltair{
|
||||
BlockContainers: altCtrs,
|
||||
TotalSize: int32(numBlks),
|
||||
NextPageToken: nextPageToken,
|
||||
}, nil
|
||||
}
|
||||
|
||||
return nil, status.Error(codes.InvalidArgument, "Must specify a filter criteria for fetching blocks")
|
||||
}
|
||||
|
||||
func convertFromV1Containers(ctrs []BlockContainer) ([]*ethpb.BeaconBlockContainerAltair, error) {
|
||||
protoCtrs := make([]*ethpb.BeaconBlockContainerAltair, len(ctrs))
|
||||
var err error
|
||||
for i, c := range ctrs {
|
||||
protoCtrs[i], err = convertToBlockContainer(c.Blk, c.Root, c.IsCanonical)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not get block container: %v", err)
|
||||
}
|
||||
}
|
||||
return protoCtrs, nil
|
||||
}
|
||||
|
||||
func convertToBlockContainer(blk block.SignedBeaconBlock, root [32]byte, isCanonical bool) (*ethpb.BeaconBlockContainerAltair, error) {
|
||||
ctr := ðpb.BeaconBlockContainerAltair{
|
||||
BlockRoot: root[:],
|
||||
Canonical: isCanonical,
|
||||
}
|
||||
|
||||
switch blk.Version() {
|
||||
case version.Phase0:
|
||||
rBlk, err := blk.PbPhase0Block()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ctr.Block = ðpb.BeaconBlockContainerAltair_Phase0Block{Phase0Block: rBlk}
|
||||
case version.Altair:
|
||||
rBlk, err := blk.PbAltairBlock()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ctr.Block = ðpb.BeaconBlockContainerAltair_AltairBlock{AltairBlock: rBlk}
|
||||
}
|
||||
return ctr, nil
|
||||
}
|
||||
|
||||
// ListBlocksForEpoch retrieves all blocks for the provided epoch.
|
||||
func (bs *Server) ListBlocksForEpoch(ctx context.Context, req *ethpb.ListBlocksRequest, q *ethpb.ListBlocksRequest_Epoch) ([]BlockContainer, int, string, error) {
|
||||
blks, _, err := bs.BeaconDB.Blocks(ctx, filters.NewFilter().SetStartEpoch(q.Epoch).SetEndEpoch(q.Epoch))
|
||||
if err != nil {
|
||||
return nil, 0, strconv.Itoa(0), status.Errorf(codes.Internal, "Could not get blocks: %v", err)
|
||||
}
|
||||
|
||||
numBlks := len(blks)
|
||||
if len(blks) == 0 {
|
||||
return []BlockContainer{}, numBlks, strconv.Itoa(0), nil
|
||||
}
|
||||
|
||||
start, end, nextPageToken, err := pagination.StartAndEndPage(req.PageToken, int(req.PageSize), numBlks)
|
||||
if err != nil {
|
||||
return nil, 0, strconv.Itoa(0), status.Errorf(codes.Internal, "Could not paginate blocks: %v", err)
|
||||
}
|
||||
|
||||
returnedBlks := blks[start:end]
|
||||
containers := make([]BlockContainer, len(returnedBlks))
|
||||
for i, b := range returnedBlks {
|
||||
root, err := b.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, 0, strconv.Itoa(0), err
|
||||
}
|
||||
canonical, err := bs.CanonicalFetcher.IsCanonical(ctx, root)
|
||||
if err != nil {
|
||||
return nil, 0, strconv.Itoa(0), status.Errorf(codes.Internal, "Could not determine if block is canonical: %v", err)
|
||||
}
|
||||
containers[i] = BlockContainer{
|
||||
Blk: b,
|
||||
Root: root,
|
||||
IsCanonical: canonical,
|
||||
}
|
||||
}
|
||||
|
||||
return containers, numBlks, nextPageToken, nil
|
||||
}
|
||||
|
||||
// ListBlocksForRoot retrieves the block for the provided root.
|
||||
func (bs *Server) ListBlocksForRoot(ctx context.Context, req *ethpb.ListBlocksRequest, q *ethpb.ListBlocksRequest_Root) ([]BlockContainer, int, string, error) {
|
||||
blk, err := bs.BeaconDB.Block(ctx, bytesutil.ToBytes32(q.Root))
|
||||
if err != nil {
|
||||
return nil, 0, strconv.Itoa(0), status.Errorf(codes.Internal, "Could not retrieve block: %v", err)
|
||||
}
|
||||
if blk == nil || blk.IsNil() {
|
||||
return []BlockContainer{}, 0, strconv.Itoa(0), nil
|
||||
|
||||
}
|
||||
root, err := blk.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, 0, strconv.Itoa(0), status.Errorf(codes.Internal, "Could not determine block root: %v", err)
|
||||
}
|
||||
canonical, err := bs.CanonicalFetcher.IsCanonical(ctx, root)
|
||||
if err != nil {
|
||||
return nil, 0, strconv.Itoa(0), status.Errorf(codes.Internal, "Could not determine if block is canonical: %v", err)
|
||||
}
|
||||
return []BlockContainer{{
|
||||
Blk: blk,
|
||||
Root: root,
|
||||
IsCanonical: canonical,
|
||||
}}, 1, strconv.Itoa(0), nil
|
||||
}
|
||||
|
||||
// ListBlocksForSlot retrieves all blocks for the provided slot.
func (bs *Server) ListBlocksForSlot(ctx context.Context, req *ethpb.ListBlocksRequest, q *ethpb.ListBlocksRequest_Slot) ([]BlockContainer, int, string, error) {
	hasBlocks, blks, err := bs.BeaconDB.BlocksBySlot(ctx, q.Slot)
	if err != nil {
		return nil, 0, strconv.Itoa(0), status.Errorf(codes.Internal, "Could not retrieve blocks for slot %d: %v", q.Slot, err)
	}
	if !hasBlocks {
		return []BlockContainer{}, 0, strconv.Itoa(0), nil
	}

	numBlks := len(blks)

	start, end, nextPageToken, err := pagination.StartAndEndPage(req.PageToken, int(req.PageSize), numBlks)
	if err != nil {
		return nil, 0, strconv.Itoa(0), status.Errorf(codes.Internal, "Could not paginate blocks: %v", err)
	}

	returnedBlks := blks[start:end]
	containers := make([]BlockContainer, len(returnedBlks))
	for i, b := range returnedBlks {
		root, err := b.Block().HashTreeRoot()
		if err != nil {
			return nil, 0, strconv.Itoa(0), status.Errorf(codes.Internal, "Could not determine block root: %v", err)
		}
		canonical, err := bs.CanonicalFetcher.IsCanonical(ctx, root)
		if err != nil {
			return nil, 0, strconv.Itoa(0), status.Errorf(codes.Internal, "Could not determine if block is canonical: %v", err)
		}
		containers[i] = BlockContainer{
			Blk:         b,
			Root:        root,
			IsCanonical: canonical,
		}
	}
	return containers, numBlks, nextPageToken, nil
}

// ListBlocksForGenesis retrieves the genesis block.
func (bs *Server) ListBlocksForGenesis(ctx context.Context, req *ethpb.ListBlocksRequest, q *ethpb.ListBlocksRequest_Genesis) ([]BlockContainer, int, string, error) {
	genBlk, err := bs.BeaconDB.GenesisBlock(ctx)
	if err != nil {
		return nil, 0, strconv.Itoa(0), status.Errorf(codes.Internal, "Could not retrieve blocks for genesis slot: %v", err)
	}
	if genBlk == nil || genBlk.IsNil() {
		return []BlockContainer{}, 0, strconv.Itoa(0), status.Error(codes.Internal, "Could not find genesis block")
	}
	root, err := genBlk.Block().HashTreeRoot()
	if err != nil {
		return nil, 0, strconv.Itoa(0), status.Errorf(codes.Internal, "Could not determine block root: %v", err)
	}
	return []BlockContainer{{
		Blk:         genBlk,
		Root:        root,
		IsCanonical: true,
	}}, 1, strconv.Itoa(0), nil
}

func convertToProto(ctrs []BlockContainer) ([]*ethpb.BeaconBlockContainer, error) {
	protoCtrs := make([]*ethpb.BeaconBlockContainer, len(ctrs))
	for i, c := range ctrs {
		phBlk, err := c.Blk.PbPhase0Block()
		if err != nil {
			return nil, status.Errorf(codes.Internal, "Could not get phase 0 block: %v", err)
		}
		copiedRoot := c.Root
		protoCtrs[i] = &ethpb.BeaconBlockContainer{
			Block:     phBlk,
			BlockRoot: copiedRoot[:],
			Canonical: c.IsCanonical,
		}
	}
	return protoCtrs, nil
}

// GetChainHead retrieves information about the head of the beacon chain from
// the view of the beacon chain node.
//

@@ -17,7 +17,6 @@ import (
	"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
	v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
-	statepb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
	"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
	"github.com/prysmaticlabs/prysm/shared/bytesutil"
@@ -217,7 +216,7 @@ func TestServer_ListBlocks_Pagination(t *testing.T) {
				Slot: 6}}),
				BlockRoot: blkContainers[6].BlockRoot,
				Canonical: blkContainers[6].Canonical}},
-			TotalSize: 1}},
+			TotalSize: 1, NextPageToken: strconv.Itoa(0)}},
		{req: &ethpb.ListBlocksRequest{QueryFilter: &ethpb.ListBlocksRequest_Root{Root: root6[:]}},
			res: &ethpb.ListBlocksResponse{
				BlockContainers: []*ethpb.BeaconBlockContainer{{Block: testutil.HydrateSignedBeaconBlock(&ethpb.SignedBeaconBlock{
@@ -225,7 +224,7 @@ func TestServer_ListBlocks_Pagination(t *testing.T) {
				Slot: 6}}),
				BlockRoot: blkContainers[6].BlockRoot,
				Canonical: blkContainers[6].Canonical}},
-			TotalSize: 1}},
+			TotalSize: 1, NextPageToken: strconv.Itoa(0)}},
		{req: &ethpb.ListBlocksRequest{
			PageToken:   strconv.Itoa(0),
			QueryFilter: &ethpb.ListBlocksRequest_Epoch{Epoch: 0},
@@ -394,7 +393,7 @@ func TestServer_GetChainHead(t *testing.T) {
	pjRoot, err := prevJustifiedBlock.Block.HashTreeRoot()
	require.NoError(t, err)

-	s, err := v1.InitializeFromProto(&statepb.BeaconState{
+	s, err := v1.InitializeFromProto(&ethpb.BeaconState{
		Slot:                        1,
		PreviousJustifiedCheckpoint: &ethpb.Checkpoint{Epoch: 3, Root: pjRoot[:]},
		CurrentJustifiedCheckpoint:  &ethpb.Checkpoint{Epoch: 2, Root: jRoot[:]},
@@ -484,7 +483,7 @@ func TestServer_StreamChainHead_OnHeadUpdated(t *testing.T) {
	pjRoot, err := prevJustifiedBlock.Block.HashTreeRoot()
	require.NoError(t, err)

-	s, err := v1.InitializeFromProto(&statepb.BeaconState{
+	s, err := v1.InitializeFromProto(&ethpb.BeaconState{
		Slot:                        1,
		PreviousJustifiedCheckpoint: &ethpb.Checkpoint{Epoch: 3, Root: pjRoot[:]},
		CurrentJustifiedCheckpoint:  &ethpb.Checkpoint{Epoch: 2, Root: jRoot[:]},
@@ -747,3 +746,300 @@ func TestServer_GetWeakSubjectivityCheckpoint(t *testing.T) {
	require.NoError(t, err)
	require.DeepEqual(t, sRoot[:], c.StateRoot)
}

func TestServer_ListBlocksAltair_NoResults(t *testing.T) {
	db := dbTest.SetupDB(t)
	ctx := context.Background()

	bs := &Server{
		BeaconDB: db,
	}
	wanted := &ethpb.ListBlocksResponseAltair{
		BlockContainers: make([]*ethpb.BeaconBlockContainerAltair, 0),
		TotalSize:       int32(0),
		NextPageToken:   strconv.Itoa(0),
	}
	res, err := bs.ListBlocksAltair(ctx, &ethpb.ListBlocksRequest{
		QueryFilter: &ethpb.ListBlocksRequest_Slot{
			Slot: 0,
		},
	})
	require.NoError(t, err)
	if !proto.Equal(wanted, res) {
		t.Errorf("Wanted %v, received %v", wanted, res)
	}
	res, err = bs.ListBlocksAltair(ctx, &ethpb.ListBlocksRequest{
		QueryFilter: &ethpb.ListBlocksRequest_Slot{
			Slot: 0,
		},
	})
	require.NoError(t, err)
	if !proto.Equal(wanted, res) {
		t.Errorf("Wanted %v, received %v", wanted, res)
	}
	res, err = bs.ListBlocksAltair(ctx, &ethpb.ListBlocksRequest{
		QueryFilter: &ethpb.ListBlocksRequest_Root{
			Root: make([]byte, 32),
		},
	})
	require.NoError(t, err)
	if !proto.Equal(wanted, res) {
		t.Errorf("Wanted %v, received %v", wanted, res)
	}
}

func TestServer_ListBlocksAltair_Genesis(t *testing.T) {
	db := dbTest.SetupDB(t)
	ctx := context.Background()

	bs := &Server{
		BeaconDB: db,
	}

	// Should throw an error if no genesis block is found.
	_, err := bs.ListBlocks(ctx, &ethpb.ListBlocksRequest{
		QueryFilter: &ethpb.ListBlocksRequest_Genesis{
			Genesis: true,
		},
	})
	require.ErrorContains(t, "Could not find genesis", err)

	// Should return the proper genesis block if it exists.
	parentRoot := [32]byte{'a'}
	blk := testutil.NewBeaconBlock()
	blk.Block.ParentRoot = parentRoot[:]
	root, err := blk.Block.HashTreeRoot()
	require.NoError(t, err)
	require.NoError(t, db.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(blk)))
	require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
	ctr, err := convertToBlockContainer(wrapper.WrappedPhase0SignedBeaconBlock(blk), root, true)
	assert.NoError(t, err)
	wanted := &ethpb.ListBlocksResponseAltair{
		BlockContainers: []*ethpb.BeaconBlockContainerAltair{ctr},
		NextPageToken:   "0",
		TotalSize:       1,
	}
	res, err := bs.ListBlocksAltair(ctx, &ethpb.ListBlocksRequest{
		QueryFilter: &ethpb.ListBlocksRequest_Genesis{
			Genesis: true,
		},
	})
	require.NoError(t, err)
	if !proto.Equal(wanted, res) {
		t.Errorf("Wanted %v, received %v", wanted, res)
	}
}

func TestServer_ListBlocksAltair_Genesis_MultiBlocks(t *testing.T) {
	db := dbTest.SetupDB(t)
	ctx := context.Background()

	bs := &Server{
		BeaconDB: db,
	}
	// Should return the proper genesis block if it exists.
	parentRoot := [32]byte{1, 2, 3}
	blk := testutil.NewBeaconBlock()
	blk.Block.ParentRoot = parentRoot[:]
	root, err := blk.Block.HashTreeRoot()
	require.NoError(t, err)
	require.NoError(t, db.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(blk)))
	require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))

	count := types.Slot(100)
	blks := make([]block.SignedBeaconBlock, count)
	for i := types.Slot(0); i < count; i++ {
		b := testutil.NewBeaconBlock()
		b.Block.Slot = i
		require.NoError(t, err)
		blks[i] = wrapper.WrappedPhase0SignedBeaconBlock(b)
	}
	require.NoError(t, db.SaveBlocks(ctx, blks))

	// Should not error out when more than one block exists in the database.
	_, err = bs.ListBlocksAltair(ctx, &ethpb.ListBlocksRequest{
		QueryFilter: &ethpb.ListBlocksRequest_Genesis{
			Genesis: true,
		},
	})
	require.NoError(t, err)
}

func TestServer_ListBlocksAltair_Pagination(t *testing.T) {
	params.UseMinimalConfig()
	defer params.UseMainnetConfig()

	db := dbTest.SetupDB(t)
	chain := &chainMock.ChainService{
		CanonicalRoots: map[[32]byte]bool{},
	}
	ctx := context.Background()

	count := types.Slot(100)
	blks := make([]block.SignedBeaconBlock, count)
	blkContainers := make([]*ethpb.BeaconBlockContainerAltair, count)
	for i := types.Slot(0); i < count; i++ {
		b := testutil.NewBeaconBlock()
		b.Block.Slot = i
		root, err := b.Block.HashTreeRoot()
		require.NoError(t, err)
		chain.CanonicalRoots[root] = true
		blks[i] = wrapper.WrappedPhase0SignedBeaconBlock(b)
		ctr, err := convertToBlockContainer(blks[i], root, true)
		require.NoError(t, err)
		blkContainers[i] = ctr
	}
	require.NoError(t, db.SaveBlocks(ctx, blks))

	orphanedBlk := testutil.NewBeaconBlock()
	orphanedBlk.Block.Slot = 300
	orphanedBlkRoot, err := orphanedBlk.Block.HashTreeRoot()
	require.NoError(t, err)
	require.NoError(t, db.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(orphanedBlk)))

	bs := &Server{
		BeaconDB:         db,
		CanonicalFetcher: chain,
	}

	root6, err := blks[6].Block().HashTreeRoot()
	require.NoError(t, err)

	tests := []struct {
		req *ethpb.ListBlocksRequest
		res *ethpb.ListBlocksResponseAltair
	}{
		{req: &ethpb.ListBlocksRequest{
			PageToken:   strconv.Itoa(0),
			QueryFilter: &ethpb.ListBlocksRequest_Slot{Slot: 5},
			PageSize:    3},
			res: &ethpb.ListBlocksResponseAltair{
				BlockContainers: []*ethpb.BeaconBlockContainerAltair{{Block: &ethpb.BeaconBlockContainerAltair_Phase0Block{Phase0Block: testutil.HydrateSignedBeaconBlock(&ethpb.SignedBeaconBlock{
					Block: &ethpb.BeaconBlock{
						Slot: 5}})},
					BlockRoot: blkContainers[5].BlockRoot,
					Canonical: blkContainers[5].Canonical}},
				NextPageToken: "",
				TotalSize:     1,
			},
		},
		{req: &ethpb.ListBlocksRequest{
			PageToken:   strconv.Itoa(0),
			QueryFilter: &ethpb.ListBlocksRequest_Root{Root: root6[:]},
			PageSize:    3},
			res: &ethpb.ListBlocksResponseAltair{
				BlockContainers: []*ethpb.BeaconBlockContainerAltair{{Block: &ethpb.BeaconBlockContainerAltair_Phase0Block{
					Phase0Block: testutil.HydrateSignedBeaconBlock(&ethpb.SignedBeaconBlock{
						Block: &ethpb.BeaconBlock{
							Slot: 6}})},
					BlockRoot: blkContainers[6].BlockRoot,
					Canonical: blkContainers[6].Canonical}},
				TotalSize: 1, NextPageToken: strconv.Itoa(0)}},
		{req: &ethpb.ListBlocksRequest{QueryFilter: &ethpb.ListBlocksRequest_Root{Root: root6[:]}},
			res: &ethpb.ListBlocksResponseAltair{
				BlockContainers: []*ethpb.BeaconBlockContainerAltair{{Block: &ethpb.BeaconBlockContainerAltair_Phase0Block{
					Phase0Block: testutil.HydrateSignedBeaconBlock(&ethpb.SignedBeaconBlock{
						Block: &ethpb.BeaconBlock{
							Slot: 6}})},
					BlockRoot: blkContainers[6].BlockRoot,
					Canonical: blkContainers[6].Canonical}},
				TotalSize: 1, NextPageToken: strconv.Itoa(0)}},
		{req: &ethpb.ListBlocksRequest{
			PageToken:   strconv.Itoa(0),
			QueryFilter: &ethpb.ListBlocksRequest_Epoch{Epoch: 0},
			PageSize:    100},
			res: &ethpb.ListBlocksResponseAltair{
				BlockContainers: blkContainers[0:params.BeaconConfig().SlotsPerEpoch],
				NextPageToken:   "",
				TotalSize:       int32(params.BeaconConfig().SlotsPerEpoch)}},
		{req: &ethpb.ListBlocksRequest{
			PageToken:   strconv.Itoa(1),
			QueryFilter: &ethpb.ListBlocksRequest_Epoch{Epoch: 5},
			PageSize:    3},
			res: &ethpb.ListBlocksResponseAltair{
				BlockContainers: blkContainers[43:46],
				NextPageToken:   "2",
				TotalSize:       int32(params.BeaconConfig().SlotsPerEpoch)}},
		{req: &ethpb.ListBlocksRequest{
			PageToken:   strconv.Itoa(1),
			QueryFilter: &ethpb.ListBlocksRequest_Epoch{Epoch: 11},
			PageSize:    7},
			res: &ethpb.ListBlocksResponseAltair{
				BlockContainers: blkContainers[95:96],
				NextPageToken:   "",
				TotalSize:       int32(params.BeaconConfig().SlotsPerEpoch)}},
		{req: &ethpb.ListBlocksRequest{
			PageToken:   strconv.Itoa(0),
			QueryFilter: &ethpb.ListBlocksRequest_Epoch{Epoch: 12},
			PageSize:    4},
			res: &ethpb.ListBlocksResponseAltair{
				BlockContainers: blkContainers[96:100],
				NextPageToken:   "",
				TotalSize:       int32(params.BeaconConfig().SlotsPerEpoch / 2)}},
		{req: &ethpb.ListBlocksRequest{
			PageToken:   strconv.Itoa(0),
			QueryFilter: &ethpb.ListBlocksRequest_Slot{Slot: 300},
			PageSize:    3},
			res: &ethpb.ListBlocksResponseAltair{
				BlockContainers: []*ethpb.BeaconBlockContainerAltair{{Block: &ethpb.BeaconBlockContainerAltair_Phase0Block{
					Phase0Block: testutil.HydrateSignedBeaconBlock(&ethpb.SignedBeaconBlock{
						Block: &ethpb.BeaconBlock{
							Slot: 300}})},
					BlockRoot: orphanedBlkRoot[:],
					Canonical: false}},
				NextPageToken: "",
				TotalSize:     1}},
	}

	for i, test := range tests {
		t.Run(fmt.Sprintf("test_%d", i), func(t *testing.T) {
			res, err := bs.ListBlocksAltair(ctx, test.req)
			require.NoError(t, err)
			require.DeepSSZEqual(t, res, test.res)
		})
	}
}

func TestServer_ListBlocksAltair_Errors(t *testing.T) {
	db := dbTest.SetupDB(t)
	ctx := context.Background()

	bs := &Server{
		BeaconDB: db,
	}
	exceedsMax := int32(cmd.Get().MaxRPCPageSize + 1)

	wanted := fmt.Sprintf("Requested page size %d can not be greater than max size %d", exceedsMax, cmd.Get().MaxRPCPageSize)
	req := &ethpb.ListBlocksRequest{PageToken: strconv.Itoa(0), PageSize: exceedsMax}
	_, err := bs.ListBlocks(ctx, req)
	assert.ErrorContains(t, wanted, err)

	wanted = "Must specify a filter criteria for fetching"
	req = &ethpb.ListBlocksRequest{}
	_, err = bs.ListBlocksAltair(ctx, req)
	assert.ErrorContains(t, wanted, err)

	req = &ethpb.ListBlocksRequest{QueryFilter: &ethpb.ListBlocksRequest_Slot{Slot: 0}}
	res, err := bs.ListBlocksAltair(ctx, req)
	require.NoError(t, err)
	assert.Equal(t, 0, len(res.BlockContainers), "Wanted empty list")
	assert.Equal(t, int32(0), res.TotalSize, "Wanted total size 0")

	req = &ethpb.ListBlocksRequest{QueryFilter: &ethpb.ListBlocksRequest_Slot{}}
	res, err = bs.ListBlocksAltair(ctx, req)
	require.NoError(t, err)
	assert.Equal(t, 0, len(res.BlockContainers), "Wanted empty list")
	assert.Equal(t, int32(0), res.TotalSize, "Wanted total size 0")

	req = &ethpb.ListBlocksRequest{QueryFilter: &ethpb.ListBlocksRequest_Root{Root: []byte{'A'}}}
	res, err = bs.ListBlocksAltair(ctx, req)
	require.NoError(t, err)
	assert.Equal(t, 0, len(res.BlockContainers), "Wanted empty list")
	assert.Equal(t, int32(0), res.TotalSize, "Wanted total size 0")

	req = &ethpb.ListBlocksRequest{QueryFilter: &ethpb.ListBlocksRequest_Root{Root: []byte{'A'}}}
	res, err = bs.ListBlocksAltair(ctx, req)
	require.NoError(t, err)
	assert.Equal(t, 0, len(res.BlockContainers), "Wanted empty list")
	assert.Equal(t, int32(0), res.TotalSize, "Wanted total size 0")
}

@@ -20,9 +20,6 @@ import (
	"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
	"github.com/prysmaticlabs/prysm/beacon-chain/sync"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
-	statepb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// Server defines a server implementation of the gRPC Beacon Chain service,
@@ -43,14 +40,10 @@ type Server struct {
	Broadcaster                 p2p.Broadcaster
	AttestationsPool            attestations.Pool
	SlashingsPool               slashings.PoolManager
-	CanonicalStateChan          chan *statepb.BeaconState
+	CanonicalStateChan          chan *ethpb.BeaconState
	ChainStartChan              chan time.Time
	ReceivedAttestationsBuffer  chan *ethpb.Attestation
	CollectedAttestationsBuffer chan []*ethpb.Attestation
	StateGen                    stategen.StateManager
	SyncChecker                 sync.Checker
}

-func (bs *Server) ListBlocksAltair(ctx context.Context, request *ethpb.ListBlocksRequest) (*ethpb.ListBlocksResponseAltair, error) {
-	return nil, status.Error(codes.Unimplemented, "Unimplemented")
-}

@@ -7,6 +7,7 @@ import (
	"time"

	types "github.com/prysmaticlabs/eth2-types"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	core "github.com/prysmaticlabs/prysm/beacon-chain/core/state"
@@ -17,6 +18,7 @@ import (
	"github.com/prysmaticlabs/prysm/shared/cmd"
	"github.com/prysmaticlabs/prysm/shared/pagination"
	"github.com/prysmaticlabs/prysm/shared/params"
	"github.com/prysmaticlabs/prysm/shared/version"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	"google.golang.org/protobuf/types/known/emptypb"
@@ -521,14 +523,29 @@ func (bs *Server) GetValidatorParticipation(
	if err != nil {
		return nil, status.Errorf(codes.Internal, "Could not get state: %v", err)
	}

-	v, b, err := precompute.New(ctx, beaconState)
-	if err != nil {
-		return nil, status.Errorf(codes.Internal, "Could not set up pre compute instance: %v", err)
-	}
-	_, b, err = precompute.ProcessAttestations(ctx, beaconState, v, b)
-	if err != nil {
-		return nil, status.Errorf(codes.Internal, "Could not pre compute attestations: %v", err)
+	var v []*precompute.Validator
+	var b *precompute.Balance
+	switch beaconState.Version() {
+	case version.Phase0:
+		v, b, err = precompute.New(ctx, beaconState)
+		if err != nil {
+			return nil, status.Errorf(codes.Internal, "Could not set up pre compute instance: %v", err)
+		}
+		_, b, err = precompute.ProcessAttestations(ctx, beaconState, v, b)
+		if err != nil {
+			return nil, status.Errorf(codes.Internal, "Could not pre compute attestations: %v", err)
+		}
+	case version.Altair:
+		v, b, err = altair.InitializeEpochValidators(ctx, beaconState)
+		if err != nil {
+			return nil, status.Errorf(codes.Internal, "Could not set up altair pre compute instance: %v", err)
+		}
+		_, b, err = altair.ProcessEpochParticipation(ctx, beaconState, b, v)
+		if err != nil {
+			return nil, status.Errorf(codes.Internal, "Could not pre compute attestations: %v", err)
+		}
+	default:
+		return nil, status.Errorf(codes.Internal, "Invalid state type retrieved with a version of %d", beaconState.Version())
	}

	p := &ethpb.ValidatorParticipationResponse{
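The switch on beaconState.Version() above is the recurring pattern this release uses to serve both forks from one endpoint. A standalone sketch of that dispatch shape, with stand-in version constants and stub branches rather than the real precompute/altair APIs:

package main

import "fmt"

// Stand-in fork version constants; prysm keeps the real ones in shared/version.
const (
	phase0 = iota
	altair
)

// computeParticipation shows the dispatch shape used by GetValidatorParticipation:
// one endpoint, one switch on the state version, one precompute path per fork.
func computeParticipation(stateVersion int) (string, error) {
	switch stateVersion {
	case phase0:
		// Phase 0 path: precompute.New + precompute.ProcessAttestations.
		return "phase0 precompute", nil
	case altair:
		// Altair path: altair.InitializeEpochValidators + altair.ProcessEpochParticipation.
		return "altair precompute", nil
	default:
		return "", fmt.Errorf("invalid state type retrieved with a version of %d", stateVersion)
	}
}

func main() {
	for _, v := range []int{phase0, altair, 99} {
		result, err := computeParticipation(v)
		fmt.Println(v, result, err)
	}
}
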
@@ -662,19 +679,41 @@ func (bs *Server) GetValidatorPerformance(
			return nil, status.Errorf(codes.Internal, "Could not process slots: %v", err)
		}
	}
-	vp, bp, err := precompute.New(ctx, headState)
-	if err != nil {
-		return nil, err
+	validatorSummary := []*precompute.Validator{}
+	switch headState.Version() {
+	case version.Phase0:
+		vp, bp, err := precompute.New(ctx, headState)
+		if err != nil {
+			return nil, err
+		}
+		vp, bp, err = precompute.ProcessAttestations(ctx, headState, vp, bp)
+		if err != nil {
+			return nil, err
+		}
+		headState, err = precompute.ProcessRewardsAndPenaltiesPrecompute(headState, bp, vp, precompute.AttestationsDelta, precompute.ProposersDelta)
+		if err != nil {
+			return nil, err
+		}
+		validatorSummary = vp
+	case version.Altair:
+		vp, bp, err := altair.InitializeEpochValidators(ctx, headState)
+		if err != nil {
+			return nil, err
+		}
+		vp, bp, err = altair.ProcessEpochParticipation(ctx, headState, bp, vp)
+		if err != nil {
+			return nil, err
+		}
+		headState, vp, err = altair.ProcessInactivityScores(ctx, headState, vp)
+		if err != nil {
+			return nil, err
+		}
+		headState, err = altair.ProcessRewardsAndPenaltiesPrecompute(headState, bp, vp)
+		if err != nil {
+			return nil, err
+		}
+		validatorSummary = vp
	}
-	vp, bp, err = precompute.ProcessAttestations(ctx, headState, vp, bp)
-	if err != nil {
-		return nil, err
-	}
-	headState, err = precompute.ProcessRewardsAndPenaltiesPrecompute(headState, bp, vp, precompute.AttestationsDelta, precompute.ProposersDelta)
-	if err != nil {
-		return nil, err
-	}
-	validatorSummary := vp

	responseCap := len(req.Indices) + len(req.PublicKeys)
	validatorIndices := make([]types.ValidatorIndex, 0, responseCap)

@@ -21,7 +21,6 @@ import (
	v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
	mockSync "github.com/prysmaticlabs/prysm/beacon-chain/sync/initial-sync/testing"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
-	statepb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
	"github.com/prysmaticlabs/prysm/shared/bytesutil"
	"github.com/prysmaticlabs/prysm/shared/cmd"
@@ -1308,7 +1307,7 @@ func TestServer_GetValidatorActiveSetChanges(t *testing.T) {
}

func TestServer_GetValidatorQueue_PendingActivation(t *testing.T) {
-	headState, err := v1.InitializeFromProto(&statepb.BeaconState{
+	headState, err := v1.InitializeFromProto(&ethpb.BeaconState{
		Validators: []*ethpb.Validator{
			{
				ActivationEpoch: helpers.ActivationExitEpoch(0),
@@ -1407,7 +1406,7 @@ func TestServer_GetValidatorQueue_ExitedValidatorLeavesQueue(t *testing.T) {
}

func TestServer_GetValidatorQueue_PendingExit(t *testing.T) {
-	headState, err := v1.InitializeFromProto(&statepb.BeaconState{
+	headState, err := v1.InitializeFromProto(&ethpb.BeaconState{
		Validators: []*ethpb.Validator{
			{
				ActivationEpoch: 0,
@@ -1541,7 +1540,7 @@ func TestServer_GetValidatorParticipation_CurrentAndPrevEpoch(t *testing.T) {
		balances[i] = params.BeaconConfig().MaxEffectiveBalance
	}

-	atts := []*statepb.PendingAttestation{{
+	atts := []*ethpb.PendingAttestation{{
		Data:            testutil.HydrateAttestationData(&ethpb.AttestationData{}),
		InclusionDelay:  1,
		AggregationBits: bitfield.NewBitlist(validatorCount / uint64(params.BeaconConfig().SlotsPerEpoch)),
@@ -1558,8 +1557,8 @@ func TestServer_GetValidatorParticipation_CurrentAndPrevEpoch(t *testing.T) {
	b.Block.Slot = 16
	require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b)))
	bRoot, err := b.Block.HashTreeRoot()
-	require.NoError(t, beaconDB.SaveStateSummary(ctx, &statepb.StateSummary{Root: bRoot[:]}))
-	require.NoError(t, beaconDB.SaveStateSummary(ctx, &statepb.StateSummary{Root: params.BeaconConfig().ZeroHash[:]}))
+	require.NoError(t, beaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bRoot[:]}))
+	require.NoError(t, beaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: params.BeaconConfig().ZeroHash[:]}))
	require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, bRoot))
	require.NoError(t, err)
	require.NoError(t, beaconDB.SaveState(ctx, headState, bRoot))
@@ -1620,7 +1619,7 @@ func TestServer_GetValidatorParticipation_OrphanedUntilGenesis(t *testing.T) {
		balances[i] = params.BeaconConfig().MaxEffectiveBalance
	}

-	atts := []*statepb.PendingAttestation{{
+	atts := []*ethpb.PendingAttestation{{
		Data:            testutil.HydrateAttestationData(&ethpb.AttestationData{}),
		InclusionDelay:  1,
		AggregationBits: bitfield.NewBitlist(validatorCount / uint64(params.BeaconConfig().SlotsPerEpoch)),
@@ -1636,8 +1635,8 @@ func TestServer_GetValidatorParticipation_OrphanedUntilGenesis(t *testing.T) {
	b := testutil.NewBeaconBlock()
	require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b)))
	bRoot, err := b.Block.HashTreeRoot()
-	require.NoError(t, beaconDB.SaveStateSummary(ctx, &statepb.StateSummary{Root: bRoot[:]}))
-	require.NoError(t, beaconDB.SaveStateSummary(ctx, &statepb.StateSummary{Root: params.BeaconConfig().ZeroHash[:]}))
+	require.NoError(t, beaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bRoot[:]}))
+	require.NoError(t, beaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: params.BeaconConfig().ZeroHash[:]}))
	require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, bRoot))
	require.NoError(t, err)
	require.NoError(t, beaconDB.SaveState(ctx, headState, bRoot))
@@ -1700,9 +1699,9 @@ func TestGetValidatorPerformance_OK(t *testing.T) {
	headState, err := testutil.NewBeaconState()
	require.NoError(t, err)
	require.NoError(t, headState.SetSlot(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epoch+1))))
-	atts := make([]*statepb.PendingAttestation, 3)
+	atts := make([]*ethpb.PendingAttestation, 3)
	for i := 0; i < len(atts); i++ {
-		atts[i] = &statepb.PendingAttestation{
+		atts[i] = &ethpb.PendingAttestation{
			Data: &ethpb.AttestationData{
				Target: &ethpb.Checkpoint{Root: make([]byte, 32)},
				Source: &ethpb.Checkpoint{Root: make([]byte, 32)},
@@ -2067,11 +2066,11 @@ func TestServer_GetIndividualVotes_Working(t *testing.T) {
	require.NoError(t, beaconState.SetBlockRoots(br))
	att2.Data.Target.Root = rt[:]
	att2.Data.BeaconBlockRoot = newRt[:]
-	err = beaconState.AppendPreviousEpochAttestations(&statepb.PendingAttestation{
+	err = beaconState.AppendPreviousEpochAttestations(&ethpb.PendingAttestation{
		Data: att1.Data, AggregationBits: bf, InclusionDelay: 1,
	})
	require.NoError(t, err)
-	err = beaconState.AppendCurrentEpochAttestations(&statepb.PendingAttestation{
+	err = beaconState.AppendCurrentEpochAttestations(&ethpb.PendingAttestation{
		Data: att2.Data, AggregationBits: bf, InclusionDelay: 1,
	})
	require.NoError(t, err)

@@ -6,6 +6,7 @@ go_library(
        "aggregator.go",
        "assignments.go",
        "attester.go",
        "blocks.go",
        "exit.go",
        "log.go",
        "proposer.go",
@@ -13,6 +14,7 @@ go_library(
        "proposer_sync_aggregate.go",
        "server.go",
        "status.go",
        "sync_committee.go",
    ],
    importpath = "github.com/prysmaticlabs/prysm/beacon-chain/rpc/prysm/v1alpha1/validator",
    visibility = ["//beacon-chain:__subpackages__"],
@@ -31,6 +33,7 @@ go_library(
        "//beacon-chain/core/state/interop:go_default_library",
        "//beacon-chain/operations/attestations:go_default_library",
        "//beacon-chain/operations/slashings:go_default_library",
        "//beacon-chain/operations/synccommittee:go_default_library",
        "//beacon-chain/operations/voluntaryexits:go_default_library",
        "//beacon-chain/p2p:go_default_library",
        "//beacon-chain/powchain:go_default_library",
@@ -43,12 +46,15 @@ go_library(
        "//proto/prysm/v1alpha1/wrapper:go_default_library",
        "//shared/aggregation:go_default_library",
        "//shared/aggregation/attestations:go_default_library",
        "//shared/aggregation/sync_contribution:go_default_library",
        "//shared/bls:go_default_library",
        "//shared/bytesutil:go_default_library",
        "//shared/copyutil:go_default_library",
        "//shared/depositutil:go_default_library",
        "//shared/event:go_default_library",
        "//shared/featureconfig:go_default_library",
        "//shared/hashutil:go_default_library",
        "//shared/p2putils:go_default_library",
        "//shared/params:go_default_library",
        "//shared/rand:go_default_library",
        "//shared/slotutil:go_default_library",
@@ -61,11 +67,12 @@ go_library(
        "@com_github_prysmaticlabs_eth2_types//:go_default_library",
        "@com_github_prysmaticlabs_go_bitfield//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
-       "@io_bazel_rules_go//proto/wkt:empty_go_proto",
+       "@com_github_wealdtech_go_bytesutil//:go_default_library",
        "@io_opencensus_go//trace:go_default_library",
        "@org_golang_google_grpc//codes:go_default_library",
        "@org_golang_google_grpc//status:go_default_library",
+       "@org_golang_google_protobuf//types/known/emptypb:go_default_library",
        "@org_golang_x_sync//errgroup:go_default_library",
    ],
)

@@ -83,6 +90,7 @@ go_test(
    srcs = [
        "proposer_test.go",
        "server_test.go",
        "status_test.go",
        "sync_committee_test.go",
        "validator_test.go",
    ],
    embed = [":go_default_library"],
@@ -90,6 +98,7 @@ go_test(
    deps = [
        "//beacon-chain/blockchain/testing:go_default_library",
        "//beacon-chain/cache:go_default_library",
        "//beacon-chain/cache/depositcache:go_default_library",
        "//beacon-chain/core/altair:go_default_library",
        "//beacon-chain/core/blocks:go_default_library",
        "//beacon-chain/core/feed:go_default_library",
        "//beacon-chain/core/feed/operation:go_default_library",
@@ -99,6 +108,7 @@ go_test(
        "//beacon-chain/db/testing:go_default_library",
        "//beacon-chain/operations/attestations:go_default_library",
        "//beacon-chain/operations/slashings:go_default_library",
        "//beacon-chain/operations/synccommittee:go_default_library",
        "//beacon-chain/operations/voluntaryexits:go_default_library",
        "//beacon-chain/p2p/testing:go_default_library",
        "//beacon-chain/powchain/testing:go_default_library",

@@ -1,6 +1,7 @@
package validator

import (
	"bytes"
	"context"
	"time"

@@ -17,6 +18,7 @@ import (
	"github.com/prysmaticlabs/prysm/shared/rand"
	"github.com/prysmaticlabs/prysm/shared/slotutil"
	"github.com/prysmaticlabs/prysm/shared/timeutils"
	"github.com/prysmaticlabs/prysm/shared/version"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)
@@ -136,6 +138,10 @@ func (vs *Server) duties(ctx context.Context, req *ethpb.DutiesRequest) (*ethpb.
		return nil, status.Errorf(codes.Internal, "Could not compute next committee assignments: %v", err)
	}

	// Post Altair transition when the beacon state is Altair compatible, and the requested epoch is
	// past the fork boundary.
	postAltairTransition := s.Version() == version.Altair && req.Epoch >= params.BeaconConfig().AltairForkEpoch

	validatorAssignments := make([]*ethpb.DutiesResponse_Duty, 0, len(req.PublicKeys))
	nextValidatorAssignments := make([]*ethpb.DutiesResponse_Duty, 0, len(req.PublicKeys))
	for _, pubKey := range req.PublicKeys {
@@ -178,6 +184,45 @@ func (vs *Server) duties(ctx context.Context, req *ethpb.DutiesRequest) (*ethpb.
			vStatus, _ := vs.validatorStatus(ctx, s, pubKey)
			assignment.Status = vStatus.Status
		}

		// Are the validators in the current or next epoch sync committee.
		if postAltairTransition {
			syncCommPeriod := helpers.SyncCommitteePeriod(req.Epoch)
			csc, err := s.CurrentSyncCommittee()
			if err != nil {
				return nil, status.Errorf(codes.Internal, "Could not get current sync committee: %v", err)
			}
			assignment.IsSyncCommittee, err = helpers.IsCurrentPeriodSyncCommittee(s, idx)
			if err != nil {
				return nil, status.Errorf(codes.Internal, "Could not determine current epoch sync committee: %v", err)
			}
			if assignment.IsSyncCommittee {
				assignValidatorToSyncSubnet(req.Epoch, syncCommPeriod, pubKey, csc, assignment.Status)
			}

			nextSlotEpoch := helpers.SlotToEpoch(s.Slot() + 1)
			currentEpoch := helpers.CurrentEpoch(s)

			// Next epoch sync committee duty is assigned with the next period sync committee only during
			// the sync period epoch boundary (ie. EPOCHS_PER_SYNC_COMMITTEE_PERIOD - 1). Otherwise, the
			// next epoch sync committee duty is the same as the current epoch.
			if helpers.SyncCommitteePeriod(nextSlotEpoch) == helpers.SyncCommitteePeriod(currentEpoch)+1 {
				nsc, err := s.NextSyncCommittee()
				if err != nil {
					return nil, status.Errorf(codes.Internal, "Could not get next sync committee: %v", err)
				}
				nextAssignment.IsSyncCommittee, err = helpers.IsNextPeriodSyncCommittee(s, idx)
				if err != nil {
					return nil, status.Errorf(codes.Internal, "Could not determine next epoch sync committee: %v", err)
				}
				if nextAssignment.IsSyncCommittee {
					assignValidatorToSyncSubnet(req.Epoch, syncCommPeriod+1, pubKey, nsc, nextAssignment.Status)
				}
			} else {
				nextAssignment.IsSyncCommittee = assignment.IsSyncCommittee
			}
		}

		validatorAssignments = append(validatorAssignments, assignment)
		nextValidatorAssignments = append(nextValidatorAssignments, nextAssignment)
		// Assign relevant validator to subnet.
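A standalone sketch of the period-boundary test used above, assuming mainnet's EPOCHS_PER_SYNC_COMMITTEE_PERIOD of 256; helpers.SyncCommitteePeriod is restated as plain integer division for the example:

package main

import "fmt"

// epochsPerSyncCommitteePeriod assumes mainnet's EPOCHS_PER_SYNC_COMMITTEE_PERIOD (256).
const epochsPerSyncCommitteePeriod = 256

// syncCommitteePeriod restates helpers.SyncCommitteePeriod: epoch / period length.
func syncCommitteePeriod(epoch uint64) uint64 {
	return epoch / epochsPerSyncCommitteePeriod
}

func main() {
	// The duty logic above only swaps in the *next* sync committee when the
	// next epoch starts a new period, i.e. at epochs 255, 511, ... on mainnet.
	for _, epoch := range []uint64{0, 254, 255, 256} {
		boundary := syncCommitteePeriod(epoch+1) == syncCommitteePeriod(epoch)+1
		fmt.Printf("epoch %d: next-period boundary = %v\n", epoch, boundary)
	}
}
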
@@ -217,3 +262,51 @@ func (vs *Server) AssignValidatorToSubnet(pubkey []byte, status ethpb.ValidatorS
	totalDuration := epochDuration * time.Duration(assignedDuration)
	cache.SubnetIDs.AddPersistentCommittee(pubkey, assignedIdxs, totalDuration*time.Second)
}

// assignValidatorToSyncSubnet checks the status and pubkey of a particular validator
// to discern whether persistent subnets need to be registered for them.
func assignValidatorToSyncSubnet(currEpoch types.Epoch, syncPeriod uint64, pubkey []byte,
	syncCommittee *ethpb.SyncCommittee, status ethpb.ValidatorStatus) {
	if status != ethpb.ValidatorStatus_ACTIVE && status != ethpb.ValidatorStatus_EXITING {
		return
	}
	startEpoch := types.Epoch(syncPeriod) * params.BeaconConfig().EpochsPerSyncCommitteePeriod
	currPeriod := helpers.SyncCommitteePeriod(currEpoch)
	endEpoch := startEpoch + params.BeaconConfig().EpochsPerSyncCommitteePeriod
	_, _, ok, expTime := cache.SyncSubnetIDs.GetSyncCommitteeSubnets(pubkey, startEpoch)
	if ok && expTime.After(timeutils.Now()) {
		return
	}
	firstValidEpoch, err := startEpoch.SafeSub(params.BeaconConfig().SyncCommitteeSubnetCount)
	if err != nil {
		firstValidEpoch = 0
	}
	// If we are processing for a future period, we only
	// add to the relevant subscription once we are at the valid
	// bound.
	if syncPeriod != currPeriod && currEpoch < firstValidEpoch {
		return
	}
	subs := subnetsFromCommittee(pubkey, syncCommittee)
	// Handle overflow in the event the current epoch is greater
	// than the end epoch. This should be an impossible condition,
	// so it is a defensive check.
	epochsToWatch, err := endEpoch.SafeSub(uint64(currEpoch))
	if err != nil {
		epochsToWatch = 0
	}
	epochDuration := time.Duration(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
	totalDuration := epochDuration * time.Duration(epochsToWatch) * time.Second
	cache.SyncSubnetIDs.AddSyncCommitteeSubnets(pubkey, startEpoch, subs, totalDuration)
}

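For a feel of the cache-expiry arithmetic above, here is a standalone run of the same duration computation under assumed mainnet timing constants (32 slots per epoch, 12 seconds per slot, 256 epochs per sync committee period):

package main

import (
	"fmt"
	"time"
)

// Assumed mainnet timing constants.
const (
	slotsPerEpoch  = 32
	secondsPerSlot = 12
)

func main() {
	// Mirrors the arithmetic above: the epoch length is first built as a bare
	// Duration count of seconds, then scaled into real seconds at the end.
	epochDuration := time.Duration(slotsPerEpoch * secondsPerSlot) // 384
	epochsToWatch := time.Duration(256)                            // One full sync committee period.
	totalDuration := epochDuration * epochsToWatch * time.Second
	fmt.Println(totalDuration) // 27h18m24s: 256 epochs of 384s each.
}
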
// subnetsFromCommittee retrieves the relevant subnets for the chosen validator.
func subnetsFromCommittee(pubkey []byte, comm *ethpb.SyncCommittee) []uint64 {
	positions := make([]uint64, 0)
	for i, pkey := range comm.Pubkeys {
		if bytes.Equal(pubkey, pkey) {
			positions = append(positions, uint64(i)/(params.BeaconConfig().SyncCommitteeSize/params.BeaconConfig().SyncCommitteeSubnetCount))
		}
	}
	return positions
}

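A standalone restatement of subnetsFromCommittee with assumed mainnet sizing (512 committee members over 4 subnets, so each contiguous run of 128 positions maps to one subnet); the toy keys are illustrative:

package main

import (
	"bytes"
	"fmt"
)

// Assumed mainnet-style constants: 512 members, 4 subnets, 128 positions each.
const (
	syncCommitteeSize        = 512
	syncCommitteeSubnetCount = 4
)

// subnetsFromCommittee restates the helper above for a standalone run.
func subnetsFromCommittee(pubkey []byte, committee [][]byte) []uint64 {
	positions := make([]uint64, 0)
	for i, pkey := range committee {
		if bytes.Equal(pubkey, pkey) {
			positions = append(positions, uint64(i)/(syncCommitteeSize/syncCommitteeSubnetCount))
		}
	}
	return positions
}

func main() {
	committee := make([][]byte, syncCommitteeSize)
	for i := range committee {
		committee[i] = []byte{byte(i % 7)} // Toy keys; repeats are expected, as in real committees.
	}
	// Key {3} appears at positions 3, 10, 17, ... and so maps to several subnets.
	fmt.Println(subnetsFromCommittee([]byte{3}, committee))
}
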
@@ -10,9 +10,12 @@ import (
	types "github.com/prysmaticlabs/eth2-types"
	mockChain "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
	"github.com/prysmaticlabs/prysm/beacon-chain/cache"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
	statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
	mockPOW "github.com/prysmaticlabs/prysm/beacon-chain/powchain/testing"
	mockSync "github.com/prysmaticlabs/prysm/beacon-chain/sync/initial-sync/testing"
	ethpbv1 "github.com/prysmaticlabs/prysm/proto/eth/v1"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
@@ -94,6 +97,109 @@ func TestGetDuties_OK(t *testing.T) {
	}
}

func TestGetAltairDuties_SyncCommitteeOK(t *testing.T) {
	params.UseMainnetConfig()
	defer params.UseMinimalConfig()

	bc := params.BeaconConfig()
	bc.AltairForkEpoch = types.Epoch(0)
	params.OverrideBeaconConfig(bc)

	genesis := testutil.NewBeaconBlock()
	deposits, _, err := testutil.DeterministicDepositsAndKeys(params.BeaconConfig().SyncCommitteeSize)
	require.NoError(t, err)
	eth1Data, err := testutil.DeterministicEth1Data(len(deposits))
	require.NoError(t, err)
	bs, err := testutil.GenesisBeaconState(context.Background(), deposits, 0, eth1Data)
	h := &ethpb.BeaconBlockHeader{
		StateRoot:  bytesutil.PadTo([]byte{'a'}, 32),
		ParentRoot: bytesutil.PadTo([]byte{'b'}, 32),
		BodyRoot:   bytesutil.PadTo([]byte{'c'}, 32),
	}
	require.NoError(t, bs.SetLatestBlockHeader(h))
	require.NoError(t, err, "Could not setup genesis bs")
	genesisRoot, err := genesis.Block.HashTreeRoot()
	require.NoError(t, err, "Could not get signing root")

	syncCommittee, err := altair.NextSyncCommittee(context.Background(), bs)
	require.NoError(t, err)
	require.NoError(t, bs.SetCurrentSyncCommittee(syncCommittee))
	pubKeys := make([][]byte, len(deposits))
	indices := make([]uint64, len(deposits))
	for i := 0; i < len(deposits); i++ {
		pubKeys[i] = deposits[i].Data.PublicKey
		indices[i] = uint64(i)
	}
	require.NoError(t, bs.SetSlot(params.BeaconConfig().SlotsPerEpoch*types.Slot(params.BeaconConfig().EpochsPerSyncCommitteePeriod)-1))
	require.NoError(t, helpers.UpdateSyncCommitteeCache(bs))

	pubkeysAs48ByteType := make([][48]byte, len(pubKeys))
	for i, pk := range pubKeys {
		pubkeysAs48ByteType[i] = bytesutil.ToBytes48(pk)
	}

	slot := uint64(params.BeaconConfig().SlotsPerEpoch) * uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod) * params.BeaconConfig().SecondsPerSlot
	chain := &mockChain.ChainService{
		State: bs, Root: genesisRoot[:], Genesis: time.Now().Add(time.Duration(-1*int64(slot-1)) * time.Second),
	}
	vs := &Server{
		HeadFetcher:     chain,
		TimeFetcher:     chain,
		Eth1InfoFetcher: &mockPOW.POWChain{},
		SyncChecker:     &mockSync.Sync{IsSyncing: false},
	}

	// Test the first validator in the registry.
	req := &ethpb.DutiesRequest{
		PublicKeys: [][]byte{deposits[0].Data.PublicKey},
	}
	res, err := vs.GetDuties(context.Background(), req)
	require.NoError(t, err, "Could not call epoch committee assignment")
	if res.CurrentEpochDuties[0].AttesterSlot > bs.Slot()+params.BeaconConfig().SlotsPerEpoch {
		t.Errorf("Assigned slot %d can't be higher than %d",
			res.CurrentEpochDuties[0].AttesterSlot, bs.Slot()+params.BeaconConfig().SlotsPerEpoch)
	}

	// Test the last validator in the registry.
	lastValidatorIndex := params.BeaconConfig().SyncCommitteeSize - 1
	req = &ethpb.DutiesRequest{
		PublicKeys: [][]byte{deposits[lastValidatorIndex].Data.PublicKey},
	}
	res, err = vs.GetDuties(context.Background(), req)
	require.NoError(t, err, "Could not call epoch committee assignment")
	if res.CurrentEpochDuties[0].AttesterSlot > bs.Slot()+params.BeaconConfig().SlotsPerEpoch {
		t.Errorf("Assigned slot %d can't be higher than %d",
			res.CurrentEpochDuties[0].AttesterSlot, bs.Slot()+params.BeaconConfig().SlotsPerEpoch)
	}

	// We request duties for all validators.
	req = &ethpb.DutiesRequest{
		PublicKeys: pubKeys,
		Epoch:      0,
	}
	res, err = vs.GetDuties(context.Background(), req)
	require.NoError(t, err, "Could not call epoch committee assignment")
	for i := 0; i < len(res.CurrentEpochDuties); i++ {
		assert.Equal(t, types.ValidatorIndex(i), res.CurrentEpochDuties[i].ValidatorIndex)
	}
	for i := 0; i < len(res.CurrentEpochDuties); i++ {
		assert.Equal(t, true, res.CurrentEpochDuties[i].IsSyncCommittee)
		// Current epoch and next epoch duties should be equal before the sync period epoch boundary.
		assert.Equal(t, res.CurrentEpochDuties[i].IsSyncCommittee, res.NextEpochDuties[i].IsSyncCommittee)
	}

	// Current epoch and next epoch duties should not be equal at the sync period epoch boundary.
	req = &ethpb.DutiesRequest{
		PublicKeys: pubKeys,
		Epoch:      params.BeaconConfig().EpochsPerSyncCommitteePeriod - 1,
	}
	res, err = vs.GetDuties(context.Background(), req)
	require.NoError(t, err, "Could not call epoch committee assignment")
	for i := 0; i < len(res.CurrentEpochDuties); i++ {
		assert.NotEqual(t, res.CurrentEpochDuties[i].IsSyncCommittee, res.NextEpochDuties[i].IsSyncCommittee)
	}
}

func TestGetDuties_SlotOutOfUpperBound(t *testing.T) {
	chain := &mockChain.ChainService{
		Genesis: time.Now(),
@@ -348,6 +454,28 @@ func TestAssignValidatorToSubnet(t *testing.T) {
	}
}

func TestAssignValidatorToSyncSubnet(t *testing.T) {
	k := pubKey(3)
	committee := make([][]byte, 0)

	for i := 0; i < 100; i++ {
		committee = append(committee, pubKey(uint64(i)))
	}
	sCommittee := &ethpb.SyncCommittee{
		Pubkeys: committee,
	}
	assignValidatorToSyncSubnet(0, 0, k, sCommittee, ethpb.ValidatorStatus_ACTIVE)
	coms, _, ok, exp := cache.SyncSubnetIDs.GetSyncCommitteeSubnets(k, 0)
	require.Equal(t, true, ok, "No cache entry found for validator")
	assert.Equal(t, uint64(1), uint64(len(coms)))
	epochDuration := time.Duration(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
	totalTime := time.Duration(params.BeaconConfig().EpochsPerSyncCommitteePeriod) * epochDuration * time.Second
	receivedTime := time.Until(exp.Round(time.Second)).Round(time.Second)
	if receivedTime < totalTime {
		t.Fatalf("Expiration time of %f was less than expected duration of %f", receivedTime.Seconds(), totalTime.Seconds())
	}
}

func BenchmarkCommitteeAssignment(b *testing.B) {

	genesis := testutil.NewBeaconBlock()

beacon-chain/rpc/prysm/v1alpha1/validator/blocks.go (new file, 108 lines)
@@ -0,0 +1,108 @@
package validator

import (
	"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
	blockfeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/block"
	statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/shared/event"
	"github.com/prysmaticlabs/prysm/shared/version"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// StreamBlocksAltair streams blocks to clients every single time a block is received by the beacon node.
func (bs *Server) StreamBlocksAltair(req *ethpb.StreamBlocksRequest, stream ethpb.BeaconNodeValidator_StreamBlocksAltairServer) error {
	blocksChannel := make(chan *feed.Event, 1)
	var blockSub event.Subscription
	if req.VerifiedOnly {
		blockSub = bs.StateNotifier.StateFeed().Subscribe(blocksChannel)
	} else {
		blockSub = bs.BlockNotifier.BlockFeed().Subscribe(blocksChannel)
	}
	defer blockSub.Unsubscribe()

	for {
		select {
		case blockEvent := <-blocksChannel:
			if req.VerifiedOnly {
				if blockEvent.Type == statefeed.BlockProcessed {
					data, ok := blockEvent.Data.(*statefeed.BlockProcessedData)
					if !ok || data == nil {
						continue
					}
					b := &ethpb.StreamBlocksResponse{}
					switch data.SignedBlock.Version() {
					case version.Phase0:
						phBlk, ok := data.SignedBlock.Proto().(*ethpb.SignedBeaconBlock)
						if !ok {
							log.Warn("Mismatch between version and block type, was expecting *ethpb.SignedBeaconBlock")
							continue
						}
						b.Block = &ethpb.StreamBlocksResponse_Phase0Block{Phase0Block: phBlk}
					case version.Altair:
						phBlk, ok := data.SignedBlock.Proto().(*ethpb.SignedBeaconBlockAltair)
						if !ok {
							log.Warn("Mismatch between version and block type, was expecting *ethpb.SignedBeaconBlockAltair")
							continue
						}
						b.Block = &ethpb.StreamBlocksResponse_AltairBlock{AltairBlock: phBlk}
					}

					if err := stream.Send(b); err != nil {
						return status.Errorf(codes.Unavailable, "Could not send over stream: %v", err)
					}
				}
			} else {
				if blockEvent.Type == blockfeed.ReceivedBlock {
					data, ok := blockEvent.Data.(*blockfeed.ReceivedBlockData)
					if !ok {
						// Got bad data over the stream.
						continue
					}
					if data.SignedBlock == nil {
						// One nil block shouldn't stop the stream.
						continue
					}
					headState, err := bs.HeadFetcher.HeadState(bs.Ctx)
					if err != nil {
						log.WithError(err).WithField("blockSlot", data.SignedBlock.Block().Slot()).Error("Could not get head state")
						continue
					}
					signed := data.SignedBlock
					if err := blocks.VerifyBlockSignature(headState, signed.Block().ProposerIndex(), signed.Signature(), signed.Block().HashTreeRoot); err != nil {
						log.WithError(err).WithField("blockSlot", data.SignedBlock.Block().Slot()).Error("Could not verify block signature")
						continue
					}
					b := &ethpb.StreamBlocksResponse{}
					switch data.SignedBlock.Version() {
					case version.Phase0:
						phBlk, ok := data.SignedBlock.Proto().(*ethpb.SignedBeaconBlock)
						if !ok {
							log.Warn("Mismatch between version and block type, was expecting *ethpb.SignedBeaconBlock")
							continue
						}
						b.Block = &ethpb.StreamBlocksResponse_Phase0Block{Phase0Block: phBlk}
					case version.Altair:
						phBlk, ok := data.SignedBlock.Proto().(*ethpb.SignedBeaconBlockAltair)
						if !ok {
							log.Warn("Mismatch between version and block type, was expecting *ethpb.SignedBeaconBlockAltair")
							continue
						}
						b.Block = &ethpb.StreamBlocksResponse_AltairBlock{AltairBlock: phBlk}
					}
					if err := stream.Send(b); err != nil {
						return status.Errorf(codes.Unavailable, "Could not send over stream: %v", err)
					}
				}
			}
		case <-blockSub.Err():
			return status.Error(codes.Aborted, "Subscriber closed, exiting goroutine")
		case <-bs.Ctx.Done():
			return status.Error(codes.Canceled, "Context canceled")
		case <-stream.Context().Done():
			return status.Error(codes.Canceled, "Context canceled")
		}
	}
}

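For reference, a hypothetical client consuming this stream might look like the sketch below; the endpoint address is made up, and the generated BeaconNodeValidatorClient stub and response field names are assumed from the proto definitions used above:

package main

import (
	"context"
	"log"

	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	"google.golang.org/grpc"
)

func main() {
	// Hypothetical local beacon node gRPC endpoint.
	conn, err := grpc.Dial("127.0.0.1:4000", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := ethpb.NewBeaconNodeValidatorClient(conn)
	stream, err := client.StreamBlocksAltair(context.Background(), &ethpb.StreamBlocksRequest{VerifiedOnly: true})
	if err != nil {
		log.Fatal(err)
	}
	for {
		res, err := stream.Recv()
		if err != nil {
			log.Fatal(err)
		}
		// Each response carries either a phase 0 or an Altair block via the oneof field.
		switch blk := res.Block.(type) {
		case *ethpb.StreamBlocksResponse_Phase0Block:
			log.Printf("phase0 block at slot %d", blk.Phase0Block.Block.Slot)
		case *ethpb.StreamBlocksResponse_AltairBlock:
			log.Printf("altair block at slot %d", blk.AltairBlock.Block.Slot)
		}
	}
}
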
@@ -24,6 +24,8 @@ import (
	"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
	"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
	attaggregation "github.com/prysmaticlabs/prysm/shared/aggregation/attestations"
	"github.com/prysmaticlabs/prysm/shared/aggregation/sync_contribution"
	"github.com/prysmaticlabs/prysm/shared/bls"
	"github.com/prysmaticlabs/prysm/shared/bytesutil"
	"github.com/prysmaticlabs/prysm/shared/featureconfig"
	"github.com/prysmaticlabs/prysm/shared/hashutil"
@@ -31,6 +33,7 @@ import (
	"github.com/prysmaticlabs/prysm/shared/rand"
	"github.com/prysmaticlabs/prysm/shared/trieutil"
	"github.com/sirupsen/logrus"
	bytesutil2 "github.com/wealdtech/go-bytesutil"
	"go.opencensus.io/trace"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
@@ -41,16 +44,31 @@ var eth1DataNotification bool

const eth1dataTimeout = 2 * time.Second

// skipcq: SCC-U1000
type eth1DataSingleVote struct {
	eth1Data    *ethpb.Eth1Data
	blockHeight *big.Int
}

// skipcq: SCC-U1000
type eth1DataAggregatedVote struct {
	data  eth1DataSingleVote
	votes int
}

// BlockData contains the data required to create a beacon block.
type BlockData struct {
	ParentRoot        []byte
	Graffiti          [32]byte
	ProposerIdx       types.ValidatorIndex
	Eth1Data          *ethpb.Eth1Data
	Deposits          []*ethpb.Deposit
	Attestations      []*ethpb.Attestation
	ProposerSlashings []*ethpb.ProposerSlashing
	AttesterSlashings []*ethpb.AttesterSlashing
	VoluntaryExits    []*ethpb.SignedVoluntaryExit
}

// GetBlock is called by a proposer during its assigned slot to request a block to sign
// by passing in the slot and the signed randao reveal of the slot.
func (vs *Server) GetBlock(ctx context.Context, req *ethpb.BlockRequest) (*ethpb.BeaconBlock, error) {
@@ -58,6 +76,51 @@ func (vs *Server) GetBlock(ctx context.Context, req *ethpb.BlockRequest) (*ethpb
	defer span.End()
	span.AddAttributes(trace.Int64Attribute("slot", int64(req.Slot)))

	blkData, err := vs.BuildBlockData(ctx, req)
	if err != nil {
		return nil, err
	}

	// Use zero hash as stub for state root to compute later.
	stateRoot := params.BeaconConfig().ZeroHash[:]

	blk := &ethpb.BeaconBlock{
		Slot:          req.Slot,
		ParentRoot:    blkData.ParentRoot,
		StateRoot:     stateRoot,
		ProposerIndex: blkData.ProposerIdx,
		Body: &ethpb.BeaconBlockBody{
			Eth1Data:          blkData.Eth1Data,
			Deposits:          blkData.Deposits,
			Attestations:      blkData.Attestations,
			RandaoReveal:      req.RandaoReveal,
			ProposerSlashings: blkData.ProposerSlashings,
			AttesterSlashings: blkData.AttesterSlashings,
			VoluntaryExits:    blkData.VoluntaryExits,
			Graffiti:          blkData.Graffiti[:],
		},
	}

	// Compute state root with the newly constructed block.
	stateRoot, err = vs.ComputeStateRoot(
		ctx, wrapper.WrappedPhase0SignedBeaconBlock(
			&ethpb.SignedBeaconBlock{Block: blk, Signature: make([]byte, 96)},
		),
	)
	if err != nil {
		interop.WriteBlockToDisk(wrapper.WrappedPhase0SignedBeaconBlock(&ethpb.SignedBeaconBlock{Block: blk}), true /*failed*/)
		return nil, status.Errorf(codes.Internal, "Could not compute state root: %v", err)
	}
	blk.StateRoot = stateRoot

	return blk, nil
}

// BuildBlockData for creating a new beacon block, so that this method can be shared across forks.
func (vs *Server) BuildBlockData(ctx context.Context, req *ethpb.BlockRequest) (*BlockData, error) {
	ctx, span := trace.StartSpan(ctx, "ProposerServer.BuildBlockData")
	defer span.End()

	if vs.SyncChecker.Syncing() {
		return nil, status.Errorf(codes.Unavailable, "Syncing to latest head, not ready to respond")
	}
@@ -90,21 +153,18 @@ func (vs *Server) GetBlock(ctx context.Context, req *ethpb.BlockRequest) (*ethpb
		return nil, status.Errorf(codes.Internal, "Could not get ETH1 data: %v", err)
	}

-	// Pack ETH1 deposits which have not been included in the beacon chain.
+	// Pack ETH1 Deposits which have not been included in the beacon chain.
	deposits, err := vs.deposits(ctx, head, eth1Data)
	if err != nil {
-		return nil, status.Errorf(codes.Internal, "Could not get ETH1 deposits: %v", err)
+		return nil, status.Errorf(codes.Internal, "Could not get ETH1 Deposits: %v", err)
	}

-	// Pack aggregated attestations which have not been included in the beacon chain.
+	// Pack aggregated Attestations which have not been included in the beacon chain.
	atts, err := vs.packAttestations(ctx, head)
	if err != nil {
-		return nil, status.Errorf(codes.Internal, "Could not get attestations to pack into block: %v", err)
+		return nil, status.Errorf(codes.Internal, "Could not get Attestations to pack into block: %v", err)
	}

-	// Use zero hash as stub for state root to compute later.
-	stateRoot := params.BeaconConfig().ZeroHash[:]
-
	graffiti := bytesutil.ToBytes32(req.Graffiti)

	// Calculate new proposer index.
@@ -113,38 +173,30 @@ func (vs *Server) GetBlock(ctx context.Context, req *ethpb.BlockRequest) (*ethpb
		return nil, status.Errorf(codes.Internal, "Could not calculate proposer index %v", err)
	}

-	blk := &ethpb.BeaconBlock{
-		Slot:          req.Slot,
-		ParentRoot:    parentRoot,
-		StateRoot:     stateRoot,
-		ProposerIndex: idx,
-		Body: &ethpb.BeaconBlockBody{
-			Eth1Data:          eth1Data,
-			Deposits:          deposits,
-			Attestations:      atts,
-			RandaoReveal:      req.RandaoReveal,
-			ProposerSlashings: vs.SlashingsPool.PendingProposerSlashings(ctx, head, false /*noLimit*/),
-			AttesterSlashings: vs.SlashingsPool.PendingAttesterSlashings(ctx, head, false /*noLimit*/),
-			VoluntaryExits:    vs.ExitPool.PendingExits(head, req.Slot, false /*noLimit*/),
-			Graffiti:          graffiti[:],
-		},
-	}
-
-	// Compute state root with the newly constructed block.
-	stateRoot, err = vs.computeStateRoot(ctx, wrapper.WrappedPhase0SignedBeaconBlock(&ethpb.SignedBeaconBlock{Block: blk, Signature: make([]byte, 96)}))
-	if err != nil {
-		interop.WriteBlockToDisk(wrapper.WrappedPhase0SignedBeaconBlock(&ethpb.SignedBeaconBlock{Block: blk}), true /*failed*/)
-		return nil, status.Errorf(codes.Internal, "Could not compute state root: %v", err)
-	}
-	blk.StateRoot = stateRoot
-
-	return blk, nil
+	return &BlockData{
+		ParentRoot:        parentRoot,
+		Graffiti:          graffiti,
+		ProposerIdx:       idx,
+		Eth1Data:          eth1Data,
+		Deposits:          deposits,
+		Attestations:      atts,
+		ProposerSlashings: vs.SlashingsPool.PendingProposerSlashings(ctx, head, false /*noLimit*/),
+		AttesterSlashings: vs.SlashingsPool.PendingAttesterSlashings(ctx, head, false /*noLimit*/),
+		VoluntaryExits:    vs.ExitPool.PendingExits(head, req.Slot, false /*noLimit*/),
+	}, nil
}

// ProposeBlock is called by a proposer during its assigned slot to create a block in an attempt
|
||||
// to get it processed by the beacon node as the canonical head.
|
||||
func (vs *Server) ProposeBlock(ctx context.Context, rBlk *ethpb.SignedBeaconBlock) (*ethpb.ProposeResponse, error) {
|
||||
blk := wrapper.WrappedPhase0SignedBeaconBlock(rBlk)
|
||||
return vs.ProposeBlockGeneric(ctx, blk)
|
||||
}
|
||||
|
||||
// ProposeBlockGeneric performs the core post-block creation actions once a block proposal is received.
|
||||
func (vs *Server) ProposeBlockGeneric(ctx context.Context, blk block.SignedBeaconBlock) (*ethpb.ProposeResponse, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "ProposerServer.proposeBlock")
|
||||
defer span.End()
|
||||
root, err := blk.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not tree hash block: %v", err)
|
||||
@@ -177,6 +229,137 @@ func (vs *Server) ProposeBlock(ctx context.Context, rBlk *ethpb.SignedBeaconBloc
|
||||
}, nil
|
||||
}
|
||||
|
||||
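With this refactor, the shared BuildBlockData helper carries everything both fork versions need, and block assembly plus state-root computation move to the per-version Get* handlers. As a rough sketch (not part of the actual diff) of how the phase0 path can be reassembled on top of it — the function name getBlockPhase0 is hypothetical, while BuildBlockData, the BlockData fields, and ComputeStateRoot all come from this diff:

func (vs *Server) getBlockPhase0(ctx context.Context, req *ethpb.BlockRequest) (*ethpb.BeaconBlock, error) {
	blkData, err := vs.BuildBlockData(ctx, req)
	if err != nil {
		return nil, err
	}
	blk := &ethpb.BeaconBlock{
		Slot:          req.Slot,
		ParentRoot:    blkData.ParentRoot,
		StateRoot:     params.BeaconConfig().ZeroHash[:], // stub, recomputed below
		ProposerIndex: blkData.ProposerIdx,
		Body: &ethpb.BeaconBlockBody{
			Eth1Data:          blkData.Eth1Data,
			Deposits:          blkData.Deposits,
			Attestations:      blkData.Attestations,
			RandaoReveal:      req.RandaoReveal,
			ProposerSlashings: blkData.ProposerSlashings,
			AttesterSlashings: blkData.AttesterSlashings,
			VoluntaryExits:    blkData.VoluntaryExits,
			Graffiti:          blkData.Graffiti[:],
		},
	}
	// Recompute the real post-state root over the assembled block.
	stateRoot, err := vs.ComputeStateRoot(ctx, wrapper.WrappedPhase0SignedBeaconBlock(
		&ethpb.SignedBeaconBlock{Block: blk, Signature: make([]byte, 96)}))
	if err != nil {
		return nil, err
	}
	blk.StateRoot = stateRoot
	return blk, nil
}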
// GetBlockAltair is called by a proposer during its assigned slot to request a block to sign
// by passing in the slot and the signed randao reveal of the slot. This is used by a validator
// after the altair fork epoch has been encountered.
func (vs *Server) GetBlockAltair(ctx context.Context, req *ethpb.BlockRequest) (*ethpb.BeaconBlockAltair, error) {
	ctx, span := trace.StartSpan(ctx, "ProposerServer.GetBlock")
	defer span.End()
	span.AddAttributes(trace.Int64Attribute("slot", int64(req.Slot)))

	blkData, err := vs.BuildBlockData(ctx, req)
	if err != nil {
		return nil, err
	}

	// Use zero hash as stub for state root to compute later.
	stateRoot := params.BeaconConfig().ZeroHash[:]

	syncAggregate, err := vs.getSyncAggregate(ctx, req.Slot-1, bytesutil.ToBytes32(blkData.ParentRoot))
	if err != nil {
		return nil, err
	}

	blk := &ethpb.BeaconBlockAltair{
		Slot:          req.Slot,
		ParentRoot:    blkData.ParentRoot,
		StateRoot:     stateRoot,
		ProposerIndex: blkData.ProposerIdx,
		Body: &ethpb.BeaconBlockBodyAltair{
			Eth1Data:          blkData.Eth1Data,
			Deposits:          blkData.Deposits,
			Attestations:      blkData.Attestations,
			RandaoReveal:      req.RandaoReveal,
			ProposerSlashings: blkData.ProposerSlashings,
			AttesterSlashings: blkData.AttesterSlashings,
			VoluntaryExits:    blkData.VoluntaryExits,
			Graffiti:          blkData.Graffiti[:],
			SyncAggregate:     syncAggregate,
		},
	}
	// Compute state root with the newly constructed block.
	wsb, err := wrapper.WrappedAltairSignedBeaconBlock(
		&ethpb.SignedBeaconBlockAltair{Block: blk, Signature: make([]byte, 96)},
	)
	if err != nil {
		return nil, err
	}
	stateRoot, err = vs.ComputeStateRoot(
		ctx,
		wsb,
	)
	if err != nil {
		interop.WriteBlockToDisk(wsb, true /*failed*/)
		return nil, status.Errorf(codes.Internal, "Could not compute state root: %v", err)
	}
	blk.StateRoot = stateRoot

	return blk, nil
}
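For orientation, a hedged caller-side sketch of driving GetBlockAltair, mirroring the request shape used by the tests later in this diff; the server wiring, randaoReveal, and graffiti values are assumed to already exist:

req := &ethpb.BlockRequest{
	Slot:         1,
	RandaoReveal: randaoReveal, // 96-byte BLS signature over the epoch
	Graffiti:     graffiti[:],  // 32 bytes
}
blk, err := vs.GetBlockAltair(ctx, req)
if err != nil {
	return err
}
// blk.StateRoot now holds the post-state root from ComputeStateRoot, and
// blk.Body.SyncAggregate carries the packed sync committee contributions.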
// ProposeBlockAltair is called by a proposer during its assigned slot to create a block in an attempt
// to get it processed by the beacon node as the canonical head.
func (vs *Server) ProposeBlockAltair(ctx context.Context, rBlk *ethpb.SignedBeaconBlockAltair) (*ethpb.ProposeResponse, error) {
	blk, err := wrapper.WrappedAltairSignedBeaconBlock(rBlk)
	if err != nil {
		return nil, err
	}
	return vs.ProposeBlockGeneric(ctx, blk)
}
// getSyncAggregate retrieves the sync contributions from the pool to construct the sync aggregate object.
// The contributions are filtered based on matching of the input root and slot, then profitability.
func (vs *Server) getSyncAggregate(ctx context.Context, slot types.Slot, root [32]byte) (*ethpb.SyncAggregate, error) {
	ctx, span := trace.StartSpan(ctx, "ProposerServer.GetSyncAggregate")
	defer span.End()

	// Contributions have to match the input root.
	contributions, err := vs.SyncCommitteePool.SyncCommitteeContributions(slot)
	if err != nil {
		return nil, err
	}
	proposerContributions := proposerSyncContributions(contributions).filterByBlockRoot(root)

	// Each sync subcommittee is 128 bits and the sync committee is 512 bits (mainnet).
	bitsHolder := [][]byte{}
	for i := uint64(0); i < params.BeaconConfig().SyncCommitteeSubnetCount; i++ {
		bitsHolder = append(bitsHolder, ethpb.NewSyncCommitteeAggregationBits())
	}
	sigsHolder := make([]bls.Signature, 0, params.BeaconConfig().SyncCommitteeSize/params.BeaconConfig().SyncCommitteeSubnetCount)

	for i := uint64(0); i < params.BeaconConfig().SyncCommitteeSubnetCount; i++ {
		cs := proposerContributions.filterBySubIndex(i)
		aggregates, err := sync_contribution.Aggregate(cs)
		if err != nil {
			return nil, err
		}

		// Retrieve the most profitable contribution.
		deduped, err := proposerSyncContributions(aggregates).dedup()
		if err != nil {
			return nil, err
		}
		c := deduped.mostProfitable()
		if c == nil {
			continue
		}
		bitsHolder[i] = c.AggregationBits
		sig, err := bls.SignatureFromBytes(c.Signature)
		if err != nil {
			return nil, err
		}
		sigsHolder = append(sigsHolder, sig)
	}

	// Aggregate all the contribution bits and signatures.
	var syncBits []byte
	for _, b := range bitsHolder {
		syncBits = append(syncBits, b...)
	}
	syncSig := bls.AggregateSignatures(sigsHolder)
	var syncSigBytes [96]byte
	if syncSig == nil {
		syncSigBytes = [96]byte{0xC0} // Use the infinity signature if the aggregate is nil.
	} else {
		syncSigBytes = bytesutil2.ToBytes96(syncSig.Marshal())
	}

	return &ethpb.SyncAggregate{
		SyncCommitteeBits:      syncBits,
		SyncCommitteeSignature: syncSigBytes[:],
	}, nil
}
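To make the bit layout above concrete: assuming mainnet parameters (SyncCommitteeSize 512, SyncCommitteeSubnetCount 4), each subcommittee contributes a 128-bit vector and the four vectors are concatenated in subnet order. The hypothetical snippet below sketches that packing with plain byte slices instead of Prysm's bitfield types:

package main

import "fmt"

// A minimal sketch of the packing performed by getSyncAggregate: 512 committee
// bits split across 4 subnets of 128 bits (16 bytes) each.
func main() {
	const subnetCount = 4
	const subnetBytes = 128 / 8

	// One aggregation bitvector per subnet; all zeros when no profitable
	// contribution was found for that subnet.
	bitsHolder := make([][]byte, subnetCount)
	for i := range bitsHolder {
		bitsHolder[i] = make([]byte, subnetBytes)
	}
	bitsHolder[0][0] = 0x0F // pretend subnet 0 had participants 0..3

	// Concatenate in subnet order to form the 512-bit committee vector.
	var syncBits []byte
	for _, b := range bitsHolder {
		syncBits = append(syncBits, b...)
	}
	fmt.Printf("%d bytes, first byte %#x\n", len(syncBits), syncBits[0]) // 64 bytes, 0xf
}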
// eth1DataMajorityVote determines the appropriate eth1data for a block proposal using
// an algorithm called Voting with the Majority. The algorithm works as follows:
//  - Determine the timestamp for the start slot for the eth1 voting period.
@@ -253,6 +436,7 @@ func (vs *Server) slotStartTime(slot types.Slot) uint64 {
	return helpers.VotingPeriodStartTime(startTime, slot)
}
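The tallying step of Voting with the Majority (continued in chosenEth1DataMajorityVote below) reduces to counting equal eth1data votes and keeping the one with the highest count. A self-contained sketch with hypothetical stand-in types — the real eth1DataSingleVote and eth1DataAggregatedVote structs are not fully shown in this diff:

package main

import "fmt"

// Hypothetical stand-ins for the vote types consumed by chosenEth1DataMajorityVote.
type eth1Data struct {
	BlockHash    string
	DepositCount uint64
}

type aggregatedVote struct {
	data  eth1Data
	count int
}

// chooseMajority keeps one counter per distinct eth1data and returns the entry
// with the highest count; ties keep the earlier-seen vote.
func chooseMajority(votes []eth1Data) aggregatedVote {
	var tally []aggregatedVote
	for _, v := range votes {
		matched := false
		for i := range tally {
			if tally[i].data == v {
				tally[i].count++
				matched = true
				break
			}
		}
		if !matched {
			tally = append(tally, aggregatedVote{data: v, count: 1})
		}
	}
	best := aggregatedVote{}
	for _, t := range tally {
		if t.count > best.count {
			best = t
		}
	}
	return best
}

func main() {
	votes := []eth1Data{{"a", 1}, {"b", 2}, {"a", 1}}
	fmt.Println(chooseMajority(votes)) // {{a 1} 2}
}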
+// skipcq: SCC-U1000
func (vs *Server) inRangeVotes(ctx context.Context,
	beaconState state.ReadOnlyBeaconState,
	firstValidBlockNumber, lastValidBlockNumber *big.Int) ([]eth1DataSingleVote, error) {
@@ -280,6 +464,7 @@ func (vs *Server) inRangeVotes(ctx context.Context,
	return inRangeVotes, nil
}

+// skipcq: SCC-U1000
func chosenEth1DataMajorityVote(votes []eth1DataSingleVote) eth1DataAggregatedVote {
	var voteCount []eth1DataAggregatedVote
	for _, singleVote := range votes {
@@ -365,9 +550,9 @@ func (vs *Server) randomETH1DataVote(ctx context.Context) (*ethpb.Eth1Data, erro
	}, nil
}
-// computeStateRoot computes the state root after a block has been processed through a state transition and
+// ComputeStateRoot computes the state root after a block has been processed through a state transition and
// returns it to the validator client.
-func (vs *Server) computeStateRoot(ctx context.Context, block block.SignedBeaconBlock) ([]byte, error) {
+func (vs *Server) ComputeStateRoot(ctx context.Context, block block.SignedBeaconBlock) ([]byte, error) {
	beaconState, err := vs.StateGen.StateByRoot(ctx, bytesutil.ToBytes32(block.Block().ParentRoot()))
	if err != nil {
		return nil, errors.Wrap(err, "could not retrieve beacon state")
@@ -395,14 +580,14 @@ func (vs *Server) deposits(
	beaconState state.BeaconState,
	currentVote *ethpb.Eth1Data,
) ([]*ethpb.Deposit, error) {
-	ctx, span := trace.StartSpan(ctx, "ProposerServer.deposits")
+	ctx, span := trace.StartSpan(ctx, "ProposerServer.Deposits")
	defer span.End()

	if vs.MockEth1Votes || !vs.Eth1InfoFetcher.IsConnectedToETH1() {
		return []*ethpb.Deposit{}, nil
	}
-	// Need to fetch if the deposits up to the state's latest eth 1 data matches
-	// the number of all deposits in this RPC call. If not, then we return nil.
+	// Need to fetch if the Deposits up to the state's latest eth 1 data matches
+	// the number of all Deposits in this RPC call. If not, then we return nil.
	canonicalEth1Data, canonicalEth1DataHeight, err := vs.canonicalEth1Data(ctx, beaconState, currentVote)
	if err != nil {
		return nil, err
@@ -413,7 +598,7 @@ func (vs *Server) deposits(
		return []*ethpb.Deposit{}, nil
	}

-	// If there are no pending deposits, exit early.
+	// If there are no pending Deposits, exit early.
	allPendingContainers := vs.PendingDepositsFetcher.PendingContainers(ctx, canonicalEth1DataHeight)
	if len(allPendingContainers) == 0 {
		return []*ethpb.Deposit{}, nil
@@ -425,7 +610,7 @@ func (vs *Server) deposits(
	}

	// Deposits need to be received in order of merkle index root, so this has to make sure
-	// deposits are sorted from lowest to highest.
+	// Deposits are sorted from lowest to highest.
	var pendingDeps []*dbpb.DepositContainer
	for _, dep := range allPendingContainers {
		if uint64(dep.Index) >= beaconState.Eth1DepositIndex() && uint64(dep.Index) < canonicalEth1Data.DepositCount {
@@ -434,7 +619,7 @@ func (vs *Server) deposits(
	}

	for i := range pendingDeps {
-		// Don't construct merkle proof if the number of deposits is more than max allowed in block.
+		// Don't construct merkle proof if the number of Deposits is more than max allowed in block.
		if uint64(i) == params.BeaconConfig().MaxDeposits {
			break
		}
@@ -443,7 +628,7 @@ func (vs *Server) deposits(
			return nil, err
		}
	}
-	// Limit the return of pending deposits to not be more than max deposits allowed in block.
+	// Limit the return of pending Deposits to not be more than max Deposits allowed in block.
	var pendingDeposits []*ethpb.Deposit
	for i := uint64(0); i < uint64(len(pendingDeps)) && i < params.BeaconConfig().MaxDeposits; i++ {
		pendingDeposits = append(pendingDeposits, pendingDeps[i].Deposit)
@@ -565,7 +750,7 @@ func (vs *Server) defaultEth1DataResponse(ctx context.Context, currentHeight *bi
	if err != nil {
		return nil, errors.Wrap(err, "could not fetch ETH1_FOLLOW_DISTANCE ancestor")
	}
-	// Fetch all historical deposits up to an ancestor height.
+	// Fetch all historical Deposits up to an ancestor height.
	depositsTillHeight, depositRoot := vs.DepositFetcher.DepositsNumberAndRootAtHeight(ctx, ancestorHeight)
	if depositsTillHeight == 0 {
		return vs.ChainStartFetcher.ChainStartEth1Data(), nil
@@ -582,7 +767,7 @@ func (vs *Server) defaultEth1DataResponse(ctx context.Context, currentHeight *bi
	}, nil
}
-// This filters the input attestations to return a list of valid attestations to be packaged inside a beacon block.
+// This filters the input Attestations to return a list of valid Attestations to be packaged inside a beacon block.
func (vs *Server) filterAttestationsForBlockInclusion(ctx context.Context, st state.BeaconState, atts []*ethpb.Attestation) ([]*ethpb.Attestation, error) {
	ctx, span := trace.StartSpan(ctx, "ProposerServer.filterAttestationsForBlockInclusion")
	defer span.End()
@@ -602,7 +787,7 @@ func (vs *Server) filterAttestationsForBlockInclusion(ctx context.Context, st st
	return sorted.limitToMaxAttestations(), nil
}

-// The input attestations are processed and seen by the node, this deletes them from pool
+// The input Attestations are processed and seen by the node, this deletes them from pool
// so proposers don't include them in a block for the future.
func (vs *Server) deleteAttsInPool(ctx context.Context, atts []*ethpb.Attestation) error {
	ctx, span := trace.StartSpan(ctx, "ProposerServer.deleteAttsInPool")
@@ -631,7 +816,7 @@ func constructMerkleProof(trie *trieutil.SparseMerkleTrie, index int, deposit *e
		return nil, errors.Wrapf(err, "could not generate merkle proof for deposit at index %d", index)
	}
	// For every deposit, we construct a Merkle proof using the powchain service's
-	// in-memory deposits trie, which is updated only once the state's LatestETH1Data
+	// in-memory Deposits trie, which is updated only once the state's LatestETH1Data
	// property changes during a state transition after a voting period.
	deposit.Proof = proof
	return deposit, nil
@@ -644,19 +829,19 @@ func (vs *Server) packAttestations(ctx context.Context, latestState state.Beacon
	atts := vs.AttPool.AggregatedAttestations()
	atts, err := vs.filterAttestationsForBlockInclusion(ctx, latestState, atts)
	if err != nil {
-		return nil, errors.Wrap(err, "could not filter attestations")
+		return nil, errors.Wrap(err, "could not filter Attestations")
	}

-	// If there is any room left in the block, consider unaggregated attestations as well.
+	// If there is any room left in the block, consider unaggregated Attestations as well.
	numAtts := uint64(len(atts))
	if numAtts < params.BeaconConfig().MaxAttestations {
		uAtts, err := vs.AttPool.UnaggregatedAttestations()
		if err != nil {
-			return nil, errors.Wrap(err, "could not get unaggregated attestations")
+			return nil, errors.Wrap(err, "could not get unaggregated Attestations")
		}
		uAtts, err = vs.filterAttestationsForBlockInclusion(ctx, latestState, uAtts)
		if err != nil {
-			return nil, errors.Wrap(err, "could not filter attestations")
+			return nil, errors.Wrap(err, "could not filter Attestations")
		}
		atts = append(atts, uAtts...)
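The packing policy above gives aggregated attestations priority and lets unaggregated ones fill whatever room remains under MaxAttestations (128 on mainnet). A schematic sketch of that fill-to-cap behavior, with plain ints standing in for attestations:

package main

import "fmt"

// pack mirrors the policy in packAttestations: aggregated first, then
// unaggregated up to the cap; the final trim mirrors limitToMaxAttestations().
func pack(aggregated, unaggregated []int, max int) []int {
	out := append([]int{}, aggregated...)
	for _, a := range unaggregated {
		if len(out) >= max {
			break
		}
		out = append(out, a)
	}
	if len(out) > max {
		out = out[:max]
	}
	return out
}

func main() {
	fmt.Println(len(pack(make([]int, 127), make([]int, 10), 128))) // 128
}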
@@ -9,11 +9,13 @@ import (
	"github.com/prysmaticlabs/go-bitfield"
	mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
	"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
+	"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
	b "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	dbutil "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
	"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
	"github.com/prysmaticlabs/prysm/beacon-chain/operations/slashings"
+	"github.com/prysmaticlabs/prysm/beacon-chain/operations/synccommittee"
	"github.com/prysmaticlabs/prysm/beacon-chain/operations/voluntaryexits"
	mockp2p "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing"
	mockPOW "github.com/prysmaticlabs/prysm/beacon-chain/powchain/testing"
@@ -22,7 +24,6 @@ import (
	mockSync "github.com/prysmaticlabs/prysm/beacon-chain/sync/initial-sync/testing"
	dbpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
-	statepb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
	attaggregation "github.com/prysmaticlabs/prysm/shared/aggregation/attestations"
	"github.com/prysmaticlabs/prysm/shared/attestationutil"
@@ -110,7 +111,7 @@ func TestProposer_GetBlock_OK(t *testing.T) {
	assert.Equal(t, req.Slot, block.Slot, "Expected block to have slot of 1")
	assert.DeepEqual(t, parentRoot[:], block.ParentRoot, "Expected block to have correct parent root")
	assert.DeepEqual(t, randaoReveal, block.Body.RandaoReveal, "Expected block to have correct randao reveal")
-	assert.DeepEqual(t, req.Graffiti, block.Body.Graffiti, "Expected block to have correct graffiti")
+	assert.DeepEqual(t, req.Graffiti, block.Body.Graffiti, "Expected block to have correct Graffiti")
	assert.Equal(t, params.BeaconConfig().MaxProposerSlashings, uint64(len(block.Body.ProposerSlashings)))
	assert.DeepEqual(t, proposerSlashings, block.Body.ProposerSlashings)
	assert.Equal(t, params.BeaconConfig().MaxAttesterSlashings, uint64(len(block.Body.AttesterSlashings)))
@@ -150,7 +151,7 @@ func TestProposer_GetBlock_AddsUnaggregatedAtts(t *testing.T) {
		StateGen: stategen.New(db),
	}

-	// Generate a bunch of random attestations at slot. These would be considered double votes, but
+	// Generate a bunch of random Attestations at slot. These would be considered double votes, but
	// we don't care for the purpose of this test.
	var atts []*ethpb.Attestation
	for i := uint64(0); len(atts) < int(params.BeaconConfig().MaxAttestations); i++ {
@@ -158,12 +159,12 @@ func TestProposer_GetBlock_AddsUnaggregatedAtts(t *testing.T) {
		require.NoError(t, err)
		atts = append(atts, a...)
	}
-	// Max attestations minus one so we can almost fill the block and then include 1 unaggregated
+	// Max Attestations minus one so we can almost fill the block and then include 1 unaggregated
	// att to maximize inclusion.
	atts = atts[:params.BeaconConfig().MaxAttestations-1]
	require.NoError(t, proposerServer.AttPool.SaveAggregatedAttestations(atts))

-	// Generate some more random attestations with a larger spread so that we can capture at least
+	// Generate some more random Attestations with a larger spread so that we can capture at least
	// one unaggregated attestation.
	atts, err = testutil.GenerateAttestations(beaconState, privKeys, 300, 1, true)
	require.NoError(t, err)
@@ -174,7 +175,7 @@ func TestProposer_GetBlock_AddsUnaggregatedAtts(t *testing.T) {
			require.NoError(t, proposerServer.AttPool.SaveUnaggregatedAttestation(a))
		}
	}
-	require.Equal(t, true, found, "No unaggregated attestations were generated")
+	require.Equal(t, true, found, "No unaggregated Attestations were generated")

	randaoReveal, err := testutil.RandaoReveal(beaconState, 0, privKeys)
	assert.NoError(t, err)
@@ -191,7 +192,7 @@ func TestProposer_GetBlock_AddsUnaggregatedAtts(t *testing.T) {
	assert.Equal(t, req.Slot, block.Slot, "Expected block to have slot of 1")
	assert.DeepEqual(t, parentRoot[:], block.ParentRoot, "Expected block to have correct parent root")
	assert.DeepEqual(t, randaoReveal, block.Body.RandaoReveal, "Expected block to have correct randao reveal")
-	assert.DeepEqual(t, req.Graffiti, block.Body.Graffiti, "Expected block to have correct graffiti")
+	assert.DeepEqual(t, req.Graffiti, block.Body.Graffiti, "Expected block to have correct Graffiti")
	assert.Equal(t, params.BeaconConfig().MaxAttestations, uint64(len(block.Body.Attestations)), "Expected block atts to be aggregated down to 1")
	hasUnaggregatedAtt := false
	for _, a := range block.Body.Attestations {
@@ -278,7 +279,7 @@ func TestProposer_ComputeStateRoot_OK(t *testing.T) {
	req.Signature, err = helpers.ComputeDomainAndSign(beaconState, currentEpoch, req.Block, params.BeaconConfig().DomainBeaconProposer, privKeys[proposerIdx])
	require.NoError(t, err)

-	_, err = proposerServer.computeStateRoot(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(req))
+	_, err = proposerServer.ComputeStateRoot(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(req))
	require.NoError(t, err)
}
@@ -333,7 +334,7 @@ func TestProposer_PendingDeposits_Eth1DataVoteOK(t *testing.T) {
		HeadFetcher: &mock.ChainService{State: beaconState, Root: blkRoot[:]},
	}

-	// It should also return the recent deposits after their follow window.
+	// It should also return the recent Deposits after their follow window.
	p.LatestBlockNumber = big.NewInt(0).Add(p.LatestBlockNumber, big.NewInt(10000))
	_, eth1Height, err := bs.canonicalEth1Data(ctx, beaconState, &ethpb.Eth1Data{})
	require.NoError(t, err)
@@ -373,7 +374,7 @@ func TestProposer_PendingDeposits_OutsideEth1FollowWindow(t *testing.T) {
		},
	}

-	beaconState, err := v1.InitializeFromProto(&statepb.BeaconState{
+	beaconState, err := v1.InitializeFromProto(&ethpb.BeaconState{
		Eth1Data: &ethpb.Eth1Data{
			BlockHash:   bytesutil.PadTo([]byte("0x0"), 32),
			DepositRoot: make([]byte, 32),
@@ -466,14 +467,14 @@ func TestProposer_PendingDeposits_OutsideEth1FollowWindow(t *testing.T) {

	deposits, err := bs.deposits(ctx, beaconState, &ethpb.Eth1Data{})
	require.NoError(t, err)
-	assert.Equal(t, 0, len(deposits), "Received unexpected list of deposits")
+	assert.Equal(t, 0, len(deposits), "Received unexpected list of Deposits")

-	// It should not return the recent deposits after their follow window.
-	// as latest block number makes no difference in retrieval of deposits
+	// It should not return the recent Deposits after their follow window.
+	// as latest block number makes no difference in retrieval of Deposits
	p.LatestBlockNumber = big.NewInt(0).Add(p.LatestBlockNumber, big.NewInt(10000))
	deposits, err = bs.deposits(ctx, beaconState, &ethpb.Eth1Data{})
	require.NoError(t, err)
-	assert.Equal(t, 0, len(deposits), "Received unexpected number of pending deposits")
+	assert.Equal(t, 0, len(deposits), "Received unexpected number of pending Deposits")
}
func TestProposer_PendingDeposits_FollowsCorrectEth1Block(t *testing.T) {
@@ -501,7 +502,7 @@ func TestProposer_PendingDeposits_FollowsCorrectEth1Block(t *testing.T) {
		votes = append(votes, vote)
	}

-	beaconState, err := v1.InitializeFromProto(&statepb.BeaconState{
+	beaconState, err := v1.InitializeFromProto(&ethpb.BeaconState{
		Eth1Data: &ethpb.Eth1Data{
			BlockHash:   []byte("0x0"),
			DepositRoot: make([]byte, 32),
@@ -595,15 +596,15 @@ func TestProposer_PendingDeposits_FollowsCorrectEth1Block(t *testing.T) {

	deposits, err := bs.deposits(ctx, beaconState, &ethpb.Eth1Data{})
	require.NoError(t, err)
-	assert.Equal(t, 0, len(deposits), "Received unexpected list of deposits")
+	assert.Equal(t, 0, len(deposits), "Received unexpected list of Deposits")

-	// It should also return the recent deposits after their follow window.
+	// It should also return the recent Deposits after their follow window.
	p.LatestBlockNumber = big.NewInt(0).Add(p.LatestBlockNumber, big.NewInt(10000))
-	// we should get our pending deposits once this vote pushes the vote tally to include
+	// we should get our pending Deposits once this vote pushes the vote tally to include
	// the updated eth1 data.
	deposits, err = bs.deposits(ctx, beaconState, vote)
	require.NoError(t, err)
-	assert.Equal(t, len(recentDeposits), len(deposits), "Received unexpected number of pending deposits")
+	assert.Equal(t, len(recentDeposits), len(deposits), "Received unexpected number of pending Deposits")
}

func TestProposer_PendingDeposits_CantReturnBelowStateEth1DepositIndex(t *testing.T) {
@@ -692,13 +693,13 @@ func TestProposer_PendingDeposits_CantReturnBelowStateEth1DepositIndex(t *testin
		HeadFetcher: &mock.ChainService{State: beaconState, Root: blkRoot[:]},
	}

-	// It should also return the recent deposits after their follow window.
+	// It should also return the recent Deposits after their follow window.
	p.LatestBlockNumber = big.NewInt(0).Add(p.LatestBlockNumber, big.NewInt(10000))
	deposits, err := bs.deposits(ctx, beaconState, &ethpb.Eth1Data{})
	require.NoError(t, err)

	expectedDeposits := 6
-	assert.Equal(t, expectedDeposits, len(deposits), "Received unexpected number of pending deposits")
+	assert.Equal(t, expectedDeposits, len(deposits), "Received unexpected number of pending Deposits")
}

func TestProposer_PendingDeposits_CantReturnMoreThanMax(t *testing.T) {
@@ -712,7 +713,7 @@ func TestProposer_PendingDeposits_CantReturnMoreThanMax(t *testing.T) {
		},
	}

-	beaconState, err := v1.InitializeFromProto(&statepb.BeaconState{
+	beaconState, err := v1.InitializeFromProto(&ethpb.BeaconState{
		Eth1Data: &ethpb.Eth1Data{
			BlockHash:   bytesutil.PadTo([]byte("0x0"), 32),
			DepositRoot: make([]byte, 32),
@@ -788,11 +789,11 @@ func TestProposer_PendingDeposits_CantReturnMoreThanMax(t *testing.T) {
		HeadFetcher: &mock.ChainService{State: beaconState, Root: blkRoot[:]},
	}

-	// It should also return the recent deposits after their follow window.
+	// It should also return the recent Deposits after their follow window.
	p.LatestBlockNumber = big.NewInt(0).Add(p.LatestBlockNumber, big.NewInt(10000))
	deposits, err := bs.deposits(ctx, beaconState, &ethpb.Eth1Data{})
	require.NoError(t, err)
-	assert.Equal(t, params.BeaconConfig().MaxDeposits, uint64(len(deposits)), "Received unexpected number of pending deposits")
+	assert.Equal(t, params.BeaconConfig().MaxDeposits, uint64(len(deposits)), "Received unexpected number of pending Deposits")
}

func TestProposer_PendingDeposits_CantReturnMoreThanDepositCount(t *testing.T) {
@@ -806,7 +807,7 @@ func TestProposer_PendingDeposits_CantReturnMoreThanDepositCount(t *testing.T) {
		},
	}

-	beaconState, err := v1.InitializeFromProto(&statepb.BeaconState{
+	beaconState, err := v1.InitializeFromProto(&ethpb.BeaconState{
		Eth1Data: &ethpb.Eth1Data{
			BlockHash:   bytesutil.PadTo([]byte("0x0"), 32),
			DepositRoot: make([]byte, 32),
@@ -882,11 +883,11 @@ func TestProposer_PendingDeposits_CantReturnMoreThanDepositCount(t *testing.T) {
		PendingDepositsFetcher: depositCache,
	}

-	// It should also return the recent deposits after their follow window.
+	// It should also return the recent Deposits after their follow window.
	p.LatestBlockNumber = big.NewInt(0).Add(p.LatestBlockNumber, big.NewInt(10000))
	deposits, err := bs.deposits(ctx, beaconState, &ethpb.Eth1Data{})
	require.NoError(t, err)
-	assert.Equal(t, 3, len(deposits), "Received unexpected number of pending deposits")
+	assert.Equal(t, 3, len(deposits), "Received unexpected number of pending Deposits")
}
func TestProposer_DepositTrie_UtilizesCachedFinalizedDeposits(t *testing.T) {
@@ -900,7 +901,7 @@ func TestProposer_DepositTrie_UtilizesCachedFinalizedDeposits(t *testing.T) {
		},
	}

-	beaconState, err := v1.InitializeFromProto(&statepb.BeaconState{
+	beaconState, err := v1.InitializeFromProto(&ethpb.BeaconState{
		Eth1Data: &ethpb.Eth1Data{
			BlockHash:   bytesutil.PadTo([]byte("0x0"), 32),
			DepositRoot: make([]byte, 32),
@@ -1010,7 +1011,7 @@ func TestProposer_DepositTrie_RebuildTrie(t *testing.T) {
		},
	}

-	beaconState, err := v1.InitializeFromProto(&statepb.BeaconState{
+	beaconState, err := v1.InitializeFromProto(&ethpb.BeaconState{
		Eth1Data: &ethpb.Eth1Data{
			BlockHash:   bytesutil.PadTo([]byte("0x0"), 32),
			DepositRoot: make([]byte, 32),
@@ -1291,7 +1292,7 @@ func TestProposer_Eth1Data_MajorityVote(t *testing.T) {
			InsertBlock(52, earliestValidTime+2, []byte("second")).
			InsertBlock(100, latestValidTime, []byte("latest"))

-		beaconState, err := v1.InitializeFromProto(&statepb.BeaconState{
+		beaconState, err := v1.InitializeFromProto(&ethpb.BeaconState{
			Slot: slot,
			Eth1DataVotes: []*ethpb.Eth1Data{
				{BlockHash: []byte("first"), DepositCount: 1},
@@ -1327,7 +1328,7 @@ func TestProposer_Eth1Data_MajorityVote(t *testing.T) {
			InsertBlock(52, earliestValidTime+2, []byte("second")).
			InsertBlock(100, latestValidTime, []byte("latest"))

-		beaconState, err := v1.InitializeFromProto(&statepb.BeaconState{
+		beaconState, err := v1.InitializeFromProto(&ethpb.BeaconState{
			Slot: slot,
			Eth1DataVotes: []*ethpb.Eth1Data{
				{BlockHash: []byte("earliest"), DepositCount: 1},
@@ -1363,7 +1364,7 @@ func TestProposer_Eth1Data_MajorityVote(t *testing.T) {
			InsertBlock(51, earliestValidTime+1, []byte("first")).
			InsertBlock(100, latestValidTime, []byte("latest"))

-		beaconState, err := v1.InitializeFromProto(&statepb.BeaconState{
+		beaconState, err := v1.InitializeFromProto(&ethpb.BeaconState{
			Slot: slot,
			Eth1DataVotes: []*ethpb.Eth1Data{
				{BlockHash: []byte("first"), DepositCount: 1},
@@ -1400,7 +1401,7 @@ func TestProposer_Eth1Data_MajorityVote(t *testing.T) {
			InsertBlock(51, earliestValidTime+1, []byte("first")).
			InsertBlock(100, latestValidTime, []byte("latest"))

-		beaconState, err := v1.InitializeFromProto(&statepb.BeaconState{
+		beaconState, err := v1.InitializeFromProto(&ethpb.BeaconState{
			Slot: slot,
			Eth1DataVotes: []*ethpb.Eth1Data{
				{BlockHash: []byte("before_range"), DepositCount: 1},
@@ -1437,7 +1438,7 @@ func TestProposer_Eth1Data_MajorityVote(t *testing.T) {
			InsertBlock(100, latestValidTime, []byte("latest")).
			InsertBlock(101, latestValidTime+1, []byte("after_range"))

-		beaconState, err := v1.InitializeFromProto(&statepb.BeaconState{
+		beaconState, err := v1.InitializeFromProto(&ethpb.BeaconState{
			Slot: slot,
			Eth1DataVotes: []*ethpb.Eth1Data{
				{BlockHash: []byte("first"), DepositCount: 1},
@@ -1474,7 +1475,7 @@ func TestProposer_Eth1Data_MajorityVote(t *testing.T) {
			InsertBlock(52, earliestValidTime+2, []byte("second")).
			InsertBlock(100, latestValidTime, []byte("latest"))

-		beaconState, err := v1.InitializeFromProto(&statepb.BeaconState{
+		beaconState, err := v1.InitializeFromProto(&ethpb.BeaconState{
			Slot: slot,
			Eth1DataVotes: []*ethpb.Eth1Data{
				{BlockHash: []byte("unknown"), DepositCount: 1},
@@ -1508,7 +1509,7 @@ func TestProposer_Eth1Data_MajorityVote(t *testing.T) {
			InsertBlock(49, earliestValidTime-1, []byte("before_range")).
			InsertBlock(101, latestValidTime+1, []byte("after_range"))

-		beaconState, err := v1.InitializeFromProto(&statepb.BeaconState{
+		beaconState, err := v1.InitializeFromProto(&ethpb.BeaconState{
			Slot: slot,
		})
		require.NoError(t, err)
@@ -1540,7 +1541,7 @@ func TestProposer_Eth1Data_MajorityVote(t *testing.T) {
			InsertBlock(52, earliestValidTime+2, []byte("second")).
			InsertBlock(101, latestValidTime+1, []byte("after_range"))

-		beaconState, err := v1.InitializeFromProto(&statepb.BeaconState{
+		beaconState, err := v1.InitializeFromProto(&ethpb.BeaconState{
			Slot: slot,
			Eth1DataVotes: []*ethpb.Eth1Data{
				{BlockHash: []byte("before_range"), DepositCount: 1},
@@ -1574,7 +1575,7 @@ func TestProposer_Eth1Data_MajorityVote(t *testing.T) {
			InsertBlock(50, earliestValidTime, []byte("earliest")).
			InsertBlock(100, latestValidTime, []byte("latest"))

-		beaconState, err := v1.InitializeFromProto(&statepb.BeaconState{
+		beaconState, err := v1.InitializeFromProto(&ethpb.BeaconState{
			Slot:          slot,
			Eth1DataVotes: []*ethpb.Eth1Data{}})
		require.NoError(t, err)
@@ -1599,12 +1600,12 @@ func TestProposer_Eth1Data_MajorityVote(t *testing.T) {
		assert.DeepEqual(t, expectedHash, hash)
	})

-	t.Run("no votes and more recent block has less deposits - choose current eth1data", func(t *testing.T) {
+	t.Run("no votes and more recent block has less Deposits - choose current eth1data", func(t *testing.T) {
		p := mockPOW.NewPOWChain().
			InsertBlock(50, earliestValidTime, []byte("earliest")).
			InsertBlock(100, latestValidTime, []byte("latest"))

-		beaconState, err := v1.InitializeFromProto(&statepb.BeaconState{
+		beaconState, err := v1.InitializeFromProto(&ethpb.BeaconState{
			Slot: slot,
		})
		require.NoError(t, err)
@@ -1638,7 +1639,7 @@ func TestProposer_Eth1Data_MajorityVote(t *testing.T) {
			InsertBlock(52, earliestValidTime+2, []byte("second")).
			InsertBlock(100, latestValidTime, []byte("latest"))

-		beaconState, err := v1.InitializeFromProto(&statepb.BeaconState{
+		beaconState, err := v1.InitializeFromProto(&ethpb.BeaconState{
			Slot: slot,
			Eth1DataVotes: []*ethpb.Eth1Data{
				{BlockHash: []byte("first"), DepositCount: 1},
@@ -1666,7 +1667,7 @@ func TestProposer_Eth1Data_MajorityVote(t *testing.T) {
		assert.DeepEqual(t, expectedHash, hash)
	})

-	t.Run("highest count on block with less deposits - choose another block", func(t *testing.T) {
+	t.Run("highest count on block with less Deposits - choose another block", func(t *testing.T) {
		t.Skip()
		p := mockPOW.NewPOWChain().
			InsertBlock(50, earliestValidTime, []byte("earliest")).
@@ -1674,7 +1675,7 @@ func TestProposer_Eth1Data_MajorityVote(t *testing.T) {
			InsertBlock(52, earliestValidTime+2, []byte("second")).
			InsertBlock(100, latestValidTime, []byte("latest"))

-		beaconState, err := v1.InitializeFromProto(&statepb.BeaconState{
+		beaconState, err := v1.InitializeFromProto(&ethpb.BeaconState{
			Slot: slot,
			Eth1DataVotes: []*ethpb.Eth1Data{
				{BlockHash: []byte("no_new_deposits"), DepositCount: 0},
@@ -1707,7 +1708,7 @@ func TestProposer_Eth1Data_MajorityVote(t *testing.T) {
		t.Skip()
		p := mockPOW.NewPOWChain().InsertBlock(50, earliestValidTime, []byte("earliest"))

-		beaconState, err := v1.InitializeFromProto(&statepb.BeaconState{
+		beaconState, err := v1.InitializeFromProto(&ethpb.BeaconState{
			Slot: slot,
			Eth1DataVotes: []*ethpb.Eth1Data{
				{BlockHash: []byte("earliest"), DepositCount: 1},
@@ -1741,7 +1742,7 @@ func TestProposer_Eth1Data_MajorityVote(t *testing.T) {
			// because of earliest block increment in the algorithm.
			InsertBlock(50, earliestValidTime+1, []byte("first"))

-		beaconState, err := v1.InitializeFromProto(&statepb.BeaconState{
+		beaconState, err := v1.InitializeFromProto(&ethpb.BeaconState{
			Slot: slot,
			Eth1DataVotes: []*ethpb.Eth1Data{
				{BlockHash: []byte("before_range"), DepositCount: 1},
@@ -1769,7 +1770,7 @@ func TestProposer_Eth1Data_MajorityVote(t *testing.T) {
		assert.DeepEqual(t, expectedHash, hash)
	})

-	t.Run("no deposits - choose chain start eth1data", func(t *testing.T) {
+	t.Run("no Deposits - choose chain start eth1data", func(t *testing.T) {
		p := mockPOW.NewPOWChain().
			InsertBlock(50, earliestValidTime, []byte("earliest")).
			InsertBlock(100, latestValidTime, []byte("latest"))
@@ -1780,7 +1781,7 @@ func TestProposer_Eth1Data_MajorityVote(t *testing.T) {
		depositCache, err := depositcache.New()
		require.NoError(t, err)

-		beaconState, err := v1.InitializeFromProto(&statepb.BeaconState{
+		beaconState, err := v1.InitializeFromProto(&ethpb.BeaconState{
			Slot: slot,
			Eth1DataVotes: []*ethpb.Eth1Data{
				{BlockHash: []byte("earliest"), DepositCount: 1},
@@ -1828,7 +1829,7 @@ func TestProposer_FilterAttestation(t *testing.T) {
		expectedAtts func(inputAtts []*ethpb.Attestation) []*ethpb.Attestation
	}{
		{
-			name: "nil attestations",
+			name: "nil Attestations",
			inputAtts: func() []*ethpb.Attestation {
				return nil
			},
@@ -1837,7 +1838,7 @@ func TestProposer_FilterAttestation(t *testing.T) {
			},
		},
		{
-			name: "invalid attestations",
+			name: "invalid Attestations",
			inputAtts: func() []*ethpb.Attestation {
				atts := make([]*ethpb.Attestation, 10)
				for i := 0; i < len(atts); i++ {
@@ -1923,7 +1924,7 @@ func TestProposer_Deposits_ReturnsEmptyList_IfLatestEth1DataEqGenesisEth1Block(t
		GenesisEth1Block: height,
	}

-	beaconState, err := v1.InitializeFromProto(&statepb.BeaconState{
+	beaconState, err := v1.InitializeFromProto(&ethpb.BeaconState{
		Eth1Data: &ethpb.Eth1Data{
			BlockHash:   bytesutil.PadTo([]byte("0x0"), 32),
			DepositRoot: make([]byte, 32),
@@ -1999,11 +2000,11 @@ func TestProposer_Deposits_ReturnsEmptyList_IfLatestEth1DataEqGenesisEth1Block(t
		PendingDepositsFetcher: depositCache,
	}

-	// It should also return the recent deposits after their follow window.
+	// It should also return the recent Deposits after their follow window.
	p.LatestBlockNumber = big.NewInt(0).Add(p.LatestBlockNumber, big.NewInt(10000))
	deposits, err := bs.deposits(ctx, beaconState, &ethpb.Eth1Data{})
	require.NoError(t, err)
-	assert.Equal(t, 0, len(deposits), "Received unexpected number of pending deposits")
+	assert.Equal(t, 0, len(deposits), "Received unexpected number of pending Deposits")
}

func TestProposer_DeleteAttsInPool_Aggregated(t *testing.T) {
@@ -2032,6 +2033,200 @@ func TestProposer_DeleteAttsInPool_Aggregated(t *testing.T) {
	assert.Equal(t, 0, len(atts), "Did not delete unaggregated attestation")
}
func TestProposer_ProposeBlockAltair_OK(t *testing.T) {
	db := dbutil.SetupDB(t)
	ctx := context.Background()
	params.SetupTestConfigCleanup(t)
	params.OverrideBeaconConfig(params.MainnetConfig())

	genesis := testutil.NewBeaconBlockAltair()
	wsb, err := wrapper.WrappedAltairSignedBeaconBlock(genesis)
	require.NoError(t, err)
	require.NoError(
		t,
		db.SaveBlock(
			ctx,
			wsb,
		),
		"Could not save genesis block",
	)

	numDeposits := uint64(64)
	beaconState, _ := testutil.DeterministicGenesisStateAltair(t, numDeposits)
	bsRoot, err := beaconState.HashTreeRoot(ctx)
	require.NoError(t, err)
	genesisRoot, err := genesis.Block.HashTreeRoot()
	require.NoError(t, err)
	require.NoError(t, db.SaveState(ctx, beaconState, genesisRoot), "Could not save genesis state")

	c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
	proposerServer := &Server{
		ChainStartFetcher: &mockPOW.POWChain{},
		Eth1InfoFetcher:   &mockPOW.POWChain{},
		Eth1BlockFetcher:  &mockPOW.POWChain{},
		BlockReceiver:     c,
		HeadFetcher:       c,
		BlockNotifier:     c.BlockNotifier(),
		P2P:               mockp2p.NewTestP2P(t),
	}
	req := testutil.NewBeaconBlockAltair()
	req.Block.Slot = 5
	req.Block.ParentRoot = bsRoot[:]
	wsb, err = wrapper.WrappedAltairSignedBeaconBlock(req)
	require.NoError(t, err)
	require.NoError(t, db.SaveBlock(ctx, wsb))
	_, err = proposerServer.ProposeBlockAltair(context.Background(), req)
	assert.NoError(t, err, "Could not propose block correctly")
}
func TestProposer_GetBlockAltair_OK(t *testing.T) {
	db := dbutil.SetupDB(t)
	ctx := context.Background()

	params.SetupTestConfigCleanup(t)
	params.OverrideBeaconConfig(params.MainnetConfig())
	beaconState, privKeys := testutil.DeterministicGenesisStateAltair(t, 64)
	committee, err := altair.NextSyncCommittee(context.Background(), beaconState)
	require.NoError(t, err)
	require.NoError(t, beaconState.SetCurrentSyncCommittee(committee))

	stateRoot, err := beaconState.HashTreeRoot(ctx)
	require.NoError(t, err, "Could not hash genesis state")

	genesis := b.NewGenesisBlock(stateRoot[:])
	genAltair := &ethpb.SignedBeaconBlockAltair{
		Block: &ethpb.BeaconBlockAltair{
			Slot:       genesis.Block.Slot,
			ParentRoot: genesis.Block.ParentRoot,
			StateRoot:  genesis.Block.StateRoot,
			Body: &ethpb.BeaconBlockBodyAltair{
				RandaoReveal:  genesis.Block.Body.RandaoReveal,
				Graffiti:      genesis.Block.Body.Graffiti,
				Eth1Data:      genesis.Block.Body.Eth1Data,
				SyncAggregate: &ethpb.SyncAggregate{SyncCommitteeBits: bitfield.NewBitvector512(), SyncCommitteeSignature: make([]byte, 96)},
			},
		},
		Signature: genesis.Signature,
	}
	wsb, err := wrapper.WrappedAltairSignedBeaconBlock(genAltair)
	require.NoError(t, err)
	require.NoError(t, db.SaveBlock(ctx, wsb), "Could not save genesis block")

	parentRoot, err := genAltair.Block.HashTreeRoot()
	require.NoError(t, err, "Could not get signing root")
	require.NoError(t, db.SaveState(ctx, beaconState, parentRoot), "Could not save genesis state")
	require.NoError(t, db.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
proposerServer := &Server{
|
||||
HeadFetcher: &mock.ChainService{State: beaconState, Root: parentRoot[:]},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BlockReceiver: &mock.ChainService{},
|
||||
ChainStartFetcher: &mockPOW.POWChain{},
|
||||
Eth1InfoFetcher: &mockPOW.POWChain{},
|
||||
Eth1BlockFetcher: &mockPOW.POWChain{},
|
||||
MockEth1Votes: true,
|
||||
AttPool: attestations.NewPool(),
|
||||
SlashingsPool: slashings.NewPool(),
|
||||
ExitPool: voluntaryexits.NewPool(),
|
||||
StateGen: stategen.New(db),
|
||||
SyncCommitteePool: synccommittee.NewStore(),
|
||||
}
|
||||
|
||||
randaoReveal, err := testutil.RandaoReveal(beaconState, 0, privKeys)
|
||||
require.NoError(t, err)
|
||||
|
||||
graffiti := bytesutil.ToBytes32([]byte("eth2"))
|
||||
req := ðpb.BlockRequest{
|
||||
Slot: 1,
|
||||
RandaoReveal: randaoReveal,
|
||||
Graffiti: graffiti[:],
|
||||
}
|
||||
|
||||
proposerSlashings := make([]*ethpb.ProposerSlashing, params.BeaconConfig().MaxProposerSlashings)
|
||||
for i := types.ValidatorIndex(0); uint64(i) < params.BeaconConfig().MaxProposerSlashings; i++ {
|
||||
proposerSlashing, err := testutil.GenerateProposerSlashingForValidator(
|
||||
beaconState,
|
||||
privKeys[i],
|
||||
i, /* validator index */
|
||||
)
|
||||
require.NoError(t, err)
|
||||
proposerSlashings[i] = proposerSlashing
|
||||
err = proposerServer.SlashingsPool.InsertProposerSlashing(context.Background(), beaconState, proposerSlashing)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
attSlashings := make([]*ethpb.AttesterSlashing, params.BeaconConfig().MaxAttesterSlashings)
|
||||
for i := uint64(0); i < params.BeaconConfig().MaxAttesterSlashings; i++ {
|
||||
attesterSlashing, err := testutil.GenerateAttesterSlashingForValidator(
|
||||
beaconState,
|
||||
privKeys[i+params.BeaconConfig().MaxProposerSlashings],
|
||||
types.ValidatorIndex(i+params.BeaconConfig().MaxProposerSlashings), /* validator index */
|
||||
)
|
||||
require.NoError(t, err)
|
||||
attSlashings[i] = attesterSlashing
|
||||
err = proposerServer.SlashingsPool.InsertAttesterSlashing(context.Background(), beaconState, attesterSlashing)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
block, err := proposerServer.GetBlockAltair(ctx, req)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, req.Slot, block.Slot, "Expected block to have slot of 1")
|
||||
assert.DeepEqual(t, parentRoot[:], block.ParentRoot, "Expected block to have correct parent root")
|
||||
assert.DeepEqual(t, randaoReveal, block.Body.RandaoReveal, "Expected block to have correct randao reveal")
|
||||
assert.DeepEqual(t, req.Graffiti, block.Body.Graffiti, "Expected block to have correct Graffiti")
|
||||
assert.Equal(t, params.BeaconConfig().MaxProposerSlashings, uint64(len(block.Body.ProposerSlashings)))
|
||||
assert.DeepEqual(t, proposerSlashings, block.Body.ProposerSlashings)
|
||||
assert.Equal(t, params.BeaconConfig().MaxAttesterSlashings, uint64(len(block.Body.AttesterSlashings)))
|
||||
assert.DeepEqual(t, attSlashings, block.Body.AttesterSlashings)
|
||||
}
|
||||
|
||||
func TestProposer_GetSyncAggregate_OK(t *testing.T) {
	proposerServer := &Server{
		SyncChecker:       &mockSync.Sync{IsSyncing: false},
		SyncCommitteePool: synccommittee.NewStore(),
	}

	r := params.BeaconConfig().ZeroHash
	conts := []*ethpb.SyncCommitteeContribution{
		{Slot: 1, SubcommitteeIndex: 0, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b0001}, BlockRoot: r[:]},
		{Slot: 1, SubcommitteeIndex: 0, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b1001}, BlockRoot: r[:]},
		{Slot: 1, SubcommitteeIndex: 0, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b1110}, BlockRoot: r[:]},
		{Slot: 1, SubcommitteeIndex: 1, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b0001}, BlockRoot: r[:]},
		{Slot: 1, SubcommitteeIndex: 1, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b1001}, BlockRoot: r[:]},
		{Slot: 1, SubcommitteeIndex: 1, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b1110}, BlockRoot: r[:]},
		{Slot: 1, SubcommitteeIndex: 2, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b0001}, BlockRoot: r[:]},
		{Slot: 1, SubcommitteeIndex: 2, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b1001}, BlockRoot: r[:]},
		{Slot: 1, SubcommitteeIndex: 2, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b1110}, BlockRoot: r[:]},
		{Slot: 1, SubcommitteeIndex: 3, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b0001}, BlockRoot: r[:]},
		{Slot: 1, SubcommitteeIndex: 3, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b1001}, BlockRoot: r[:]},
		{Slot: 1, SubcommitteeIndex: 3, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b1110}, BlockRoot: r[:]},
		{Slot: 2, SubcommitteeIndex: 0, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b10101010}, BlockRoot: r[:]},
		{Slot: 2, SubcommitteeIndex: 1, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b10101010}, BlockRoot: r[:]},
		{Slot: 2, SubcommitteeIndex: 2, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b10101010}, BlockRoot: r[:]},
		{Slot: 2, SubcommitteeIndex: 3, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b10101010}, BlockRoot: r[:]},
	}

	for _, cont := range conts {
		require.NoError(t, proposerServer.SyncCommitteePool.SaveSyncCommitteeContribution(cont))
	}

	aggregate, err := proposerServer.getSyncAggregate(context.Background(), 1, bytesutil.ToBytes32(conts[0].BlockRoot))
	require.NoError(t, err)
	require.DeepEqual(t, bitfield.Bitvector512{0xf, 0xf, 0xf, 0xf}, aggregate.SyncCommitteeBits)

	aggregate, err = proposerServer.getSyncAggregate(context.Background(), 2, bytesutil.ToBytes32(conts[0].BlockRoot))
	require.NoError(t, err)
	require.DeepEqual(t, bitfield.Bitvector512{0xaa, 0xaa, 0xaa, 0xaa}, aggregate.SyncCommitteeBits)

	aggregate, err = proposerServer.getSyncAggregate(context.Background(), 3, bytesutil.ToBytes32(conts[0].BlockRoot))
	require.NoError(t, err)
	require.DeepEqual(t, bitfield.NewBitvector512(), aggregate.SyncCommitteeBits)
}
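The expected bitvectors follow directly from the aggregation rules in getSyncAggregate. For slot 1, each subnet holds contributions with bits 0b0001, 0b1001, and 0b1110; 0b0001 and 0b1110 are disjoint, so they aggregate to 0b1111 (0xf), which carries more participants than any other combination and is therefore the most profitable pick per subnet. Slot 2 has a single contribution per subnet (0b10101010 = 0xaa), and slot 3 has none, yielding an empty bitvector. A quick check of the slot-1 arithmetic:

package main

import "fmt"

func main() {
	// Disjoint bitvectors aggregate by OR; 0b1001 overlaps 0b1110 at bit 3,
	// so the profitable disjoint pair is 0b0001 | 0b1110.
	fmt.Printf("%#x\n", 0b0001|0b1110) // 0xf
	fmt.Printf("%d\n", 0b0001&0b1110)  // 0, confirming disjointness
}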

func majorityVoteBoundaryTime(slot types.Slot) (uint64, uint64) {
	slots := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().EpochsPerEth1VotingPeriod))
	slotStartTime := uint64(mockPOW.GenesisTime) + uint64((slot - (slot % (slots))).Mul(params.BeaconConfig().SecondsPerSlot))
@@ -7,7 +7,6 @@ import (
	"context"
	"time"

-	"github.com/golang/protobuf/ptypes/empty"
	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
	"github.com/prysmaticlabs/prysm/beacon-chain/cache"
@@ -19,14 +18,15 @@ import (
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
	"github.com/prysmaticlabs/prysm/beacon-chain/operations/slashings"
+	"github.com/prysmaticlabs/prysm/beacon-chain/operations/synccommittee"
	"github.com/prysmaticlabs/prysm/beacon-chain/operations/voluntaryexits"
	"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
	"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
	"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
	"github.com/prysmaticlabs/prysm/beacon-chain/sync"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
-	statepb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/shared/bytesutil"
+	"github.com/prysmaticlabs/prysm/shared/p2putils"
	"github.com/prysmaticlabs/prysm/shared/params"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
@@ -44,7 +44,7 @@ type Server struct {
	ForkFetcher         blockchain.ForkFetcher
	FinalizationFetcher blockchain.FinalizationFetcher
	TimeFetcher         blockchain.TimeFetcher
-	CanonicalStateChan  chan *statepb.BeaconState
+	CanonicalStateChan  chan *ethpb.BeaconState
	BlockFetcher        powchain.POWBlockFetcher
	DepositFetcher      depositcache.DepositFetcher
	ChainStartFetcher   powchain.ChainStartFetcher
@@ -56,6 +56,7 @@ type Server struct {
	AttPool           attestations.Pool
	SlashingsPool     slashings.PoolManager
	ExitPool          voluntaryexits.PoolManager
+	SyncCommitteePool synccommittee.Pool
	BlockReceiver     blockchain.BlockReceiver
	MockEth1Votes     bool
	Eth1BlockFetcher  powchain.POWBlockFetcher
@@ -64,38 +65,6 @@ type Server struct {
	StateGen stategen.StateManager
}

-func (vs *Server) GetBlockAltair(ctx context.Context, request *ethpb.BlockRequest) (*ethpb.BeaconBlockAltair, error) {
-	return nil, status.Error(codes.Unimplemented, "Unimplemented")
-}
-
-func (vs *Server) ProposeBlockAltair(ctx context.Context, altair *ethpb.SignedBeaconBlockAltair) (*ethpb.ProposeResponse, error) {
-	return nil, status.Error(codes.Unimplemented, "Unimplemented")
-}
-
-func (vs *Server) GetSyncMessageBlockRoot(ctx context.Context, empty *empty.Empty) (*ethpb.SyncMessageBlockRootResponse, error) {
-	return nil, status.Error(codes.Unimplemented, "Unimplemented")
-}
-
-func (vs *Server) SubmitSyncMessage(ctx context.Context, message *ethpb.SyncCommitteeMessage) (*empty.Empty, error) {
-	return nil, status.Error(codes.Unimplemented, "Unimplemented")
-}
-
-func (vs *Server) GetSyncSubcommitteeIndex(ctx context.Context, request *ethpb.SyncSubcommitteeIndexRequest) (*ethpb.SyncSubcommitteeIndexResponse, error) {
-	return nil, status.Error(codes.Unimplemented, "Unimplemented")
-}
-
-func (vs *Server) GetSyncCommitteeContribution(ctx context.Context, request *ethpb.SyncCommitteeContributionRequest) (*ethpb.SyncCommitteeContribution, error) {
-	return nil, status.Error(codes.Unimplemented, "Unimplemented")
-}
-
-func (vs *Server) SubmitSignedContributionAndProof(ctx context.Context, proof *ethpb.SignedContributionAndProof) (*empty.Empty, error) {
-	return nil, status.Error(codes.Unimplemented, "Unimplemented")
-}
-
-func (vs *Server) StreamBlocksAltair(request *ethpb.StreamBlocksRequest, server ethpb.BeaconNodeValidator_StreamBlocksAltairServer) error {
-	return status.Error(codes.Unimplemented, "Unimplemented")
-}
-
// WaitForActivation checks if a validator public key exists in the active validator registry of the current
// beacon state, if not, then it creates a stream which listens for canonical states which contain
// the validator with the public key as an active validator record.
@@ -155,7 +124,10 @@ func (vs *Server) ValidatorIndex(ctx context.Context, req *ethpb.ValidatorIndexR

// DomainData fetches the current domain version information from the beacon state.
func (vs *Server) DomainData(_ context.Context, request *ethpb.DomainRequest) (*ethpb.DomainResponse, error) {
-	fork := vs.ForkFetcher.CurrentFork()
+	fork, err := p2putils.Fork(request.Epoch)
+	if err != nil {
+		return nil, err
+	}
	headGenesisValidatorRoot := vs.HeadFetcher.HeadGenesisValidatorRoot()
	dv, err := helpers.Domain(fork, request.Epoch, bytesutil.ToBytes4(request.Domain), headGenesisValidatorRoot[:])
	if err != nil {
beacon-chain/rpc/prysm/v1alpha1/validator/sync_committee.go (new file, 186 lines)
@@ -0,0 +1,186 @@
package validator

import (
	"bytes"
	"context"

	"github.com/pkg/errors"
	types "github.com/prysmaticlabs/eth2-types"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/shared/bls"
	"github.com/prysmaticlabs/prysm/shared/bytesutil"
	"github.com/prysmaticlabs/prysm/shared/params"
	"golang.org/x/sync/errgroup"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	"google.golang.org/protobuf/types/known/emptypb"
)

// GetSyncMessageBlockRoot retrieves the sync committee block root of the beacon chain.
func (vs *Server) GetSyncMessageBlockRoot(
	ctx context.Context, _ *emptypb.Empty,
) (*ethpb.SyncMessageBlockRootResponse, error) {
	r, err := vs.HeadFetcher.HeadRoot(ctx)
	if err != nil {
		return nil, status.Errorf(codes.Internal, "Could not retrieve head root: %v", err)
	}

	return &ethpb.SyncMessageBlockRootResponse{
		Root: r,
	}, nil
}

// SubmitSyncMessage submits the sync committee message to the network.
// It also saves the sync committee message into the pending pool for block inclusion.
func (vs *Server) SubmitSyncMessage(ctx context.Context, msg *ethpb.SyncCommitteeMessage) (*emptypb.Empty, error) {
	errs, ctx := errgroup.WithContext(ctx)

	idxResp, err := vs.syncSubcommitteeIndex(ctx, msg.ValidatorIndex, msg.Slot)
	if err != nil {
		return &emptypb.Empty{}, err
	}
	// Broadcast and save the message into the pool in parallel, as a failure in
	// one should not affect the other. This broadcasts for all subnets.
	for _, id := range idxResp.Indices {
		subCommitteeSize := params.BeaconConfig().SyncCommitteeSize / params.BeaconConfig().SyncCommitteeSubnetCount
		subnet := uint64(id) / subCommitteeSize
		errs.Go(func() error {
			return vs.P2P.BroadcastSyncCommitteeMessage(ctx, subnet, msg)
		})
	}

	if err := vs.SyncCommitteePool.SaveSyncCommitteeMessage(msg); err != nil {
		return &emptypb.Empty{}, err
	}

	// Wait for the p2p broadcasts to complete and return the first error (if any).
	err = errs.Wait()
	return &emptypb.Empty{}, err
}
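The broadcast fan-out above relies on golang.org/x/sync/errgroup: each subnet broadcast runs in its own goroutine, and errs.Wait() surfaces the first failure. A stripped-down sketch of the same pattern, with a stubbed broadcast function standing in for vs.P2P.BroadcastSyncCommitteeMessage:

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func broadcast(ctx context.Context, subnet uint64) error {
	// Stand-in for the real p2p broadcast.
	fmt.Println("broadcast to subnet", subnet)
	return nil
}

func main() {
	errs, ctx := errgroup.WithContext(context.Background())
	for _, subnet := range []uint64{0, 1, 2, 3} {
		subnet := subnet // capture the loop variable for the closure
		errs.Go(func() error {
			return broadcast(ctx, subnet)
		})
	}
	if err := errs.Wait(); err != nil { // first non-nil error, if any
		fmt.Println("broadcast failed:", err)
	}
}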

// GetSyncSubcommitteeIndex is called by a sync committee participant to get
// its subcommittee index for sync message aggregation duty.
func (vs *Server) GetSyncSubcommitteeIndex(
	ctx context.Context, req *ethpb.SyncSubcommitteeIndexRequest,
) (*ethpb.SyncSubcommitteeIndexResponse, error) {
	index, exists := vs.HeadFetcher.HeadPublicKeyToValidatorIndex(ctx, bytesutil.ToBytes48(req.PublicKey))
	if !exists {
		return nil, errors.New("public key does not exist in state")
	}
	indices, err := vs.syncSubcommitteeIndex(ctx, index, req.Slot)
	if err != nil {
		return nil, status.Errorf(codes.Internal, "Could not get sync subcommittee index: %v", err)
	}
	return indices, nil
}

// syncSubcommitteeIndex returns a list of subcommittee indices of a validator and slot for sync message aggregation duty.
func (vs *Server) syncSubcommitteeIndex(
	ctx context.Context, index types.ValidatorIndex, slot types.Slot,
) (*ethpb.SyncSubcommitteeIndexResponse, error) {
	nextSlotEpoch := helpers.SlotToEpoch(slot + 1)
	currentEpoch := helpers.SlotToEpoch(slot)

	switch {
	case helpers.SyncCommitteePeriod(nextSlotEpoch) == helpers.SyncCommitteePeriod(currentEpoch):
		indices, err := vs.HeadFetcher.HeadCurrentSyncCommitteeIndices(ctx, index, slot)
		if err != nil {
			return nil, err
		}
		return &ethpb.SyncSubcommitteeIndexResponse{
			Indices: indices,
		}, nil
	// At the sync committee period boundary, the validator should sample the next epoch's sync committee.
	case helpers.SyncCommitteePeriod(nextSlotEpoch) == helpers.SyncCommitteePeriod(currentEpoch)+1:
		indices, err := vs.HeadFetcher.HeadNextSyncCommitteeIndices(ctx, index, slot)
		if err != nil {
			return nil, err
		}
		return &ethpb.SyncSubcommitteeIndexResponse{
			Indices: indices,
		}, nil
	default:
		// Impossible condition.
		return nil, errors.New("could not calculate sync subcommittee based on the period")
	}
}
|
||||
|
||||
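The period comparison that drives the switch above can be worked through by hand. A standalone sketch, assuming mainnet-like constants (SLOTS_PER_EPOCH = 32, EPOCHS_PER_SYNC_COMMITTEE_PERIOD = 256), showing that only the last slot of a period selects the next committee:

package main

import "fmt"

const (
    slotsPerEpoch                = 32  // assumed mainnet value
    epochsPerSyncCommitteePeriod = 256 // assumed mainnet value
)

// period mirrors SyncCommitteePeriod(SlotToEpoch(slot)).
func period(slot uint64) uint64 {
    epoch := slot / slotsPerEpoch
    return epoch / epochsPerSyncCommitteePeriod
}

func main() {
    // Last slot of the first period: slot+1 crosses into the next period,
    // so the validator is sampled from the *next* committee.
    boundary := uint64(epochsPerSyncCommitteePeriod*slotsPerEpoch - 1) // 8191
    fmt.Println(period(boundary), period(boundary+1))                 // 0 1
    // Any other slot: both sides fall in the same period; the current committee is used.
    fmt.Println(period(100), period(101)) // 0 0
}

This is the same boundary the test below exercises with periodBoundary.
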
// GetSyncCommitteeContribution is called by a sync committee aggregator
// to retrieve the sync committee contribution object.
func (vs *Server) GetSyncCommitteeContribution(
    ctx context.Context, req *ethpb.SyncCommitteeContributionRequest,
) (*ethpb.SyncCommitteeContribution, error) {
    msgs, err := vs.SyncCommitteePool.SyncCommitteeMessages(req.Slot)
    if err != nil {
        return nil, status.Errorf(codes.Internal, "Could not get sync subcommittee messages: %v", err)
    }
    headRoot, err := vs.HeadFetcher.HeadRoot(ctx)
    if err != nil {
        return nil, status.Errorf(codes.Internal, "Could not get head root: %v", err)
    }

    subCommitteeSize := params.BeaconConfig().SyncCommitteeSize / params.BeaconConfig().SyncCommitteeSubnetCount
    sigs := make([]bls.Signature, 0, subCommitteeSize)
    bits := ethpb.NewSyncCommitteeAggregationBits()
    for _, msg := range msgs {
        if bytes.Equal(headRoot, msg.BlockRoot) {
            idxResp, err := vs.syncSubcommitteeIndex(ctx, msg.ValidatorIndex, req.Slot)
            if err != nil {
                return nil, status.Errorf(codes.Internal, "Could not get sync subcommittee index: %v", err)
            }
            for _, index := range idxResp.Indices {
                i := uint64(index)
                subnetIndex := i / subCommitteeSize
                if subnetIndex == req.SubnetId {
                    bits.SetBitAt(i%subCommitteeSize, true)
                    sig, err := bls.SignatureFromBytes(msg.Signature)
                    if err != nil {
                        return nil, status.Errorf(
                            codes.Internal,
                            "Could not get bls signature from bytes: %v",
                            err,
                        )
                    }
                    sigs = append(sigs, sig)
                }
            }
        }
    }
    aggregatedSig := make([]byte, 96)
    aggregatedSig[0] = 0xC0
    if len(sigs) != 0 {
        aggregatedSig = bls.AggregateSignatures(sigs).Marshal()
    }
    contribution := &ethpb.SyncCommitteeContribution{
        Slot:              req.Slot,
        BlockRoot:         headRoot,
        SubcommitteeIndex: req.SubnetId,
        AggregationBits:   bits,
        Signature:         aggregatedSig,
    }

    return contribution, nil
}

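A note on the 0xC0 default above: the compressed serialization of the BLS12-381 point at infinity sets the compression and infinity flag bits of the first byte (0b1100_0000 = 0xC0) and leaves every other byte zero, so a 96-byte buffer with a leading 0xC0 is the canonical signature for an empty aggregate. A minimal sketch:

package main

import "fmt"

func main() {
    sig := make([]byte, 96)
    sig[0] = 0xC0 // point at infinity: no messages were aggregated
    fmt.Printf("first byte: %#x, rest zero: %v\n", sig[0], allZero(sig[1:]))
}

func allZero(b []byte) bool {
    for _, v := range b {
        if v != 0 {
            return false
        }
    }
    return true
}
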
// SubmitSignedContributionAndProof is called by a sync committee aggregator
// to submit the signed contribution and proof object.
func (vs *Server) SubmitSignedContributionAndProof(
    ctx context.Context, s *ethpb.SignedContributionAndProof,
) (*emptypb.Empty, error) {
    errs, ctx := errgroup.WithContext(ctx)

    // Broadcast and save the contribution into the pool in parallel, as one failure should not affect the other.
    errs.Go(func() error {
        return vs.P2P.Broadcast(ctx, s)
    })

    if err := vs.SyncCommitteePool.SaveSyncCommitteeContribution(s.Message.Contribution); err != nil {
        return nil, err
    }

    // Wait for the p2p broadcast to complete and return the first error (if any).
    err := errs.Wait()
    return &emptypb.Empty{}, err
}

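Both submit handlers use the same errgroup pattern: broadcasts run on their own goroutines while the pool save happens on the calling goroutine, and Wait() surfaces the first broadcast error. A standalone sketch of that pattern (the error text is illustrative only):

package main

import (
    "context"
    "errors"
    "fmt"

    "golang.org/x/sync/errgroup"
)

func main() {
    errs, _ := errgroup.WithContext(context.Background())
    errs.Go(func() error { return nil })                           // e.g. a successful broadcast
    errs.Go(func() error { return errors.New("subnet 2 failed") }) // e.g. a failing broadcast
    // The pool save would happen here, on the calling goroutine,
    // so a broadcast failure cannot prevent it.
    fmt.Println(errs.Wait()) // first non-nil error returned by any goroutine
}
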
@@ -0,0 +1,96 @@
package validator

import (
    "context"
    "testing"

    types "github.com/prysmaticlabs/eth2-types"
    mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
    "github.com/prysmaticlabs/prysm/beacon-chain/core/state"
    "github.com/prysmaticlabs/prysm/beacon-chain/operations/synccommittee"
    mockp2p "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing"
    ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/shared/params"
    "github.com/prysmaticlabs/prysm/shared/testutil"
    "github.com/prysmaticlabs/prysm/shared/testutil/require"
    "google.golang.org/protobuf/types/known/emptypb"
)

func TestGetSyncMessageBlockRoot_OK(t *testing.T) {
    r := []byte{'a'}
    server := &Server{
        HeadFetcher: &mock.ChainService{Root: r},
    }
    res, err := server.GetSyncMessageBlockRoot(context.Background(), &emptypb.Empty{})
    require.NoError(t, err)
    require.DeepEqual(t, r, res.Root)
}

func TestSubmitSyncMessage_OK(t *testing.T) {
    st, _ := testutil.DeterministicGenesisStateAltair(t, 10)
    server := &Server{
        SyncCommitteePool: synccommittee.NewStore(),
        P2P:               &mockp2p.MockBroadcaster{},
        HeadFetcher: &mock.ChainService{
            State: st,
        },
    }
    msg := &ethpb.SyncCommitteeMessage{
        Slot:           1,
        ValidatorIndex: 2,
    }
    _, err := server.SubmitSyncMessage(context.Background(), msg)
    require.NoError(t, err)
    savedMsgs, err := server.SyncCommitteePool.SyncCommitteeMessages(1)
    require.NoError(t, err)
    require.DeepEqual(t, []*ethpb.SyncCommitteeMessage{msg}, savedMsgs)
}

func TestGetSyncSubcommitteeIndex_Ok(t *testing.T) {
    params.SetupTestConfigCleanup(t)
    params.OverrideBeaconConfig(params.MainnetConfig())
    state.SkipSlotCache.Disable()
    defer state.SkipSlotCache.Enable()

    server := &Server{
        HeadFetcher: &mock.ChainService{
            CurrentSyncCommitteeIndices: []types.CommitteeIndex{0},
            NextSyncCommitteeIndices:    []types.CommitteeIndex{1},
        },
    }
    pubKey := [48]byte{}
    // Request slot 0, should get the index 0 for validator 0.
    res, err := server.GetSyncSubcommitteeIndex(context.Background(), &ethpb.SyncSubcommitteeIndexRequest{
        PublicKey: pubKey[:], Slot: types.Slot(0),
    })
    require.NoError(t, err)
    require.DeepEqual(t, []types.CommitteeIndex{0}, res.Indices)

    // Request at period boundary, should get index 1 for validator 0.
    periodBoundary := types.Slot(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*params.BeaconConfig().SlotsPerEpoch - 1
    res, err = server.GetSyncSubcommitteeIndex(context.Background(), &ethpb.SyncSubcommitteeIndexRequest{
        PublicKey: pubKey[:], Slot: periodBoundary,
    })
    require.NoError(t, err)
    require.DeepEqual(t, []types.CommitteeIndex{1}, res.Indices)
}

func TestSubmitSignedContributionAndProof_OK(t *testing.T) {
    server := &Server{
        SyncCommitteePool: synccommittee.NewStore(),
        P2P:               &mockp2p.MockBroadcaster{},
    }
    contribution := &ethpb.SignedContributionAndProof{
        Message: &ethpb.ContributionAndProof{
            Contribution: &ethpb.SyncCommitteeContribution{
                Slot:              1,
                SubcommitteeIndex: 2,
            },
        },
    }
    _, err := server.SubmitSignedContributionAndProof(context.Background(), contribution)
    require.NoError(t, err)
    savedMsgs, err := server.SyncCommitteePool.SyncCommitteeContributions(1)
    require.NoError(t, err)
    require.DeepEqual(t, []*ethpb.SyncCommitteeContribution{contribution.Message.Contribution}, savedMsgs)
}

@@ -22,6 +22,7 @@ import (
    "github.com/prysmaticlabs/prysm/beacon-chain/db"
    "github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
    "github.com/prysmaticlabs/prysm/beacon-chain/operations/slashings"
+   "github.com/prysmaticlabs/prysm/beacon-chain/operations/synccommittee"
    "github.com/prysmaticlabs/prysm/beacon-chain/operations/voluntaryexits"
    "github.com/prysmaticlabs/prysm/beacon-chain/p2p"
    "github.com/prysmaticlabs/prysm/beacon-chain/powchain"
@@ -92,6 +93,7 @@ type Config struct {
    AttestationsPool        attestations.Pool
    ExitPool                voluntaryexits.PoolManager
    SlashingsPool           slashings.PoolManager
+   SyncCommitteeObjectPool synccommittee.Pool
    SyncService             chainSync.Checker
    Broadcaster             p2p.Broadcaster
    PeersFetcher            p2p.PeersProvider
@@ -189,7 +191,9 @@ func (s *Service) Start() {
        PendingDepositsFetcher: s.cfg.PendingDepositFetcher,
        SlashingsPool:          s.cfg.SlashingsPool,
        StateGen:               s.cfg.StateGen,
+       SyncCommitteePool:      s.cfg.SyncCommitteeObjectPool,
    }

    validatorServerV1 := &validator.Server{
        HeadFetcher: s.cfg.HeadFetcher,
        TimeFetcher: s.cfg.GenesisTimeFetcher,
@@ -285,6 +289,7 @@ func (s *Service) Start() {
    })
    if s.cfg.EnableDebugRPCEndpoints {
        log.Info("Enabled debug gRPC endpoints")

        debugServer := &debugv1alpha1.Server{
            GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
            BeaconDB:           s.cfg.BeaconDB,

@@ -12,7 +12,7 @@ import (
    chainMock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
    testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
    "github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
-   eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
+   ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
    "github.com/prysmaticlabs/prysm/shared/bytesutil"
    "github.com/prysmaticlabs/prysm/shared/params"
@@ -25,7 +25,7 @@ func TestGetState(t *testing.T) {
    ctx := context.Background()

    headSlot := types.Slot(123)
-   fillSlot := func(state *eth.BeaconState) error {
+   fillSlot := func(state *ethpb.BeaconState) error {
        state.Slot = headSlot
        return nil
    }
@@ -59,7 +59,7 @@ func TestGetState(t *testing.T) {
    r, err := b.Block.HashTreeRoot()
    require.NoError(t, err)

-   state, err := testutil.NewBeaconState(func(state *eth.BeaconState) error {
+   state, err := testutil.NewBeaconState(func(state *ethpb.BeaconState) error {
        state.BlockRoots[0] = r[:]
        return nil
    })
@@ -67,7 +67,7 @@ func TestGetState(t *testing.T) {
    stateRoot, err := state.HashTreeRoot(ctx)
    require.NoError(t, err)

-   require.NoError(t, db.SaveStateSummary(ctx, &eth.StateSummary{Root: r[:]}))
+   require.NoError(t, db.SaveStateSummary(ctx, &ethpb.StateSummary{Root: r[:]}))
    require.NoError(t, db.SaveGenesisBlockRoot(ctx, r))
    require.NoError(t, db.SaveState(ctx, state, r))

@@ -88,7 +88,7 @@ func TestGetState(t *testing.T) {

    p := StateProvider{
        ChainInfoFetcher: &chainMock.ChainService{
-           FinalizedCheckPoint: &eth.Checkpoint{
+           FinalizedCheckPoint: &ethpb.Checkpoint{
                Root: stateRoot[:],
            },
        },
@@ -108,7 +108,7 @@ func TestGetState(t *testing.T) {

    p := StateProvider{
        ChainInfoFetcher: &chainMock.ChainService{
-           CurrentJustifiedCheckPoint: &eth.Checkpoint{
+           CurrentJustifiedCheckPoint: &ethpb.Checkpoint{
                Root: stateRoot[:],
            },
        },
@@ -187,7 +187,7 @@ func TestGetStateRoot(t *testing.T) {
    ctx := context.Background()

    headSlot := types.Slot(123)
-   fillSlot := func(state *eth.BeaconState) error {
+   fillSlot := func(state *ethpb.BeaconState) error {
        state.Slot = headSlot
        return nil
    }
@@ -218,13 +218,13 @@ func TestGetStateRoot(t *testing.T) {
    r, err := b.Block.HashTreeRoot()
    require.NoError(t, err)

-   state, err := testutil.NewBeaconState(func(state *eth.BeaconState) error {
+   state, err := testutil.NewBeaconState(func(state *ethpb.BeaconState) error {
        state.BlockRoots[0] = r[:]
        return nil
    })
    require.NoError(t, err)

-   require.NoError(t, db.SaveStateSummary(ctx, &eth.StateSummary{Root: r[:]}))
+   require.NoError(t, db.SaveStateSummary(ctx, &ethpb.StateSummary{Root: r[:]}))
    require.NoError(t, db.SaveGenesisBlockRoot(ctx, r))
    require.NoError(t, db.SaveState(ctx, state, r))

@@ -248,7 +248,7 @@ func TestGetStateRoot(t *testing.T) {
    blk.Block.Slot = 40
    root, err := blk.Block.HashTreeRoot()
    require.NoError(t, err)
-   cp := &eth.Checkpoint{
+   cp := &ethpb.Checkpoint{
        Epoch: 5,
        Root:  root[:],
    }
@@ -279,7 +279,7 @@ func TestGetStateRoot(t *testing.T) {
    blk.Block.Slot = 40
    root, err := blk.Block.HashTreeRoot()
    require.NoError(t, err)
-   cp := &eth.Checkpoint{
+   cp := &ethpb.Checkpoint{
        Epoch: 5,
        Root:  root[:],
    }

@@ -24,7 +24,7 @@ var (
    host                    = flag.String("host", "127.0.0.1", "Host to serve on")
    debug                   = flag.Bool("debug", false, "Enable debug logging")
    allowedOrigins          = flag.String("corsdomain", "localhost:4242", "A comma separated list of CORS domains to allow")
-   enableDebugRPCEndpoints = flag.Bool("enable-debug-rpc-endpoints", false, "Enable debug rpc endpoints such as /eth/v1alpha1/beacon/state")
+   enableDebugRPCEndpoints = flag.Bool("enable-debug-rpc-endpoints", false, "Enable debug rpc endpoints such as /prysm/v1alpha1/beacon/state")
    grpcMaxMsgSize          = flag.Int("grpc-max-msg-size", 1<<22, "Integer to define max receive message call size")
)

@@ -15,6 +15,7 @@ go_library(
        "//shared/benchutil:__pkg__",
        "//shared/depositutil:__subpackages__",
        "//shared/testutil:__pkg__",
+       "//shared/testutil/altair:__pkg__",
        "//slasher/rpc:__subpackages__",
        "//spectest:__subpackages__",
        "//tools/benchmark-files-gen:__pkg__",

@@ -21,6 +21,7 @@ go_library(
        "//fuzz:__pkg__",
    ],
    deps = [
+       "//beacon-chain/core/altair:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/core/state:go_default_library",
        "//beacon-chain/db:go_default_library",
@@ -30,6 +31,7 @@ go_library(
        "//proto/prysm/v1alpha1/block:go_default_library",
        "//shared/bytesutil:go_default_library",
        "//shared/params:go_default_library",
+       "//shared/version:go_default_library",
        "@com_github_hashicorp_golang_lru//:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_prometheus_client_golang//prometheus:go_default_library",
@@ -68,6 +70,7 @@ go_test(
        "//shared/testutil:go_default_library",
        "//shared/testutil/assert:go_default_library",
        "//shared/testutil/require:go_default_library",
+       "//shared/version:go_default_library",
        "@com_github_prysmaticlabs_eth2_types//:go_default_library",
        "@com_github_sirupsen_logrus//hooks/test:go_default_library",
        "@org_golang_google_protobuf//proto:go_default_library",

@@ -5,7 +5,7 @@ import (

    types "github.com/prysmaticlabs/eth2-types"
    "github.com/prysmaticlabs/prysm/beacon-chain/state"
-   statepb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
+   ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
)

@@ -86,7 +86,7 @@ func (m *MockStateManager) StateBySlot(ctx context.Context, slot types.Slot) (st
func (m *MockStateManager) RecoverStateSummary(
    ctx context.Context,
    blockRoot [32]byte,
-) (*statepb.StateSummary, error) {
+) (*ethpb.StateSummary, error) {
    panic("implement me")
}

@@ -6,11 +6,15 @@ import (

    "github.com/pkg/errors"
    types "github.com/prysmaticlabs/eth2-types"
+   "github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
    "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
+   transition "github.com/prysmaticlabs/prysm/beacon-chain/core/state"
    "github.com/prysmaticlabs/prysm/beacon-chain/db/filters"
    "github.com/prysmaticlabs/prysm/beacon-chain/state"
    "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
    "github.com/prysmaticlabs/prysm/shared/bytesutil"
    "github.com/prysmaticlabs/prysm/shared/params"
+   "github.com/prysmaticlabs/prysm/shared/version"
    "go.opencensus.io/trace"
)

@@ -23,7 +27,6 @@ func (s *State) ReplayBlocks(
) (state.BeaconState, error) {
    ctx, span := trace.StartSpan(ctx, "stateGen.ReplayBlocks")
    defer span.End()

    var err error
    log.Debugf("Replaying state from slot %d till slot %d", state.Slot(), targetSlot)
    // The input block list is sorted in decreasing slot order.
@@ -147,6 +150,16 @@ func executeStateTransitionStateGen(
    if err != nil {
        return nil, errors.Wrap(err, "could not process block")
    }
+   if signed.Version() == version.Altair {
+       sa, err := signed.Block().Body().SyncAggregate()
+       if err != nil {
+           return nil, err
+       }
+       state, err = altair.ProcessSyncAggregate(state, sa)
+       if err != nil {
+           return nil, err
+       }
+   }

    return state, nil
}
@@ -177,14 +190,30 @@ func processSlotsStateGen(ctx context.Context, state state.BeaconState, slot typ
            return nil, errors.Wrap(err, "could not process slot")
        }
        if transition.CanProcessEpoch(state) {
-           state, err = transition.ProcessEpochPrecompute(ctx, state)
-           if err != nil {
-               return nil, errors.Wrap(err, "could not process epoch with optimizations")
+           switch state.Version() {
+           case version.Phase0:
+               state, err = transition.ProcessEpochPrecompute(ctx, state)
+               if err != nil {
+                   return nil, errors.Wrap(err, "could not process epoch with optimizations")
+               }
+           case version.Altair:
+               state, err = altair.ProcessEpoch(ctx, state)
+               if err != nil {
+                   return nil, errors.Wrap(err, "could not process epoch with optimization")
+               }
+           default:
+               return nil, errors.New("beacon state should have a version")
            }
        }
        if err := state.SetSlot(state.Slot() + 1); err != nil {
            return nil, err
        }
+       if helpers.IsEpochStart(state.Slot()) && helpers.SlotToEpoch(state.Slot()) == params.BeaconConfig().AltairForkEpoch {
+           state, err = altair.UpgradeToAltair(ctx, state)
+           if err != nil {
+               return nil, err
+           }
+       }
    }

    return state, nil

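The fork-boundary condition added to processSlotsStateGen fires exactly once: after SetSlot advances the state into the first slot of AltairForkEpoch. A standalone sketch, assuming mainnet-like constants (SLOTS_PER_EPOCH = 32; the fork epoch 74240 below is the mainnet Altair epoch, used here only as an illustration):

package main

import "fmt"

const slotsPerEpoch = 32

func isEpochStart(slot uint64) bool  { return slot%slotsPerEpoch == 0 }
func slotToEpoch(slot uint64) uint64 { return slot / slotsPerEpoch }

func main() {
    altairForkEpoch := uint64(74240) // mainnet Altair fork epoch (assumed)
    forkSlot := altairForkEpoch * slotsPerEpoch
    // Only the exact first slot of the fork epoch triggers UpgradeToAltair.
    for _, s := range []uint64{forkSlot - 1, forkSlot, forkSlot + 1} {
        fmt.Println(s, isEpochStart(s) && slotToEpoch(s) == altairForkEpoch)
    }
    // Output: false true false
}
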
@@ -9,7 +9,6 @@ import (
    "github.com/prysmaticlabs/prysm/beacon-chain/db"
    testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
    ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
-   statepb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
    "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
    "github.com/prysmaticlabs/prysm/shared/bytesutil"
@@ -17,6 +16,7 @@ import (
    "github.com/prysmaticlabs/prysm/shared/testutil"
    "github.com/prysmaticlabs/prysm/shared/testutil/assert"
    "github.com/prysmaticlabs/prysm/shared/testutil/require"
+   "github.com/prysmaticlabs/prysm/shared/version"
    "google.golang.org/protobuf/proto"
)

@@ -40,7 +40,7 @@ func TestReplayBlocks_AllSkipSlots(t *testing.T) {
    copy(mockRoot[:], "hello-world")
    cp.Root = mockRoot[:]
    require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(cp))
-   require.NoError(t, beaconState.AppendCurrentEpochAttestations(&statepb.PendingAttestation{}))
+   require.NoError(t, beaconState.AppendCurrentEpochAttestations(&ethpb.PendingAttestation{}))

    service := New(beaconDB)
    targetSlot := params.BeaconConfig().SlotsPerEpoch - 1
@@ -69,7 +69,7 @@ func TestReplayBlocks_SameSlot(t *testing.T) {
    copy(mockRoot[:], "hello-world")
    cp.Root = mockRoot[:]
    require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(cp))
-   require.NoError(t, beaconState.AppendCurrentEpochAttestations(&statepb.PendingAttestation{}))
+   require.NoError(t, beaconState.AppendCurrentEpochAttestations(&ethpb.PendingAttestation{}))

    service := New(beaconDB)
    targetSlot := beaconState.Slot()
@@ -99,7 +99,7 @@ func TestReplayBlocks_LowerSlotBlock(t *testing.T) {
    copy(mockRoot[:], "hello-world")
    cp.Root = mockRoot[:]
    require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(cp))
-   require.NoError(t, beaconState.AppendCurrentEpochAttestations(&statepb.PendingAttestation{}))
+   require.NoError(t, beaconState.AppendCurrentEpochAttestations(&ethpb.PendingAttestation{}))

    service := New(beaconDB)
    targetSlot := beaconState.Slot()
@@ -110,6 +110,34 @@ func TestReplayBlocks_LowerSlotBlock(t *testing.T) {
    assert.Equal(t, targetSlot, newState.Slot(), "Did not advance slots")
}

+func TestReplayBlocks_ThroughForkBoundary(t *testing.T) {
+   params.SetupTestConfigCleanup(t)
+   bCfg := params.BeaconConfig()
+   bCfg.AltairForkEpoch = 1
+   bCfg.ForkVersionSchedule[bytesutil.ToBytes4(bCfg.AltairForkVersion)] = 1
+   params.OverrideBeaconConfig(bCfg)
+
+   beaconState, _ := testutil.DeterministicGenesisState(t, 32)
+   genesisBlock := blocks.NewGenesisBlock([]byte{})
+   bodyRoot, err := genesisBlock.Block.HashTreeRoot()
+   require.NoError(t, err)
+   err = beaconState.SetLatestBlockHeader(&ethpb.BeaconBlockHeader{
+       Slot:       genesisBlock.Block.Slot,
+       ParentRoot: genesisBlock.Block.ParentRoot,
+       StateRoot:  params.BeaconConfig().ZeroHash[:],
+       BodyRoot:   bodyRoot[:],
+   })
+   require.NoError(t, err)
+
+   service := New(testDB.SetupDB(t))
+   targetSlot := params.BeaconConfig().SlotsPerEpoch
+   newState, err := service.ReplayBlocks(context.Background(), beaconState, []block.SignedBeaconBlock{}, targetSlot)
+   require.NoError(t, err)
+
+   // Verify state is version Altair.
+   assert.Equal(t, version.Altair, newState.Version())
+}
+
func TestLoadBlocks_FirstBranch(t *testing.T) {
    beaconDB := testDB.SetupDB(t)
    ctx := context.Background()
@@ -740,7 +768,7 @@ func TestLoadFinalizedBlocks(t *testing.T) {
    filteredBlocks, err := s.loadFinalizedBlocks(ctx, 0, 8)
    require.NoError(t, err)
    require.Equal(t, 0, len(filteredBlocks))
-   require.NoError(t, beaconDB.SaveStateSummary(ctx, &statepb.StateSummary{Root: roots[8][:]}))
+   require.NoError(t, beaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: roots[8][:]}))

    require.NoError(t, s.beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Root: roots[8][:]}))
    filteredBlocks, err = s.loadFinalizedBlocks(ctx, 0, 8)

@@ -19,8 +19,17 @@ go_library(
    importpath = "github.com/prysmaticlabs/prysm/beacon-chain/state/v2",
    visibility = [
        "//beacon-chain:__subpackages__",
        "//fuzz:__pkg__",
        "//proto/testing:__subpackages__",
        "//shared/aggregation:__subpackages__",
        "//shared/benchutil:__pkg__",
        "//shared/depositutil:__subpackages__",
        "//shared/interop:__subpackages__",
        "//shared/testutil:__pkg__",
        "//slasher/rpc:__subpackages__",
        "//spectest:__subpackages__",
        "//tools/benchmark-files-gen:__pkg__",
        "//tools/pcli:__pkg__",
    ],
    deps = [
        "//beacon-chain/state:go_default_library",
@@ -53,16 +62,20 @@ go_test(
        "deprecated_getters_test.go",
        "deprecated_setters_test.go",
        "getters_test.go",
+       "state_trie_block_box_test.go",
        "state_trie_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
        "//beacon-chain/state:go_default_library",
        "//beacon-chain/state/stateutil:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//shared/bytesutil:go_default_library",
        "//shared/params:go_default_library",
        "//shared/testutil:go_default_library",
        "//shared/testutil/assert:go_default_library",
        "//shared/testutil/require:go_default_library",
        "@com_github_prysmaticlabs_eth2_types//:go_default_library",
        "@org_golang_google_protobuf//proto:go_default_library",
    ],
)

275	beacon-chain/state/v2/state_trie_block_box_test.go	Normal file
@@ -0,0 +1,275 @@
package v2_test

import (
    "bytes"
    "context"
    "testing"

    "github.com/prysmaticlabs/prysm/beacon-chain/state"
    stateAltair "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
    ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/shared/params"
    "github.com/prysmaticlabs/prysm/shared/testutil"
    "github.com/prysmaticlabs/prysm/shared/testutil/assert"
    "github.com/prysmaticlabs/prysm/shared/testutil/require"
    "google.golang.org/protobuf/proto"
)

func TestInitializeFromProto(t *testing.T) {
    testState, _ := testutil.DeterministicGenesisStateAltair(t, 64)
    pbState, err := stateAltair.ProtobufBeaconState(testState.InnerStateUnsafe())
    require.NoError(t, err)
    type test struct {
        name  string
        state *ethpb.BeaconStateAltair
        error string
    }
    initTests := []test{
        {
            name:  "nil state",
            state: nil,
            error: "received nil state",
        },
        {
            name: "nil validators",
            state: &ethpb.BeaconStateAltair{
                Slot:       4,
                Validators: nil,
            },
        },
        {
            name:  "empty state",
            state: &ethpb.BeaconStateAltair{},
        },
        {
            name:  "full state",
            state: pbState,
        },
    }
    for _, tt := range initTests {
        t.Run(tt.name, func(t *testing.T) {
            _, err := stateAltair.InitializeFromProto(tt.state)
            if tt.error != "" {
                require.ErrorContains(t, tt.error, err)
            } else {
                require.NoError(t, err)
            }
        })
    }
}

func TestInitializeFromProtoUnsafe(t *testing.T) {
    testState, _ := testutil.DeterministicGenesisStateAltair(t, 64)
    pbState, err := stateAltair.ProtobufBeaconState(testState.InnerStateUnsafe())
    require.NoError(t, err)
    type test struct {
        name  string
        state *ethpb.BeaconStateAltair
        error string
    }
    initTests := []test{
        {
            name:  "nil state",
            state: nil,
            error: "received nil state",
        },
        {
            name: "nil validators",
            state: &ethpb.BeaconStateAltair{
                Slot:       4,
                Validators: nil,
            },
        },
        {
            name:  "empty state",
            state: &ethpb.BeaconStateAltair{},
        },
        {
            name:  "full state",
            state: pbState,
        },
    }
    for _, tt := range initTests {
        t.Run(tt.name, func(t *testing.T) {
            _, err := stateAltair.InitializeFromProtoUnsafe(tt.state)
            if tt.error != "" {
                assert.ErrorContains(t, tt.error, err)
            } else {
                assert.NoError(t, err)
            }
        })
    }
}

func TestBeaconState_HashTreeRoot(t *testing.T) {
    t.Skip("TODO: Fix FSSZ HTR for sync committee and participation roots")
    testState, _ := testutil.DeterministicGenesisStateAltair(t, 64)
    type test struct {
        name        string
        stateModify func(beaconState state.BeaconStateAltair) (state.BeaconStateAltair, error)
        error       string
    }
    initTests := []test{
        {
            name: "unchanged state",
            stateModify: func(beaconState state.BeaconStateAltair) (state.BeaconStateAltair, error) {
                return beaconState, nil
            },
            error: "",
        },
        {
            name: "different slot",
            stateModify: func(beaconState state.BeaconStateAltair) (state.BeaconStateAltair, error) {
                if err := beaconState.SetSlot(5); err != nil {
                    return nil, err
                }
                return beaconState, nil
            },
            error: "",
        },
        {
            name: "different validator balance",
            stateModify: func(beaconState state.BeaconStateAltair) (state.BeaconStateAltair, error) {
                val, err := beaconState.ValidatorAtIndex(5)
                if err != nil {
                    return nil, err
                }
                val.EffectiveBalance = params.BeaconConfig().MaxEffectiveBalance - params.BeaconConfig().EffectiveBalanceIncrement
                if err := beaconState.UpdateValidatorAtIndex(5, val); err != nil {
                    return nil, err
                }
                return beaconState, nil
            },
            error: "",
        },
    }

    var err error
    var oldHTR []byte
    for _, tt := range initTests {
        t.Run(tt.name, func(t *testing.T) {
            testState, err = tt.stateModify(testState)
            assert.NoError(t, err)
            root, err := testState.HashTreeRoot(context.Background())
            if err == nil && tt.error != "" {
                t.Errorf("Expected error, expected %v, received %v", tt.error, err)
            }
            pbState, err := stateAltair.ProtobufBeaconState(testState.InnerStateUnsafe())
            require.NoError(t, err)
            genericHTR, err := pbState.HashTreeRoot()
            if err == nil && tt.error != "" {
                t.Errorf("Expected error, expected %v, received %v", tt.error, err)
            }
            assert.DeepNotEqual(t, []byte{}, root[:], "Received empty hash tree root")
            assert.DeepEqual(t, genericHTR[:], root[:], "Expected hash tree root to match generic")
            if len(oldHTR) != 0 && bytes.Equal(root[:], oldHTR) {
                t.Errorf("Expected HTR to change, received %#x == old %#x", root, oldHTR)
            }
            oldHTR = root[:]
        })
    }
}

func TestBeaconState_HashTreeRoot_FieldTrie(t *testing.T) {
    t.Skip("TODO: Fix FSSZ HTR for sync committee and participation roots")
    testState, _ := testutil.DeterministicGenesisStateAltair(t, 64)

    type test struct {
        name        string
        stateModify func(state.BeaconStateAltair) (state.BeaconStateAltair, error)
        error       string
    }
    initTests := []test{
        {
            name: "unchanged state",
            stateModify: func(beaconState state.BeaconStateAltair) (state.BeaconStateAltair, error) {
                return beaconState, nil
            },
            error: "",
        },
        {
            name: "different slot",
            stateModify: func(beaconState state.BeaconStateAltair) (state.BeaconStateAltair, error) {
                if err := beaconState.SetSlot(5); err != nil {
                    return nil, err
                }
                return beaconState, nil
            },
            error: "",
        },
        {
            name: "different validator balance",
            stateModify: func(beaconState state.BeaconStateAltair) (state.BeaconStateAltair, error) {
                val, err := beaconState.ValidatorAtIndex(5)
                if err != nil {
                    return nil, err
                }
                val.EffectiveBalance = params.BeaconConfig().MaxEffectiveBalance - params.BeaconConfig().EffectiveBalanceIncrement
                if err := beaconState.UpdateValidatorAtIndex(5, val); err != nil {
                    return nil, err
                }
                return beaconState, nil
            },
            error: "",
        },
    }

    var err error
    var oldHTR []byte
    for _, tt := range initTests {
        t.Run(tt.name, func(t *testing.T) {
            testState, err = tt.stateModify(testState)
            assert.NoError(t, err)
            root, err := testState.HashTreeRoot(context.Background())
            if err == nil && tt.error != "" {
                t.Errorf("Expected error, expected %v, received %v", tt.error, err)
            }
            pbState, err := stateAltair.ProtobufBeaconState(testState.InnerStateUnsafe())
            require.NoError(t, err)
            genericHTR, err := pbState.HashTreeRoot()
            if err == nil && tt.error != "" {
                t.Errorf("Expected error, expected %v, received %v", tt.error, err)
            }
            assert.DeepNotEqual(t, []byte{}, root[:], "Received empty hash tree root")
            assert.DeepEqual(t, genericHTR[:], root[:], "Expected hash tree root to match generic")
            if len(oldHTR) != 0 && bytes.Equal(root[:], oldHTR) {
                t.Errorf("Expected HTR to change, received %#x == old %#x", root, oldHTR)
            }
            oldHTR = root[:]
        })
    }
}

func TestBeaconStateAltair_ProtoBeaconStateCompatibility(t *testing.T) {
    ctx := context.Background()
    s, _ := testutil.DeterministicGenesisStateAltair(t, 6)
    inner := s.InnerStateUnsafe()
    genesis, err := stateAltair.ProtobufBeaconState(inner)
    require.NoError(t, err)
    customState, err := stateAltair.InitializeFromProto(genesis)
    require.NoError(t, err)
    cloned, ok := proto.Clone(genesis).(*ethpb.BeaconStateAltair)
    assert.Equal(t, true, ok, "Object is not of type *ethpb.BeaconStateAltair")
    custom := customState.CloneInnerState()
    assert.DeepSSZEqual(t, cloned, custom)
    r1, err := customState.HashTreeRoot(ctx)
    require.NoError(t, err)
    beaconState, err := stateAltair.InitializeFromProto(genesis)
    require.NoError(t, err)
    r2, err := beaconState.HashTreeRoot(context.Background())
    require.NoError(t, err)
    assert.Equal(t, r1, r2, "Mismatched roots")

    // We then write to the state and compare hash tree roots again.
    balances := genesis.Balances
    balances[0] = 3823
    require.NoError(t, customState.SetBalances(balances))
    r1, err = customState.HashTreeRoot(ctx)
    require.NoError(t, err)
    genesis.Balances = balances
    beaconState, err = stateAltair.InitializeFromProto(genesis)
    require.NoError(t, err)
    r2, err = beaconState.HashTreeRoot(context.Background())
    require.NoError(t, err)
    assert.Equal(t, r1, r2, "Mismatched roots")
}

@@ -8,6 +8,7 @@ go_library(
        "decode_pubsub.go",
        "doc.go",
        "error.go",
+       "fork_watcher.go",
        "fuzz_exports.go",  # keep
        "log.go",
        "metrics.go",
@@ -29,12 +30,17 @@ go_library(
        "subscriber_beacon_attestation.go",
        "subscriber_beacon_blocks.go",
        "subscriber_handlers.go",
+       "subscriber_sync_committee_message.go",
+       "subscriber_sync_contribution_proof.go",
        "subscription_topic_handler.go",
        "utils.go",
        "validate_aggregate_proof.go",
        "validate_attester_slashing.go",
        "validate_beacon_attestation.go",
        "validate_beacon_blocks.go",
        "validate_proposer_slashing.go",
+       "validate_sync_committee_message.go",
+       "validate_sync_contribution_proof.go",
        "validate_voluntary_exit.go",
    ],
    importpath = "github.com/prysmaticlabs/prysm/beacon-chain/sync",
@@ -45,6 +51,7 @@ go_library(
    deps = [
        "//beacon-chain/blockchain:go_default_library",
        "//beacon-chain/cache:go_default_library",
+       "//beacon-chain/core/altair:go_default_library",
        "//beacon-chain/core/blocks:go_default_library",
        "//beacon-chain/core/feed:go_default_library",
        "//beacon-chain/core/feed/block:go_default_library",
@@ -57,6 +64,7 @@ go_library(
        "//beacon-chain/db/filters:go_default_library",
        "//beacon-chain/operations/attestations:go_default_library",
        "//beacon-chain/operations/slashings:go_default_library",
+       "//beacon-chain/operations/synccommittee:go_default_library",
        "//beacon-chain/operations/voluntaryexits:go_default_library",
        "//beacon-chain/p2p:go_default_library",
        "//beacon-chain/p2p/encoder:go_default_library",
@@ -85,6 +93,8 @@ go_library(
        "//shared/sszutil:go_default_library",
        "//shared/timeutils:go_default_library",
        "//shared/traceutil:go_default_library",
+       "//shared/version:go_default_library",
+       "@com_github_ferranbt_fastssz//:go_default_library",
        "@com_github_hashicorp_golang_lru//:go_default_library",
        "@com_github_kevinms_leakybucket_go//:go_default_library",
        "@com_github_libp2p_go_libp2p_core//:go_default_library",
@@ -98,6 +108,7 @@ go_library(
        "@com_github_prometheus_client_golang//prometheus:go_default_library",
        "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
        "@com_github_prysmaticlabs_eth2_types//:go_default_library",
        "@com_github_prysmaticlabs_go_bitfield//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@com_github_trailofbits_go_mutexasserts//:go_default_library",
        "@io_opencensus_go//trace:go_default_library",
@@ -112,11 +123,13 @@ go_test(
        "context_test.go",
        "decode_pubsub_test.go",
        "error_test.go",
+       "fork_watcher_test.go",
        "pending_attestations_queue_test.go",
        "pending_blocks_queue_test.go",
        "rate_limiter_test.go",
        "rpc_beacon_blocks_by_range_test.go",
        "rpc_beacon_blocks_by_root_test.go",
        "rpc_chunked_response_test.go",
        "rpc_goodbye_test.go",
        "rpc_metadata_test.go",
        "rpc_ping_test.go",
@@ -127,6 +140,7 @@ go_test(
        "subscriber_beacon_aggregate_proof_test.go",
        "subscriber_beacon_blocks_test.go",
        "subscriber_test.go",
+       "subscription_topic_handler_test.go",
        "sync_test.go",
        "utils_test.go",
        "validate_aggregate_proof_test.go",
@@ -134,13 +148,17 @@ go_test(
        "validate_beacon_attestation_test.go",
        "validate_beacon_blocks_test.go",
        "validate_proposer_slashing_test.go",
+       "validate_sync_committee_message_test.go",
+       "validate_sync_contribution_proof_test.go",
        "validate_voluntary_exit_test.go",
    ],
    embed = [":go_default_library"],
    shard_count = 4,
    deps = [
        "//beacon-chain/blockchain:go_default_library",
        "//beacon-chain/blockchain/testing:go_default_library",
        "//beacon-chain/cache:go_default_library",
+       "//beacon-chain/core/altair:go_default_library",
        "//beacon-chain/core/feed:go_default_library",
        "//beacon-chain/core/feed/state:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
@@ -162,12 +180,14 @@ go_test(
        "//cmd/beacon-chain/flags:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//proto/prysm/v1alpha1/block:go_default_library",
        "//proto/prysm/v1alpha1/metadata:go_default_library",
        "//proto/prysm/v1alpha1/wrapper:go_default_library",
        "//shared/abool:go_default_library",
        "//shared/attestationutil:go_default_library",
        "//shared/bls:go_default_library",
        "//shared/bytesutil:go_default_library",
        "//shared/copyutil:go_default_library",
        "//shared/p2putils:go_default_library",
        "//shared/params:go_default_library",
        "//shared/rand:go_default_library",
        "//shared/sszutil:go_default_library",
@@ -177,6 +197,7 @@ go_test(
        "//shared/timeutils:go_default_library",
        "@com_github_d4l3k_messagediff//:go_default_library",
        "@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
        "@com_github_golang_snappy//:go_default_library",
        "@com_github_hashicorp_golang_lru//:go_default_library",
        "@com_github_kevinms_leakybucket_go//:go_default_library",
        "@com_github_libp2p_go_libp2p_core//:go_default_library",

@@ -1,15 +1,18 @@
package sync

import (
-   "errors"

    "github.com/libp2p/go-libp2p-core/network"
+   "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
    "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
    "github.com/prysmaticlabs/prysm/beacon-chain/p2p"
)

// digestLength specifies the fixed size of the stream context (a fork digest).
const digestLength = 4

// Writes the peer's current context for the expected payload to the stream.
-func writeContextToStream(stream network.Stream, chain blockchain.ChainInfoFetcher) error {
+func writeContextToStream(objCtx []byte, stream network.Stream, chain blockchain.ChainInfoFetcher) error {
    rpcCtx, err := rpcContext(stream, chain)
    if err != nil {
        return err
@@ -18,6 +21,10 @@ func writeContextToStream(stream network.Stream, chain blockchain.ChainInfoFetch
    if len(rpcCtx) == 0 {
        return nil
    }
+   // Always choose the object's context when writing to the stream.
+   if objCtx != nil {
+       rpcCtx = objCtx
+   }
    _, err = stream.Write(rpcCtx)
    return err
}
@@ -32,7 +39,7 @@ func readContextFromStream(stream network.Stream, chain blockchain.ChainInfoFetc
        return []byte{}, nil
    }
    // Read the context (fork digest) from the stream.
-   b := make([]byte, 4)
+   b := make([]byte, digestLength)
    if _, err := stream.Read(b); err != nil {
        return nil, err
    }
@@ -49,7 +56,27 @@ func rpcContext(stream network.Stream, chain blockchain.ChainInfoFetcher) ([]byt
    case p2p.SchemaVersionV1:
        // Return an empty context for a v1 method.
        return []byte{}, nil
+   case p2p.SchemaVersionV2:
+       currFork := chain.CurrentFork()
+       genVersion := chain.GenesisValidatorRoot()
+       digest, err := helpers.ComputeForkDigest(currFork.CurrentVersion, genVersion[:])
+       if err != nil {
+           return nil, err
+       }
+       return digest[:], nil
    default:
        return nil, errors.New("invalid schema version registered for topic")
    }
}

+// Validates that the rpc topic matches the provided version.
+func validateVersion(version string, stream network.Stream) error {
+   _, _, streamVersion, err := p2p.TopicDeconstructor(string(stream.Protocol()))
+   if err != nil {
+       return err
+   }
+   if streamVersion != version {
+       return errors.Errorf("stream version of %s doesn't match provided version %s", streamVersion, version)
+   }
+   return nil
+}

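For context on ComputeForkDigest as used by rpcContext above: per the consensus spec, the digest is the first 4 bytes of hash_tree_root(ForkData(current_version, genesis_validators_root)), which for these two fixed-size fields reduces to a single SHA-256 over two 32-byte chunks. A standalone sketch of that spec definition (not the Prysm helper itself):

package main

import (
    "crypto/sha256"
    "fmt"
)

func computeForkDigest(currentVersion [4]byte, genesisValidatorsRoot [32]byte) [4]byte {
    // hash_tree_root of a 2-field container = sha256(chunk0 || chunk1),
    // where each fixed-size field occupies one zero-padded 32-byte chunk.
    var chunk0 [32]byte
    copy(chunk0[:], currentVersion[:])
    root := sha256.Sum256(append(chunk0[:], genesisValidatorsRoot[:]...))
    var digest [4]byte
    copy(digest[:], root[:4])
    return digest
}

func main() {
    var genRoot [32]byte // zero validators root, e.g. a test network
    fmt.Printf("%#x\n", computeForkDigest([4]byte{0, 0, 0, 0}, genRoot))
}
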
@@ -30,7 +30,7 @@ func TestContextWrite_NoWrites(t *testing.T) {
    assert.NoError(t, err)

    // Nothing will be written to the stream.
-   assert.NoError(t, writeContextToStream(strm, nil))
+   assert.NoError(t, writeContextToStream(nil, strm, nil))
    if testutil.WaitTimeout(wg, 1*time.Second) {
        t.Fatal("Did not receive stream within 1 sec")
    }

@@ -1,32 +1,60 @@
package sync

import (
-   "errors"
    "reflect"
    "strings"

    ssz "github.com/ferranbt/fastssz"
    pubsub "github.com/libp2p/go-libp2p-pubsub"
+   "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/beacon-chain/p2p"
    eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
    ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
    "google.golang.org/protobuf/proto"
)

var errNilPubsubMessage = errors.New("nil pubsub message")
var errInvalidTopic = errors.New("invalid topic format")

-func (s *Service) decodePubsubMessage(msg *pubsub.Message) (proto.Message, error) {
+func (s *Service) decodePubsubMessage(msg *pubsub.Message) (ssz.Unmarshaler, error) {
    if msg == nil || msg.Topic == nil || *msg.Topic == "" {
        return nil, errNilPubsubMessage
    }
    topic := *msg.Topic
    fDigest, err := p2p.ExtractGossipDigest(topic)
    if err != nil {
        return nil, errors.Wrapf(err, "extraction failed for topic: %s", topic)
    }
    topic = strings.TrimSuffix(topic, s.cfg.P2P.Encoding().ProtocolSuffix())
-   topic, err := s.replaceForkDigest(topic)
+   topic, err = s.replaceForkDigest(topic)
    if err != nil {
        return nil, err
    }
    // Specially handle subnet messages.
    switch {
    case strings.Contains(topic, p2p.GossipAttestationMessage):
        topic = p2p.GossipTypeMapping[reflect.TypeOf(&eth.Attestation{})]
    // Given that both sync message related subnets have the same message name, we have to
    // differentiate them below.
    case strings.Contains(topic, p2p.GossipSyncCommitteeMessage) && !strings.Contains(topic, p2p.SyncContributionAndProofSubnetTopicFormat):
        topic = p2p.GossipTypeMapping[reflect.TypeOf(&ethpb.SyncCommitteeMessage{})]
    }

    base := p2p.GossipTopicMappings(topic, 0)
    if base == nil {
        return nil, p2p.ErrMessageNotMapped
    }
-   m := proto.Clone(base)
+   m, ok := proto.Clone(base).(ssz.Unmarshaler)
+   if !ok {
+       return nil, errors.Errorf("message of %T does not support marshaller interface", base)
+   }
    // Handle different message types across forks.
    if topic == p2p.BlockSubnetTopicFormat {
        m, err = extractBlockDataType(fDigest[:], s.cfg.Chain)
        if err != nil {
            return nil, err
        }
    }
    if err := s.cfg.P2P.Encoding().DecodeGossip(msg.Data, m); err != nil {
        return nil, err
    }

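decodePubsubMessage leans on the gossip topic layout /eth2/<fork-digest-hex>/<message-name>/<encoding>, from which the fork digest is the second path segment. A standalone sketch of that extraction (a hypothetical helper, not the actual p2p.ExtractGossipDigest):

package main

import (
    "encoding/hex"
    "errors"
    "fmt"
    "strings"
)

func extractGossipDigest(topic string) ([4]byte, error) {
    var digest [4]byte
    parts := strings.Split(strings.TrimPrefix(topic, "/"), "/")
    // Expect at least "eth2" + digest + message name.
    if len(parts) < 3 || parts[0] != "eth2" {
        return digest, errors.New("invalid topic format")
    }
    raw, err := hex.DecodeString(parts[1]) // 8 hex chars -> 4 bytes
    if err != nil || len(raw) != 4 {
        return digest, errors.New("invalid fork digest segment")
    }
    copy(digest[:], raw)
    return digest, nil
}

func main() {
    d, err := extractGossipDigest("/eth2/abababab/beacon_block/ssz_snappy")
    fmt.Println(d, err) // [171 171 171 171] <nil>
}

Note how the test below uses "/eth2/abababab/foo/ssz_snappy", a topic whose digest parses but whose message name maps to nothing.
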
@@ -2,25 +2,34 @@ package sync

import (
    "bytes"
    "fmt"
    "reflect"
    "strings"
    "testing"
    "time"

    "github.com/d4l3k/messagediff"
    pubsub "github.com/libp2p/go-libp2p-pubsub"
    pb "github.com/libp2p/go-libp2p-pubsub/pb"
    mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
    "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
    "github.com/prysmaticlabs/prysm/beacon-chain/p2p"
    p2ptesting "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing"
    ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
    "github.com/prysmaticlabs/prysm/shared/params"
    "github.com/prysmaticlabs/prysm/shared/testutil"
-   "google.golang.org/protobuf/proto"
+   "github.com/prysmaticlabs/prysm/shared/testutil/require"
)

func TestService_decodePubsubMessage(t *testing.T) {
    digest, err := helpers.ComputeForkDigest(params.BeaconConfig().GenesisForkVersion, make([]byte, 32))
    require.NoError(t, err)
    tests := []struct {
        name    string
        topic   string
        input   *pubsub.Message
-       want    proto.Message
+       want    interface{}
        wantErr error
    }{
        {
@@ -44,12 +53,12 @@ func TestService_decodePubsubMessage(t *testing.T) {
        },
        {
            name:    "topic not mapped to any message type",
-           topic:   "/eth2/abcdef/foo",
+           topic:   "/eth2/abababab/foo/ssz_snappy",
            wantErr: p2p.ErrMessageNotMapped,
        },
        {
            name:  "valid message -- beacon block",
-           topic: p2p.GossipTypeMapping[reflect.TypeOf(&ethpb.SignedBeaconBlock{})],
+           topic: fmt.Sprintf(p2p.GossipTypeMapping[reflect.TypeOf(&ethpb.SignedBeaconBlock{})], digest),
            input: &pubsub.Message{
                Message: &pb.Message{
                    Data: func() []byte {
@@ -62,13 +71,13 @@ func TestService_decodePubsubMessage(t *testing.T) {
                },
            },
            wantErr: nil,
-           want:    testutil.NewBeaconBlock(),
+           want:    wrapper.WrappedPhase0SignedBeaconBlock(testutil.NewBeaconBlock()),
        },
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            s := &Service{
-               cfg: &Config{P2P: p2ptesting.NewTestP2P(t)},
+               cfg: &Config{P2P: p2ptesting.NewTestP2P(t), Chain: &mock.ChainService{ValidatorsRoot: [32]byte{}, Genesis: time.Now()}},
            }
            if tt.topic != "" {
                if tt.input == nil {
@@ -79,7 +88,7 @@ func TestService_decodePubsubMessage(t *testing.T) {
                tt.input.Message.Topic = &tt.topic
            }
            got, err := s.decodePubsubMessage(tt.input)
-           if err != tt.wantErr {
+           if err != tt.wantErr && !strings.Contains(err.Error(), tt.wantErr.Error()) {
                t.Errorf("decodePubsubMessage() error = %v, wantErr %v", err, tt.wantErr)
                return
            }

105	beacon-chain/sync/fork_watcher.go	Normal file
@@ -0,0 +1,105 @@
package sync

import (
    "github.com/pkg/errors"
    types "github.com/prysmaticlabs/eth2-types"
    "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
    "github.com/prysmaticlabs/prysm/beacon-chain/p2p"
    "github.com/prysmaticlabs/prysm/shared/p2putils"
    "github.com/prysmaticlabs/prysm/shared/params"
    "github.com/prysmaticlabs/prysm/shared/slotutil"
)

// forkWatcher is a background routine that watches for incoming forks. Depending on the epoch,
// it is in charge of subscribing/unsubscribing the relevant topics at the fork boundaries.
func (s *Service) forkWatcher() {
    slotTicker := slotutil.NewSlotTicker(s.cfg.Chain.GenesisTime(), params.BeaconConfig().SecondsPerSlot)
    for {
        select {
        case currSlot := <-slotTicker.C():
            currEpoch := helpers.SlotToEpoch(currSlot)
            if err := s.checkForNextEpochFork(currEpoch); err != nil {
                log.WithError(err).Error("Unable to check for fork in the next epoch")
                continue
            }
            if err := s.checkForPreviousEpochFork(currEpoch); err != nil {
                log.WithError(err).Error("Unable to check for fork in the previous epoch")
                continue
            }
        case <-s.ctx.Done():
            log.Debug("Context closed, exiting goroutine")
            slotTicker.Done()
            return
        }
    }
}

// checkForNextEpochFork checks whether a fork is scheduled in the next epoch.
func (s *Service) checkForNextEpochFork(currEpoch types.Epoch) error {
    genRoot := s.cfg.Chain.GenesisValidatorRoot()
    isNextForkEpoch, err := p2putils.IsForkNextEpoch(s.cfg.Chain.GenesisTime(), genRoot[:])
    if err != nil {
        return errors.Wrap(err, "Could not retrieve next fork epoch")
    }
    // In preparation for the upcoming fork in the following epoch,
    // the node subscribes to the new topics in advance.
    if isNextForkEpoch {
        nextEpoch := currEpoch + 1
        if nextEpoch == params.BeaconConfig().AltairForkEpoch {
            digest, err := p2putils.ForkDigestFromEpoch(nextEpoch, genRoot[:])
            if err != nil {
                return errors.Wrap(err, "Could not retrieve fork digest")
            }
            if s.subHandler.digestExists(digest) {
                return nil
            }
            s.registerSubscribers(nextEpoch, digest)
            s.registerRPCHandlersAltair()
        }
    }
    return nil
}

// checkForPreviousEpochFork checks whether a fork happened in the previous epoch.
func (s *Service) checkForPreviousEpochFork(currEpoch types.Epoch) error {
    genRoot := s.cfg.Chain.GenesisValidatorRoot()
    // This method takes care of de-registering old gossip pubsub handlers. Once we are
    // at the epoch after the fork, we de-register from all the outdated topics.
    currFork, err := p2putils.Fork(currEpoch)
    if err != nil {
        return err
    }
    epochAfterFork := currFork.Epoch + 1
    nonGenesisFork := currFork.Epoch > 1
    // If we are in the epoch after the fork, we start de-registering.
    if epochAfterFork == currEpoch && nonGenesisFork {
        // Look at the previous fork's digest.
        epochBeforeFork := currFork.Epoch - 1
        prevDigest, err := p2putils.ForkDigestFromEpoch(epochBeforeFork, genRoot[:])
        if err != nil {
            return errors.Wrap(err, "Failed to determine previous epoch fork digest")
        }

        // Exit early if there are no topics with that particular digest.
        if !s.subHandler.digestExists(prevDigest) {
            return nil
        }
        s.unregisterPhase0Handlers()
        // Run through all our currently active topics and remove any
        // subscriptions still bound to the previous digest.
        for _, t := range s.subHandler.allTopics() {
            retDigest, err := p2p.ExtractGossipDigest(t)
            if err != nil {
                log.WithError(err).Error("Could not retrieve digest")
                continue
            }
            if retDigest == prevDigest {
                s.unSubscribeFromTopic(t)
            }
        }
    }
    return nil
}

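The watcher's "fork in the next epoch?" predicate reduces to deriving the current epoch from wall-clock time and genesis, then comparing the next epoch against the scheduled fork epoch. A standalone sketch under assumed constants (SECONDS_PER_SLOT = 12, SLOTS_PER_EPOCH = 32); this is not the actual p2putils.IsForkNextEpoch:

package main

import (
    "fmt"
    "time"
)

const (
    secondsPerSlot = 12 // assumed mainnet value
    slotsPerEpoch  = 32 // assumed mainnet value
)

func isForkNextEpoch(genesis, now time.Time, forkEpoch uint64) bool {
    if now.Before(genesis) {
        return false
    }
    slot := uint64(now.Sub(genesis).Seconds()) / secondsPerSlot
    currEpoch := slot / slotsPerEpoch
    return currEpoch+1 == forkEpoch
}

func main() {
    // Genesis four epochs ago: the current epoch is 4, so a fork at epoch 5 is "next".
    genesis := time.Now().Add(-4 * slotsPerEpoch * secondsPerSlot * time.Second)
    fmt.Println(isForkNextEpoch(genesis, time.Now(), 5)) // true
}

This mirrors the setup in the tests below, which place genesis four epochs in the past and schedule AltairForkEpoch at 5.
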
249	beacon-chain/sync/fork_watcher_test.go	Normal file
@@ -0,0 +1,249 @@
package sync

import (
    "context"
    "testing"
    "time"

    types "github.com/prysmaticlabs/eth2-types"
    mockChain "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
    "github.com/prysmaticlabs/prysm/beacon-chain/p2p"
    p2ptest "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing"
    mockSync "github.com/prysmaticlabs/prysm/beacon-chain/sync/initial-sync/testing"
    "github.com/prysmaticlabs/prysm/shared/abool"
    "github.com/prysmaticlabs/prysm/shared/p2putils"
    "github.com/prysmaticlabs/prysm/shared/params"
    "github.com/prysmaticlabs/prysm/shared/testutil/assert"
)

func TestService_CheckForNextEpochFork(t *testing.T) {
    params.SetupTestConfigCleanup(t)
    tests := []struct {
        name         string
        svcCreator   func(t *testing.T) *Service
        currEpoch    types.Epoch
        wantErr      bool
        postSvcCheck func(t *testing.T, s *Service)
    }{
        {
            name: "no fork in the next epoch",
            svcCreator: func(t *testing.T) *Service {
                p2p := p2ptest.NewTestP2P(t)
                chainService := &mockChain.ChainService{
                    Genesis:        time.Now().Add(time.Duration(-params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().SlotsPerEpoch))) * time.Second),
                    ValidatorsRoot: [32]byte{'A'},
                }
                ctx, cancel := context.WithCancel(context.Background())
                r := &Service{
                    ctx:    ctx,
                    cancel: cancel,
                    cfg: &Config{
                        P2P:           p2p,
                        Chain:         chainService,
                        StateNotifier: chainService.StateNotifier(),
                        InitialSync:   &mockSync.Sync{IsSyncing: false},
                    },
                    chainStarted: abool.New(),
                    subHandler:   newSubTopicHandler(),
                }
                return r
            },
            currEpoch: 10,
            wantErr:   false,
            postSvcCheck: func(t *testing.T, s *Service) {

            },
        },
        {
            name: "fork in the next epoch",
            svcCreator: func(t *testing.T) *Service {
                p2p := p2ptest.NewTestP2P(t)
                chainService := &mockChain.ChainService{
                    Genesis:        time.Now().Add(-4 * oneEpoch()),
                    ValidatorsRoot: [32]byte{'A'},
                }
                bCfg := params.BeaconConfig()
                bCfg.AltairForkEpoch = 5
                params.OverrideBeaconConfig(bCfg)
                params.BeaconConfig().InitializeForkSchedule()
                ctx, cancel := context.WithCancel(context.Background())
                r := &Service{
                    ctx:    ctx,
                    cancel: cancel,
                    cfg: &Config{
                        P2P:           p2p,
                        Chain:         chainService,
                        StateNotifier: chainService.StateNotifier(),
                        InitialSync:   &mockSync.Sync{IsSyncing: false},
                    },
                    chainStarted: abool.New(),
                    subHandler:   newSubTopicHandler(),
                }
                return r
            },
            currEpoch: 4,
            wantErr:   false,
            postSvcCheck: func(t *testing.T, s *Service) {
                genRoot := s.cfg.Chain.GenesisValidatorRoot()
                digest, err := p2putils.ForkDigestFromEpoch(5, genRoot[:])
                assert.NoError(t, err)
                assert.Equal(t, true, s.subHandler.digestExists(digest))
                rpcMap := make(map[string]bool)
                for _, p := range s.cfg.P2P.Host().Mux().Protocols() {
                    rpcMap[p] = true
                }
                assert.Equal(t, true, rpcMap[p2p.RPCBlocksByRangeTopicV2+s.cfg.P2P.Encoding().ProtocolSuffix()], "topic doesn't exist")
                assert.Equal(t, true, rpcMap[p2p.RPCBlocksByRootTopicV2+s.cfg.P2P.Encoding().ProtocolSuffix()], "topic doesn't exist")
                assert.Equal(t, true, rpcMap[p2p.RPCMetaDataTopicV2+s.cfg.P2P.Encoding().ProtocolSuffix()], "topic doesn't exist")
            },
        },
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            s := tt.svcCreator(t)
            if err := s.checkForNextEpochFork(tt.currEpoch); (err != nil) != tt.wantErr {
                t.Errorf("checkForNextEpochFork() error = %v, wantErr %v", err, tt.wantErr)
            }
            tt.postSvcCheck(t, s)
        })
    }
}

func TestService_CheckForPreviousEpochFork(t *testing.T) {
    params.SetupTestConfigCleanup(t)
    tests := []struct {
        name         string
        svcCreator   func(t *testing.T) *Service
        currEpoch    types.Epoch
        wantErr      bool
        postSvcCheck func(t *testing.T, s *Service)
    }{
        {
            name: "no fork in the previous epoch",
            svcCreator: func(t *testing.T) *Service {
                p2p := p2ptest.NewTestP2P(t)
                chainService := &mockChain.ChainService{
                    Genesis:        time.Now().Add(-oneEpoch()),
                    ValidatorsRoot: [32]byte{'A'},
                }
                ctx, cancel := context.WithCancel(context.Background())
                r := &Service{
                    ctx:    ctx,
                    cancel: cancel,
                    cfg: &Config{
                        P2P:           p2p,
                        Chain:         chainService,
                        StateNotifier: chainService.StateNotifier(),
                        InitialSync:   &mockSync.Sync{IsSyncing: false},
                    },
                    chainStarted: abool.New(),
                    subHandler:   newSubTopicHandler(),
                }
                r.registerRPCHandlers()
                return r
            },
            currEpoch: 10,
            wantErr:   false,
            postSvcCheck: func(t *testing.T, s *Service) {
                ptcls := s.cfg.P2P.Host().Mux().Protocols()
                pMap := make(map[string]bool)
                for _, p := range ptcls {
                    pMap[p] = true
                }
                assert.Equal(t, true, pMap[p2p.RPCGoodByeTopicV1+s.cfg.P2P.Encoding().ProtocolSuffix()])
                assert.Equal(t, true, pMap[p2p.RPCStatusTopicV1+s.cfg.P2P.Encoding().ProtocolSuffix()])
                assert.Equal(t, true, pMap[p2p.RPCPingTopicV1+s.cfg.P2P.Encoding().ProtocolSuffix()])
                assert.Equal(t, true, pMap[p2p.RPCMetaDataTopicV1+s.cfg.P2P.Encoding().ProtocolSuffix()])
                assert.Equal(t, true, pMap[p2p.RPCBlocksByRangeTopicV1+s.cfg.P2P.Encoding().ProtocolSuffix()])
                assert.Equal(t, true, pMap[p2p.RPCBlocksByRootTopicV1+s.cfg.P2P.Encoding().ProtocolSuffix()])
            },
        },
        {
            name: "fork in the previous epoch",
            svcCreator: func(t *testing.T) *Service {
                p2p := p2ptest.NewTestP2P(t)
                chainService := &mockChain.ChainService{
                    Genesis:        time.Now().Add(-4 * oneEpoch()),
                    ValidatorsRoot: [32]byte{'A'},
                }
                bCfg := params.BeaconConfig()
                bCfg.AltairForkEpoch = 3
                params.OverrideBeaconConfig(bCfg)
                params.BeaconConfig().InitializeForkSchedule()
                ctx, cancel := context.WithCancel(context.Background())
                r := &Service{
                    ctx:    ctx,
                    cancel: cancel,
                    cfg: &Config{
                        P2P:           p2p,
                        Chain:         chainService,
                        StateNotifier: chainService.StateNotifier(),
                        InitialSync:   &mockSync.Sync{IsSyncing: false},
                    },
                    chainStarted: abool.New(),
                    subHandler:   newSubTopicHandler(),
                }
                prevGenesis := chainService.Genesis
                // To allow registration of v1 handlers.
                chainService.Genesis = time.Now().Add(-1 * oneEpoch())
                r.registerRPCHandlers()

                chainService.Genesis = prevGenesis
                r.registerRPCHandlersAltair()

                genRoot := r.cfg.Chain.GenesisValidatorRoot()
                digest, err := p2putils.ForkDigestFromEpoch(0, genRoot[:])
                assert.NoError(t, err)
                r.registerSubscribers(0, digest)
                assert.Equal(t, true, r.subHandler.digestExists(digest))

                digest, err = p2putils.ForkDigestFromEpoch(3, genRoot[:])
                assert.NoError(t, err)
                r.registerSubscribers(3, digest)
                assert.Equal(t, true, r.subHandler.digestExists(digest))

                return r
            },
            currEpoch: 4,
wantErr: false,
|
||||
postSvcCheck: func(t *testing.T, s *Service) {
|
||||
genRoot := s.cfg.Chain.GenesisValidatorRoot()
|
||||
digest, err := p2putils.ForkDigestFromEpoch(0, genRoot[:])
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, false, s.subHandler.digestExists(digest))
|
||||
digest, err = p2putils.ForkDigestFromEpoch(3, genRoot[:])
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, true, s.subHandler.digestExists(digest))
|
||||
|
||||
ptcls := s.cfg.P2P.Host().Mux().Protocols()
|
||||
pMap := make(map[string]bool)
|
||||
for _, p := range ptcls {
|
||||
pMap[p] = true
|
||||
}
|
||||
assert.Equal(t, true, pMap[p2p.RPCGoodByeTopicV1+s.cfg.P2P.Encoding().ProtocolSuffix()])
|
||||
assert.Equal(t, true, pMap[p2p.RPCStatusTopicV1+s.cfg.P2P.Encoding().ProtocolSuffix()])
|
||||
assert.Equal(t, true, pMap[p2p.RPCPingTopicV1+s.cfg.P2P.Encoding().ProtocolSuffix()])
|
||||
assert.Equal(t, true, pMap[p2p.RPCMetaDataTopicV2+s.cfg.P2P.Encoding().ProtocolSuffix()])
|
||||
assert.Equal(t, true, pMap[p2p.RPCBlocksByRangeTopicV2+s.cfg.P2P.Encoding().ProtocolSuffix()])
|
||||
assert.Equal(t, true, pMap[p2p.RPCBlocksByRootTopicV2+s.cfg.P2P.Encoding().ProtocolSuffix()])
|
||||
|
||||
assert.Equal(t, false, pMap[p2p.RPCMetaDataTopicV1+s.cfg.P2P.Encoding().ProtocolSuffix()])
|
||||
assert.Equal(t, false, pMap[p2p.RPCBlocksByRangeTopicV1+s.cfg.P2P.Encoding().ProtocolSuffix()])
|
||||
assert.Equal(t, false, pMap[p2p.RPCBlocksByRootTopicV1+s.cfg.P2P.Encoding().ProtocolSuffix()])
|
||||
},
|
||||
},
|
||||
}
|
||||
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            s := tt.svcCreator(t)
            if err := s.checkForPreviousEpochFork(tt.currEpoch); (err != nil) != tt.wantErr {
                t.Errorf("checkForPreviousEpochFork() error = %v, wantErr %v", err, tt.wantErr)
            }
            tt.postSvcCheck(t, s)
        })
    }
}

func oneEpoch() time.Duration {
    return time.Duration(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().SecondsPerSlot))) * time.Second
}
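For intuition, oneEpoch() above is just slots-per-epoch times seconds-per-slot expressed as a time.Duration. A minimal standalone sketch of the same arithmetic, assuming the mainnet preset of 32 slots per epoch and 12 seconds per slot (constants here are illustrative, not read from the real config):

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        const slotsPerEpoch, secondsPerSlot = 32, 12
        epoch := time.Duration(slotsPerEpoch*secondsPerSlot) * time.Second
        fmt.Println(epoch) // 6m24s, i.e. 384 seconds per epoch
    }

So a mock chain whose Genesis is set to time.Now().Add(-4 * oneEpoch()) reports a current epoch of 4, which is exactly what the fork tests above rely on.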
@@ -281,6 +281,8 @@ func TestBlocksFetcher_RoundRobin(t *testing.T) {
        FinalizedCheckPoint: &eth.Checkpoint{
            Epoch: 0,
        },
        Genesis:        time.Now(),
        ValidatorsRoot: [32]byte{},
    }

    ctx, cancel := context.WithCancel(context.Background())
@@ -414,7 +416,8 @@ func TestBlocksFetcher_handleRequest(t *testing.T) {
    }

    mc, p2p, _ := initializeTestServices(t, chainConfig.expectedBlockSlots, chainConfig.peers)

    mc.ValidatorsRoot = [32]byte{}
    mc.Genesis = time.Now()
    t.Run("context cancellation", func(t *testing.T) {
        ctx, cancel := context.WithCancel(context.Background())
        fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{
@@ -547,7 +550,7 @@ func TestBlocksFetcher_RequestBlocksRateLimitingLocks(t *testing.T) {
    defer cancel()
    fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{p2p: p1})
    fetcher.rateLimiter = leakybucket.NewCollector(float64(req.Count), int64(req.Count*burstFactor), false)

    fetcher.chain = &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
    hook := logTest.NewGlobal()
    wg := new(sync.WaitGroup)
    wg.Add(1)
@@ -610,7 +613,8 @@ func TestBlocksFetcher_requestBlocksFromPeerReturningInvalidBlocks(t *testing.T)
    for i := req.StartSlot; i < req.StartSlot.Add(req.Count*req.Step); i += types.Slot(req.Step) {
        blk := testutil.NewBeaconBlock()
        blk.Block.Slot = i
        assert.NoError(t, beaconsync.WriteChunk(stream, nil, p1.Encoding(), blk))
        mchain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
        assert.NoError(t, beaconsync.WriteBlockChunk(stream, mchain, p1.Encoding(), wrapper.WrappedPhase0SignedBeaconBlock(blk)))
    }
    assert.NoError(t, stream.Close())
}
@@ -631,7 +635,8 @@ func TestBlocksFetcher_requestBlocksFromPeerReturningInvalidBlocks(t *testing.T)
    for i := req.StartSlot; i < req.StartSlot.Add(req.Count*req.Step+1); i += types.Slot(req.Step) {
        blk := testutil.NewBeaconBlock()
        blk.Block.Slot = i
        assert.NoError(t, beaconsync.WriteChunk(stream, nil, p1.Encoding(), blk))
        chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
        assert.NoError(t, beaconsync.WriteBlockChunk(stream, chain, p1.Encoding(), wrapper.WrappedPhase0SignedBeaconBlock(blk)))
    }
    assert.NoError(t, stream.Close())
}
@@ -652,11 +657,12 @@ func TestBlocksFetcher_requestBlocksFromPeerReturningInvalidBlocks(t *testing.T)
    return func(stream network.Stream) {
        blk := testutil.NewBeaconBlock()
        blk.Block.Slot = 163
        assert.NoError(t, beaconsync.WriteChunk(stream, nil, p1.Encoding(), blk))
        chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
        assert.NoError(t, beaconsync.WriteBlockChunk(stream, chain, p1.Encoding(), wrapper.WrappedPhase0SignedBeaconBlock(blk)))

        blk = testutil.NewBeaconBlock()
        blk.Block.Slot = 162
        assert.NoError(t, beaconsync.WriteChunk(stream, nil, p1.Encoding(), blk))
        assert.NoError(t, beaconsync.WriteBlockChunk(stream, chain, p1.Encoding(), wrapper.WrappedPhase0SignedBeaconBlock(blk)))
        assert.NoError(t, stream.Close())
    }
},
@@ -676,11 +682,13 @@ func TestBlocksFetcher_requestBlocksFromPeerReturningInvalidBlocks(t *testing.T)
    return func(stream network.Stream) {
        blk := testutil.NewBeaconBlock()
        blk.Block.Slot = 160
        assert.NoError(t, beaconsync.WriteChunk(stream, nil, p1.Encoding(), blk))
        chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}

        assert.NoError(t, beaconsync.WriteBlockChunk(stream, chain, p1.Encoding(), wrapper.WrappedPhase0SignedBeaconBlock(blk)))

        blk = testutil.NewBeaconBlock()
        blk.Block.Slot = 160
        assert.NoError(t, beaconsync.WriteChunk(stream, nil, p1.Encoding(), blk))
        assert.NoError(t, beaconsync.WriteBlockChunk(stream, chain, p1.Encoding(), wrapper.WrappedPhase0SignedBeaconBlock(blk)))
        assert.NoError(t, stream.Close())
    }
},
@@ -703,14 +711,15 @@ func TestBlocksFetcher_requestBlocksFromPeerReturningInvalidBlocks(t *testing.T)
    }()
    for i := req.StartSlot; i < req.StartSlot.Add(req.Count*req.Step); i += types.Slot(req.Step) {
        blk := testutil.NewBeaconBlock()
        chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
        // Patch mid block, with invalid slot number.
        if i == req.StartSlot.Add(req.Count*req.Step/2) {
            blk.Block.Slot = req.StartSlot - 1
            assert.NoError(t, beaconsync.WriteChunk(stream, nil, p1.Encoding(), blk))
            assert.NoError(t, beaconsync.WriteBlockChunk(stream, chain, p1.Encoding(), wrapper.WrappedPhase0SignedBeaconBlock(blk)))
            break
        } else {
            blk.Block.Slot = i
            assert.NoError(t, beaconsync.WriteChunk(stream, nil, p1.Encoding(), blk))
            assert.NoError(t, beaconsync.WriteBlockChunk(stream, chain, p1.Encoding(), wrapper.WrappedPhase0SignedBeaconBlock(blk)))
        }
    }
}
@@ -734,14 +743,15 @@ func TestBlocksFetcher_requestBlocksFromPeerReturningInvalidBlocks(t *testing.T)
    }()
    for i := req.StartSlot; i < req.StartSlot.Add(req.Count*req.Step); i += types.Slot(req.Step) {
        blk := testutil.NewBeaconBlock()
        chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
        // Patch mid block, with invalid slot number.
        if i == req.StartSlot.Add(req.Count*req.Step/2) {
            blk.Block.Slot = req.StartSlot.Add(req.Count * req.Step)
            assert.NoError(t, beaconsync.WriteChunk(stream, nil, p1.Encoding(), blk))
            assert.NoError(t, beaconsync.WriteBlockChunk(stream, chain, p1.Encoding(), wrapper.WrappedPhase0SignedBeaconBlock(blk)))
            break
        } else {
            blk.Block.Slot = i
            assert.NoError(t, beaconsync.WriteChunk(stream, nil, p1.Encoding(), blk))
            assert.NoError(t, beaconsync.WriteBlockChunk(stream, chain, p1.Encoding(), wrapper.WrappedPhase0SignedBeaconBlock(blk)))
        }
    }
}
@@ -762,11 +772,12 @@ func TestBlocksFetcher_requestBlocksFromPeerReturningInvalidBlocks(t *testing.T)
    return func(stream network.Stream) {
        blk := testutil.NewBeaconBlock()
        blk.Block.Slot = 100
        assert.NoError(t, beaconsync.WriteChunk(stream, nil, p1.Encoding(), blk))
        chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
        assert.NoError(t, beaconsync.WriteBlockChunk(stream, chain, p1.Encoding(), wrapper.WrappedPhase0SignedBeaconBlock(blk)))

        blk = testutil.NewBeaconBlock()
        blk.Block.Slot = 105
        assert.NoError(t, beaconsync.WriteChunk(stream, nil, p1.Encoding(), blk))
        assert.NoError(t, beaconsync.WriteBlockChunk(stream, chain, p1.Encoding(), wrapper.WrappedPhase0SignedBeaconBlock(blk)))
        assert.NoError(t, stream.Close())
    }
},
@@ -785,11 +796,12 @@ func TestBlocksFetcher_requestBlocksFromPeerReturningInvalidBlocks(t *testing.T)
    return func(stream network.Stream) {
        blk := testutil.NewBeaconBlock()
        blk.Block.Slot = 100
        assert.NoError(t, beaconsync.WriteChunk(stream, nil, p1.Encoding(), blk))
        chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
        assert.NoError(t, beaconsync.WriteBlockChunk(stream, chain, p1.Encoding(), wrapper.WrappedPhase0SignedBeaconBlock(blk)))

        blk = testutil.NewBeaconBlock()
        blk.Block.Slot = 103
        assert.NoError(t, beaconsync.WriteChunk(stream, nil, p1.Encoding(), blk))
        assert.NoError(t, beaconsync.WriteBlockChunk(stream, chain, p1.Encoding(), wrapper.WrappedPhase0SignedBeaconBlock(blk)))
        assert.NoError(t, stream.Close())
    }
},
@@ -805,7 +817,7 @@ func TestBlocksFetcher_requestBlocksFromPeerReturningInvalidBlocks(t *testing.T)

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()
    fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{p2p: p1})
    fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{p2p: p1, chain: &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}})
    fetcher.rateLimiter = leakybucket.NewCollector(0.000001, 640, false)

    for _, tt := range tests {
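The recurring edit in the hunks above swaps the untyped WriteChunk helper for WriteBlockChunk, which needs a chain info provider so it can prefix each SSZ payload with the fork-digest context bytes introduced for Altair. A minimal sketch of the new call-site shape (mock.ChainService and the wrapper package are the test doubles used throughout this diff; stream and enc are assumed to be an open libp2p stream and a network encoding):

    // Before: the encoder wrote the raw protobuf with no fork context.
    // err := beaconsync.WriteChunk(stream, nil, enc, blk)

    // After: the block is wrapped into a version-aware interface, and the
    // mock chain supplies the genesis time and validators root from which
    // the 4-byte fork digest for the chunk is derived.
    chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
    err := beaconsync.WriteBlockChunk(stream, chain, enc, wrapper.WrappedPhase0SignedBeaconBlock(blk))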
@@ -178,6 +178,8 @@ func TestBlocksFetcher_findFork(t *testing.T) {
        Epoch: finalizedEpoch,
        Root:  []byte(fmt.Sprintf("finalized_root %d", finalizedEpoch)),
    },
    Genesis:        time.Now(),
    ValidatorsRoot: [32]byte{},
}

    ctx, cancel := context.WithCancel(context.Background())
@@ -321,9 +323,11 @@ func TestBlocksFetcher_findForkWithPeer(t *testing.T) {
    st, err := testutil.NewBeaconState()
    require.NoError(t, err)
    mc := &mock.ChainService{
        State: st,
        Root:  genesisRoot[:],
        DB:    beaconDB,
        State:          st,
        Root:           genesisRoot[:],
        DB:             beaconDB,
        Genesis:        time.Now(),
        ValidatorsRoot: [32]byte{},
    }

    ctx, cancel := context.WithCancel(context.Background())
@@ -438,6 +442,8 @@ func TestBlocksFetcher_findAncestor(t *testing.T) {
        Epoch: finalizedEpoch,
        Root:  []byte(fmt.Sprintf("finalized_root %d", finalizedEpoch)),
    },
    Genesis:        time.Now(),
    ValidatorsRoot: [32]byte{},
}

    ctx, cancel := context.WithCancel(context.Background())
@@ -1052,6 +1052,8 @@ func TestBlocksQueue_stuckInUnfavourableFork(t *testing.T) {
        Epoch: finalizedEpoch,
        Root:  []byte(fmt.Sprintf("finalized_root %d", finalizedEpoch)),
    },
    Genesis:        time.Now(),
    ValidatorsRoot: [32]byte{},
}

    ctx, cancel := context.WithCancel(context.Background())
@@ -1251,6 +1253,8 @@ func TestBlocksQueue_stuckWhenHeadIsSetToOrphanedBlock(t *testing.T) {
        Epoch: finalizedEpoch,
        Root:  []byte(fmt.Sprintf("finalized_root %d", finalizedEpoch)),
    },
    Genesis:        time.Now(),
    ValidatorsRoot: [32]byte{},
}

    // Populate database with blocks with part of the chain, orphaned block will be added on top.
@@ -96,6 +96,8 @@ func initializeTestServices(t *testing.T, slots []types.Slot, peers []*peerData)
        FinalizedCheckPoint: &eth.Checkpoint{
            Epoch: 0,
        },
        Genesis:        time.Now(),
        ValidatorsRoot: [32]byte{},
    }, p, beaconDB
}

@@ -216,8 +218,9 @@ func connectPeer(t *testing.T, host *p2pt.TestP2P, datum *peerData, peerStatus *
        ret = ret[:req.Count]
    }

    mChain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
    for i := 0; i < len(ret); i++ {
        assert.NoError(t, beaconsync.WriteChunk(stream, nil, p.Encoding(), ret[i]))
        assert.NoError(t, beaconsync.WriteBlockChunk(stream, mChain, p.Encoding(), wrapper.WrappedPhase0SignedBeaconBlock(ret[i])))
    }
})

@@ -286,7 +289,8 @@ func connectPeerHavingBlocks(
    if uint64(i) >= uint64(len(blocks)) {
        break
    }
    require.NoError(t, beaconsync.WriteChunk(stream, nil, p.Encoding(), blocks[i]))
    chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
    require.NoError(t, beaconsync.WriteBlockChunk(stream, chain, p.Encoding(), wrapper.WrappedPhase0SignedBeaconBlock(blocks[i])))
}
})

@@ -295,6 +295,8 @@ func TestService_roundRobinSync(t *testing.T) {
        FinalizedCheckPoint: &eth.Checkpoint{
            Epoch: 0,
        },
        Genesis:        time.Now(),
        ValidatorsRoot: [32]byte{},
    } // no-op mock
    s := &Service{
        ctx: context.Background(),
@@ -338,6 +340,8 @@ func TestService_processBlock(t *testing.T) {
        FinalizedCheckPoint: &eth.Checkpoint{
            Epoch: 0,
        },
        Genesis:        time.Now(),
        ValidatorsRoot: [32]byte{},
    },
    StateNotifier: &mock.MockStateNotifier{},
})
@@ -527,6 +531,8 @@ func TestService_blockProviderScoring(t *testing.T) {
        Epoch: 0,
        Root:  make([]byte, 32),
    },
    Genesis:        time.Now(),
    ValidatorsRoot: [32]byte{},
} // no-op mock
s := &Service{
    ctx: context.Background(),
@@ -590,6 +596,8 @@ func TestService_syncToFinalizedEpoch(t *testing.T) {
        Epoch: 0,
        Root:  make([]byte, 32),
    },
    Genesis:        time.Now(),
    ValidatorsRoot: [32]byte{},
}
s := &Service{
    ctx: context.Background(),

@@ -58,6 +58,8 @@ func TestService_InitStartStop(t *testing.T) {
        FinalizedCheckPoint: &eth.Checkpoint{
            Epoch: 0,
        },
        Genesis:        time.Unix(4113849600, 0),
        ValidatorsRoot: [32]byte{},
    }
},
methodRuns: func(fd *event.Feed) {
@@ -86,6 +88,8 @@ func TestService_InitStartStop(t *testing.T) {
        FinalizedCheckPoint: &eth.Checkpoint{
            Epoch: 0,
        },
        Genesis:        time.Now().Add(-5 * time.Minute),
        ValidatorsRoot: [32]byte{},
    }
},
methodRuns: func(fd *event.Feed) {
@@ -117,6 +121,8 @@ func TestService_InitStartStop(t *testing.T) {
        FinalizedCheckPoint: &eth.Checkpoint{
            Epoch: helpers.SlotToEpoch(futureSlot),
        },
        Genesis:        makeGenesisTime(futureSlot),
        ValidatorsRoot: [32]byte{},
    }
},
methodRuns: func(fd *event.Feed) {
@@ -150,7 +156,7 @@ func TestService_InitStartStop(t *testing.T) {
    defer hook.Reset()
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()
    mc := &mock.ChainService{}
    mc := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
    // Allow overriding with customized chain service.
    if tt.chainService != nil {
        mc = tt.chainService()
@@ -211,7 +217,7 @@ func TestService_waitForStateInitialization(t *testing.T) {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    s := newService(ctx, &mock.ChainService{})
    s := newService(ctx, &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}})
    wg := &sync.WaitGroup{}
    wg.Add(1)
    go func() {
@@ -238,7 +244,7 @@ func TestService_waitForStateInitialization(t *testing.T) {
    defer hook.Reset()
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()
    s := newService(ctx, &mock.ChainService{})
    s := newService(ctx, &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}})

    expectedGenesisTime := time.Unix(358544700, 0)
    var receivedGenesisTime time.Time
@@ -282,7 +288,7 @@ func TestService_waitForStateInitialization(t *testing.T) {
    defer hook.Reset()
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()
    s := newService(ctx, &mock.ChainService{})
    s := newService(ctx, &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}})
    // Initialize mock feed
    _ = s.cfg.StateNotifier.StateFeed()

@@ -320,7 +326,7 @@ func TestService_waitForStateInitialization(t *testing.T) {
}

func TestService_markSynced(t *testing.T) {
    mc := &mock.ChainService{}
    mc := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()
    s := NewService(ctx, &Config{
@@ -403,6 +409,8 @@ func TestService_Resync(t *testing.T) {
        FinalizedCheckPoint: &eth.Checkpoint{
            Epoch: helpers.SlotToEpoch(futureSlot),
        },
        Genesis:        time.Now(),
        ValidatorsRoot: [32]byte{},
    }
},
assert: func(s *Service) {

@@ -7,6 +7,8 @@ import (

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promauto"
    "github.com/prysmaticlabs/prysm/beacon-chain/cache"
    "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
    "github.com/prysmaticlabs/prysm/beacon-chain/p2p"
    "github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
    pb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
@@ -20,6 +22,12 @@ var (
        Help: "The number of peers subscribed to a given topic.",
    }, []string{"topic"},
)
subscribedTopicPeerCount = promauto.NewGaugeVec(
    prometheus.GaugeOpts{
        Name: "p2p_subscribed_topic_peer_count",
        Help: "The number of peers subscribed to topics that a host node is also subscribed to.",
    }, []string{"topic"},
)
messageReceivedCounter = promauto.NewCounterVec(
    prometheus.CounterOpts{
        Name: "p2p_message_received_total",
@@ -34,6 +42,13 @@ var (
    },
    []string{"topic"},
)
messageIgnoredValidationCounter = promauto.NewCounterVec(
    prometheus.CounterOpts{
        Name: "p2p_message_ignored_validation_total",
        Help: "Count of messages that were ignored in validation.",
    },
    []string{"topic"},
)
messageFailedProcessingCounter = promauto.NewCounterVec(
    prometheus.CounterOpts{
        Name: "p2p_message_failed_processing_total",
@@ -64,29 +79,40 @@ func (s *Service) updateMetrics() {
    return
}
// We update the dynamic subnet topics.
digest, err := s.forkDigest()
digest, err := s.currentForkDigest()
if err != nil {
    log.WithError(err).Debugf("Could not compute fork digest")
}
indices := s.aggregatorSubnetIndices(s.cfg.Chain.CurrentSlot())
syncIndices := cache.SyncSubnetIDs.GetAllSubnets(helpers.SlotToEpoch(s.cfg.Chain.CurrentSlot()))
attTopic := p2p.GossipTypeMapping[reflect.TypeOf(&pb.Attestation{})]
syncTopic := p2p.GossipTypeMapping[reflect.TypeOf(&pb.SyncCommitteeMessage{})]
attTopic += s.cfg.P2P.Encoding().ProtocolSuffix()
syncTopic += s.cfg.P2P.Encoding().ProtocolSuffix()
if flags.Get().SubscribeToAllSubnets {
    for i := uint64(0); i < params.BeaconNetworkConfig().AttestationSubnetCount; i++ {
        formattedTopic := fmt.Sprintf(attTopic, digest, i)
        topicPeerCount.WithLabelValues(formattedTopic).Set(float64(len(s.cfg.P2P.PubSub().ListPeers(formattedTopic))))
    }
    for i := uint64(0); i < params.BeaconConfig().SyncCommitteeSubnetCount; i++ {
        formattedTopic := fmt.Sprintf(syncTopic, digest, i)
        topicPeerCount.WithLabelValues(formattedTopic).Set(float64(len(s.cfg.P2P.PubSub().ListPeers(formattedTopic))))
    }
} else {
    for _, committeeIdx := range indices {
        formattedTopic := fmt.Sprintf(attTopic, digest, committeeIdx)
        topicPeerCount.WithLabelValues(formattedTopic).Set(float64(len(s.cfg.P2P.PubSub().ListPeers(formattedTopic))))
    }
    for _, committeeIdx := range syncIndices {
        formattedTopic := fmt.Sprintf(syncTopic, digest, committeeIdx)
        topicPeerCount.WithLabelValues(formattedTopic).Set(float64(len(s.cfg.P2P.PubSub().ListPeers(formattedTopic))))
    }
}

// We update all other gossip topics.
for _, topic := range p2p.AllTopics() {
    // We already updated attestation subnet topics.
    if strings.Contains(topic, "beacon_attestation") {
    if strings.Contains(topic, p2p.GossipAttestationMessage) || strings.Contains(topic, p2p.GossipSyncCommitteeMessage) {
        continue
    }
    topic += s.cfg.P2P.Encoding().ProtocolSuffix()
@@ -97,4 +123,8 @@ func (s *Service) updateMetrics() {
    formattedTopic := fmt.Sprintf(topic, digest)
    topicPeerCount.WithLabelValues(formattedTopic).Set(float64(len(s.cfg.P2P.PubSub().ListPeers(formattedTopic))))
}

for _, topic := range s.cfg.P2P.PubSub().GetTopics() {
    subscribedTopicPeerCount.WithLabelValues(topic).Set(float64(len(s.cfg.P2P.PubSub().ListPeers(topic))))
}
}
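The new subscribedTopicPeerCount gauge complements topicPeerCount: it iterates only over topics this host has actually joined, so the time series reflect the node's own subscription set rather than every known topic. A minimal sketch of the same promauto pattern in isolation (metric name and topic string here are illustrative, not part of the diff):

    package main

    import (
        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/promauto"
    )

    // exampleGauge mirrors the GaugeVec pattern above: one "topic" label,
    // updated with Set rather than Inc because peer counts are absolute values.
    var exampleGauge = promauto.NewGaugeVec(
        prometheus.GaugeOpts{
            Name: "example_topic_peer_count",
            Help: "Illustrative gauge, one time series per topic label.",
        }, []string{"topic"},
    )

    func main() {
        exampleGauge.WithLabelValues("/eth2/abcd1234/beacon_block/ssz_snappy").Set(8)
    }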
@@ -355,6 +355,8 @@ func TestService_BatchRootRequest(t *testing.T) {
        Epoch: 1,
        Root:  make([]byte, 32),
    },
    ValidatorsRoot: [32]byte{},
    Genesis:        time.Now(),
},
},
slotToPendingBlocks: gcache.New(time.Second, 2*time.Second),

@@ -42,6 +42,7 @@ func newRateLimiter(p2pProvider p2p.P2P) *limiter {
    topicMap[addEncoding(p2p.RPCGoodByeTopicV1)] = leakybucket.NewCollector(1, 1, false /* deleteEmptyBuckets */)
    // MetadataV0 Message
    topicMap[addEncoding(p2p.RPCMetaDataTopicV1)] = leakybucket.NewCollector(1, defaultBurstLimit, false /* deleteEmptyBuckets */)
    topicMap[addEncoding(p2p.RPCMetaDataTopicV2)] = leakybucket.NewCollector(1, defaultBurstLimit, false /* deleteEmptyBuckets */)
    // Ping Message
    topicMap[addEncoding(p2p.RPCPingTopicV1)] = leakybucket.NewCollector(1, defaultBurstLimit, false /* deleteEmptyBuckets */)
    // Status Message
@@ -49,12 +50,16 @@ func newRateLimiter(p2pProvider p2p.P2P) *limiter {

    // Use a single collector for block requests
    blockCollector := leakybucket.NewCollector(allowedBlocksPerSecond, allowedBlocksBurst, false /* deleteEmptyBuckets */)
    // Collector for V2
    blockCollectorV2 := leakybucket.NewCollector(allowedBlocksPerSecond, allowedBlocksBurst, false /* deleteEmptyBuckets */)

    // BlocksByRoots requests
    topicMap[addEncoding(p2p.RPCBlocksByRootTopicV1)] = blockCollector
    topicMap[addEncoding(p2p.RPCBlocksByRootTopicV2)] = blockCollectorV2

    // BlockByRange requests
    topicMap[addEncoding(p2p.RPCBlocksByRangeTopicV1)] = blockCollector
    topicMap[addEncoding(p2p.RPCBlocksByRangeTopicV2)] = blockCollectorV2

    // General topic for all rpc requests.
    topicMap[rpcLimiterTopic] = leakybucket.NewCollector(5, defaultBurstLimit*2, false /* deleteEmptyBuckets */)

@@ -18,7 +18,7 @@ import (

func TestNewRateLimiter(t *testing.T) {
    rlimiter := newRateLimiter(mockp2p.NewTestP2P(t))
    assert.Equal(t, len(rlimiter.limiterMap), 7, "correct number of topics not registered")
    assert.Equal(t, len(rlimiter.limiterMap), 10, "correct number of topics not registered")
}

func TestNewRateLimiter_FreeCorrectly(t *testing.T) {
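The limiter map grows from 7 to 10 entries because exactly three v2 topics join the existing set: metadata, blocks-by-root, and blocks-by-range each gain a v2 collector alongside status, goodbye, ping, their v1 counterparts, and the shared rpcLimiterTopic. A back-of-the-envelope tally of the count asserted in the test above (topic names simplified; the real keys also carry the encoding suffix):

    package main

    import "fmt"

    func main() {
        v1 := []string{"status", "goodbye", "ping", "metadata_v1", "blocks_by_root_v1", "blocks_by_range_v1"}
        v2 := []string{"metadata_v2", "blocks_by_root_v2", "blocks_by_range_v2"}
        general := 1 // rpcLimiterTopic, shared by all requests
        fmt.Println(len(v1)+general, "->", len(v1)+len(v2)+general) // 7 -> 10
    }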
@@ -3,10 +3,14 @@ package sync

import (
    "context"
    "reflect"
    "runtime/debug"
    "strings"

    ssz "github.com/ferranbt/fastssz"
    libp2pcore "github.com/libp2p/go-libp2p-core"
    "github.com/libp2p/go-libp2p-core/network"
    "github.com/libp2p/go-libp2p-core/protocol"
    "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
    "github.com/prysmaticlabs/prysm/beacon-chain/p2p"
    p2ptypes "github.com/prysmaticlabs/prysm/beacon-chain/p2p/types"
    "github.com/prysmaticlabs/prysm/shared/params"
@@ -30,6 +34,24 @@ type rpcHandler func(context.Context, interface{}, libp2pcore.Stream) error

// registerRPCHandlers for p2p RPC.
func (s *Service) registerRPCHandlers() {
    currEpoch := helpers.SlotToEpoch(s.cfg.Chain.CurrentSlot())
    // Register V2 handlers if we are past altair fork epoch.
    if currEpoch >= params.BeaconConfig().AltairForkEpoch {
        s.registerRPC(
            p2p.RPCStatusTopicV1,
            s.statusRPCHandler,
        )
        s.registerRPC(
            p2p.RPCGoodByeTopicV1,
            s.goodbyeRPCHandler,
        )
        s.registerRPC(
            p2p.RPCPingTopicV1,
            s.pingHandler,
        )
        s.registerRPCHandlersAltair()
        return
    }
    s.registerRPC(
        p2p.RPCStatusTopicV1,
        s.statusRPCHandler,
@@ -56,11 +78,45 @@ func (s *Service) registerRPCHandlers() {
    )
}

// registerRPCHandlersAltair registers the RPC handlers added in altair.
func (s *Service) registerRPCHandlersAltair() {
    s.registerRPC(
        p2p.RPCBlocksByRangeTopicV2,
        s.beaconBlocksByRangeRPCHandler,
    )
    s.registerRPC(
        p2p.RPCBlocksByRootTopicV2,
        s.beaconBlocksRootRPCHandler,
    )
    s.registerRPC(
        p2p.RPCMetaDataTopicV2,
        s.metaDataHandler,
    )
}

// Remove all v1 Stream handlers that are no longer supported
// from altair onwards.
func (s *Service) unregisterPhase0Handlers() {
    fullBlockRangeTopic := p2p.RPCBlocksByRangeTopicV1 + s.cfg.P2P.Encoding().ProtocolSuffix()
    fullBlockRootTopic := p2p.RPCBlocksByRootTopicV1 + s.cfg.P2P.Encoding().ProtocolSuffix()
    fullMetadataTopic := p2p.RPCMetaDataTopicV1 + s.cfg.P2P.Encoding().ProtocolSuffix()

    s.cfg.P2P.Host().RemoveStreamHandler(protocol.ID(fullBlockRangeTopic))
    s.cfg.P2P.Host().RemoveStreamHandler(protocol.ID(fullBlockRootTopic))
    s.cfg.P2P.Host().RemoveStreamHandler(protocol.ID(fullMetadataTopic))
}

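Taken together, the registration logic keys everything off the node's current epoch relative to AltairForkEpoch: status, goodbye, and ping stay on their v1 topics, while the block and metadata topics move to v2 and the stale v1 stream handlers can later be dropped via unregisterPhase0Handlers. A compressed sketch of that decision (a paraphrase of the control flow for illustration, not additional behavior):

    // Sketch: which handler set a node exposes for a given epoch.
    func handlerSetFor(currEpoch, altairForkEpoch uint64) []string {
        base := []string{"status/v1", "goodbye/v1", "ping/v1"}
        if currEpoch >= altairForkEpoch {
            return append(base, "blocks_by_range/v2", "blocks_by_root/v2", "metadata/v2")
        }
        return append(base, "blocks_by_range/v1", "blocks_by_root/v1", "metadata/v1")
    }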
// registerRPC for a given topic with an expected protobuf message type.
func (s *Service) registerRPC(baseTopic string, handle rpcHandler) {
    topic := baseTopic + s.cfg.P2P.Encoding().ProtocolSuffix()
    log := log.WithField("topic", topic)
    s.cfg.P2P.SetStreamHandler(topic, func(stream network.Stream) {
        defer func() {
            if r := recover(); r != nil {
                log.WithField("error", r).Error("Panic occurred")
                log.Errorf("%s", debug.Stack())
            }
        }()
        ctx, cancel := context.WithTimeout(s.ctx, ttfbTimeout)
        defer cancel()

@@ -113,7 +169,7 @@ func (s *Service) registerRPC(baseTopic string, handle rpcHandler) {

        // since metadata requests do not have any data in the payload, we
        // do not decode anything.
        if baseTopic == p2p.RPCMetaDataTopicV1 {
        if baseTopic == p2p.RPCMetaDataTopicV1 || baseTopic == p2p.RPCMetaDataTopicV2 {
            if err := handle(ctx, base, stream); err != nil {
                messageFailedProcessingCounter.WithLabelValues(topic).Inc()
                if err != p2ptypes.ErrWrongForkDigestVersion {
@@ -128,8 +184,12 @@ func (s *Service) registerRPC(baseTopic string, handle rpcHandler) {
        // a way to check for its reflect.Kind and based on the result, we can decode
        // accordingly.
        if t.Kind() == reflect.Ptr {
            msg := reflect.New(t.Elem())
            if err := s.cfg.P2P.Encoding().DecodeWithMaxLength(stream, msg.Interface()); err != nil {
            msg, ok := reflect.New(t.Elem()).Interface().(ssz.Unmarshaler)
            if !ok {
                log.Errorf("message of %T does not support marshaller interface", msg)
                return
            }
            if err := s.cfg.P2P.Encoding().DecodeWithMaxLength(stream, msg); err != nil {
                // Debug logs for goodbye/status errors
                if strings.Contains(topic, p2p.RPCGoodByeTopicV1) || strings.Contains(topic, p2p.RPCStatusTopicV1) {
                    log.WithError(err).Debug("Could not decode goodbye stream message")
@@ -140,7 +200,7 @@ func (s *Service) registerRPC(baseTopic string, handle rpcHandler) {
                traceutil.AnnotateError(span, err)
                return
            }
            if err := handle(ctx, msg.Interface(), stream); err != nil {
            if err := handle(ctx, msg, stream); err != nil {
                messageFailedProcessingCounter.WithLabelValues(topic).Inc()
                if err != p2ptypes.ErrWrongForkDigestVersion {
                    log.WithError(err).Debug("Could not handle p2p RPC")
@@ -148,13 +208,18 @@ func (s *Service) registerRPC(baseTopic string, handle rpcHandler) {
                traceutil.AnnotateError(span, err)
            }
        } else {
            msg := reflect.New(t)
            if err := s.cfg.P2P.Encoding().DecodeWithMaxLength(stream, msg.Interface()); err != nil {
            nTyp := reflect.New(t)
            msg, ok := nTyp.Interface().(ssz.Unmarshaler)
            if !ok {
                log.Errorf("message of %T does not support marshaller interface", msg)
                return
            }
            if err := s.cfg.P2P.Encoding().DecodeWithMaxLength(stream, msg); err != nil {
                log.WithError(err).Debug("Could not decode stream message")
                traceutil.AnnotateError(span, err)
                return
            }
            if err := handle(ctx, msg.Elem().Interface(), stream); err != nil {
            if err := handle(ctx, nTyp.Elem().Interface(), stream); err != nil {
                messageFailedProcessingCounter.WithLabelValues(topic).Inc()
                if err != p2ptypes.ErrWrongForkDigestVersion {
                    log.WithError(err).Debug("Could not handle p2p RPC")
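The decode path above replaces a bare reflect.New plus interface{} decode with an explicit assertion to fastssz's ssz.Unmarshaler, so types that cannot unmarshal SSZ fail loudly before any bytes are read from the wire. A standalone sketch of the pattern (demoMsg is a stand-in; in the real code the asserted type is the handler's registered protobuf message, and t is assumed to be a pointer type, mirroring the reflect.Ptr branch):

    package main

    import (
        "fmt"
        "reflect"

        ssz "github.com/ferranbt/fastssz"
    )

    // decodeTarget allocates a fresh value of the handler's registered type
    // and proves it can unmarshal SSZ before the stream decoder runs.
    func decodeTarget(t reflect.Type) (ssz.Unmarshaler, error) {
        msg, ok := reflect.New(t.Elem()).Interface().(ssz.Unmarshaler)
        if !ok {
            return nil, fmt.Errorf("message of %v does not support the unmarshaler interface", t)
        }
        return msg, nil
    }

    type demoMsg struct{}

    func (d *demoMsg) UnmarshalSSZ(buf []byte) error { return nil }

    func main() {
        m, err := decodeTarget(reflect.TypeOf(&demoMsg{}))
        fmt.Println(m, err) // &{} <nil>
    }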
@@ -162,7 +162,7 @@ func (s *Service) writeBlockRangeToStream(ctx context.Context, startSlot, endSlo
    if b == nil || b.IsNil() || b.Block().IsNil() {
        continue
    }
    if chunkErr := s.chunkWriter(stream, b.Proto()); chunkErr != nil {
    if chunkErr := s.chunkBlockWriter(stream, b); chunkErr != nil {
        log.WithError(chunkErr).Debug("Could not send a chunked response")
        s.writeErrorResponseToStream(responseCodeServerError, p2ptypes.ErrGeneric.Error(), stream)
        traceutil.AnnotateError(span, chunkErr)

@@ -22,7 +22,6 @@ import (
    "github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
    ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
    pb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
    statepb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
    "github.com/prysmaticlabs/prysm/shared/bytesutil"
    "github.com/prysmaticlabs/prysm/shared/params"
@@ -635,7 +634,7 @@ func TestRPCBeaconBlocksByRange_FilterBlocks(t *testing.T) {
    require.NoError(t, d.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(blocks[j])))
    j++
}
stateSummaries := make([]*statepb.StateSummary, len(blocks))
stateSummaries := make([]*ethpb.StateSummary, len(blocks))

if finalized {
    if chain.CanonicalRoots == nil {
@@ -644,7 +643,7 @@ func TestRPCBeaconBlocksByRange_FilterBlocks(t *testing.T) {
    for i, b := range blocks {
        bRoot, err := b.Block.HashTreeRoot()
        require.NoError(t, err)
        stateSummaries[i] = &statepb.StateSummary{
        stateSummaries[i] = &ethpb.StateSummary{
            Slot: b.Block.Slot,
            Root: bRoot[:],
        }
@@ -685,7 +684,7 @@ func TestRPCBeaconBlocksByRange_FilterBlocks(t *testing.T) {
    require.NoError(t, d.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(blocks[j])))
    j++
}
stateSummaries := make([]*statepb.StateSummary, len(blocks))
stateSummaries := make([]*ethpb.StateSummary, len(blocks))
if finalized {
    if chain.CanonicalRoots == nil {
        chain.CanonicalRoots = map[[32]byte]bool{}
@@ -693,7 +692,7 @@ func TestRPCBeaconBlocksByRange_FilterBlocks(t *testing.T) {
    for i, b := range blocks {
        bRoot, err := b.Block.HashTreeRoot()
        require.NoError(t, err)
        stateSummaries[i] = &statepb.StateSummary{
        stateSummaries[i] = &ethpb.StateSummary{
            Slot: b.Block.Slot,
            Root: bRoot[:],
        }

@@ -71,7 +71,7 @@ func (s *Service) beaconBlocksRootRPCHandler(ctx context.Context, msg interface{
    if blk == nil || blk.IsNil() {
        continue
    }
    if err := s.chunkWriter(stream, blk.Proto()); err != nil {
    if err := s.chunkBlockWriter(stream, blk); err != nil {
        return err
    }
}

@@ -44,6 +44,7 @@ func TestRecentBeaconBlocksRPCHandler_ReturnsBlocks(t *testing.T) {
}

r := &Service{cfg: &Config{P2P: p1, DB: d}, rateLimiter: newRateLimiter(p1)}
r.cfg.Chain = &mock.ChainService{ValidatorsRoot: [32]byte{}}
pcl := protocol.ID(p2p.RPCBlocksByRootTopicV1)
topic := string(pcl)
r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(10000, 10000, false)
@@ -104,6 +105,8 @@ func TestRecentBeaconBlocks_RPCRequestSent(t *testing.T) {
    State:               genesisState,
    FinalizedCheckPoint: finalizedCheckpt,
    Root:                blockARoot[:],
    Genesis:             time.Now(),
    ValidatorsRoot:      [32]byte{},
},
},
slotToPendingBlocks: gcache.New(time.Second, 2*time.Second),

@@ -1,35 +1,56 @@
package sync

import (
    "errors"

    libp2pcore "github.com/libp2p/go-libp2p-core"
    "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
    "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
    "github.com/prysmaticlabs/prysm/beacon-chain/p2p"
    "github.com/prysmaticlabs/prysm/beacon-chain/p2p/encoder"
    eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/beacon-chain/p2p/types"
    "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
    "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
    "github.com/prysmaticlabs/prysm/shared/bytesutil"
    "github.com/prysmaticlabs/prysm/shared/p2putils"
    "github.com/prysmaticlabs/prysm/shared/params"
    "github.com/prysmaticlabs/prysm/shared/version"
)

// chunkWriter writes the given message as a chunked response to the given network
// chunkBlockWriter writes the given message as a chunked response to the given network
// stream.
// response_chunk ::= <result> | <context-bytes> | <encoding-dependent-header> | <encoded-payload>
func (s *Service) chunkWriter(stream libp2pcore.Stream, msg interface{}) error {
func (s *Service) chunkBlockWriter(stream libp2pcore.Stream, blk block.SignedBeaconBlock) error {
    SetStreamWriteDeadline(stream, defaultWriteDuration)
    return WriteChunk(stream, s.cfg.Chain, s.cfg.P2P.Encoding(), msg)
    return WriteBlockChunk(stream, s.cfg.Chain, s.cfg.P2P.Encoding(), blk)
}

// WriteChunk object to stream.
// response_chunk ::= <result> | <context-bytes> | <encoding-dependent-header> | <encoded-payload>
func WriteChunk(stream libp2pcore.Stream, chain blockchain.ChainInfoFetcher, encoding encoder.NetworkEncoding, msg interface{}) error {
func WriteBlockChunk(stream libp2pcore.Stream, chain blockchain.ChainInfoFetcher, encoding encoder.NetworkEncoding, blk block.SignedBeaconBlock) error {
    if _, err := stream.Write([]byte{responseCodeSuccess}); err != nil {
        return err
    }
    if err := writeContextToStream(stream, chain); err != nil {
    obtainedCtx := []byte{}
    switch blk.Version() {
    case version.Phase0:
        valRoot := chain.GenesisValidatorRoot()
        digest, err := p2putils.ForkDigestFromEpoch(params.BeaconConfig().GenesisEpoch, valRoot[:])
        if err != nil {
            return err
        }
        obtainedCtx = digest[:]
    case version.Altair:
        valRoot := chain.GenesisValidatorRoot()
        digest, err := p2putils.ForkDigestFromEpoch(params.BeaconConfig().AltairForkEpoch, valRoot[:])
        if err != nil {
            return err
        }
        obtainedCtx = digest[:]
    }

    if err := writeContextToStream(obtainedCtx, stream, chain); err != nil {
        return err
    }
    _, err := encoding.EncodeWithMaxLength(stream, msg)
    _, err := encoding.EncodeWithMaxLength(stream, blk)
    return err
}

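The grammar in the comment above is worth unpacking: from altair onwards every successful chunk carries 4 context bytes, the fork digest of the epoch the payload belongs to, so a reader can choose the right SSZ type before decoding. A schematic of the bytes on the wire (the digest and header values are placeholders; real digests come from ForkDigestFromEpoch and the header depends on the encoding):

    package main

    import (
        "bytes"
        "fmt"
    )

    func main() {
        // response_chunk ::= <result> | <context-bytes> | <header> | <payload>
        var frame bytes.Buffer
        frame.WriteByte(0x00)                                 // <result>: success
        frame.Write([]byte{0xaf, 0xca, 0xab, 0xa0})           // <context-bytes>: 4-byte fork digest
        frame.Write([]byte{0x84})                             // <header>: e.g. varint payload length
        frame.Write([]byte("ssz-snappy block bytes go here")) // <encoded-payload>
        fmt.Printf("% x\n", frame.Bytes())
    }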
@@ -40,17 +61,13 @@ func ReadChunkedBlock(stream libp2pcore.Stream, chain blockchain.ChainInfoFetche
    if isFirstChunk {
        return readFirstChunkedBlock(stream, chain, p2p)
    }
    blk := &eth.SignedBeaconBlock{}
    if err := readResponseChunk(stream, chain, p2p, blk); err != nil {
        return nil, err
    }
    return wrapper.WrappedPhase0SignedBeaconBlock(blk), nil

    return readResponseChunk(stream, chain, p2p)
}

// readFirstChunkedBlock reads the first chunked block and applies the appropriate deadlines to
// it.
func readFirstChunkedBlock(stream libp2pcore.Stream, chain blockchain.ChainInfoFetcher, p2p p2p.P2P) (block.SignedBeaconBlock, error) {
    blk := &eth.SignedBeaconBlock{}
    code, errMsg, err := ReadStatusCode(stream, p2p.Encoding())
    if err != nil {
        return nil, err
@@ -58,30 +75,64 @@ func readFirstChunkedBlock(stream libp2pcore.Stream, chain blockchain.ChainInfoF
    if code != 0 {
        return nil, errors.New(errMsg)
    }
    // No-op for now with the rpc context.
    _, err = readContextFromStream(stream, chain)
    rpcCtx, err := readContextFromStream(stream, chain)
    if err != nil {
        return nil, err
    }
    blk, err := extractBlockDataType(rpcCtx, chain)
    if err != nil {
        return nil, err
    }
    // This may not work, double check tests.
    err = p2p.Encoding().DecodeWithMaxLength(stream, blk)
    return wrapper.WrappedPhase0SignedBeaconBlock(blk), err
    return blk, err
}

// readResponseChunk reads the response from the stream and decodes it into the
// provided message type.
func readResponseChunk(stream libp2pcore.Stream, chain blockchain.ChainInfoFetcher, p2p p2p.P2P, to interface{}) error {
func readResponseChunk(stream libp2pcore.Stream, chain blockchain.ChainInfoFetcher, p2p p2p.P2P) (block.SignedBeaconBlock, error) {
    SetStreamReadDeadline(stream, respTimeout)
    code, errMsg, err := readStatusCodeNoDeadline(stream, p2p.Encoding())
    if err != nil {
        return err
        return nil, err
    }
    if code != 0 {
        return errors.New(errMsg)
        return nil, errors.New(errMsg)
    }
    // No-op for now with the rpc context.
    _, err = readContextFromStream(stream, chain)
    rpcCtx, err := readContextFromStream(stream, chain)
    if err != nil {
        return err
        return nil, err
    }
    return p2p.Encoding().DecodeWithMaxLength(stream, to)
    blk, err := extractBlockDataType(rpcCtx, chain)
    if err != nil {
        return nil, err
    }
    // This may not work, double check tests.
    err = p2p.Encoding().DecodeWithMaxLength(stream, blk)
    return blk, err
}

func extractBlockDataType(digest []byte, chain blockchain.ChainInfoFetcher) (block.SignedBeaconBlock, error) {
    if len(digest) == 0 {
        bFunc, ok := types.BlockMap[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)]
        if !ok {
            return nil, errors.New("no block type exists for the genesis fork version.")
        }
        return bFunc()
    }
    if len(digest) != digestLength {
        return nil, errors.Errorf("invalid digest returned, wanted a length of %d but received %d", digestLength, len(digest))
    }
    vRoot := chain.GenesisValidatorRoot()
    for k, blkFunc := range types.BlockMap {
        rDigest, err := helpers.ComputeForkDigest(k[:], vRoot[:])
        if err != nil {
            return nil, err
        }
        if rDigest == bytesutil.ToBytes4(digest) {
            return blkFunc()
        }
    }
    return nil, errors.New("no valid digest matched")
}

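extractBlockDataType is effectively a reverse lookup: it recomputes the fork digest for every known fork version and returns the matching block constructor. A simplified sketch of the lookup idea, where the digest is used directly as the map key (the real code recomputes digests from fork versions and the genesis validators root, and returns typed block wrappers rather than strings):

    package main

    import (
        "errors"
        "fmt"
    )

    // Illustrative stand-in for the digest -> constructor registry.
    var blockMap = map[[4]byte]func() (string, error){
        {0x00, 0x00, 0x00, 0x00}: func() (string, error) { return "phase0 block", nil },
        {0x01, 0x00, 0x00, 0x00}: func() (string, error) { return "altair block", nil },
    }

    func lookup(digest [4]byte) (string, error) {
        if f, ok := blockMap[digest]; ok {
            return f()
        }
        return "", errors.New("no valid digest matched")
    }

    func main() {
        blk, err := lookup([4]byte{0x01, 0x00, 0x00, 0x00})
        fmt.Println(blk, err) // altair block <nil>
    }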
beacon-chain/sync/rpc_chunked_response_test.go (new file, 96 lines)
@@ -0,0 +1,96 @@
package sync

import (
    "reflect"
    "testing"

    "github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
    mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
    "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
    ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
    "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
    "github.com/prysmaticlabs/prysm/shared/params"
    "github.com/prysmaticlabs/prysm/shared/testutil/require"
)

func TestExtractBlockDataType(t *testing.T) {
    // Precompute digests
    genDigest, err := helpers.ComputeForkDigest(params.BeaconConfig().GenesisForkVersion, params.BeaconConfig().ZeroHash[:])
    require.NoError(t, err)
    altairDigest, err := helpers.ComputeForkDigest(params.BeaconConfig().AltairForkVersion, params.BeaconConfig().ZeroHash[:])
    require.NoError(t, err)

    type args struct {
        digest []byte
        chain  blockchain.ChainInfoFetcher
    }
    tests := []struct {
        name    string
        args    args
        want    block.SignedBeaconBlock
        wantErr bool
    }{
        {
            name: "no digest",
            args: args{
                digest: []byte{},
                chain:  &mock.ChainService{ValidatorsRoot: [32]byte{}},
            },
            want:    wrapper.WrappedPhase0SignedBeaconBlock(&ethpb.SignedBeaconBlock{}),
            wantErr: false,
        },
        {
            name: "invalid digest",
            args: args{
                digest: []byte{0x00, 0x01},
                chain:  &mock.ChainService{ValidatorsRoot: [32]byte{}},
            },
            want:    nil,
            wantErr: true,
        },
        {
            name: "non existent digest",
            args: args{
                digest: []byte{0x00, 0x01, 0x02, 0x03},
                chain:  &mock.ChainService{ValidatorsRoot: [32]byte{}},
            },
            want:    nil,
            wantErr: true,
        },
        {
            name: "genesis fork version",
            args: args{
                digest: genDigest[:],
                chain:  &mock.ChainService{ValidatorsRoot: [32]byte{}},
            },
            want:    wrapper.WrappedPhase0SignedBeaconBlock(&ethpb.SignedBeaconBlock{}),
            wantErr: false,
        },
        {
            name: "altair fork version",
            args: args{
                digest: altairDigest[:],
                chain:  &mock.ChainService{ValidatorsRoot: [32]byte{}},
            },
            want: func() block.SignedBeaconBlock {
                wsb, err := wrapper.WrappedAltairSignedBeaconBlock(&ethpb.SignedBeaconBlockAltair{Block: &ethpb.BeaconBlockAltair{}})
                require.NoError(t, err)
                return wsb
            }(),
            wantErr: false,
        },
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            got, err := extractBlockDataType(tt.args.digest, tt.args.chain)
            if (err != nil) != tt.wantErr {
                t.Errorf("extractBlockDataType() error = %v, wantErr %v", err, tt.wantErr)
                return
            }
            if !reflect.DeepEqual(got, tt.want) {
                t.Errorf("extractBlockDataType() got = %v, want %v", got, tt.want)
            }
        })
    }
}
@@ -9,6 +9,7 @@ import (
    "github.com/libp2p/go-libp2p-core/network"
    "github.com/libp2p/go-libp2p-core/peer"
    types "github.com/prysmaticlabs/eth2-types"
    "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
    "github.com/prysmaticlabs/prysm/beacon-chain/p2p"
    p2ptypes "github.com/prysmaticlabs/prysm/beacon-chain/p2p/types"
    "github.com/prysmaticlabs/prysm/shared/mputil"
@@ -91,7 +92,11 @@ func (s *Service) sendGoodByeMessage(ctx context.Context, code p2ptypes.RPCGoodb
    ctx, cancel := context.WithTimeout(ctx, respTimeout)
    defer cancel()

    stream, err := s.cfg.P2P.Send(ctx, &code, p2p.RPCGoodByeTopicV1, id)
    topic, err := p2p.TopicFromMessage(p2p.GoodbyeMessageName, helpers.SlotToEpoch(s.cfg.Chain.CurrentSlot()))
    if err != nil {
        return err
    }
    stream, err := s.cfg.P2P.Send(ctx, &code, topic, id)
    if err != nil {
        return err
    }

@@ -10,6 +10,7 @@ import (
    "github.com/libp2p/go-libp2p-core/network"
    "github.com/libp2p/go-libp2p-core/protocol"
    types "github.com/prysmaticlabs/eth2-types"
    mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
    db "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
    p2ptest "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing"
    p2ptypes "github.com/prysmaticlabs/prysm/beacon-chain/p2p/types"
@@ -148,8 +149,9 @@ func TestSendGoodbye_SendsMessage(t *testing.T) {
    d := db.SetupDB(t)
    r := &Service{
        cfg: &Config{
            DB:  d,
            P2P: p1,
            DB:    d,
            P2P:   p1,
            Chain: &mock.ChainService{ValidatorsRoot: [32]byte{}, Genesis: time.Now()},
        },
        rateLimiter: newRateLimiter(p1),
    }
@@ -192,8 +194,9 @@ func TestSendGoodbye_DisconnectWithPeer(t *testing.T) {
    d := db.SetupDB(t)
    r := &Service{
        cfg: &Config{
            DB:  d,
            P2P: p1,
            DB:    d,
            P2P:   p1,
            Chain: &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}},
        },
        rateLimiter: newRateLimiter(p1),
    }

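sendGoodByeMessage now resolves its topic through p2p.TopicFromMessage instead of hard-coding the v1 constant, so the protocol version tracks the node's current epoch. A compressed sketch of what such a resolver does (the fork-epoch comparison mirrors registerRPCHandlers; the message names and topic format are illustrative, not quoted from the p2p package):

    // Illustrative topic resolver: pick a versioned RPC topic by epoch.
    // The real code consults a message -> supported-versions mapping.
    func topicFromMessage(message string, epoch, altairForkEpoch uint64) string {
        version := "/1"
        if epoch >= altairForkEpoch {
            // Goodbye, status and ping stay on v1 even after altair;
            // the block and metadata topics are the ones that move to v2.
            switch message {
            case "beacon_blocks_by_range", "beacon_blocks_by_root", "metadata":
                version = "/2"
            }
        }
        return "/eth2/beacon_chain/req/" + message + version + "/ssz_snappy"
    }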
@@ -6,10 +6,18 @@ import (
    libp2pcore "github.com/libp2p/go-libp2p-core"
    "github.com/libp2p/go-libp2p-core/peer"
    "github.com/pkg/errors"
    "github.com/prysmaticlabs/go-bitfield"
    "github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
    "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
    "github.com/prysmaticlabs/prysm/beacon-chain/p2p"
    "github.com/prysmaticlabs/prysm/beacon-chain/p2p/types"
    pb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/metadata"
    "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
    "github.com/prysmaticlabs/prysm/shared/bytesutil"
    "github.com/prysmaticlabs/prysm/shared/p2putils"
    "github.com/prysmaticlabs/prysm/shared/params"
    "github.com/prysmaticlabs/prysm/shared/version"
)

// metaDataHandler reads the incoming metadata rpc request from the peer.
@@ -21,13 +29,54 @@ func (s *Service) metaDataHandler(_ context.Context, _ interface{}, stream libp2
    }
    s.rateLimiter.add(stream, 1)

    if s.cfg.P2P.Metadata() == nil || s.cfg.P2P.Metadata().IsNil() {
        nilErr := errors.New("nil metadata stored for host")
        resp, err := s.generateErrorResponse(responseCodeServerError, types.ErrGeneric.Error())
        if err != nil {
            log.WithError(err).Debug("Could not generate a response error")
        } else if _, err := stream.Write(resp); err != nil {
            log.WithError(err).Debug("Could not write to stream")
        }
        return nilErr
    }
    _, _, streamVersion, err := p2p.TopicDeconstructor(string(stream.Protocol()))
    if err != nil {
        resp, genErr := s.generateErrorResponse(responseCodeServerError, types.ErrGeneric.Error())
        if genErr != nil {
            log.WithError(genErr).Debug("Could not generate a response error")
        } else if _, wErr := stream.Write(resp); wErr != nil {
            log.WithError(wErr).Debug("Could not write to stream")
        }
        return err
    }
    currMd := s.cfg.P2P.Metadata()
    switch streamVersion {
    case p2p.SchemaVersionV1:
        // We have a v1 metadata object saved locally, so we
        // convert it back to a v0 metadata object.
        if currMd.Version() != version.Phase0 {
            currMd = wrapper.WrappedMetadataV0(
                &pb.MetaDataV0{
                    Attnets:   currMd.AttnetsBitfield(),
                    SeqNumber: currMd.SequenceNumber(),
                })
        }
    case p2p.SchemaVersionV2:
        // We have a v0 metadata object saved locally, so we
        // convert it to a v1 metadata object.
        if currMd.Version() != version.Altair {
            currMd = wrapper.WrappedMetadataV1(
                &pb.MetaDataV1{
                    Attnets:   currMd.AttnetsBitfield(),
                    SeqNumber: currMd.SequenceNumber(),
                    Syncnets:  bitfield.Bitvector4{byte(0x00)},
                })
        }
    }
    if _, err := stream.Write([]byte{responseCodeSuccess}); err != nil {
        return err
    }
    if s.cfg.P2P.Metadata() == nil || s.cfg.P2P.Metadata().IsNil() {
        return errors.New("nil metadata stored for host")
    }
    _, err := s.cfg.P2P.Encoding().EncodeWithMaxLength(stream, s.cfg.P2P.Metadata().InnerObject())
    _, err = s.cfg.P2P.Encoding().EncodeWithMaxLength(stream, currMd)
    if err != nil {
        return err
    }
@@ -39,7 +88,11 @@ func (s *Service) sendMetaDataRequest(ctx context.Context, id peer.ID) (metadata
    ctx, cancel := context.WithTimeout(ctx, respTimeout)
    defer cancel()

    stream, err := s.cfg.P2P.Send(ctx, new(interface{}), p2p.RPCMetaDataTopicV1, id)
    topic, err := p2p.TopicFromMessage(p2p.MetadataMessageName, helpers.SlotToEpoch(s.cfg.Chain.CurrentSlot()))
    if err != nil {
        return nil, err
    }
    stream, err := s.cfg.P2P.Send(ctx, new(interface{}), topic, id)
    if err != nil {
        return nil, err
    }
@@ -52,14 +105,52 @@ func (s *Service) sendMetaDataRequest(ctx context.Context, id peer.ID) (metadata
    s.cfg.P2P.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
    return nil, errors.New(errMsg)
}
// No-op for now with the rpc context.
_, err = readContextFromStream(stream, s.cfg.Chain)
valRoot := s.cfg.Chain.GenesisValidatorRoot()
rpcCtx, err := p2putils.ForkDigestFromEpoch(helpers.SlotToEpoch(s.cfg.Chain.CurrentSlot()), valRoot[:])
if err != nil {
    return nil, err
}
msg := new(pb.MetaDataV0)
msg, err := extractMetaDataType(rpcCtx[:], s.cfg.Chain)
if err != nil {
    return nil, err
}
// Defensive check to ensure valid objects are being sent.
topicVersion := ""
switch msg.Version() {
case version.Phase0:
    topicVersion = p2p.SchemaVersionV1
case version.Altair:
    topicVersion = p2p.SchemaVersionV2
}
if err := validateVersion(topicVersion, stream); err != nil {
    return nil, err
}
if err := s.cfg.P2P.Encoding().DecodeWithMaxLength(stream, msg); err != nil {
    return nil, err
}
return wrapper.WrappedMetadataV0(msg), nil
return msg, nil
}

func extractMetaDataType(digest []byte, chain blockchain.ChainInfoFetcher) (metadata.Metadata, error) {
    if len(digest) == 0 {
        mdFunc, ok := types.MetaDataMap[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)]
        if !ok {
            return nil, errors.New("no metadata type exists for the genesis fork version.")
        }
        return mdFunc(), nil
    }
    if len(digest) != digestLength {
        return nil, errors.Errorf("invalid digest returned, wanted a length of %d but received %d", digestLength, len(digest))
    }
    vRoot := chain.GenesisValidatorRoot()
    for k, mdFunc := range types.MetaDataMap {
        rDigest, err := helpers.ComputeForkDigest(k[:], vRoot[:])
        if err != nil {
            return nil, err
        }
        if rDigest == bytesutil.ToBytes4(digest) {
            return mdFunc(), nil
        }
    }
    return nil, errors.New("no valid digest matched")
}

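The handler above answers with whatever metadata schema the inbound stream's version implies: it down-converts a locally stored v1 (altair) object for v1 requesters, and up-converts a v0 object, adding an empty syncnets bitvector, for v2 requesters. A minimal sketch of that shape-shifting, using plain structs as stand-ins for the pb.MetaDataV0/pb.MetaDataV1 protobuf types:

    package main

    import "fmt"

    type metaV0 struct {
        SeqNumber uint64
        Attnets   []byte
    }

    type metaV1 struct {
        SeqNumber uint64
        Attnets   []byte
        Syncnets  byte
    }

    // toV0 drops the syncnets field; toV1 adds an empty one, mirroring
    // the wrapper conversions in metaDataHandler above.
    func toV0(m metaV1) metaV0 { return metaV0{SeqNumber: m.SeqNumber, Attnets: m.Attnets} }
    func toV1(m metaV0) metaV1 { return metaV1{SeqNumber: m.SeqNumber, Attnets: m.Attnets, Syncnets: 0x00} }

    func main() {
        local := metaV1{SeqNumber: 2, Attnets: []byte{0xff}, Syncnets: 0x01}
        fmt.Printf("%+v\n", toV0(local)) // what a v1 requester receives
        fmt.Printf("%+v\n", toV1(toV0(local)))
    }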
@@ -2,6 +2,7 @@ package sync

import (
"context"
"reflect"
"sync"
"testing"
"time"
@@ -9,11 +10,16 @@ import (
"github.com/kevinms/leakybucket-go"
"github.com/libp2p/go-libp2p-core/network"
"github.com/libp2p/go-libp2p-core/protocol"
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
db "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
p2ptest "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing"
pb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/metadata"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/sszutil"
"github.com/prysmaticlabs/prysm/shared/testutil"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
@@ -37,12 +43,15 @@ func TestMetaDataRPCHandler_ReceivesMetadata(t *testing.T) {
cfg: &Config{
DB: d,
P2P: p1,
Chain: &mock.ChainService{
ValidatorsRoot: [32]byte{},
},
},
rateLimiter: newRateLimiter(p1),
}

// Setup streams
pcl := protocol.ID("/testing")
pcl := protocol.ID(p2p.RPCMetaDataTopicV1)
topic := string(pcl)
r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(1, 1, false)
var wg sync.WaitGroup
@@ -84,16 +93,18 @@ func TestMetadataRPCHandler_SendsMetadata(t *testing.T) {
d := db.SetupDB(t)
r := &Service{
cfg: &Config{
DB: d,
P2P: p1,
DB: d,
P2P: p1,
Chain: &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}},
},
rateLimiter: newRateLimiter(p1),
}

r2 := &Service{
cfg: &Config{
DB: d,
P2P: p2,
DB: d,
P2P: p2,
Chain: &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}},
},
rateLimiter: newRateLimiter(p2),
}
@@ -127,3 +138,168 @@ func TestMetadataRPCHandler_SendsMetadata(t *testing.T) {
t.Error("Peer is disconnected despite receiving a valid ping")
}
}

func TestMetadataRPCHandler_SendsMetadataAltair(t *testing.T) {
params.SetupTestConfigCleanup(t)
bCfg := params.BeaconConfig()
bCfg.AltairForkEpoch = 5
params.OverrideBeaconConfig(bCfg)
params.BeaconConfig().InitializeForkSchedule()

p1 := p2ptest.NewTestP2P(t)
p2 := p2ptest.NewTestP2P(t)
p1.Connect(p2)
assert.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
bitfield := [8]byte{'A', 'B'}
p2.LocalMetadata = wrapper.WrappedMetadataV0(&pb.MetaDataV0{
SeqNumber: 2,
Attnets: bitfield[:],
})

// Set up a head state in the database with data we expect.
d := db.SetupDB(t)
r := &Service{
cfg: &Config{
DB: d,
P2P: p1,
Chain: &mock.ChainService{Genesis: time.Now().Add(-5 * oneEpoch()), ValidatorsRoot: [32]byte{}},
},
rateLimiter: newRateLimiter(p1),
}

r2 := &Service{
cfg: &Config{
DB: d,
P2P: p2,
Chain: &mock.ChainService{Genesis: time.Now().Add(-5 * oneEpoch()), ValidatorsRoot: [32]byte{}},
},
rateLimiter: newRateLimiter(p2),
}

// Setup streams
pcl := protocol.ID(p2p.RPCMetaDataTopicV2 + r.cfg.P2P.Encoding().ProtocolSuffix())
topic := string(pcl)
r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(2, 2, false)
r2.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(2, 2, false)

var wg sync.WaitGroup
wg.Add(1)
p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
defer wg.Done()
err := r2.metaDataHandler(context.Background(), new(interface{}), stream)
assert.NoError(t, err)
})

_, err := r.sendMetaDataRequest(context.Background(), p2.BHost.ID())
assert.NoError(t, err)

if testutil.WaitTimeout(&wg, 1*time.Second) {
t.Fatal("Did not receive stream within 1 sec")
}

// Fix up peer with the correct metadata.
p2.LocalMetadata = wrapper.WrappedMetadataV1(&pb.MetaDataV1{
SeqNumber: 2,
Attnets: bitfield[:],
Syncnets: []byte{0x0},
})

wg.Add(1)
p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
defer wg.Done()
assert.NoError(t, r2.metaDataHandler(context.Background(), new(interface{}), stream))
})

metadata, err := r.sendMetaDataRequest(context.Background(), p2.BHost.ID())
assert.NoError(t, err)

if !sszutil.DeepEqual(metadata.InnerObject(), p2.LocalMetadata.InnerObject()) {
t.Fatalf("MetadataV1 unequal, received %v but wanted %v", metadata, p2.LocalMetadata)
}

if testutil.WaitTimeout(&wg, 1*time.Second) {
t.Fatal("Did not receive stream within 1 sec")
}

conns := p1.BHost.Network().ConnsToPeer(p2.BHost.ID())
if len(conns) == 0 {
t.Error("Peer is disconnected despite receiving a valid ping")
}
}

func TestExtractMetaDataType(t *testing.T) {
// Precompute digests
genDigest, err := helpers.ComputeForkDigest(params.BeaconConfig().GenesisForkVersion, params.BeaconConfig().ZeroHash[:])
require.NoError(t, err)
altairDigest, err := helpers.ComputeForkDigest(params.BeaconConfig().AltairForkVersion, params.BeaconConfig().ZeroHash[:])
require.NoError(t, err)

type args struct {
digest []byte
chain blockchain.ChainInfoFetcher
}
tests := []struct {
name string
args args
want metadata.Metadata
wantErr bool
}{
{
name: "no digest",
args: args{
digest: []byte{},
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
},
want: wrapper.WrappedMetadataV0(&pb.MetaDataV0{}),
wantErr: false,
},
{
name: "invalid digest",
args: args{
digest: []byte{0x00, 0x01},
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
},
want: nil,
wantErr: true,
},
{
name: "non existent digest",
args: args{
digest: []byte{0x00, 0x01, 0x02, 0x03},
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
},
want: nil,
wantErr: true,
},
{
name: "genesis fork version",
args: args{
digest: genDigest[:],
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
},
want: wrapper.WrappedMetadataV0(&pb.MetaDataV0{}),
wantErr: false,
},
{
name: "altair fork version",
args: args{
digest: altairDigest[:],
chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
},
want: wrapper.WrappedMetadataV1(&pb.MetaDataV1{}),
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := extractMetaDataType(tt.args.digest, tt.args.chain)
if (err != nil) != tt.wantErr {
t.Errorf("extractMetaDataType() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("extractMetaDataType() got = %v, want %v", got, tt.want)
}
})
}
}

@@ -9,6 +9,7 @@ import (
libp2pcore "github.com/libp2p/go-libp2p-core"
"github.com/libp2p/go-libp2p-core/peer"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
p2ptypes "github.com/prysmaticlabs/prysm/beacon-chain/p2p/types"
"github.com/prysmaticlabs/prysm/shared/timeutils"
@@ -77,7 +78,11 @@ func (s *Service) sendPingRequest(ctx context.Context, id peer.ID) error {
defer cancel()

metadataSeq := types.SSZUint64(s.cfg.P2P.MetadataSeq())
stream, err := s.cfg.P2P.Send(ctx, &metadataSeq, p2p.RPCPingTopicV1, id)
topic, err := p2p.TopicFromMessage(p2p.PingMessageName, helpers.SlotToEpoch(s.cfg.Chain.CurrentSlot()))
if err != nil {
return err
}
stream, err := s.cfg.P2P.Send(ctx, &metadataSeq, topic, id)
if err != nil {
return err
}
@@ -95,11 +100,6 @@ func (s *Service) sendPingRequest(ctx context.Context, id peer.ID) error {
s.cfg.P2P.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
return errors.New(errMsg)
}
// No-op for now with the rpc context.
_, err = readContextFromStream(stream, s.cfg.Chain)
if err != nil {
return err
}
msg := new(types.SSZUint64)
if err := s.cfg.P2P.Encoding().DecodeWithMaxLength(stream, msg); err != nil {
return err

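The ping hunks repeat the sender-side pattern introduced for metadata: the static v1 topic constant gives way to an epoch-keyed lookup. Condensed, the pattern is:

// Pick the protocol topic appropriate to the current epoch's fork,
// then open the stream against that topic.
topic, err := p2p.TopicFromMessage(p2p.PingMessageName, helpers.SlotToEpoch(s.cfg.Chain.CurrentSlot()))
if err != nil {
	return err
}
stream, err := s.cfg.P2P.Send(ctx, &metadataSeq, topic, id)
if err != nil {
	return err
}
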
@@ -11,7 +11,9 @@ import (
"github.com/libp2p/go-libp2p-core/network"
"github.com/libp2p/go-libp2p-core/protocol"
types "github.com/prysmaticlabs/eth2-types"
mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
db "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
p2ptest "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing"
p2ptypes "github.com/prysmaticlabs/prysm/beacon-chain/p2p/types"
pb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
@@ -50,7 +52,7 @@ func TestPingRPCHandler_ReceivesPing(t *testing.T) {
p1.Peers().SetMetadata(p2.BHost.ID(), p2.LocalMetadata)

// Setup streams
pcl := protocol.ID("/testing")
pcl := protocol.ID(p2p.RPCPingTopicV1)
topic := string(pcl)
r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(1, 1, false)
var wg sync.WaitGroup
@@ -97,8 +99,9 @@ func TestPingRPCHandler_SendsPing(t *testing.T) {
d := db.SetupDB(t)
r := &Service{
cfg: &Config{
DB: d,
P2P: p1,
DB: d,
P2P: p1,
Chain: &mock.ChainService{ValidatorsRoot: [32]byte{}, Genesis: time.Now()},
},
rateLimiter: newRateLimiter(p1),
}
@@ -111,8 +114,9 @@ func TestPingRPCHandler_SendsPing(t *testing.T) {

r2 := &Service{
cfg: &Config{
DB: d,
P2P: p2,
DB: d,
P2P: p2,
Chain: &mock.ChainService{ValidatorsRoot: [32]byte{}, Genesis: time.Now()},
},
rateLimiter: newRateLimiter(p2),
}

@@ -8,6 +8,7 @@ import (
"github.com/pkg/errors"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
p2ptypes "github.com/prysmaticlabs/prysm/beacon-chain/p2p/types"
pb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
@@ -27,7 +28,11 @@ func SendBeaconBlocksByRangeRequest(
ctx context.Context, chain blockchain.ChainInfoFetcher, p2pProvider p2p.P2P, pid peer.ID,
req *pb.BeaconBlocksByRangeRequest, blockProcessor BeaconBlockProcessor,
) ([]block.SignedBeaconBlock, error) {
stream, err := p2pProvider.Send(ctx, req, p2p.RPCBlocksByRangeTopicV1, pid)
topic, err := p2p.TopicFromMessage(p2p.BeaconBlocksByRangeMessageName, helpers.SlotToEpoch(chain.CurrentSlot()))
if err != nil {
return nil, err
}
stream, err := p2pProvider.Send(ctx, req, topic, pid)
if err != nil {
return nil, err
}
@@ -85,7 +90,11 @@ func SendBeaconBlocksByRootRequest(
ctx context.Context, chain blockchain.ChainInfoFetcher, p2pProvider p2p.P2P, pid peer.ID,
req *p2ptypes.BeaconBlockByRootsReq, blockProcessor BeaconBlockProcessor,
) ([]block.SignedBeaconBlock, error) {
stream, err := p2pProvider.Send(ctx, req, p2p.RPCBlocksByRootTopicV1, pid)
topic, err := p2p.TopicFromMessage(p2p.BeaconBlocksByRootsMessageName, helpers.SlotToEpoch(chain.CurrentSlot()))
if err != nil {
return nil, err
}
stream, err := p2pProvider.Send(ctx, req, topic, pid)
if err != nil {
return nil, err
}

@@ -6,10 +6,12 @@ import (
"fmt"
"io"
"testing"
"time"

"github.com/libp2p/go-libp2p-core/mux"
"github.com/libp2p/go-libp2p-core/network"
types "github.com/prysmaticlabs/eth2-types"
mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
p2ptest "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing"
p2pTypes "github.com/prysmaticlabs/prysm/beacon-chain/p2p/types"
@@ -35,7 +37,8 @@ func TestSendRequest_SendBeaconBlocksByRangeRequest(t *testing.T) {
p1.Connect(bogusPeer)

req := &pb.BeaconBlocksByRangeRequest{}
_, err := SendBeaconBlocksByRangeRequest(ctx, nil, p1, bogusPeer.PeerID(), req, nil)
chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
_, err := SendBeaconBlocksByRangeRequest(ctx, chain, p1, bogusPeer.PeerID(), req, nil)
assert.ErrorContains(t, "protocol not supported", err)
})

@@ -80,7 +83,8 @@ func TestSendRequest_SendBeaconBlocksByRangeRequest(t *testing.T) {
if uint64(i) >= uint64(len(knownBlocks)) {
break
}
err = WriteChunk(stream, nil, p2pProvider.Encoding(), knownBlocks[i])
chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
err = WriteBlockChunk(stream, chain, p2pProvider.Encoding(), wrapper.WrappedPhase0SignedBeaconBlock(knownBlocks[i]))
if err != nil && err.Error() != mux.ErrReset.Error() {
require.NoError(t, err)
}
@@ -99,7 +103,8 @@ func TestSendRequest_SendBeaconBlocksByRangeRequest(t *testing.T) {
Count: 128,
Step: 1,
}
blocks, err := SendBeaconBlocksByRangeRequest(ctx, nil, p1, p2.PeerID(), req, nil)
chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
blocks, err := SendBeaconBlocksByRangeRequest(ctx, chain, p1, p2.PeerID(), req, nil)
assert.NoError(t, err)
assert.Equal(t, 128, len(blocks))
})
@@ -117,7 +122,8 @@ func TestSendRequest_SendBeaconBlocksByRangeRequest(t *testing.T) {
Step: 1,
}
blocksFromProcessor := make([]block.SignedBeaconBlock, 0)
blocks, err := SendBeaconBlocksByRangeRequest(ctx, nil, p1, p2.PeerID(), req, func(block block.SignedBeaconBlock) error {
chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
blocks, err := SendBeaconBlocksByRangeRequest(ctx, chain, p1, p2.PeerID(), req, func(block block.SignedBeaconBlock) error {
blocksFromProcessor = append(blocksFromProcessor, block)
return nil
})
@@ -139,7 +145,8 @@ func TestSendRequest_SendBeaconBlocksByRangeRequest(t *testing.T) {
Step: 1,
}
errFromProcessor := errors.New("processor error")
_, err := SendBeaconBlocksByRangeRequest(ctx, nil, p1, p2.PeerID(), req, func(block block.SignedBeaconBlock) error {
chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
_, err := SendBeaconBlocksByRangeRequest(ctx, chain, p1, p2.PeerID(), req, func(block block.SignedBeaconBlock) error {
return errFromProcessor
})
assert.ErrorContains(t, errFromProcessor.Error(), err)
@@ -157,7 +164,8 @@ func TestSendRequest_SendBeaconBlocksByRangeRequest(t *testing.T) {
Count: 128,
Step: 1,
}
blocks, err := SendBeaconBlocksByRangeRequest(ctx, nil, p1, p2.PeerID(), req, nil)
chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
blocks, err := SendBeaconBlocksByRangeRequest(ctx, chain, p1, p2.PeerID(), req, nil)
assert.NoError(t, err)
assert.Equal(t, 128, len(blocks))

@@ -168,7 +176,7 @@ func TestSendRequest_SendBeaconBlocksByRangeRequest(t *testing.T) {
cfg.MaxRequestBlocks = maxRequestBlocks
params.OverrideBeaconNetworkConfig(cfg)
}()
blocks, err = SendBeaconBlocksByRangeRequest(ctx, nil, p1, p2.PeerID(), req, func(block block.SignedBeaconBlock) error {
blocks, err = SendBeaconBlocksByRangeRequest(ctx, chain, p1, p2.PeerID(), req, func(block block.SignedBeaconBlock) error {
// Since ssz checks the boundaries, and doesn't normally allow to send requests bigger than
// the max request size, we are updating max request size dynamically. Even when updated dynamically,
// no more than max request size of blocks is expected on return.
@@ -199,7 +207,8 @@ func TestSendRequest_SendBeaconBlocksByRangeRequest(t *testing.T) {
Count: 128,
Step: 1,
}
blocks, err := SendBeaconBlocksByRangeRequest(ctx, nil, p1, p2.PeerID(), req, nil)
chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
blocks, err := SendBeaconBlocksByRangeRequest(ctx, chain, p1, p2.PeerID(), req, nil)
assert.ErrorContains(t, expectedErr.Error(), err)
assert.Equal(t, 0, len(blocks))
})
@@ -227,7 +236,8 @@ func TestSendRequest_SendBeaconBlocksByRangeRequest(t *testing.T) {
if uint64(i) >= uint64(len(knownBlocks)) {
break
}
err = WriteChunk(stream, nil, p2.Encoding(), knownBlocks[i])
chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
err = WriteBlockChunk(stream, chain, p2.Encoding(), wrapper.WrappedPhase0SignedBeaconBlock(knownBlocks[i]))
if err != nil && err.Error() != mux.ErrReset.Error() {
require.NoError(t, err)
}
@@ -239,7 +249,8 @@ func TestSendRequest_SendBeaconBlocksByRangeRequest(t *testing.T) {
Count: 128,
Step: 1,
}
blocks, err := SendBeaconBlocksByRangeRequest(ctx, nil, p1, p2.PeerID(), req, nil)
chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
blocks, err := SendBeaconBlocksByRangeRequest(ctx, chain, p1, p2.PeerID(), req, nil)
assert.ErrorContains(t, ErrInvalidFetchedData.Error(), err)
assert.Equal(t, 0, len(blocks))

@@ -268,7 +279,8 @@ func TestSendRequest_SendBeaconBlocksByRangeRequest(t *testing.T) {
if uint64(i) >= uint64(len(knownBlocks)) {
break
}
err = WriteChunk(stream, nil, p2.Encoding(), knownBlocks[i])
chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
err = WriteBlockChunk(stream, chain, p2.Encoding(), wrapper.WrappedPhase0SignedBeaconBlock(knownBlocks[i]))
if err != nil && err.Error() != mux.ErrReset.Error() {
require.NoError(t, err)
}
@@ -280,7 +292,8 @@ func TestSendRequest_SendBeaconBlocksByRangeRequest(t *testing.T) {
Count: 128,
Step: 10,
}
blocks, err := SendBeaconBlocksByRangeRequest(ctx, nil, p1, p2.PeerID(), req, nil)
chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
blocks, err := SendBeaconBlocksByRangeRequest(ctx, chain, p1, p2.PeerID(), req, nil)
assert.ErrorContains(t, ErrInvalidFetchedData.Error(), err)
assert.Equal(t, 0, len(blocks))

@@ -309,7 +322,8 @@ func TestSendRequest_SendBeaconBlocksByRootRequest(t *testing.T) {
p1.Connect(bogusPeer)

req := &p2pTypes.BeaconBlockByRootsReq{}
_, err := SendBeaconBlocksByRootRequest(ctx, nil, p1, bogusPeer.PeerID(), req, nil)
chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
_, err := SendBeaconBlocksByRootRequest(ctx, chain, p1, bogusPeer.PeerID(), req, nil)
assert.ErrorContains(t, "protocol not supported", err)
})

@@ -356,7 +370,8 @@ func TestSendRequest_SendBeaconBlocksByRootRequest(t *testing.T) {
p2.SetStreamHandler(pcl, knownBlocksProvider(p2, nil))

req := &p2pTypes.BeaconBlockByRootsReq{knownRoots[0], knownRoots[1]}
blocks, err := SendBeaconBlocksByRootRequest(ctx, nil, p1, p2.PeerID(), req, nil)
chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
blocks, err := SendBeaconBlocksByRootRequest(ctx, chain, p1, p2.PeerID(), req, nil)
assert.NoError(t, err)
assert.Equal(t, 2, len(blocks))
})
@@ -370,7 +385,8 @@ func TestSendRequest_SendBeaconBlocksByRootRequest(t *testing.T) {
// No error from block processor.
req := &p2pTypes.BeaconBlockByRootsReq{knownRoots[0], knownRoots[1]}
blocksFromProcessor := make([]block.SignedBeaconBlock, 0)
blocks, err := SendBeaconBlocksByRootRequest(ctx, nil, p1, p2.PeerID(), req, func(block block.SignedBeaconBlock) error {
chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
blocks, err := SendBeaconBlocksByRootRequest(ctx, chain, p1, p2.PeerID(), req, func(block block.SignedBeaconBlock) error {
blocksFromProcessor = append(blocksFromProcessor, block)
return nil
})
@@ -388,7 +404,8 @@ func TestSendRequest_SendBeaconBlocksByRootRequest(t *testing.T) {
// Send error from block processor.
req := &p2pTypes.BeaconBlockByRootsReq{knownRoots[0], knownRoots[1]}
errFromProcessor := errors.New("processor error")
_, err := SendBeaconBlocksByRootRequest(ctx, nil, p1, p2.PeerID(), req, func(block block.SignedBeaconBlock) error {
chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
_, err := SendBeaconBlocksByRootRequest(ctx, chain, p1, p2.PeerID(), req, func(block block.SignedBeaconBlock) error {
return errFromProcessor
})
assert.ErrorContains(t, errFromProcessor.Error(), err)
@@ -402,7 +419,8 @@ func TestSendRequest_SendBeaconBlocksByRootRequest(t *testing.T) {

// No cap on max roots.
req := &p2pTypes.BeaconBlockByRootsReq{knownRoots[0], knownRoots[1], knownRoots[2], knownRoots[3]}
blocks, err := SendBeaconBlocksByRootRequest(ctx, nil, p1, p2.PeerID(), req, nil)
chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
blocks, err := SendBeaconBlocksByRootRequest(ctx, chain, p1, p2.PeerID(), req, nil)
assert.NoError(t, err)
assert.Equal(t, 4, len(blocks))

@@ -413,7 +431,7 @@ func TestSendRequest_SendBeaconBlocksByRootRequest(t *testing.T) {
cfg.MaxRequestBlocks = maxRequestBlocks
params.OverrideBeaconNetworkConfig(cfg)
}()
blocks, err = SendBeaconBlocksByRootRequest(ctx, nil, p1, p2.PeerID(), req, func(block block.SignedBeaconBlock) error {
blocks, err = SendBeaconBlocksByRootRequest(ctx, chain, p1, p2.PeerID(), req, func(block block.SignedBeaconBlock) error {
// Since ssz checks the boundaries, and doesn't normally allow to send requests bigger than
// the max request size, we are updating max request size dynamically. Even when updated dynamically,
// no more than max request size of blocks is expected on return.
@@ -440,7 +458,8 @@ func TestSendRequest_SendBeaconBlocksByRootRequest(t *testing.T) {
}))

req := &p2pTypes.BeaconBlockByRootsReq{knownRoots[0], knownRoots[1], knownRoots[2], knownRoots[3]}
blocks, err := SendBeaconBlocksByRootRequest(ctx, nil, p1, p2.PeerID(), req, nil)
chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
blocks, err := SendBeaconBlocksByRootRequest(ctx, chain, p1, p2.PeerID(), req, nil)
assert.ErrorContains(t, expectedErr.Error(), err)
assert.Equal(t, 0, len(blocks))
})
@@ -460,7 +479,8 @@ func TestSendRequest_SendBeaconBlocksByRootRequest(t *testing.T) {
}))

req := &p2pTypes.BeaconBlockByRootsReq{knownRoots[0], knownRoots[1], knownRoots[2], knownRoots[3]}
blocks, err := SendBeaconBlocksByRootRequest(ctx, nil, p1, p2.PeerID(), req, nil)
chain := &mock.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
blocks, err := SendBeaconBlocksByRootRequest(ctx, chain, p1, p2.PeerID(), req, nil)
assert.NoError(t, err)
assert.Equal(t, 3, len(blocks))
})

@@ -128,7 +128,7 @@ func (s *Service) sendRPCStatusRequest(ctx context.Context, id peer.ID) error {
return err
}

forkDigest, err := s.forkDigest()
forkDigest, err := s.currentForkDigest()
if err != nil {
return err
}
@@ -139,7 +143,11 @@ func (s *Service) sendRPCStatusRequest(ctx context.Context, id peer.ID) error {
HeadRoot: headRoot,
HeadSlot: s.cfg.Chain.HeadSlot(),
}
stream, err := s.cfg.P2P.Send(ctx, resp, p2p.RPCStatusTopicV1, id)
topic, err := p2p.TopicFromMessage(p2p.StatusMessageName, helpers.SlotToEpoch(s.cfg.Chain.CurrentSlot()))
if err != nil {
return err
}
stream, err := s.cfg.P2P.Send(ctx, resp, topic, id)
if err != nil {
return err
}
@@ -154,11 +158,6 @@ func (s *Service) sendRPCStatusRequest(ctx context.Context, id peer.ID) error {
s.cfg.P2P.Peers().Scorers().BadResponsesScorer().Increment(id)
return errors.New(errMsg)
}
// No-op for now with the rpc context.
_, err = readContextFromStream(stream, s.cfg.Chain)
if err != nil {
return err
}
msg := &pb.Status{}
if err := s.cfg.P2P.Encoding().DecodeWithMaxLength(stream, msg); err != nil {
return err
@@ -258,7 +257,7 @@ func (s *Service) respondWithStatus(ctx context.Context, stream network.Stream)
return err
}

forkDigest, err := s.forkDigest()
forkDigest, err := s.currentForkDigest()
if err != nil {
return err
}
@@ -278,7 +277,7 @@ func (s *Service) respondWithStatus(ctx context.Context, stream network.Stream)
}

func (s *Service) validateStatusMessage(ctx context.Context, msg *pb.Status) error {
forkDigest, err := s.forkDigest()
forkDigest, err := s.currentForkDigest()
if err != nil {
return err
}

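The renamed currentForkDigest is not itself shown in these hunks; judging from the ForkDigestFromEpoch calls elsewhere in this diff, its shape is presumably along these lines (an assumption, not the actual method body):

// Assumed shape of currentForkDigest: the digest for the epoch the
// chain is currently in, derived from the genesis validator root.
func (s *Service) currentForkDigest() ([4]byte, error) {
	genRoot := s.cfg.Chain.GenesisValidatorRoot()
	return p2putils.ForkDigestFromEpoch(helpers.SlotToEpoch(s.cfg.Chain.CurrentSlot()), genRoot[:])
}
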
@@ -15,6 +15,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
"github.com/prysmaticlabs/prysm/beacon-chain/db/kv"
testingDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers"
p2ptest "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing"
p2ptypes "github.com/prysmaticlabs/prysm/beacon-chain/p2p/types"
@@ -22,7 +23,6 @@ import (
mockSync "github.com/prysmaticlabs/prysm/beacon-chain/sync/initial-sync/testing"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
pb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
statepb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
p2pWrapper "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
@@ -46,7 +46,7 @@ func TestStatusRPCHandler_Disconnects_OnForkVersionMismatch(t *testing.T) {
cfg: &Config{
P2P: p1,
Chain: &mock.ChainService{
Fork: &statepb.Fork{
Fork: &ethpb.Fork{
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
},
@@ -61,7 +61,7 @@ func TestStatusRPCHandler_Disconnects_OnForkVersionMismatch(t *testing.T) {
},
rateLimiter: newRateLimiter(p1),
}
pcl := protocol.ID("/testing")
pcl := protocol.ID(p2p.RPCStatusTopicV1)
topic := string(pcl)
r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(1, 1, false)

@@ -114,7 +114,7 @@ func TestStatusRPCHandler_ConnectsOnGenesis(t *testing.T) {
cfg: &Config{
P2P: p1,
Chain: &mock.ChainService{
Fork: &statepb.Fork{
Fork: &ethpb.Fork{
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
},
@@ -129,7 +129,7 @@ func TestStatusRPCHandler_ConnectsOnGenesis(t *testing.T) {
},
rateLimiter: newRateLimiter(p1),
}
pcl := protocol.ID("/testing")
pcl := protocol.ID(p2p.RPCStatusTopicV1)
topic := string(pcl)
r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(1, 1, false)

@@ -145,7 +145,7 @@ func TestStatusRPCHandler_ConnectsOnGenesis(t *testing.T) {

stream1, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl)
require.NoError(t, err)
digest, err := r.forkDigest()
digest, err := r.currentForkDigest()
require.NoError(t, err)

err = r.statusRPCHandler(context.Background(), &pb.Status{ForkDigest: digest[:], FinalizedRoot: params.BeaconConfig().ZeroHash[:]}, stream1)
@@ -195,7 +195,7 @@ func TestStatusRPCHandler_ReturnsHelloMessage(t *testing.T) {
State: genesisState,
FinalizedCheckPoint: finalizedCheckpt,
Root: headRoot[:],
Fork: &statepb.Fork{
Fork: &ethpb.Fork{
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
},
@@ -206,11 +206,11 @@ func TestStatusRPCHandler_ReturnsHelloMessage(t *testing.T) {
},
rateLimiter: newRateLimiter(p1),
}
digest, err := r.forkDigest()
digest, err := r.currentForkDigest()
require.NoError(t, err)

// Setup streams
pcl := protocol.ID("/testing")
pcl := protocol.ID(p2p.RPCStatusTopicV1)
topic := string(pcl)
r.rateLimiter.limiterMap[topic] = leakybucket.NewCollector(1, 1, false)
var wg sync.WaitGroup
@@ -263,7 +263,7 @@ func TestHandshakeHandlers_Roundtrip(t *testing.T) {
Attnets: bytesutil.PadTo([]byte{'C', 'D'}, 8),
})

st, err := v1.InitializeFromProto(&statepb.BeaconState{
st, err := v1.InitializeFromProto(&ethpb.BeaconState{
Slot: 5,
})
require.NoError(t, err)
@@ -279,7 +279,7 @@ func TestHandshakeHandlers_Roundtrip(t *testing.T) {
Chain: &mock.ChainService{
State: st,
FinalizedCheckPoint: &ethpb.Checkpoint{Epoch: 0, Root: finalizedRoot[:]},
Fork: &statepb.Fork{
Fork: &ethpb.Fork{
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
},
@@ -292,7 +292,7 @@ func TestHandshakeHandlers_Roundtrip(t *testing.T) {
ctx: context.Background(),
rateLimiter: newRateLimiter(p1),
}
p1.Digest, err = r.forkDigest()
p1.Digest, err = r.currentForkDigest()
require.NoError(t, err)

r2 := &Service{
@@ -304,7 +304,7 @@ func TestHandshakeHandlers_Roundtrip(t *testing.T) {
},
rateLimiter: newRateLimiter(p2),
}
p2.Digest, err = r.forkDigest()
p2.Digest, err = r.currentForkDigest()
require.NoError(t, err)

r.Start()
@@ -414,7 +414,7 @@ func TestStatusRPCRequest_RequestSent(t *testing.T) {
State: genesisState,
FinalizedCheckPoint: finalizedCheckpt,
Root: headRoot[:],
Fork: &statepb.Fork{
Fork: &ethpb.Fork{
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
},
@@ -436,7 +436,7 @@ func TestStatusRPCRequest_RequestSent(t *testing.T) {
defer wg.Done()
out := &pb.Status{}
assert.NoError(t, r.cfg.P2P.Encoding().DecodeWithMaxLength(stream, out))
digest, err := r.forkDigest()
digest, err := r.currentForkDigest()
assert.NoError(t, err)
expected := &pb.Status{
ForkDigest: digest[:],
@@ -496,7 +496,7 @@ func TestStatusRPCRequest_FinalizedBlockExists(t *testing.T) {
State: genesisState,
FinalizedCheckPoint: finalizedCheckpt,
Root: headRoot[:],
Fork: &statepb.Fork{
Fork: &ethpb.Fork{
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
},
@@ -515,7 +515,7 @@ func TestStatusRPCRequest_FinalizedBlockExists(t *testing.T) {
State: genesisState,
FinalizedCheckPoint: finalizedCheckpt,
Root: headRoot[:],
Fork: &statepb.Fork{
Fork: &ethpb.Fork{
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
},
@@ -567,11 +567,11 @@ func TestStatusRPCRequest_FinalizedBlockSkippedSlots(t *testing.T) {
blocksTillHead := makeBlocks(t, 1, 1000, genRoot)
require.NoError(t, db.SaveBlocks(context.Background(), blocksTillHead))

stateSummaries := make([]*statepb.StateSummary, len(blocksTillHead))
stateSummaries := make([]*ethpb.StateSummary, len(blocksTillHead))
for i, b := range blocksTillHead {
bRoot, err := b.Block().HashTreeRoot()
require.NoError(t, err)
stateSummaries[i] = &statepb.StateSummary{
stateSummaries[i] = &ethpb.StateSummary{
Slot: b.Block().Slot(),
Root: bRoot[:],
}
@@ -669,7 +669,7 @@ func TestStatusRPCRequest_FinalizedBlockSkippedSlots(t *testing.T) {
State: nState,
FinalizedCheckPoint: remoteFinalizedChkpt,
Root: rHeadRoot[:],
Fork: &statepb.Fork{
Fork: &ethpb.Fork{
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
},
@@ -688,7 +688,7 @@ func TestStatusRPCRequest_FinalizedBlockSkippedSlots(t *testing.T) {
State: nState,
FinalizedCheckPoint: finalizedCheckpt,
Root: headRoot[:],
Fork: &statepb.Fork{
Fork: &ethpb.Fork{
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
},
@@ -756,7 +756,7 @@ func TestStatusRPCRequest_BadPeerHandshake(t *testing.T) {
State: genesisState,
FinalizedCheckPoint: finalizedCheckpt,
Root: headRoot[:],
Fork: &statepb.Fork{
Fork: &ethpb.Fork{
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
},
@@ -835,7 +835,7 @@ func TestStatusRPC_ValidGenesisMessage(t *testing.T) {
State: genesisState,
FinalizedCheckPoint: finalizedCheckpt,
Root: headRoot[:],
Fork: &statepb.Fork{
Fork: &ethpb.Fork{
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
},
@@ -845,7 +845,7 @@ func TestStatusRPC_ValidGenesisMessage(t *testing.T) {
},
ctx: context.Background(),
}
digest, err := r.forkDigest()
digest, err := r.currentForkDigest()
require.NoError(t, err)
// There should be no error for a status message
// with a genesis checkpoint.

@@ -23,6 +23,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/db"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/slashings"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/synccommittee"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
@@ -42,6 +43,8 @@ const rangeLimit = 1024
const seenBlockSize = 1000
const seenUnaggregatedAttSize = 20000
const seenAggregatedAttSize = 1024
const seenSyncMsgSize = 1000
const seenSyncSize = 300
const seenExitSize = 100
const seenProposerSlashingSize = 100
const badBlockSize = 1000
@@ -63,6 +66,7 @@ type Config struct {
AttPool attestations.Pool
ExitPool voluntaryexits.PoolManager
SlashingPool slashings.PoolManager
SyncCommsPool synccommittee.Pool
Chain blockchainService
InitialSync Checker
StateNotifier statefeed.Notifier
@@ -92,6 +96,7 @@ type Service struct {
slotToPendingBlocks *gcache.Cache
seenPendingBlocks map[[32]byte]bool
blkRootToPendingAtts map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof
subHandler *subTopicHandler
pendingAttsLock sync.RWMutex
pendingQueueLock sync.RWMutex
chainStarted *abool.AtomicBool
@@ -109,6 +114,10 @@ type Service struct {
seenProposerSlashingCache *lru.Cache
seenAttesterSlashingLock sync.RWMutex
seenAttesterSlashingCache map[uint64]bool
seenSyncContributionLock sync.RWMutex
seenSyncContributionCache *lru.Cache
seenSyncMessageLock sync.RWMutex
seenSyncMessageCache *lru.Cache
badBlockCache *lru.Cache
badBlockLock sync.RWMutex
}
@@ -127,6 +136,7 @@ func NewService(ctx context.Context, cfg *Config) *Service {
slotToPendingBlocks: c,
seenPendingBlocks: make(map[[32]byte]bool),
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
subHandler: newSubTopicHandler(),
rateLimiter: rLimiter,
}

@@ -171,9 +181,7 @@ func (s *Service) Stop() error {
}
// Deregister Topic Subscribers.
for _, t := range s.cfg.P2P.PubSub().GetTopics() {
if err := s.cfg.P2P.PubSub().UnregisterTopicValidator(t); err != nil {
log.Errorf("Could not successfully unregister for topic %s: %v", t, err)
}
s.unSubscribeFromTopic(t)
}
defer s.cancel()
return nil
@@ -205,6 +213,14 @@ func (s *Service) initCaches() error {
if err != nil {
return err
}
syncMsgCache, err := lru.New(seenSyncMsgSize)
if err != nil {
return err
}
syncContrCache, err := lru.New(seenSyncSize)
if err != nil {
return err
}
exitCache, err := lru.New(seenExitSize)
if err != nil {
return err
@@ -220,6 +236,8 @@ func (s *Service) initCaches() error {
s.seenBlockCache = blkCache
s.seenAggregatedAttestationCache = aggregatedAttCache
s.seenUnAggregatedAttestationCache = unAggregatedAttCache
s.seenSyncContributionCache = syncContrCache
s.seenSyncMessageCache = syncMsgCache
s.seenExitCache = exitCache
s.seenAttesterSlashingCache = make(map[uint64]bool)
s.seenProposerSlashingCache = proposerSlashingCache
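The two new caches follow the package's existing seen-cache pattern: a bounded LRU guarded by an RWMutex, consulted before gossip is processed further. A hypothetical read-side accessor (illustrative only; the real validators derive their own cache keys):

// Hypothetical helper showing how the new sync-message cache would
// typically be consulted; not code from this diff.
func (s *Service) hasSeenSyncMessage(key [32]byte) bool {
	s.seenSyncMessageLock.RLock()
	defer s.seenSyncMessageLock.RUnlock()
	_, seen := s.seenSyncMessageCache.Get(key)
	return seen
}
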
@@ -263,7 +281,13 @@ func (s *Service) registerHandlers() {
return
}
// Register respective pubsub handlers at state synced event.
s.registerSubscribers()
digest, err := s.currentForkDigest()
if err != nil {
log.WithError(err).Error("Could not retrieve current fork digest")
}
currentEpoch := helpers.SlotToEpoch(helpers.CurrentSlot(uint64(s.cfg.Chain.GenesisTime().Unix())))
s.registerSubscribers(currentEpoch, digest)
go s.forkWatcher()
return
}
case <-s.ctx.Done():

@@ -13,7 +13,7 @@ import (
p2ptest "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
mockSync "github.com/prysmaticlabs/prysm/beacon-chain/sync/initial-sync/testing"
statepb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/abool"
"github.com/prysmaticlabs/prysm/shared/bls"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
@@ -23,7 +23,7 @@ import (
)

func TestService_StatusZeroEpoch(t *testing.T) {
bState, err := v1.InitializeFromProto(&statepb.BeaconState{Slot: 0})
bState, err := v1.InitializeFromProto(&ethpb.BeaconState{Slot: 0})
require.NoError(t, err)
r := &Service{
cfg: &Config{
@@ -135,6 +135,7 @@ func TestSyncHandlers_WaitTillSynced(t *testing.T) {
InitialSync: &mockSync.Sync{IsSyncing: false},
},
chainStarted: abool.New(),
subHandler: newSubTopicHandler(),
}

topic := "/eth2/%x/beacon_block"
@@ -157,7 +158,7 @@ func TestSyncHandlers_WaitTillSynced(t *testing.T) {
msg := testutil.NewBeaconBlock()
msg.Block.ParentRoot = testutil.Random32Bytes(t)
msg.Signature = sk.Sign([]byte("data")).Marshal()
p2p.Digest, err = r.forkDigest()
p2p.Digest, err = r.currentForkDigest()
r.cfg.BlockNotifier = chainService.BlockNotifier()
blockChan := make(chan feed.Event, 1)
sub := r.cfg.BlockNotifier.BlockFeed().Subscribe(blockChan)
@@ -213,6 +214,7 @@ func TestSyncService_StopCleanly(t *testing.T) {
InitialSync: &mockSync.Sync{IsSyncing: false},
},
chainStarted: abool.New(),
subHandler: newSubTopicHandler(),
}

go r.registerHandlers()
@@ -228,7 +230,7 @@ func TestSyncService_StopCleanly(t *testing.T) {
}

var err error
p2p.Digest, err = r.forkDigest()
p2p.Digest, err = r.currentForkDigest()
require.NoError(t, err)

// wait for chainstart to be sent

@@ -11,8 +11,11 @@ import (
"github.com/libp2p/go-libp2p-core/peer"
pubsub "github.com/libp2p/go-libp2p-pubsub"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
pb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/messagehandler"
"github.com/prysmaticlabs/prysm/shared/p2putils"
@@ -41,61 +44,104 @@ func (s *Service) noopValidator(_ context.Context, _ peer.ID, msg *pubsub.Messag
}

// Register PubSub subscribers
func (s *Service) registerSubscribers() {
func (s *Service) registerSubscribers(epoch types.Epoch, digest [4]byte) {
s.subscribe(
p2p.BlockSubnetTopicFormat,
s.validateBeaconBlockPubSub,
s.beaconBlockSubscriber,
digest,
)
s.subscribe(
p2p.AggregateAndProofSubnetTopicFormat,
s.validateAggregateAndProof,
s.beaconAggregateProofSubscriber,
digest,
)
s.subscribe(
p2p.ExitSubnetTopicFormat,
s.validateVoluntaryExit,
s.voluntaryExitSubscriber,
digest,
)
s.subscribe(
p2p.ProposerSlashingSubnetTopicFormat,
s.validateProposerSlashing,
s.proposerSlashingSubscriber,
digest,
)
s.subscribe(
p2p.AttesterSlashingSubnetTopicFormat,
s.validateAttesterSlashing,
s.attesterSlashingSubscriber,
digest,
)
if flags.Get().SubscribeToAllSubnets {
s.subscribeStaticWithSubnets(
"/eth2/%x/beacon_attestation_%d",
p2p.AttestationSubnetTopicFormat,
s.validateCommitteeIndexBeaconAttestation, /* validator */
s.committeeIndexBeaconAttestationSubscriber, /* message handler */
digest,
)
} else {
s.subscribeDynamicWithSubnets(
"/eth2/%x/beacon_attestation_%d",
p2p.AttestationSubnetTopicFormat,
s.validateCommitteeIndexBeaconAttestation, /* validator */
s.committeeIndexBeaconAttestationSubscriber, /* message handler */
digest,
)
}
// Altair Fork Version
if epoch >= params.BeaconConfig().AltairForkEpoch {
s.subscribe(
p2p.SyncContributionAndProofSubnetTopicFormat,
s.validateSyncContributionAndProof,
s.syncContributionAndProofSubscriber,
digest,
)
if flags.Get().SubscribeToAllSubnets {
s.subscribeStaticWithSyncSubnets(
p2p.SyncCommitteeSubnetTopicFormat,
s.validateSyncCommitteeMessage, /* validator */
s.syncCommitteeMessageSubscriber, /* message handler */
digest,
)
} else {
s.subscribeDynamicWithSyncSubnets(
p2p.SyncCommitteeSubnetTopicFormat,
s.validateSyncCommitteeMessage, /* validator */
s.syncCommitteeMessageSubscriber, /* message handler */
digest,
)
}
}
}

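The Altair gate above reduces to one comparison: nothing sync-committee-related is registered until the node's current epoch reaches the fork epoch, and every registration is keyed to the digest passed in:

// Altair-only gossip registration, as added above.
if epoch >= params.BeaconConfig().AltairForkEpoch {
	s.subscribe(
		p2p.SyncContributionAndProofSubnetTopicFormat,
		s.validateSyncContributionAndProof,
		s.syncContributionAndProofSubscriber,
		digest,
	)
}
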
// subscribe to a given topic with a given validator and subscription handler.
// The base protobuf message is used to initialize new messages for decoding.
func (s *Service) subscribe(topic string, validator pubsub.ValidatorEx, handle subHandler) *pubsub.Subscription {
base := p2p.GossipTopicMappings(topic, 0)
func (s *Service) subscribe(topic string, validator pubsub.ValidatorEx, handle subHandler, digest [4]byte) *pubsub.Subscription {
genRoot := s.cfg.Chain.GenesisValidatorRoot()
_, e, err := p2putils.RetrieveForkDataFromDigest(digest, genRoot[:])
if err != nil {
panic(err)
}
base := p2p.GossipTopicMappings(topic, e)
if base == nil {
panic(fmt.Sprintf("%s is not mapped to any message in GossipTopicMappings", topic))
}
return s.subscribeWithBase(s.addDigestToTopic(topic), validator, handle)
return s.subscribeWithBase(s.addDigestToTopic(topic, digest), validator, handle)
}

func (s *Service) subscribeWithBase(topic string, validator pubsub.ValidatorEx, handle subHandler) *pubsub.Subscription {
topic += s.cfg.P2P.Encoding().ProtocolSuffix()
log := log.WithField("topic", topic)

// Do not resubscribe already seen subscriptions.
ok := s.subHandler.topicExists(topic)
if ok {
log.Debugf("Provided topic already has an active subscription running: %s", topic)
return nil
}

if err := s.cfg.P2P.PubSub().RegisterTopicValidator(s.wrapAndReportValidation(topic, validator)); err != nil {
log.WithError(err).Error("Could not register validator for topic")
return nil
@@ -109,6 +155,7 @@ func (s *Service) subscribeWithBase(topic string, validator pubsub.ValidatorEx,
log.WithError(err).Error("Could not subscribe topic")
return nil
}
s.subHandler.addTopic(sub.Topic(), sub)

// Pipeline decodes the incoming subscription data, runs the validation, and handles the
// message.
@@ -166,6 +213,7 @@ func (s *Service) subscribeWithBase(topic string, validator pubsub.ValidatorEx,
}

go messageLoop()
log.WithField("topic", topic).Info("Subscribed to topic")
return sub
}

@@ -184,26 +232,48 @@ func (s *Service) wrapAndReportValidation(topic string, v pubsub.ValidatorEx) (s
}
// Ignore any messages received before chainstart.
if s.chainStarted.IsNotSet() {
messageFailedValidationCounter.WithLabelValues(topic).Inc()
messageIgnoredValidationCounter.WithLabelValues(topic).Inc()
return pubsub.ValidationIgnore
}
retDigest, err := p2p.ExtractGossipDigest(topic)
if err != nil {
log.WithField("topic", topic).Errorf("Invalid topic format of pubsub topic: %v", err)
return pubsub.ValidationIgnore
}
currDigest, err := s.currentForkDigest()
if err != nil {
log.WithField("topic", topic).Errorf("Unable to retrieve fork data: %v", err)
return pubsub.ValidationIgnore
}
if currDigest != retDigest {
log.WithField("topic", topic).Warnf("Received message from outdated fork digest %#x", retDigest)
return pubsub.ValidationIgnore
}
b := v(ctx, pid, msg)
if b == pubsub.ValidationReject {
messageFailedValidationCounter.WithLabelValues(topic).Inc()
}
if b == pubsub.ValidationIgnore {
messageIgnoredValidationCounter.WithLabelValues(topic).Inc()
}
return b
}
}

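The new digest check in wrapAndReportValidation, in isolation: gossip carrying a digest other than the node's current fork digest is ignored rather than rejected, so peers that have not yet crossed the fork are not scored down:

// Drop gossip whose topic digest does not match the current fork
// digest; ValidationIgnore avoids penalizing the sender.
retDigest, err := p2p.ExtractGossipDigest(topic)
if err != nil {
	return pubsub.ValidationIgnore
}
currDigest, err := s.currentForkDigest()
if err != nil || currDigest != retDigest {
	return pubsub.ValidationIgnore
}
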
// subscribe to a static subnet with the given topic and index. A given validator and subscription handler is
// used to handle messages from the subnet. The base protobuf message is used to initialize new messages for decoding.
func (s *Service) subscribeStaticWithSubnets(topic string, validator pubsub.ValidatorEx, handle subHandler) {
base := p2p.GossipTopicMappings(topic, 0)
func (s *Service) subscribeStaticWithSubnets(topic string, validator pubsub.ValidatorEx, handle subHandler, digest [4]byte) {
genRoot := s.cfg.Chain.GenesisValidatorRoot()
_, e, err := p2putils.RetrieveForkDataFromDigest(digest, genRoot[:])
if err != nil {
panic(err)
}
base := p2p.GossipTopicMappings(topic, e)
if base == nil {
panic(fmt.Sprintf("%s is not mapped to any message in GossipTopicMappings", topic))
}
for i := uint64(0); i < params.BeaconNetworkConfig().AttestationSubnetCount; i++ {
s.subscribeWithBase(s.addDigestAndIndexToTopic(topic, i), validator, handle)
s.subscribeWithBase(s.addDigestAndIndexToTopic(topic, digest, i), validator, handle)
}
genesis := s.cfg.Chain.GenesisTime()
ticker := slotutil.NewSlotTicker(genesis, params.BeaconConfig().SecondsPerSlot)
@@ -218,14 +288,29 @@ func (s *Service) subscribeStaticWithSubnets(topic string, validator pubsub.Vali
if s.chainStarted.IsSet() && s.cfg.InitialSync.Syncing() {
continue
}
valid, err := isDigestValid(digest, genesis, genRoot)
if err != nil {
log.Error(err)
continue
}
if !valid {
log.Warnf("Attestation subnets with digest %#x are no longer valid, unsubscribing from all of them.", digest)
// Unsubscribes from all our current subnets.
for i := uint64(0); i < params.BeaconNetworkConfig().AttestationSubnetCount; i++ {
fullTopic := fmt.Sprintf(topic, digest, i) + s.cfg.P2P.Encoding().ProtocolSuffix()
s.unSubscribeFromTopic(fullTopic)
}
ticker.Done()
return
}
// Check every slot that there are enough peers
for i := uint64(0); i < params.BeaconNetworkConfig().AttestationSubnetCount; i++ {
if !s.validPeersExist(s.addDigestAndIndexToTopic(topic, i)) {
if !s.validPeersExist(s.addDigestAndIndexToTopic(topic, digest, i)) {
log.Debugf("No peers found subscribed to attestation gossip subnet with "+
"committee index %d. Searching network for peers subscribed to the subnet.", i)
_, err := s.cfg.P2P.FindPeersWithSubnet(
s.ctx,
s.addDigestAndIndexToTopic(topic, i),
s.addDigestAndIndexToTopic(topic, digest, i),
i,
params.BeaconNetworkConfig().MinimumPeersInSubnet,
)
@@ -247,14 +332,16 @@ func (s *Service) subscribeDynamicWithSubnets(
topicFormat string,
validate pubsub.ValidatorEx,
handle subHandler,
digest [4]byte,
) {
base := p2p.GossipTopicMappings(topicFormat, 0)
if base == nil {
log.Fatalf("%s is not mapped to any message in GossipTopicMappings", topicFormat)
}
digest, err := s.forkDigest()
genRoot := s.cfg.Chain.GenesisValidatorRoot()
_, e, err := p2putils.RetrieveForkDataFromDigest(digest, genRoot[:])
if err != nil {
log.WithError(err).Fatal("Could not compute fork digest")
panic(err)
}
base := p2p.GossipTopicMappings(topicFormat, e)
if base == nil {
panic(fmt.Sprintf("%s is not mapped to any message in GossipTopicMappings", topicFormat))
}
subscriptions := make(map[uint64]*pubsub.Subscription, params.BeaconConfig().MaxCommitteesPerSlot)
genesis := s.cfg.Chain.GenesisTime()
@@ -270,6 +357,18 @@ func (s *Service) subscribeDynamicWithSubnets(
if s.chainStarted.IsSet() && s.cfg.InitialSync.Syncing() {
continue
}
valid, err := isDigestValid(digest, genesis, genRoot)
if err != nil {
log.Error(err)
continue
}
if !valid {
log.Warnf("Attestation subnets with digest %#x are no longer valid, unsubscribing from all of them.", digest)
// Unsubscribes from all our current subnets.
s.reValidateSubscriptions(subscriptions, []uint64{}, topicFormat, digest)
ticker.Done()
return
}
wantedSubs := s.retrievePersistentSubs(currentSlot)
// Resize as appropriate.
s.reValidateSubscriptions(subscriptions, wantedSubs, topicFormat, digest)
@@ -302,9 +401,7 @@ func (s *Service) reValidateSubscriptions(subscriptions map[uint64]*pubsub.Subsc
if !wanted && v != nil {
v.Cancel()
fullTopic := fmt.Sprintf(topicFormat, digest, k) + s.cfg.P2P.Encoding().ProtocolSuffix()
if err := s.cfg.P2P.PubSub().UnregisterTopicValidator(fullTopic); err != nil {
log.WithError(err).Error("Could not unregister topic validator")
}
s.unSubscribeFromTopic(fullTopic)
delete(subscriptions, k)
}
}
@@ -336,6 +433,155 @@ func (s *Service) subscribeAggregatorSubnet(
}
}

// subscribe missing subnets for our sync committee members.
func (s *Service) subscribeSyncSubnet(
subscriptions map[uint64]*pubsub.Subscription,
idx uint64,
digest [4]byte,
validate pubsub.ValidatorEx,
handle subHandler,
) {
// do not subscribe if we have no peers in the same
// subnet
topic := p2p.GossipTypeMapping[reflect.TypeOf(&ethpb.SyncCommitteeMessage{})]
subnetTopic := fmt.Sprintf(topic, digest, idx)
// check if subscription exists and if not subscribe the relevant subnet.
if _, exists := subscriptions[idx]; !exists {
subscriptions[idx] = s.subscribeWithBase(subnetTopic, validate, handle)
}
if !s.validPeersExist(subnetTopic) {
log.Debugf("No peers found subscribed to sync gossip subnet with "+
"committee index %d. Searching network for peers subscribed to the subnet.", idx)
_, err := s.cfg.P2P.FindPeersWithSubnet(s.ctx, subnetTopic, idx, params.BeaconNetworkConfig().MinimumPeersInSubnet)
if err != nil {
log.WithError(err).Debug("Could not search for peers")
}
}
}

// subscribe to a static subnet with the given topic and index. A given validator and subscription handler is
// used to handle messages from the subnet. The base protobuf message is used to initialize new messages for decoding.
func (s *Service) subscribeStaticWithSyncSubnets(topic string, validator pubsub.ValidatorEx, handle subHandler, digest [4]byte) {
|
||||
genRoot := s.cfg.Chain.GenesisValidatorRoot()
|
||||
_, e, err := p2putils.RetrieveForkDataFromDigest(digest, genRoot[:])
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
base := p2p.GossipTopicMappings(topic, e)
|
||||
if base == nil {
|
||||
panic(fmt.Sprintf("%s is not mapped to any message in GossipTopicMappings", topic))
|
||||
}
|
||||
for i := uint64(0); i < params.BeaconConfig().SyncCommitteeSubnetCount; i++ {
|
||||
s.subscribeWithBase(s.addDigestAndIndexToTopic(topic, digest, i), validator, handle)
|
||||
}
|
||||
genesis := s.cfg.Chain.GenesisTime()
|
||||
ticker := slotutil.NewSlotTicker(genesis, params.BeaconConfig().SecondsPerSlot)
|
||||
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-s.ctx.Done():
|
||||
ticker.Done()
|
||||
return
|
||||
case <-ticker.C():
|
||||
if s.chainStarted.IsSet() && s.cfg.InitialSync.Syncing() {
|
||||
continue
|
||||
}
|
||||
valid, err := isDigestValid(digest, genesis, genRoot)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
continue
|
||||
}
|
||||
if !valid {
|
||||
log.Warnf("Sync subnets with digest %#x are no longer valid, unsubscribing from all of them.", digest)
|
||||
// Unsubscribes from all our current subnets.
|
||||
for i := uint64(0); i < params.BeaconConfig().SyncCommitteeSubnetCount; i++ {
|
||||
fullTopic := fmt.Sprintf(topic, digest, i) + s.cfg.P2P.Encoding().ProtocolSuffix()
|
||||
s.unSubscribeFromTopic(fullTopic)
|
||||
}
|
||||
ticker.Done()
|
||||
return
|
||||
}
|
||||
// Check every slot that there are enough peers
|
||||
for i := uint64(0); i < params.BeaconConfig().SyncCommitteeSubnetCount; i++ {
|
||||
if !s.validPeersExist(s.addDigestAndIndexToTopic(topic, digest, i)) {
|
||||
log.Debugf("No peers found subscribed to sync gossip subnet with "+
|
||||
"committee index %d. Searching network for peers subscribed to the subnet.", i)
|
||||
_, err := s.cfg.P2P.FindPeersWithSubnet(
|
||||
s.ctx,
|
||||
s.addDigestAndIndexToTopic(topic, digest, i),
|
||||
i,
|
||||
params.BeaconNetworkConfig().MinimumPeersInSubnet,
|
||||
)
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Could not search for peers")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// subscribe to a dynamically changing list of subnets. This method expects a fmt compatible
// string for the topic name and the list of subnets for subscribed topics that should be
// maintained.
func (s *Service) subscribeDynamicWithSyncSubnets(
	topicFormat string,
	validate pubsub.ValidatorEx,
	handle subHandler,
	digest [4]byte,
) {
	genRoot := s.cfg.Chain.GenesisValidatorRoot()
	_, e, err := p2putils.RetrieveForkDataFromDigest(digest, genRoot[:])
	if err != nil {
		panic(err)
	}
	base := p2p.GossipTopicMappings(topicFormat, e)
	if base == nil {
		panic(fmt.Sprintf("%s is not mapped to any message in GossipTopicMappings", topicFormat))
	}
	subscriptions := make(map[uint64]*pubsub.Subscription, params.BeaconConfig().SyncCommitteeSubnetCount)
	genesis := s.cfg.Chain.GenesisTime()
	ticker := slotutil.NewSlotTicker(genesis, params.BeaconConfig().SecondsPerSlot)

	go func() {
		for {
			select {
			case <-s.ctx.Done():
				ticker.Done()
				return
			case currentSlot := <-ticker.C():
				if s.chainStarted.IsSet() && s.cfg.InitialSync.Syncing() {
					continue
				}
				valid, err := isDigestValid(digest, genesis, genRoot)
				if err != nil {
					log.Error(err)
					continue
				}
				if !valid {
					log.Warnf("Sync subnets with digest %#x are no longer valid, unsubscribing from all of them.", digest)
					// Unsubscribes from all our current subnets.
					s.reValidateSubscriptions(subscriptions, []uint64{}, topicFormat, digest)
					ticker.Done()
					return
				}

				wantedSubs := s.retrieveActiveSyncSubnets(helpers.SlotToEpoch(currentSlot))
				// Resize as appropriate.
				s.reValidateSubscriptions(subscriptions, wantedSubs, topicFormat, digest)

				// Subscribe to the desired sync subnets.
				for _, idx := range wantedSubs {
					s.subscribeSyncSubnet(subscriptions, idx, digest, validate, handle)
				}
			}
		}
	}()
}

// lookup peers for attester specific subnets.
func (s *Service) lookupAttesterSubnets(digest [4]byte, idx uint64) {
	topic := p2p.GossipTypeMapping[reflect.TypeOf(&pb.Attestation{})]
@@ -351,6 +597,21 @@ func (s *Service) lookupAttesterSubnets(digest [4]byte, idx uint64) {
	}
}
func (s *Service) unSubscribeFromTopic(topic string) {
	log.WithField("topic", topic).Debug("Unsubscribing from topic")
	if err := s.cfg.P2P.PubSub().UnregisterTopicValidator(topic); err != nil {
		log.WithError(err).Error("Could not unregister topic validator")
	}
	sub := s.subHandler.subForTopic(topic)
	if sub != nil {
		sub.Cancel()
	}
	s.subHandler.removeTopic(topic)
	if err := s.cfg.P2P.LeaveTopic(topic); err != nil {
		log.WithError(err).Error("Unable to leave topic")
	}
}

// find if we have peers who are subscribed to the same subnet
func (s *Service) validPeersExist(subnetTopic string) bool {
	numOfPeers := s.cfg.P2P.PubSub().ListPeers(subnetTopic + s.cfg.P2P.Encoding().ProtocolSuffix())
@@ -367,6 +628,11 @@ func (s *Service) retrievePersistentSubs(currSlot types.Slot) []uint64 {
	return sliceutil.SetUint64(append(persistentSubs, wantedSubs...))
}

func (s *Service) retrieveActiveSyncSubnets(currEpoch types.Epoch) []uint64 {
	subs := cache.SyncSubnetIDs.GetAllSubnets(currEpoch)
	return sliceutil.SetUint64(subs)
}

// filters out required peers for the node to function, not
// pruning peers who are in our attestation subnets.
func (s *Service) filterNeededPeers(pids []peer.ID) []peer.ID {
@@ -374,7 +640,7 @@ func (s *Service) filterNeededPeers(pids []peer.ID) []peer.ID {
	if len(pids) == 0 {
		return pids
	}
	digest, err := s.forkDigest()
	digest, err := s.currentForkDigest()
	if err != nil {
		log.WithError(err).Error("Could not compute fork digest")
		return pids
@@ -417,30 +683,41 @@ func (s *Service) filterNeededPeers(pids []peer.ID) []peer.ID {
}

// Add fork digest to topic.
func (s *Service) addDigestToTopic(topic string) string {
func (s *Service) addDigestToTopic(topic string, digest [4]byte) string {
	if !strings.Contains(topic, "%x") {
		log.Fatal("Topic does not have appropriate formatter for digest")
	}
	digest, err := s.forkDigest()
	if err != nil {
		log.WithError(err).Fatal("Could not compute fork digest")
	}
	return fmt.Sprintf(topic, digest)
}

// Add the digest and index to subnet topic.
func (s *Service) addDigestAndIndexToTopic(topic string, idx uint64) string {
func (s *Service) addDigestAndIndexToTopic(topic string, digest [4]byte, idx uint64) string {
	if !strings.Contains(topic, "%x") {
		log.Fatal("Topic does not have appropriate formatter for digest")
	}
	digest, err := s.forkDigest()
	if err != nil {
		log.WithError(err).Fatal("Could not compute fork digest")
	}
	return fmt.Sprintf(topic, digest, idx)
}

func (s *Service) forkDigest() ([4]byte, error) {
func (s *Service) currentForkDigest() ([4]byte, error) {
	genRoot := s.cfg.Chain.GenesisValidatorRoot()
	return p2putils.CreateForkDigest(s.cfg.Chain.GenesisTime(), genRoot[:])
}

// Checks if the provided digest matches up with the current supposed digest.
func isDigestValid(digest [4]byte, genesis time.Time, genValRoot [32]byte) (bool, error) {
	retDigest, err := p2putils.CreateForkDigest(genesis, genValRoot[:])
	if err != nil {
		return false, err
	}
	isNextEpoch, err := p2putils.IsForkNextEpoch(genesis, genValRoot[:])
	if err != nil {
		return false, err
	}
	// In the event there is a fork the next epoch,
	// we skip the check, as we subscribe subnets an
	// epoch in advance.
	if isNextEpoch {
		return true, nil
	}
	return retDigest == digest, nil
}
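
A hedged sketch of how the ticker loops above consume isDigestValid: an error from the check itself only skips the tick, while a confirmed-stale digest is what triggers unsubscribing (the wrapper below is hypothetical, not part of the changeset):

package sync

import "time"

// shouldKeepSubnets wraps isDigestValid with the error policy used by the
// subscription loops above: keep going on a transient error, tear down only
// once the digest is known to be stale.
func shouldKeepSubnets(digest [4]byte, genesis time.Time, genValRoot [32]byte) bool {
	valid, err := isDigestValid(digest, genesis, genValRoot)
	if err != nil {
		log.Error(err)
		return true
	}
	return valid
}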
@@ -2,22 +2,23 @@ package sync

import (
	"context"
	"errors"

	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/state/interop"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
	"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
	wrapperv2 "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
	"github.com/prysmaticlabs/prysm/shared/featureconfig"
	"google.golang.org/protobuf/proto"
)

func (s *Service) beaconBlockSubscriber(ctx context.Context, msg proto.Message) error {
	rBlock, ok := msg.(*ethpb.SignedBeaconBlock)
	if !ok {
		return errors.New("message is not type *ethpb.SignedBeaconBlock")
	signed, err := blockFromProto(msg)
	if err != nil {
		return err
	}
	signed := wrapper.WrappedPhase0SignedBeaconBlock(rBlock)

	if signed.IsNil() || signed.Block().IsNil() {
		return errors.New("nil block")
@@ -65,3 +66,14 @@ func (s *Service) deleteAttsInPool(atts []*ethpb.Attestation) error {
	}
	return nil
}

func blockFromProto(msg proto.Message) (block.SignedBeaconBlock, error) {
	switch t := msg.(type) {
	case *ethpb.SignedBeaconBlock:
		return wrapper.WrappedPhase0SignedBeaconBlock(t), nil
	case *ethpb.SignedBeaconBlockAltair:
		return wrapperv2.WrappedAltairSignedBeaconBlock(t)
	default:
		return nil, errors.Errorf("message has invalid underlying type: %T", msg)
	}
}
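
blockFromProto gives callers a single call site for both block versions; everything after it goes through the version-agnostic block.SignedBeaconBlock accessors. A hedged usage sketch (the helper below is illustrative, not part of the changeset):

package sync

import "google.golang.org/protobuf/proto"

// slotOf shows the intended call pattern: wrap once, then use the
// version-agnostic accessors.
func slotOf(msg proto.Message) (uint64, error) {
	signed, err := blockFromProto(msg)
	if err != nil {
		return 0, err
	}
	return uint64(signed.Block().Slot()), nil
}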
@@ -2,6 +2,7 @@ package sync

import (
	"context"
	"reflect"
	"testing"

	"github.com/prysmaticlabs/go-bitfield"
@@ -10,6 +11,8 @@ import (
	dbtest "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
	"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
	"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
	"github.com/prysmaticlabs/prysm/shared/testutil"
	"github.com/prysmaticlabs/prysm/shared/testutil/assert"
	"github.com/prysmaticlabs/prysm/shared/testutil/require"
@@ -108,3 +111,53 @@ func TestService_beaconBlockSubscriber(t *testing.T) {
		})
	}
}

func TestBlockFromProto(t *testing.T) {
	tests := []struct {
		name       string
		msgCreator func(t *testing.T) proto.Message
		want       block.SignedBeaconBlock
		wantErr    bool
	}{
		{
			name: "invalid type provided",
			msgCreator: func(t *testing.T) proto.Message {
				return &ethpb.SignedAggregateAttestationAndProof{}
			},
			want:    nil,
			wantErr: true,
		},
		{
			name: "phase 0 type provided",
			msgCreator: func(t *testing.T) proto.Message {
				return &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 100}}
			},
			want:    wrapper.WrappedPhase0SignedBeaconBlock(&ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 100}}),
			wantErr: false,
		},
		{
			name: "altair type provided",
			msgCreator: func(t *testing.T) proto.Message {
				return &ethpb.SignedBeaconBlockAltair{Block: &ethpb.BeaconBlockAltair{Slot: 100}}
			},
			want: func() block.SignedBeaconBlock {
				wsb, err := wrapper.WrappedAltairSignedBeaconBlock(&ethpb.SignedBeaconBlockAltair{Block: &ethpb.BeaconBlockAltair{Slot: 100}})
				require.NoError(t, err)
				return wsb
			}(),
			wantErr: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := blockFromProto(tt.msgCreator(t))
			if (err != nil) != tt.wantErr {
				t.Errorf("blockFromProto() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if !reflect.DeepEqual(got, tt.want) {
				t.Errorf("blockFromProto() got = %v, want %v", got, tt.want)
			}
		})
	}
}
24
beacon-chain/sync/subscriber_sync_committee_message.go
Normal file
@@ -0,0 +1,24 @@
package sync

import (
	"context"
	"fmt"

	"github.com/pkg/errors"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	"google.golang.org/protobuf/proto"
)

// skipcq: SCC-U1000
func (s *Service) syncCommitteeMessageSubscriber(_ context.Context, msg proto.Message) error {
	m, ok := msg.(*ethpb.SyncCommitteeMessage)
	if !ok {
		return fmt.Errorf("message was not type *eth.SyncCommitteeMessage, type=%T", msg)
	}

	if m == nil {
		return errors.New("nil sync committee message")
	}

	return s.cfg.SyncCommsPool.SaveSyncCommitteeMessage(m)
}
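
A hedged wiring sketch for the subscriber above, following the Service.subscribe call shape used in the tests later in this diff; the helper and its validator parameter are assumptions for illustration:

package sync

import pubsub "github.com/libp2p/go-libp2p-pubsub"

// wireSyncMessageTopic (hypothetical) attaches syncCommitteeMessageSubscriber
// to a topic format under the node's current fork digest.
func (s *Service) wireSyncMessageTopic(topicFormat string, validate pubsub.ValidatorEx) {
	digest, err := s.currentForkDigest()
	if err != nil {
		log.WithError(err).Error("Could not compute fork digest")
		return
	}
	s.subscribe(topicFormat, validate, s.syncCommitteeMessageSubscriber, digest)
}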
26
beacon-chain/sync/subscriber_sync_contribution_proof.go
Normal file
@@ -0,0 +1,26 @@
package sync

import (
	"context"
	"errors"
	"fmt"

	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	"google.golang.org/protobuf/proto"
)

// syncContributionAndProofSubscriber forwards the incoming validated sync contributions and proof to the
// contribution pool for processing.
// skipcq: SCC-U1000
func (s *Service) syncContributionAndProofSubscriber(_ context.Context, msg proto.Message) error {
	a, ok := msg.(*ethpb.SignedContributionAndProof)
	if !ok {
		return fmt.Errorf("message was not type *eth.SignedContributionAndProof, type=%T", msg)
	}

	if a.Message == nil || a.Message.Contribution == nil {
		return errors.New("nil contribution")
	}

	return s.cfg.SyncCommsPool.SaveSyncCommitteeContribution(a.Message.Contribution)
}
@@ -18,11 +18,13 @@ import (
	db "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
	"github.com/prysmaticlabs/prysm/beacon-chain/operations/slashings"
	"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
	"github.com/prysmaticlabs/prysm/beacon-chain/p2p/encoder"
	p2ptest "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing"
	mockSync "github.com/prysmaticlabs/prysm/beacon-chain/sync/initial-sync/testing"
	pb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/shared/abool"
	"github.com/prysmaticlabs/prysm/shared/bytesutil"
	"github.com/prysmaticlabs/prysm/shared/p2putils"
	"github.com/prysmaticlabs/prysm/shared/params"
	"github.com/prysmaticlabs/prysm/shared/testutil"
	"github.com/prysmaticlabs/prysm/shared/testutil/assert"
@@ -43,10 +45,11 @@ func TestSubscribe_ReceivesValidMessage(t *testing.T) {
				Genesis: time.Now(),
			},
		},
		subHandler:   newSubTopicHandler(),
		chainStarted: abool.New(),
	}
	var err error
	p2pService.Digest, err = r.forkDigest()
	p2pService.Digest, err = r.currentForkDigest()
	require.NoError(t, err)
	topic := "/eth2/%x/voluntary_exit"
	var wg sync.WaitGroup
@@ -60,7 +63,7 @@ func TestSubscribe_ReceivesValidMessage(t *testing.T) {
		}
		wg.Done()
		return nil
	})
	}, p2pService.Digest)
	r.markForChainStart()

	p2pService.ReceivePubSub(topic, &pb.SignedVoluntaryExit{Exit: &pb.VoluntaryExit{Epoch: 55}, Signature: make([]byte, 96)})
@@ -89,17 +92,21 @@ func TestSubscribe_ReceivesAttesterSlashing(t *testing.T) {
		},
		seenAttesterSlashingCache: make(map[uint64]bool),
		chainStarted:              abool.New(),
		subHandler:                newSubTopicHandler(),
	}
	topic := "/eth2/%x/attester_slashing"
	var wg sync.WaitGroup
	wg.Add(1)
	params.SetupTestConfigCleanup(t)
	params.OverrideBeaconConfig(params.MainnetConfig())
	var err error
	p2pService.Digest, err = r.currentForkDigest()
	require.NoError(t, err)
	r.subscribe(topic, r.noopValidator, func(ctx context.Context, msg proto.Message) error {
		require.NoError(t, r.attesterSlashingSubscriber(ctx, msg))
		wg.Done()
		return nil
	})
	}, p2pService.Digest)
	beaconState, privKeys := testutil.DeterministicGenesisState(t, 64)
	chainService.State = beaconState
	r.markForChainStart()
@@ -111,8 +118,6 @@ func TestSubscribe_ReceivesAttesterSlashing(t *testing.T) {
	require.NoError(t, err, "Error generating attester slashing")
	err = r.cfg.DB.SaveState(ctx, beaconState, bytesutil.ToBytes32(attesterSlashing.Attestation_1.Data.BeaconBlockRoot))
	require.NoError(t, err)
	p2pService.Digest, err = r.forkDigest()
	require.NoError(t, err)
	p2pService.ReceivePubSub(topic, attesterSlashing)

	if testutil.WaitTimeout(&wg, time.Second) {
@@ -143,17 +148,20 @@ func TestSubscribe_ReceivesProposerSlashing(t *testing.T) {
		},
		seenProposerSlashingCache: c,
		chainStarted:              abool.New(),
		subHandler:                newSubTopicHandler(),
	}
	topic := "/eth2/%x/proposer_slashing"
	var wg sync.WaitGroup
	wg.Add(1)
	params.SetupTestConfigCleanup(t)
	params.OverrideBeaconConfig(params.MainnetConfig())
	p2pService.Digest, err = r.currentForkDigest()
	require.NoError(t, err)
	r.subscribe(topic, r.noopValidator, func(ctx context.Context, msg proto.Message) error {
		require.NoError(t, r.proposerSlashingSubscriber(ctx, msg))
		wg.Done()
		return nil
	})
	}, p2pService.Digest)
	beaconState, privKeys := testutil.DeterministicGenesisState(t, 64)
	chainService.State = beaconState
	r.markForChainStart()
@@ -163,8 +171,7 @@ func TestSubscribe_ReceivesProposerSlashing(t *testing.T) {
		1, /* validator index */
	)
	require.NoError(t, err, "Error generating proposer slashing")
	p2pService.Digest, err = r.forkDigest()
	require.NoError(t, err)

	p2pService.ReceivePubSub(topic, proposerSlashing)

	if testutil.WaitTimeout(&wg, time.Second) {
@@ -185,10 +192,11 @@ func TestSubscribe_HandlesPanic(t *testing.T) {
			},
			P2P: p,
		},
		subHandler:   newSubTopicHandler(),
		chainStarted: abool.New(),
	}
	var err error
	p.Digest, err = r.forkDigest()
	p.Digest, err = r.currentForkDigest()
	require.NoError(t, err)

	topic := p2p.GossipTypeMapping[reflect.TypeOf(&pb.SignedVoluntaryExit{})]
@@ -198,7 +206,7 @@ func TestSubscribe_HandlesPanic(t *testing.T) {
	r.subscribe(topic, r.noopValidator, func(_ context.Context, msg proto.Message) error {
		defer wg.Done()
		panic("bad")
	})
	}, p.Digest)
	r.markForChainStart()
	p.ReceivePubSub(topic, &pb.SignedVoluntaryExit{Exit: &pb.VoluntaryExit{Epoch: 55}, Signature: make([]byte, 96)})

@@ -220,8 +228,9 @@ func TestRevalidateSubscription_CorrectlyFormatsTopic(t *testing.T) {
			P2P: p,
		},
		chainStarted: abool.New(),
		subHandler:   newSubTopicHandler(),
	}
	digest, err := r.forkDigest()
	digest, err := r.currentForkDigest()
	require.NoError(t, err)
	subscriptions := make(map[uint64]*pubsub.Subscription, params.BeaconConfig().MaxCommitteesPerSlot)

@@ -256,12 +265,15 @@ func TestStaticSubnets(t *testing.T) {
			P2P: p,
		},
		chainStarted: abool.New(),
		subHandler:   newSubTopicHandler(),
	}
	defaultTopic := "/eth2/%x/beacon_attestation_%d"
	d, err := r.currentForkDigest()
	assert.NoError(t, err)
	r.subscribeStaticWithSubnets(defaultTopic, r.noopValidator, func(_ context.Context, msg proto.Message) error {
		// no-op
		return nil
	})
	}, d)
	topics := r.cfg.P2P.PubSub().GetTopics()
	if uint64(len(topics)) != params.BeaconNetworkConfig().AttestationSubnetCount {
		t.Errorf("Wanted the number of subnet topics registered to be %d but got %d", params.BeaconNetworkConfig().AttestationSubnetCount, len(topics))
@@ -270,6 +282,13 @@ func TestStaticSubnets(t *testing.T) {
}

func Test_wrapAndReportValidation(t *testing.T) {
	mChain := &mockChain.ChainService{
		Genesis:        time.Now(),
		ValidatorsRoot: [32]byte{0x01},
	}
	fd, err := p2putils.CreateForkDigest(mChain.GenesisTime(), mChain.ValidatorsRoot[:])
	assert.NoError(t, err)
	mockTopic := fmt.Sprintf(p2p.BlockSubnetTopicFormat, fd) + encoder.SszNetworkEncoder{}.ProtocolSuffix()
	type args struct {
		topic string
		v     pubsub.ValidatorEx
@@ -323,7 +342,7 @@ func Test_wrapAndReportValidation(t *testing.T) {
		{
			name: "validator OK",
			args: args{
				topic: "foo",
				topic: mockTopic,
				v: func(ctx context.Context, id peer.ID, message *pubsub.Message) pubsub.ValidationResult {
					return pubsub.ValidationAccept
				},
@@ -331,7 +350,7 @@ func Test_wrapAndReportValidation(t *testing.T) {
				msg: &pubsub.Message{
					Message: &pubsubpb.Message{
						Topic: func() *string {
							s := "foo"
							s := mockTopic
							return &s
						}(),
					},
@@ -361,6 +380,9 @@ func Test_wrapAndReportValidation(t *testing.T) {
			chainStarted.SetTo(tt.args.chainstarted)
			s := &Service{
				chainStarted: chainStarted,
				cfg: &Config{
					Chain: mChain,
				},
			}
			_, v := s.wrapAndReportValidation(tt.args.topic, tt.args.v)
			got := v(context.Background(), tt.args.pid, tt.args.msg)
@@ -389,12 +411,13 @@ func TestFilterSubnetPeers(t *testing.T) {
	}
	// Empty cache at the end of the test.
	defer cache.SubnetIDs.EmptyAllCaches()

	digest, err := r.currentForkDigest()
	assert.NoError(t, err)
	defaultTopic := "/eth2/%x/beacon_attestation_%d" + r.cfg.P2P.Encoding().ProtocolSuffix()
	subnet10 := r.addDigestAndIndexToTopic(defaultTopic, 10)
	subnet10 := r.addDigestAndIndexToTopic(defaultTopic, digest, 10)
	cache.SubnetIDs.AddAggregatorSubnetID(currSlot, 10)

	subnet20 := r.addDigestAndIndexToTopic(defaultTopic, 20)
	subnet20 := r.addDigestAndIndexToTopic(defaultTopic, digest, 20)
	cache.SubnetIDs.AddAttesterSubnetID(currSlot, 20)

	p1 := createPeer(t, subnet10)
90
beacon-chain/sync/subscription_topic_handler.go
Normal file
@@ -0,0 +1,90 @@
package sync

import (
	"sync"

	pubsub "github.com/libp2p/go-libp2p-pubsub"
	"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
)

// This is a subscription topic handler that is used to handle basic
// CRUD operations on the topic map. All operations are thread safe
// so they can be called from multiple routines.
type subTopicHandler struct {
	sync.RWMutex
	subTopics map[string]*pubsub.Subscription
	digestMap map[[4]byte]int
}

func newSubTopicHandler() *subTopicHandler {
	return &subTopicHandler{
		subTopics: map[string]*pubsub.Subscription{},
		digestMap: map[[4]byte]int{},
	}
}

func (s *subTopicHandler) addTopic(topic string, sub *pubsub.Subscription) {
	s.Lock()
	defer s.Unlock()
	s.subTopics[topic] = sub
	digest, err := p2p.ExtractGossipDigest(topic)
	if err != nil {
		log.WithError(err).Error("Could not retrieve digest")
		return
	}
	s.digestMap[digest] += 1
}

func (s *subTopicHandler) topicExists(topic string) bool {
	s.RLock()
	_, ok := s.subTopics[topic]
	s.RUnlock()
	return ok
}

func (s *subTopicHandler) removeTopic(topic string) {
	s.Lock()
	defer s.Unlock()
	delete(s.subTopics, topic)
	digest, err := p2p.ExtractGossipDigest(topic)
	if err != nil {
		log.WithError(err).Error("Could not retrieve digest")
		return
	}
	currAmt, ok := s.digestMap[digest]
	// Should never be possible, is a
	// defensive check.
	if !ok || currAmt <= 0 {
		delete(s.digestMap, digest)
		return
	}
	s.digestMap[digest] -= 1
	if s.digestMap[digest] == 0 {
		delete(s.digestMap, digest)
	}
}

func (s *subTopicHandler) digestExists(digest [4]byte) bool {
	s.RLock()
	defer s.RUnlock()

	count, ok := s.digestMap[digest]
	return ok && count > 0
}

func (s *subTopicHandler) allTopics() []string {
	s.RLock()
	defer s.RUnlock()
	topics := []string{}
	for t := range s.subTopics {
		copiedTopic := t
		topics = append(topics, copiedTopic)
	}
	return topics
}

func (s *subTopicHandler) subForTopic(topic string) *pubsub.Subscription {
	s.RLock()
	defer s.RUnlock()
	return s.subTopics[topic]
}
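
digestMap above is a per-digest reference count: each added topic increments it, each removal decrements it, so digestExists answers whether any live topic still carries that fork digest. A short sketch of the semantics (hypothetical helper; the test file below exercises the same behavior):

package sync

import pubsub "github.com/libp2p/go-libp2p-pubsub"

// digestRefCountExample: two topics under one digest keep the digest alive
// until the second topic is removed.
func digestRefCountExample(blockTopic, exitTopic string, digest [4]byte) bool {
	h := newSubTopicHandler()
	h.addTopic(blockTopic, new(pubsub.Subscription)) // digest count: 1
	h.addTopic(exitTopic, new(pubsub.Subscription))  // digest count: 2
	h.removeTopic(blockTopic)                        // digest count: 1
	return h.digestExists(digest)                    // still true
}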
96
beacon-chain/sync/subscription_topic_handler_test.go
Normal file
@@ -0,0 +1,96 @@
package sync

import (
	"fmt"
	"testing"
	"time"

	pubsub "github.com/libp2p/go-libp2p-pubsub"
	"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
	"github.com/prysmaticlabs/prysm/beacon-chain/p2p/encoder"
	"github.com/prysmaticlabs/prysm/shared/p2putils"
	"github.com/prysmaticlabs/prysm/shared/testutil/assert"
)

func TestSubTopicHandler_CRUD(t *testing.T) {
	h := newSubTopicHandler()
	// Non-existent topic
	assert.Equal(t, false, h.topicExists("junk"))
	assert.Equal(t, false, h.digestExists([4]byte{}))

	digest, err := p2putils.CreateForkDigest(time.Now(), make([]byte, 32))
	assert.NoError(t, err)
	enc := encoder.SszNetworkEncoder{}

	// Valid topic added in.
	topic := fmt.Sprintf(p2p.BlockSubnetTopicFormat, digest) + enc.ProtocolSuffix()
	h.addTopic(topic, new(pubsub.Subscription))
	assert.Equal(t, true, h.topicExists(topic))
	assert.Equal(t, true, h.digestExists(digest))
	assert.Equal(t, 1, len(h.allTopics()))

	h.removeTopic(topic)
	assert.Equal(t, false, h.topicExists(topic))
	assert.Equal(t, false, h.digestExists(digest))
	assert.Equal(t, 0, len(h.allTopics()))

	h = newSubTopicHandler()
	// Multiple Topics added in.
	topic = fmt.Sprintf(p2p.BlockSubnetTopicFormat, digest) + enc.ProtocolSuffix()
	h.addTopic(topic, new(pubsub.Subscription))
	assert.Equal(t, true, h.topicExists(topic))

	topic = fmt.Sprintf(p2p.ExitSubnetTopicFormat, digest) + enc.ProtocolSuffix()
	h.addTopic(topic, new(pubsub.Subscription))
	assert.Equal(t, true, h.topicExists(topic))

	topic = fmt.Sprintf(p2p.ProposerSlashingSubnetTopicFormat, digest) + enc.ProtocolSuffix()
	h.addTopic(topic, new(pubsub.Subscription))
	assert.Equal(t, true, h.topicExists(topic))

	topic = fmt.Sprintf(p2p.AttesterSlashingSubnetTopicFormat, digest) + enc.ProtocolSuffix()
	h.addTopic(topic, new(pubsub.Subscription))
	assert.Equal(t, true, h.topicExists(topic))

	topic = fmt.Sprintf(p2p.AggregateAndProofSubnetTopicFormat, digest) + enc.ProtocolSuffix()
	h.addTopic(topic, new(pubsub.Subscription))
	assert.Equal(t, true, h.topicExists(topic))

	topic = fmt.Sprintf(p2p.SyncContributionAndProofSubnetTopicFormat, digest) + enc.ProtocolSuffix()
	h.addTopic(topic, new(pubsub.Subscription))
	assert.Equal(t, true, h.topicExists(topic))

	assert.Equal(t, 6, len(h.allTopics()))

	// Remove multiple topics
	topic = fmt.Sprintf(p2p.AttesterSlashingSubnetTopicFormat, digest) + enc.ProtocolSuffix()
	h.removeTopic(topic)
	assert.Equal(t, false, h.topicExists(topic))

	topic = fmt.Sprintf(p2p.ExitSubnetTopicFormat, digest) + enc.ProtocolSuffix()
	h.removeTopic(topic)
	assert.Equal(t, false, h.topicExists(topic))

	topic = fmt.Sprintf(p2p.ProposerSlashingSubnetTopicFormat, digest) + enc.ProtocolSuffix()
	h.removeTopic(topic)
	assert.Equal(t, false, h.topicExists(topic))

	assert.Equal(t, true, h.digestExists(digest))
	assert.Equal(t, 3, len(h.allTopics()))

	// Remove remaining topics.
	topic = fmt.Sprintf(p2p.BlockSubnetTopicFormat, digest) + enc.ProtocolSuffix()
	h.removeTopic(topic)
	assert.Equal(t, false, h.topicExists(topic))

	topic = fmt.Sprintf(p2p.AggregateAndProofSubnetTopicFormat, digest) + enc.ProtocolSuffix()
	h.removeTopic(topic)
	assert.Equal(t, false, h.topicExists(topic))

	topic = fmt.Sprintf(p2p.SyncContributionAndProofSubnetTopicFormat, digest) + enc.ProtocolSuffix()
	h.removeTopic(topic)
	assert.Equal(t, false, h.topicExists(topic))

	assert.Equal(t, false, h.digestExists(digest))
	assert.Equal(t, 0, len(h.allTopics()))
}
@@ -382,6 +382,9 @@ func TestValidateAggregateAndProof_CanValidate(t *testing.T) {
	require.NoError(t, err)

	topic := p2p.GossipTypeMapping[reflect.TypeOf(signedAggregateAndProof)]
	d, err := r.currentForkDigest()
	assert.NoError(t, err)
	topic = r.addDigestToTopic(topic, d)
	msg := &pubsub.Message{
		Message: &pubsubpb.Message{
			Data: buf.Bytes(),
@@ -476,6 +479,9 @@ func TestVerifyIndexInCommittee_SeenAggregatorEpoch(t *testing.T) {
	require.NoError(t, err)

	topic := p2p.GossipTypeMapping[reflect.TypeOf(signedAggregateAndProof)]
	d, err := r.currentForkDigest()
	assert.NoError(t, err)
	topic = r.addDigestToTopic(topic, d)
	msg := &pubsub.Message{
		Message: &pubsubpb.Message{
			Data: buf.Bytes(),

@@ -82,10 +82,11 @@ func TestValidateAttesterSlashing_ValidSlashing(t *testing.T) {
	r := &Service{
		cfg: &Config{
			P2P: p,
			Chain: &mock.ChainService{State: s},
			Chain: &mock.ChainService{State: s, Genesis: time.Now()},
			InitialSync: &mockSync.Sync{IsSyncing: false},
		},
		seenAttesterSlashingCache: make(map[uint64]bool),
		subHandler:                newSubTopicHandler(),
	}

	buf := new(bytes.Buffer)
@@ -93,6 +94,9 @@ func TestValidateAttesterSlashing_ValidSlashing(t *testing.T) {
	require.NoError(t, err)

	topic := p2p.GossipTypeMapping[reflect.TypeOf(slashing)]
	d, err := r.currentForkDigest()
	assert.NoError(t, err)
	topic = r.addDigestToTopic(topic, d)
	msg := &pubsub.Message{
		Message: &pubsubpb.Message{
			Data: buf.Bytes(),
@@ -113,16 +117,21 @@ func TestValidateAttesterSlashing_CanFilter(t *testing.T) {
		cfg: &Config{
			P2P: p,
			InitialSync: &mockSync.Sync{IsSyncing: false},
			Chain: &mock.ChainService{Genesis: time.Now()},
		},
		seenAttesterSlashingCache: make(map[uint64]bool),
		subHandler:                newSubTopicHandler(),
	}

	r.setAttesterSlashingIndicesSeen([]uint64{1, 2, 3, 4}, []uint64{3, 4, 5, 6})

	// The below attestations should be filtered hence bad signature is ok.
	topic := p2p.GossipTypeMapping[reflect.TypeOf(&ethpb.AttesterSlashing{})]
	d, err := r.currentForkDigest()
	assert.NoError(t, err)
	topic = r.addDigestToTopic(topic, d)
	buf := new(bytes.Buffer)
	_, err := p.Encoding().EncodeGossip(buf, &ethpb.AttesterSlashing{
	_, err = p.Encoding().EncodeGossip(buf, &ethpb.AttesterSlashing{
		Attestation_1: testutil.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
			AttestingIndices: []uint64{3},
		}),

@@ -43,19 +43,12 @@ func (s *Service) validateCommitteeIndexBeaconAttestation(ctx context.Context, p
		return pubsub.ValidationReject
	}

	// Override topic for decoding.
	originalTopic := msg.Topic
	format := p2p.GossipTypeMapping[reflect.TypeOf(&eth.Attestation{})]
	msg.Topic = &format

	m, err := s.decodePubsubMessage(msg)
	if err != nil {
		log.WithError(err).Debug("Could not decode message")
		traceutil.AnnotateError(span, err)
		return pubsub.ValidationReject
	}
	// Restore topic.
	msg.Topic = originalTopic

	att, ok := m.(*eth.Attestation)
	if !ok {
@@ -98,7 +91,7 @@ func (s *Service) validateCommitteeIndexBeaconAttestation(ctx context.Context, p
		return pubsub.ValidationReject
	}

	// Verify the block being voted and the processed state is in DB and. The block should have passed validation if it's in the DB.
	// Verify the block being voted and the processed state is in DB and the block has passed validation if it's in the DB.
	blockRoot := bytesutil.ToBytes32(att.Data.BeaconBlockRoot)
	if !s.hasBlockAndState(ctx, blockRoot) {
		// A node doesn't have the block, it'll request from peer while saving the pending attestation to a queue.
@@ -122,7 +115,7 @@ func (s *Service) validateCommitteeIndexBeaconAttestation(ctx context.Context, p
		return pubsub.ValidationIgnore
	}

	validationRes := s.validateUnaggregatedAttTopic(ctx, att, preState, *originalTopic)
	validationRes := s.validateUnaggregatedAttTopic(ctx, att, preState, *msg.Topic)
	if validationRes != pubsub.ValidationAccept {
		return validationRes
	}
@@ -156,7 +149,7 @@ func (s *Service) validateUnaggregatedAttTopic(ctx context.Context, a *eth.Attes
	}
	subnet := helpers.ComputeSubnetForAttestation(valCount, a)
	format := p2p.GossipTypeMapping[reflect.TypeOf(&eth.Attestation{})]
	digest, err := s.forkDigest()
	digest, err := s.currentForkDigest()
	if err != nil {
		log.WithError(err).Error("Could not compute fork digest")
		traceutil.AnnotateError(span, err)
@@ -54,7 +54,7 @@ func TestService_validateCommitteeIndexBeaconAttestation(t *testing.T) {
	invalidRoot := [32]byte{'A', 'B', 'C', 'D'}
	s.setBadBlock(ctx, invalidRoot)

	digest, err := s.forkDigest()
	digest, err := s.currentForkDigest()
	require.NoError(t, err)

	blk := testutil.NewBeaconBlock()

@@ -14,9 +14,7 @@ import (
	blockfeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/block"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
	"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
	"github.com/prysmaticlabs/prysm/shared/bytesutil"
	"github.com/prysmaticlabs/prysm/shared/params"
	"github.com/prysmaticlabs/prysm/shared/timeutils"
@@ -54,12 +52,11 @@ func (s *Service) validateBeaconBlockPubSub(ctx context.Context, pid peer.ID, ms
	s.validateBlockLock.Lock()
	defer s.validateBlockLock.Unlock()

	rblk, ok := m.(*ethpb.SignedBeaconBlock)
	blk, ok := m.(block.SignedBeaconBlock)
	if !ok {
		log.WithError(errors.New("msg is not ethpb.SignedBeaconBlock")).Debug("Rejected block")
		return pubsub.ValidationReject
	}
	blk := wrapper.WrappedPhase0SignedBeaconBlock(rblk)

	if blk.IsNil() || blk.Block().IsNil() {
		log.WithError(errors.New("block.Block is nil")).Debug("Rejected block")
@@ -144,7 +141,7 @@ func (s *Service) validateBeaconBlockPubSub(ctx context.Context, pid peer.ID, ms
	}

	// Handle block when the parent is unknown.
	if !s.cfg.DB.HasBlock(ctx, bytesutil.ToBytes32(blk.Block().ParentRoot())) {
	if !s.cfg.DB.HasBlock(ctx, bytesutil.ToBytes32(blk.Block().ParentRoot())) && !s.cfg.Chain.HasInitSyncBlock(bytesutil.ToBytes32(blk.Block().ParentRoot())) {
		s.pendingQueueLock.Lock()
		if err := s.insertBlockToPendingQueue(blk.Block().Slot(), blk, blockRoot); err != nil {
			s.pendingQueueLock.Unlock()
@@ -157,12 +154,14 @@ func (s *Service) validateBeaconBlockPubSub(ctx context.Context, pid peer.ID, ms
	}

	if err := s.validateBeaconBlock(ctx, blk, blockRoot); err != nil {
		log.WithError(err).WithField("blockSlot", blk.Block().Slot()).Warn("Rejected block")
		log.WithError(err).WithFields(logrus.Fields{
			"blockSlot": blk.Block().Slot(),
			"blockRoot": fmt.Sprintf("%#x", blockRoot)}).Warn("Rejected block")
		return pubsub.ValidationReject
	}
	// Record attribute of valid block.
	span.AddAttributes(trace.Int64Attribute("slotInEpoch", int64(blk.Block().Slot()%params.BeaconConfig().SlotsPerEpoch)))
	msg.ValidatorData = rblk // Used in downstream subscriber
	msg.ValidatorData = blk.Proto() // Used in downstream subscriber

	// Log the arrival time of the accepted block
	startTime, err := helpers.SlotToTime(genesisTime, blk.Block().Slot())
@@ -198,7 +197,7 @@ func (s *Service) validateBeaconBlock(ctx context.Context, blk block.SignedBeaco
		return err
	}

	if err := blocks.VerifyBlockSignature(parentState, blk.Block().ProposerIndex(), blk.Signature(), blk.Block().HashTreeRoot); err != nil {
	if err := blocks.VerifyBlockSignatureUsingCurrentFork(parentState, blk); err != nil {
		s.setBadBlock(ctx, blockRoot)
		return err
	}
@@ -23,7 +23,6 @@ import (
	"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
	mockSync "github.com/prysmaticlabs/prysm/beacon-chain/sync/initial-sync/testing"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	statepb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
	"github.com/prysmaticlabs/prysm/shared/abool"
	"github.com/prysmaticlabs/prysm/shared/bls"
@@ -49,7 +48,7 @@ func TestValidateBeaconBlockPubSub_InvalidSignature(t *testing.T) {
	bRoot, err := parentBlock.Block.HashTreeRoot()
	require.NoError(t, err)
	require.NoError(t, db.SaveState(ctx, beaconState, bRoot))
	require.NoError(t, db.SaveStateSummary(ctx, &statepb.StateSummary{Root: bRoot[:]}))
	require.NoError(t, db.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bRoot[:]}))
	copied := beaconState.Copy()
	require.NoError(t, copied.SetSlot(1))
	proposerIdx, err := helpers.BeaconProposerIndex(copied)
@@ -190,6 +189,9 @@ func TestValidateBeaconBlockPubSub_CanRecoverStateSummary(t *testing.T) {
	_, err = p.Encoding().EncodeGossip(buf, msg)
	require.NoError(t, err)
	topic := p2p.GossipTypeMapping[reflect.TypeOf(msg)]
	digest, err := r.currentForkDigest()
	assert.NoError(t, err)
	topic = r.addDigestToTopic(topic, digest)
	m := &pubsub.Message{
		Message: &pubsubpb.Message{
			Data: buf.Bytes(),
@@ -211,7 +213,7 @@ func TestValidateBeaconBlockPubSub_ValidProposerSignature(t *testing.T) {
	bRoot, err := parentBlock.Block.HashTreeRoot()
	require.NoError(t, err)
	require.NoError(t, db.SaveState(ctx, beaconState, bRoot))
	require.NoError(t, db.SaveStateSummary(ctx, &statepb.StateSummary{Root: bRoot[:]}))
	require.NoError(t, db.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bRoot[:]}))
	copied := beaconState.Copy()
	require.NoError(t, copied.SetSlot(1))
	proposerIdx, err := helpers.BeaconProposerIndex(copied)
@@ -253,6 +255,9 @@ func TestValidateBeaconBlockPubSub_ValidProposerSignature(t *testing.T) {
	_, err = p.Encoding().EncodeGossip(buf, msg)
	require.NoError(t, err)
	topic := p2p.GossipTypeMapping[reflect.TypeOf(msg)]
	digest, err := r.currentForkDigest()
	assert.NoError(t, err)
	topic = r.addDigestToTopic(topic, digest)
	m := &pubsub.Message{
		Message: &pubsubpb.Message{
			Data: buf.Bytes(),
@@ -274,7 +279,7 @@ func TestValidateBeaconBlockPubSub_WithLookahead(t *testing.T) {
	bRoot, err := parentBlock.Block.HashTreeRoot()
	require.NoError(t, err)
	require.NoError(t, db.SaveState(ctx, beaconState, bRoot))
	require.NoError(t, db.SaveStateSummary(ctx, &statepb.StateSummary{Root: bRoot[:]}))
	require.NoError(t, db.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bRoot[:]}))
	copied := beaconState.Copy()
	// The next block is only 1 epoch ahead so as to not induce a new seed.
	blkSlot := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(helpers.NextEpoch(copied)))
@@ -313,11 +318,15 @@ func TestValidateBeaconBlockPubSub_WithLookahead(t *testing.T) {
		badBlockCache:       c2,
		slotToPendingBlocks: gcache.New(time.Second, 2*time.Second),
		seenPendingBlocks:   make(map[[32]byte]bool),
		subHandler:          newSubTopicHandler(),
	}
	buf := new(bytes.Buffer)
	_, err = p.Encoding().EncodeGossip(buf, msg)
	require.NoError(t, err)
	topic := p2p.GossipTypeMapping[reflect.TypeOf(msg)]
	digest, err := r.currentForkDigest()
	assert.NoError(t, err)
	topic = r.addDigestToTopic(topic, digest)
	m := &pubsub.Message{
		Message: &pubsubpb.Message{
			Data: buf.Bytes(),
@@ -339,7 +348,7 @@ func TestValidateBeaconBlockPubSub_AdvanceEpochsForState(t *testing.T) {
	bRoot, err := parentBlock.Block.HashTreeRoot()
	require.NoError(t, err)
	require.NoError(t, db.SaveState(ctx, beaconState, bRoot))
	require.NoError(t, db.SaveStateSummary(ctx, &statepb.StateSummary{Root: bRoot[:]}))
	require.NoError(t, db.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bRoot[:]}))
	copied := beaconState.Copy()
	// The next block is at least 2 epochs ahead to induce shuffling and a new seed.
	blkSlot := params.BeaconConfig().SlotsPerEpoch * 2
@@ -383,6 +392,9 @@ func TestValidateBeaconBlockPubSub_AdvanceEpochsForState(t *testing.T) {
	_, err = p.Encoding().EncodeGossip(buf, msg)
	require.NoError(t, err)
	topic := p2p.GossipTypeMapping[reflect.TypeOf(msg)]
	digest, err := r.currentForkDigest()
	assert.NoError(t, err)
	topic = r.addDigestToTopic(topic, digest)
	m := &pubsub.Message{
		Message: &pubsubpb.Message{
			Data: buf.Bytes(),
@@ -445,7 +457,7 @@ func TestValidateBeaconBlockPubSub_AcceptBlocksFromNearFuture(t *testing.T) {
	bRoot, err := parentBlock.Block.HashTreeRoot()
	require.NoError(t, err)
	require.NoError(t, db.SaveState(ctx, beaconState, bRoot))
	require.NoError(t, db.SaveStateSummary(ctx, &statepb.StateSummary{Root: bRoot[:]}))
	require.NoError(t, db.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bRoot[:]}))
	copied := beaconState.Copy()
	require.NoError(t, copied.SetSlot(1))
	proposerIdx, err := helpers.BeaconProposerIndex(copied)
@@ -488,6 +500,9 @@ func TestValidateBeaconBlockPubSub_AcceptBlocksFromNearFuture(t *testing.T) {
	_, err = p.Encoding().EncodeGossip(buf, msg)
	require.NoError(t, err)
	topic := p2p.GossipTypeMapping[reflect.TypeOf(msg)]
	digest, err := r.currentForkDigest()
	assert.NoError(t, err)
	topic = r.addDigestToTopic(topic, digest)
	m := &pubsub.Message{
		Message: &pubsubpb.Message{
			Data: buf.Bytes(),
@@ -731,7 +746,7 @@ func TestValidateBeaconBlockPubSub_ParentNotFinalizedDescendant(t *testing.T) {
	bRoot, err := parentBlock.Block.HashTreeRoot()
	require.NoError(t, err)
	require.NoError(t, db.SaveState(ctx, beaconState, bRoot))
	require.NoError(t, db.SaveStateSummary(ctx, &statepb.StateSummary{Root: bRoot[:]}))
	require.NoError(t, db.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bRoot[:]}))
	copied := beaconState.Copy()
	require.NoError(t, copied.SetSlot(1))
	proposerIdx, err := helpers.BeaconProposerIndex(copied)
@@ -774,6 +789,9 @@ func TestValidateBeaconBlockPubSub_ParentNotFinalizedDescendant(t *testing.T) {
	_, err = p.Encoding().EncodeGossip(buf, msg)
	require.NoError(t, err)
	topic := p2p.GossipTypeMapping[reflect.TypeOf(msg)]
	digest, err := r.currentForkDigest()
	assert.NoError(t, err)
	topic = r.addDigestToTopic(topic, digest)
	m := &pubsub.Message{
		Message: &pubsubpb.Message{
			Data: buf.Bytes(),
@@ -794,7 +812,7 @@ func TestValidateBeaconBlockPubSub_InvalidParentBlock(t *testing.T) {
	bRoot, err := parentBlock.Block.HashTreeRoot()
	require.NoError(t, err)
	require.NoError(t, db.SaveState(ctx, beaconState, bRoot))
	require.NoError(t, db.SaveStateSummary(ctx, &statepb.StateSummary{Root: bRoot[:]}))
	require.NoError(t, db.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bRoot[:]}))
	copied := beaconState.Copy()
	require.NoError(t, copied.SetSlot(1))
	proposerIdx, err := helpers.BeaconProposerIndex(copied)
@@ -883,7 +901,7 @@ func TestValidateBeaconBlockPubSub_RejectEvilBlocksFromFuture(t *testing.T) {
	bRoot, err := parentBlock.Block.HashTreeRoot()
	require.NoError(t, err)
	require.NoError(t, db.SaveState(ctx, beaconState, bRoot))
	require.NoError(t, db.SaveStateSummary(ctx, &statepb.StateSummary{Root: bRoot[:]}))
	require.NoError(t, db.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bRoot[:]}))

	copied := beaconState.Copy()
	// The next block is at least 2 epochs ahead to induce shuffling and a new seed.
@@ -21,7 +21,6 @@ import (
	v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
	mockSync "github.com/prysmaticlabs/prysm/beacon-chain/sync/initial-sync/testing"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	statepb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/shared/bls"
	"github.com/prysmaticlabs/prysm/shared/params"
	"github.com/prysmaticlabs/prysm/shared/testutil/assert"
@@ -45,11 +44,11 @@ func setupValidProposerSlashing(t *testing.T) (*ethpb.ProposerSlashing, state.Be
	}

	currentSlot := types.Slot(0)
	state, err := v1.InitializeFromProto(&statepb.BeaconState{
	state, err := v1.InitializeFromProto(&ethpb.BeaconState{
		Validators: validators,
		Slot:       currentSlot,
		Balances:   validatorBalances,
		Fork: &statepb.Fork{
		Fork: &ethpb.Fork{
			CurrentVersion:  params.BeaconConfig().GenesisForkVersion,
			PreviousVersion: params.BeaconConfig().GenesisForkVersion,
			Epoch:           0,
@@ -118,7 +117,7 @@ func TestValidateProposerSlashing_ValidSlashing(t *testing.T) {
	r := &Service{
		cfg: &Config{
			P2P: p,
			Chain: &mock.ChainService{State: s},
			Chain: &mock.ChainService{State: s, Genesis: time.Now()},
			InitialSync: &mockSync.Sync{IsSyncing: false},
		},
		seenProposerSlashingCache: c,
@@ -128,6 +127,9 @@ func TestValidateProposerSlashing_ValidSlashing(t *testing.T) {
	_, err = p.Encoding().EncodeGossip(buf, slashing)
	require.NoError(t, err)
	topic := p2p.GossipTypeMapping[reflect.TypeOf(slashing)]
	d, err := r.currentForkDigest()
	assert.NoError(t, err)
	topic = r.addDigestToTopic(topic, d)
	m := &pubsub.Message{
		Message: &pubsubpb.Message{
			Data: buf.Bytes(),
170
beacon-chain/sync/validate_sync_committee_message.go
Normal file
@@ -0,0 +1,170 @@
package sync

import (
	"context"
	"fmt"
	"reflect"
	"strings"

	"github.com/libp2p/go-libp2p-core/peer"
	pubsub "github.com/libp2p/go-libp2p-pubsub"
	types "github.com/prysmaticlabs/eth2-types"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
	p2ptypes "github.com/prysmaticlabs/prysm/beacon-chain/p2p/types"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/shared/bls"
	"github.com/prysmaticlabs/prysm/shared/bytesutil"
	"github.com/prysmaticlabs/prysm/shared/params"
	"github.com/prysmaticlabs/prysm/shared/traceutil"
	"go.opencensus.io/trace"
)

func (s *Service) validateSyncCommitteeMessage(ctx context.Context, pid peer.ID, msg *pubsub.Message) pubsub.ValidationResult {
	ctx, span := trace.StartSpan(ctx, "sync.validateSyncCommitteeMessage")
	defer span.End()

	// Accept the sync committee message if the message came from itself.
	if pid == s.cfg.P2P.PeerID() {
		return pubsub.ValidationAccept
	}

	// Ignore the sync committee message if the beacon node is syncing.
	if s.cfg.InitialSync.Syncing() {
		return pubsub.ValidationIgnore
	}

	if msg.Topic == nil {
		return pubsub.ValidationReject
	}

	raw, err := s.decodePubsubMessage(msg)
	if err != nil {
		log.WithError(err).Debug("Could not decode message")
		traceutil.AnnotateError(span, err)
		return pubsub.ValidationReject
	}

	m, ok := raw.(*ethpb.SyncCommitteeMessage)
	if !ok {
		return pubsub.ValidationReject
	}
	if m == nil {
		return pubsub.ValidationReject
	}

	// The message's `slot` is for the current slot (with a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance)
	if err := altair.ValidateSyncMessageTime(m.Slot, s.cfg.Chain.GenesisTime(), params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil {
		traceutil.AnnotateError(span, err)
		return pubsub.ValidationIgnore
	}

	// The `subnet_id` is valid for the given validator. This implies the validator is part of the broader current sync committee along with the correct subcommittee.
	// Check for validity of validator index.
	pubKey, err := s.cfg.Chain.HeadValidatorIndexToPublicKey(ctx, m.ValidatorIndex)
	if err != nil {
		traceutil.AnnotateError(span, err)
		return pubsub.ValidationReject
	}
	committeeIndices, err := s.cfg.Chain.HeadCurrentSyncCommitteeIndices(ctx, m.ValidatorIndex, m.Slot)
	if err != nil {
		traceutil.AnnotateError(span, err)
		return pubsub.ValidationIgnore
	}
	if len(committeeIndices) == 0 {
		return pubsub.ValidationIgnore
	}

	isValid := false
	digest, err := s.currentForkDigest()
	if err != nil {
		traceutil.AnnotateError(span, err)
		return pubsub.ValidationIgnore
	}

	format := p2p.GossipTypeMapping[reflect.TypeOf(&ethpb.SyncCommitteeMessage{})]
	// Validate that the validator is in the correct committee.
	subCommitteeSize := params.BeaconConfig().SyncCommitteeSize / params.BeaconConfig().SyncCommitteeSubnetCount
	for _, idx := range committeeIndices {
		subnet := uint64(idx) / subCommitteeSize
		if strings.HasPrefix(*msg.Topic, fmt.Sprintf(format, digest, subnet)) {
			isValid = true
			break
		}
	}
	if !isValid {
		return pubsub.ValidationReject
	}

	// There has been no other valid sync committee signature for the declared `slot`, `validator_index` and `subcommittee_index`.
	// In the event `validator_index` belongs to multiple subnets, as long as one subnet has not been seen, we should let it in.
	for _, idx := range committeeIndices {
		subnet := uint64(idx) / subCommitteeSize
		if s.hasSeenSyncMessageIndexSlot(m.Slot, m.ValidatorIndex, subnet) {
			isValid = false
		} else {
			isValid = true
		}
	}
	if !isValid {
		return pubsub.ValidationIgnore
	}

	// The signature is valid for the message `beacon_block_root` for the validator referenced by `validator_index`.
	d, err := s.cfg.Chain.HeadSyncCommitteeDomain(ctx, m.Slot)
	if err != nil {
		traceutil.AnnotateError(span, err)
		return pubsub.ValidationIgnore
	}
	rawBytes := p2ptypes.SSZBytes(m.BlockRoot)
	sigRoot, err := helpers.ComputeSigningRoot(&rawBytes, d)
	if err != nil {
		traceutil.AnnotateError(span, err)
		return pubsub.ValidationIgnore
	}

	blsSig, err := bls.SignatureFromBytes(m.Signature)
	if err != nil {
		traceutil.AnnotateError(span, err)
		return pubsub.ValidationReject
	}
	pKey, err := bls.PublicKeyFromBytes(pubKey[:])
	if err != nil {
		traceutil.AnnotateError(span, err)
		return pubsub.ValidationIgnore
	}
	verified := blsSig.Verify(pKey, sigRoot[:])
	if !verified {
		return pubsub.ValidationReject
	}

	for _, idx := range committeeIndices {
		subnet := uint64(idx) / subCommitteeSize
		s.setSeenSyncMessageIndexSlot(m.Slot, m.ValidatorIndex, subnet)
	}

	msg.ValidatorData = m
	return pubsub.ValidationAccept
}

// Returns true if the node has received a sync committee message for the given validator index and slot.
func (s *Service) hasSeenSyncMessageIndexSlot(slot types.Slot, valIndex types.ValidatorIndex, subCommitteeIndex uint64) bool {
	s.seenSyncMessageLock.RLock()
	defer s.seenSyncMessageLock.RUnlock()

	b := append(bytesutil.Bytes32(uint64(slot)), bytesutil.Bytes32(uint64(valIndex))...)
	b = append(b, bytesutil.Bytes32(subCommitteeIndex)...)
	_, seen := s.seenSyncMessageCache.Get(string(b))
	return seen
}

// Set sync committee message validator index and slot as seen.
func (s *Service) setSeenSyncMessageIndexSlot(slot types.Slot, valIndex types.ValidatorIndex, subCommitteeIndex uint64) {
	s.seenSyncMessageLock.Lock()
	defer s.seenSyncMessageLock.Unlock()

	b := append(bytesutil.Bytes32(uint64(slot)), bytesutil.Bytes32(uint64(valIndex))...)
	b = append(b, bytesutil.Bytes32(subCommitteeIndex)...)
	s.seenSyncMessageCache.Add(string(b), true)
}
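
The committee-index-to-subnet mapping above is plain integer division by subCommitteeSize. A worked example, assuming mainnet values of SYNC_COMMITTEE_SIZE = 512 and SYNC_COMMITTEE_SUBNET_COUNT = 4 (so each subcommittee spans 128 indices):

package main

import "fmt"

func main() {
	const subCommitteeSize = 512 / 4 // 128, under the assumed mainnet parameters
	for _, idx := range []uint64{0, 127, 128, 300, 511} {
		fmt.Printf("committee index %d -> subnet %d\n", idx, idx/subCommitteeSize)
	}
	// Indices 0-127 land on subnet 0, index 300 on subnet 2, and so on.
}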
427
beacon-chain/sync/validate_sync_committee_message_test.go
Normal file
@@ -0,0 +1,427 @@
package sync

import (
	"context"
	"fmt"
	"testing"
	"time"

	"github.com/golang/snappy"
	"github.com/libp2p/go-libp2p-core/peer"
	pubsub "github.com/libp2p/go-libp2p-pubsub"
	pubsub_pb "github.com/libp2p/go-libp2p-pubsub/pb"
	types "github.com/prysmaticlabs/eth2-types"
	mockChain "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	testingDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
	"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
	"github.com/prysmaticlabs/prysm/beacon-chain/p2p/encoder"
	mockp2p "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing"
	p2ptypes "github.com/prysmaticlabs/prysm/beacon-chain/p2p/types"
	"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
	mockSync "github.com/prysmaticlabs/prysm/beacon-chain/sync/initial-sync/testing"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/shared/bytesutil"
	"github.com/prysmaticlabs/prysm/shared/params"
	"github.com/prysmaticlabs/prysm/shared/testutil/assert"
)

func TestService_ValidateSyncCommitteeMessage(t *testing.T) {
	db := testingDB.SetupDB(t)
	headRoot, keys := fillUpBlocksAndState(context.Background(), t, db)
	defaultTopic := p2p.SyncCommitteeSubnetTopicFormat
	fakeDigest := []byte{0xAB, 0x00, 0xCC, 0x9E}
	defaultTopic = defaultTopic + "/" + encoder.ProtocolSuffixSSZSnappy
	chainService := &mockChain.ChainService{
		Genesis:        time.Now(),
		ValidatorsRoot: [32]byte{'A'},
	}
	emptySig := [96]byte{}
	type args struct {
		ctx   context.Context
		pid   peer.ID
		msg   *ethpb.SyncCommitteeMessage
		topic string
	}
	tests := []struct {
		name     string
		svc      *Service
		setupSvc func(s *Service, msg *ethpb.SyncCommitteeMessage, topic string) (*Service, string)
		args     args
		want     pubsub.ValidationResult
	}{
		{
			name: "Is syncing",
			svc: NewService(context.Background(), &Config{
				P2P:               mockp2p.NewTestP2P(t),
				InitialSync:       &mockSync.Sync{IsSyncing: true},
				Chain:             chainService,
				StateNotifier:     chainService.StateNotifier(),
				OperationNotifier: chainService.OperationNotifier(),
			}),
			setupSvc: func(s *Service, msg *ethpb.SyncCommitteeMessage, topic string) (*Service, string) {
				s.cfg.StateGen = stategen.New(db)
				msg.BlockRoot = headRoot[:]
				s.cfg.DB = db
				assert.NoError(t, s.initCaches())
				return s, topic
			},
			args: args{
				ctx:   context.Background(),
				pid:   "random",
				topic: "junk",
				msg: &ethpb.SyncCommitteeMessage{
					Slot:           1,
					ValidatorIndex: 1,
					BlockRoot:      params.BeaconConfig().ZeroHash[:],
					Signature:      emptySig[:],
				}},
			want: pubsub.ValidationIgnore,
		},
		{
			name: "Bad Topic",
			svc: NewService(context.Background(), &Config{
				P2P:               mockp2p.NewTestP2P(t),
				InitialSync:       &mockSync.Sync{IsSyncing: false},
				Chain:             chainService,
				StateNotifier:     chainService.StateNotifier(),
				OperationNotifier: chainService.OperationNotifier(),
			}),
			setupSvc: func(s *Service, msg *ethpb.SyncCommitteeMessage, topic string) (*Service, string) {
				s.cfg.StateGen = stategen.New(db)
				msg.BlockRoot = headRoot[:]
				s.cfg.DB = db
				assert.NoError(t, s.initCaches())
				return s, topic
			},
			args: args{
				ctx:   context.Background(),
				pid:   "random",
				topic: "junk",
				msg: &ethpb.SyncCommitteeMessage{
					Slot:           1,
					ValidatorIndex: 1,
					BlockRoot:      params.BeaconConfig().ZeroHash[:],
					Signature:      emptySig[:],
				}},
			want: pubsub.ValidationReject,
		},
		{
			name: "Future Slot Message",
			svc: NewService(context.Background(), &Config{
				P2P:               mockp2p.NewTestP2P(t),
				InitialSync:       &mockSync.Sync{IsSyncing: false},
				Chain:             chainService,
				StateNotifier:     chainService.StateNotifier(),
				OperationNotifier: chainService.OperationNotifier(),
			}),
			setupSvc: func(s *Service, msg *ethpb.SyncCommitteeMessage, topic string) (*Service, string) {
				s.cfg.StateGen = stategen.New(db)
				s.cfg.DB = db
				assert.NoError(t, s.initCaches())
				return s, topic
			},
			args: args{
				ctx:   context.Background(),
				pid:   "random",
				topic: fmt.Sprintf(defaultTopic, fakeDigest, 0),
				msg: &ethpb.SyncCommitteeMessage{
					Slot:           10,
					ValidatorIndex: 1,
					BlockRoot:      params.BeaconConfig().ZeroHash[:],
					Signature:      emptySig[:],
				}},
			want: pubsub.ValidationIgnore,
		},
		{
			name: "Already Seen Message",
			svc: NewService(context.Background(), &Config{
				P2P:               mockp2p.NewTestP2P(t),
				InitialSync:       &mockSync.Sync{IsSyncing: false},
				Chain:             chainService,
				StateNotifier:     chainService.StateNotifier(),
				OperationNotifier: chainService.OperationNotifier(),
			}),
			setupSvc: func(s *Service, msg *ethpb.SyncCommitteeMessage, topic string) (*Service, string) {
				s.cfg.StateGen = stategen.New(db)
				s.cfg.DB = db
				assert.NoError(t, s.initCaches())

				s.setSeenSyncMessageIndexSlot(1, 1, 0)
				return s, topic
			},
			args: args{
				ctx:   context.Background(),
				pid:   "random",
				topic: fmt.Sprintf(defaultTopic, fakeDigest, 0),
				msg: &ethpb.SyncCommitteeMessage{
					Slot:           1,
					ValidatorIndex: 1,
					BlockRoot:      params.BeaconConfig().ZeroHash[:],
					Signature:      emptySig[:],
				}},
			want: pubsub.ValidationIgnore,
		},
		{
			name: "Non-existent block root",
			svc: NewService(context.Background(), &Config{
				P2P:               mockp2p.NewTestP2P(t),
				InitialSync:       &mockSync.Sync{IsSyncing: false},
				Chain:             chainService,
				StateNotifier:     chainService.StateNotifier(),
				OperationNotifier: chainService.OperationNotifier(),
			}),
			setupSvc: func(s *Service, msg *ethpb.SyncCommitteeMessage, topic string) (*Service, string) {
				s.cfg.StateGen = stategen.New(db)
				s.cfg.DB = db
				assert.NoError(t, s.initCaches())
				s.cfg.Chain = &mockChain.ChainService{
					ValidatorsRoot: [32]byte{'A'},
					Genesis:        time.Now().Add(-time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Duration(10)),
				}
				incorrectRoot := [32]byte{0xBB}
				msg.BlockRoot = incorrectRoot[:]

				return s, topic
			},
			args: args{
				ctx:   context.Background(),
				pid:   "random",
				topic: fmt.Sprintf(defaultTopic, fakeDigest, 0),
				msg: &ethpb.SyncCommitteeMessage{
					Slot:           1,
					ValidatorIndex: 1,
					BlockRoot:      params.BeaconConfig().ZeroHash[:],
					Signature:      emptySig[:],
				}},
			want: pubsub.ValidationIgnore,
		},
		{
			name: "Subnet is non-existent",
			svc: NewService(context.Background(), &Config{
				P2P:               mockp2p.NewTestP2P(t),
				InitialSync:       &mockSync.Sync{IsSyncing: false},
				Chain:             chainService,
				StateNotifier:     chainService.StateNotifier(),
				OperationNotifier: chainService.OperationNotifier(),
			}),
			setupSvc: func(s *Service, msg *ethpb.SyncCommitteeMessage, topic string) (*Service, string) {
				s.cfg.StateGen = stategen.New(db)
				s.cfg.DB = db
				assert.NoError(t, s.initCaches())
				msg.BlockRoot = headRoot[:]
				hState, err := db.State(context.Background(), headRoot)
				assert.NoError(t, err)
				s.cfg.Chain = &mockChain.ChainService{
					CurrentSyncCommitteeIndices: []types.CommitteeIndex{0},
					ValidatorsRoot:              [32]byte{'A'},
					Genesis:                     time.Now().Add(-time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Duration(hState.Slot()-1)),
				}
				numOfVals := hState.NumValidators()

				chosenVal := numOfVals - 10
				msg.Signature = emptySig[:]
				msg.BlockRoot = headRoot[:]
				msg.ValidatorIndex = types.ValidatorIndex(chosenVal)
				msg.Slot = helpers.PrevSlot(hState.Slot())

				// Set Bad Topic and Subnet
				digest, err := s.currentForkDigest()
				assert.NoError(t, err)
				actualTopic := fmt.Sprintf(defaultTopic, digest, 5)

				return s, actualTopic
			},
			args: args{
				ctx:   context.Background(),
				pid:   "random",
				topic: defaultTopic,
				msg: &ethpb.SyncCommitteeMessage{
					Slot:           1,
					ValidatorIndex: 1,
					BlockRoot:      params.BeaconConfig().ZeroHash[:],
					Signature:      emptySig[:],
				}},
			want: pubsub.ValidationReject,
		},
		{
			name: "Validator is non-existent",
			svc: NewService(context.Background(), &Config{
				P2P:               mockp2p.NewTestP2P(t),
				InitialSync:       &mockSync.Sync{IsSyncing: false},
				Chain:             chainService,
				StateNotifier:     chainService.StateNotifier(),
				OperationNotifier: chainService.OperationNotifier(),
			}),
			setupSvc: func(s *Service, msg *ethpb.SyncCommitteeMessage, topic string) (*Service, string) {
				s.cfg.StateGen = stategen.New(db)
				s.cfg.DB = db
				assert.NoError(t, s.initCaches())
				msg.BlockRoot = headRoot[:]
				hState, err := db.State(context.Background(), headRoot)
				assert.NoError(t, err)
				s.cfg.Chain = &mockChain.ChainService{
					ValidatorsRoot: [32]byte{'A'},
					Genesis:        time.Now().Add(-time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Duration(hState.Slot()-1)),
				}

				numOfVals := hState.NumValidators()

				chosenVal := numOfVals + 10
				msg.Signature = emptySig[:]
				msg.BlockRoot = headRoot[:]
				msg.ValidatorIndex = types.ValidatorIndex(chosenVal)
				msg.Slot = helpers.PrevSlot(hState.Slot())

				return s, topic
			},
			args: args{
				ctx:   context.Background(),
				pid:   "random",
				topic: defaultTopic,
				msg: &ethpb.SyncCommitteeMessage{
					Slot:           1,
					ValidatorIndex: 1,
					BlockRoot:      params.BeaconConfig().ZeroHash[:],
					Signature:      emptySig[:],
				}},
			want: pubsub.ValidationReject,
		},
		{
			name: "Invalid Sync Committee Signature",
			svc: NewService(context.Background(), &Config{
				P2P:               mockp2p.NewTestP2P(t),
				InitialSync:       &mockSync.Sync{IsSyncing: false},
				Chain:             chainService,
				StateNotifier:     chainService.StateNotifier(),
				OperationNotifier: chainService.OperationNotifier(),
			}),
			setupSvc: func(s *Service, msg *ethpb.SyncCommitteeMessage, topic string) (*Service, string) {
				s.cfg.StateGen = stategen.New(db)
				s.cfg.DB = db
				assert.NoError(t, s.initCaches())
				msg.BlockRoot = headRoot[:]
				hState, err := db.State(context.Background(), headRoot)
				assert.NoError(t, err)

				numOfVals := hState.NumValidators()

				chosenVal := numOfVals - 10
				msg.Signature = emptySig[:]
				msg.BlockRoot = headRoot[:]
				msg.ValidatorIndex = types.ValidatorIndex(chosenVal)
				msg.Slot = helpers.PrevSlot(hState.Slot())

				d, err := helpers.Domain(hState.Fork(), helpers.SlotToEpoch(hState.Slot()), params.BeaconConfig().DomainSyncCommittee, hState.GenesisValidatorRoot())
				assert.NoError(t, err)
				subCommitteeSize := params.BeaconConfig().SyncCommitteeSize / params.BeaconConfig().SyncCommitteeSubnetCount
				s.cfg.Chain = &mockChain.ChainService{
					CurrentSyncCommitteeIndices: []types.CommitteeIndex{types.CommitteeIndex(subCommitteeSize)},
					ValidatorsRoot:              [32]byte{'A'},
					Genesis:                     time.Now().Add(-time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Duration(hState.Slot()-1)),
					SyncCommitteeDomain:         d,
					PublicKey:                   bytesutil.ToBytes48(keys[chosenVal].PublicKey().Marshal()),
				}

				// Set Topic and Subnet
				digest, err := s.currentForkDigest()
				assert.NoError(t, err)
				actualTopic := fmt.Sprintf(defaultTopic, digest, 1)

				return s, actualTopic
			},
			args: args{
				ctx:   context.Background(),
				pid:   "random",
				topic: defaultTopic,
				msg: &ethpb.SyncCommitteeMessage{
					Slot:           1,
					ValidatorIndex: 1,
					BlockRoot:      params.BeaconConfig().ZeroHash[:],
					Signature:      emptySig[:],
				}},
			want: pubsub.ValidationReject,
		},
		{
			name: "Valid Sync Committee Signature",
			svc: NewService(context.Background(), &Config{
				P2P:               mockp2p.NewTestP2P(t),
				InitialSync:       &mockSync.Sync{IsSyncing: false},
				Chain:             chainService,
				StateNotifier:     chainService.StateNotifier(),
				OperationNotifier: chainService.OperationNotifier(),
			}),
			setupSvc: func(s *Service, msg *ethpb.SyncCommitteeMessage, topic string) (*Service, string) {
				s.cfg.StateGen = stategen.New(db)
				s.cfg.DB = db
				assert.NoError(t, s.initCaches())
				msg.BlockRoot = headRoot[:]
				hState, err := db.State(context.Background(), headRoot)
				assert.NoError(t, err)
				subCommitteeSize := params.BeaconConfig().SyncCommitteeSize / params.BeaconConfig().SyncCommitteeSubnetCount

				numOfVals := hState.NumValidators()

				chosenVal := numOfVals - 10
				d, err := helpers.Domain(hState.Fork(), helpers.SlotToEpoch(hState.Slot()), params.BeaconConfig().DomainSyncCommittee, hState.GenesisValidatorRoot())
				assert.NoError(t, err)
				rawBytes := p2ptypes.SSZBytes(headRoot[:])
				sigRoot, err := helpers.ComputeSigningRoot(&rawBytes, d)
				assert.NoError(t, err)

				s.cfg.Chain = &mockChain.ChainService{
					CurrentSyncCommitteeIndices: []types.CommitteeIndex{types.CommitteeIndex(subCommitteeSize)},
					ValidatorsRoot:              [32]byte{'A'},
					Genesis:                     time.Now().Add(-time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Duration(hState.Slot()-1)),
					SyncCommitteeDomain:         d,
					PublicKey:                   bytesutil.ToBytes48(keys[chosenVal].PublicKey().Marshal()),
				}

				msg.Signature = keys[chosenVal].Sign(sigRoot[:]).Marshal()
				msg.BlockRoot = headRoot[:]
				msg.ValidatorIndex = types.ValidatorIndex(chosenVal)
				msg.Slot = helpers.PrevSlot(hState.Slot())

				// Set Topic and Subnet
				digest, err := s.currentForkDigest()
				assert.NoError(t, err)
				actualTopic := fmt.Sprintf(defaultTopic, digest, 1)

				return s, actualTopic
			},
			args: args{
				ctx:   context.Background(),
				pid:   "random",
				topic: defaultTopic,
				msg: &ethpb.SyncCommitteeMessage{
					Slot:           1,
					ValidatorIndex: 1,
					BlockRoot:      params.BeaconConfig().ZeroHash[:],
					Signature:      emptySig[:],
				}},
			want: pubsub.ValidationAccept,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if tt.name == "Bad Topic" {
				t.Skip()
			}
			tt.svc, tt.args.topic = tt.setupSvc(tt.svc, tt.args.msg, tt.args.topic)
			marshalledObj, err := tt.args.msg.MarshalSSZ()
			assert.NoError(t, err)
			marshalledObj = snappy.Encode(nil, marshalledObj)
			msg := &pubsub.Message{
				Message: &pubsub_pb.Message{
					Data:  marshalledObj,
					Topic: &tt.args.topic,
				},
				ReceivedFrom:  "",
				ValidatorData: nil,
			}
			if got := tt.svc.validateSyncCommitteeMessage(tt.args.ctx, tt.args.pid, msg); got != tt.want {
				t.Errorf("validateSyncCommitteeMessage() = %v, want %v", got, tt.want)
			}
		})
	}
}
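For reference, the test above derives concrete gossip topics by formatting the subnet topic template with a fork digest and subnet index, then appending the encoder suffix. A sketch under the assumption that the template follows the spec's /eth2/{fork_digest}/sync_committee_{subnet_id} shape (the exact constant lives in the p2p package):

package main

import "fmt"

func main() {
	// Shape assumed for illustration; not copied from p2p.SyncCommitteeSubnetTopicFormat.
	const topicFormat = "/eth2/%x/sync_committee_%d/ssz_snappy"
	digest := [4]byte{0xAB, 0x00, 0xCC, 0x9E} // the test's fake fork digest
	fmt.Println(fmt.Sprintf(topicFormat, digest, 0))
	// Output: /eth2/ab00cc9e/sync_committee_0/ssz_snappy
}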
206  beacon-chain/sync/validate_sync_contribution_proof.go  Normal file
@@ -0,0 +1,206 @@
package sync

import (
	"context"

	"github.com/libp2p/go-libp2p-core/peer"
	pubsub "github.com/libp2p/go-libp2p-pubsub"
	types "github.com/prysmaticlabs/eth2-types"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	p2ptypes "github.com/prysmaticlabs/prysm/beacon-chain/p2p/types"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/shared/bls"
	"github.com/prysmaticlabs/prysm/shared/bytesutil"
	"github.com/prysmaticlabs/prysm/shared/params"
	"github.com/prysmaticlabs/prysm/shared/traceutil"
	"go.opencensus.io/trace"
)

// validateSyncContributionAndProof verifies that the aggregated signature and the selection proof are valid before
// forwarding the message to the network and downstream services.
func (s *Service) validateSyncContributionAndProof(ctx context.Context, pid peer.ID, msg *pubsub.Message) pubsub.ValidationResult {
	ctx, span := trace.StartSpan(ctx, "sync.validateSyncContributionAndProof")
	defer span.End()

	// Accept the sync committee contribution if it came from this node itself.
	if pid == s.cfg.P2P.PeerID() {
		return pubsub.ValidationAccept
	}

	// Ignore the sync committee contribution if the beacon node is syncing.
	if s.cfg.InitialSync.Syncing() {
		return pubsub.ValidationIgnore
	}

	raw, err := s.decodePubsubMessage(msg)
	if err != nil {
		log.WithError(err).Debug("Could not decode message")
		traceutil.AnnotateError(span, err)
		return pubsub.ValidationReject
	}
	m, ok := raw.(*ethpb.SignedContributionAndProof)
	if !ok {
		return pubsub.ValidationReject
	}
	if m == nil || m.Message == nil {
		return pubsub.ValidationReject
	}
	if err := altair.ValidateNilSyncContribution(m); err != nil {
		return pubsub.ValidationReject
	}

	// The contribution's `slot` is for the current slot (with a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance).
	if err := helpers.VerifySlotTime(uint64(s.cfg.Chain.GenesisTime().Unix()), m.Message.Contribution.Slot, params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil {
		traceutil.AnnotateError(span, err)
		return pubsub.ValidationIgnore
	}

	// The subcommittee index is in the allowed range, i.e. `contribution.subcommittee_index < SYNC_COMMITTEE_SUBNET_COUNT`.
	if m.Message.Contribution.SubcommitteeIndex >= params.BeaconConfig().SyncCommitteeSubnetCount {
		return pubsub.ValidationReject
	}

	// The sync committee contribution is the first valid contribution received for the aggregator with index
	// `contribution_and_proof.aggregator_index` for the slot `contribution.slot` and subcommittee index `contribution.subcommittee_index`.
	if s.hasSeenSyncContributionIndexSlot(m.Message.Contribution.Slot, m.Message.AggregatorIndex, types.CommitteeIndex(m.Message.Contribution.SubcommitteeIndex)) {
		return pubsub.ValidationIgnore
	}

	// The `contribution_and_proof.selection_proof` selects the validator as an aggregator for the slot.
	isAggregator, err := altair.IsSyncCommitteeAggregator(m.Message.SelectionProof)
	if err != nil {
		return pubsub.ValidationReject
	}
	if !isAggregator {
		return pubsub.ValidationReject
	}

	// The aggregator's validator index is in the declared subcommittee of the current sync committee.
	committeeIndices, err := s.cfg.Chain.HeadCurrentSyncCommitteeIndices(ctx, m.Message.AggregatorIndex, m.Message.Contribution.Slot)
	if err != nil {
		traceutil.AnnotateError(span, err)
		return pubsub.ValidationIgnore
	}
	if len(committeeIndices) == 0 {
		traceutil.AnnotateError(span, err)
		return pubsub.ValidationIgnore
	}
	isValid := false
	subCommitteeSize := params.BeaconConfig().SyncCommitteeSize / params.BeaconConfig().SyncCommitteeSubnetCount
	for _, i := range committeeIndices {
		if uint64(i)/subCommitteeSize == m.Message.Contribution.SubcommitteeIndex {
			isValid = true
			break
		}
	}
	if !isValid {
		return pubsub.ValidationReject
	}

	// The `contribution_and_proof.selection_proof` is a valid signature of the `SyncAggregatorSelectionData`.
	if err := s.verifySyncSelectionData(ctx, m.Message); err != nil {
		traceutil.AnnotateError(span, err)
		return pubsub.ValidationReject
	}

	// The aggregator signature, `signed_contribution_and_proof.signature`, is valid.
	d, err := s.cfg.Chain.HeadSyncContributionProofDomain(ctx, m.Message.Contribution.Slot)
	if err != nil {
		traceutil.AnnotateError(span, err)
		return pubsub.ValidationIgnore
	}
	pubkey, err := s.cfg.Chain.HeadValidatorIndexToPublicKey(ctx, m.Message.AggregatorIndex)
	if err != nil {
		return pubsub.ValidationIgnore
	}
	if err := helpers.VerifySigningRoot(m.Message, pubkey[:], m.Signature, d); err != nil {
		traceutil.AnnotateError(span, err)
		return pubsub.ValidationReject
	}

	// The aggregate signature is valid for the message `beacon_block_root` and the aggregate pubkey derived from the
	// participation info in `aggregation_bits` for the subcommittee specified by `contribution.subcommittee_index`.
	activePubkeys := []bls.PublicKey{}
	syncPubkeys, err := s.cfg.Chain.HeadSyncCommitteePubKeys(ctx, m.Message.Contribution.Slot, types.CommitteeIndex(m.Message.Contribution.SubcommitteeIndex))
	if err != nil {
		return pubsub.ValidationIgnore
	}
	bVector := m.Message.Contribution.AggregationBits
	// In the event no bit is set for the sync contribution, we reject the message.
	if bVector.Count() == 0 {
		return pubsub.ValidationReject
	}
	for i, pk := range syncPubkeys {
		if bVector.BitAt(uint64(i)) {
			pubK, err := bls.PublicKeyFromBytes(pk)
			if err != nil {
				traceutil.AnnotateError(span, err)
				return pubsub.ValidationIgnore
			}
			activePubkeys = append(activePubkeys, pubK)
		}
	}
	sig, err := bls.SignatureFromBytes(m.Message.Contribution.Signature)
	if err != nil {
		traceutil.AnnotateError(span, err)
		return pubsub.ValidationReject
	}
	d, err = s.cfg.Chain.HeadSyncCommitteeDomain(ctx, m.Message.Contribution.Slot)
	if err != nil {
		traceutil.AnnotateError(span, err)
		return pubsub.ValidationIgnore
	}
	rawBytes := p2ptypes.SSZBytes(m.Message.Contribution.BlockRoot)
	sigRoot, err := helpers.ComputeSigningRoot(&rawBytes, d)
	if err != nil {
		traceutil.AnnotateError(span, err)
		return pubsub.ValidationIgnore
	}
	verified := sig.Eth2FastAggregateVerify(activePubkeys, sigRoot)
	if !verified {
		return pubsub.ValidationReject
	}

	s.setSyncContributionIndexSlotSeen(m.Message.Contribution.Slot, m.Message.AggregatorIndex, types.CommitteeIndex(m.Message.Contribution.SubcommitteeIndex))

	msg.ValidatorData = m

	return pubsub.ValidationAccept
}
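The subcommittee-membership loop above relies on sync committee positions mapping onto subnets in contiguous blocks: position i belongs to subnet i / subCommitteeSize. A small worked example using the mainnet parameters (SYNC_COMMITTEE_SIZE = 512, SYNC_COMMITTEE_SUBNET_COUNT = 4, so each subcommittee spans 128 positions):

package main

import "fmt"

func main() {
	const subCommitteeSize = 512 / 4 // 128 positions per subcommittee on mainnet
	for _, i := range []uint64{3, 130, 300, 511} {
		// Integer division assigns each committee position to its subnet.
		fmt.Printf("position %d -> subcommittee %d\n", i, i/subCommitteeSize)
	}
	// position 3 -> 0, 130 -> 1, 300 -> 2, 511 -> 3; a contribution declaring
	// any other subcommittee index for these positions would be rejected.
}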

// Returns true if the node has already seen a sync contribution for the given aggregator index, slot, and subcommittee index.
func (s *Service) hasSeenSyncContributionIndexSlot(slot types.Slot, aggregatorIndex types.ValidatorIndex, subComIdx types.CommitteeIndex) bool {
	s.seenSyncContributionLock.RLock()
	defer s.seenSyncContributionLock.RUnlock()

	b := append(bytesutil.Bytes32(uint64(aggregatorIndex)), bytesutil.Bytes32(uint64(slot))...)
	b = append(b, bytesutil.Bytes32(uint64(subComIdx))...)
	_, seen := s.seenSyncContributionCache.Get(string(b))
	return seen
}

// Marks a sync contribution with the given aggregator index, slot, and subcommittee index as seen.
func (s *Service) setSyncContributionIndexSlotSeen(slot types.Slot, aggregatorIndex types.ValidatorIndex, subComIdx types.CommitteeIndex) {
	s.seenSyncContributionLock.Lock()
	defer s.seenSyncContributionLock.Unlock()
	b := append(bytesutil.Bytes32(uint64(aggregatorIndex)), bytesutil.Bytes32(uint64(slot))...)
	b = append(b, bytesutil.Bytes32(uint64(subComIdx))...)
	s.seenSyncContributionCache.Add(string(b), true)
}

// verifySyncSelectionData verifies that the provided sync contribution has a valid
// selection proof.
func (s *Service) verifySyncSelectionData(ctx context.Context, m *ethpb.ContributionAndProof) error {
	selectionData := &ethpb.SyncAggregatorSelectionData{Slot: m.Contribution.Slot, SubcommitteeIndex: uint64(m.Contribution.SubcommitteeIndex)}
	domain, err := s.cfg.Chain.HeadSyncSelectionProofDomain(ctx, m.Contribution.Slot)
	if err != nil {
		return err
	}
	pubkey, err := s.cfg.Chain.HeadValidatorIndexToPublicKey(ctx, m.AggregatorIndex)
	if err != nil {
		return err
	}
	return helpers.VerifySigningRoot(selectionData, pubkey[:], m.SelectionProof, domain)
}
870  beacon-chain/sync/validate_sync_contribution_proof_test.go  Normal file
@@ -0,0 +1,870 @@
package sync

import (
	"context"
	"fmt"
	"testing"
	"time"

	"github.com/golang/snappy"
	"github.com/libp2p/go-libp2p-core/peer"
	pubsub "github.com/libp2p/go-libp2p-pubsub"
	pubsub_pb "github.com/libp2p/go-libp2p-pubsub/pb"
	types "github.com/prysmaticlabs/eth2-types"
	"github.com/prysmaticlabs/go-bitfield"
	mockChain "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	core "github.com/prysmaticlabs/prysm/beacon-chain/core/state"
	"github.com/prysmaticlabs/prysm/beacon-chain/db"
	testingDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
	"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
	"github.com/prysmaticlabs/prysm/beacon-chain/p2p/encoder"
	mockp2p "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing"
	p2ptypes "github.com/prysmaticlabs/prysm/beacon-chain/p2p/types"
	"github.com/prysmaticlabs/prysm/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
	mockSync "github.com/prysmaticlabs/prysm/beacon-chain/sync/initial-sync/testing"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
	"github.com/prysmaticlabs/prysm/shared/bls"
	"github.com/prysmaticlabs/prysm/shared/bytesutil"
	"github.com/prysmaticlabs/prysm/shared/params"
	"github.com/prysmaticlabs/prysm/shared/testutil"
	"github.com/prysmaticlabs/prysm/shared/testutil/assert"
	"github.com/prysmaticlabs/prysm/shared/testutil/require"
)

func TestService_ValidateSyncContributionAndProof(t *testing.T) {
	db := testingDB.SetupDB(t)
	headRoot, keys := fillUpBlocksAndState(context.Background(), t, db)
	defaultTopic := p2p.SyncContributionAndProofSubnetTopicFormat
	defaultTopic = fmt.Sprintf(defaultTopic, []byte{0xAB, 0x00, 0xCC, 0x9E})
	defaultTopic = defaultTopic + "/" + encoder.ProtocolSuffixSSZSnappy
	chainService := &mockChain.ChainService{
		Genesis:        time.Now(),
		ValidatorsRoot: [32]byte{'A'},
	}
	emptySig := [96]byte{}
	type args struct {
		ctx   context.Context
		pid   peer.ID
		msg   *ethpb.SignedContributionAndProof
		topic string
	}
	tests := []struct {
		name     string
		svc      *Service
		setupSvc func(s *Service, msg *ethpb.SignedContributionAndProof) *Service
		args     args
		want     pubsub.ValidationResult
	}{
		{
			name: "Is syncing",
			svc: NewService(context.Background(), &Config{
				P2P:               mockp2p.NewTestP2P(t),
				InitialSync:       &mockSync.Sync{IsSyncing: true},
				Chain:             chainService,
				StateNotifier:     chainService.StateNotifier(),
				OperationNotifier: chainService.OperationNotifier(),
			}),
			setupSvc: func(s *Service, msg *ethpb.SignedContributionAndProof) *Service {
				s.cfg.StateGen = stategen.New(db)
				msg.Message.Contribution.BlockRoot = headRoot[:]
				s.cfg.DB = db
				assert.NoError(t, s.initCaches())
				return s
			},
			args: args{
				ctx:   context.Background(),
				pid:   "random",
				topic: "junk",
				msg: &ethpb.SignedContributionAndProof{
					Message: &ethpb.ContributionAndProof{
						AggregatorIndex: 1,
						Contribution: &ethpb.SyncCommitteeContribution{
							Slot:              1,
							SubcommitteeIndex: 1,
							BlockRoot:         params.BeaconConfig().ZeroHash[:],
							AggregationBits:   bitfield.NewBitvector128(),
							Signature:         emptySig[:],
						},
						SelectionProof: emptySig[:],
					},
					Signature: emptySig[:],
				}},
			want: pubsub.ValidationIgnore,
		},
		{
			name: "Bad Topic",
			svc: NewService(context.Background(), &Config{
				P2P:               mockp2p.NewTestP2P(t),
				InitialSync:       &mockSync.Sync{IsSyncing: false},
				Chain:             chainService,
				StateNotifier:     chainService.StateNotifier(),
				OperationNotifier: chainService.OperationNotifier(),
			}),
			setupSvc: func(s *Service, msg *ethpb.SignedContributionAndProof) *Service {
				s.cfg.StateGen = stategen.New(db)
				msg.Message.Contribution.BlockRoot = headRoot[:]
				s.cfg.DB = db
				assert.NoError(t, s.initCaches())
				return s
			},
			args: args{
				ctx:   context.Background(),
				pid:   "random",
				topic: "junk",
				msg: &ethpb.SignedContributionAndProof{
					Message: &ethpb.ContributionAndProof{
						AggregatorIndex: 1,
						Contribution: &ethpb.SyncCommitteeContribution{
							Slot:              1,
							SubcommitteeIndex: 1,
							BlockRoot:         params.BeaconConfig().ZeroHash[:],
							AggregationBits:   bitfield.NewBitvector128(),
							Signature:         emptySig[:],
						},
						SelectionProof: emptySig[:],
					},
					Signature: emptySig[:],
				}},
			want: pubsub.ValidationReject,
		},
		{
			name: "Future Slot Message",
			svc: NewService(context.Background(), &Config{
				P2P:               mockp2p.NewTestP2P(t),
				InitialSync:       &mockSync.Sync{IsSyncing: false},
				Chain:             chainService,
				StateNotifier:     chainService.StateNotifier(),
				OperationNotifier: chainService.OperationNotifier(),
			}),
			setupSvc: func(s *Service, msg *ethpb.SignedContributionAndProof) *Service {
				s.cfg.StateGen = stategen.New(db)
				s.cfg.DB = db
				assert.NoError(t, s.initCaches())
				return s
			},
			args: args{
				ctx:   context.Background(),
				pid:   "random",
				topic: defaultTopic,
				msg: &ethpb.SignedContributionAndProof{
					Message: &ethpb.ContributionAndProof{
						AggregatorIndex: 1,
						Contribution: &ethpb.SyncCommitteeContribution{
							Slot:              30,
							SubcommitteeIndex: 1,
							BlockRoot:         params.BeaconConfig().ZeroHash[:],
							AggregationBits:   bitfield.NewBitvector128(),
							Signature:         emptySig[:],
						},
						SelectionProof: emptySig[:],
					},
					Signature: emptySig[:],
				}},
			want: pubsub.ValidationIgnore,
		},
		{
			name: "Already Seen Message",
			svc: NewService(context.Background(), &Config{
				P2P:               mockp2p.NewTestP2P(t),
				InitialSync:       &mockSync.Sync{IsSyncing: false},
				Chain:             chainService,
				StateNotifier:     chainService.StateNotifier(),
				OperationNotifier: chainService.OperationNotifier(),
			}),
			setupSvc: func(s *Service, msg *ethpb.SignedContributionAndProof) *Service {
				s.cfg.StateGen = stategen.New(db)
				s.cfg.DB = db
				assert.NoError(t, s.initCaches())
				msg.Message.Contribution.BlockRoot = headRoot[:]

				s.setSyncContributionIndexSlotSeen(1, 1, 1)
				return s
			},
			args: args{
				ctx:   context.Background(),
				pid:   "random",
				topic: defaultTopic,
				msg: &ethpb.SignedContributionAndProof{
					Message: &ethpb.ContributionAndProof{
						AggregatorIndex: 1,
						Contribution: &ethpb.SyncCommitteeContribution{
							Slot:              1,
							SubcommitteeIndex: 1,
							BlockRoot:         params.BeaconConfig().ZeroHash[:],
							AggregationBits:   bitfield.NewBitvector128(),
							Signature:         emptySig[:],
						},
						SelectionProof: emptySig[:],
					},
					Signature: emptySig[:],
				}},
			want: pubsub.ValidationIgnore,
		},
		{
			name: "Invalid Selection Proof",
			svc: NewService(context.Background(), &Config{
				P2P:               mockp2p.NewTestP2P(t),
				InitialSync:       &mockSync.Sync{IsSyncing: false},
				Chain:             chainService,
				StateNotifier:     chainService.StateNotifier(),
				OperationNotifier: chainService.OperationNotifier(),
			}),
			setupSvc: func(s *Service, msg *ethpb.SignedContributionAndProof) *Service {
				s.cfg.StateGen = stategen.New(db)
				s.cfg.DB = db
				assert.NoError(t, s.initCaches())
				s.cfg.Chain = &mockChain.ChainService{
					ValidatorsRoot: [32]byte{'A'},
					Genesis:        time.Now().Add(-time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Duration(10)),
				}
				msg.Message.Contribution.BlockRoot = headRoot[:]
				incorrectProof := [96]byte{0xBB}
				msg.Message.SelectionProof = incorrectProof[:]

				return s
			},
			args: args{
				ctx:   context.Background(),
				pid:   "random",
				topic: defaultTopic,
				msg: &ethpb.SignedContributionAndProof{
					Message: &ethpb.ContributionAndProof{
						AggregatorIndex: 1,
						Contribution: &ethpb.SyncCommitteeContribution{
							Slot:              1,
							SubcommitteeIndex: 1,
							BlockRoot:         params.BeaconConfig().ZeroHash[:],
							AggregationBits:   bitfield.NewBitvector128(),
							Signature:         emptySig[:],
						},
						SelectionProof: emptySig[:],
					},
					Signature: emptySig[:],
				}},
			want: pubsub.ValidationReject,
		},
		{
			name: "Invalid Aggregator",
			svc: NewService(context.Background(), &Config{
				P2P:               mockp2p.NewTestP2P(t),
				InitialSync:       &mockSync.Sync{IsSyncing: false},
				Chain:             chainService,
				StateNotifier:     chainService.StateNotifier(),
				OperationNotifier: chainService.OperationNotifier(),
			}),
			setupSvc: func(s *Service, msg *ethpb.SignedContributionAndProof) *Service {
				s.cfg.StateGen = stategen.New(db)
				s.cfg.DB = db
				assert.NoError(t, s.initCaches())
				s.cfg.Chain = &mockChain.ChainService{
					ValidatorsRoot: [32]byte{'A'},
					Genesis:        time.Now().Add(-time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Duration(10)),
				}
				msg.Message.Contribution.BlockRoot = headRoot[:]
				hState, err := db.State(context.Background(), headRoot)
				assert.NoError(t, err)
				sc, err := hState.CurrentSyncCommittee()
				assert.NoError(t, err)
				for i := uint64(0); i < params.BeaconConfig().SyncCommitteeSubnetCount; i++ {
					coms, err := altair.SyncSubCommitteePubkeys(sc, types.CommitteeIndex(i))
					assert.NoError(t, err)
					for _, p := range coms {
						idx, ok := hState.ValidatorIndexByPubkey(bytesutil.ToBytes48(p))
						assert.Equal(t, true, ok)
						rt, err := syncSelectionProofSigningRoot(hState, helpers.PrevSlot(hState.Slot()), types.CommitteeIndex(i))
						assert.NoError(t, err)
						sig := keys[idx].Sign(rt[:])
						isAggregator, err := altair.IsSyncCommitteeAggregator(sig.Marshal())
						require.NoError(t, err)
						if !isAggregator {
							msg.Message.AggregatorIndex = idx
							break
						}
					}
				}
				return s
			},
			args: args{
				ctx:   context.Background(),
				pid:   "random",
				topic: defaultTopic,
				msg: &ethpb.SignedContributionAndProof{
					Message: &ethpb.ContributionAndProof{
						AggregatorIndex: 1,
						Contribution: &ethpb.SyncCommitteeContribution{
							Slot:              1,
							SubcommitteeIndex: 1,
							BlockRoot:         params.BeaconConfig().ZeroHash[:],
							AggregationBits:   bitfield.NewBitvector128(),
							Signature:         emptySig[:],
						},
						SelectionProof: emptySig[:],
					},
					Signature: emptySig[:],
				}},
			want: pubsub.ValidationReject,
		},
		{
			name: "Failed Selection Proof Verification ",
			svc: NewService(context.Background(), &Config{
				P2P:               mockp2p.NewTestP2P(t),
				InitialSync:       &mockSync.Sync{IsSyncing: false},
				Chain:             chainService,
				StateNotifier:     chainService.StateNotifier(),
				OperationNotifier: chainService.OperationNotifier(),
			}),
			setupSvc: func(s *Service, msg *ethpb.SignedContributionAndProof) *Service {
				s.cfg.StateGen = stategen.New(db)
				s.cfg.DB = db
				msg.Message.Contribution.BlockRoot = headRoot[:]
				hState, err := db.State(context.Background(), headRoot)
				assert.NoError(t, err)
				sc, err := hState.CurrentSyncCommittee()
				assert.NoError(t, err)
				for i := uint64(0); i < params.BeaconConfig().SyncCommitteeSubnetCount; i++ {
					coms, err := altair.SyncSubCommitteePubkeys(sc, types.CommitteeIndex(i))
					assert.NoError(t, err)
					for _, p := range coms {
						idx, ok := hState.ValidatorIndexByPubkey(bytesutil.ToBytes48(p))
						assert.Equal(t, true, ok)
						rt, err := syncSelectionProofSigningRoot(hState, helpers.PrevSlot(hState.Slot()), types.CommitteeIndex(i))
						assert.NoError(t, err)
						sig := keys[idx].Sign(rt[:])
						isAggregator, err := altair.IsSyncCommitteeAggregator(sig.Marshal())
						require.NoError(t, err)
						if !isAggregator {
							msg.Message.AggregatorIndex = idx
							break
						}
					}
				}
				subCommitteeSize := params.BeaconConfig().SyncCommitteeSize / params.BeaconConfig().SyncCommitteeSubnetCount
				s.cfg.Chain = &mockChain.ChainService{
					ValidatorsRoot:              [32]byte{'A'},
					Genesis:                     time.Now().Add(-time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Duration(hState.Slot()-1)),
					CurrentSyncCommitteeIndices: []types.CommitteeIndex{types.CommitteeIndex(msg.Message.Contribution.SubcommitteeIndex * subCommitteeSize)},
				}

				assert.NoError(t, s.initCaches())
				return s
			},
			args: args{
				ctx:   context.Background(),
				pid:   "random",
				topic: defaultTopic,
				msg: &ethpb.SignedContributionAndProof{
					Message: &ethpb.ContributionAndProof{
						AggregatorIndex: 1,
						Contribution: &ethpb.SyncCommitteeContribution{
							Slot:              1,
							SubcommitteeIndex: 1,
							BlockRoot:         params.BeaconConfig().ZeroHash[:],
							AggregationBits:   bitfield.NewBitvector128(),
							Signature:         emptySig[:],
						},
						SelectionProof: emptySig[:],
					},
					Signature: emptySig[:],
				}},
			want: pubsub.ValidationReject,
		},
		{
			name: "Invalid Proof Signature",
			svc: NewService(context.Background(), &Config{
				P2P:               mockp2p.NewTestP2P(t),
				InitialSync:       &mockSync.Sync{IsSyncing: false},
				Chain:             chainService,
				StateNotifier:     chainService.StateNotifier(),
				OperationNotifier: chainService.OperationNotifier(),
			}),
			setupSvc: func(s *Service, msg *ethpb.SignedContributionAndProof) *Service {
				s.cfg.StateGen = stategen.New(db)
				s.cfg.DB = db
				s.cfg.Chain = chainService
				msg.Message.Contribution.BlockRoot = headRoot[:]
				hState, err := db.State(context.Background(), headRoot)
				assert.NoError(t, err)
				sc, err := hState.CurrentSyncCommittee()
				assert.NoError(t, err)
				var pubkey []byte
				for i := uint64(0); i < params.BeaconConfig().SyncCommitteeSubnetCount; i++ {
					coms, err := altair.SyncSubCommitteePubkeys(sc, types.CommitteeIndex(i))
					assert.NoError(t, err)
					for _, p := range coms {
						idx, ok := hState.ValidatorIndexByPubkey(bytesutil.ToBytes48(p))
						assert.Equal(t, true, ok)
						rt, err := syncSelectionProofSigningRoot(hState, helpers.PrevSlot(hState.Slot()), types.CommitteeIndex(i))
						assert.NoError(t, err)
						sig := keys[idx].Sign(rt[:])
						isAggregator, err := altair.IsSyncCommitteeAggregator(sig.Marshal())
						require.NoError(t, err)
						if isAggregator {
							infiniteSig := [96]byte{0xC0}
							pubkey = keys[idx].PublicKey().Marshal()
							msg.Message.AggregatorIndex = idx
							msg.Message.SelectionProof = sig.Marshal()
							msg.Message.Contribution.Slot = helpers.PrevSlot(hState.Slot())
							msg.Message.Contribution.SubcommitteeIndex = i
							msg.Message.Contribution.Signature = infiniteSig[:]
							msg.Message.Contribution.BlockRoot = headRoot[:]
							msg.Message.Contribution.AggregationBits = bitfield.NewBitvector128()
							msg.Signature = infiniteSig[:]
							break
						}
					}
				}
				d, err := helpers.Domain(hState.Fork(), helpers.SlotToEpoch(helpers.PrevSlot(hState.Slot())), params.BeaconConfig().DomainSyncCommitteeSelectionProof, hState.GenesisValidatorRoot())
				require.NoError(t, err)
				subCommitteeSize := params.BeaconConfig().SyncCommitteeSize / params.BeaconConfig().SyncCommitteeSubnetCount
				s.cfg.Chain = &mockChain.ChainService{
					ValidatorsRoot:              [32]byte{'A'},
					Genesis:                     time.Now().Add(-time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Duration(hState.Slot()-1)),
					CurrentSyncCommitteeIndices: []types.CommitteeIndex{types.CommitteeIndex(msg.Message.Contribution.SubcommitteeIndex * subCommitteeSize)},
					PublicKey:                   bytesutil.ToBytes48(pubkey),
					SyncSelectionProofDomain:    d,
				}

				assert.NoError(t, s.initCaches())
				return s
			},
			args: args{
				ctx:   context.Background(),
				pid:   "random",
				topic: defaultTopic,
				msg: &ethpb.SignedContributionAndProof{
					Message: &ethpb.ContributionAndProof{
						AggregatorIndex: 1,
						Contribution: &ethpb.SyncCommitteeContribution{
							Slot:              1,
							SubcommitteeIndex: 1,
							BlockRoot:         params.BeaconConfig().ZeroHash[:],
							AggregationBits:   bitfield.NewBitvector128(),
							Signature:         emptySig[:],
						},
						SelectionProof: emptySig[:],
					},
					Signature: emptySig[:],
				}},
			want: pubsub.ValidationReject,
		},
		{
			name: "Invalid Sync Aggregate",
			svc: NewService(context.Background(), &Config{
				P2P:               mockp2p.NewTestP2P(t),
				InitialSync:       &mockSync.Sync{IsSyncing: false},
				Chain:             chainService,
				StateNotifier:     chainService.StateNotifier(),
				OperationNotifier: chainService.OperationNotifier(),
			}),
			setupSvc: func(s *Service, msg *ethpb.SignedContributionAndProof) *Service {
				s.cfg.StateGen = stategen.New(db)
				s.cfg.DB = db
				msg.Message.Contribution.BlockRoot = headRoot[:]
				hState, err := db.State(context.Background(), headRoot)
				assert.NoError(t, err)
				sc, err := hState.CurrentSyncCommittee()
				assert.NoError(t, err)
				for i := uint64(0); i < params.BeaconConfig().SyncCommitteeSubnetCount; i++ {
					coms, err := altair.SyncSubCommitteePubkeys(sc, types.CommitteeIndex(i))
					assert.NoError(t, err)
					for _, p := range coms {
						idx, ok := hState.ValidatorIndexByPubkey(bytesutil.ToBytes48(p))
						assert.Equal(t, true, ok)
						rt, err := syncSelectionProofSigningRoot(hState, helpers.PrevSlot(hState.Slot()), types.CommitteeIndex(i))
						assert.NoError(t, err)
						sig := keys[idx].Sign(rt[:])
						isAggregator, err := altair.IsSyncCommitteeAggregator(sig.Marshal())
						require.NoError(t, err)
						if isAggregator {
							infiniteSig := [96]byte{0xC0}
							junkRoot := [32]byte{'A'}
							badSig := keys[idx].Sign(junkRoot[:])
							msg.Message.AggregatorIndex = idx
							msg.Message.SelectionProof = sig.Marshal()
							msg.Message.Contribution.Slot = helpers.PrevSlot(hState.Slot())
							msg.Message.Contribution.SubcommitteeIndex = i
							msg.Message.Contribution.Signature = badSig.Marshal()
							msg.Message.Contribution.BlockRoot = headRoot[:]
							msg.Message.Contribution.AggregationBits = bitfield.NewBitvector128()
							msg.Signature = infiniteSig[:]

							d, err := helpers.Domain(hState.Fork(), helpers.SlotToEpoch(helpers.PrevSlot(hState.Slot())), params.BeaconConfig().DomainContributionAndProof, hState.GenesisValidatorRoot())
							assert.NoError(t, err)
							sigRoot, err := helpers.ComputeSigningRoot(msg.Message, d)
							assert.NoError(t, err)
							contrSig := keys[idx].Sign(sigRoot[:])

							msg.Signature = contrSig.Marshal()
							break
						}
					}
				}
				s.cfg.Chain = &mockChain.ChainService{
					ValidatorsRoot:              [32]byte{'A'},
					Genesis:                     time.Now().Add(-time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Duration(hState.Slot()-1)),
					CurrentSyncCommitteeIndices: []types.CommitteeIndex{1},
				}

				assert.NoError(t, s.initCaches())
				return s
			},
			args: args{
				ctx:   context.Background(),
				pid:   "random",
				topic: defaultTopic,
				msg: &ethpb.SignedContributionAndProof{
					Message: &ethpb.ContributionAndProof{
						AggregatorIndex: 1,
						Contribution: &ethpb.SyncCommitteeContribution{
							Slot:              1,
							SubcommitteeIndex: 1,
							BlockRoot:         params.BeaconConfig().ZeroHash[:],
							AggregationBits:   bitfield.NewBitvector128(),
							Signature:         emptySig[:],
						},
						SelectionProof: emptySig[:],
					},
					Signature: emptySig[:],
				}},
			want: pubsub.ValidationReject,
		},
		{
			name: "Invalid Signed Sync Contribution And Proof - Zero Bits Set",
			svc: NewService(context.Background(), &Config{
				P2P:               mockp2p.NewTestP2P(t),
				InitialSync:       &mockSync.Sync{IsSyncing: false},
				Chain:             chainService,
				StateNotifier:     chainService.StateNotifier(),
				OperationNotifier: chainService.OperationNotifier(),
			}),
			setupSvc: func(s *Service, msg *ethpb.SignedContributionAndProof) *Service {
				s.cfg.StateGen = stategen.New(db)
				msg.Message.Contribution.BlockRoot = headRoot[:]
				s.cfg.DB = db
				hState, err := db.State(context.Background(), headRoot)
				assert.NoError(t, err)
				sc, err := hState.CurrentSyncCommittee()
				assert.NoError(t, err)
				cd, err := helpers.Domain(hState.Fork(), helpers.SlotToEpoch(helpers.PrevSlot(hState.Slot())), params.BeaconConfig().DomainContributionAndProof, hState.GenesisValidatorRoot())
				assert.NoError(t, err)
				for i := uint64(0); i < params.BeaconConfig().SyncCommitteeSubnetCount; i++ {
					coms, err := altair.SyncSubCommitteePubkeys(sc, types.CommitteeIndex(i))
					assert.NoError(t, err)
					for _, p := range coms {
						idx, ok := hState.ValidatorIndexByPubkey(bytesutil.ToBytes48(p))
						assert.Equal(t, true, ok)
						rt, err := syncSelectionProofSigningRoot(hState, helpers.PrevSlot(hState.Slot()), types.CommitteeIndex(i))
						assert.NoError(t, err)
						sig := keys[idx].Sign(rt[:])
						isAggregator, err := altair.IsSyncCommitteeAggregator(sig.Marshal())
						require.NoError(t, err)
						if isAggregator {
							infiniteSig := [96]byte{0xC0}
							msg.Message.AggregatorIndex = idx
							msg.Message.SelectionProof = sig.Marshal()
							msg.Message.Contribution.Slot = helpers.PrevSlot(hState.Slot())
							msg.Message.Contribution.SubcommitteeIndex = i
							msg.Message.Contribution.Signature = infiniteSig[:]
							msg.Message.Contribution.BlockRoot = headRoot[:]
							msg.Message.Contribution.AggregationBits = bitfield.NewBitvector128()
							sigRoot, err := helpers.ComputeSigningRoot(msg.Message, cd)
							assert.NoError(t, err)
							contrSig := keys[idx].Sign(sigRoot[:])

							msg.Signature = contrSig.Marshal()
							break
						}
					}
				}

				d, err := helpers.Domain(hState.Fork(), helpers.SlotToEpoch(helpers.PrevSlot(hState.Slot())), params.BeaconConfig().DomainSyncCommitteeSelectionProof, hState.GenesisValidatorRoot())
				require.NoError(t, err)
				subCommitteeSize := params.BeaconConfig().SyncCommitteeSize / params.BeaconConfig().SyncCommitteeSubnetCount
				s.cfg.Chain = &mockChain.ChainService{
					ValidatorsRoot:              [32]byte{'A'},
					Genesis:                     time.Now().Add(-time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Duration(hState.Slot()-1)),
					CurrentSyncCommitteeIndices: []types.CommitteeIndex{types.CommitteeIndex(msg.Message.Contribution.SubcommitteeIndex * subCommitteeSize)},
					PublicKey:                   bytesutil.ToBytes48(keys[msg.Message.AggregatorIndex].PublicKey().Marshal()),
					SyncSelectionProofDomain:    d,
					SyncContributionProofDomain: cd,
					SyncCommitteeDomain:         make([]byte, 32),
				}
				assert.NoError(t, s.initCaches())
				return s
			},
			args: args{
				ctx:   context.Background(),
				pid:   "random",
				topic: defaultTopic,
				msg: &ethpb.SignedContributionAndProof{
					Message: &ethpb.ContributionAndProof{
						AggregatorIndex: 1,
						Contribution: &ethpb.SyncCommitteeContribution{
							Slot:              1,
							SubcommitteeIndex: 1,
							BlockRoot:         params.BeaconConfig().ZeroHash[:],
							AggregationBits:   bitfield.NewBitvector128(),
							Signature:         emptySig[:],
						},
						SelectionProof: emptySig[:],
					},
					Signature: emptySig[:],
				}},
			want: pubsub.ValidationReject,
		},
		{
			name: "Valid Signed Sync Contribution And Proof - Single Bit Set",
			svc: NewService(context.Background(), &Config{
				P2P:               mockp2p.NewTestP2P(t),
				InitialSync:       &mockSync.Sync{IsSyncing: false},
				Chain:             chainService,
				StateNotifier:     chainService.StateNotifier(),
				OperationNotifier: chainService.OperationNotifier(),
			}),
			setupSvc: func(s *Service, msg *ethpb.SignedContributionAndProof) *Service {
				s.cfg.StateGen = stategen.New(db)
				msg.Message.Contribution.BlockRoot = headRoot[:]
				s.cfg.DB = db
				hState, err := db.State(context.Background(), headRoot)
				assert.NoError(t, err)
				sc, err := hState.CurrentSyncCommittee()
				assert.NoError(t, err)
				cd, err := helpers.Domain(hState.Fork(), helpers.SlotToEpoch(helpers.PrevSlot(hState.Slot())), params.BeaconConfig().DomainContributionAndProof, hState.GenesisValidatorRoot())
				assert.NoError(t, err)
				d, err := helpers.Domain(hState.Fork(), helpers.SlotToEpoch(hState.Slot()), params.BeaconConfig().DomainSyncCommittee, hState.GenesisValidatorRoot())
				assert.NoError(t, err)
				var pubkeys [][]byte
				for i := uint64(0); i < params.BeaconConfig().SyncCommitteeSubnetCount; i++ {
					coms, err := altair.SyncSubCommitteePubkeys(sc, types.CommitteeIndex(i))
					pubkeys = coms
					assert.NoError(t, err)
					for _, p := range coms {
						idx, ok := hState.ValidatorIndexByPubkey(bytesutil.ToBytes48(p))
						assert.Equal(t, true, ok)
						rt, err := syncSelectionProofSigningRoot(hState, helpers.PrevSlot(hState.Slot()), types.CommitteeIndex(i))
						assert.NoError(t, err)
						sig := keys[idx].Sign(rt[:])
						isAggregator, err := altair.IsSyncCommitteeAggregator(sig.Marshal())
						require.NoError(t, err)
						if isAggregator {
							msg.Message.AggregatorIndex = idx
							msg.Message.SelectionProof = sig.Marshal()
							msg.Message.Contribution.Slot = helpers.PrevSlot(hState.Slot())
							msg.Message.Contribution.SubcommitteeIndex = i
							msg.Message.Contribution.BlockRoot = headRoot[:]
							msg.Message.Contribution.AggregationBits = bitfield.NewBitvector128()
							// Only Sign for 1 validator.
							rawBytes := p2ptypes.SSZBytes(headRoot[:])
							sigRoot, err := helpers.ComputeSigningRoot(&rawBytes, d)
							assert.NoError(t, err)
							valIdx, ok := hState.ValidatorIndexByPubkey(bytesutil.ToBytes48(coms[0]))
							assert.Equal(t, true, ok)
							sig = keys[valIdx].Sign(sigRoot[:])
							msg.Message.Contribution.AggregationBits.SetBitAt(uint64(0), true)
							msg.Message.Contribution.Signature = sig.Marshal()

							sigRoot, err = helpers.ComputeSigningRoot(msg.Message, cd)
							assert.NoError(t, err)
							contrSig := keys[idx].Sign(sigRoot[:])
							msg.Signature = contrSig.Marshal()
							break
						}
					}
				}

				pd, err := helpers.Domain(hState.Fork(), helpers.SlotToEpoch(helpers.PrevSlot(hState.Slot())), params.BeaconConfig().DomainSyncCommitteeSelectionProof, hState.GenesisValidatorRoot())
				require.NoError(t, err)
				subCommitteeSize := params.BeaconConfig().SyncCommitteeSize / params.BeaconConfig().SyncCommitteeSubnetCount
				s.cfg.Chain = &mockChain.ChainService{
					ValidatorsRoot:              [32]byte{'A'},
					Genesis:                     time.Now().Add(-time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Duration(hState.Slot()-1)),
					CurrentSyncCommitteeIndices: []types.CommitteeIndex{types.CommitteeIndex(msg.Message.Contribution.SubcommitteeIndex * subCommitteeSize)},
					PublicKey:                   bytesutil.ToBytes48(keys[msg.Message.AggregatorIndex].PublicKey().Marshal()),
					SyncSelectionProofDomain:    pd,
					SyncContributionProofDomain: cd,
					SyncCommitteeDomain:         d,
					SyncCommitteePubkeys:        pubkeys,
				}
				assert.NoError(t, s.initCaches())
				return s
			},
			args: args{
				ctx:   context.Background(),
				pid:   "random",
				topic: defaultTopic,
				msg: &ethpb.SignedContributionAndProof{
					Message: &ethpb.ContributionAndProof{
						AggregatorIndex: 1,
						Contribution: &ethpb.SyncCommitteeContribution{
							Slot:              1,
							SubcommitteeIndex: 1,
							BlockRoot:         params.BeaconConfig().ZeroHash[:],
							AggregationBits:   bitfield.NewBitvector128(),
							Signature:         emptySig[:],
						},
						SelectionProof: emptySig[:],
					},
					Signature: emptySig[:],
				}},
			want: pubsub.ValidationAccept,
		},
		{
			name: "Valid Signed Sync Contribution And Proof with Multiple Signatures",
			svc: NewService(context.Background(), &Config{
				P2P:               mockp2p.NewTestP2P(t),
				InitialSync:       &mockSync.Sync{IsSyncing: false},
				Chain:             chainService,
				StateNotifier:     chainService.StateNotifier(),
				OperationNotifier: chainService.OperationNotifier(),
			}),
			setupSvc: func(s *Service, msg *ethpb.SignedContributionAndProof) *Service {
				s.cfg.StateGen = stategen.New(db)
				msg.Message.Contribution.BlockRoot = headRoot[:]
				s.cfg.DB = db
				hState, err := db.State(context.Background(), headRoot)
				assert.NoError(t, err)
				sc, err := hState.CurrentSyncCommittee()
				assert.NoError(t, err)
				cd, err := helpers.Domain(hState.Fork(), helpers.SlotToEpoch(helpers.PrevSlot(hState.Slot())), params.BeaconConfig().DomainContributionAndProof, hState.GenesisValidatorRoot())
				assert.NoError(t, err)
				d, err := helpers.Domain(hState.Fork(), helpers.SlotToEpoch(hState.Slot()), params.BeaconConfig().DomainSyncCommittee, hState.GenesisValidatorRoot())
				assert.NoError(t, err)
				var pubkeys [][]byte
				for i := uint64(0); i < params.BeaconConfig().SyncCommitteeSubnetCount; i++ {
					coms, err := altair.SyncSubCommitteePubkeys(sc, types.CommitteeIndex(i))
					pubkeys = coms
					assert.NoError(t, err)
					for _, p := range coms {
						idx, ok := hState.ValidatorIndexByPubkey(bytesutil.ToBytes48(p))
						assert.Equal(t, true, ok)
						rt, err := syncSelectionProofSigningRoot(hState, helpers.PrevSlot(hState.Slot()), types.CommitteeIndex(i))
						assert.NoError(t, err)
						sig := keys[idx].Sign(rt[:])
						isAggregator, err := altair.IsSyncCommitteeAggregator(sig.Marshal())
						require.NoError(t, err)
						if isAggregator {
							msg.Message.AggregatorIndex = idx
							msg.Message.SelectionProof = sig.Marshal()
							msg.Message.Contribution.Slot = helpers.PrevSlot(hState.Slot())
							msg.Message.Contribution.SubcommitteeIndex = i
							msg.Message.Contribution.BlockRoot = headRoot[:]
							msg.Message.Contribution.AggregationBits = bitfield.NewBitvector128()
							rawBytes := p2ptypes.SSZBytes(headRoot[:])
							sigRoot, err := helpers.ComputeSigningRoot(&rawBytes, d)
							assert.NoError(t, err)
							sigs := []bls.Signature{}
							for i, p2 := range coms {
								idx, ok := hState.ValidatorIndexByPubkey(bytesutil.ToBytes48(p2))
								assert.Equal(t, true, ok)
								sig := keys[idx].Sign(sigRoot[:])
								sigs = append(sigs, sig)
								msg.Message.Contribution.AggregationBits.SetBitAt(uint64(i), true)
							}
							msg.Message.Contribution.Signature = bls.AggregateSignatures(sigs).Marshal()
							sigRoot, err = helpers.ComputeSigningRoot(msg.Message, cd)
							assert.NoError(t, err)
							contrSig := keys[idx].Sign(sigRoot[:])
							msg.Signature = contrSig.Marshal()
							break
						}
					}
				}

				pd, err := helpers.Domain(hState.Fork(), helpers.SlotToEpoch(helpers.PrevSlot(hState.Slot())), params.BeaconConfig().DomainSyncCommitteeSelectionProof, hState.GenesisValidatorRoot())
				require.NoError(t, err)
				subCommitteeSize := params.BeaconConfig().SyncCommitteeSize / params.BeaconConfig().SyncCommitteeSubnetCount
				s.cfg.Chain = &mockChain.ChainService{
					ValidatorsRoot:              [32]byte{'A'},
					Genesis:                     time.Now().Add(-time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Duration(hState.Slot()-1)),
					CurrentSyncCommitteeIndices: []types.CommitteeIndex{types.CommitteeIndex(msg.Message.Contribution.SubcommitteeIndex * subCommitteeSize)},
					PublicKey:                   bytesutil.ToBytes48(keys[msg.Message.AggregatorIndex].PublicKey().Marshal()),
					SyncSelectionProofDomain:    pd,
					SyncContributionProofDomain: cd,
					SyncCommitteeDomain:         d,
					SyncCommitteePubkeys:        pubkeys,
				}

				assert.NoError(t, s.initCaches())
				return s
			},
			args: args{
				ctx:   context.Background(),
				pid:   "random",
				topic: defaultTopic,
				msg: &ethpb.SignedContributionAndProof{
					Message: &ethpb.ContributionAndProof{
						AggregatorIndex: 1,
						Contribution: &ethpb.SyncCommitteeContribution{
							Slot:              1,
							SubcommitteeIndex: 1,
							BlockRoot:         params.BeaconConfig().ZeroHash[:],
							AggregationBits:   bitfield.NewBitvector128(),
							Signature:         emptySig[:],
						},
						SelectionProof: emptySig[:],
					},
					Signature: emptySig[:],
				}},
			want: pubsub.ValidationAccept,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			tt.svc = tt.setupSvc(tt.svc, tt.args.msg)
			marshalledObj, err := tt.args.msg.MarshalSSZ()
			assert.NoError(t, err)
			marshalledObj = snappy.Encode(nil, marshalledObj)
			msg := &pubsub.Message{
				Message: &pubsub_pb.Message{
					Data:  marshalledObj,
					Topic: &tt.args.topic,
				},
				ReceivedFrom:  "",
				ValidatorData: nil,
			}
			if got := tt.svc.validateSyncContributionAndProof(tt.args.ctx, tt.args.pid, msg); got != tt.want {
				t.Errorf("validateSyncContributionAndProof() = %v, want %v", got, tt.want)
			}
		})
	}
}
||||
|
||||
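For reference, the pubsub payload built above follows the Eth2 gossip wire convention: SSZ-serialize the message, then snappy block-compress it. A minimal decode-side sketch, assuming github.com/golang/snappy and the generated UnmarshalSSZ method on the proto type (decodeGossip is an illustrative helper, not Prysm API):

// decodeGossip reverses the encoding used in the test: snappy-decompress,
// then SSZ-unmarshal into the target message.
func decodeGossip(data []byte, msg *ethpb.SignedContributionAndProof) error {
	raw, err := snappy.Decode(nil, data)
	if err != nil {
		return err
	}
	return msg.UnmarshalSSZ(raw)
}
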
func fillUpBlocksAndState(ctx context.Context, t *testing.T, beaconDB db.Database) ([32]byte, []bls.SecretKey) {
	gs, keys := testutil.DeterministicGenesisStateAltair(t, 64)
	sCom, err := altair.NextSyncCommittee(ctx, gs)
	assert.NoError(t, err)
	assert.NoError(t, gs.SetCurrentSyncCommittee(sCom))
	assert.NoError(t, beaconDB.SaveGenesisData(context.Background(), gs))

	testState := gs.Copy()
	hRoot := [32]byte{}
	for i := types.Slot(1); i <= params.BeaconConfig().SlotsPerEpoch; i++ {
		blk, err := testutil.GenerateFullBlockAltair(testState, keys, testutil.DefaultBlockGenConfig(), i)
		require.NoError(t, err)
		r, err := blk.Block.HashTreeRoot()
		require.NoError(t, err)
		wsb, err := wrapper.WrappedAltairSignedBeaconBlock(blk)
		require.NoError(t, err)
		_, testState, err = core.ExecuteStateTransitionNoVerifyAnySig(ctx, testState, wsb)
		assert.NoError(t, err)
		assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
		assert.NoError(t, beaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Slot: i, Root: r[:]}))
		assert.NoError(t, beaconDB.SaveState(ctx, testState, r))
		require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, r))
		hRoot = r
	}
	return hRoot, keys
}

func syncSelectionProofSigningRoot(st state.BeaconState, slot types.Slot, comIdx types.CommitteeIndex) ([32]byte, error) {
	dom, err := helpers.Domain(st.Fork(), helpers.SlotToEpoch(slot), params.BeaconConfig().DomainSyncCommitteeSelectionProof, st.GenesisValidatorRoot())
	if err != nil {
		return [32]byte{}, err
	}
	selectionData := &ethpb.SyncAggregatorSelectionData{Slot: slot, SubcommitteeIndex: uint64(comIdx)}
	return helpers.ComputeSigningRoot(selectionData, dom)
}

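The selection proof signed over this root is what altair.IsSyncCommitteeAggregator tests in the code above. A rough sketch of the underlying spec rule (is_sync_committee_aggregator in the Altair validator spec): hash the selection-proof signature and check its first eight bytes against a modulus derived from committee sizing. The config field names below, in particular TargetAggregatorsPerSyncSubcommittee, are assumptions rather than confirmed Prysm API, and the snippet needs crypto/sha256 and encoding/binary:

// isSyncAggregator mirrors the spec's is_sync_committee_aggregator check.
// Field names on params.BeaconConfig() are illustrative assumptions.
func isSyncAggregator(sigBytes []byte) bool {
	cfg := params.BeaconConfig()
	modulo := cfg.SyncCommitteeSize / cfg.SyncCommitteeSubnetCount / cfg.TargetAggregatorsPerSyncSubcommittee
	if modulo < 1 {
		modulo = 1 // the spec floors the modulus at 1
	}
	digest := sha256.Sum256(sigBytes)
	return binary.LittleEndian.Uint64(digest[:8])%modulo == 0
}
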
@@ -6,6 +6,7 @@ import (
 	"crypto/rand"
 	"reflect"
 	"testing"
+	"time"

 	lru "github.com/hashicorp/golang-lru"
 	pubsub "github.com/libp2p/go-libp2p-pubsub"
@@ -18,7 +19,6 @@ import (
 	v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
 	mockSync "github.com/prysmaticlabs/prysm/beacon-chain/sync/initial-sync/testing"
 	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
-	statepb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
 	"github.com/prysmaticlabs/prysm/shared/bls"
 	"github.com/prysmaticlabs/prysm/shared/params"
 	"github.com/prysmaticlabs/prysm/shared/testutil/assert"
@@ -38,9 +38,9 @@ func setupValidExit(t *testing.T) (*ethpb.SignedVoluntaryExit, state.BeaconState
 			ActivationEpoch: 0,
 		},
 	}
-	state, err := v1.InitializeFromProto(&statepb.BeaconState{
+	state, err := v1.InitializeFromProto(&ethpb.BeaconState{
 		Validators: registry,
-		Fork: &statepb.Fork{
+		Fork: &ethpb.Fork{
 			CurrentVersion:  params.BeaconConfig().GenesisForkVersion,
 			PreviousVersion: params.BeaconConfig().GenesisForkVersion,
 		},
@@ -79,7 +79,8 @@ func TestValidateVoluntaryExit_ValidExit(t *testing.T) {
 		cfg: &Config{
 			P2P: p,
 			Chain: &mock.ChainService{
-				State: s,
+				State:   s,
+				Genesis: time.Now(),
 			},
 			InitialSync: &mockSync.Sync{IsSyncing: false},
 		},
@@ -90,6 +91,9 @@ func TestValidateVoluntaryExit_ValidExit(t *testing.T) {
 	_, err = p.Encoding().EncodeGossip(buf, exit)
 	require.NoError(t, err)
 	topic := p2p.GossipTypeMapping[reflect.TypeOf(exit)]
+	d, err := r.currentForkDigest()
+	assert.NoError(t, err)
+	topic = r.addDigestToTopic(topic, d)
 	m := &pubsub.Message{
 		Message: &pubsubpb.Message{
 			Data: buf.Bytes(),

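The three added lines pin the gossip topic to the current fork digest, so peers on different forks subscribe to disjoint topics. A hedged sketch of the idea, assuming topic templates are fmt format strings such as "/eth2/%x/voluntary_exit" (addDigest is illustrative, not Prysm's addDigestToTopic):

// addDigest interpolates the 4-byte fork digest into a topic template.
func addDigest(topicTemplate string, digest [4]byte) string {
	// e.g. "/eth2/%x/voluntary_exit" -> "/eth2/b5303f2a/voluntary_exit"
	return fmt.Sprintf(topicTemplate, digest)
}
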
@@ -141,7 +141,7 @@ var (
 	// EnableDebugRPCEndpoints as /v1/beacon/state.
 	EnableDebugRPCEndpoints = &cli.BoolFlag{
 		Name:  "enable-debug-rpc-endpoints",
-		Usage: "Enables the debug rpc service, containing utility endpoints such as /eth/v1alpha1/beacon/state.",
+		Usage: "Enables the debug rpc service, containing utility endpoints such as /prysm/v1alpha1/beacon/state.",
 	}
 	// SubscribeToAllSubnets defines a flag to specify whether to subscribe to all possible attestation subnets or not.
 	SubscribeToAllSubnets = &cli.BoolFlag{

@@ -11,6 +11,7 @@ import (
 	"os"
 	"path"
 	"strings"
+	"sync"
 	"testing"
 	"time"

@@ -222,17 +223,27 @@ func (r *testRunner) runEvaluators(conns []*grpc.ClientConn, tickingStartTime ti
 	secondsPerEpoch := uint64(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
 	ticker := helpers.NewEpochTicker(tickingStartTime, secondsPerEpoch)
 	for currentEpoch := range ticker.C() {
-		for _, evaluator := range config.Evaluators {
+		wg := new(sync.WaitGroup)
+		for _, ev := range config.Evaluators {
+			// Fix reference to evaluator as it will be running
+			// in a separate goroutine.
+			evaluator := ev
 			// Only run if the policy says so.
 			if !evaluator.Policy(types.Epoch(currentEpoch)) {
 				continue
 			}
-			t.Run(fmt.Sprintf(evaluator.Name, currentEpoch), func(t *testing.T) {
+
+			// Add evaluator to our waitgroup.
+			wg.Add(1)
+
+			go t.Run(fmt.Sprintf(evaluator.Name, currentEpoch), func(t *testing.T) {
 				err := evaluator.Evaluation(conns...)
 				assert.NoError(t, err, "Evaluation failed for epoch %d: %v", currentEpoch, err)
+				wg.Done()
 			})
 		}
+
+		// Wait for all evaluators to finish their evaluation for the epoch.
+		wg.Wait()
 		if t.Failed() || currentEpoch >= config.EpochsToRun-1 {
 			ticker.Done()
 			if t.Failed() {

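The `evaluator := ev` rebind above guards against Go's per-loop (rather than per-iteration) variable scoping, which, before Go 1.22, made goroutine closures share a single loop variable. A standalone illustration of the pitfall and the fix:

package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	for _, name := range []string{"a", "b", "c"} {
		// Rebind, mirroring `evaluator := ev` above. Without this line
		// (pre-Go 1.22), every goroutine would capture the same loop
		// variable and could print the final element three times.
		name := name
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println(name)
		}()
	}
	wg.Wait()
}
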
@@ -8,6 +8,7 @@ go_library(
         "api_gateway_v1alpha1.go",
         "data.go",
         "finality.go",
+        "fork.go",
         "metrics.go",
         "node.go",
         "operations.go",
@@ -24,6 +25,8 @@ go_library(
         "//endtoend/policies:go_default_library",
         "//endtoend/types:go_default_library",
         "//proto/prysm/v1alpha1:go_default_library",
+        "//proto/prysm/v1alpha1/block:go_default_library",
+        "//proto/prysm/v1alpha1/wrapper:go_default_library",
         "//shared/bytesutil:go_default_library",
         "//shared/p2putils:go_default_library",
         "//shared/params:go_default_library",

endtoend/evaluators/fork.go (new file, 63 lines)
@@ -0,0 +1,63 @@
package evaluators

import (
	"context"

	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/endtoend/policies"
	"github.com/prysmaticlabs/prysm/endtoend/types"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	wrapperv2 "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
	"github.com/prysmaticlabs/prysm/shared/params"
	"google.golang.org/grpc"
)

// ForkTransition ensures that the hard fork has occurred successfully.
var ForkTransition = types.Evaluator{
	Name:       "fork_transition_%d",
	Policy:     policies.OnEpoch(params.AltairE2EForkEpoch),
	Evaluation: forkOccurs,
}

func forkOccurs(conns ...*grpc.ClientConn) error {
	conn := conns[0]
	client := ethpb.NewBeaconNodeValidatorClient(conn)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	stream, err := client.StreamBlocksAltair(ctx, &ethpb.StreamBlocksRequest{VerifiedOnly: true})
	if err != nil {
		return errors.Wrap(err, "failed to get stream")
	}
	fSlot, err := helpers.StartSlot(params.AltairE2EForkEpoch)
	if err != nil {
		return err
	}
	if ctx.Err() == context.Canceled {
		return errors.New("context canceled prematurely")
	}
	res, err := stream.Recv()
	if err != nil {
		return err
	}
	if res == nil || res.Block == nil {
		return errors.New("nil block returned by beacon node")
	}
	if res.GetPhase0Block() == nil && res.GetAltairBlock() == nil {
		return errors.New("nil block returned by beacon node")
	}
	if res.GetPhase0Block() != nil {
		return errors.New("phase 0 block returned after altair fork has occurred")
	}
	blk, err := wrapperv2.WrappedAltairSignedBeaconBlock(res.GetAltairBlock())
	if err != nil {
		return err
	}
	if blk == nil || blk.IsNil() {
		return errors.New("nil altair block received from stream")
	}
	if blk.Block().Slot() < fSlot {
		return errors.Errorf("wanted a block >= %d but received %d", fSlot, blk.Block().Slot())
	}
	return nil
}

@@ -13,7 +13,10 @@ import (
 	e2e "github.com/prysmaticlabs/prysm/endtoend/params"
 	"github.com/prysmaticlabs/prysm/endtoend/policies"
 	e2etypes "github.com/prysmaticlabs/prysm/endtoend/types"
-	eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
+	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
+	"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
+	"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
+	wrapperv2 "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
 	"github.com/prysmaticlabs/prysm/shared/bytesutil"
 	"github.com/prysmaticlabs/prysm/shared/params"
 	"github.com/prysmaticlabs/prysm/shared/testutil"
@@ -91,27 +94,31 @@ var ValidatorsVoteWithTheMajority = e2etypes.Evaluator{

 func processesDepositsInBlocks(conns ...*grpc.ClientConn) error {
 	conn := conns[0]
-	client := eth.NewBeaconChainClient(conn)
+	client := ethpb.NewBeaconChainClient(conn)
+	altairClient := ethpb.NewBeaconChainClient(conn)
 	chainHead, err := client.GetChainHead(context.Background(), &emptypb.Empty{})
 	if err != nil {
 		return errors.Wrap(err, "failed to get chain head")
 	}

-	req := &eth.ListBlocksRequest{QueryFilter: &eth.ListBlocksRequest_Epoch{Epoch: chainHead.HeadEpoch - 1}}
-	blks, err := client.ListBlocks(context.Background(), req)
+	req := &ethpb.ListBlocksRequest{QueryFilter: &ethpb.ListBlocksRequest_Epoch{Epoch: chainHead.HeadEpoch - 1}}
+	blks, err := altairClient.ListBlocksAltair(context.Background(), req)
 	if err != nil {
 		return errors.Wrap(err, "failed to get blocks from beacon-chain")
 	}
 	var deposits uint64
-	for _, blk := range blks.BlockContainers {
+	for _, ctr := range blks.BlockContainers {
+		blk, err := convertToBlockInterface(ctr)
+		if err != nil {
+			return err
+		}
 		fmt.Printf(
 			"Slot: %d with %d deposits, Eth1 block %#x with %d deposits\n",
-			blk.Block.Block.Slot,
-			len(blk.Block.Block.Body.Deposits),
-			blk.Block.Block.Body.Eth1Data.BlockHash, blk.Block.Block.Body.Eth1Data.DepositCount,
+			blk.Block().Slot(),
+			len(blk.Block().Body().Deposits()),
+			blk.Block().Body().Eth1Data().BlockHash, blk.Block().Body().Eth1Data().DepositCount,
 		)
-		deposits += uint64(len(blk.Block.Block.Body.Deposits))
+		deposits += uint64(len(blk.Block().Body().Deposits()))
 	}
 	if deposits != depositValCount {
 		return fmt.Errorf("expected %d deposits to be processed, received %d", depositValCount, deposits)
@@ -121,27 +128,32 @@ func processesDepositsInBlocks(conns ...*grpc.ClientConn) error {

 func verifyGraffitiInBlocks(conns ...*grpc.ClientConn) error {
 	conn := conns[0]
-	client := eth.NewBeaconChainClient(conn)
+	client := ethpb.NewBeaconChainClient(conn)
+	altairClient := ethpb.NewBeaconChainClient(conn)

 	chainHead, err := client.GetChainHead(context.Background(), &emptypb.Empty{})
 	if err != nil {
 		return errors.Wrap(err, "failed to get chain head")
 	}

-	req := &eth.ListBlocksRequest{QueryFilter: &eth.ListBlocksRequest_Epoch{Epoch: chainHead.HeadEpoch - 1}}
-	blks, err := client.ListBlocks(context.Background(), req)
+	req := &ethpb.ListBlocksRequest{QueryFilter: &ethpb.ListBlocksRequest_Epoch{Epoch: chainHead.HeadEpoch - 1}}
+	blks, err := altairClient.ListBlocksAltair(context.Background(), req)
 	if err != nil {
 		return errors.Wrap(err, "failed to get blocks from beacon-chain")
 	}
-	for _, blk := range blks.BlockContainers {
+	for _, ctr := range blks.BlockContainers {
+		blk, err := convertToBlockInterface(ctr)
+		if err != nil {
+			return err
+		}
 		var e bool
 		for _, graffiti := range helpers.Graffiti {
-			if bytes.Equal(bytesutil.PadTo([]byte(graffiti), 32), blk.Block.Block.Body.Graffiti) {
+			if bytes.Equal(bytesutil.PadTo([]byte(graffiti), 32), blk.Block().Body().Graffiti()) {
 				e = true
 				break
 			}
 		}
-		if !e && blk.Block.Block.Slot != 0 {
+		if !e && blk.Block().Slot() != 0 {
 			return errors.New("could not get graffiti from the list")
 		}
 	}
@@ -151,14 +163,14 @@ func verifyGraffitiInBlocks(conns ...*grpc.ClientConn) error {

 func activatesDepositedValidators(conns ...*grpc.ClientConn) error {
 	conn := conns[0]
-	client := eth.NewBeaconChainClient(conn)
+	client := ethpb.NewBeaconChainClient(conn)

 	chainHead, err := client.GetChainHead(context.Background(), &emptypb.Empty{})
 	if err != nil {
 		return errors.Wrap(err, "failed to get chain head")
 	}

-	validatorRequest := &eth.ListValidatorsRequest{
+	validatorRequest := &ethpb.ListValidatorsRequest{
 		PageSize:  int32(params.BeaconConfig().MinGenesisActiveValidatorCount),
 		PageToken: "1",
 	}
@@ -210,8 +222,8 @@ func activatesDepositedValidators(conns ...*grpc.ClientConn) error {

 func depositedValidatorsAreActive(conns ...*grpc.ClientConn) error {
 	conn := conns[0]
-	client := eth.NewBeaconChainClient(conn)
-	validatorRequest := &eth.ListValidatorsRequest{
+	client := ethpb.NewBeaconChainClient(conn)
+	validatorRequest := &ethpb.ListValidatorsRequest{
 		PageSize:  int32(params.BeaconConfig().MinGenesisActiveValidatorCount),
 		PageToken: "1",
 	}
@@ -260,8 +272,8 @@ func depositedValidatorsAreActive(conns ...*grpc.ClientConn) error {

 func proposeVoluntaryExit(conns ...*grpc.ClientConn) error {
 	conn := conns[0]
-	valClient := eth.NewBeaconNodeValidatorClient(conn)
-	beaconClient := eth.NewBeaconChainClient(conn)
+	valClient := ethpb.NewBeaconNodeValidatorClient(conn)
+	beaconClient := ethpb.NewBeaconChainClient(conn)

 	ctx := context.Background()
 	chainHead, err := beaconClient.GetChainHead(ctx, &emptypb.Empty{})
@@ -277,11 +289,11 @@ func proposeVoluntaryExit(conns ...*grpc.ClientConn) error {
 	exitedIndex = types.ValidatorIndex(rand.Uint64() % params.BeaconConfig().MinGenesisActiveValidatorCount)
 	valExited = true

-	voluntaryExit := &eth.VoluntaryExit{
+	voluntaryExit := &ethpb.VoluntaryExit{
 		Epoch:          chainHead.HeadEpoch,
 		ValidatorIndex: exitedIndex,
 	}
-	req := &eth.DomainRequest{
+	req := &ethpb.DomainRequest{
 		Epoch:  chainHead.HeadEpoch,
 		Domain: params.BeaconConfig().DomainVoluntaryExit[:],
 	}
@@ -294,7 +306,7 @@ func proposeVoluntaryExit(conns ...*grpc.ClientConn) error {
 		return err
 	}
 	signature := privKeys[exitedIndex].Sign(signingData[:])
-	signedExit := &eth.SignedVoluntaryExit{
+	signedExit := &ethpb.SignedVoluntaryExit{
 		Exit:      voluntaryExit,
 		Signature: signature.Marshal(),
 	}
@@ -307,9 +319,9 @@ func proposeVoluntaryExit(conns ...*grpc.ClientConn) error {

 func validatorIsExited(conns ...*grpc.ClientConn) error {
 	conn := conns[0]
-	client := eth.NewBeaconChainClient(conn)
-	validatorRequest := &eth.GetValidatorRequest{
-		QueryFilter: &eth.GetValidatorRequest_Index{
+	client := ethpb.NewBeaconChainClient(conn)
+	validatorRequest := &ethpb.GetValidatorRequest{
+		QueryFilter: &ethpb.GetValidatorRequest_Index{
 			Index: exitedIndex,
 		},
 	}
@@ -325,21 +337,24 @@ func validatorIsExited(conns ...*grpc.ClientConn) error {

 func validatorsVoteWithTheMajority(conns ...*grpc.ClientConn) error {
 	conn := conns[0]
-	client := eth.NewBeaconChainClient(conn)
+	client := ethpb.NewBeaconChainClient(conn)
 	chainHead, err := client.GetChainHead(context.Background(), &emptypb.Empty{})
 	if err != nil {
 		return errors.Wrap(err, "failed to get chain head")
 	}

-	req := &eth.ListBlocksRequest{QueryFilter: &eth.ListBlocksRequest_Epoch{Epoch: chainHead.HeadEpoch - 1}}
-	blks, err := client.ListBlocks(context.Background(), req)
+	req := &ethpb.ListBlocksRequest{QueryFilter: &ethpb.ListBlocksRequest_Epoch{Epoch: chainHead.HeadEpoch - 1}}
+	blks, err := client.ListBlocksAltair(context.Background(), req)
 	if err != nil {
 		return errors.Wrap(err, "failed to get blocks from beacon-chain")
 	}

-	for _, blk := range blks.BlockContainers {
-		slot, vote := blk.Block.Block.Slot, blk.Block.Block.Body.Eth1Data.BlockHash
+	for _, ctr := range blks.BlockContainers {
+		blk, err := convertToBlockInterface(ctr)
+		if err != nil {
+			return err
+		}
+		slot, vote := blk.Block().Slot(), blk.Block().Body().Eth1Data().BlockHash
 		slotsPerVotingPeriod := params.E2ETestConfig().SlotsPerEpoch.Mul(uint64(params.E2ETestConfig().EpochsPerEth1VotingPeriod))

 		// We treat epoch 1 differently from other epoch for two reasons:
@@ -369,3 +384,13 @@ func validatorsVoteWithTheMajority(conns ...*grpc.ClientConn) error {
 }

 var expectedEth1DataVote []byte
+
+func convertToBlockInterface(obj *ethpb.BeaconBlockContainerAltair) (block.SignedBeaconBlock, error) {
+	if obj.GetPhase0Block() != nil {
+		return wrapper.WrappedPhase0SignedBeaconBlock(obj.GetPhase0Block()), nil
+	}
+	if obj.GetAltairBlock() != nil {
+		return wrapperv2.WrappedAltairSignedBeaconBlock(obj.GetAltairBlock())
+	}
+	return nil, errors.New("container has no block")
+}

@@ -5,15 +5,19 @@ import (
 	"fmt"

 	"github.com/pkg/errors"
+	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
 	"github.com/prysmaticlabs/prysm/endtoend/policies"
 	"github.com/prysmaticlabs/prysm/endtoend/types"
-	eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
+	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
 	"github.com/prysmaticlabs/prysm/shared/params"
 	"google.golang.org/grpc"
 	"google.golang.org/protobuf/types/known/emptypb"
 )

 var expectedParticipation = 0.95 // 95% participation to make room for minor issues.

+var expectedSyncParticipation = 0.95 // 95% participation for sync committee members.
+
 // ValidatorsAreActive ensures the expected amount of validators are active.
 var ValidatorsAreActive = types.Evaluator{
 	Name: "validators_active_epoch_%d",
@@ -28,11 +32,19 @@ var ValidatorsParticipating = types.Evaluator{
 	Evaluation: validatorsParticipating,
 }

+// ValidatorSyncParticipation ensures the expected amount of sync committee participants
+// are active.
+var ValidatorSyncParticipation = types.Evaluator{
+	Name:       "validator_sync_participation_%d",
+	Policy:     policies.AfterNthEpoch(params.AltairE2EForkEpoch - 1),
+	Evaluation: validatorsSyncParticipation,
+}
+
 func validatorsAreActive(conns ...*grpc.ClientConn) error {
 	conn := conns[0]
-	client := eth.NewBeaconChainClient(conn)
+	client := ethpb.NewBeaconChainClient(conn)
 	// Balances actually fluctuate but we just want to check initial balance.
-	validatorRequest := &eth.ListValidatorsRequest{
+	validatorRequest := &ethpb.ListValidatorsRequest{
 		PageSize: int32(params.BeaconConfig().MinGenesisActiveValidatorCount),
 		Active:   true,
 	}
@@ -83,8 +95,8 @@ func validatorsAreActive(conns ...*grpc.ClientConn) error {
 // validatorsParticipating ensures the validators have an acceptable participation rate.
 func validatorsParticipating(conns ...*grpc.ClientConn) error {
 	conn := conns[0]
-	client := eth.NewBeaconChainClient(conn)
-	validatorRequest := &eth.GetValidatorParticipationRequest{}
+	client := ethpb.NewBeaconChainClient(conn)
+	validatorRequest := &ethpb.GetValidatorParticipationRequest{}
 	participation, err := client.GetValidatorParticipation(context.Background(), validatorRequest)
 	if err != nil {
 		return errors.Wrap(err, "failed to get validator participation")
@@ -102,3 +114,78 @@ func validatorsParticipating(conns ...*grpc.ClientConn) error {
 	}
 	return nil
 }
+
+// validatorsSyncParticipation ensures the validators have an acceptable participation rate for
+// sync committee assignments.
+func validatorsSyncParticipation(conns ...*grpc.ClientConn) error {
+	conn := conns[0]
+	client := ethpb.NewNodeClient(conn)
+	altairClient := ethpb.NewBeaconChainClient(conn)
+	genesis, err := client.GetGenesis(context.Background(), &emptypb.Empty{})
+	if err != nil {
+		return errors.Wrap(err, "failed to get genesis data")
+	}
+	currSlot := helpers.CurrentSlot(uint64(genesis.GenesisTime.AsTime().Unix()))
+	currEpoch := helpers.SlotToEpoch(currSlot)
+	lowestBound := currEpoch - 1
+
+	if lowestBound < params.AltairE2EForkEpoch {
+		lowestBound = params.AltairE2EForkEpoch
+	}
+	blockCtrs, err := altairClient.ListBlocksAltair(context.Background(), &ethpb.ListBlocksRequest{QueryFilter: &ethpb.ListBlocksRequest_Epoch{Epoch: lowestBound}})
+	if err != nil {
+		return errors.Wrap(err, "failed to get validator participation")
+	}
+	for _, ctr := range blockCtrs.BlockContainers {
+		if ctr.GetAltairBlock() == nil {
+			return errors.Errorf("Altair block type doesn't exist for block at epoch %d", lowestBound)
+		}
+		blk := ctr.GetAltairBlock()
+		if blk.Block == nil || blk.Block.Body == nil || blk.Block.Body.SyncAggregate == nil {
+			return errors.New("nil block provided")
+		}
+		forkSlot, err := helpers.StartSlot(params.AltairE2EForkEpoch)
+		if err != nil {
+			return err
+		}
+		// Skip evaluation of the fork slot.
+		if blk.Block.Slot == forkSlot {
+			continue
+		}
+		syncAgg := blk.Block.Body.SyncAggregate
+		threshold := uint64(float64(syncAgg.SyncCommitteeBits.Len()) * expectedSyncParticipation)
+		if syncAgg.SyncCommitteeBits.Count() < threshold {
+			return errors.Errorf("in block of slot %d, the sync aggregate bitvector only had a count of %d, below the required threshold of %d", blk.Block.Slot, syncAgg.SyncCommitteeBits.Count(), threshold)
+		}
+	}
+	if lowestBound == currEpoch {
+		return nil
+	}
+	blockCtrs, err = altairClient.ListBlocksAltair(context.Background(), &ethpb.ListBlocksRequest{QueryFilter: &ethpb.ListBlocksRequest_Epoch{Epoch: currEpoch}})
+	if err != nil {
+		return errors.Wrap(err, "failed to get validator participation")
+	}
+	for _, ctr := range blockCtrs.BlockContainers {
+		if ctr.GetAltairBlock() == nil {
+			return errors.Errorf("Altair block type doesn't exist for block at epoch %d", currEpoch)
+		}
+		blk := ctr.GetAltairBlock()
+		if blk.Block == nil || blk.Block.Body == nil || blk.Block.Body.SyncAggregate == nil {
+			return errors.New("nil block provided")
+		}
+		forkSlot, err := helpers.StartSlot(params.AltairE2EForkEpoch)
+		if err != nil {
+			return err
+		}
+		// Skip evaluation of the fork slot.
+		if blk.Block.Slot == forkSlot {
+			continue
+		}
+		syncAgg := blk.Block.Body.SyncAggregate
+		threshold := uint64(float64(syncAgg.SyncCommitteeBits.Len()) * expectedSyncParticipation)
+		if syncAgg.SyncCommitteeBits.Count() < threshold {
+			return errors.Errorf("in block of slot %d, the sync aggregate bitvector only had a count of %d, below the required threshold of %d", blk.Block.Slot, syncAgg.SyncCommitteeBits.Count(), threshold)
+		}
+	}
+	return nil
+}

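As a worked example of the threshold arithmetic in validatorsSyncParticipation, assuming a 512-bit sync committee bitvector (the mainnet SYNC_COMMITTEE_SIZE; the E2E config may use a smaller committee):

package main

import "fmt"

func main() {
	const bits = 512                       // assumed SyncCommitteeBits.Len()
	const expectedSyncParticipation = 0.95 // same constant as the evaluator
	// uint64 conversion truncates: 512 * 0.95 = 486.4 -> 486.
	threshold := uint64(float64(bits) * expectedSyncParticipation)
	fmt.Println(threshold) // 486: a block fails the check if fewer bits are set
}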