Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-10 05:47:59 -05:00)

Compare commits: v1.0.0-alp ... v1.3.3 (849 commits)
849 commits, from 0f1d14437c (first listed) to 1f6afa8547 (last listed); the author and date columns are empty in this mirror view.
.bazelrc (18 lines changed)

@@ -5,6 +5,9 @@ test --test_verbose_timeout_warnings
test --build_tests_only
test --test_output=errors

# E2E run with debug gotag
test:e2e --define gotags=debug

# Clearly indicate that coverage is enabled to disable certain nogo checks.
coverage --define=coverage_enabled=1

@@ -14,9 +17,12 @@ test --host_force_python=PY2
run --host_force_python=PY2

# Networking is blocked for tests by default, add "requires-network" tag to your test if networking
# is required within the sandbox. This flag is no longer experimental after 0.29.0.
# Network sandboxing only works on linux.
--experimental_sandbox_default_allow_network=false
# is required within the sandbox. Network sandboxing only works on linux.
build --sandbox_default_allow_network=false

# Stamp binaries with git information
build --workspace_status_command=./scripts/workspace_status.sh
build --stamp

# Use mainnet protobufs at runtime
run --define ssz=mainnet

@@ -42,10 +48,9 @@ build:kafka_enabled --define kafka_enabled=true
build:kafka_enabled --define gotags=kafka_enabled

build:blst_enabled --define blst_enabled=true
build:blst_enabled --define gotags=blst_enabled

# Release flags
build:release --workspace_status_command=./scripts/workspace_status.sh
build:release --stamp
build:release --compilation_mode=opt
build:release --config=llvm

@@ -233,3 +238,6 @@ build:remote --remote_timeout=3600
build:remote --experimental_remote_download_outputs=toplevel --experimental_inmemory_jdeps_files --experimental_inmemory_dotd_files

build:remote --remote_local_fallback

# Ignore GoStdLib with remote caching
build --modify_execution_info='GoStdlib.*=+no-remote-cache'
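The named configs added above are selected with Bazel's `--config` flag. A minimal sketch of typical invocations, assuming these rc entries are loaded; the target labels below are illustrative assumptions, not paths confirmed by this diff:

```bash
# Run tests with the debug gotag defined by the new test:e2e config.
bazel test --config=e2e //beacon-chain/...

# Coverage invocations pick up coverage_enabled=1 from the rc entry above,
# which relaxes the nogo checks that fail under coverage.
bazel coverage //beacon-chain/core/...

# Produce an optimized, stamped build using the release flags.
bazel build --config=release //beacon-chain //validator
```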
@@ -1 +1 @@
3.2.0
3.7.0
@@ -34,19 +34,5 @@ build --flaky_test_attempts=5
# Disabled race detection due to unstable test results under constrained environment build kite
# build --features=race

# Enable kafka for CI tests only.
test --config=kafka_enabled

# Disable blst for CI tests.
test --define blst_enabled=true

build --bes_backend=grpcs://builds.prylabs.net:1985
build --bes_results_url=https://builds.prylabs.net/invocation/

# Disable flaky test detection for fuzzing.
test:fuzz --flaky_test_attempts=1

# Expose test environment variables in CI
test:fuzzit --test_env=GITHUB_REF
test:fuzzit --test_env=GITHUB_SHA
test:fuzzit --test_env=DOCKER_HOST
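The kafka_enabled config referenced in this CI file is the one defined in .bazelrc above, so it can also be invoked directly; a minimal sketch, with the `//...` pattern as an assumption:

```bash
# Run the test suite with kafka support compiled in, as the CI config references.
bazel test --config=kafka_enabled //...
```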
.deepsource.toml (22 lines, new file)

@@ -0,0 +1,22 @@
version = 1

exclude_patterns = ["tools/analyzers/**"]

[[analyzers]]
name = "go"
enabled = true

[analyzers.meta]
import_paths = ["github.com/prysmaticlabs/prysm"]

[[analyzers]]
name = "test-coverage"
enabled = true

[[analyzers]]
name = "shell"
enabled = true

[[analyzers]]
name = "secrets"
enabled = true
.github/PULL_REQUEST_TEMPLATE.md (6 lines changed, vendored)

@@ -1,8 +1,8 @@
<!-- Thanks for sending a PR! Before submitting:

1. If this is your first PR, please read CONTRIBUTING.md and sign the CLA
first. We cannot review code without a signed CLA.
2. Please file an issue *first*. All features and most bug fixes should have
1. If this is your first PR, check out our contribution guide here https://docs.prylabs.network/docs/contribute/contribution-guidelines
You will then need to sign our Contributor License Agreement (CLA), which will show up as a comment from a bot in this pull request after you open it. We cannot review code without a signed CLA.
2. Please file an associated tracking issue if this pull request is non-trivial and requires context for our team to understand. All features and most bug fixes should have
an associated issue with a design discussed and decided upon. Small bug
fixes and documentation improvements don't need issues.
3. New features and bug fixes must have tests. Documentation may need to
.gitignore (3 lines changed, vendored)

@@ -33,3 +33,6 @@ dist
# libfuzzer
oom-*
crash-*

# deepsource cli
bin
.policy.yml (91 lines, new file)

@@ -0,0 +1,91 @@
policy:
  approval:
    - or:
        - only test files are changed
        - only proto files are changed
        - touches consensus critical code
        - touches sync/blockchain/p2p code
        - large line count
        - is critical priority

approval_rules:
  - name: only test files are changed
    if:
      only_changed_files:
        paths:
          - "*_test.go"
          - "*.bazel"
    options:
      ignore_commits_by:
        users: ["bulldozer[bot]"]
    requires:
      count: 1
      teams:
        - "prysmaticlabs/core-team"
  - name: only proto files are changed
    if:
      only_changed_files:
        paths:
          - "*pb.go"
          - "*pb.gw.go"
          - "*.bazel"
    options:
      ignore_commits_by:
        users: ["bulldozer[bot]"]
    requires:
      count: 1
      teams:
        - "prysmaticlabs/core-team"
  - name: touches consensus critical code
    if:
      only_changed_files:
        paths:
          - "beacon-chain/core/*"
          - "beacon-chain/state/*"
    options:
      ignore_commits_by:
        users: ["bulldozer[bot]"]
    requires:
      count: 2
      teams:
        - "prysmaticlabs/core-team"
  - name: touches sync/blockchain/p2p code
    if:
      only_changed_files:
        paths:
          - "beacon-chain/blockchain/*"
          - "beacon-chain/sync/*"
          - "beacon-chain/p2p/*"
    options:
      ignore_commits_by:
        users: ["bulldozer[bot]"]
    requires:
      count: 2
      teams:
        - "prysmaticlabs/core-team"
  - name: large line count
    if:
      modified_lines:
        total: "> 1000"
      changed_files:
        ignore:
          - "*pb.go"
          - "*pb.gw.go"
          - "*.bazel"
    options:
      ignore_commits_by:
        users: ["bulldozer[bot]"]
    requires:
      count: 2
      teams:
        - "prysmaticlabs/core-team"
  - name: is critical priority
    if:
      has_labels:
        - "Priority: Critical"
    options:
      ignore_commits_by:
        users: ["bulldozer[bot]"]
    requires:
      count: 3
      teams:
        - "prysmaticlabs/core-team"
.well-known/security.pub (218 lines, new file)

@@ -0,0 +1,218 @@
-----BEGIN PGP PUBLIC KEY BLOCK-----
[ASCII-armored PGP public keys for the Prysmatic Labs security contacts (Preston Van Loon, Raul Jordan, Terence Tsao); see the file in the repository for the full key material]
-----END PGP PUBLIC KEY BLOCK-----
@@ -3,24 +3,23 @@ Hash: SHA512

Contact: mailto:security@prysmaticlabs.com
Encryption: openpgp4fpr:0AE0051D647BA3C1A917AF4072E33E4DF1A5036E
Encryption: openpgp4fpr:341396BAFACC28C5082327F889725027FC8EC0D4
Encryption: openpgp4fpr:FEE44615A19049DF0CA0C2735E2B7E5734DFADCB
Encryption: openpgp4fpr:CD08DE68C60B82D3EE2A3F7D95452A701810FEDB
Encryption: openpgp4fpr:317D6E91058F8F3C2303BA7756313E44581297A6
Preferred-Languages: en
Canonical: https://github.com/prysmaticlabs/prysm/tree/master/.well-known/security.txt

-----BEGIN PGP SIGNATURE-----
[two ASCII-armored signature blocks appear in this hunk (checksums =5qpD and =uW+X); see the file in the repository for the full signatures]
-----END PGP SIGNATURE-----
@@ -104,13 +104,15 @@ nogo(
        "@org_golang_x_tools//go/analysis/passes/inspect:go_tool_library",
        "@org_golang_x_tools//go/analysis/passes/asmdecl:go_tool_library",
        "//tools/analyzers/maligned:go_tool_library",
        "//tools/analyzers/roughtime:go_tool_library",
        "//tools/analyzers/cryptorand:go_tool_library",
        "//tools/analyzers/errcheck:go_tool_library",
        "//tools/analyzers/featureconfig:go_tool_library",
        "//tools/analyzers/comparesame:go_tool_library",
        "//tools/analyzers/shadowpredecl:go_tool_library",
        "//tools/analyzers/nop:go_tool_library",
        "//tools/analyzers/slicedirect:go_tool_library",
        "//tools/analyzers/ineffassign:go_tool_library",
        "//tools/analyzers/properpermissions:go_tool_library",
    ] + select({
        # nogo checks that fail with coverage enabled.
        ":coverage_enabled": [],
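Analyzers registered in this nogo list run during compilation under rules_go, so there is no separate lint step. A sketch of how that interacts with the coverage define added in .bazelrc above; the target patterns are assumptions:

```bash
# The nogo analyzers above run automatically while building Go targets.
bazel build //beacon-chain/... //validator/...

# Under `bazel coverage`, coverage_enabled=1 is set by the rc entry above and
# the checks listed behind ":coverage_enabled" are skipped.
bazel coverage //beacon-chain/core/...
```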
@@ -4,7 +4,7 @@ Note: The latest and most up to date documenation can be found on our [docs port

Excited by our work and want to get involved in building out our sharding releases? Or maybe you haven't learned as much about the Ethereum protocol but are a savvy developer?

You can explore our [Open Issues](https://github.com/prysmaticlabs/prysm/issues) in-the works for our different releases. Feel free to fork our repo and start creating PR’s after assigning yourself to an issue of interest. We are always chatting on [Discord](https://discord.gg/che9auJ) or [Gitter](https://gitter.im/prysmaticlabs/geth-sharding) drop us a line there if you want to get more involved or have any questions on our implementation!
You can explore our [Open Issues](https://github.com/prysmaticlabs/prysm/issues) in-the works for our different releases. Feel free to fork our repo and start creating PR’s after assigning yourself to an issue of interest. We are always chatting on [Discord](https://discord.gg/CTYGPUJ) drop us a line there if you want to get more involved or have any questions on our implementation!

## Contribution Steps

@@ -62,12 +62,6 @@ Changes that only affect a single file can be tested with
$ go test <file_you_are_working_on>
```

Changes that affect multiple files can be tested with ...

```
$ golangci-lint run && bazel test //...
```

**10. Stage the file or files that you want to commit.**

```

@@ -181,7 +175,7 @@ We consider two types of contributions to our repo and categorize them as follow

### Part-Time Contributors

Anyone can become a part-time contributor and help out on implementing sharding. The responsibilities of a part-time contributor include:
Anyone can become a part-time contributor and help out on implementing eth2. The responsibilities of a part-time contributor include:

- Engaging in Gitter conversations, asking the questions on how to begin contributing to the project
- Opening up github issues to express interest in code to implement

@@ -192,8 +186,6 @@ Anyone can become a part-time contributor and help out on implementing sharding.
- Follow up on open PRs
- Have an estimated timeframe to completion and let the core contributors know if a PR will take longer than expected

We do not expect all part-time contributors to be experts on all the latest sharding documentation, but all contributors should at least be familiarized with our sharding [README.md](https://github.com/prysmaticlabs/prysm/blob/master/validator/README.md) and have gone through the required Ethereum readings as posted on our [READINGS.md](https://github.com/prysmaticlabs/prysm/blob/master/docs/READINGS.md) document.

### Core Contributors

Core contributors are remote contractors of Prysmatic Labs, LLC. and are considered critical team members of our organization. Core devs have all of the responsibilities of part-time contributors plus the majority of the following:
@@ -61,9 +61,20 @@ Example:

```bash
go get github.com/prysmaticlabs/example@v1.2.3
bazel run //:gazelle -- update-repos -from_file=go.mod -to_macro=deps.bzl%prysm_deps
bazel run //:gazelle -- update-repos -from_file=go.mod -to_macro=deps.bzl%prysm_deps -prune=true
```

The deps.bzl file should have been updated with the dependency and any transitive dependencies.

Do NOT add new `go_repository` to the WORKSPACE file. All dependencies should live in deps.bzl.

## Running tests

To enable conditional compilation and custom configuration for tests (where compiled code has more
debug info, while not being completely optimized), we rely on Go's build tags/constraints mechanism
(see official docs on [build constraints](https://golang.org/pkg/go/build/#hdr-Build_Constraints)).
Therefore, whenever using `go test`, do not forget to pass in extra build tag, eg:

```bash
go test ./beacon-chain/sync/initial-sync -tags develop
```
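As a quick sanity check after the dependency steps above, one can confirm the update landed in deps.bzl and not in the WORKSPACE file; the git command here is generic rather than something this doc prescribes:

```bash
# Regenerate deps.bzl from go.mod, pruning repositories that are no longer needed.
bazel run //:gazelle -- update-repos -from_file=go.mod -to_macro=deps.bzl%prysm_deps -prune=true

# The diff should touch deps.bzl only; WORKSPACE should remain unchanged.
git diff --stat deps.bzl WORKSPACE
```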
README.md (31 lines changed)

@@ -1,20 +1,35 @@
# Prysm: An Ethereum 2.0 Client Written in Go
# Prysm: An Ethereum 2.0 Implementation Written in Go

[](https://buildkite.com/prysmatic-labs/prysm)
[](https://github.com/ethereum/eth2.0-specs/tree/v0.12.2)
[](https://discord.gg/KSA7rPr)
[](https://gitter.im/prysmaticlabs/geth-sharding?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)
[](https://goreportcard.com/report/github.com/prysmaticlabs/prysm)
[](https://github.com/ethereum/eth2.0-specs/tree/v1.0.0)
[](https://discord.gg/CTYGPUJ)

This is the core repository for Prysm, a [Golang](https://golang.org/) implementation of the Ethereum 2.0 client specifications developed by [Prysmatic Labs](https://prysmaticlabs.com).
This is the core repository for Prysm, a [Golang](https://golang.org/) implementation of the [Ethereum 2.0](https://ethereum.org/en/eth2/) specification, developed by [Prysmatic Labs](https://prysmaticlabs.com). See the [Changelog](https://github.com/prysmaticlabs/prysm/releases) for details of the latest releases and upcoming breaking changes.

### Getting Started
A detailed set of installation and usage instructions as well as breakdowns of each individual component are available in the [official documentation portal](https://docs.prylabs.network). If you still have questions, feel free to stop by either our [Discord](https://discord.gg/KSA7rPr) or [Gitter](https://gitter.im/prysmaticlabs/geth-sharding?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) and a member of the team or our community will be happy to assist you.

### Come join the testnet!
Participation is now open to the public for our Ethereum 2.0 phase 0 testnet release. Visit [prylabs.net](https://prylabs.net) for more information on the project or to sign up as a validator on the network. You can visualize the nodes in the network on [eth2stats.io](https://eth2stats.io), explore validator rewards/penalties via Bitfly's block explorer: [beaconcha.in](https://beaconcha.in), and follow the latest blocks added to the chain on [Etherscan](https://beacon.etherscan.io).
A detailed set of installation and usage instructions as well as breakdowns of each individual component are available in the [official documentation portal](https://docs.prylabs.network). If you still have questions, feel free to stop by our [Discord](https://discord.gg/CTYGPUJ).

### Staking on Mainnet

To participate in staking, you can join the [official eth2 launchpad](https://launchpad.ethereum.org). The launchpad is the only recommended way to become a validator on mainnet. You can explore validator rewards/penalties via Bitfly's block explorer: [beaconcha.in](https://beaconcha.in), and follow the latest blocks added to the chain on [beaconscan](https://beaconscan.com).

## Contributing
### Branches
Prysm maintains two permanent branches:

* [master](https://github.com/prysmaticlabs/prysm/tree/master): This points to the latest stable release. It is ideal for most users.
* [develop](https://github.com/prysmaticlabs/prysm/tree/develop): This is used for development, it contains the latest PRs. Developers should base their PRs on this branch.

### Guide
Want to get involved? Check out our [Contribution Guide](https://docs.prylabs.network/docs/contribute/contribution-guidelines/) to learn more!

## License

[GNU General Public License v3.0](https://www.gnu.org/licenses/gpl-3.0.en.html)

## Legal Disclaimer

[Terms of Use](/TERMS_OF_SERVICE.md)
45
TERMS_OF_SERVICE.md
Normal file
45
TERMS_OF_SERVICE.md
Normal file
@@ -0,0 +1,45 @@
|
||||
## Terms of Use
|
||||
|
||||
Effective as of Oct 14, 2020
|
||||
|
||||
By downloading, accessing or using the Prysm implementation (“Prysm”), you (referenced herein as “you” or the “user”) certify that you have read and agreed to the terms and conditions below (the “Terms”) which form a binding contract between you and Prysmatic Labs (referenced herein as “we” or “us”). If you do not agree to the Terms, do not download or use Prysm.
|
||||
|
||||
### About Prysm
|
||||
Prysm is a client implementation for Ethereum 2.0 Phase 0 protocol for a proof-of-stake blockchain. To participate in the network, a user must send ETH from the Eth1.0 chain into a validator deposit contract, which will queue in the user as a validator in the system. Validators participate in proposing and voting on blocks in the protocol, and the network applies rewards/penalties based on their behavior. A detailed set of installation and usage instructions as well as breakdowns of each individual component are available in the official documentation portal, however, we do not warrant the accuracy, completeness or usefulness of this documentation. Any reliance you place on such information is strictly at your own risk.
|
||||
|
||||
### Licensing Terms
|
||||
Prysm is a fully open-source software program licensed pursuant to the GNU General Public License v3.0.
|
||||
|
||||
The Prysmatic Labs name, the term “Prysm” and all related names, logos, product and service names, designs and slogans are trademarks of Prysmatic Labs or its affiliates and/or licensors. You must not use such marks without our prior written permission.
|
||||
|
||||
### Risks of Operating Prysm
|
||||
The use of Prysm and acting as a validator on the Ethereum 2.0 network can lead to loss of money. Ethereum is still an experimental system and ETH remains a risky investment. You alone are responsible for your actions on Prysm including the security of your ETH and meeting any applicable minimum system requirements.
|
||||
|
||||
Use of Prysm and the ability to receive rewards or penalties may be affected at any time by mistakes made by the user or other users, software problems such as bugs, errors, incorrectly constructed transactions, unsafe cryptographic libraries or malware affecting the network, technical failures in the hardware of a user, security problems experienced by a user and/or actions or inactions of third parties and/or events experienced by third parties, among other risks. We cannot and do not guarantee that any user of Prysm will make money, that the Prysm network will operate in accordance with the documentation or that transactions will be effective or secure.
|
||||
|
||||
We make no claims that Prysm is appropriate or permitted for use in any specific jurisdiction. Access to Prysm may not be legal by certain persons or in certain jurisdictions or countries. If you access Prysm, you do so on your own initiative and are responsible for compliance with local laws.
|
||||
|
||||
Some Internet plans will charge an additional amount for any excess upload bandwidth used that isn’t included in the plan and may terminate your connection without warning because of overuse. We advise that you check whether your Internet connection is subjected to such limitations and monitor your bandwidth use so that you can stop Prysm before you reach your upload limit.
|
||||
|
||||
### Warranty Disclaimer
|
||||
PRYSM IS PROVIDED ON AN “AS-IS” BASIS AND MAY INCLUDE ERRORS, OMISSIONS, OR OTHER INACCURACIES. PRYSMATIC LABS AND ITS CONTRIBUTORS MAKE NO REPRESENTATIONS OR WARRANTIES ABOUT PRYSM FOR ANY PURPOSE, AND HEREBY EXPRESSLY DISCLAIM ALL WARRANTIES, EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT OR ANY OTHER IMPLIED WARRANTY UNDER THE UNIFORM COMPUTER INFORMATION TRANSACTIONS ACT AS ENACTED BY ANY STATE. WE ALSO MAKE NO REPRESENTATIONS OR WARRANTIES THAT PRYSM WILL OPERATE ERROR-FREE, UNINTERRUPTED, OR IN A MANNER THAT WILL MEET YOUR REQUIREMENTS AND/OR NEEDS. THEREFORE, YOU ASSUME THE ENTIRE RISK REGARDING THE QUALITY AND/OR PERFORMANCE OF PRYSM AND ANY TRANSACTIONS ENTERED INTO THEREON.
|
||||
|
||||
### Limitation of Liability
|
||||
In no event will Prysmatic Labs or any of its contributors be liable, whether in contract, warranty, tort (including negligence, whether active, passive or imputed), product liability, strict liability or other theory, breach of statutory duty or otherwise arising out of, or in connection with, your use of Prysm, for any direct, indirect, incidental, special or consequential damages (including any loss of profits or data, business interruption or other pecuniary loss, or damage, loss or other compromise of data, in each case whether direct, indirect, incidental, special or consequential) arising out of use Prysm, even if we or other users have been advised of the possibility of such damages. The foregoing limitations and disclaimers shall apply to the maximum extent permitted by applicable law, even if any remedy fails of its essential purpose. You acknowledge and agree that the limitations of liability afforded us hereunder constitute a material and actual inducement and condition to entering into these Terms, and are reasonable, fair and equitable in scope to protect our legitimate interests in light of the fact that we are not receiving consideration from you for providing Prysm.
|
||||
|
||||
### Indemnification
|
||||
To the maximum extent permitted by law, you will defend, indemnify and hold Prysmatic Labs and its contributors harmless from and against any and all claims, actions, suits, investigations, or proceedings by any third party (including any party or purported party to or beneficiary or purported beneficiary of any transaction on Prysm), as well as any and all losses, liabilities, damages, costs, and expenses (including reasonable attorneys’ fees) arising out of, accruing from, or in any way related to (i) your breach of the terms of this Agreement, (ii) any transaction, or the failure to occur of any transaction on Prysm, and (iii) your negligence, fraud, or willful misconduct.
### Compliance with Laws and Tax Obligations
Your use of Prysm is subject to all applicable laws of any governmental authority, including, without limitation, federal, state and foreign securities laws, tax laws, tariff and trade laws, ordinances, judgments, decrees, injunctions, writs and orders or like actions of any governmental authority and rules, regulations, orders, interpretations, licenses, and permits of any federal, regional, state, county, municipal or other governmental authority, and you agree to comply with all such laws in your use of Prysm. The users of Prysm are solely responsible for determining what, if any, taxes apply to their ETH transactions. The owners of, or contributors to, Prysm are not responsible for determining the taxes that apply to ETH transactions.
### Miscellaneous
These Terms will be construed and enforced in accordance with the laws of the state of Illinois as applied to agreements entered into and completely performed in Illinois. You agree to the personal jurisdiction by and venue in Illinois and waive any objection to such jurisdiction or venue.
We reserve the right to revise these Terms, and your rights and obligations are at all times subject to the then-current Terms provided on Prysm. Your continued use of Prysm constitutes acceptance of such revised Terms.
These Terms constitute the entire agreement between you and Prysmatic Labs regarding use of Prysm and will supersede all prior agreements, whether written or oral. No usage of trade or other regular practice or method of dealing between the parties will be used to modify, interpret, supplement, or alter the terms of these Terms.
If any portion of these Terms is held invalid or unenforceable, such invalidity or unenforceability will not affect the other provisions of these Terms, which will remain in full force and effect, and the invalid or unenforceable portion will be given effect to the greatest extent possible. The failure of a party to require performance of any provision will not affect that party’s right to require performance at any time thereafter, nor will a waiver of any breach or default of these Terms or any provision of these Terms constitute a waiver of any subsequent breach or default or a waiver of the provision itself.
TESTNET.md (deleted)
@@ -1,43 +0,0 @@
# Testnet

The Prysmatic Labs test network is available for anyone to join. The easiest way to participate is by joining through the website, https://prylabs.net.

## Interop

For developers looking to connect a client other than Prysm to the test network, here is the relevant information for compatibility.
**Spec version** - [v0.8.3](https://github.com/ethereum/eth2.0-specs/tree/v0.8.3)

**ETH 1 Deposit Contract Address** - See https://prylabs.net/contract. This contract is deployed on the [goerli](https://goerli.net/) network.

**Genesis time** - The timestamp of the ETH1 block in which the 64th deposit needed to start ETH2 was included. This is NOT midnight of the next day as required by the spec.
### ETH 2 Configuration

Use the [minimal config](https://github.com/ethereum/eth2.0-specs/blob/v0.8.3/configs/minimal.yaml) with the following changes.

| field | value |
|-------|-------|
| MIN_DEPOSIT_AMOUNT | 100 |
| MAX_EFFECTIVE_BALANCE | 3.2 * 1e9 |
| EJECTION_BALANCE | 1.6 * 1e9 |
| EFFECTIVE_BALANCE_INCREMENT | 0.1 * 1e9 |
| ETH1_FOLLOW_DISTANCE | 16 |
| GENESIS_FORK_VERSION | See [latest code](https://github.com/prysmaticlabs/prysm/blob/master/shared/params/config.go#L236) |

These parameters reduce the minimal config to 1/10 of the required ETH.
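As an aside (not part of the original TESTNET.md), the table above can also be expressed programmatically. A minimal sketch, assuming Prysm's `shared/params` package with its `MinimalSpecConfig` and `OverrideBeaconConfig` helpers; the field names mirror `BeaconChainConfig` and should be checked against the Prysm version you build:

```go
package testnetcfg

import "github.com/prysmaticlabs/prysm/shared/params"

// applyTestnetOverrides layers the table above on top of the minimal spec
// config. Illustrative sketch only; verify field names against your Prysm
// version before relying on it.
func applyTestnetOverrides() {
    cfg := *params.MinimalSpecConfig() // copy so the package default stays untouched
    cfg.MinDepositAmount = 100
    cfg.MaxEffectiveBalance = 3_200_000_000     // 3.2 * 1e9 Gwei
    cfg.EjectionBalance = 1_600_000_000         // 1.6 * 1e9 Gwei
    cfg.EffectiveBalanceIncrement = 100_000_000 // 0.1 * 1e9 Gwei
    cfg.Eth1FollowDistance = 16
    // GENESIS_FORK_VERSION is left as defined in the linked config.go.
    params.OverrideBeaconConfig(&cfg)
}
```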
We have a genesis.ssz file available for download [here](https://prysmaticlabs.com/uploads/genesis.ssz).

### Connecting to the network

We have a libp2p bootstrap node available at `/dns4/prylabs.net/tcp/30001/p2p/16Uiu2HAm7Qwe19vz9WzD2Mxn7fXd1vgHHp4iccuyq7TxwRXoAGfc`.

Some of the Prysmatic Labs hosted nodes are behind a libp2p relay, so your libp2p implementation should support the circuit relay protocol.
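For client teams wiring this into code, here is a rough, hedged sketch (not from the original document) of turning the bootstrap multiaddr above into dialable peer info using go-libp2p's core `peer` package and go-multiaddr; exact import paths vary slightly between go-libp2p releases:

```go
package main

import (
    "fmt"
    "log"

    "github.com/libp2p/go-libp2p-core/peer"
    ma "github.com/multiformats/go-multiaddr"
)

func main() {
    // The Prysmatic Labs bootstrap node from the section above.
    addr := "/dns4/prylabs.net/tcp/30001/p2p/16Uiu2HAm7Qwe19vz9WzD2Mxn7fXd1vgHHp4iccuyq7TxwRXoAGfc"

    maddr, err := ma.NewMultiaddr(addr)
    if err != nil {
        log.Fatalf("invalid multiaddr: %v", err)
    }

    // Split the multiaddr into a peer ID plus dialable addresses,
    // suitable for passing to host.Connect in go-libp2p.
    info, err := peer.AddrInfoFromP2pAddr(maddr)
    if err != nil {
        log.Fatalf("could not extract peer info: %v", err)
    }
    fmt.Printf("peer %s, addrs %v\n", info.ID, info.Addrs)
}
```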
### Other

Undoubtedly, you will run into bugs. Reach out to us on [Discord](https://discord.gg/KSA7rPr) and be sure to capture issues on GitHub at https://github.com/prysmaticlabs/prysm/issues.

If you have instructions for your client, we would love to attempt this on your behalf. Kindly send over the instructions via a GitHub issue, PR, email to team@prysmaticlabs.com, or Discord.
WORKSPACE
@@ -5,19 +5,19 @@ load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")
|
||||
|
||||
http_archive(
|
||||
name = "bazel_toolchains",
|
||||
sha256 = "db48eed61552e25d36fe051a65d2a329cc0fb08442627e8f13960c5ab087a44e",
|
||||
strip_prefix = "bazel-toolchains-3.2.0",
|
||||
sha256 = "8e0633dfb59f704594f19ae996a35650747adc621ada5e8b9fb588f808c89cb0",
|
||||
strip_prefix = "bazel-toolchains-3.7.0",
|
||||
urls = [
|
||||
"https://mirror.bazel.build/github.com/bazelbuild/bazel-toolchains/releases/download/3.2.0/bazel-toolchains-3.2.0.tar.gz",
|
||||
"https://github.com/bazelbuild/bazel-toolchains/releases/download/3.2.0/bazel-toolchains-3.2.0.tar.gz",
|
||||
"https://mirror.bazel.build/github.com/bazelbuild/bazel-toolchains/releases/download/3.7.0/bazel-toolchains-3.7.0.tar.gz",
|
||||
"https://github.com/bazelbuild/bazel-toolchains/releases/download/3.7.0/bazel-toolchains-3.7.0.tar.gz",
|
||||
],
|
||||
)
|
||||
|
||||
http_archive(
|
||||
name = "com_grail_bazel_toolchain",
|
||||
sha256 = "0bec89e35d8a141c87f28cfc506d6d344785c8eb2ff3a453140a1fe972ada79d",
|
||||
strip_prefix = "bazel-toolchain-77a87103145f86f03f90475d19c2c8854398a444",
|
||||
urls = ["https://github.com/grailbio/bazel-toolchain/archive/77a87103145f86f03f90475d19c2c8854398a444.tar.gz"],
|
||||
sha256 = "b924b102adc0c3368d38a19bd971cb4fa75362a27bc363d0084b90ca6877d3f0",
|
||||
strip_prefix = "bazel-toolchain-0.5.7",
|
||||
urls = ["https://github.com/grailbio/bazel-toolchain/archive/0.5.7.tar.gz"],
|
||||
)
|
||||
|
||||
load("@com_grail_bazel_toolchain//toolchain:deps.bzl", "bazel_toolchain_dependencies")
|
||||
@@ -28,7 +28,7 @@ load("@com_grail_bazel_toolchain//toolchain:rules.bzl", "llvm_toolchain")
|
||||
|
||||
llvm_toolchain(
|
||||
name = "llvm_toolchain",
|
||||
llvm_version = "9.0.0",
|
||||
llvm_version = "10.0.0",
|
||||
)
|
||||
|
||||
load("@llvm_toolchain//:toolchains.bzl", "llvm_register_toolchains")
|
||||
@@ -47,10 +47,10 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
|
||||
|
||||
http_archive(
|
||||
name = "bazel_skylib",
|
||||
sha256 = "97e70364e9249702246c0e9444bccdc4b847bed1eb03c5a3ece4f83dfe6abc44",
|
||||
sha256 = "1c531376ac7e5a180e0237938a2536de0c54d93f5c278634818e0efc952dd56c",
|
||||
urls = [
|
||||
"https://mirror.bazel.build/github.com/bazelbuild/bazel-skylib/releases/download/1.0.2/bazel-skylib-1.0.2.tar.gz",
|
||||
"https://github.com/bazelbuild/bazel-skylib/releases/download/1.0.2/bazel-skylib-1.0.2.tar.gz",
|
||||
"https://github.com/bazelbuild/bazel-skylib/releases/download/1.0.3/bazel-skylib-1.0.3.tar.gz",
|
||||
"https://mirror.bazel.build/github.com/bazelbuild/bazel-skylib/releases/download/1.0.3/bazel-skylib-1.0.3.tar.gz",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -60,10 +60,10 @@ bazel_skylib_workspace()
|
||||
|
||||
http_archive(
|
||||
name = "bazel_gazelle",
|
||||
sha256 = "d8c45ee70ec39a57e7a05e5027c32b1576cc7f16d9dd37135b0eddde45cf1b10",
|
||||
sha256 = "222e49f034ca7a1d1231422cdb67066b885819885c356673cb1f72f748a3c9d4",
|
||||
urls = [
|
||||
"https://storage.googleapis.com/bazel-mirror/github.com/bazelbuild/bazel-gazelle/releases/download/v0.20.0/bazel-gazelle-v0.20.0.tar.gz",
|
||||
"https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.20.0/bazel-gazelle-v0.20.0.tar.gz",
|
||||
"https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.22.3/bazel-gazelle-v0.22.3.tar.gz",
|
||||
"https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.22.3/bazel-gazelle-v0.22.3.tar.gz",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -76,9 +76,9 @@ http_archive(
|
||||
|
||||
http_archive(
|
||||
name = "io_bazel_rules_docker",
|
||||
sha256 = "dc97fccceacd4c6be14e800b2a00693d5e8d07f69ee187babfd04a80a9f8e250",
|
||||
strip_prefix = "rules_docker-0.14.1",
|
||||
url = "https://github.com/bazelbuild/rules_docker/archive/v0.14.1.tar.gz",
|
||||
sha256 = "1286175a94c0b1335efe1d75d22ea06e89742557d3fac2a0366f242a6eac6f5a",
|
||||
strip_prefix = "rules_docker-ba4310833230294fa69b7d6ea1787ac684631a7d",
|
||||
urls = ["https://github.com/bazelbuild/rules_docker/archive/ba4310833230294fa69b7d6ea1787ac684631a7d.tar.gz"],
|
||||
)
|
||||
|
||||
http_archive(
|
||||
@@ -89,10 +89,10 @@ http_archive(
|
||||
# nogo check fails for certain third_party dependencies.
|
||||
"//third_party:io_bazel_rules_go.patch",
|
||||
],
|
||||
sha256 = "7b9bbe3ea1fccb46dcfa6c3f3e29ba7ec740d8733370e21cdc8937467b4a4349",
|
||||
sha256 = "52d0a57ea12139d727883c2fef03597970b89f2cc2a05722c42d1d7d41ec065b",
|
||||
urls = [
|
||||
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.22.4/rules_go-v0.22.4.tar.gz",
|
||||
"https://github.com/bazelbuild/rules_go/releases/download/v0.22.4/rules_go-v0.22.4.tar.gz",
|
||||
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.24.13/rules_go-v0.24.13.tar.gz",
|
||||
"https://github.com/bazelbuild/rules_go/releases/download/v0.24.13/rules_go-v0.24.13.tar.gz",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -100,14 +100,14 @@ http_archive(
|
||||
# https://github.com/gogo/protobuf/pull/582 is merged.
|
||||
git_repository(
|
||||
name = "com_github_gogo_protobuf",
|
||||
commit = "5628607bb4c51c3157aacc3a50f0ab707582b805",
|
||||
commit = "b03c65ea87cdc3521ede29f62fe3ce239267c1bc",
|
||||
patch_args = ["-p1"],
|
||||
patches = [
|
||||
"@io_bazel_rules_go//third_party:com_github_gogo_protobuf-gazelle.patch",
|
||||
"//third_party:com_github_gogo_protobuf-equal.patch",
|
||||
],
|
||||
remote = "https://github.com/gogo/protobuf",
|
||||
shallow_since = "1571033717 +0200",
|
||||
shallow_since = "1610265707 +0000",
|
||||
# gazelle args: -go_prefix github.com/gogo/protobuf -proto legacy
|
||||
)
|
||||
|
||||
@@ -139,9 +139,9 @@ load(
|
||||
|
||||
container_pull(
|
||||
name = "alpine_cc_linux_amd64",
|
||||
digest = "sha256:3f7f4dfcb6dceac3a902f36609cc232262e49f5656a6dc4bb3da89e35fecc8a5",
|
||||
digest = "sha256:752aa0c9a88461ffc50c5267bb7497ef03a303e38b2c8f7f2ded9bebe5f1f00e",
|
||||
registry = "index.docker.io",
|
||||
repository = "fasibio/alpine-libgcc",
|
||||
repository = "pinglamb/alpine-glibc",
|
||||
)
|
||||
|
||||
container_pull(
|
||||
@@ -155,12 +155,19 @@ load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_depe
|
||||
|
||||
go_rules_dependencies()
|
||||
|
||||
go_register_toolchains(nogo = "@//:nogo")
|
||||
go_register_toolchains(
|
||||
go_version = "1.16",
|
||||
nogo = "@//:nogo",
|
||||
)
|
||||
|
||||
load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies")
|
||||
|
||||
gazelle_dependencies()
|
||||
|
||||
load("@io_bazel_rules_go//extras:embed_data_deps.bzl", "go_embed_data_dependencies")
|
||||
|
||||
go_embed_data_dependencies()
|
||||
|
||||
load("@com_github_atlassian_bazel_tools//gometalinter:deps.bzl", "gometalinter_dependencies")
|
||||
|
||||
gometalinter_dependencies()
|
||||
@@ -203,6 +210,21 @@ http_archive(
|
||||
url = "https://github.com/kubernetes/repo-infra/archive/6537f2101fb432b679f3d103ee729dd8ac5d30a0.tar.gz",
|
||||
)
|
||||
|
||||
http_archive(
|
||||
name = "eip3076_spec_tests",
|
||||
build_file_content = """
|
||||
filegroup(
|
||||
name = "test_data",
|
||||
srcs = glob([
|
||||
"**/*.json",
|
||||
]),
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
""",
|
||||
sha256 = "91434d5fd5e1c6eb7b0174fed2afe25e09bddf00e1e4c431db931b2cee4e7773",
|
||||
url = "https://github.com/eth2-clients/slashing-protection-interchange-tests/archive/b8413ca42dc92308019d0d4db52c87e9e125c4e9.tar.gz",
|
||||
)
|
||||
|
||||
http_archive(
|
||||
name = "eth2_spec_tests_general",
|
||||
build_file_content = """
|
||||
@@ -215,8 +237,8 @@ filegroup(
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
""",
|
||||
sha256 = "7e5f838e0f9110471ef8be9401ea687a8ed4d499664dc0eac34ecfdfd03c2ac3",
|
||||
url = "https://github.com/ethereum/eth2.0-spec-tests/releases/download/v0.12.3/general.tar.gz",
|
||||
sha256 = "ef5396e4b13995da9776eeb5ae346a2de90970c28da3c4f0dcaa4ab9f0ad1f93",
|
||||
url = "https://github.com/ethereum/eth2.0-spec-tests/releases/download/v1.0.0/general.tar.gz",
|
||||
)
|
||||
|
||||
http_archive(
|
||||
@@ -231,8 +253,8 @@ filegroup(
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
""",
|
||||
sha256 = "9e5c4b335e3854195019d9569341ab500aa7621b045858efd830fce9dddf27f9",
|
||||
url = "https://github.com/ethereum/eth2.0-spec-tests/releases/download/v0.12.3/minimal.tar.gz",
|
||||
sha256 = "170551b441e7d54b73248372ad9ce8cb6c148810b5f1364637117a63f4f1c085",
|
||||
url = "https://github.com/ethereum/eth2.0-spec-tests/releases/download/v1.0.0/minimal.tar.gz",
|
||||
)
|
||||
|
||||
http_archive(
|
||||
@@ -247,8 +269,8 @@ filegroup(
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
""",
|
||||
sha256 = "556d90a495e3f13f269741cb6c4ad060f529fe2adf82efccb83833d172c1bf93",
|
||||
url = "https://github.com/ethereum/eth2.0-spec-tests/releases/download/v0.12.3/mainnet.tar.gz",
|
||||
sha256 = "b541a9979b4703fa5ee5d2182b0b5313c38efc54ae7eaec2eef793230a52ec83",
|
||||
url = "https://github.com/ethereum/eth2.0-spec-tests/releases/download/v1.0.0/mainnet.tar.gz",
|
||||
)
|
||||
|
||||
http_archive(
|
||||
@@ -264,9 +286,9 @@ buildifier_dependencies()
|
||||
|
||||
git_repository(
|
||||
name = "com_google_protobuf",
|
||||
commit = "4059c61f27eb1b06c4ee979546a238be792df0a4",
|
||||
commit = "fde7cf7358ec7cd69e8db9be4f1fa6a5c431386a", # v3.13.0
|
||||
remote = "https://github.com/protocolbuffers/protobuf",
|
||||
shallow_since = "1558721209 -0700",
|
||||
shallow_since = "1597443653 -0700",
|
||||
)
|
||||
|
||||
load("@com_google_protobuf//:protobuf_deps.bzl", "protobuf_deps")
|
||||
@@ -336,15 +358,26 @@ go_binary(
|
||||
urls = ["https://github.com/ferranbt/fastssz/archive/06015a5d84f9e4eefe2c21377ca678fa8f1a1b09.tar.gz"],
|
||||
)
|
||||
|
||||
http_archive(
|
||||
name = "prysm_web_ui",
|
||||
build_file_content = """
|
||||
filegroup(
|
||||
name = "site",
|
||||
srcs = glob(["**/*"]),
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
""",
|
||||
sha256 = "edb80f3a695d84f6000f0e05abf7a4bbf207c03abb91219780ec97e7d6ad21c8",
|
||||
urls = [
|
||||
"https://github.com/prysmaticlabs/prysm-web-ui/releases/download/v1.0.0-beta.3/prysm-web-ui.tar.gz",
|
||||
],
|
||||
)
|
||||
|
||||
load("//:deps.bzl", "prysm_deps")
|
||||
|
||||
# gazelle:repository_macro deps.bzl%prysm_deps
|
||||
prysm_deps()
|
||||
|
||||
load("@com_github_prysmaticlabs_go_ssz//:deps.bzl", "go_ssz_dependencies")
|
||||
|
||||
go_ssz_dependencies()
|
||||
|
||||
load("@prysm//third_party/herumi:herumi.bzl", "bls_dependencies")
|
||||
|
||||
bls_dependencies()
|
||||
|
||||
@@ -1,153 +1,19 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library")
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_test")
|
||||
load("@io_bazel_rules_docker//go:image.bzl", "go_image")
|
||||
load("@io_bazel_rules_docker//container:container.bzl", "container_bundle")
|
||||
load("//tools:go_image.bzl", "go_image_alpine", "go_image_debug")
|
||||
load("@io_bazel_rules_docker//contrib:push-all.bzl", "docker_push")
|
||||
load("//tools:target_migration.bzl", "moved_targets")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"main.go",
|
||||
"usage.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain",
|
||||
visibility = ["//beacon-chain:__subpackages__"],
|
||||
deps = [
|
||||
"//beacon-chain/flags:go_default_library",
|
||||
"//beacon-chain/node:go_default_library",
|
||||
"//shared/cmd:go_default_library",
|
||||
"//shared/debug:go_default_library",
|
||||
"//shared/featureconfig:go_default_library",
|
||||
"//shared/logutil:go_default_library",
|
||||
"//shared/maxprocs:go_default_library",
|
||||
"//shared/version:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//log:go_default_library",
|
||||
"@com_github_ipfs_go_log_v2//:go_default_library",
|
||||
"@com_github_joonix_log//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_urfave_cli_v2//:go_default_library",
|
||||
"@com_github_urfave_cli_v2//altsrc:go_default_library",
|
||||
"@com_github_x_cray_logrus_prefixed_formatter//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_image(
|
||||
name = "image",
|
||||
srcs = [
|
||||
"main.go",
|
||||
"usage.go",
|
||||
],
|
||||
base = select({
|
||||
"//tools:base_image_alpine": "//tools:alpine_cc_image",
|
||||
"//tools:base_image_cc": "//tools:cc_image",
|
||||
"//conditions:default": "//tools:cc_image",
|
||||
}),
|
||||
goarch = "amd64",
|
||||
goos = "linux",
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain",
|
||||
race = "off",
|
||||
static = "off", # Static enabled binary seems to cause issues with DNS lookup with cgo.
|
||||
tags = ["manual"],
|
||||
visibility = ["//visibility:private"],
|
||||
deps = [
|
||||
"//beacon-chain/flags:go_default_library",
|
||||
"//beacon-chain/node:go_default_library",
|
||||
"//shared/cmd:go_default_library",
|
||||
"//shared/debug:go_default_library",
|
||||
"//shared/featureconfig:go_default_library",
|
||||
"//shared/logutil:go_default_library",
|
||||
"//shared/version:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//log:go_default_library",
|
||||
"@com_github_ipfs_go_log_v2//:go_default_library",
|
||||
"@com_github_joonix_log//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_urfave_cli_v2//:go_default_library",
|
||||
"@com_github_urfave_cli_v2//altsrc:go_default_library",
|
||||
"@com_github_x_cray_logrus_prefixed_formatter//:go_default_library",
|
||||
"//shared/maxprocs:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
container_bundle(
|
||||
name = "image_bundle",
|
||||
images = {
|
||||
"gcr.io/prysmaticlabs/prysm/beacon-chain:latest": ":image",
|
||||
"gcr.io/prysmaticlabs/prysm/beacon-chain:{DOCKER_TAG}": ":image",
|
||||
"index.docker.io/prysmaticlabs/prysm-beacon-chain:latest": ":image",
|
||||
"index.docker.io/prysmaticlabs/prysm-beacon-chain:{DOCKER_TAG}": ":image",
|
||||
},
|
||||
tags = ["manual"],
|
||||
)
|
||||
|
||||
go_image_debug(
|
||||
name = "image_debug",
|
||||
image = ":image",
|
||||
tags = ["manual"],
|
||||
)
|
||||
|
||||
container_bundle(
|
||||
name = "image_bundle_debug",
|
||||
images = {
|
||||
"gcr.io/prysmaticlabs/prysm/beacon-chain:latest-debug": ":image_debug",
|
||||
"gcr.io/prysmaticlabs/prysm/beacon-chain:{DOCKER_TAG}-debug": ":image_debug",
|
||||
"index.docker.io/prysmaticlabs/prysm-beacon-chain:latest-debug": ":image_debug",
|
||||
"index.docker.io/prysmaticlabs/prysm-beacon-chain:{DOCKER_TAG}-debug": ":image_debug",
|
||||
},
|
||||
tags = ["manual"],
|
||||
)
|
||||
|
||||
go_image_alpine(
|
||||
name = "image_alpine",
|
||||
image = ":image",
|
||||
tags = ["manual"],
|
||||
)
|
||||
|
||||
container_bundle(
|
||||
name = "image_bundle_alpine",
|
||||
images = {
|
||||
"gcr.io/prysmaticlabs/prysm/beacon-chain:latest-alpine": ":image_alpine",
|
||||
"gcr.io/prysmaticlabs/prysm/beacon-chain:{DOCKER_TAG}-alpine": ":image_alpine",
|
||||
"index.docker.io/prysmaticlabs/prysm-beacon-chain:latest-alpine": ":image_alpine",
|
||||
"index.docker.io/prysmaticlabs/prysm-beacon-chain:{DOCKER_TAG}-alpine": ":image_alpine",
|
||||
},
|
||||
tags = ["manual"],
|
||||
)
|
||||
|
||||
docker_push(
|
||||
name = "push_images",
|
||||
bundle = ":image_bundle",
|
||||
tags = ["manual"],
|
||||
)
|
||||
|
||||
docker_push(
|
||||
name = "push_images_debug",
|
||||
bundle = ":image_bundle_debug",
|
||||
tags = ["manual"],
|
||||
)
|
||||
|
||||
docker_push(
|
||||
name = "push_images_alpine",
|
||||
bundle = ":image_bundle_alpine",
|
||||
tags = ["manual"],
|
||||
)
|
||||
|
||||
go_binary(
|
||||
name = "beacon-chain",
|
||||
embed = [":go_default_library"],
|
||||
visibility = [
|
||||
"//beacon-chain:__subpackages__",
|
||||
"//endtoend:__pkg__",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
size = "small",
|
||||
srcs = ["usage_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//shared/featureconfig:go_default_library",
|
||||
"@com_github_urfave_cli_v2//:go_default_library",
|
||||
moved_targets(
|
||||
[
|
||||
":push_images_debug",
|
||||
":push_images_alpine",
|
||||
":push_images",
|
||||
":image_bundle_debug",
|
||||
":image_debug",
|
||||
":image_bundle_alpine",
|
||||
":image_bundle",
|
||||
":image_with_creation_time",
|
||||
":image_alpine",
|
||||
":image",
|
||||
":go_default_test",
|
||||
":beacon-chain",
|
||||
],
|
||||
"//cmd/beacon-chain",
|
||||
)
|
||||
|
||||
@@ -1,10 +1,9 @@
|
||||
# Prysmatic Labs Beacon Chain Implementation
|
||||
|
||||
This is the main project folder for the beacon chain implementation of Ethereum Serenity in Golang by [Prysmatic Labs](https://prysmaticlabs.com). Before you begin, check out our [Contribution Guidelines](https://github.com/prysmaticlabs/prysm/blob/master/CONTRIBUTING.md) and join our active chat room on Discord or Gitter below:
|
||||
This is the main project folder for the beacon chain implementation of eth2 written in Go by [Prysmatic Labs](https://prysmaticlabs.com).
|
||||
|
||||
[](https://discord.gg/KSA7rPr)
|
||||
[](https://gitter.im/prysmaticlabs/prysm?badge&utm_medium=badge&utm_campaign=pr-badge)
|
||||
You can also read our main [README](https://github.com/prysmaticlabs/prysm/blob/master/README.md) and join our active chat room on Discord.
|
||||
|
||||
Also, read the latest beacon chain [design spec](https://github.com/ethereum/eth2.0-specs/blob/dev/specs/phase0/beacon-chain.md), this design spec serves as a source of truth for the beacon chain implementation we follow at prysmatic labs.
|
||||
Check out the [FAQs](https://notes.ethereum.org/9MMuzWeFTTSg-3Tz_YeiBA?view). Refer this page on [why](http://email.mg2.substack.com/c/eJwlj9GOhCAMRb9G3jRQQPGBh5mM8xsbhKrsDGIAM9m_X9xN2qZtbpt7rCm4xvSjj5gLOTOmL-809CMbKXFaOKakIl4DZYr2AGyQIGjHOnWH22OiYnoIxmDijaBhhS6fcy7GvjobA9m0mSXOcnZq5GBqLkilXBZhBsus5ZK89VbKkRt-a-BZI6DzZ7iur1lQ953KJ9bemnxgahuQU9XJu6pFPdu8meT8vragzEjpMCwMGLlgLo6h5z1JumQTu4IJd4v15xqMf_8ZLP_Y1bSLdbnrD-LL71i2Kj7DLxaWWF4)
|
||||
we are combining sharding and casper together.
|
||||
[](https://discord.gg/CTYGPUJ)
|
||||
|
||||
Also, read the official beacon chain [specification](https://github.com/ethereum/eth2.0-specs/blob/master/specs/phase0/beacon-chain.md); it serves as a source of truth for the beacon chain implementation we follow at Prysmatic Labs.
|
||||
|
||||
@@ -5,7 +5,6 @@ go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"chain_info.go",
|
||||
"checkpoint_info_cache.go",
|
||||
"head.go",
|
||||
"info.go",
|
||||
"init_sync_process_block.go",
|
||||
@@ -18,6 +17,7 @@ go_library(
|
||||
"receive_attestation.go",
|
||||
"receive_block.go",
|
||||
"service.go",
|
||||
"weak_subjectivity_checks.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/blockchain",
|
||||
visibility = [
|
||||
@@ -34,7 +34,7 @@ go_library(
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/state:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/flags:go_default_library",
|
||||
"//beacon-chain/db/filters:go_default_library",
|
||||
"//beacon-chain/forkchoice:go_default_library",
|
||||
"//beacon-chain/forkchoice/protoarray:go_default_library",
|
||||
"//beacon-chain/operations/attestations:go_default_library",
|
||||
@@ -43,22 +43,24 @@ go_library(
|
||||
"//beacon-chain/p2p:go_default_library",
|
||||
"//beacon-chain/powchain:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/interface:go_default_library",
|
||||
"//beacon-chain/state/stategen:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//proto/beacon/p2p/v1:go_default_library",
|
||||
"//shared/attestationutil:go_default_library",
|
||||
"//shared/bls:go_default_library",
|
||||
"//shared/bytesutil:go_default_library",
|
||||
"//shared/featureconfig:go_default_library",
|
||||
"//shared/hashutil:go_default_library",
|
||||
"//shared/mputil:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
"//shared/roughtime:go_default_library",
|
||||
"//shared/slotutil:go_default_library",
|
||||
"//shared/timeutils:go_default_library",
|
||||
"//shared/traceutil:go_default_library",
|
||||
"@com_github_emicklei_dot//:go_default_library",
|
||||
"@com_github_hashicorp_golang_lru//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
|
||||
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@io_opencensus_go//trace:go_default_library",
|
||||
@@ -77,17 +79,21 @@ go_test(
|
||||
name = "go_raceoff_test",
|
||||
size = "medium",
|
||||
srcs = [
|
||||
"blockchain_test.go",
|
||||
"chain_info_test.go",
|
||||
"checkpoint_info_cache_test.go",
|
||||
"checktags_test.go",
|
||||
"head_test.go",
|
||||
"info_test.go",
|
||||
"metrics_test.go",
|
||||
"process_attestation_test.go",
|
||||
"process_block_test.go",
|
||||
"receive_attestation_test.go",
|
||||
"receive_block_test.go",
|
||||
"service_test.go",
|
||||
"weak_subjectivity_checks_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
gotags = ["develop"],
|
||||
deps = [
|
||||
"//beacon-chain/blockchain/testing:go_default_library",
|
||||
"//beacon-chain/cache/depositcache:go_default_library",
|
||||
@@ -112,7 +118,6 @@ go_test(
|
||||
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
|
||||
"@com_github_gogo_protobuf//proto:go_default_library",
|
||||
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_ssz//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
|
||||
"@in_gopkg_d4l3k_messagediff_v1//:go_default_library",
|
||||
@@ -124,6 +129,7 @@ go_test(
|
||||
name = "go_raceon_test",
|
||||
srcs = [
|
||||
"chain_info_norace_test.go",
|
||||
"checktags_test.go",
|
||||
"receive_block_test.go",
|
||||
"service_norace_test.go",
|
||||
],
|
||||
@@ -135,6 +141,7 @@ go_test(
|
||||
# See: https://github.com/etcd-io/bbolt/issues/187.
|
||||
"-d=checkptr=0",
|
||||
],
|
||||
gotags = ["develop"],
|
||||
race = "on",
|
||||
tags = ["race_on"],
|
||||
deps = [
|
||||
@@ -159,7 +166,6 @@ go_test(
|
||||
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
|
||||
"@com_github_gogo_protobuf//proto:go_default_library",
|
||||
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_ssz//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
|
||||
"@org_golang_x_net//context:go_default_library",
|
||||
|
||||
beacon-chain/blockchain/blockchain_test.go (new file)
@@ -0,0 +1,15 @@
package blockchain

import (
    "io/ioutil"
    "testing"

    "github.com/sirupsen/logrus"
)

func TestMain(m *testing.M) {
    logrus.SetLevel(logrus.DebugLevel)
    logrus.SetOutput(ioutil.Discard)

    m.Run()
}
@@ -4,15 +4,15 @@ import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"go.opencensus.io/trace"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// ChainInfoFetcher defines a common interface for methods in blockchain service which
|
||||
@@ -27,7 +27,7 @@ type ChainInfoFetcher interface {
|
||||
// TimeFetcher retrieves the Eth2 data that's related to time.
|
||||
type TimeFetcher interface {
|
||||
GenesisTime() time.Time
|
||||
CurrentSlot() uint64
|
||||
CurrentSlot() types.Slot
|
||||
}
|
||||
|
||||
// GenesisFetcher retrieves the eth2 data related to its genesis.
|
||||
@@ -38,12 +38,12 @@ type GenesisFetcher interface {
|
||||
// HeadFetcher defines a common interface for methods in blockchain service which
|
||||
// directly retrieves head related data.
|
||||
type HeadFetcher interface {
|
||||
HeadSlot() uint64
|
||||
HeadSlot() types.Slot
|
||||
HeadRoot(ctx context.Context) ([]byte, error)
|
||||
HeadBlock(ctx context.Context) (*ethpb.SignedBeaconBlock, error)
|
||||
HeadState(ctx context.Context) (*state.BeaconState, error)
|
||||
HeadValidatorsIndices(ctx context.Context, epoch uint64) ([]uint64, error)
|
||||
HeadSeed(ctx context.Context, epoch uint64) ([32]byte, error)
|
||||
HeadValidatorsIndices(ctx context.Context, epoch types.Epoch) ([]types.ValidatorIndex, error)
|
||||
HeadSeed(ctx context.Context, epoch types.Epoch) ([32]byte, error)
|
||||
HeadGenesisValidatorRoot() [32]byte
|
||||
HeadETH1Data() *ethpb.Eth1Data
|
||||
ProtoArrayStore() *protoarray.Store
|
||||
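The hunk above replaces raw `uint64` slots, epochs, and validator indices with the distinct `types.Slot`, `types.Epoch`, and `types.ValidatorIndex` types from `github.com/prysmaticlabs/eth2-types`. A small illustrative sketch (hypothetical values, not taken from the diff) of what the stronger typing buys: crossing units now needs an explicit conversion, so slot/epoch mix-ups fail at compile time.

```go
package main

import (
    "fmt"

    types "github.com/prysmaticlabs/eth2-types"
)

func main() {
    var slot types.Slot = 100
    var slotsPerEpoch uint64 = 32

    // With plain uint64 values, slot/epoch confusion compiles silently.
    // With distinct defined types, crossing units requires an explicit conversion:
    epoch := types.Epoch(uint64(slot) / slotsPerEpoch)

    // epoch = slot // would not compile: mismatched types
    fmt.Println(slot, epoch) // 100 3
}
```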
@@ -96,7 +96,10 @@ func (s *Service) PreviousJustifiedCheckpt() *ethpb.Checkpoint {
|
||||
}
|
||||
|
||||
// HeadSlot returns the slot of the head of the chain.
|
||||
func (s *Service) HeadSlot() uint64 {
|
||||
func (s *Service) HeadSlot() types.Slot {
|
||||
s.headLock.RLock()
|
||||
defer s.headLock.RUnlock()
|
||||
|
||||
if !s.hasHeadState() {
|
||||
return 0
|
||||
}
|
||||
@@ -106,6 +109,9 @@ func (s *Service) HeadSlot() uint64 {
|
||||
|
||||
// HeadRoot returns the root of the head of the chain.
|
||||
func (s *Service) HeadRoot(ctx context.Context) ([]byte, error) {
|
||||
s.headLock.RLock()
|
||||
defer s.headLock.RUnlock()
|
||||
|
||||
if s.headRoot() != params.BeaconConfig().ZeroHash {
|
||||
r := s.headRoot()
|
||||
return r[:], nil
|
||||
@@ -131,6 +137,9 @@ func (s *Service) HeadRoot(ctx context.Context) ([]byte, error) {
|
||||
// If the head is nil from service struct,
|
||||
// it will attempt to get the head block from DB.
|
||||
func (s *Service) HeadBlock(ctx context.Context) (*ethpb.SignedBeaconBlock, error) {
|
||||
s.headLock.RLock()
|
||||
defer s.headLock.RUnlock()
|
||||
|
||||
if s.hasHeadState() {
|
||||
return s.headBlock(), nil
|
||||
}
|
||||
@@ -144,6 +153,8 @@ func (s *Service) HeadBlock(ctx context.Context) (*ethpb.SignedBeaconBlock, erro
|
||||
func (s *Service) HeadState(ctx context.Context) (*state.BeaconState, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.HeadState")
|
||||
defer span.End()
|
||||
s.headLock.RLock()
|
||||
defer s.headLock.RUnlock()
|
||||
|
||||
ok := s.hasHeadState()
|
||||
span.AddAttributes(trace.BoolAttribute("cache_hit", ok))
|
||||
@@ -152,19 +163,25 @@ func (s *Service) HeadState(ctx context.Context) (*state.BeaconState, error) {
|
||||
return s.headState(ctx), nil
|
||||
}
|
||||
|
||||
return s.beaconDB.HeadState(ctx)
|
||||
return s.stateGen.StateByRoot(ctx, s.headRoot())
|
||||
}
|
||||
|
||||
// HeadValidatorsIndices returns a list of active validator indices from the head view of a given epoch.
|
||||
func (s *Service) HeadValidatorsIndices(ctx context.Context, epoch uint64) ([]uint64, error) {
|
||||
func (s *Service) HeadValidatorsIndices(ctx context.Context, epoch types.Epoch) ([]types.ValidatorIndex, error) {
|
||||
s.headLock.RLock()
|
||||
defer s.headLock.RUnlock()
|
||||
|
||||
if !s.hasHeadState() {
|
||||
return []uint64{}, nil
|
||||
return []types.ValidatorIndex{}, nil
|
||||
}
|
||||
return helpers.ActiveValidatorIndices(s.headState(ctx), epoch)
|
||||
}
|
||||
|
||||
// HeadSeed returns the seed from the head view of a given epoch.
|
||||
func (s *Service) HeadSeed(ctx context.Context, epoch uint64) ([32]byte, error) {
|
||||
func (s *Service) HeadSeed(ctx context.Context, epoch types.Epoch) ([32]byte, error) {
|
||||
s.headLock.RLock()
|
||||
defer s.headLock.RUnlock()
|
||||
|
||||
if !s.hasHeadState() {
|
||||
return [32]byte{}, nil
|
||||
}
|
||||
@@ -174,6 +191,9 @@ func (s *Service) HeadSeed(ctx context.Context, epoch uint64) ([32]byte, error)
|
||||
|
||||
// HeadGenesisValidatorRoot returns genesis validator root of the head state.
|
||||
func (s *Service) HeadGenesisValidatorRoot() [32]byte {
|
||||
s.headLock.RLock()
|
||||
defer s.headLock.RUnlock()
|
||||
|
||||
if !s.hasHeadState() {
|
||||
return [32]byte{}
|
||||
}
|
||||
@@ -183,6 +203,9 @@ func (s *Service) HeadGenesisValidatorRoot() [32]byte {
|
||||
|
||||
// HeadETH1Data returns the eth1data of the current head state.
|
||||
func (s *Service) HeadETH1Data() *ethpb.Eth1Data {
|
||||
s.headLock.RLock()
|
||||
defer s.headLock.RUnlock()
|
||||
|
||||
if !s.hasHeadState() {
|
||||
return ðpb.Eth1Data{}
|
||||
}
|
||||
@@ -202,6 +225,9 @@ func (s *Service) GenesisTime() time.Time {
|
||||
// GenesisValidatorRoot returns the genesis validator
|
||||
// root of the chain.
|
||||
func (s *Service) GenesisValidatorRoot() [32]byte {
|
||||
s.headLock.RLock()
|
||||
defer s.headLock.RUnlock()
|
||||
|
||||
if !s.hasHeadState() {
|
||||
return [32]byte{}
|
||||
}
|
||||
@@ -210,6 +236,9 @@ func (s *Service) GenesisValidatorRoot() [32]byte {
|
||||
|
||||
// CurrentFork retrieves the latest fork information of the beacon chain.
|
||||
func (s *Service) CurrentFork() *pb.Fork {
|
||||
s.headLock.RLock()
|
||||
defer s.headLock.RUnlock()
|
||||
|
||||
if !s.hasHeadState() {
|
||||
return &pb.Fork{
|
||||
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
@@ -226,9 +255,6 @@ func (s *Service) IsCanonical(ctx context.Context, blockRoot [32]byte) (bool, er
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// If the block has not been finalized, the block must be recent. Check recent canonical roots
|
||||
// mapping which uses proto array fork choice.
|
||||
s.recentCanonicalBlocksLock.RLock()
|
||||
defer s.recentCanonicalBlocksLock.RUnlock()
|
||||
return s.recentCanonicalBlocks[blockRoot], nil
|
||||
// If the block has not been finalized, check fork choice store to see if the block is canonical
|
||||
return s.forkChoiceStore.IsCanonical(blockRoot), nil
|
||||
}
|
||||
|
||||
@@ -11,9 +11,9 @@ import (
|
||||
)
|
||||
|
||||
func TestHeadSlot_DataRace(t *testing.T) {
|
||||
db, _ := testDB.SetupDB(t)
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
s := &Service{
|
||||
beaconDB: db,
|
||||
beaconDB: beaconDB,
|
||||
}
|
||||
go func() {
|
||||
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
|
||||
@@ -22,11 +22,11 @@ func TestHeadSlot_DataRace(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestHeadRoot_DataRace(t *testing.T) {
|
||||
db, sc := testDB.SetupDB(t)
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
s := &Service{
|
||||
beaconDB: db,
|
||||
beaconDB: beaconDB,
|
||||
head: &head{root: [32]byte{'A'}},
|
||||
stateGen: stategen.New(db, sc),
|
||||
stateGen: stategen.New(beaconDB),
|
||||
}
|
||||
go func() {
|
||||
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
|
||||
@@ -36,11 +36,11 @@ func TestHeadRoot_DataRace(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestHeadBlock_DataRace(t *testing.T) {
|
||||
db, sc := testDB.SetupDB(t)
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
s := &Service{
|
||||
beaconDB: db,
|
||||
beaconDB: beaconDB,
|
||||
head: &head{block: ðpb.SignedBeaconBlock{}},
|
||||
stateGen: stategen.New(db, sc),
|
||||
stateGen: stategen.New(beaconDB),
|
||||
}
|
||||
go func() {
|
||||
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
|
||||
@@ -50,10 +50,10 @@ func TestHeadBlock_DataRace(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestHeadState_DataRace(t *testing.T) {
|
||||
db, sc := testDB.SetupDB(t)
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
s := &Service{
|
||||
beaconDB: db,
|
||||
stateGen: stategen.New(db, sc),
|
||||
beaconDB: beaconDB,
|
||||
stateGen: stategen.New(beaconDB),
|
||||
}
|
||||
go func() {
|
||||
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
|
||||
|
||||
@@ -6,8 +6,11 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
@@ -18,81 +21,82 @@ import (
|
||||
)
|
||||
|
||||
// Ensure Service implements chain info interface.
|
||||
var _ = ChainInfoFetcher(&Service{})
|
||||
var _ = TimeFetcher(&Service{})
|
||||
var _ = ForkFetcher(&Service{})
|
||||
var _ ChainInfoFetcher = (*Service)(nil)
|
||||
var _ TimeFetcher = (*Service)(nil)
|
||||
var _ ForkFetcher = (*Service)(nil)
|
||||
|
||||
func TestFinalizedCheckpt_Nil(t *testing.T) {
|
||||
db, sc := testDB.SetupDB(t)
|
||||
c := setupBeaconChain(t, db, sc)
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
c := setupBeaconChain(t, beaconDB)
|
||||
assert.DeepEqual(t, params.BeaconConfig().ZeroHash[:], c.FinalizedCheckpt().Root, "Incorrect pre chain start value")
|
||||
}
|
||||
|
||||
func TestHeadRoot_Nil(t *testing.T) {
|
||||
db, sc := testDB.SetupDB(t)
|
||||
c := setupBeaconChain(t, db, sc)
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
c := setupBeaconChain(t, beaconDB)
|
||||
headRoot, err := c.HeadRoot(context.Background())
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, params.BeaconConfig().ZeroHash[:], headRoot, "Incorrect pre chain start value")
|
||||
}
|
||||
|
||||
func TestFinalizedCheckpt_CanRetrieve(t *testing.T) {
|
||||
db, sc := testDB.SetupDB(t)
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cp := ðpb.Checkpoint{Epoch: 5, Root: bytesutil.PadTo([]byte("foo"), 32)}
|
||||
c := setupBeaconChain(t, db, sc)
|
||||
c := setupBeaconChain(t, beaconDB)
|
||||
c.finalizedCheckpt = cp
|
||||
|
||||
assert.Equal(t, cp.Epoch, c.FinalizedCheckpt().Epoch, "Unexpected finalized epoch")
|
||||
}
|
||||
|
||||
func TestFinalizedCheckpt_GenesisRootOk(t *testing.T) {
|
||||
db, sc := testDB.SetupDB(t)
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
genesisRoot := [32]byte{'A'}
|
||||
cp := ðpb.Checkpoint{Root: genesisRoot[:]}
|
||||
c := setupBeaconChain(t, db, sc)
|
||||
c := setupBeaconChain(t, beaconDB)
|
||||
c.finalizedCheckpt = cp
|
||||
c.genesisRoot = genesisRoot
|
||||
assert.DeepEqual(t, c.genesisRoot[:], c.FinalizedCheckpt().Root)
|
||||
}
|
||||
|
||||
func TestCurrentJustifiedCheckpt_CanRetrieve(t *testing.T) {
|
||||
db, sc := testDB.SetupDB(t)
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
c := setupBeaconChain(t, beaconDB)
|
||||
assert.Equal(t, params.BeaconConfig().ZeroHash, bytesutil.ToBytes32(c.CurrentJustifiedCheckpt().Root), "Unexpected justified epoch")
|
||||
cp := ðpb.Checkpoint{Epoch: 6, Root: bytesutil.PadTo([]byte("foo"), 32)}
|
||||
c := setupBeaconChain(t, db, sc)
|
||||
c.justifiedCheckpt = cp
|
||||
|
||||
assert.Equal(t, cp.Epoch, c.CurrentJustifiedCheckpt().Epoch, "Unexpected justified epoch")
|
||||
}
|
||||
|
||||
func TestJustifiedCheckpt_GenesisRootOk(t *testing.T) {
|
||||
db, sc := testDB.SetupDB(t)
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
c := setupBeaconChain(t, beaconDB)
|
||||
genesisRoot := [32]byte{'B'}
|
||||
cp := ðpb.Checkpoint{Root: genesisRoot[:]}
|
||||
c := setupBeaconChain(t, db, sc)
|
||||
c.justifiedCheckpt = cp
|
||||
c.genesisRoot = genesisRoot
|
||||
assert.DeepEqual(t, c.genesisRoot[:], c.CurrentJustifiedCheckpt().Root)
|
||||
}
|
||||
|
||||
func TestPreviousJustifiedCheckpt_CanRetrieve(t *testing.T) {
|
||||
db, sc := testDB.SetupDB(t)
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cp := ðpb.Checkpoint{Epoch: 7, Root: bytesutil.PadTo([]byte("foo"), 32)}
|
||||
c := setupBeaconChain(t, db, sc)
|
||||
c := setupBeaconChain(t, beaconDB)
|
||||
assert.Equal(t, params.BeaconConfig().ZeroHash, bytesutil.ToBytes32(c.CurrentJustifiedCheckpt().Root), "Unexpected justified epoch")
|
||||
c.prevJustifiedCheckpt = cp
|
||||
assert.Equal(t, cp.Epoch, c.PreviousJustifiedCheckpt().Epoch, "Unexpected previous justified epoch")
|
||||
}
|
||||
|
||||
func TestPrevJustifiedCheckpt_GenesisRootOk(t *testing.T) {
|
||||
db, sc := testDB.SetupDB(t)
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
genesisRoot := [32]byte{'C'}
|
||||
cp := ðpb.Checkpoint{Root: genesisRoot[:]}
|
||||
c := setupBeaconChain(t, db, sc)
|
||||
c := setupBeaconChain(t, beaconDB)
|
||||
c.prevJustifiedCheckpt = cp
|
||||
c.genesisRoot = genesisRoot
|
||||
assert.DeepEqual(t, c.genesisRoot[:], c.PreviousJustifiedCheckpt().Root)
|
||||
@@ -103,13 +107,30 @@ func TestHeadSlot_CanRetrieve(t *testing.T) {
|
||||
s, err := state.InitializeFromProto(&pb.BeaconState{})
|
||||
require.NoError(t, err)
|
||||
c.head = &head{slot: 100, state: s}
|
||||
assert.Equal(t, uint64(100), c.headSlot())
|
||||
assert.Equal(t, types.Slot(100), c.HeadSlot())
|
||||
}
|
||||
|
||||
func TestHeadRoot_CanRetrieve(t *testing.T) {
|
||||
c := &Service{}
|
||||
c.head = &head{root: [32]byte{'A'}}
|
||||
assert.Equal(t, [32]byte{'A'}, c.headRoot())
|
||||
r, err := c.HeadRoot(context.Background())
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, [32]byte{'A'}, bytesutil.ToBytes32(r))
|
||||
}
|
||||
|
||||
func TestHeadRoot_UseDB(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
c := &Service{beaconDB: beaconDB}
|
||||
c.head = &head{root: params.BeaconConfig().ZeroHash}
|
||||
b := testutil.NewBeaconBlock()
|
||||
br, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(context.Background(), b))
|
||||
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &pb.StateSummary{Root: br[:]}))
|
||||
require.NoError(t, beaconDB.SaveHeadBlockRoot(context.Background(), br))
|
||||
r, err := c.HeadRoot(context.Background())
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, br, bytesutil.ToBytes32(r))
|
||||
}
|
||||
|
||||
func TestHeadBlock_CanRetrieve(t *testing.T) {
|
||||
@@ -152,6 +173,17 @@ func TestCurrentFork_CanRetrieve(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestCurrentFork_NilHeadSTate(t *testing.T) {
|
||||
f := &pb.Fork{
|
||||
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
}
|
||||
c := &Service{}
|
||||
if !proto.Equal(c.CurrentFork(), f) {
|
||||
t.Error("Received incorrect fork version")
|
||||
}
|
||||
}
|
||||
|
||||
func TestGenesisValidatorRoot_CanRetrieve(t *testing.T) {
|
||||
// Should not panic if head state is nil.
|
||||
c := &Service{}
|
||||
@@ -164,8 +196,8 @@ func TestGenesisValidatorRoot_CanRetrieve(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestHeadETH1Data_Nil(t *testing.T) {
|
||||
db, sc := testDB.SetupDB(t)
|
||||
c := setupBeaconChain(t, db, sc)
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
c := setupBeaconChain(t, beaconDB)
|
||||
assert.DeepEqual(t, ðpb.Eth1Data{}, c.HeadETH1Data(), "Incorrect pre chain start value")
|
||||
}
|
||||
|
||||
@@ -179,3 +211,74 @@ func TestHeadETH1Data_CanRetrieve(t *testing.T) {
|
||||
t.Error("Received incorrect eth1 data")
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsCanonical_Ok(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
c := setupBeaconChain(t, beaconDB)
|
||||
|
||||
blk := testutil.NewBeaconBlock()
|
||||
blk.Block.Slot = 0
|
||||
root, err := blk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, blk))
|
||||
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, root))
|
||||
can, err := c.IsCanonical(ctx, root)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, can)
|
||||
|
||||
can, err = c.IsCanonical(ctx, [32]byte{'a'})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, false, can)
|
||||
}
|
||||
|
||||
func TestService_HeadValidatorsIndices(t *testing.T) {
|
||||
s, _ := testutil.DeterministicGenesisState(t, 10)
|
||||
c := &Service{}
|
||||
|
||||
c.head = &head{}
|
||||
indices, err := c.HeadValidatorsIndices(context.Background(), 0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(indices))
|
||||
|
||||
c.head = &head{state: s}
|
||||
indices, err = c.HeadValidatorsIndices(context.Background(), 0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 10, len(indices))
|
||||
}
|
||||
|
||||
func TestService_HeadSeed(t *testing.T) {
|
||||
s, _ := testutil.DeterministicGenesisState(t, 1)
|
||||
c := &Service{}
|
||||
seed, err := helpers.Seed(s, 0, params.BeaconConfig().DomainBeaconAttester)
|
||||
require.NoError(t, err)
|
||||
|
||||
c.head = &head{}
|
||||
root, err := c.HeadSeed(context.Background(), 0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, [32]byte{}, root)
|
||||
|
||||
c.head = &head{state: s}
|
||||
root, err = c.HeadSeed(context.Background(), 0)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, seed, root)
|
||||
}
|
||||
|
||||
func TestService_HeadGenesisValidatorRoot(t *testing.T) {
|
||||
s, _ := testutil.DeterministicGenesisState(t, 1)
|
||||
c := &Service{}
|
||||
|
||||
c.head = &head{}
|
||||
root := c.HeadGenesisValidatorRoot()
|
||||
require.Equal(t, [32]byte{}, root)
|
||||
|
||||
c.head = &head{state: s}
|
||||
root = c.HeadGenesisValidatorRoot()
|
||||
require.DeepEqual(t, root[:], s.GenesisValidatorRoot())
|
||||
}
|
||||
|
||||
func TestService_ProtoArrayStore(t *testing.T) {
|
||||
c := &Service{forkChoiceStore: protoarray.New(0, 0, [32]byte{})}
|
||||
p := c.ProtoArrayStore()
|
||||
require.Equal(t, 0, int(p.FinalizedEpoch()))
|
||||
}
|
||||
|
||||
@@ -1,83 +0,0 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
lru "github.com/hashicorp/golang-lru"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/hashutil"
|
||||
)
|
||||
|
||||
var (
|
||||
// This defines the max number of checkpoint info this cache can store.
|
||||
// Each cache is calculated at 3MB(30K validators), the total cache size is around 100MB.
|
||||
// Due to reorgs and long finality, it's good to keep the old cache around for quickly switch over.
|
||||
maxInfoSize = 32
|
||||
|
||||
// This tracks the number of check point info requests that aren't present in the cache.
|
||||
infoMiss = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "check_point_info_cache_miss",
|
||||
Help: "The number of check point info requests that aren't present in the cache.",
|
||||
})
|
||||
// This tracks the number of check point info requests that are in the cache.
|
||||
infoHit = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "check_point_info_cache_hit",
|
||||
Help: "The number of check point info requests that are present in the cache.",
|
||||
})
|
||||
)
|
||||
|
||||
// checkPtInfoCache is a struct with 1 LRU cache for looking up check point info by checkpoint.
|
||||
type checkPtInfoCache struct {
|
||||
cache *lru.Cache
|
||||
lock sync.RWMutex
|
||||
}
|
||||
|
||||
// newCheckPointInfoCache creates a new checkpoint info cache for storing/accessing processed check point info object.
|
||||
func newCheckPointInfoCache() *checkPtInfoCache {
|
||||
cache, err := lru.New(maxInfoSize)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return &checkPtInfoCache{
|
||||
cache: cache,
|
||||
}
|
||||
}
|
||||
|
||||
// get fetches check point info by check point. Returns the reference of the CheckPtInfo, nil if doesn't exist.
|
||||
func (c *checkPtInfoCache) get(cp *ethpb.Checkpoint) (*pb.CheckPtInfo, error) {
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
h, err := hashutil.HashProto(cp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
item, exists := c.cache.Get(h)
|
||||
|
||||
if exists && item != nil {
|
||||
infoHit.Inc()
|
||||
// Copy here is unnecessary since the returned check point info object
|
||||
// will only be used to verify attestation signature.
|
||||
return item.(*pb.CheckPtInfo), nil
|
||||
}
|
||||
|
||||
infoMiss.Inc()
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// put adds CheckPtInfo object to the cache. This method also trims the least
|
||||
// recently added CheckPtInfo object if the cache size has ready the max cache size limit.
|
||||
func (c *checkPtInfoCache) put(cp *ethpb.Checkpoint, info *pb.CheckPtInfo) error {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
h, err := hashutil.HashProto(cp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
c.cache.Add(h, info)
|
||||
return nil
|
||||
}
|
||||
@@ -1,41 +0,0 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
)
|
||||
|
||||
func TestHotStateCache_RoundTrip(t *testing.T) {
|
||||
c := newCheckPointInfoCache()
|
||||
cp := ðpb.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: bytesutil.PadTo([]byte{'a'}, 32),
|
||||
}
|
||||
info, err := c.get(cp)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, (*pb.CheckPtInfo)(nil), info)
|
||||
|
||||
i := &pb.CheckPtInfo{
|
||||
Seed: bytesutil.PadTo([]byte{'c'}, 32),
|
||||
GenesisRoot: bytesutil.PadTo([]byte{'d'}, 32),
|
||||
ActiveIndices: []uint64{0, 1, 2, 3},
|
||||
}
|
||||
|
||||
require.NoError(t, c.put(cp, i))
|
||||
info, err = c.get(cp)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, info, i)
|
||||
}
|
||||
|
||||
func TestHotStateCache_CanPrune(t *testing.T) {
|
||||
c := newCheckPointInfoCache()
|
||||
for i := 0; i < maxInfoSize+1; i++ {
|
||||
cp := ðpb.Checkpoint{Epoch: uint64(i), Root: make([]byte, 32)}
|
||||
require.NoError(t, c.put(cp, &pb.CheckPtInfo{}))
|
||||
}
|
||||
require.Equal(t, len(c.cache.Keys()), maxInfoSize)
|
||||
}
|
||||
beacon-chain/blockchain/checktags_test.go (new file)
@@ -0,0 +1,7 @@
// +build !develop

package blockchain

func init() {
    log.Fatal("Tests in this package require extra build tag: re-run with `-tags develop`")
}
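For context on the guard above: it compiles only when the `develop` build tag is absent, so running this package's tests without the tag (the Bazel go_test rules above pass `gotags = ["develop"]`) fails immediately with the message shown. A hypothetical companion file, shown here only to illustrate how the pairing works, would use the inverse constraint:

```go
// +build develop

package blockchain

// developOnly compiles only when tests are run with `-tags develop`
// (or under Bazel with gotags = ["develop"]), which is what keeps the
// guard in checktags_test.go from firing. Hypothetical example file,
// not part of the actual diff.
func developOnly() bool { return true }
```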
@@ -6,13 +6,14 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
iface "github.com/prysmaticlabs/prysm/beacon-chain/state/interface"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/sirupsen/logrus"
|
||||
@@ -21,10 +22,10 @@ import (
|
||||
|
||||
// This defines the current chain service's view of head.
|
||||
type head struct {
|
||||
slot uint64 // current head slot.
|
||||
slot types.Slot // current head slot.
|
||||
root [32]byte // current head root.
|
||||
block *ethpb.SignedBeaconBlock // current head block.
|
||||
state *state.BeaconState // current head state.
|
||||
state *stateTrie.BeaconState // current head state.
|
||||
}
|
||||
|
||||
// Determined the head from the fork choice service and saves its new data
|
||||
@@ -52,12 +53,24 @@ func (s *Service) updateHead(ctx context.Context, balances []uint64) error {
|
||||
if headStartRoot == params.BeaconConfig().ZeroHash {
|
||||
headStartRoot = s.genesisRoot
|
||||
}
|
||||
headRoot, err := s.forkChoiceStore.Head(ctx, j.Epoch, headStartRoot, balances, f.Epoch)
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
// In order to process head, fork choice store requires justified info.
|
||||
// If the fork choice store is missing justified block info, a node should
|
||||
// re-initiate fork choice store using the latest justified info.
|
||||
// This recovers a fatal condition and should not happen in run time.
|
||||
if !s.forkChoiceStore.HasNode(headStartRoot) {
|
||||
jb, err := s.beaconDB.Block(ctx, headStartRoot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s.forkChoiceStore = protoarray.New(j.Epoch, f.Epoch, bytesutil.ToBytes32(f.Root))
|
||||
if err := s.insertBlockToForkChoiceStore(ctx, jb.Block, headStartRoot, f, j); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err := s.updateRecentCanonicalBlocks(ctx, headRoot); err != nil {
|
||||
headRoot, err := s.forkChoiceStore.Head(ctx, j.Epoch, headStartRoot, balances, f.Epoch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -72,13 +85,17 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error {
|
||||
defer span.End()
|
||||
|
||||
// Do nothing if head hasn't changed.
|
||||
if headRoot == s.headRoot() {
|
||||
r, err := s.HeadRoot(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if headRoot == bytesutil.ToBytes32(r) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// If the head state is not available, just return nil.
|
||||
// There's nothing to cache
|
||||
if !s.stateGen.StateSummaryExists(ctx, headRoot) {
|
||||
if !s.beaconDB.HasStateSummary(ctx, headRoot) {
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -101,16 +118,17 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error {
|
||||
}
|
||||
|
||||
// A chain re-org occurred, so we fire an event notifying the rest of the services.
|
||||
if bytesutil.ToBytes32(newHeadBlock.Block.ParentRoot) != s.headRoot() {
|
||||
headSlot := s.HeadSlot()
|
||||
if bytesutil.ToBytes32(newHeadBlock.Block.ParentRoot) != bytesutil.ToBytes32(r) {
|
||||
log.WithFields(logrus.Fields{
|
||||
"newSlot": fmt.Sprintf("%d", newHeadBlock.Block.Slot),
|
||||
"oldSlot": fmt.Sprintf("%d", s.headSlot()),
|
||||
"oldSlot": fmt.Sprintf("%d", headSlot),
|
||||
}).Debug("Chain reorg occurred")
|
||||
s.stateNotifier.StateFeed().Send(&feed.Event{
|
||||
Type: statefeed.Reorg,
|
||||
Data: &statefeed.ReorgData{
|
||||
NewSlot: newHeadBlock.Block.Slot,
|
||||
OldSlot: s.headSlot(),
|
||||
OldSlot: headSlot,
|
||||
},
|
||||
})
|
||||
|
||||
@@ -131,7 +149,10 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error {
// This gets called to update canonical root mapping. It does not save head block
// root in DB. With the inception of initial-sync-cache-state flag, it uses finalized
// check point as anchors to resume sync therefore head is no longer needed to be saved on per slot basis.
func (s *Service) saveHeadNoDB(ctx context.Context, b *ethpb.SignedBeaconBlock, r [32]byte, hs *state.BeaconState) error {
func (s *Service) saveHeadNoDB(ctx context.Context, b *ethpb.SignedBeaconBlock, r [32]byte, hs *stateTrie.BeaconState) error {
if err := helpers.VerifyNilBeaconBlock(b); err != nil {
return err
}
cachedHeadRoot, err := s.HeadRoot(ctx)
if err != nil {
return errors.Wrap(err, "could not get head root from cache")
@@ -140,16 +161,12 @@ func (s *Service) saveHeadNoDB(ctx context.Context, b *ethpb.SignedBeaconBlock,
return nil
}

if b == nil || b.Block == nil {
return errors.New("cannot save nil head block")
}

s.setHeadInitialSync(r, stateTrie.CopySignedBeaconBlock(b), hs)
return nil
}

// This sets head view object which is used to track the head slot, root, block and state.
func (s *Service) setHead(root [32]byte, block *ethpb.SignedBeaconBlock, state *state.BeaconState) {
func (s *Service) setHead(root [32]byte, block *ethpb.SignedBeaconBlock, state *stateTrie.BeaconState) {
s.headLock.Lock()
defer s.headLock.Unlock()

@@ -165,7 +182,7 @@ func (s *Service) setHead(root [32]byte, block *ethpb.SignedBeaconBlock, state *
// This sets head view object which is used to track the head slot, root, block and state. The method
// assumes that state being passed into the method will not be modified by any other alternate
// caller which holds the state's reference.
func (s *Service) setHeadInitialSync(root [32]byte, block *ethpb.SignedBeaconBlock, state *state.BeaconState) {
func (s *Service) setHeadInitialSync(root [32]byte, block *ethpb.SignedBeaconBlock, state *stateTrie.BeaconState) {
s.headLock.Lock()
defer s.headLock.Unlock()

@@ -179,91 +196,51 @@ func (s *Service) setHeadInitialSync(root [32]byte, block *ethpb.SignedBeaconBlo
}

// This returns the head slot.
func (s *Service) headSlot() uint64 {
s.headLock.RLock()
defer s.headLock.RUnlock()

// This is a lock free version.
func (s *Service) headSlot() types.Slot {
return s.head.slot
}

// This returns the head root.
// It does a full copy on head root for immutability.
// This is a lock free version.
func (s *Service) headRoot() [32]byte {
if s.head == nil {
return params.BeaconConfig().ZeroHash
}

s.headLock.RLock()
defer s.headLock.RUnlock()

return s.head.root
}

// This returns the head block.
// It does a full copy on head block for immutability.
// This is a lock free version.
func (s *Service) headBlock() *ethpb.SignedBeaconBlock {
s.headLock.RLock()
defer s.headLock.RUnlock()

return stateTrie.CopySignedBeaconBlock(s.head.block)
}

// This returns the head state.
// It does a full copy on head state for immutability.
// This is a lock free version.
func (s *Service) headState(ctx context.Context) *stateTrie.BeaconState {
ctx, span := trace.StartSpan(ctx, "blockChain.headState")
defer span.End()

s.headLock.RLock()
defer s.headLock.RUnlock()

return s.head.state.Copy()
}

// This returns the genesis validator root of the head state.
// This is a lock free version.
func (s *Service) headGenesisValidatorRoot() [32]byte {
s.headLock.RLock()
defer s.headLock.RUnlock()

return bytesutil.ToBytes32(s.head.state.GenesisValidatorRoot())
}

// Returns true if head state exists.
// This is the lock free version.
func (s *Service) hasHeadState() bool {
s.headLock.RLock()
defer s.headLock.RUnlock()

return s.head != nil && s.head.state != nil
}

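The accessors above follow a copy-on-read pattern: fixed-size roots are returned by value, while blocks and states are deep-copied so callers cannot mutate the cached head. A hedged sketch of the same idea with hypothetical, simplified types:

package main

import "fmt"

type block struct{ Slot uint64 }

type head struct {
	root  [32]byte
	block *block
}

type service struct{ head *head }

// headRoot returns the cached root; [32]byte is a value type, so the caller
// already receives an independent copy.
func (s *service) headRoot() [32]byte {
	if s.head == nil {
		return [32]byte{}
	}
	return s.head.root
}

// headBlock returns a deep copy of the cached head block for immutability.
func (s *service) headBlock() *block {
	cpy := *s.head.block
	return &cpy
}

func main() {
	s := &service{head: &head{root: [32]byte{'a'}, block: &block{Slot: 7}}}
	b := s.headBlock()
	b.Slot = 999                   // mutating the copy...
	fmt.Println(s.head.block.Slot) // ...leaves the cached head at 7
}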
// This updates recent canonical block mapping. It uses input head root and retrieves
// all the canonical block roots that are ancestor of the input head block root.
func (s *Service) updateRecentCanonicalBlocks(ctx context.Context, headRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.updateRecentCanonicalBlocks")
defer span.End()

s.recentCanonicalBlocksLock.Lock()
defer s.recentCanonicalBlocksLock.Unlock()

s.recentCanonicalBlocks = make(map[[32]byte]bool)
s.recentCanonicalBlocks[headRoot] = true
nodes := s.forkChoiceStore.Nodes()
node := s.forkChoiceStore.Node(headRoot)
if node == nil {
return nil
}

for node.Parent() != protoarray.NonExistentNode {
if ctx.Err() != nil {
return ctx.Err()
}
node = nodes[node.Parent()]
s.recentCanonicalBlocks[node.Root()] = true
}

return nil
}

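The function above is an ancestor walk: start at the head, follow parent links, and mark every visited root canonical; anything on a forked branch stays unmarked. A simplified stand-alone version of the same walk, with a plain parent map instead of the protoarray node list:

package main

import "fmt"

func markCanonical(head [32]byte, parents map[[32]byte][32]byte) map[[32]byte]bool {
	canonical := map[[32]byte]bool{head: true}
	cur := head
	for {
		parent, ok := parents[cur]
		if !ok {
			return canonical // reached a root with no known parent
		}
		canonical[parent] = true
		cur = parent
	}
}

func main() {
	a, b, c := [32]byte{'a'}, [32]byte{'b'}, [32]byte{'c'}
	// b and c both build on a; b is the current head, c is a forked branch.
	parents := map[[32]byte][32]byte{b: a, c: a}
	canon := markCanonical(b, parents)
	fmt.Println(canon[b], canon[a], canon[c]) // true true false
}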
// This caches justified state balances to be used for fork choice.
func (s *Service) cacheJustifiedStateBalances(ctx context.Context, justifiedRoot [32]byte) error {
if err := s.beaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
@@ -292,7 +269,7 @@ func (s *Service) cacheJustifiedStateBalances(ctx context.Context, justifiedRoot
epoch := helpers.CurrentEpoch(justifiedState)

justifiedBalances := make([]uint64, justifiedState.NumValidators())
if err := justifiedState.ReadFromEveryValidator(func(idx int, val *stateTrie.ReadOnlyValidator) error {
if err := justifiedState.ReadFromEveryValidator(func(idx int, val iface.ReadOnlyValidator) error {
if helpers.IsActiveValidatorUsingTrie(val, epoch) {
justifiedBalances[idx] = val.EffectiveBalance()
} else {

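For fork choice, each validator contributes its effective balance only if it is active at the justified epoch; inactive validators contribute zero. A hedged sketch of that rule with simplified stand-in validator records (the spec's activity test is activation_epoch <= epoch < exit_epoch):

package main

import "fmt"

type validator struct {
	EffectiveBalance uint64
	ActivationEpoch  uint64
	ExitEpoch        uint64
}

// justifiedBalances mirrors the cache above: one entry per validator index,
// effective balance if active at the given epoch, zero otherwise.
func justifiedBalances(vals []validator, epoch uint64) []uint64 {
	balances := make([]uint64, len(vals))
	for i, v := range vals {
		if v.ActivationEpoch <= epoch && epoch < v.ExitEpoch {
			balances[i] = v.EffectiveBalance
		}
	}
	return balances
}

func main() {
	vals := []validator{
		{EffectiveBalance: 32e9, ActivationEpoch: 0, ExitEpoch: ^uint64(0)},  // active
		{EffectiveBalance: 32e9, ActivationEpoch: 10, ExitEpoch: ^uint64(0)}, // not yet active at epoch 5
	}
	fmt.Println(justifiedBalances(vals, 5)) // [32000000000 0]
}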
@@ -5,6 +5,8 @@ import (
"context"
"testing"

types "github.com/prysmaticlabs/eth2-types"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/testutil"
@@ -14,21 +16,21 @@ import (
)

func TestSaveHead_Same(t *testing.T) {
db, sc := testDB.SetupDB(t)
service := setupBeaconChain(t, db, sc)
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)

r := [32]byte{'A'}
service.head = &head{slot: 0, root: r}

require.NoError(t, service.saveHead(context.Background(), r))
assert.Equal(t, uint64(0), service.headSlot(), "Head did not stay the same")
assert.Equal(t, types.Slot(0), service.headSlot(), "Head did not stay the same")
assert.Equal(t, r, service.headRoot(), "Head did not stay the same")
}

func TestSaveHead_Different(t *testing.T) {
ctx := context.Background()
db, sc := testDB.SetupDB(t)
service := setupBeaconChain(t, db, sc)
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)

oldRoot := [32]byte{'A'}
service.head = &head{slot: 0, root: oldRoot}
@@ -40,28 +42,27 @@ func TestSaveHead_Different(t *testing.T) {
require.NoError(t, service.beaconDB.SaveBlock(context.Background(), newHeadSignedBlock))
newRoot, err := newHeadBlock.HashTreeRoot()
require.NoError(t, err)
headState := testutil.NewBeaconState()
headState, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, headState.SetSlot(1))
require.NoError(t, service.beaconDB.SaveStateSummary(context.Background(), &pb.StateSummary{Slot: 1, Root: newRoot[:]}))
require.NoError(t, service.beaconDB.SaveState(context.Background(), headState, newRoot))
require.NoError(t, service.saveHead(context.Background(), newRoot))

assert.Equal(t, uint64(1), service.HeadSlot(), "Head did not change")
assert.Equal(t, types.Slot(1), service.HeadSlot(), "Head did not change")

cachedRoot, err := service.HeadRoot(context.Background())
require.NoError(t, err)
if !bytes.Equal(cachedRoot, newRoot[:]) {
t.Error("Head did not change")
}
assert.DeepEqual(t, cachedRoot, newRoot[:], "Head did not change")
assert.DeepEqual(t, newHeadSignedBlock, service.headBlock(), "Head did not change")
assert.DeepEqual(t, headState.CloneInnerState(), service.headState(ctx).CloneInnerState(), "Head did not change")
assert.DeepSSZEqual(t, headState.CloneInnerState(), service.headState(ctx).CloneInnerState(), "Head did not change")
}

func TestSaveHead_Different_Reorg(t *testing.T) {
ctx := context.Background()
hook := logTest.NewGlobal()
db, sc := testDB.SetupDB(t)
service := setupBeaconChain(t, db, sc)
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)

oldRoot := [32]byte{'A'}
service.head = &head{slot: 0, root: oldRoot}
@@ -75,13 +76,14 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
require.NoError(t, service.beaconDB.SaveBlock(context.Background(), newHeadSignedBlock))
newRoot, err := newHeadBlock.HashTreeRoot()
require.NoError(t, err)
headState := testutil.NewBeaconState()
headState, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, headState.SetSlot(1))
require.NoError(t, service.beaconDB.SaveStateSummary(context.Background(), &pb.StateSummary{Slot: 1, Root: newRoot[:]}))
require.NoError(t, service.beaconDB.SaveState(context.Background(), headState, newRoot))
require.NoError(t, service.saveHead(context.Background(), newRoot))

assert.Equal(t, uint64(1), service.HeadSlot(), "Head did not change")
assert.Equal(t, types.Slot(1), service.HeadSlot(), "Head did not change")

cachedRoot, err := service.HeadRoot(context.Background())
require.NoError(t, err)
@@ -89,46 +91,13 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
t.Error("Head did not change")
}
assert.DeepEqual(t, newHeadSignedBlock, service.headBlock(), "Head did not change")
assert.DeepEqual(t, headState.CloneInnerState(), service.headState(ctx).CloneInnerState(), "Head did not change")
assert.DeepSSZEqual(t, headState.CloneInnerState(), service.headState(ctx).CloneInnerState(), "Head did not change")
require.LogsContain(t, hook, "Chain reorg occurred")
}

func TestUpdateRecentCanonicalBlocks_CanUpdateWithoutParent(t *testing.T) {
db, sc := testDB.SetupDB(t)
service := setupBeaconChain(t, db, sc)

r := [32]byte{'a'}
require.NoError(t, service.updateRecentCanonicalBlocks(context.Background(), r))
canonical, err := service.IsCanonical(context.Background(), r)
require.NoError(t, err)
assert.Equal(t, true, canonical, "Block should be canonical")
}

func TestUpdateRecentCanonicalBlocks_CanUpdateWithParent(t *testing.T) {
db, sc := testDB.SetupDB(t)
service := setupBeaconChain(t, db, sc)
oldHead := [32]byte{'a'}
require.NoError(t, service.forkChoiceStore.ProcessBlock(context.Background(), 1, oldHead, [32]byte{'g'}, [32]byte{}, 0, 0))
currentHead := [32]byte{'b'}
require.NoError(t, service.forkChoiceStore.ProcessBlock(context.Background(), 3, currentHead, oldHead, [32]byte{}, 0, 0))
forkedRoot := [32]byte{'c'}
require.NoError(t, service.forkChoiceStore.ProcessBlock(context.Background(), 2, forkedRoot, oldHead, [32]byte{}, 0, 0))

require.NoError(t, service.updateRecentCanonicalBlocks(context.Background(), currentHead))
canonical, err := service.IsCanonical(context.Background(), currentHead)
require.NoError(t, err)
assert.Equal(t, true, canonical, "Block should be canonical")
canonical, err = service.IsCanonical(context.Background(), oldHead)
require.NoError(t, err)
assert.Equal(t, true, canonical, "Block should be canonical")
canonical, err = service.IsCanonical(context.Background(), forkedRoot)
require.NoError(t, err)
assert.Equal(t, false, canonical, "Block should not be canonical")
}

func TestCacheJustifiedStateBalances_CanCache(t *testing.T) {
db, sc := testDB.SetupDB(t)
service := setupBeaconChain(t, db, sc)
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)

state, _ := testutil.DeterministicGenesisState(t, 100)
r := [32]byte{'a'}
@@ -137,3 +106,19 @@ func TestCacheJustifiedStateBalances_CanCache(t *testing.T) {
require.NoError(t, service.cacheJustifiedStateBalances(context.Background(), r))
require.DeepEqual(t, service.getJustifiedBalances(), state.Balances(), "Incorrect justified balances")
}

func TestUpdateHead_MissingJustifiedRoot(t *testing.T) {
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)

b := testutil.NewBeaconBlock()
require.NoError(t, service.beaconDB.SaveBlock(context.Background(), b))
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)

service.justifiedCheckpt = &ethpb.Checkpoint{Root: r[:]}
service.finalizedCheckpt = &ethpb.Checkpoint{}
service.bestJustifiedCheckpt = &ethpb.Checkpoint{}

require.NoError(t, service.updateHead(context.Background(), []uint64{}))
}

@@ -34,7 +34,12 @@ const template = `<html>

// TreeHandler is a handler to serve /tree page in metrics.
func (s *Service) TreeHandler(w http.ResponseWriter, r *http.Request) {
if s.headState(r.Context()) == nil {
headState, err := s.HeadState(r.Context())
if err != nil {
log.WithError(err).Error("Could not get head state")
return
}
if headState == nil {
if _, err := w.Write([]byte("Unavailable during initial syncing")); err != nil {
log.WithError(err).Error("Failed to render p2p info page")
}
@@ -47,7 +52,7 @@ func (s *Service) TreeHandler(w http.ResponseWriter, r *http.Request) {
graph.Attr("labeljust", "l")

dotNodes := make([]*dot.Node, len(nodes))
avgBalance := uint64(averageBalance(s.headState(r.Context()).Balances()))
avgBalance := uint64(averageBalance(headState.Balances()))

for i := len(nodes) - 1; i >= 0; i-- {
// Construct label for each node.
@@ -63,7 +68,7 @@ func (s *Service) TreeHandler(w http.ResponseWriter, r *http.Request) {
dotN = graph.Node(index).Box().Attr("label", label)
}

if nodes[i].Slot() == s.headSlot() &&
if nodes[i].Slot() == s.HeadSlot() &&
nodes[i].BestDescendant() == ^uint64(0) &&
nodes[i].Parent() != ^uint64(0) {
dotN = dotN.Attr("color", "green")

@@ -20,19 +20,21 @@ func TestService_TreeHandler(t *testing.T) {
require.NoError(t, err)

ctx := context.Background()
db, sCache := testDB.SetupDB(t)
headState := testutil.NewBeaconState()
beaconDB := testDB.SetupDB(t)
headState, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, headState.SetBalances([]uint64{params.BeaconConfig().GweiPerEth}))
cfg := &Config{
BeaconDB: db,
BeaconDB: beaconDB,
ForkChoiceStore: protoarray.New(
0, // justifiedEpoch
0, // finalizedEpoch
[32]byte{'a'},
),
StateGen: stategen.New(db, sCache),
StateGen: stategen.New(beaconDB),
}
s, err := NewService(ctx, cfg)
require.NoError(t, err)
require.NoError(t, s.forkChoiceStore.ProcessBlock(ctx, 0, [32]byte{'a'}, [32]byte{'g'}, [32]byte{'c'}, 0, 0))
require.NoError(t, s.forkChoiceStore.ProcessBlock(ctx, 1, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'c'}, 0, 0))
s.setHead([32]byte{'a'}, testutil.NewBeaconBlock(), headState)

@@ -3,10 +3,12 @@ package blockchain
import (
"encoding/hex"
"fmt"
"time"

ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/timeutils"
"github.com/sirupsen/logrus"
)

@@ -14,22 +16,42 @@ var log = logrus.WithField("prefix", "blockchain")

// logs state transition related data every slot.
func logStateTransitionData(b *ethpb.BeaconBlock) {
log.WithFields(logrus.Fields{
"attestations": len(b.Body.Attestations),
"deposits": len(b.Body.Deposits),
"attesterSlashings": len(b.Body.AttesterSlashings),
"proposerSlashings": len(b.Body.ProposerSlashings),
"voluntaryExits": len(b.Body.VoluntaryExits),
}).Info("Finished applying state transition")
log := log.WithField("slot", b.Slot)
if len(b.Body.Attestations) > 0 {
log = log.WithField("attestations", len(b.Body.Attestations))
}
if len(b.Body.Deposits) > 0 {
log = log.WithField("deposits", len(b.Body.Deposits))
}
if len(b.Body.AttesterSlashings) > 0 {
log = log.WithField("attesterSlashings", len(b.Body.AttesterSlashings))
}
if len(b.Body.ProposerSlashings) > 0 {
log = log.WithField("proposerSlashings", len(b.Body.ProposerSlashings))
}
if len(b.Body.VoluntaryExits) > 0 {
log = log.WithField("voluntaryExits", len(b.Body.VoluntaryExits))
}
log.Info("Finished applying state transition")
}

func logBlockSyncStatus(block *ethpb.BeaconBlock, blockRoot [32]byte, finalized *ethpb.Checkpoint) {
func logBlockSyncStatus(block *ethpb.BeaconBlock, blockRoot [32]byte, finalized *ethpb.Checkpoint, receivedTime time.Time, genesisTime uint64) error {
startTime, err := helpers.SlotToTime(genesisTime, block.Slot)
if err != nil {
return err
}
log.WithFields(logrus.Fields{
"slot": block.Slot,
"slotInEpoch": block.Slot % params.BeaconConfig().SlotsPerEpoch,
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
"epoch": helpers.SlotToEpoch(block.Slot),
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root[:])[:8]),
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
}).Info("Synced new block")
log.WithFields(logrus.Fields{
"slot": block.Slot,
"sinceSlotStartTime": timeutils.Now().Sub(startTime),
"chainServiceProcessedTime": timeutils.Now().Sub(receivedTime),
}).Debug("Sync new block times")
return nil
}

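The "sinceSlotStartTime" field above is measured against the slot's wall-clock start, which is simply genesis_time + slot * SECONDS_PER_SLOT. A small sketch of that arithmetic (12-second slots are the mainnet value; other networks may differ):

package main

import (
	"fmt"
	"time"
)

const secondsPerSlot = 12

// slotStartTime returns the wall-clock time at which a slot begins, as
// helpers.SlotToTime computes it in Prysm.
func slotStartTime(genesisUnix, slot uint64) time.Time {
	return time.Unix(int64(genesisUnix+slot*secondsPerSlot), 0)
}

func main() {
	genesis := uint64(1606824023) // mainnet genesis time, for illustration
	fmt.Println(slotStartTime(genesis, 0).UTC())
	fmt.Println(slotStartTime(genesis, 100).UTC()) // 1200 seconds later
}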
beacon-chain/blockchain/log_test.go (new file, 63 lines)
@@ -0,0 +1,63 @@
package blockchain

import (
"testing"

ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
logTest "github.com/sirupsen/logrus/hooks/test"
)

func Test_logStateTransitionData(t *testing.T) {
tests := []struct {
name string
b *ethpb.BeaconBlock
want string
}{
{name: "empty block body",
b: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}},
want: "\"Finished applying state transition\" prefix=blockchain slot=0",
},
{name: "has attestation",
b: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{Attestations: []*ethpb.Attestation{{}}}},
want: "\"Finished applying state transition\" attestations=1 prefix=blockchain slot=0",
},
{name: "has deposit",
b: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{
Attestations: []*ethpb.Attestation{{}},
Deposits: []*ethpb.Deposit{{}}}},
want: "\"Finished applying state transition\" attestations=1 deposits=1 prefix=blockchain slot=0",
},
{name: "has attester slashing",
b: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{
AttesterSlashings: []*ethpb.AttesterSlashing{{}}}},
want: "\"Finished applying state transition\" attesterSlashings=1 prefix=blockchain slot=0",
},
{name: "has proposer slashing",
b: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{
ProposerSlashings: []*ethpb.ProposerSlashing{{}}}},
want: "\"Finished applying state transition\" prefix=blockchain proposerSlashings=1 slot=0",
},
{name: "has exit",
b: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{
VoluntaryExits: []*ethpb.SignedVoluntaryExit{{}}}},
want: "\"Finished applying state transition\" prefix=blockchain slot=0 voluntaryExits=1",
},
{name: "has everything",
b: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{
Attestations: []*ethpb.Attestation{{}},
Deposits: []*ethpb.Deposit{{}},
AttesterSlashings: []*ethpb.AttesterSlashing{{}},
ProposerSlashings: []*ethpb.ProposerSlashing{{}},
VoluntaryExits: []*ethpb.SignedVoluntaryExit{{}}}},
want: "\"Finished applying state transition\" attestations=1 attesterSlashings=1 deposits=1 prefix=blockchain proposerSlashings=1 slot=0 voluntaryExits=1",
},
}
for _, tt := range tests {
hook := logTest.NewGlobal()
t.Run(tt.name, func(t *testing.T) {
logStateTransitionData(tt.b)
require.LogsContain(t, hook, tt.want)
})
}
}

@@ -1,17 +1,16 @@
package blockchain

import (
"time"
"context"

"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
types "github.com/prysmaticlabs/eth2-types"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/roughtime"
)

var (
@@ -100,13 +99,6 @@ var (
Name: "beacon_reorg_total",
Help: "Count the number of times beacon chain has a reorg",
})
sentBlockPropagationHistogram = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "block_sent_latency_milliseconds",
Help: "Captures blocks broadcast time. Blocks sent in milliseconds distribution",
Buckets: []float64{1000, 2000, 3000, 4000, 5000, 6000},
},
)
attestationInclusionDelay = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "attestation_inclusion_delay_slots",
@@ -114,19 +106,10 @@ var (
Buckets: []float64{1, 2, 3, 4, 6, 32, 64},
},
)
// TODO(7141): Remove deprecated metrics below.
totalEligibleBalances = promauto.NewGauge(prometheus.GaugeOpts{
Name: "total_eligible_balances",
Help: "The total amount of ether, in gwei, that is eligible for voting of previous epoch",
})
totalVotedTargetBalances = promauto.NewGauge(prometheus.GaugeOpts{
Name: "total_voted_target_balances",
Help: "The total amount of ether, in gwei, that has been used in voting attestation target of previous epoch",
})
)

// reportSlotMetrics reports slot related metrics.
func reportSlotMetrics(stateSlot uint64, headSlot uint64, clockSlot uint64, finalizedCheckpoint *ethpb.Checkpoint) {
func reportSlotMetrics(stateSlot, headSlot, clockSlot types.Slot, finalizedCheckpoint *ethpb.Checkpoint) {
clockTimeSlot.Set(float64(clockSlot))
beaconSlot.Set(float64(stateSlot))
beaconHeadSlot.Set(float64(headSlot))
@@ -137,8 +120,8 @@ func reportSlotMetrics(stateSlot uint64, headSlot uint64, clockSlot uint64, fina
}

// reportEpochMetrics reports epoch related metrics.
func reportEpochMetrics(state *stateTrie.BeaconState) {
currentEpoch := state.Slot() / params.BeaconConfig().SlotsPerEpoch
func reportEpochMetrics(ctx context.Context, postState, headState *stateTrie.BeaconState) error {
currentEpoch := types.Epoch(postState.Slot() / params.BeaconConfig().SlotsPerEpoch)

// Validator instances
pendingInstances := 0
@@ -156,8 +139,8 @@ func reportEpochMetrics(state *stateTrie.BeaconState) {
slashingBalance := uint64(0)
slashingEffectiveBalance := uint64(0)

for i, validator := range state.Validators() {
bal, err := state.BalanceAtIndex(uint64(i))
for i, validator := range postState.Validators() {
bal, err := postState.BalanceAtIndex(types.ValidatorIndex(i))
if err != nil {
log.Errorf("Could not load validator balance: %v", err)
continue
@@ -191,6 +174,10 @@ func reportEpochMetrics(state *stateTrie.BeaconState) {
activeBalance += bal
activeEffectiveBalance += validator.EffectiveBalance
}
activeInstances += exitingInstances + slashingInstances
activeBalance += exitingBalance + slashingBalance
activeEffectiveBalance += exitingEffectiveBalance + slashingEffectiveBalance

validatorsCount.WithLabelValues("Pending").Set(float64(pendingInstances))
validatorsCount.WithLabelValues("Active").Set(float64(activeInstances))
validatorsCount.WithLabelValues("Exiting").Set(float64(exitingInstances))
@@ -206,41 +193,36 @@ func reportEpochMetrics(state *stateTrie.BeaconState) {
validatorsEffectiveBalance.WithLabelValues("Slashing").Set(float64(slashingEffectiveBalance))

// Last justified slot
beaconCurrentJustifiedEpoch.Set(float64(state.CurrentJustifiedCheckpoint().Epoch))
beaconCurrentJustifiedRoot.Set(float64(bytesutil.ToLowInt64(state.CurrentJustifiedCheckpoint().Root)))
beaconCurrentJustifiedEpoch.Set(float64(postState.CurrentJustifiedCheckpoint().Epoch))
beaconCurrentJustifiedRoot.Set(float64(bytesutil.ToLowInt64(postState.CurrentJustifiedCheckpoint().Root)))

// Last previous justified slot
beaconPrevJustifiedEpoch.Set(float64(state.PreviousJustifiedCheckpoint().Epoch))
beaconPrevJustifiedRoot.Set(float64(bytesutil.ToLowInt64(state.PreviousJustifiedCheckpoint().Root)))
beaconPrevJustifiedEpoch.Set(float64(postState.PreviousJustifiedCheckpoint().Epoch))
beaconPrevJustifiedRoot.Set(float64(bytesutil.ToLowInt64(postState.PreviousJustifiedCheckpoint().Root)))

// Last finalized slot
beaconFinalizedEpoch.Set(float64(state.FinalizedCheckpointEpoch()))
beaconFinalizedRoot.Set(float64(bytesutil.ToLowInt64(state.FinalizedCheckpoint().Root)))
beaconFinalizedEpoch.Set(float64(postState.FinalizedCheckpointEpoch()))
beaconFinalizedRoot.Set(float64(bytesutil.ToLowInt64(postState.FinalizedCheckpoint().Root)))
currentEth1DataDepositCount.Set(float64(postState.Eth1Data().DepositCount))

currentEth1DataDepositCount.Set(float64(state.Eth1Data().DepositCount))

if precompute.Balances != nil {
totalEligibleBalances.Set(float64(precompute.Balances.ActivePrevEpoch))
totalVotedTargetBalances.Set(float64(precompute.Balances.PrevEpochTargetAttested))
prevEpochActiveBalances.Set(float64(precompute.Balances.ActivePrevEpoch))
prevEpochSourceBalances.Set(float64(precompute.Balances.PrevEpochAttested))
prevEpochTargetBalances.Set(float64(precompute.Balances.PrevEpochTargetAttested))
prevEpochHeadBalances.Set(float64(precompute.Balances.PrevEpochHeadAttested))
}
refMap := state.FieldReferencesCount()
for name, val := range refMap {
stateTrieReferences.WithLabelValues(name).Set(float64(val))
}
}

// This captures metrics for block sent time by subtracts slot start time.
func captureSentTimeMetric(genesisTime uint64, currentSlot uint64) error {
startTime, err := helpers.SlotToTime(genesisTime, currentSlot)
// Validator participation should be viewed on the canonical chain.
v, b, err := precompute.New(ctx, headState)
if err != nil {
return err
}
diffMs := roughtime.Now().Sub(startTime) / time.Millisecond
sentBlockPropagationHistogram.Observe(float64(diffMs))
_, b, err = precompute.ProcessAttestations(ctx, headState, v, b)
if err != nil {
return err
}
prevEpochActiveBalances.Set(float64(b.ActivePrevEpoch))
prevEpochSourceBalances.Set(float64(b.PrevEpochAttested))
prevEpochTargetBalances.Set(float64(b.PrevEpochTargetAttested))
prevEpochHeadBalances.Set(float64(b.PrevEpochHeadAttested))

refMap := postState.FieldReferencesCount()
for name, val := range refMap {
stateTrieReferences.WithLabelValues(name).Set(float64(val))
}

return nil
}

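The gauges set above (active, source-attested, target-attested and head-attested previous-epoch balances) are usually read as ratios: participation is attested balance divided by active balance. A hedged illustration with made-up numbers, not real chain data:

package main

import "fmt"

// participation returns the fraction of active balance that attested.
func participation(attestedGwei, activeGwei uint64) float64 {
	if activeGwei == 0 {
		return 0
	}
	return float64(attestedGwei) / float64(activeGwei)
}

func main() {
	activePrevEpoch := uint64(3_200_000) * 1_000_000_000         // e.g. 100k validators * 32 ETH, in gwei
	prevEpochTargetAttested := uint64(3_150_000) * 1_000_000_000 // hypothetical attested balance
	fmt.Printf("target participation: %.2f%%\n",
		100*participation(prevEpochTargetAttested, activePrevEpoch)) // about 98.4%
}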
beacon-chain/blockchain/metrics_test.go (new file, 42 lines)
@@ -0,0 +1,42 @@
package blockchain

import (
"context"
"testing"

eth "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/testutil"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
)

func TestReportEpochMetrics_BadHeadState(t *testing.T) {
s, err := testutil.NewBeaconState()
require.NoError(t, err)
h, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, h.SetValidators(nil))
err = reportEpochMetrics(context.Background(), s, h)
require.ErrorContains(t, "failed to initialize precompute: nil validators in state", err)
}

func TestReportEpochMetrics_BadAttestation(t *testing.T) {
s, err := testutil.NewBeaconState()
require.NoError(t, err)
h, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, h.SetCurrentEpochAttestations([]*pb.PendingAttestation{{InclusionDelay: 0}}))
err = reportEpochMetrics(context.Background(), s, h)
require.ErrorContains(t, "attestation with inclusion delay of 0", err)
}

func TestReportEpochMetrics_SlashedValidatorOutOfBound(t *testing.T) {
h, _ := testutil.DeterministicGenesisState(t, 1)
v, err := h.ValidatorAtIndex(0)
require.NoError(t, err)
v.Slashed = true
require.NoError(t, h.UpdateValidatorAtIndex(0, v))
require.NoError(t, h.SetCurrentEpochAttestations([]*pb.PendingAttestation{{InclusionDelay: 1, Data: testutil.HydrateAttestationData(&eth.AttestationData{})}}))
err = reportEpochMetrics(context.Background(), h, h)
require.ErrorContains(t, "slot 0 out of bounds", err)
}

@@ -2,28 +2,21 @@ package blockchain

import (
"context"
"fmt"

"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/shared/attestationutil"
"github.com/prysmaticlabs/prysm/shared/bls"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/roughtime"
"github.com/prysmaticlabs/prysm/shared/timeutils"
"go.opencensus.io/trace"
)

// ErrTargetRootNotInDB returns when the target block root of an attestation cannot be found in the
// beacon database.
var ErrTargetRootNotInDB = errors.New("target root does not exist in db")

// onAttestation is called whenever an attestation is received, verifies the attestation is valid and saves
// it to the DB. As a stateless function, this does not hold nor delay attestation based on the spec descriptions.
// The delay is handled by the caller in `processAttestation`.
// The delay is handled by the caller in `processAttestations`.
//
// Spec pseudocode definition:
//   def on_attestation(store: Store, attestation: Attestation) -> None:
@@ -43,120 +36,68 @@ var ErrTargetRootNotInDB = errors.New("target root does not exist in db")
//
//    # Update latest messages for attesting indices
//    update_latest_messages(store, indexed_attestation.attesting_indices, attestation)
// TODO(#6072): This code path is highly untested. Requires comprehensive tests and simpler refactoring.
func (s *Service) onAttestation(ctx context.Context, a *ethpb.Attestation) ([]uint64, error) {
func (s *Service) onAttestation(ctx context.Context, a *ethpb.Attestation) error {
ctx, span := trace.StartSpan(ctx, "blockChain.onAttestation")
defer span.End()

if a == nil {
return nil, errors.New("nil attestation")
if err := helpers.ValidateNilAttestation(a); err != nil {
return err
}
if a.Data == nil {
return nil, errors.New("nil attestation.Data field")
if err := helpers.ValidateSlotTargetEpoch(a.Data); err != nil {
return err
}
if a.Data.Target == nil {
return nil, errors.New("nil attestation.Data.Target field")
}

tgt := stateTrie.CopyCheckpoint(a.Data.Target)

if helpers.SlotToEpoch(a.Data.Slot) != a.Data.Target.Epoch {
return nil, fmt.Errorf("data slot is not in the same epoch as target %d != %d", helpers.SlotToEpoch(a.Data.Slot), a.Data.Target.Epoch)
}

// Verify beacon node has seen the target block before.
if !s.hasBlock(ctx, bytesutil.ToBytes32(tgt.Root)) {
return nil, ErrTargetRootNotInDB
}

if featureconfig.Get().UseCheckPointInfoCache {
c, err := s.AttestationCheckPtInfo(ctx, a)
if err != nil {
return nil, err
}
if err := s.verifyAttTargetEpoch(ctx, uint64(s.genesisTime.Unix()), uint64(roughtime.Now().Unix()), tgt); err != nil {
return nil, err
}
if err := s.verifyBeaconBlock(ctx, a.Data); err != nil {
return nil, errors.Wrap(err, "could not verify attestation beacon block")
}
if err := s.verifyLMDFFGConsistent(ctx, a.Data.Target.Epoch, a.Data.Target.Root, a.Data.BeaconBlockRoot); err != nil {
return nil, errors.Wrap(err, "could not verify attestation beacon block")
}
if err := helpers.VerifySlotTime(uint64(s.genesisTime.Unix()), a.Data.Slot+1, params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil {
return nil, err
}
committee, err := helpers.BeaconCommittee(c.ActiveIndices, bytesutil.ToBytes32(c.Seed), a.Data.Slot, a.Data.CommitteeIndex)
if err != nil {
return nil, err
}
indexedAtt := attestationutil.ConvertToIndexed(ctx, a, committee)
if err := attestationutil.IsValidAttestationIndices(ctx, indexedAtt); err != nil {
return nil, err
}
domain, err := helpers.Domain(c.Fork, indexedAtt.Data.Target.Epoch, params.BeaconConfig().DomainBeaconAttester, c.GenesisRoot)
if err != nil {
return nil, err
}
indices := indexedAtt.AttestingIndices
pubkeys := []bls.PublicKey{}
for i := 0; i < len(indices); i++ {
pubkeyAtIdx := c.PubKeys[indices[i]]
pk, err := bls.PublicKeyFromBytes(pubkeyAtIdx)
if err != nil {
return nil, err
}
pubkeys = append(pubkeys, pk)
}
if err := attestationutil.VerifyIndexedAttestationSig(ctx, indexedAtt, pubkeys, domain); err != nil {
return nil, err
}
s.forkChoiceStore.ProcessAttestation(ctx, indexedAtt.AttestingIndices, bytesutil.ToBytes32(a.Data.BeaconBlockRoot), a.Data.Target.Epoch)

return indexedAtt.AttestingIndices, nil
}
// Note that target root check is ignored here because it was performed in sync's validation pipeline:
// validate_aggregate_proof.go and validate_beacon_attestation.go
// If missing target root were to fail in this method, it would have just failed in `getAttPreState`.

// Retrieve attestation's data beacon block pre state. Advance pre state to latest epoch if necessary and
// save it to the cache.
baseState, err := s.getAttPreState(ctx, tgt)
if err != nil {
return nil, err
return err
}

genesisTime := baseState.GenesisTime()

// Verify attestation target is from current epoch or previous epoch.
if err := s.verifyAttTargetEpoch(ctx, genesisTime, uint64(roughtime.Now().Unix()), tgt); err != nil {
return nil, err
if err := s.verifyAttTargetEpoch(ctx, genesisTime, uint64(timeutils.Now().Unix()), tgt); err != nil {
return err
}

// Verify attestation beacon block is known and not from the future.
if err := s.verifyBeaconBlock(ctx, a.Data); err != nil {
return nil, errors.Wrap(err, "could not verify attestation beacon block")
return errors.Wrap(err, "could not verify attestation beacon block")
}

// Verify LMG GHOST and FFG votes are consistent with each other.
if err := s.verifyLMDFFGConsistent(ctx, a.Data.Target.Epoch, a.Data.Target.Root, a.Data.BeaconBlockRoot); err != nil {
return nil, errors.Wrap(err, "could not verify attestation beacon block")
}
// Note that LMG GHOST and FFG consistency check is ignored because it was performed in sync's validation pipeline:
// validate_aggregate_proof.go and validate_beacon_attestation.go

// Verify attestations can only affect the fork choice of subsequent slots.
if err := helpers.VerifySlotTime(genesisTime, a.Data.Slot+1, params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil {
return nil, err
return err
}

// Use the target state to validate attestation and calculate the committees.
indexedAtt, err := s.verifyAttestation(ctx, baseState, a)
// Use the target state to verify attesting indices are valid.
committee, err := helpers.BeaconCommitteeFromState(baseState, a.Data.Slot, a.Data.CommitteeIndex)
if err != nil {
return nil, err
return err
}
indexedAtt, err := attestationutil.ConvertToIndexed(ctx, a, committee)
if err != nil {
return err
}
if err := attestationutil.IsValidAttestationIndices(ctx, indexedAtt); err != nil {
return err
}

if indexedAtt.AttestingIndices == nil {
return nil, errors.New("nil attesting indices")
}
// Note that signature verification is ignored here because it was performed in sync's validation pipeline:
// validate_aggregate_proof.go and validate_beacon_attestation.go
// We assume trusted attestation in this function has verified signature.

// Update forkchoice store with the new attestation for updating weight.
s.forkChoiceStore.ProcessAttestation(ctx, indexedAtt.AttestingIndices, bytesutil.ToBytes32(a.Data.BeaconBlockRoot), a.Data.Target.Epoch)

return indexedAtt.AttestingIndices, nil
return nil
}

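The conversion to an indexed attestation used above pairs the attestation's aggregation bitfield with the committee for that slot and index: each set bit selects the validator at that committee position, and the selected indices (sorted) become the attesting indices that fork choice then processes. A simplified model of that mapping, with a plain []bool standing in for the bitfield:

package main

import (
	"fmt"
	"sort"
)

// attestingIndices selects the committee members whose aggregation bit is
// set and returns their validator indices in sorted order.
func attestingIndices(aggregationBits []bool, committee []uint64) []uint64 {
	indices := make([]uint64, 0, len(committee))
	for i, bit := range aggregationBits {
		if bit && i < len(committee) {
			indices = append(indices, committee[i])
		}
	}
	sort.Slice(indices, func(a, b int) bool { return indices[a] < indices[b] })
	return indices
}

func main() {
	committee := []uint64{14, 3, 27, 9}
	bits := []bool{true, false, true, true}
	fmt.Println(attestingIndices(bits, committee)) // [9 14 27]
}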
@@ -1,28 +1,31 @@
package blockchain

import (
"bytes"
"context"
"fmt"
"strconv"

"github.com/pkg/errors"
types "github.com/prysmaticlabs/eth2-types"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/attestationutil"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/mputil"
"github.com/prysmaticlabs/prysm/shared/params"
)

// getAttPreState retrieves the att pre state by either from the cache or the DB.
func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (*stateTrie.BeaconState, error) {
s.checkpointStateLock.Lock()
defer s.checkpointStateLock.Unlock()

cachedState, err := s.checkpointState.StateByCheckpoint(c)
// Use a multilock to allow scoped holding of a mutex by a checkpoint root + epoch
// allowing us to behave smarter in terms of how this function is used concurrently.
epochKey := strconv.FormatUint(uint64(c.Epoch), 10 /* base 10 */)
lock := mputil.NewMultilock(string(c.Root) + epochKey)
lock.Lock()
defer lock.Unlock()
cachedState, err := s.checkpointStateCache.StateByCheckpoint(c)
if err != nil {
return nil, errors.Wrap(err, "could not get cached checkpoint state")
}
@@ -40,23 +43,31 @@ func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (*sta
return nil, err
}
if epochStartSlot > baseState.Slot() {
baseState = baseState.Copy()
baseState, err = state.ProcessSlots(ctx, baseState, epochStartSlot)
if err != nil {
return nil, errors.Wrapf(err, "could not process slots up to epoch %d", c.Epoch)
if featureconfig.Get().EnableNextSlotStateCache {
baseState, err = state.ProcessSlotsUsingNextSlotCache(ctx, baseState, c.Root, epochStartSlot)
if err != nil {
return nil, errors.Wrapf(err, "could not process slots up to epoch %d", c.Epoch)
}
} else {
baseState, err = state.ProcessSlots(ctx, baseState, epochStartSlot)
if err != nil {
return nil, errors.Wrapf(err, "could not process slots up to epoch %d", c.Epoch)
}
}
if err := s.checkpointState.AddCheckpointState(c, baseState); err != nil {
if err := s.checkpointStateCache.AddCheckpointState(c, baseState); err != nil {
return nil, errors.Wrap(err, "could not saved checkpoint state to cache")
}
return baseState, nil
}

has, err := s.stateGen.HasState(ctx, bytesutil.ToBytes32(c.Root))
// To avoid sharing the same state across checkpoint state cache and hot state cache,
// we don't add the state to check point cache.
has, err := s.stateGen.HasStateInCache(ctx, bytesutil.ToBytes32(c.Root))
if err != nil {
return nil, err
}
if !has {
if err := s.checkpointState.AddCheckpointState(c, baseState); err != nil {
if err := s.checkpointStateCache.AddCheckpointState(c, baseState); err != nil {
return nil, errors.Wrap(err, "could not saved checkpoint state to cache")
}
}

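The multilock introduced above serializes work per (checkpoint root, epoch) key instead of behind one service-wide mutex, so different checkpoints can be computed concurrently. Prysm uses its mputil multilock for this; the standard-library map-of-mutexes below is only a simplified stand-in for the same idea:

package main

import (
	"fmt"
	"sync"
)

type keyedLocker struct {
	mu    sync.Mutex
	locks map[string]*sync.Mutex
}

func newKeyedLocker() *keyedLocker {
	return &keyedLocker{locks: make(map[string]*sync.Mutex)}
}

// lockFor returns the mutex guarding a given checkpoint key, creating it on
// first use. Callers lock and unlock it around checkpoint-state work.
func (k *keyedLocker) lockFor(key string) *sync.Mutex {
	k.mu.Lock()
	defer k.mu.Unlock()
	if _, ok := k.locks[key]; !ok {
		k.locks[key] = &sync.Mutex{}
	}
	return k.locks[key]
}

func main() {
	kl := newKeyedLocker()
	key := fmt.Sprintf("%x-%d", [32]byte{'a'}, 3) // checkpoint root + epoch, as in the diff
	m := kl.lockFor(key)
	m.Lock()
	// ... compute or cache the checkpoint state for this key ...
	m.Unlock()
	fmt.Println("done")
}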
@@ -64,72 +75,11 @@ func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (*sta

}

// getAttCheckPtInfo retrieves the check point info given a check point. Check point info enables the node
// to efficiently verify attestation signature without using beacon state. This function utilizes
// the checkpoint info cache and will update the check point info cache on miss.
func (s *Service) getAttCheckPtInfo(ctx context.Context, c *ethpb.Checkpoint, e uint64) (*pb.CheckPtInfo, error) {
// Return checkpoint info if exists in cache.
info, err := s.checkPtInfoCache.get(c)
if err != nil {
return nil, errors.Wrap(err, "could not get cached checkpoint state")
}
if info != nil {
return info, nil
}

// Retrieve checkpoint state to compute checkpoint info.
baseState, err := s.stateGen.StateByRoot(ctx, bytesutil.ToBytes32(c.Root))
if err != nil {
return nil, errors.Wrapf(err, "could not get pre state for epoch %d", c.Epoch)
}
epochStartSlot, err := helpers.StartSlot(c.Epoch)
if err != nil {
return nil, err
}
if epochStartSlot > baseState.Slot() {
baseState = baseState.Copy()
baseState, err = state.ProcessSlots(ctx, baseState, epochStartSlot)
if err != nil {
return nil, errors.Wrapf(err, "could not process slots up to epoch %d", c.Epoch)
}
}
f := baseState.Fork()
g := bytesutil.ToBytes32(baseState.GenesisValidatorRoot())
seed, err := helpers.Seed(baseState, e, params.BeaconConfig().DomainBeaconAttester)
if err != nil {
return nil, err
}
indices, err := helpers.ActiveValidatorIndices(baseState, e)
if err != nil {
return nil, err
}
validators := baseState.ValidatorsReadOnly()
pks := make([][]byte, len(validators))
for i := 0; i < len(pks); i++ {
pk := validators[i].PublicKey()
pks[i] = pk[:]
}

// Cache and return the checkpoint info.
info = &pb.CheckPtInfo{
Fork: f,
GenesisRoot: g[:],
Seed: seed[:],
ActiveIndices: indices,
PubKeys: pks,
}
if err := s.checkPtInfoCache.put(c, info); err != nil {
return nil, err
}

return info, nil
}

// verifyAttTargetEpoch validates attestation is from the current or previous epoch.
func (s *Service) verifyAttTargetEpoch(ctx context.Context, genesisTime uint64, nowTime uint64, c *ethpb.Checkpoint) error {
currentSlot := (nowTime - genesisTime) / params.BeaconConfig().SecondsPerSlot
func (s *Service) verifyAttTargetEpoch(_ context.Context, genesisTime, nowTime uint64, c *ethpb.Checkpoint) error {
currentSlot := types.Slot((nowTime - genesisTime) / params.BeaconConfig().SecondsPerSlot)
currentEpoch := helpers.SlotToEpoch(currentSlot)
var prevEpoch uint64
var prevEpoch types.Epoch
// Prevents previous epoch under flow
if currentEpoch > 1 {
prevEpoch = currentEpoch - 1

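The target-epoch rule above derives the current epoch from wall-clock time and accepts only attestation targets from the current or previous epoch. A compact sketch of that arithmetic (12-second slots and 32-slot epochs are mainnet values):

package main

import "fmt"

const (
	secondsPerSlot = 12
	slotsPerEpoch  = 32
)

// targetEpochOK mirrors the check: target must be the current or previous
// epoch, with the previous epoch clamped at zero to avoid underflow.
func targetEpochOK(genesisUnix, nowUnix, targetEpoch uint64) bool {
	currentEpoch := (nowUnix - genesisUnix) / secondsPerSlot / slotsPerEpoch
	prevEpoch := uint64(0)
	if currentEpoch > 1 {
		prevEpoch = currentEpoch - 1
	}
	return targetEpoch == currentEpoch || targetEpoch == prevEpoch
}

func main() {
	genesis := uint64(0)
	now := genesis + 100*slotsPerEpoch*secondsPerSlot // exactly the start of epoch 100
	fmt.Println(targetEpochOK(genesis, now, 100)) // true
	fmt.Println(targetEpochOK(genesis, now, 99))  // true
	fmt.Println(targetEpochOK(genesis, now, 98))  // false
}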
@@ -152,41 +102,11 @@ func (s *Service) verifyBeaconBlock(ctx context.Context, data *ethpb.Attestation
if b == nil && s.hasInitSyncBlock(r) {
b = s.getInitSyncBlock(r)
}
if b == nil || b.Block == nil {
return fmt.Errorf("beacon block %#x does not exist", bytesutil.Trunc(data.BeaconBlockRoot))
if err := helpers.VerifyNilBeaconBlock(b); err != nil {
return err
}
if b.Block.Slot > data.Slot {
return fmt.Errorf("could not process attestation for future block, block.Slot=%d > attestation.Data.Slot=%d", b.Block.Slot, data.Slot)
}
return nil
}

// verifyLMDFFGConsistent verifies LMD GHOST and FFG votes are consistent with each other.
func (s *Service) verifyLMDFFGConsistent(ctx context.Context, ffgEpoch uint64, ffgRoot []byte, lmdRoot []byte) error {
ffgSlot, err := helpers.StartSlot(ffgEpoch)
if err != nil {
return err
}
r, err := s.ancestor(ctx, lmdRoot, ffgSlot)
if err != nil {
return err
}
if !bytes.Equal(ffgRoot, r) {
return errors.New("FFG and LMD votes are not consistent")
}

return nil
}

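The consistency rule in verifyLMDFFGConsistent is: walk back from the LMD (head vote) block to the slot at which the FFG target epoch starts; that ancestor must equal the FFG target root. A hedged sketch with blocks modeled as a simple slot-plus-parent map:

package main

import "fmt"

type blk struct {
	slot   uint64
	parent [32]byte
}

// ancestorAtSlot walks parent links from root until it reaches a block at or
// below the requested slot.
func ancestorAtSlot(root [32]byte, slot uint64, chain map[[32]byte]blk) [32]byte {
	cur := root
	for chain[cur].slot > slot {
		cur = chain[cur].parent
	}
	return cur
}

func main() {
	const slotsPerEpoch = 32
	a, b := [32]byte{'a'}, [32]byte{'b'}
	chain := map[[32]byte]blk{
		a: {slot: 32},            // block starting epoch 1
		b: {slot: 40, parent: a}, // LMD vote block inside epoch 1
	}
	ffgEpoch, ffgRoot, lmdRoot := uint64(1), a, b
	consistent := ancestorAtSlot(lmdRoot, ffgEpoch*slotsPerEpoch, chain) == ffgRoot
	fmt.Println(consistent) // true
}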
// verifyAttestation validates input attestation is valid.
func (s *Service) verifyAttestation(ctx context.Context, baseState *stateTrie.BeaconState, a *ethpb.Attestation) (*ethpb.IndexedAttestation, error) {
committee, err := helpers.BeaconCommitteeFromState(baseState, a.Data.Slot, a.Data.CommitteeIndex)
if err != nil {
return nil, err
}
indexedAtt := attestationutil.ConvertToIndexed(ctx, a, committee)
if err := blocks.VerifyIndexedAttestation(ctx, baseState, indexedAtt); err != nil {
return nil, errors.Wrap(err, "could not verify indexed attestation")
}
return indexedAtt, nil
}

@@ -2,12 +2,10 @@ package blockchain

import (
"context"
"strings"
"testing"

"github.com/gogo/protobuf/proto"
types "github.com/prysmaticlabs/eth2-types"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
@@ -15,51 +13,53 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
"github.com/prysmaticlabs/prysm/shared/timeutils"
)

func TestStore_OnAttestation(t *testing.T) {
func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
ctx := context.Background()
db, sc := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)

cfg := &Config{
BeaconDB: db,
BeaconDB: beaconDB,
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
StateGen: stategen.New(db, sc),
StateGen: stategen.New(beaconDB),
}
service, err := NewService(ctx, cfg)
require.NoError(t, err)

_, err = blockTree1(db, []byte{'g'})
_, err = blockTree1(t, beaconDB, []byte{'g'})
require.NoError(t, err)

BlkWithOutState := testutil.NewBeaconBlock()
BlkWithOutState.Block.Slot = 0
require.NoError(t, db.SaveBlock(ctx, BlkWithOutState))
require.NoError(t, beaconDB.SaveBlock(ctx, BlkWithOutState))
BlkWithOutStateRoot, err := BlkWithOutState.Block.HashTreeRoot()
require.NoError(t, err)

BlkWithStateBadAtt := testutil.NewBeaconBlock()
BlkWithStateBadAtt.Block.Slot = 1
require.NoError(t, db.SaveBlock(ctx, BlkWithStateBadAtt))
require.NoError(t, beaconDB.SaveBlock(ctx, BlkWithStateBadAtt))
BlkWithStateBadAttRoot, err := BlkWithStateBadAtt.Block.HashTreeRoot()
require.NoError(t, err)

s := testutil.NewBeaconState()
s, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.SetSlot(100*params.BeaconConfig().SlotsPerEpoch))
require.NoError(t, service.beaconDB.SaveState(ctx, s, BlkWithStateBadAttRoot))

BlkWithValidState := testutil.NewBeaconBlock()
BlkWithValidState.Block.Slot = 2
require.NoError(t, db.SaveBlock(ctx, BlkWithValidState))
require.NoError(t, beaconDB.SaveBlock(ctx, BlkWithValidState))

BlkWithValidStateRoot, err := BlkWithValidState.Block.HashTreeRoot()
require.NoError(t, err)
s = testutil.NewBeaconState()
s, err = testutil.NewBeaconState()
require.NoError(t, err)
err = s.SetFork(&pb.Fork{
Epoch: 0,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
@@ -69,47 +69,35 @@ func TestStore_OnAttestation(t *testing.T) {
require.NoError(t, service.beaconDB.SaveState(ctx, s, BlkWithValidStateRoot))

tests := []struct {
name string
a *ethpb.Attestation
wantErr bool
wantErrString string
name string
a *ethpb.Attestation
wantedErr string
}{
{
name: "attestation's data slot not aligned with target vote",
a: &ethpb.Attestation{Data: &ethpb.AttestationData{Slot: params.BeaconConfig().SlotsPerEpoch, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}}},
wantErr: true,
wantErrString: "data slot is not in the same epoch as target 1 != 0",
name: "attestation's data slot not aligned with target vote",
a: testutil.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: params.BeaconConfig().SlotsPerEpoch, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}}}),
wantedErr: "slot 32 does not match target epoch 0",
},
{
name: "attestation's target root not in db",
a: &ethpb.Attestation{Data: &ethpb.AttestationData{Target: &ethpb.Checkpoint{Root: bytesutil.PadTo([]byte{'A'}, 32)}}},
wantErr: true,
wantErrString: "target root does not exist in db",
},
{
name: "no pre state for attestations's target block",
a: &ethpb.Attestation{Data: &ethpb.AttestationData{Target: &ethpb.Checkpoint{Root: BlkWithOutStateRoot[:]}}},
wantErr: true,
wantErrString: "could not get pre state for epoch 0",
name: "no pre state for attestations's target block",
a: testutil.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Target: &ethpb.Checkpoint{Root: BlkWithOutStateRoot[:]}}}),
wantedErr: "could not get pre state for epoch 0",
},
{
name: "process attestation doesn't match current epoch",
a: &ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 100 * params.BeaconConfig().SlotsPerEpoch, Target: &ethpb.Checkpoint{Epoch: 100,
Root: BlkWithStateBadAttRoot[:]}}},
wantErr: true,
wantErrString: "target epoch 100 does not match current epoch",
a: testutil.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 100 * params.BeaconConfig().SlotsPerEpoch, Target: &ethpb.Checkpoint{Epoch: 100,
Root: BlkWithStateBadAttRoot[:]}}}),
wantedErr: "target epoch 100 does not match current epoch",
},
{
name: "process nil attestation",
a: nil,
wantErr: true,
wantErrString: "nil attestation",
name: "process nil attestation",
a: nil,
wantedErr: "attestation can't be nil",
},
{
name: "process nil field (a.Data) in attestation",
a: &ethpb.Attestation{},
wantErr: true,
wantErrString: "nil attestation.Data field",
name: "process nil field (a.Data) in attestation",
a: &ethpb.Attestation{},
wantedErr: "attestation's data can't be nil",
},
{
name: "process nil field (a.Target) in attestation",
@@ -122,159 +110,60 @@ func TestStore_OnAttestation(t *testing.T) {
AggregationBits: make([]byte, 1),
Signature: make([]byte, 96),
},
wantErr: true,
wantErrString: "nil attestation.Data.Target field",
wantedErr: "attestation's target can't be nil",
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
_, err := service.onAttestation(ctx, tt.a)
if tt.wantErr {
if err == nil || !strings.Contains(err.Error(), tt.wantErrString) {
t.Errorf("Store.onAttestation() error = %v, wantErr = %v", err, tt.wantErrString)
}
err := service.onAttestation(ctx, tt.a)
if tt.wantedErr != "" {
assert.ErrorContains(t, tt.wantedErr, err)
} else {
t.Error(err)
assert.NoError(t, err)
}
})
}
}

func TestStore_OnAttestationUsingCheckptCache(t *testing.T) {
resetCfg := featureconfig.InitWithReset(&featureconfig.Flags{UseCheckPointInfoCache: true})
defer resetCfg()

func TestStore_OnAttestation_Ok(t *testing.T) {
ctx := context.Background()
db, sc := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)

cfg := &Config{
BeaconDB: db,
BeaconDB: beaconDB,
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
StateGen: stategen.New(db, sc),
StateGen: stategen.New(beaconDB),
}
service, err := NewService(ctx, cfg)
require.NoError(t, err)

_, err = blockTree1(db, []byte{'g'})
genesisState, pks := testutil.DeterministicGenesisState(t, 64)
require.NoError(t, genesisState.SetGenesisTime(uint64(timeutils.Now().Unix())-params.BeaconConfig().SecondsPerSlot))
require.NoError(t, service.saveGenesisData(ctx, genesisState))
att, err := testutil.GenerateAttestations(genesisState, pks, 1, 0, false)
require.NoError(t, err)

BlkWithOutState := testutil.NewBeaconBlock()
BlkWithOutState.Block.Slot = 0
require.NoError(t, db.SaveBlock(ctx, BlkWithOutState))
BlkWithOutStateRoot, err := BlkWithOutState.Block.HashTreeRoot()
tRoot := bytesutil.ToBytes32(att[0].Data.Target.Root)
copied := genesisState.Copy()
copied, err = state.ProcessSlots(ctx, copied, 1)
require.NoError(t, err)

BlkWithStateBadAtt := testutil.NewBeaconBlock()
BlkWithStateBadAtt.Block.Slot = 1
require.NoError(t, db.SaveBlock(ctx, BlkWithStateBadAtt))
BlkWithStateBadAttRoot, err := BlkWithStateBadAtt.Block.HashTreeRoot()
require.NoError(t, err)

s := testutil.NewBeaconState()
require.NoError(t, s.SetSlot(100*params.BeaconConfig().SlotsPerEpoch))
require.NoError(t, service.beaconDB.SaveState(ctx, s, BlkWithStateBadAttRoot))

BlkWithValidState := testutil.NewBeaconBlock()
BlkWithValidState.Block.Slot = 2
require.NoError(t, db.SaveBlock(ctx, BlkWithValidState))

BlkWithValidStateRoot, err := BlkWithValidState.Block.HashTreeRoot()
require.NoError(t, err)
s = testutil.NewBeaconState()
err = s.SetFork(&pb.Fork{
Epoch: 0,
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
})
require.NoError(t, err)
require.NoError(t, service.beaconDB.SaveState(ctx, s, BlkWithValidStateRoot))

tests := []struct {
name string
a *ethpb.Attestation
wantErr bool
wantErrString string
}{
{
name: "attestation's data slot not aligned with target vote",
a: &ethpb.Attestation{Data: &ethpb.AttestationData{Slot: params.BeaconConfig().SlotsPerEpoch, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}}},
wantErr: true,
wantErrString: "data slot is not in the same epoch as target 1 != 0",
},
{
name: "attestation's target root not in db",
a: &ethpb.Attestation{Data: &ethpb.AttestationData{Target: &ethpb.Checkpoint{Root: bytesutil.PadTo([]byte{'A'}, 32)}}},
wantErr: true,
wantErrString: "target root does not exist in db",
},
{
name: "no pre state for attestations's target block",
a: &ethpb.Attestation{Data: &ethpb.AttestationData{Target: &ethpb.Checkpoint{Root: BlkWithOutStateRoot[:]}}},
wantErr: true,
wantErrString: "could not get pre state for epoch 0",
},
{
name: "process attestation doesn't match current epoch",
a: &ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 100 * params.BeaconConfig().SlotsPerEpoch, Target: &ethpb.Checkpoint{Epoch: 100,
Root: BlkWithStateBadAttRoot[:]}}},
wantErr: true,
wantErrString: "target epoch 100 does not match current epoch",
},
{
name: "process nil attestation",
a: nil,
wantErr: true,
wantErrString: "nil attestation",
},
{
name: "process nil field (a.Data) in attestation",
a: &ethpb.Attestation{},
wantErr: true,
wantErrString: "nil attestation.Data field",
},
{
name: "process nil field (a.Target) in attestation",
|
||||
a: ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
BeaconBlockRoot: make([]byte, 32),
|
||||
Target: nil,
|
||||
Source: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
},
|
||||
AggregationBits: make([]byte, 1),
|
||||
Signature: make([]byte, 96),
|
||||
},
|
||||
wantErr: true,
|
||||
wantErrString: "nil attestation.Data.Target field",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
_, err := service.onAttestation(ctx, tt.a)
|
||||
if tt.wantErr {
|
||||
if err == nil || !strings.Contains(err.Error(), tt.wantErrString) {
|
||||
t.Errorf("Store.onAttestation() error = %v, wantErr = %v", err, tt.wantErrString)
|
||||
}
|
||||
} else {
|
||||
t.Error(err)
|
||||
}
|
||||
})
|
||||
}
|
||||
require.NoError(t, service.beaconDB.SaveState(ctx, copied, tRoot))
|
||||
require.NoError(t, service.forkChoiceStore.ProcessBlock(ctx, 0, tRoot, tRoot, tRoot, 1, 1))
|
||||
require.NoError(t, service.onAttestation(ctx, att[0]))
|
||||
}
|
||||
|
||||
func TestStore_SaveCheckpointState(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db, sc := testDB.SetupDB(t)
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{
|
||||
BeaconDB: db,
|
||||
StateGen: stategen.New(db, sc),
|
||||
BeaconDB: beaconDB,
|
||||
StateGen: stategen.New(beaconDB),
|
||||
}
|
||||
service, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
s := testutil.NewBeaconState()
|
||||
s, err := testutil.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
err = s.SetFinalizedCheckpoint(ðpb.Checkpoint{Root: bytesutil.PadTo([]byte{'A'}, 32)})
|
||||
require.NoError(t, err)
|
||||
val := ðpb.Validator{
|
||||
@@ -313,11 +202,11 @@ func TestStore_SaveCheckpointState(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 1*params.BeaconConfig().SlotsPerEpoch, s1.Slot(), "Unexpected state slot")
|
||||
|
||||
s1, err = service.checkpointState.StateByCheckpoint(cp1)
|
||||
s1, err = service.checkpointStateCache.StateByCheckpoint(cp1)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 1*params.BeaconConfig().SlotsPerEpoch, s1.Slot(), "Unexpected state slot")
|
||||
|
||||
s2, err = service.checkpointState.StateByCheckpoint(cp2)
|
||||
s2, err = service.checkpointStateCache.StateByCheckpoint(cp2)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 2*params.BeaconConfig().SlotsPerEpoch, s2.Slot(), "Unexpected state slot")
|
||||
|
||||
@@ -336,29 +225,28 @@ func TestStore_SaveCheckpointState(t *testing.T) {
|
||||
|
||||
func TestStore_UpdateCheckpointState(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db, sc := testDB.SetupDB(t)
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{
|
||||
BeaconDB: db,
|
||||
StateGen: stategen.New(db, sc),
|
||||
BeaconDB: beaconDB,
|
||||
StateGen: stategen.New(beaconDB),
|
||||
}
|
||||
service, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
epoch := uint64(1)
|
||||
epoch := types.Epoch(1)
|
||||
baseState, _ := testutil.DeterministicGenesisState(t, 1)
|
||||
require.NoError(t, baseState.SetSlot(epoch*params.BeaconConfig().SlotsPerEpoch))
|
||||
checkpoint := ðpb.Checkpoint{Epoch: epoch, Root: bytesutil.PadTo([]byte("hi"), 32)}
|
||||
require.NoError(t, service.beaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(checkpoint.Root)))
|
||||
returned, err := service.getAttPreState(ctx, checkpoint)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, returned.Slot(), baseState.Slot(), "Incorrectly returned base state")
|
||||
assert.Equal(t, params.BeaconConfig().SlotsPerEpoch.Mul(uint64(checkpoint.Epoch)), returned.Slot(), "Incorrectly returned base state")
|
||||
|
||||
cached, err := service.checkpointState.StateByCheckpoint(checkpoint)
|
||||
cached, err := service.checkpointStateCache.StateByCheckpoint(checkpoint)
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, cached, "State should have been cached")
|
||||
assert.Equal(t, returned.Slot(), cached.Slot(), "State should have been cached")
|
||||
|
||||
epoch = uint64(2)
|
||||
epoch = 2
|
||||
newCheckpoint := ðpb.Checkpoint{Epoch: epoch, Root: bytesutil.PadTo([]byte("bye"), 32)}
|
||||
require.NoError(t, service.beaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(newCheckpoint.Root)))
|
||||
returned, err = service.getAttPreState(ctx, newCheckpoint)
|
||||
@@ -369,71 +257,65 @@ func TestStore_UpdateCheckpointState(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, returned.Slot(), baseState.Slot(), "Incorrectly returned base state")
|
||||
|
||||
cached, err = service.checkpointState.StateByCheckpoint(newCheckpoint)
|
||||
cached, err = service.checkpointStateCache.StateByCheckpoint(newCheckpoint)
|
||||
require.NoError(t, err)
|
||||
if !proto.Equal(returned.InnerStateUnsafe(), cached.InnerStateUnsafe()) {
|
||||
t.Error("Incorrectly cached base state")
|
||||
}
|
||||
require.DeepSSZEqual(t, returned.InnerStateUnsafe(), cached.InnerStateUnsafe())
|
||||
}
|
||||
|
||||
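The two tests above exercise getAttPreState together with the checkpointStateCache: a state computed for a checkpoint is stored once and then served from the cache on the next lookup. A rough sketch of such a checkpoint-keyed cache (the names and the []byte stand-in for a beacon state are illustrative, not Prysm's actual types, and the real cache is size-bounded):

package example

import "sync"

// Checkpoint mirrors the (epoch, root) pair used to key attestation pre-states.
type Checkpoint struct {
	Epoch uint64
	Root  [32]byte
}

// checkpointCache only shows the keying and lookup-or-store flow exercised by
// the tests above; Prysm's checkpointStateCache is more involved.
type checkpointCache struct {
	mu sync.RWMutex
	m  map[Checkpoint][]byte // value stands in for a beacon state
}

func newCheckpointCache() *checkpointCache {
	return &checkpointCache{m: make(map[Checkpoint][]byte)}
}

// StateByCheckpoint returns the cached state for cp, or nil if absent.
func (c *checkpointCache) StateByCheckpoint(cp Checkpoint) []byte {
	c.mu.RLock()
	defer c.mu.RUnlock()
	return c.m[cp]
}

// AddCheckpointState caches st under cp.
func (c *checkpointCache) AddCheckpointState(cp Checkpoint, st []byte) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.m[cp] = st
}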
func TestAttEpoch_MatchPrevEpoch(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db, _ := testDB.SetupDB(t)
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{BeaconDB: db}
|
||||
cfg := &Config{BeaconDB: beaconDB}
|
||||
service, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
nowTime := params.BeaconConfig().SlotsPerEpoch * params.BeaconConfig().SecondsPerSlot
|
||||
nowTime := uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().SecondsPerSlot
|
||||
require.NoError(t, service.verifyAttTargetEpoch(ctx, 0, nowTime, ðpb.Checkpoint{Root: make([]byte, 32)}))
|
||||
}
|
||||
|
||||
func TestAttEpoch_MatchCurrentEpoch(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db, _ := testDB.SetupDB(t)
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{BeaconDB: db}
|
||||
cfg := &Config{BeaconDB: beaconDB}
|
||||
service, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
nowTime := params.BeaconConfig().SlotsPerEpoch * params.BeaconConfig().SecondsPerSlot
|
||||
nowTime := uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().SecondsPerSlot
|
||||
require.NoError(t, service.verifyAttTargetEpoch(ctx, 0, nowTime, ðpb.Checkpoint{Epoch: 1}))
|
||||
}
|
||||
|
||||
func TestAttEpoch_NotMatch(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db, _ := testDB.SetupDB(t)
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{BeaconDB: db}
|
||||
cfg := &Config{BeaconDB: beaconDB}
|
||||
service, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
nowTime := 2 * params.BeaconConfig().SlotsPerEpoch * params.BeaconConfig().SecondsPerSlot
|
||||
nowTime := 2 * uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().SecondsPerSlot
|
||||
err = service.verifyAttTargetEpoch(ctx, 0, nowTime, ðpb.Checkpoint{Root: make([]byte, 32)})
|
||||
assert.ErrorContains(t, "target epoch 0 does not match current epoch 2 or prev epoch 1", err)
|
||||
}
|
||||
|
||||
func TestVerifyBeaconBlock_NoBlock(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db, _ := testDB.SetupDB(t)
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{BeaconDB: db}
|
||||
cfg := &Config{BeaconDB: beaconDB}
|
||||
service, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
d := ðpb.AttestationData{
|
||||
BeaconBlockRoot: make([]byte, 32),
|
||||
Target: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
Source: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
}
|
||||
assert.ErrorContains(t, "beacon block 0x000000000000 does not exist", service.verifyBeaconBlock(ctx, d))
|
||||
d := testutil.HydrateAttestationData(ðpb.AttestationData{})
|
||||
assert.ErrorContains(t, "signed beacon block can't be nil", service.verifyBeaconBlock(ctx, d))
|
||||
}
|
||||
|
||||
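The verifyBeaconBlock tests above and below check two failure modes: the attested block is unknown, or it lies in the attestation's future. A hedged sketch of those checks, with blockDB and verifyBeaconBlock as illustrative names rather than Prysm's API:

package example

import (
	"errors"
	"fmt"
)

// blockDB is a stand-in for the beacon DB used by the service.
type blockDB interface {
	BlockSlot(root [32]byte) (slot uint64, ok bool)
}

// verifyBeaconBlock sketches the checks exercised by the tests: the block an
// attestation votes for must exist, and it must not be newer than the
// attestation itself.
func verifyBeaconBlock(db blockDB, blockRoot [32]byte, attSlot uint64) error {
	slot, ok := db.BlockSlot(blockRoot)
	if !ok {
		return fmt.Errorf("beacon block %#x does not exist", blockRoot[:6])
	}
	if slot > attSlot {
		return errors.New("could not process attestation for future block")
	}
	return nil
}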
func TestVerifyBeaconBlock_futureBlock(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db, _ := testDB.SetupDB(t)
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{BeaconDB: db}
|
||||
cfg := &Config{BeaconDB: beaconDB}
|
||||
service, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -449,9 +331,9 @@ func TestVerifyBeaconBlock_futureBlock(t *testing.T) {
|
||||
|
||||
func TestVerifyBeaconBlock_OK(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db, _ := testDB.SetupDB(t)
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{BeaconDB: db}
|
||||
cfg := &Config{BeaconDB: beaconDB}
|
||||
service, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -465,11 +347,11 @@ func TestVerifyBeaconBlock_OK(t *testing.T) {
|
||||
assert.NoError(t, service.verifyBeaconBlock(ctx, d), "Did not receive the wanted error")
|
||||
}
|
||||
|
||||
func TestVerifyLMDFFGConsistent_NotOK(t *testing.T) {
|
||||
func TestVerifyFinalizedConsistency_InconsistentRoot(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db, _ := testDB.SetupDB(t)
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{BeaconDB: db, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
|
||||
cfg := &Config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
|
||||
service, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -478,6 +360,9 @@ func TestVerifyLMDFFGConsistent_NotOK(t *testing.T) {
|
||||
require.NoError(t, service.beaconDB.SaveBlock(ctx, b32))
|
||||
r32, err := b32.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
service.finalizedCheckpt = ðpb.Checkpoint{Epoch: 1}
|
||||
|
||||
b33 := testutil.NewBeaconBlock()
|
||||
b33.Block.Slot = 33
|
||||
b33.Block.ParentRoot = r32[:]
|
||||
@@ -485,15 +370,15 @@ func TestVerifyLMDFFGConsistent_NotOK(t *testing.T) {
|
||||
r33, err := b33.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
wanted := "FFG and LMD votes are not consistent"
|
||||
assert.ErrorContains(t, wanted, service.verifyLMDFFGConsistent(context.Background(), 1, []byte{'a'}, r33[:]))
|
||||
err = service.VerifyFinalizedConsistency(context.Background(), r33[:])
|
||||
require.ErrorContains(t, "Root and finalized store are not consistent", err)
|
||||
}
|
||||
|
||||
func TestVerifyLMDFFGConsistent_OK(t *testing.T) {
|
||||
func TestVerifyFinalizedConsistency_OK(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db, _ := testDB.SetupDB(t)
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{BeaconDB: db, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
|
||||
cfg := &Config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
|
||||
service, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -502,6 +387,9 @@ func TestVerifyLMDFFGConsistent_OK(t *testing.T) {
|
||||
require.NoError(t, service.beaconDB.SaveBlock(ctx, b32))
|
||||
r32, err := b32.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
service.finalizedCheckpt = ðpb.Checkpoint{Epoch: 1, Root: r32[:]}
|
||||
|
||||
b33 := testutil.NewBeaconBlock()
|
||||
b33.Block.Slot = 33
|
||||
b33.Block.ParentRoot = r32[:]
|
||||
@@ -509,50 +397,36 @@ func TestVerifyLMDFFGConsistent_OK(t *testing.T) {
|
||||
r33, err := b33.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
err = service.verifyLMDFFGConsistent(context.Background(), 1, r32[:], r33[:])
|
||||
assert.NoError(t, err, "Could not verify LMD and FFG votes to be consistent")
|
||||
err = service.VerifyFinalizedConsistency(context.Background(), r33[:])
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestGetAttCheckptInfo(t *testing.T) {
|
||||
func TestVerifyFinalizedConsistency_IsCanonical(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db, _ := testDB.SetupDB(t)
|
||||
cfg := &Config{BeaconDB: db, StateGen: stategen.New(db, cache.NewStateSummaryCache())}
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
|
||||
service, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
baseState, _ := testutil.DeterministicGenesisState(t, 128)
|
||||
b := testutil.NewBeaconBlock()
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.beaconDB.SaveState(ctx, baseState, r))
|
||||
require.NoError(t, service.beaconDB.SaveBlock(ctx, b))
|
||||
require.NoError(t, service.beaconDB.SaveGenesisBlockRoot(ctx, r))
|
||||
checkpoint := ðpb.Checkpoint{Root: r[:]}
|
||||
|
||||
returned, err := service.getAttCheckPtInfo(ctx, checkpoint, 0)
|
||||
b32 := testutil.NewBeaconBlock()
|
||||
b32.Block.Slot = 32
|
||||
r32, err := b32.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
seed, err := helpers.Seed(baseState, 0, params.BeaconConfig().DomainBeaconAttester)
|
||||
require.NoError(t, err)
|
||||
indices, err := helpers.ActiveValidatorIndices(baseState, 0)
|
||||
require.NoError(t, err)
|
||||
validators := baseState.ValidatorsReadOnly()
|
||||
pks := make([][]byte, len(validators))
|
||||
for i := 0; i < len(pks); i++ {
|
||||
pk := validators[i].PublicKey()
|
||||
pks[i] = pk[:]
|
||||
}
|
||||
service.finalizedCheckpt = ðpb.Checkpoint{Epoch: 1, Root: r32[:]}
|
||||
|
||||
wanted := &pb.CheckPtInfo{
|
||||
Fork: baseState.Fork(),
|
||||
GenesisRoot: baseState.GenesisValidatorRoot(),
|
||||
Seed: seed[:],
|
||||
ActiveIndices: indices,
|
||||
PubKeys: pks,
|
||||
}
|
||||
require.DeepEqual(t, wanted, returned)
|
||||
|
||||
cached, err := service.checkPtInfoCache.get(checkpoint)
|
||||
b33 := testutil.NewBeaconBlock()
|
||||
b33.Block.Slot = 33
|
||||
b33.Block.ParentRoot = r32[:]
|
||||
r33, err := b33.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, service.forkChoiceStore.ProcessBlock(ctx, b32.Block.Slot, r32, [32]byte{}, [32]byte{}, 0, 0))
|
||||
require.NoError(t, service.forkChoiceStore.ProcessBlock(ctx, b33.Block.Slot, r33, r32, [32]byte{}, 0, 0))
|
||||
|
||||
_, err = service.forkChoiceStore.Head(ctx, 0, r32, []uint64{}, 0)
|
||||
require.NoError(t, err)
|
||||
err = service.VerifyFinalizedConsistency(context.Background(), r33[:])
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, wanted, cached)
|
||||
}
@@ -3,12 +3,16 @@ package blockchain
import (
"context"
"fmt"
"time"

"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/attestationutil"
"github.com/prysmaticlabs/prysm/shared/bls"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
@@ -17,8 +21,14 @@ import (
"go.opencensus.io/trace"
)

// A custom slot deadline for processing state slots in our cache.
const slotDeadline = 5 * time.Second

// A custom deadline for deposit trie insertion.
const depositDeadline = 20 * time.Second

// This defines size of the upper bound for initial sync block cache.
var initialSyncBlockCacheSize = 2 * params.BeaconConfig().SlotsPerEpoch
var initialSyncBlockCacheSize = uint64(2 * params.BeaconConfig().SlotsPerEpoch)
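The slotDeadline and depositDeadline constants above bound work that onBlock hands off to goroutines. The pattern, sketched below with a placeholder work function, is to derive a fresh context with its own timeout from context.Background rather than inherit the caller's (possibly already expiring) request context:

package example

import (
	"context"
	"log"
	"time"
)

const slotDeadline = 5 * time.Second

// runDetached kicks off background work that must not be cancelled along with
// the caller's request: it gets its own context and deadline. The work
// function here is a placeholder, not Prysm's API.
func runDetached(work func(ctx context.Context) error) {
	go func() {
		ctx, cancel := context.WithTimeout(context.Background(), slotDeadline)
		defer cancel()
		if err := work(ctx); err != nil {
			log.Printf("background task failed: %v", err)
		}
	}()
}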
// onBlock is called when a gossip block is received. It runs regular state transition on the block.
// The block's signing root should be computed before calling this method to avoid redundant
@@ -84,15 +94,36 @@ func (s *Service) onBlock(ctx context.Context, signed *ethpb.SignedBeaconBlock,
return err
}

postState, err := state.ExecuteStateTransition(ctx, preState, signed)
set, postState, err := state.ExecuteStateTransitionNoVerifyAnySig(ctx, preState, signed)
if err != nil {
return errors.Wrap(err, "could not execute state transition")
}
valid, err := set.Verify()
if err != nil {
return errors.Wrap(err, "could not batch verify signature")
}
if !valid {
return errors.New("signature in block failed to verify")
}

if err := s.savePostStateInfo(ctx, blockRoot, signed, postState, false /* reg sync */); err != nil {
return err
}

// Updating next slot state cache can happen in the background. It shouldn't block rest of the process.
if featureconfig.Get().EnableNextSlotStateCache {
go func() {
// Use a custom deadline here, since this method runs asynchronously.
// We ignore the parent method's context and instead create a new one
// with a custom deadline, therefore using the background context instead.
slotCtx, cancel := context.WithTimeout(context.Background(), slotDeadline)
defer cancel()
if err := state.UpdateNextSlotCache(slotCtx, blockRoot[:], postState); err != nil {
log.WithError(err).Debug("could not update next slot state cache")
}
}()
}

// Update justified check point.
if postState.CurrentJustifiedCheckpoint().Epoch > s.justifiedCheckpt.Epoch {
if err := s.updateJustified(ctx, postState); err != nil {
@@ -100,107 +131,61 @@ func (s *Service) onBlock(ctx context.Context, signed *ethpb.SignedBeaconBlock,
}
}

// Update finalized check point.
if postState.FinalizedCheckpointEpoch() > s.finalizedCheckpt.Epoch {
if err := s.beaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
return err
newFinalized := postState.FinalizedCheckpointEpoch() > s.finalizedCheckpt.Epoch
if featureconfig.Get().UpdateHeadTimely {
if newFinalized {
if err := s.finalizedImpliesNewJustified(ctx, postState); err != nil {
return errors.Wrap(err, "could not save new justified")
}
s.prevFinalizedCheckpt = s.finalizedCheckpt
s.finalizedCheckpt = postState.FinalizedCheckpoint()
}
s.clearInitSyncBlocks()

if err := s.updateHead(ctx, s.getJustifiedBalances()); err != nil {
log.WithError(err).Warn("Could not update head")
}

// Send notification of the processed block to the state feed.
s.stateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.BlockProcessed,
Data: &statefeed.BlockProcessedData{
Slot: signed.Block.Slot,
BlockRoot: blockRoot,
SignedBlock: signed,
Verified: true,
},
})
}

// Update finalized check point.
if newFinalized {
if err := s.updateFinalized(ctx, postState.FinalizedCheckpoint()); err != nil {
return err
}

fRoot := bytesutil.ToBytes32(postState.FinalizedCheckpoint().Root)
if err := s.forkChoiceStore.Prune(ctx, fRoot); err != nil {
return errors.Wrap(err, "could not prune proto array fork choice nodes")
}

if err := s.finalizedImpliesNewJustified(ctx, postState); err != nil {
return errors.Wrap(err, "could not save new justified")
if !featureconfig.Get().UpdateHeadTimely {
if err := s.finalizedImpliesNewJustified(ctx, postState); err != nil {
return errors.Wrap(err, "could not save new justified")
}
}

// Update deposit cache.
s.depositCache.InsertFinalizedDeposits(ctx, int64(postState.Eth1DepositIndex()))
go func() {
// Use a custom deadline here, since this method runs asynchronously.
// We ignore the parent method's context and instead create a new one
// with a custom deadline, therefore using the background context instead.
depCtx, cancel := context.WithTimeout(context.Background(), depositDeadline)
defer cancel()
if err := s.insertFinalizedDeposits(depCtx, fRoot); err != nil {
log.WithError(err).Error("Could not insert finalized deposits.")
}
}()
}

defer reportAttestationInclusion(b)

return s.handleEpochBoundary(postState)
}
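onBlock above now defers all signature checks to a set that is verified once after the state transition (ExecuteStateTransitionNoVerifyAnySig followed by set.Verify). The sketch below shows only that control flow with illustrative types; Prysm's bls.SignatureSet additionally batches the underlying BLS pairings, which is where the speedup comes from:

package example

import "errors"

// sigCheck is one deferred signature verification; Verify reports whether it passes.
type sigCheck interface{ Verify() bool }

// signatureSet collects deferred checks so a block's proposer, attestation and
// deposit signatures can be verified in one pass after the state transition.
type signatureSet struct{ checks []sigCheck }

// Join adds another deferred check to the set.
func (s *signatureSet) Join(c sigCheck) { s.checks = append(s.checks, c) }

// Verify returns an error if any collected signature fails.
func (s *signatureSet) Verify() error {
	for _, c := range s.checks {
		if !c.Verify() {
			return errors.New("signature in block failed to verify")
		}
	}
	return nil
}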
// onBlockInitialSyncStateTransition is called when an initial sync block is received.
|
||||
// It runs state transition on the block and without fork choice and post operation pool processes.
|
||||
// The block's signing root should be computed before calling this method to avoid redundant
|
||||
// computation in this method and methods it calls into.
|
||||
func (s *Service) onBlockInitialSyncStateTransition(ctx context.Context, signed *ethpb.SignedBeaconBlock, blockRoot [32]byte) error {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.onBlockInitialSyncStateTransition")
|
||||
defer span.End()
|
||||
|
||||
if signed == nil || signed.Block == nil {
|
||||
return errors.New("nil block")
|
||||
}
|
||||
|
||||
b := signed.Block
|
||||
|
||||
// Retrieve incoming block's pre state.
|
||||
if err := s.verifyBlkPreState(ctx, b); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
preState, err := s.stateGen.StateByRootInitialSync(ctx, bytesutil.ToBytes32(signed.Block.ParentRoot))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if preState == nil {
|
||||
return fmt.Errorf("nil pre state for slot %d", b.Slot)
|
||||
}
|
||||
|
||||
// Exit early if the pre state slot is higher than incoming block's slot.
|
||||
if preState.Slot() >= signed.Block.Slot {
|
||||
return nil
|
||||
}
|
||||
|
||||
var postState *stateTrie.BeaconState
|
||||
if featureconfig.Get().InitSyncNoVerify {
|
||||
postState, err = state.ExecuteStateTransitionNoVerifyAttSigs(ctx, preState, signed)
|
||||
} else {
|
||||
postState, err = state.ExecuteStateTransition(ctx, preState, signed)
|
||||
}
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not execute state transition")
|
||||
}
|
||||
|
||||
if err := s.savePostStateInfo(ctx, blockRoot, signed, postState, true /* init sync */); err != nil {
|
||||
return err
|
||||
}
|
||||
// Save the latest block as head in cache.
|
||||
if err := s.saveHeadNoDB(ctx, signed, blockRoot, postState); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Rate limit how many blocks (2 epochs worth of blocks) a node keeps in the memory.
|
||||
if uint64(len(s.getInitSyncBlocks())) > initialSyncBlockCacheSize {
|
||||
if err := s.beaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
|
||||
return err
|
||||
}
|
||||
s.clearInitSyncBlocks()
|
||||
}
|
||||
|
||||
if postState.CurrentJustifiedCheckpoint().Epoch > s.justifiedCheckpt.Epoch {
|
||||
if err := s.updateJustifiedInitSync(ctx, postState.CurrentJustifiedCheckpoint()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Update finalized check point. Prune the block cache and helper caches on every new finalized epoch.
|
||||
if postState.FinalizedCheckpointEpoch() > s.finalizedCheckpt.Epoch {
|
||||
if err := s.updateFinalized(ctx, postState.FinalizedCheckpoint()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return s.handleEpochBoundary(postState)
|
||||
return s.handleEpochBoundary(ctx, postState)
|
||||
}
|
||||
|
||||
func (s *Service) onBlockBatch(ctx context.Context, blks []*ethpb.SignedBeaconBlock,
|
||||
@@ -235,7 +220,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []*ethpb.SignedBeaconBl
|
||||
PublicKeys: []bls.PublicKey{},
|
||||
Messages: [][32]byte{},
|
||||
}
|
||||
set := new(bls.SignatureSet)
|
||||
var set *bls.SignatureSet
|
||||
boundaries := make(map[[32]byte]*stateTrie.BeaconState)
|
||||
for i, b := range blks {
|
||||
set, preState, err = state.ExecuteStateTransitionNoVerifyAnySig(ctx, preState, b)
|
||||
@@ -245,8 +230,8 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []*ethpb.SignedBeaconBl
|
||||
// Save potential boundary states.
|
||||
if helpers.IsEpochStart(preState.Slot()) {
|
||||
boundaries[blockRoots[i]] = preState.Copy()
|
||||
if err := s.handleEpochBoundary(preState); err != nil {
|
||||
return nil, nil, fmt.Errorf("could not handle epoch boundary state")
|
||||
if err := s.handleEpochBoundary(ctx, preState); err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not handle epoch boundary state")
|
||||
}
|
||||
}
|
||||
jCheckpoints[i] = preState.CurrentJustifiedCheckpoint()
|
||||
@@ -280,14 +265,19 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []*ethpb.SignedBeaconBl
|
||||
// handles a block after the block's batch has been verified, where we can save blocks
|
||||
// their state summaries and split them off to relative hot/cold storage.
|
||||
func (s *Service) handleBlockAfterBatchVerify(ctx context.Context, signed *ethpb.SignedBeaconBlock,
|
||||
blockRoot [32]byte, fCheckpoint *ethpb.Checkpoint, jCheckpoint *ethpb.Checkpoint) error {
|
||||
blockRoot [32]byte, fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
|
||||
b := signed.Block
|
||||
|
||||
s.saveInitSyncBlock(blockRoot, signed)
|
||||
if err := s.insertBlockToForkChoiceStore(ctx, b, blockRoot, fCheckpoint, jCheckpoint); err != nil {
|
||||
return err
|
||||
}
|
||||
s.stateGen.SaveStateSummary(ctx, signed, blockRoot)
|
||||
if err := s.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{
|
||||
Slot: signed.Block.Slot,
|
||||
Root: blockRoot[:],
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Rate limit how many blocks (2 epochs worth of blocks) a node keeps in the memory.
|
||||
if uint64(len(s.getInitSyncBlocks())) > initialSyncBlockCacheSize {
|
||||
@@ -313,48 +303,69 @@ func (s *Service) handleBlockAfterBatchVerify(ctx context.Context, signed *ethpb
|
||||
}
|
||||
|
||||
// Epoch boundary bookkeeping such as logging epoch summaries.
func (s *Service) handleEpochBoundary(postState *stateTrie.BeaconState) error {
if postState.Slot() >= s.nextEpochBoundarySlot {
reportEpochMetrics(postState)
func (s *Service) handleEpochBoundary(ctx context.Context, postState *stateTrie.BeaconState) error {
if postState.Slot()+1 == s.nextEpochBoundarySlot {
// Update caches for the next epoch at epoch boundary slot - 1.
if err := helpers.UpdateCommitteeCache(postState, helpers.NextEpoch(postState)); err != nil {
return err
}
copied := postState.Copy()
copied, err := state.ProcessSlots(ctx, copied, copied.Slot()+1)
if err != nil {
return err
}
if err := helpers.UpdateProposerIndicesInCache(copied); err != nil {
return err
}
} else if postState.Slot() >= s.nextEpochBoundarySlot {
if err := reportEpochMetrics(ctx, postState, s.head.state); err != nil {
return err
}
var err error
s.nextEpochBoundarySlot, err = helpers.StartSlot(helpers.NextEpoch(postState))
if err != nil {
return err
}
// Update committee cache at epoch boundary slot.

// Update caches at epoch boundary slot.
// The following updates have short cut to return nil cheaply if fulfilled during boundary slot - 1.
if err := helpers.UpdateCommitteeCache(postState, helpers.CurrentEpoch(postState)); err != nil {
return err
}
if err := helpers.UpdateProposerIndicesInCache(postState, helpers.CurrentEpoch(postState)); err != nil {
if err := helpers.UpdateProposerIndicesInCache(postState); err != nil {
return err
}
}

return nil
}
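handleEpochBoundary above keys its cache updates off nextEpochBoundarySlot, warming caches one slot before the boundary is reached. The arithmetic it relies on is simply the start slot of the next epoch, sketched here with the mainnet SlotsPerEpoch value (used only for illustration):

package example

const slotsPerEpoch = 32 // mainnet value, for illustration only

// epochAt returns the epoch containing a slot.
func epochAt(slot uint64) uint64 { return slot / slotsPerEpoch }

// nextEpochBoundarySlot returns the first slot of the next epoch, i.e. the
// value the handler compares postState.Slot()+1 against so caches can be
// warmed one slot before the boundary.
func nextEpochBoundarySlot(slot uint64) uint64 {
	return (epochAt(slot) + 1) * slotsPerEpoch
}

// Example: at slot 63 (last slot of epoch 1), nextEpochBoundarySlot returns 64,
// so 63+1 == 64 triggers the "boundary minus one" cache-update branch.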
// This feeds in the block and block's attestations to fork choice store. It allows the fork choice store
// to gain information on the most current chain.
func (s *Service) insertBlockAndAttestationsToForkChoiceStore(ctx context.Context, blk *ethpb.BeaconBlock, root [32]byte,
state *stateTrie.BeaconState) error {
fCheckpoint := state.FinalizedCheckpoint()
jCheckpoint := state.CurrentJustifiedCheckpoint()
st *stateTrie.BeaconState) error {
fCheckpoint := st.FinalizedCheckpoint()
jCheckpoint := st.CurrentJustifiedCheckpoint()
if err := s.insertBlockToForkChoiceStore(ctx, blk, root, fCheckpoint, jCheckpoint); err != nil {
return err
}
// Feed in block's attestations to fork choice store.
for _, a := range blk.Body.Attestations {
committee, err := helpers.BeaconCommitteeFromState(state, a.Data.Slot, a.Data.CommitteeIndex)
committee, err := helpers.BeaconCommitteeFromState(st, a.Data.Slot, a.Data.CommitteeIndex)
if err != nil {
return err
}
indices, err := attestationutil.AttestingIndices(a.AggregationBits, committee)
if err != nil {
return err
}
indices := attestationutil.AttestingIndices(a.AggregationBits, committee)
s.forkChoiceStore.ProcessAttestation(ctx, indices, bytesutil.ToBytes32(a.Data.BeaconBlockRoot), a.Data.Target.Epoch)
}
return nil
}
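The loop above converts each attestation's aggregation bits plus its committee into validator indices before feeding them to fork choice. A simplified sketch of that mapping (a plain bool slice stands in for the packed bitlist Prysm actually uses):

package example

// attestingIndices sketches what attestationutil.AttestingIndices does: every
// set bit in the aggregation bitfield selects the committee member at that
// position.
func attestingIndices(aggregationBits []bool, committee []uint64) []uint64 {
	indices := make([]uint64, 0, len(committee))
	for i, set := range aggregationBits {
		if set && i < len(committee) {
			indices = append(indices, committee[i])
		}
	}
	return indices
}

// Example: bits [true,false,true] over committee [10,11,12] yields [10,12],
// which are the indices fed to forkChoiceStore.ProcessAttestation above.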
func (s *Service) insertBlockToForkChoiceStore(ctx context.Context, blk *ethpb.BeaconBlock,
root [32]byte, fCheckpoint *ethpb.Checkpoint, jCheckpoint *ethpb.Checkpoint) error {
root [32]byte, fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
if err := s.fillInForkChoiceMissingBlocks(ctx, blk, fCheckpoint, jCheckpoint); err != nil {
return err
}
@@ -370,7 +381,7 @@ func (s *Service) insertBlockToForkChoiceStore(ctx context.Context, blk *ethpb.B

// This saves post state info to DB or cache. This also saves post state info to fork choice store.
// Post state info consists of processed block and state. Do not call this method unless the block and state are verified.
func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b *ethpb.SignedBeaconBlock, state *stateTrie.BeaconState, initSync bool) error {
func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b *ethpb.SignedBeaconBlock, st *stateTrie.BeaconState, initSync bool) error {
ctx, span := trace.StartSpan(ctx, "blockChain.savePostStateInfo")
defer span.End()
if initSync {
@@ -378,10 +389,10 @@ func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b *ethpb.Si
} else if err := s.beaconDB.SaveBlock(ctx, b); err != nil {
return errors.Wrapf(err, "could not save block from slot %d", b.Block.Slot)
}
if err := s.stateGen.SaveState(ctx, r, state); err != nil {
if err := s.stateGen.SaveState(ctx, r, st); err != nil {
return errors.Wrap(err, "could not save state")
}
if err := s.insertBlockAndAttestationsToForkChoiceStore(ctx, b.Block, r, state); err != nil {
if err := s.insertBlockAndAttestationsToForkChoiceStore(ctx, b.Block, r, st); err != nil {
return errors.Wrapf(err, "could not insert block %d to fork choice store", b.Block.Slot)
}
return nil
@@ -6,18 +6,20 @@ import (
"fmt"

"github.com/pkg/errors"
types "github.com/prysmaticlabs/eth2-types"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/shared/attestationutil"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/traceutil"
"go.opencensus.io/trace"
)

// CurrentSlot returns the current slot based on time.
func (s *Service) CurrentSlot() uint64 {
func (s *Service) CurrentSlot() types.Slot {
return helpers.CurrentSlot(uint64(s.genesisTime.Unix()))
}
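CurrentSlot above delegates to helpers.CurrentSlot; the underlying arithmetic is the number of whole SecondsPerSlot intervals elapsed since genesis, sketched here with the mainnet slot time (an assumption for illustration only):

package example

import "time"

const secondsPerSlot = 12 // mainnet value, for illustration only

// currentSlot sketches the arithmetic behind helpers.CurrentSlot: the number
// of whole slot intervals elapsed since genesis. Before genesis it returns 0.
func currentSlot(genesisUnix int64, now time.Time) uint64 {
	elapsed := now.Unix() - genesisUnix
	if elapsed < 0 {
		return 0
	}
	return uint64(elapsed) / secondsPerSlot
}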
@@ -25,7 +27,7 @@ func (s *Service) CurrentSlot() uint64 {
|
||||
// to retrieve the state in DB. It verifies the pre state's validity and the incoming block
|
||||
// is in the correct time window.
|
||||
func (s *Service) getBlockPreState(ctx context.Context, b *ethpb.BeaconBlock) (*stateTrie.BeaconState, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "forkChoice.getBlockPreState")
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.getBlockPreState")
|
||||
defer span.End()
|
||||
|
||||
// Verify incoming block has a valid pre state.
|
||||
@@ -56,14 +58,14 @@ func (s *Service) getBlockPreState(ctx context.Context, b *ethpb.BeaconBlock) (*
|
||||
|
||||
// verifyBlkPreState validates input block has a valid pre-state.
|
||||
func (s *Service) verifyBlkPreState(ctx context.Context, b *ethpb.BeaconBlock) error {
|
||||
ctx, span := trace.StartSpan(ctx, "chainService.verifyBlkPreState")
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.verifyBlkPreState")
|
||||
defer span.End()
|
||||
|
||||
parentRoot := bytesutil.ToBytes32(b.ParentRoot)
|
||||
// Loosen the check to HasBlock because state summary gets saved in batches
|
||||
// during initial syncing. There's no risk given a state summary object is just a
|
||||
// a subset of the block object.
|
||||
if !s.stateGen.StateSummaryExists(ctx, parentRoot) && !s.beaconDB.HasBlock(ctx, parentRoot) {
|
||||
if !s.beaconDB.HasStateSummary(ctx, parentRoot) && !s.beaconDB.HasBlock(ctx, parentRoot) {
|
||||
return errors.New("could not reconstruct parent state")
|
||||
}
|
||||
|
||||
@@ -87,7 +89,7 @@ func (s *Service) verifyBlkPreState(ctx context.Context, b *ethpb.BeaconBlock) e
|
||||
// VerifyBlkDescendant validates input block root is a descendant of the
|
||||
// current finalized block root.
|
||||
func (s *Service) VerifyBlkDescendant(ctx context.Context, root [32]byte) error {
|
||||
ctx, span := trace.StartSpan(ctx, "forkChoice.VerifyBlkDescendant")
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.VerifyBlkDescendant")
|
||||
defer span.End()
|
||||
fRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(s.finalizedCheckpt.Root))
|
||||
finalizedBlkSigned, err := s.beaconDB.Block(ctx, fRoot)
|
||||
@@ -230,9 +232,10 @@ func (s *Service) updateFinalized(ctx context.Context, cp *ethpb.Checkpoint) err
|
||||
if err := s.beaconDB.SaveFinalizedCheckpoint(ctx, cp); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.prevFinalizedCheckpt = s.finalizedCheckpt
|
||||
s.finalizedCheckpt = cp
|
||||
if !featureconfig.Get().UpdateHeadTimely {
|
||||
s.prevFinalizedCheckpt = s.finalizedCheckpt
|
||||
s.finalizedCheckpt = cp
|
||||
}
|
||||
|
||||
fRoot := bytesutil.ToBytes32(cp.Root)
|
||||
if err := s.stateGen.MigrateToCold(ctx, fRoot); err != nil {
|
||||
@@ -254,8 +257,40 @@ func (s *Service) updateFinalized(ctx context.Context, cp *ethpb.Checkpoint) err
|
||||
// else:
|
||||
// # root is older than queried slot, thus a skip slot. Return most recent root prior to slot
|
||||
// return root
|
||||
func (s *Service) ancestor(ctx context.Context, root []byte, slot uint64) ([]byte, error) {
ctx, span := trace.StartSpan(ctx, "forkChoice.ancestor")
func (s *Service) ancestor(ctx context.Context, root []byte, slot types.Slot) ([]byte, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.ancestor")
defer span.End()

r := bytesutil.ToBytes32(root)
// Get ancestor root from fork choice store instead of recursively looking up blocks in DB.
// This is the most optimal outcome.
ar, err := s.ancestorByForkChoiceStore(ctx, r, slot)
if err != nil {
// Try getting ancestor root from DB when failed to retrieve from fork choice store.
// This is the second line of defense for retrieving ancestor root.
ar, err = s.ancestorByDB(ctx, r, slot)
if err != nil {
return nil, err
}
}

return ar, nil
}

// This retrieves an ancestor root using fork choice store. The lookup loops through a flat array structure.
func (s *Service) ancestorByForkChoiceStore(ctx context.Context, r [32]byte, slot types.Slot) ([]byte, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.ancestorByForkChoiceStore")
defer span.End()

if !s.forkChoiceStore.HasParent(r) {
return nil, errors.New("could not find root in fork choice store")
}
return s.forkChoiceStore.AncestorRoot(ctx, r, slot)
}
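ancestor above prefers the in-memory fork choice store and falls back to the DB, which walks parent roots until it reaches a block at or below the requested slot. A compact sketch of that walk (a map stands in for the beacon DB, block data is reduced to slot and parent root, and blocks are assumed to form a tree):

package example

// blockInfo is a minimal stand-in for a stored beacon block header.
type blockInfo struct {
	Slot       uint64
	ParentRoot [32]byte
}

// ancestorRoot follows ParentRoot links until it reaches a block at or below
// the requested slot, which is then the ancestor; skip slots therefore return
// the most recent earlier root, matching the spec pseudocode quoted above.
func ancestorRoot(blocks map[[32]byte]blockInfo, root [32]byte, slot uint64) ([32]byte, bool) {
	for {
		b, ok := blocks[root]
		if !ok {
			return [32]byte{}, false // unknown block
		}
		if b.Slot <= slot {
			return root, true
		}
		root = b.ParentRoot
	}
}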
// This retrieves an ancestor root using DB. The look up is recursively looking up DB. Slower than `ancestorByForkChoiceStore`.
|
||||
func (s *Service) ancestorByDB(ctx context.Context, r [32]byte, slot types.Slot) ([]byte, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.ancestorByDB")
|
||||
defer span.End()
|
||||
|
||||
// Stop recursive ancestry lookup if context is cancelled.
|
||||
@@ -263,13 +298,6 @@ func (s *Service) ancestor(ctx context.Context, root []byte, slot uint64) ([]byt
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
|
||||
r := bytesutil.ToBytes32(root)
|
||||
// Get ancestor root from fork choice store instead of recursively looking up blocks in DB.
|
||||
// This is most optimal outcome.
|
||||
if s.forkChoiceStore.HasParent(r) {
|
||||
return s.forkChoiceStore.AncestorRoot(ctx, r, slot)
|
||||
}
|
||||
|
||||
signed, err := s.beaconDB.Block(ctx, r)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get ancestor block")
|
||||
@@ -284,10 +312,10 @@ func (s *Service) ancestor(ctx context.Context, root []byte, slot uint64) ([]byt
|
||||
}
|
||||
b := signed.Block
|
||||
if b.Slot == slot || b.Slot < slot {
|
||||
return root, nil
|
||||
return r[:], nil
|
||||
}
|
||||
|
||||
return s.ancestor(ctx, b.ParentRoot, slot)
|
||||
return s.ancestorByDB(ctx, bytesutil.ToBytes32(b.ParentRoot), slot)
|
||||
}
|
||||
|
||||
// This updates justified check point in store, if the new justified is later than stored justified or
|
||||
@@ -310,10 +338,7 @@ func (s *Service) finalizedImpliesNewJustified(ctx context.Context, state *state
|
||||
if !attestationutil.CheckPointIsEqual(s.justifiedCheckpt, state.CurrentJustifiedCheckpoint()) {
|
||||
if state.CurrentJustifiedCheckpoint().Epoch > s.justifiedCheckpt.Epoch {
|
||||
s.justifiedCheckpt = state.CurrentJustifiedCheckpoint()
|
||||
if err := s.cacheJustifiedStateBalances(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root)); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
return s.cacheJustifiedStateBalances(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root))
|
||||
}
|
||||
|
||||
// Update justified if store justified is not in chain with finalized check point.
|
||||
@@ -339,8 +364,9 @@ func (s *Service) finalizedImpliesNewJustified(ctx context.Context, state *state
|
||||
// This retrieves missing blocks from DB (ie. the blocks that couldn't be received over sync) and inserts them to fork choice store.
|
||||
// This is useful for block tree visualizer and additional vote accounting.
|
||||
func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk *ethpb.BeaconBlock,
|
||||
fCheckpoint *ethpb.Checkpoint, jCheckpoint *ethpb.Checkpoint) error {
|
||||
fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
|
||||
pendingNodes := make([]*ethpb.BeaconBlock, 0)
|
||||
pendingRoots := make([][32]byte, 0)
|
||||
|
||||
parentRoot := bytesutil.ToBytes32(blk.ParentRoot)
|
||||
slot := blk.Slot
|
||||
@@ -358,6 +384,8 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk *ethpb.
|
||||
}
|
||||
|
||||
pendingNodes = append(pendingNodes, b.Block)
|
||||
copiedRoot := parentRoot
|
||||
pendingRoots = append(pendingRoots, copiedRoot)
|
||||
parentRoot = bytesutil.ToBytes32(b.Block.ParentRoot)
|
||||
slot = b.Block.Slot
|
||||
higherThanFinalized = slot > fSlot
|
||||
@@ -367,11 +395,7 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk *ethpb.
|
||||
// Lower slots should be at the end of the list.
|
||||
for i := len(pendingNodes) - 1; i >= 0; i-- {
|
||||
b := pendingNodes[i]
|
||||
r, err := b.HashTreeRoot()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r := pendingRoots[i]
|
||||
if err := s.forkChoiceStore.ProcessBlock(ctx,
|
||||
b.Slot, r, bytesutil.ToBytes32(b.ParentRoot), bytesutil.ToBytes32(b.Body.Graffiti),
|
||||
jCheckpoint.Epoch,
|
||||
@@ -383,6 +407,28 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk *ethpb.
|
||||
return nil
|
||||
}
|
||||
|
||||
// inserts finalized deposits into our finalized deposit trie.
func (s *Service) insertFinalizedDeposits(ctx context.Context, fRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.insertFinalizedDeposits")
defer span.End()

// Update deposit cache.
finalizedState, err := s.stateGen.StateByRoot(ctx, fRoot)
if err != nil {
return errors.Wrap(err, "could not fetch finalized state")
}
// We update the cache up to the last deposit index in the finalized block's state.
// We can be confident that these deposits will be included in some block
// because the Eth1 follow distance makes such long-range reorgs extremely unlikely.
eth1DepositIndex := int64(finalizedState.Eth1Data().DepositCount - 1)
s.depositCache.InsertFinalizedDeposits(ctx, eth1DepositIndex)
// Deposit proofs are only used during state transition and can be safely removed to save space.
if err = s.depositCache.PruneProofs(ctx, eth1DepositIndex); err != nil {
return errors.Wrap(err, "could not prune deposit proofs")
}
return nil
}

// This deletes input attestations from the attestation pool, so proposers don't include them in a block for the future.
func (s *Service) deletePoolAtts(atts []*ethpb.Attestation) error {
for _, att := range atts {
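insertFinalizedDeposits above trusts the finalized state's Eth1Data().DepositCount: the last finalized deposit has index DepositCount-1, deposits up to that index can be merged into the finalized trie, and their proofs can be pruned. A sketch of just that arithmetic, with depositCache as a hypothetical interface rather than Prysm's actual cache type:

package example

// depositCache is a hypothetical stand-in for the deposit cache used above.
type depositCache interface {
	InsertFinalizedDeposits(lastIndex int64)
	PruneProofs(lastIndex int64) error
}

// finalizeDeposits finalizes everything up to and including index
// depositCount-1 and drops the now-unneeded Merkle proofs.
func finalizeDeposits(c depositCache, depositCount uint64) error {
	if depositCount == 0 {
		return nil // nothing finalized yet (extra guard, not in the original)
	}
	lastIndex := int64(depositCount - 1)
	c.InsertFinalizedDeposits(lastIndex)
	return c.PruneProofs(lastIndex)
}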
@@ -2,13 +2,16 @@ package blockchain
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db"
|
||||
@@ -20,42 +23,43 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/shared/attestationutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/roughtime"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
"github.com/prysmaticlabs/prysm/shared/timeutils"
|
||||
)
|
||||
|
||||
func TestStore_OnBlock(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db, sc := testDB.SetupDB(t)
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{
|
||||
BeaconDB: db,
|
||||
StateGen: stategen.New(db, sc),
|
||||
BeaconDB: beaconDB,
|
||||
StateGen: stategen.New(beaconDB),
|
||||
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
|
||||
}
|
||||
service, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
genesisStateRoot := [32]byte{}
|
||||
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
|
||||
assert.NoError(t, db.SaveBlock(ctx, genesis))
|
||||
assert.NoError(t, beaconDB.SaveBlock(ctx, genesis))
|
||||
validGenesisRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
st := testutil.NewBeaconState()
|
||||
st, err := testutil.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.beaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))
|
||||
roots, err := blockTree1(db, validGenesisRoot[:])
|
||||
roots, err := blockTree1(t, beaconDB, validGenesisRoot[:])
|
||||
require.NoError(t, err)
|
||||
random := testutil.NewBeaconBlock()
|
||||
random.Block.Slot = 1
|
||||
random.Block.ParentRoot = validGenesisRoot[:]
|
||||
assert.NoError(t, db.SaveBlock(ctx, random))
|
||||
assert.NoError(t, beaconDB.SaveBlock(ctx, random))
|
||||
randomParentRoot, err := random.Block.HashTreeRoot()
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{Slot: st.Slot(), Root: randomParentRoot[:]}))
|
||||
require.NoError(t, service.beaconDB.SaveState(ctx, st.Copy(), randomParentRoot))
|
||||
randomParentRoot2 := roots[1]
|
||||
require.NoError(t, service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{Slot: st.Slot(), Root: randomParentRoot2[:]}))
|
||||
require.NoError(t, service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{Slot: st.Slot(), Root: randomParentRoot2}))
|
||||
require.NoError(t, service.beaconDB.SaveState(ctx, st.Copy(), bytesutil.ToBytes32(randomParentRoot2)))
|
||||
|
||||
tests := []struct {
|
||||
@@ -76,7 +80,7 @@ func TestStore_OnBlock(t *testing.T) {
|
||||
blk: func() *ethpb.SignedBeaconBlock {
|
||||
b := testutil.NewBeaconBlock()
|
||||
b.Block.ParentRoot = randomParentRoot2
|
||||
b.Block.Slot = params.BeaconConfig().FarFutureEpoch
|
||||
b.Block.Slot = params.BeaconConfig().FarFutureSlot
|
||||
return b
|
||||
}(),
|
||||
s: st.Copy(),
|
||||
@@ -123,18 +127,18 @@ func TestStore_OnBlock(t *testing.T) {
|
||||
|
||||
func TestStore_OnBlockBatch(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db, sc := testDB.SetupDB(t)
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{
|
||||
BeaconDB: db,
|
||||
StateGen: stategen.New(db, sc),
|
||||
BeaconDB: beaconDB,
|
||||
StateGen: stategen.New(beaconDB),
|
||||
}
|
||||
service, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
genesisStateRoot := [32]byte{}
|
||||
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
|
||||
assert.NoError(t, db.SaveBlock(ctx, genesis))
|
||||
assert.NoError(t, beaconDB.SaveBlock(ctx, genesis))
|
||||
gRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
service.finalizedCheckpt = ðpb.Checkpoint{
|
||||
@@ -147,11 +151,11 @@ func TestStore_OnBlockBatch(t *testing.T) {
|
||||
|
||||
bState := st.Copy()
|
||||
|
||||
blks := []*ethpb.SignedBeaconBlock{}
|
||||
blkRoots := [][32]byte{}
|
||||
var blks []*ethpb.SignedBeaconBlock
|
||||
var blkRoots [][32]byte
|
||||
var firstState *stateTrie.BeaconState
|
||||
for i := 1; i < 10; i++ {
|
||||
b, err := testutil.GenerateFullBlock(bState, keys, testutil.DefaultBlockGenConfig(), uint64(i))
|
||||
b, err := testutil.GenerateFullBlock(bState, keys, testutil.DefaultBlockGenConfig(), types.Slot(i))
|
||||
require.NoError(t, err)
|
||||
bState, err = state.ExecuteStateTransition(ctx, bState, b)
|
||||
require.NoError(t, err)
|
||||
@@ -166,7 +170,7 @@ func TestStore_OnBlockBatch(t *testing.T) {
|
||||
}
|
||||
|
||||
blks[0].Block.ParentRoot = gRoot[:]
|
||||
require.NoError(t, db.SaveBlock(context.Background(), blks[0]))
|
||||
require.NoError(t, beaconDB.SaveBlock(context.Background(), blks[0]))
|
||||
require.NoError(t, service.stateGen.SaveState(ctx, blkRoots[0], firstState))
|
||||
_, _, err = service.onBlockBatch(ctx, blks[1:], blkRoots[1:])
|
||||
require.NoError(t, err)
|
||||
@@ -174,11 +178,11 @@ func TestStore_OnBlockBatch(t *testing.T) {
|
||||
|
||||
func TestRemoveStateSinceLastFinalized_EmptyStartSlot(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db, _ := testDB.SetupDB(t)
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
params.UseMinimalConfig()
|
||||
defer params.UseMainnetConfig()
|
||||
|
||||
cfg := &Config{BeaconDB: db, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
|
||||
cfg := &Config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
|
||||
service, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
service.genesisTime = time.Now()
|
||||
@@ -198,7 +202,7 @@ func TestRemoveStateSinceLastFinalized_EmptyStartSlot(t *testing.T) {
|
||||
require.NoError(t, service.beaconDB.SaveBlock(ctx, newJustifiedBlk))
|
||||
require.NoError(t, service.beaconDB.SaveBlock(ctx, lastJustifiedBlk))
|
||||
|
||||
diff := (params.BeaconConfig().SlotsPerEpoch - 1) * params.BeaconConfig().SecondsPerSlot
|
||||
diff := params.BeaconConfig().SlotsPerEpoch.Sub(1).Mul(params.BeaconConfig().SecondsPerSlot)
|
||||
service.genesisTime = time.Unix(time.Now().Unix()-int64(diff), 0)
|
||||
service.justifiedCheckpt = ðpb.Checkpoint{Root: lastJustifiedRoot[:]}
|
||||
update, err = service.shouldUpdateCurrentJustified(ctx, ðpb.Checkpoint{Root: newJustifiedRoot[:]})
|
||||
@@ -208,11 +212,11 @@ func TestRemoveStateSinceLastFinalized_EmptyStartSlot(t *testing.T) {
|
||||
|
||||
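Several assertions in these tests switch from raw uint64 slot math to typed helpers, e.g. SlotsPerEpoch.Sub(1).Mul(SecondsPerSlot). The sketch below only motivates that wrapper; the real types.Slot from github.com/prysmaticlabs/eth2-types has a richer, overflow-checked API whose signatures may differ:

package example

// Slot wraps uint64 so slots, epochs and seconds cannot be mixed by accident;
// arithmetic goes through explicit helpers instead of bare *, - operators.
type Slot uint64

// Sub subtracts n slots; callers must ensure s >= n (the real type checks this).
func (s Slot) Sub(n uint64) Slot { return s - Slot(n) }

// Mul returns the product as a plain uint64, e.g. converting a slot count
// into seconds: slotsPerEpoch.Sub(1).Mul(secondsPerSlot).
func (s Slot) Mul(n uint64) uint64 { return uint64(s) * n }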
func TestShouldUpdateJustified_ReturnFalse(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db, _ := testDB.SetupDB(t)
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
params.UseMinimalConfig()
|
||||
defer params.UseMainnetConfig()
|
||||
|
||||
cfg := &Config{BeaconDB: db}
|
||||
cfg := &Config{BeaconDB: beaconDB}
|
||||
service, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
lastJustifiedBlk := testutil.NewBeaconBlock()
|
||||
@@ -226,7 +230,7 @@ func TestShouldUpdateJustified_ReturnFalse(t *testing.T) {
|
||||
require.NoError(t, service.beaconDB.SaveBlock(ctx, newJustifiedBlk))
|
||||
require.NoError(t, service.beaconDB.SaveBlock(ctx, lastJustifiedBlk))
|
||||
|
||||
diff := (params.BeaconConfig().SlotsPerEpoch - 1) * params.BeaconConfig().SecondsPerSlot
diff := params.BeaconConfig().SlotsPerEpoch.Sub(1).Mul(params.BeaconConfig().SecondsPerSlot)
service.genesisTime = time.Unix(time.Now().Unix()-int64(diff), 0)
service.justifiedCheckpt = &ethpb.Checkpoint{Root: lastJustifiedRoot[:]}

@@ -237,11 +241,11 @@ func TestShouldUpdateJustified_ReturnFalse(t *testing.T) {

func TestCachedPreState_CanGetFromStateSummary(t *testing.T) {
ctx := context.Background()
db, sc := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)

cfg := &Config{
BeaconDB: db,
StateGen: stategen.New(db, sc),
BeaconDB: beaconDB,
StateGen: stategen.New(beaconDB),
}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
@@ -251,7 +255,7 @@ func TestCachedPreState_CanGetFromStateSummary(t *testing.T) {

genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
assert.NoError(t, db.SaveBlock(ctx, genesis))
assert.NoError(t, beaconDB.SaveBlock(ctx, genesis))
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.finalizedCheckpt = &ethpb.Checkpoint{
@@ -270,18 +274,18 @@ func TestCachedPreState_CanGetFromStateSummary(t *testing.T) {

func TestCachedPreState_CanGetFromDB(t *testing.T) {
ctx := context.Background()
db, sc := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)

cfg := &Config{
BeaconDB: db,
StateGen: stategen.New(db, sc),
BeaconDB: beaconDB,
StateGen: stategen.New(beaconDB),
}
service, err := NewService(ctx, cfg)
require.NoError(t, err)

genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
assert.NoError(t, db.SaveBlock(ctx, genesis))
assert.NoError(t, beaconDB.SaveBlock(ctx, genesis))
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.finalizedCheckpt = &ethpb.Checkpoint{
@@ -307,24 +311,25 @@ func TestCachedPreState_CanGetFromDB(t *testing.T) {

func TestUpdateJustified_CouldUpdateBest(t *testing.T) {
ctx := context.Background()
db, _ := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)

cfg := &Config{BeaconDB: db, StateGen: stategen.New(db, cache.NewStateSummaryCache())}
cfg := &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)}
service, err := NewService(ctx, cfg)
require.NoError(t, err)

signedBlock := testutil.NewBeaconBlock()
require.NoError(t, db.SaveBlock(ctx, signedBlock))
require.NoError(t, beaconDB.SaveBlock(ctx, signedBlock))
r, err := signedBlock.Block.HashTreeRoot()
require.NoError(t, err)
service.justifiedCheckpt = &ethpb.Checkpoint{Root: []byte{'A'}}
service.bestJustifiedCheckpt = &ethpb.Checkpoint{Root: []byte{'A'}}
st := testutil.NewBeaconState()
service.initSyncState[r] = st.Copy()
require.NoError(t, db.SaveState(ctx, st.Copy(), r))
st, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, st.Copy(), r))

// Could update
s := testutil.NewBeaconState()
s, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.SetCurrentJustifiedCheckpoint(&ethpb.Checkpoint{Epoch: 1, Root: r[:]}))
require.NoError(t, service.updateJustified(context.Background(), s))

@@ -334,14 +339,14 @@ func TestUpdateJustified_CouldUpdateBest(t *testing.T) {
service.bestJustifiedCheckpt.Epoch = 2
require.NoError(t, service.updateJustified(context.Background(), s))

assert.Equal(t, uint64(2), service.bestJustifiedCheckpt.Epoch, "Incorrect justified epoch in service")
assert.Equal(t, types.Epoch(2), service.bestJustifiedCheckpt.Epoch, "Incorrect justified epoch in service")
}

func TestFillForkChoiceMissingBlocks_CanSave(t *testing.T) {
ctx := context.Background()
db, _ := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)

cfg := &Config{BeaconDB: db}
cfg := &Config{BeaconDB: beaconDB}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
service.forkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
@@ -349,13 +354,14 @@ func TestFillForkChoiceMissingBlocks_CanSave(t *testing.T) {

genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
require.NoError(t, db.SaveBlock(ctx, genesis))
require.NoError(t, beaconDB.SaveBlock(ctx, genesis))
validGenesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
st := testutil.NewBeaconState()
st, err := testutil.NewBeaconState()
require.NoError(t, err)

require.NoError(t, service.beaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))
roots, err := blockTree1(db, validGenesisRoot[:])
roots, err := blockTree1(t, beaconDB, validGenesisRoot[:])
require.NoError(t, err)

beaconState, _ := testutil.DeterministicGenesisState(t, 32)
@@ -374,11 +380,52 @@ func TestFillForkChoiceMissingBlocks_CanSave(t *testing.T) {
assert.Equal(t, true, service.forkChoiceStore.HasNode(bytesutil.ToBytes32(roots[8])), "Didn't save node")
}

func TestFillForkChoiceMissingBlocks_RootsMatch(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)

cfg := &Config{BeaconDB: beaconDB}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
service.forkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
service.finalizedCheckpt = &ethpb.Checkpoint{Root: make([]byte, 32)}

genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
require.NoError(t, beaconDB.SaveBlock(ctx, genesis))
validGenesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
st, err := testutil.NewBeaconState()
require.NoError(t, err)

require.NoError(t, service.beaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))
roots, err := blockTree1(t, beaconDB, validGenesisRoot[:])
require.NoError(t, err)

beaconState, _ := testutil.DeterministicGenesisState(t, 32)
block := testutil.NewBeaconBlock()
block.Block.Slot = 9
block.Block.ParentRoot = roots[8]

err = service.fillInForkChoiceMissingBlocks(
context.Background(), block.Block, beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
require.NoError(t, err)

// 5 nodes from the block tree 1. B0 - B3 - B4 - B6 - B8
assert.Equal(t, 5, len(service.forkChoiceStore.Nodes()), "Miss match nodes")
// Ensure all roots and their respective blocks exist.
wantedRoots := [][]byte{roots[0], roots[3], roots[4], roots[6], roots[8]}
for i, rt := range wantedRoots {
assert.Equal(t, true, service.forkChoiceStore.HasNode(bytesutil.ToBytes32(rt)), fmt.Sprintf("Didn't save node: %d", i))
assert.Equal(t, true, service.beaconDB.HasBlock(context.Background(), bytesutil.ToBytes32(rt)))
}
}

func TestFillForkChoiceMissingBlocks_FilterFinalized(t *testing.T) {
ctx := context.Background()
db, _ := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)

cfg := &Config{BeaconDB: db}
cfg := &Config{BeaconDB: beaconDB}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
service.forkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
@@ -387,10 +434,11 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized(t *testing.T) {

genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
assert.NoError(t, db.SaveBlock(ctx, genesis))
assert.NoError(t, beaconDB.SaveBlock(ctx, genesis))
validGenesisRoot, err := genesis.Block.HashTreeRoot()
assert.NoError(t, err)
st := testutil.NewBeaconState()
st, err := testutil.NewBeaconState()
require.NoError(t, err)

require.NoError(t, service.beaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))

@@ -428,7 +476,7 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized(t *testing.T) {
// B0 /- B5 - B7
// \- B3 - B4 - B6 - B8
// (B1, and B3 are all from the same slots)
func blockTree1(db db.Database, genesisRoot []byte) ([][]byte, error) {
func blockTree1(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][]byte, error) {
genesisRoot = bytesutil.PadTo(genesisRoot, 32)
b0 := testutil.NewBeaconBlock()
b0.Block.Slot = 0
@@ -486,43 +534,53 @@ func blockTree1(db db.Database, genesisRoot []byte) ([][]byte, error) {
if err != nil {
return nil, err
}
st := testutil.NewBeaconState()
st, err := testutil.NewBeaconState()
require.NoError(t, err)

for _, b := range []*ethpb.SignedBeaconBlock{b0, b1, b3, b4, b5, b6, b7, b8} {
beaconBlock := testutil.NewBeaconBlock()
beaconBlock.Block.Slot = b.Block.Slot
beaconBlock.Block.ParentRoot = bytesutil.PadTo(b.Block.ParentRoot, 32)
if err := db.SaveBlock(context.Background(), beaconBlock); err != nil {
if err := beaconDB.SaveBlock(context.Background(), beaconBlock); err != nil {
return nil, err
}
if err := db.SaveState(context.Background(), st.Copy(), bytesutil.ToBytes32(beaconBlock.Block.ParentRoot)); err != nil {
if err := beaconDB.SaveState(context.Background(), st.Copy(), bytesutil.ToBytes32(beaconBlock.Block.ParentRoot)); err != nil {
return nil, errors.Wrap(err, "could not save state")
}
}
if err := db.SaveState(context.Background(), st.Copy(), r1); err != nil {
if err := beaconDB.SaveState(context.Background(), st.Copy(), r1); err != nil {
return nil, err
}
if err := db.SaveState(context.Background(), st.Copy(), r7); err != nil {
if err := beaconDB.SaveState(context.Background(), st.Copy(), r7); err != nil {
return nil, err
}
if err := db.SaveState(context.Background(), st.Copy(), r8); err != nil {
if err := beaconDB.SaveState(context.Background(), st.Copy(), r8); err != nil {
return nil, err
}
return [][]byte{r0[:], r1[:], nil, r3[:], r4[:], r5[:], r6[:], r7[:], r8[:]}, nil
}
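A minimal sketch of how the slice returned by blockTree1 above maps onto the block tree in its comment: indices follow the block numbers B0..B8, and index 2 is nil because the tree has no B2. The caller name here is hypothetical, not part of the diff.

// exampleUseBlockTree is a hypothetical caller illustrating the index layout
// of blockTree1's return value.
func exampleUseBlockTree(t *testing.T, beaconDB db.Database) {
	genesisRoot := bytesutil.PadTo([]byte{'g'}, 32)
	roots, err := blockTree1(t, beaconDB, genesisRoot)
	require.NoError(t, err)
	for i, r := range roots {
		if i == 2 {
			// There is no B2 in the tree, so index 2 is left nil.
			require.Equal(t, 0, len(r))
			continue
		}
		// Every other entry is a 32-byte hash tree root of block Bi.
		require.Equal(t, 32, len(r))
	}
}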

func TestCurrentSlot_HandlesOverflow(t *testing.T) {
svc := Service{genesisTime: roughtime.Now().Add(1 * time.Hour)}
svc := Service{genesisTime: timeutils.Now().Add(1 * time.Hour)}

slot := svc.CurrentSlot()
require.Equal(t, uint64(0), slot, "Unexpected slot")
require.Equal(t, types.Slot(0), slot, "Unexpected slot")
}
func TestAncestorByDB_CtxErr(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
service, err := NewService(ctx, &Config{})
require.NoError(t, err)

cancel()
_, err = service.ancestorByDB(ctx, [32]byte{}, 0)
require.ErrorContains(t, "context canceled", err)
}

func TestAncestor_HandleSkipSlot(t *testing.T) {
ctx := context.Background()
db, _ := testDB.SetupDB(t)
beaconDB := testDB.SetupDB(t)

cfg := &Config{BeaconDB: db, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
cfg := &Config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx, cfg)
require.NoError(t, err)

@@ -545,7 +603,7 @@ func TestAncestor_HandleSkipSlot(t *testing.T) {
beaconBlock := testutil.NewBeaconBlock()
beaconBlock.Block.Slot = b.Block.Slot
beaconBlock.Block.ParentRoot = bytesutil.PadTo(b.Block.ParentRoot, 32)
require.NoError(t, db.SaveBlock(context.Background(), beaconBlock))
require.NoError(t, beaconDB.SaveBlock(context.Background(), beaconBlock))
}

// Slots 100 to 200 are skip slots. Requesting root at 150 will yield root at 100. The last physical block.
@@ -563,6 +621,82 @@ func TestAncestor_HandleSkipSlot(t *testing.T) {
}
}
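A simplified standalone sketch of the skip-slot rule that TestAncestor_HandleSkipSlot exercises above, assuming nothing beyond the stated rule (this is not the service's ancestor implementation): requesting an ancestor at a slot inside a gap of skip slots resolves to the last physical block at or before that slot.

// sketchBlock and ancestorAtSlot are illustrative types, not part of Prysm.
type sketchBlock struct {
	slot   uint64
	parent *sketchBlock
}

// ancestorAtSlot walks parents until it lands on the last block at or before
// the requested slot, so slot 150 inside a 100-200 gap resolves to slot 100.
func ancestorAtSlot(b *sketchBlock, slot uint64) *sketchBlock {
	for b != nil && b.slot > slot {
		b = b.parent
	}
	return b
}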
|
||||
|
||||
func TestAncestor_CanUseForkchoice(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
cfg := &Config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
|
||||
service, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
b1 := testutil.NewBeaconBlock()
|
||||
b1.Block.Slot = 1
|
||||
b1.Block.ParentRoot = bytesutil.PadTo([]byte{'a'}, 32)
|
||||
r1, err := b1.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
b100 := testutil.NewBeaconBlock()
|
||||
b100.Block.Slot = 100
|
||||
b100.Block.ParentRoot = r1[:]
|
||||
r100, err := b100.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
b200 := testutil.NewBeaconBlock()
|
||||
b200.Block.Slot = 200
|
||||
b200.Block.ParentRoot = r100[:]
|
||||
r200, err := b200.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
for _, b := range []*ethpb.SignedBeaconBlock{b1, b100, b200} {
|
||||
beaconBlock := testutil.NewBeaconBlock()
|
||||
beaconBlock.Block.Slot = b.Block.Slot
|
||||
beaconBlock.Block.ParentRoot = bytesutil.PadTo(b.Block.ParentRoot, 32)
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.forkChoiceStore.ProcessBlock(context.Background(), b.Block.Slot, r, bytesutil.ToBytes32(b.Block.ParentRoot), [32]byte{}, 0, 0)) // Saves blocks to fork choice store.
|
||||
}
|
||||
|
||||
r, err := service.ancestor(context.Background(), r200[:], 150)
|
||||
require.NoError(t, err)
|
||||
if bytesutil.ToBytes32(r) != r100 {
|
||||
t.Error("Did not get correct root")
|
||||
}
|
||||
}
|
||||
|
||||
func TestAncestor_CanUseDB(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
|
||||
service, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
b1 := testutil.NewBeaconBlock()
|
||||
b1.Block.Slot = 1
|
||||
b1.Block.ParentRoot = bytesutil.PadTo([]byte{'a'}, 32)
|
||||
r1, err := b1.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
b100 := testutil.NewBeaconBlock()
|
||||
b100.Block.Slot = 100
|
||||
b100.Block.ParentRoot = r1[:]
|
||||
r100, err := b100.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
b200 := testutil.NewBeaconBlock()
|
||||
b200.Block.Slot = 200
|
||||
b200.Block.ParentRoot = r100[:]
|
||||
r200, err := b200.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
for _, b := range []*ethpb.SignedBeaconBlock{b1, b100, b200} {
|
||||
beaconBlock := testutil.NewBeaconBlock()
|
||||
beaconBlock.Block.Slot = b.Block.Slot
|
||||
beaconBlock.Block.ParentRoot = bytesutil.PadTo(b.Block.ParentRoot, 32)
|
||||
require.NoError(t, beaconDB.SaveBlock(context.Background(), beaconBlock)) // Saves blocks to DB.
|
||||
}
|
||||
|
||||
require.NoError(t, service.forkChoiceStore.ProcessBlock(context.Background(), 200, r200, r200, [32]byte{}, 0, 0))
|
||||
|
||||
r, err := service.ancestor(context.Background(), r200[:], 150)
|
||||
require.NoError(t, err)
|
||||
if bytesutil.ToBytes32(r) != r100 {
|
||||
t.Error("Did not get correct root")
|
||||
}
|
||||
}
|
||||
|
||||
func TestEnsureRootNotZeroHashes(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
cfg := &Config{}
|
||||
@@ -578,7 +712,7 @@ func TestEnsureRootNotZeroHashes(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestFinalizedImpliesNewJustified(t *testing.T) {
|
||||
db, sc := testDB.SetupDB(t)
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
type args struct {
|
||||
cachedCheckPoint *ethpb.Checkpoint
|
||||
@@ -617,13 +751,15 @@ func TestFinalizedImpliesNewJustified(t *testing.T) {
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
beaconState := testutil.NewBeaconState()
|
||||
beaconState, err := testutil.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(test.args.stateCheckPoint))
|
||||
service, err := NewService(ctx, &Config{BeaconDB: db, StateGen: stategen.New(db, sc), ForkChoiceStore: protoarray.New(0, 0, [32]byte{})})
|
||||
service, err := NewService(ctx, &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB), ForkChoiceStore: protoarray.New(0, 0, [32]byte{})})
|
||||
require.NoError(t, err)
|
||||
service.justifiedCheckpt = test.args.cachedCheckPoint
|
||||
require.NoError(t, service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{Root: bytesutil.PadTo(test.want.Root, 32)}))
|
||||
genesisState := testutil.NewBeaconState()
|
||||
genesisState, err := testutil.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.beaconDB.SaveState(ctx, genesisState, bytesutil.ToBytes32(test.want.Root)))
|
||||
|
||||
if test.args.diffFinalizedCheckPoint {
|
||||
@@ -653,40 +789,37 @@ func TestFinalizedImpliesNewJustified(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestVerifyBlkDescendant(t *testing.T) {
|
||||
db, sc := testDB.SetupDB(t)
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
b := testutil.NewBeaconBlock()
|
||||
b.Block.Slot = 1
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.SaveBlock(ctx, b))
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, b))
|
||||
|
||||
b1 := testutil.NewBeaconBlock()
|
||||
b1.Block.Slot = 1
|
||||
b1.Block.Body.Graffiti = bytesutil.PadTo([]byte{'a'}, 32)
|
||||
r1, err := b1.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.SaveBlock(ctx, b1))
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, b1))
|
||||
|
||||
type args struct {
|
||||
parentRoot [32]byte
|
||||
finalizedRoot [32]byte
|
||||
finalizedSlot uint64
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
shouldError bool
|
||||
err string
|
||||
name string
|
||||
args args
|
||||
wantedErr string
|
||||
}{
|
||||
{
|
||||
name: "could not get finalized block in block service cache",
|
||||
args: args{
|
||||
finalizedRoot: [32]byte{'a'},
|
||||
},
|
||||
shouldError: true,
|
||||
err: "nil finalized block",
|
||||
wantedErr: "nil finalized block",
|
||||
},
|
||||
{
|
||||
name: "could not get finalized block root in DB",
|
||||
@@ -694,8 +827,7 @@ func TestVerifyBlkDescendant(t *testing.T) {
|
||||
finalizedRoot: r,
|
||||
parentRoot: [32]byte{'a'},
|
||||
},
|
||||
shouldError: true,
|
||||
err: "could not get finalized block root",
|
||||
wantedErr: "could not get finalized block root",
|
||||
},
|
||||
{
|
||||
name: "is not descendant",
|
||||
@@ -703,8 +835,7 @@ func TestVerifyBlkDescendant(t *testing.T) {
|
||||
finalizedRoot: r1,
|
||||
parentRoot: r,
|
||||
},
|
||||
shouldError: true,
|
||||
err: "is not a descendent of the current finalized block slot",
|
||||
wantedErr: "is not a descendent of the current finalized block slot",
|
||||
},
|
||||
{
|
||||
name: "is descendant",
|
||||
@@ -712,30 +843,27 @@ func TestVerifyBlkDescendant(t *testing.T) {
|
||||
finalizedRoot: r,
|
||||
parentRoot: r,
|
||||
},
|
||||
shouldError: false,
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
service, err := NewService(ctx, &Config{BeaconDB: db, StateGen: stategen.New(db, sc), ForkChoiceStore: protoarray.New(0, 0, [32]byte{})})
|
||||
for _, tt := range tests {
|
||||
service, err := NewService(ctx, &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB), ForkChoiceStore: protoarray.New(0, 0, [32]byte{})})
|
||||
require.NoError(t, err)
|
||||
service.finalizedCheckpt = ðpb.Checkpoint{
|
||||
Root: test.args.finalizedRoot[:],
|
||||
Root: tt.args.finalizedRoot[:],
|
||||
}
|
||||
err = service.VerifyBlkDescendant(ctx, test.args.parentRoot)
|
||||
if test.shouldError {
|
||||
if err == nil || !strings.Contains(err.Error(), test.err) {
|
||||
t.Error("Did not get wanted error")
|
||||
}
|
||||
err = service.VerifyBlkDescendant(ctx, tt.args.parentRoot)
|
||||
if tt.wantedErr != "" {
|
||||
assert.ErrorContains(t, tt.wantedErr, err)
|
||||
} else if err != nil {
|
||||
t.Error(err)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateJustifiedInitSync(t *testing.T) {
|
||||
db, _ := testDB.SetupDB(t)
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
cfg := &Config{BeaconDB: db}
|
||||
cfg := &Config{BeaconDB: beaconDB}
|
||||
service, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -754,9 +882,114 @@ func TestUpdateJustifiedInitSync(t *testing.T) {
|
||||
|
||||
require.NoError(t, service.updateJustifiedInitSync(ctx, newCp))
|
||||
|
||||
assert.DeepEqual(t, currentCp, service.prevJustifiedCheckpt, "Incorrect previous justified checkpoint")
|
||||
assert.DeepEqual(t, newCp, service.CurrentJustifiedCheckpt(), "Incorrect current justified checkpoint in cache")
|
||||
assert.DeepSSZEqual(t, currentCp, service.prevJustifiedCheckpt, "Incorrect previous justified checkpoint")
|
||||
assert.DeepSSZEqual(t, newCp, service.CurrentJustifiedCheckpt(), "Incorrect current justified checkpoint in cache")
|
||||
cp, err := service.beaconDB.JustifiedCheckpoint(ctx)
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, newCp, cp, "Incorrect current justified checkpoint in db")
|
||||
assert.DeepSSZEqual(t, newCp, cp, "Incorrect current justified checkpoint in db")
|
||||
}
|
||||
|
||||
func TestHandleEpochBoundary_BadMetrics(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
cfg := &Config{}
|
||||
service, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
s, err := testutil.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.SetSlot(1))
|
||||
service.head = &head{}
|
||||
require.ErrorContains(t, "failed to initialize precompute: nil inner state", service.handleEpochBoundary(ctx, s))
|
||||
}
|
||||
|
||||
func TestHandleEpochBoundary_UpdateFirstSlot(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
cfg := &Config{}
|
||||
service, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
s, _ := testutil.DeterministicGenesisState(t, 1024)
|
||||
service.head = &head{state: s}
|
||||
require.NoError(t, s.SetSlot(2*params.BeaconConfig().SlotsPerEpoch))
|
||||
require.NoError(t, service.handleEpochBoundary(ctx, s))
|
||||
require.Equal(t, 3*params.BeaconConfig().SlotsPerEpoch, service.nextEpochBoundarySlot)
|
||||
}
|
||||
|
||||
func TestOnBlock_CanFinalize(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
depositCache, err := depositcache.New()
|
||||
require.NoError(t, err)
|
||||
cfg := &Config{
|
||||
BeaconDB: beaconDB,
|
||||
StateGen: stategen.New(beaconDB),
|
||||
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
|
||||
DepositCache: depositCache,
|
||||
}
|
||||
service, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
gs, keys := testutil.DeterministicGenesisState(t, 32)
|
||||
require.NoError(t, service.saveGenesisData(ctx, gs))
|
||||
gBlk, err := service.beaconDB.GenesisBlock(ctx)
|
||||
require.NoError(t, err)
|
||||
gRoot, err := gBlk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
service.finalizedCheckpt = ðpb.Checkpoint{Root: gRoot[:]}
|
||||
|
||||
testState := gs.Copy()
|
||||
for i := types.Slot(1); i <= 4*params.BeaconConfig().SlotsPerEpoch; i++ {
|
||||
blk, err := testutil.GenerateFullBlock(testState, keys, testutil.DefaultBlockGenConfig(), i)
|
||||
require.NoError(t, err)
|
||||
r, err := blk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.onBlock(ctx, blk, r))
|
||||
testState, err = service.stateGen.StateByRoot(ctx, r)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
require.Equal(t, types.Epoch(3), service.CurrentJustifiedCheckpt().Epoch)
|
||||
require.Equal(t, types.Epoch(2), service.FinalizedCheckpt().Epoch)
|
||||
}
|
||||
|
||||
func TestInsertFinalizedDeposits(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
depositCache, err := depositcache.New()
|
||||
require.NoError(t, err)
|
||||
cfg := &Config{
|
||||
BeaconDB: beaconDB,
|
||||
StateGen: stategen.New(beaconDB),
|
||||
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
|
||||
DepositCache: depositCache,
|
||||
}
|
||||
service, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
gs, _ := testutil.DeterministicGenesisState(t, 32)
|
||||
require.NoError(t, service.saveGenesisData(ctx, gs))
|
||||
gBlk, err := service.beaconDB.GenesisBlock(ctx)
|
||||
require.NoError(t, err)
|
||||
gRoot, err := gBlk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
service.finalizedCheckpt = ðpb.Checkpoint{Root: gRoot[:]}
|
||||
gs = gs.Copy()
|
||||
assert.NoError(t, gs.SetEth1Data(ðpb.Eth1Data{DepositCount: 10}))
|
||||
assert.NoError(t, service.stateGen.SaveState(ctx, [32]byte{'m', 'o', 'c', 'k'}, gs))
|
||||
zeroSig := [96]byte{}
|
||||
for i := uint64(0); i < uint64(4*params.BeaconConfig().SlotsPerEpoch); i++ {
|
||||
root := []byte(strconv.Itoa(int(i)))
|
||||
depositCache.InsertDeposit(ctx, ðpb.Deposit{Data: ðpb.Deposit_Data{
|
||||
PublicKey: bytesutil.FromBytes48([48]byte{}),
|
||||
WithdrawalCredentials: params.BeaconConfig().ZeroHash[:],
|
||||
Amount: 0,
|
||||
Signature: zeroSig[:],
|
||||
}, Proof: [][]byte{root}}, 100+i, int64(i), bytesutil.ToBytes32(root))
|
||||
}
|
||||
assert.NoError(t, service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k'}))
|
||||
fDeposits := depositCache.FinalizedDeposits(ctx)
|
||||
assert.Equal(t, 9, int(fDeposits.MerkleTrieIndex), "Finalized deposits not inserted correctly")
|
||||
deps := depositCache.AllDeposits(ctx, big.NewInt(109))
|
||||
for _, d := range deps {
|
||||
assert.DeepEqual(t, [][]byte(nil), d.Proof, "Proofs are not empty")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,20 +1,18 @@
package blockchain

import (
"bytes"
"context"
"fmt"
"time"

"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/roughtime"
"github.com/prysmaticlabs/prysm/shared/slotutil"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
@@ -23,9 +21,9 @@ import (
// AttestationReceiver interface defines the methods of chain service receive and processing new attestations.
type AttestationReceiver interface {
ReceiveAttestationNoPubsub(ctx context.Context, att *ethpb.Attestation) error
IsValidAttestation(ctx context.Context, att *ethpb.Attestation) bool
AttestationPreState(ctx context.Context, att *ethpb.Attestation) (*state.BeaconState, error)
AttestationCheckPtInfo(ctx context.Context, att *ethpb.Attestation) (*pb.CheckPtInfo, error)
VerifyLmdFfgConsistency(ctx context.Context, att *ethpb.Attestation) error
VerifyFinalizedConsistency(ctx context.Context, root []byte) error
}

// ReceiveAttestationNoPubsub is a function that defines the operations that are performed on
@@ -37,40 +35,13 @@ func (s *Service) ReceiveAttestationNoPubsub(ctx context.Context, att *ethpb.Att
ctx, span := trace.StartSpan(ctx, "beacon-chain.blockchain.ReceiveAttestationNoPubsub")
defer span.End()

_, err := s.onAttestation(ctx, att)
if err != nil {
if err := s.onAttestation(ctx, att); err != nil {
return errors.Wrap(err, "could not process attestation")
}

if !featureconfig.Get().DisableUpdateHeadPerAttestation {
// This updates fork choice head, if a new head could not be updated due to
// long range or intermediate forking. It simply logs a warning and returns nil
// as that's more appropriate than returning errors.
if err := s.updateHead(ctx, s.getJustifiedBalances()); err != nil {
log.Warnf("Resolving fork due to new attestation: %v", err)
return nil
}
}

return nil
}

// IsValidAttestation returns true if the attestation can be verified against its pre-state.
func (s *Service) IsValidAttestation(ctx context.Context, att *ethpb.Attestation) bool {
baseState, err := s.AttestationPreState(ctx, att)
if err != nil {
log.WithError(err).Error("Failed to get attestation pre state")
return false
}

if err := blocks.VerifyAttestationSignature(ctx, baseState, att); err != nil {
log.WithError(err).Error("Failed to validate attestation")
return false
}

return true
}

// AttestationPreState returns the pre state of attestation.
func (s *Service) AttestationPreState(ctx context.Context, att *ethpb.Attestation) (*state.BeaconState, error) {
ss, err := helpers.StartSlot(att.Data.Target.Epoch)
@@ -83,21 +54,50 @@ func (s *Service) AttestationPreState(ctx context.Context, att *ethpb.Attestatio
return s.getAttPreState(ctx, att.Data.Target)
}

// AttestationCheckPtInfo returns the check point info of attestation that can be used to verify the attestation
// contents and signatures.
func (s *Service) AttestationCheckPtInfo(ctx context.Context, att *ethpb.Attestation) (*pb.CheckPtInfo, error) {
ss, err := helpers.StartSlot(att.Data.Target.Epoch)
// VerifyLmdFfgConsistency verifies that attestation's LMD and FFG votes are consistency to each other.
func (s *Service) VerifyLmdFfgConsistency(ctx context.Context, a *ethpb.Attestation) error {
targetSlot, err := helpers.StartSlot(a.Data.Target.Epoch)
if err != nil {
return nil, err
return err
}
if err := helpers.ValidateSlotClock(ss, uint64(s.genesisTime.Unix())); err != nil {
return nil, err
r, err := s.ancestor(ctx, a.Data.BeaconBlockRoot, targetSlot)
if err != nil {
return err
}
return s.getAttCheckPtInfo(ctx, att.Data.Target, helpers.SlotToEpoch(att.Data.Slot))
if !bytes.Equal(a.Data.Target.Root, r) {
return errors.New("FFG and LMD votes are not consistent")
}
return nil
}

// This processes attestations from the attestation pool to account for validator votes and fork choice.
func (s *Service) processAttestation(subscribedToStateEvents chan struct{}) {
// VerifyFinalizedConsistency verifies input root is consistent with finalized store.
// When the input root is not be consistent with finalized store then we know it is not
// on the finalized check point that leads to current canonical chain and should be rejected accordingly.
func (s *Service) VerifyFinalizedConsistency(ctx context.Context, root []byte) error {
// A canonical root implies the root to has an ancestor that aligns with finalized check point.
// In this case, we could exit early to save on additional computation.
if s.forkChoiceStore.IsCanonical(bytesutil.ToBytes32(root)) {
return nil
}

f := s.FinalizedCheckpt()
ss, err := helpers.StartSlot(f.Epoch)
if err != nil {
return err
}
r, err := s.ancestor(ctx, root, ss)
if err != nil {
return err
}
if !bytes.Equal(f.Root, r) {
return errors.New("Root and finalized store are not consistent")
}

return nil
}
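A hedged usage sketch of the two checks above; the wrapper name is hypothetical and not part of the diff. Both checks reduce to the same pattern: resolve the ancestor of a block root at a checkpoint's start slot and compare it against the expected checkpoint root.

// exampleConsistencyChecks is a hypothetical caller composing the two checks.
func exampleConsistencyChecks(ctx context.Context, s *Service, a *ethpb.Attestation) error {
	// Target root must be the ancestor of the attested block root at the
	// target epoch's start slot.
	if err := s.VerifyLmdFfgConsistency(ctx, a); err != nil {
		return err
	}
	// The attested block root must descend from the finalized checkpoint.
	return s.VerifyFinalizedConsistency(ctx, a.Data.BeaconBlockRoot)
}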

// This routine processes fork choice attestations from the pool to account for validator votes and fork choice.
func (s *Service) processAttestationsRoutine(subscribedToStateEvents chan<- struct{}) {
// Wait for state to be initialized.
stateChannel := make(chan *feed.Event, 1)
stateSub := s.stateNotifier.StateFeed().Subscribe(stateChannel)
@@ -105,67 +105,67 @@ func (s *Service) processAttestation(subscribedToStateEvents chan struct{}) {
<-stateChannel
stateSub.Unsubscribe()

st := slotutil.GetSlotTicker(s.genesisTime, params.BeaconConfig().SecondsPerSlot)
if s.genesisTime.IsZero() {
log.Warn("ProcessAttestations routine waiting for genesis time")
for s.genesisTime.IsZero() {
time.Sleep(1 * time.Second)
}
log.Warn("Genesis time received, now available to process attestations")
}

st := slotutil.NewSlotTicker(s.genesisTime, params.BeaconConfig().SecondsPerSlot)
for {
select {
case <-s.ctx.Done():
return
case <-st.C():
ctx := s.ctx
atts := s.attPool.ForkchoiceAttestations()
for _, a := range atts {
// Based on the spec, don't process the attestation until the subsequent slot.
// This delays consideration in the fork choice until their slot is in the past.
// https://github.com/ethereum/eth2.0-specs/blob/dev/specs/phase0/fork-choice.md#validate_on_attestation
nextSlot := a.Data.Slot + 1
if err := helpers.VerifySlotTime(uint64(s.genesisTime.Unix()), nextSlot, params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil {
continue
}

hasState := s.stateGen.StateSummaryExists(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot))
hasBlock := s.hasBlock(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot))
if !(hasState && hasBlock) {
continue
}

if err := s.attPool.DeleteForkchoiceAttestation(a); err != nil {
log.WithError(err).Error("Could not delete fork choice attestation in pool")
}

if !s.verifyCheckpointEpoch(a.Data.Target) {
continue
}

if err := s.ReceiveAttestationNoPubsub(ctx, a); err != nil {
log.WithFields(logrus.Fields{
"slot": a.Data.Slot,
"committeeIndex": a.Data.CommitteeIndex,
"beaconBlockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(a.Data.BeaconBlockRoot)),
"targetRoot": fmt.Sprintf("%#x", bytesutil.Trunc(a.Data.Target.Root)),
"aggregationCount": a.AggregationBits.Count(),
}).WithError(err).Warn("Could not receive attestation in chain service")
}
// Continue when there's no fork choice attestation, there's nothing to process and update head.
// This covers the condition when the node is still initial syncing to the head of the chain.
if s.attPool.ForkchoiceAttestationCount() == 0 {
continue
}
s.processAttestations(s.ctx)
if err := s.updateHead(s.ctx, s.getJustifiedBalances()); err != nil {
log.Warnf("Resolving fork due to new attestation: %v", err)
}
}
}
}

// This verifies the epoch of input checkpoint is within current epoch and previous epoch
// with respect to current time. Returns true if it's within, false if it's not.
func (s *Service) verifyCheckpointEpoch(c *ethpb.Checkpoint) bool {
now := uint64(roughtime.Now().Unix())
genesisTime := uint64(s.genesisTime.Unix())
currentSlot := (now - genesisTime) / params.BeaconConfig().SecondsPerSlot
currentEpoch := helpers.SlotToEpoch(currentSlot)
// This processes fork choice attestations from the pool to account for validator votes and fork choice.
func (s *Service) processAttestations(ctx context.Context) {
atts := s.attPool.ForkchoiceAttestations()
for _, a := range atts {
// Based on the spec, don't process the attestation until the subsequent slot.
// This delays consideration in the fork choice until their slot is in the past.
// https://github.com/ethereum/eth2.0-specs/blob/dev/specs/phase0/fork-choice.md#validate_on_attestation
nextSlot := a.Data.Slot + 1
if err := helpers.VerifySlotTime(uint64(s.genesisTime.Unix()), nextSlot, params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil {
continue
}

var prevEpoch uint64
if currentEpoch > 1 {
prevEpoch = currentEpoch - 1
hasState := s.beaconDB.HasStateSummary(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot))
hasBlock := s.hasBlock(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot))
if !(hasState && hasBlock) {
continue
}

if err := s.attPool.DeleteForkchoiceAttestation(a); err != nil {
log.WithError(err).Error("Could not delete fork choice attestation in pool")
}

if !helpers.VerifyCheckpointEpoch(a.Data.Target, s.genesisTime) {
continue
}

if err := s.ReceiveAttestationNoPubsub(ctx, a); err != nil {
log.WithFields(logrus.Fields{
"slot": a.Data.Slot,
"committeeIndex": a.Data.CommitteeIndex,
"beaconBlockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(a.Data.BeaconBlockRoot)),
"targetRoot": fmt.Sprintf("%#x", bytesutil.Trunc(a.Data.Target.Root)),
"aggregationCount": a.AggregationBits.Count(),
}).WithError(err).Warn("Could not process attestation for fork choice")
}
}

if c.Epoch != prevEpoch && c.Epoch != currentEpoch {
return false
}

return true
}
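A standalone sketch of the window check that verifyCheckpointEpoch performs, with the slot and epoch arithmetic inlined for illustration; the real code reads SecondsPerSlot and SlotsPerEpoch from params and uses roughtime, and the function name here is hypothetical.

// checkpointEpochInWindow returns true when the checkpoint epoch is the
// wall-clock current epoch or the epoch immediately before it.
func checkpointEpochInWindow(cpEpoch, nowUnix, genesisUnix, secondsPerSlot, slotsPerEpoch uint64) bool {
	currentSlot := (nowUnix - genesisUnix) / secondsPerSlot
	currentEpoch := currentSlot / slotsPerEpoch
	var prevEpoch uint64
	if currentEpoch > 1 {
		prevEpoch = currentEpoch - 1
	}
	return cpEpoch == currentEpoch || cpEpoch == prevEpoch
}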
|
||||
|
||||
@@ -5,45 +5,117 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
"github.com/prysmaticlabs/prysm/shared/timeutils"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
|
||||
func TestVerifyCheckpointEpoch_Ok(t *testing.T) {
|
||||
func TestAttestationCheckPtState_FarFutureSlot(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
db, sc := testDB.SetupDB(t)
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
chainService := setupBeaconChain(t, db, sc)
|
||||
chainService := setupBeaconChain(t, beaconDB)
|
||||
chainService.genesisTime = time.Now()
|
||||
|
||||
assert.Equal(t, true, chainService.verifyCheckpointEpoch(ðpb.Checkpoint{Root: make([]byte, 32)}))
|
||||
assert.Equal(t, false, chainService.verifyCheckpointEpoch(ðpb.Checkpoint{Epoch: 1}))
|
||||
}
|
||||
|
||||
func TestAttestationPreState_FarFutureSlot(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
db, sc := testDB.SetupDB(t)
|
||||
|
||||
chainService := setupBeaconChain(t, db, sc)
|
||||
chainService.genesisTime = time.Now()
|
||||
|
||||
e := helpers.MaxSlotBuffer/params.BeaconConfig().SlotsPerEpoch + 1
|
||||
_, err := chainService.AttestationCheckPtInfo(context.Background(), ðpb.Attestation{Data: ðpb.AttestationData{Target: ðpb.Checkpoint{Epoch: e}}})
|
||||
require.ErrorContains(t, "exceeds max allowed value relative to the local clock", err)
|
||||
}
|
||||
|
||||
func TestAttestationCheckPtInfo_FarFutureSlot(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
db, sc := testDB.SetupDB(t)
|
||||
|
||||
chainService := setupBeaconChain(t, db, sc)
|
||||
chainService.genesisTime = time.Now()
|
||||
|
||||
e := helpers.MaxSlotBuffer/params.BeaconConfig().SlotsPerEpoch + 1
|
||||
e := types.Epoch(helpers.MaxSlotBuffer/uint64(params.BeaconConfig().SlotsPerEpoch) + 1)
|
||||
_, err := chainService.AttestationPreState(context.Background(), ðpb.Attestation{Data: ðpb.AttestationData{Target: ðpb.Checkpoint{Epoch: e}}})
|
||||
require.ErrorContains(t, "exceeds max allowed value relative to the local clock", err)
|
||||
}
|
||||
|
||||
func TestVerifyLMDFFGConsistent_NotOK(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
|
||||
service, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
b32 := testutil.NewBeaconBlock()
|
||||
b32.Block.Slot = 32
|
||||
require.NoError(t, service.beaconDB.SaveBlock(ctx, b32))
|
||||
r32, err := b32.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
b33 := testutil.NewBeaconBlock()
|
||||
b33.Block.Slot = 33
|
||||
b33.Block.ParentRoot = r32[:]
|
||||
require.NoError(t, service.beaconDB.SaveBlock(ctx, b33))
|
||||
r33, err := b33.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
wanted := "FFG and LMD votes are not consistent"
|
||||
a := testutil.NewAttestation()
|
||||
a.Data.Target.Epoch = 1
|
||||
a.Data.Target.Root = []byte{'a'}
|
||||
a.Data.BeaconBlockRoot = r33[:]
|
||||
require.ErrorContains(t, wanted, service.VerifyLmdFfgConsistency(context.Background(), a))
|
||||
}
|
||||
|
||||
func TestVerifyLMDFFGConsistent_OK(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
|
||||
service, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
b32 := testutil.NewBeaconBlock()
|
||||
b32.Block.Slot = 32
|
||||
require.NoError(t, service.beaconDB.SaveBlock(ctx, b32))
|
||||
r32, err := b32.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
b33 := testutil.NewBeaconBlock()
|
||||
b33.Block.Slot = 33
|
||||
b33.Block.ParentRoot = r32[:]
|
||||
require.NoError(t, service.beaconDB.SaveBlock(ctx, b33))
|
||||
r33, err := b33.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
a := testutil.NewAttestation()
|
||||
a.Data.Target.Epoch = 1
|
||||
a.Data.Target.Root = r32[:]
|
||||
a.Data.BeaconBlockRoot = r33[:]
|
||||
err = service.VerifyLmdFfgConsistency(context.Background(), a)
|
||||
require.NoError(t, err, "Could not verify LMD and FFG votes to be consistent")
|
||||
}
|
||||
|
||||
func TestProcessAttestations_Ok(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{
|
||||
BeaconDB: beaconDB,
|
||||
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
|
||||
StateGen: stategen.New(beaconDB),
|
||||
AttPool: attestations.NewPool(),
|
||||
}
|
||||
service, err := NewService(ctx, cfg)
|
||||
service.genesisTime = timeutils.Now().Add(-1 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
|
||||
require.NoError(t, err)
|
||||
genesisState, pks := testutil.DeterministicGenesisState(t, 64)
|
||||
require.NoError(t, genesisState.SetGenesisTime(uint64(timeutils.Now().Unix())-params.BeaconConfig().SecondsPerSlot))
|
||||
require.NoError(t, service.saveGenesisData(ctx, genesisState))
|
||||
atts, err := testutil.GenerateAttestations(genesisState, pks, 1, 0, false)
|
||||
require.NoError(t, err)
|
||||
tRoot := bytesutil.ToBytes32(atts[0].Data.Target.Root)
|
||||
copied := genesisState.Copy()
|
||||
copied, err = state.ProcessSlots(ctx, copied, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.beaconDB.SaveState(ctx, copied, tRoot))
|
||||
require.NoError(t, service.forkChoiceStore.ProcessBlock(ctx, 0, tRoot, tRoot, tRoot, 1, 1))
|
||||
require.NoError(t, service.attPool.SaveForkchoiceAttestations(atts))
|
||||
service.processAttestations(ctx)
|
||||
require.Equal(t, 0, len(service.attPool.ForkchoiceAttestations()))
|
||||
require.LogsDoNotContain(t, hook, "Could not process attestation for fork choice")
|
||||
}
|
||||
|
||||
@@ -4,19 +4,24 @@ import (
|
||||
"context"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/shared/featureconfig"
|
||||
"github.com/prysmaticlabs/prysm/shared/timeutils"
|
||||
"github.com/prysmaticlabs/prysm/shared/traceutil"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// This defines how many epochs since finality the run time will begin to save hot state on to the DB.
|
||||
var epochsSinceFinalitySaveHotStateDB = types.Epoch(100)
|
||||
|
||||
// BlockReceiver interface defines the methods of chain service receive and processing new blocks.
|
||||
type BlockReceiver interface {
|
||||
ReceiveBlock(ctx context.Context, block *ethpb.SignedBeaconBlock, blockRoot [32]byte) error
|
||||
ReceiveBlockInitialSync(ctx context.Context, block *ethpb.SignedBeaconBlock, blockRoot [32]byte) error
|
||||
ReceiveBlockBatch(ctx context.Context, blocks []*ethpb.SignedBeaconBlock, blkRoots [][32]byte) error
|
||||
HasInitSyncBlock(root [32]byte) bool
|
||||
}
|
||||
@@ -29,6 +34,7 @@ type BlockReceiver interface {
|
||||
func (s *Service) ReceiveBlock(ctx context.Context, block *ethpb.SignedBeaconBlock, blockRoot [32]byte) error {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveBlock")
|
||||
defer span.End()
|
||||
receivedTime := timeutils.Now()
|
||||
blockCopy := stateTrie.CopySignedBeaconBlock(block)
|
||||
|
||||
// Apply state transition on the new block.
|
||||
@@ -39,70 +45,41 @@ func (s *Service) ReceiveBlock(ctx context.Context, block *ethpb.SignedBeaconBlo
|
||||
}
|
||||
|
||||
// Update and save head block after fork choice.
|
||||
if err := s.updateHead(ctx, s.getJustifiedBalances()); err != nil {
|
||||
log.WithError(err).Warn("Could not update head")
|
||||
if !featureconfig.Get().UpdateHeadTimely {
|
||||
if err := s.updateHead(ctx, s.getJustifiedBalances()); err != nil {
|
||||
log.WithError(err).Warn("Could not update head")
|
||||
}
|
||||
// Send notification of the processed block to the state feed.
|
||||
s.stateNotifier.StateFeed().Send(&feed.Event{
|
||||
Type: statefeed.BlockProcessed,
|
||||
Data: &statefeed.BlockProcessedData{
|
||||
Slot: blockCopy.Block.Slot,
|
||||
BlockRoot: blockRoot,
|
||||
SignedBlock: blockCopy,
|
||||
Verified: true,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// Send notification of the processed block to the state feed.
|
||||
s.stateNotifier.StateFeed().Send(&feed.Event{
|
||||
Type: statefeed.BlockProcessed,
|
||||
Data: &statefeed.BlockProcessedData{
|
||||
Slot: blockCopy.Block.Slot,
|
||||
BlockRoot: blockRoot,
|
||||
Verified: true,
|
||||
},
|
||||
})
|
||||
|
||||
// Handle post block operations such as attestations and exits.
|
||||
if err := s.handlePostBlockOperations(blockCopy.Block); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Reports on block and fork choice metrics.
|
||||
reportSlotMetrics(blockCopy.Block.Slot, s.headSlot(), s.CurrentSlot(), s.finalizedCheckpt)
|
||||
|
||||
// Log block sync status.
|
||||
logBlockSyncStatus(blockCopy.Block, blockRoot, s.finalizedCheckpt)
|
||||
|
||||
// Log state transition data.
|
||||
logStateTransitionData(blockCopy.Block)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReceiveBlockInitialSync processes the input block for the purpose of initial syncing.
|
||||
// This method should only be used on blocks during initial syncing phase.
|
||||
func (s *Service) ReceiveBlockInitialSync(ctx context.Context, block *ethpb.SignedBeaconBlock, blockRoot [32]byte) error {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveBlockNoVerify")
|
||||
defer span.End()
|
||||
blockCopy := stateTrie.CopySignedBeaconBlock(block)
|
||||
|
||||
// Apply state transition on the new block.
|
||||
if err := s.onBlockInitialSyncStateTransition(ctx, blockCopy, blockRoot); err != nil {
|
||||
err := errors.Wrap(err, "could not process block")
|
||||
traceutil.AnnotateError(span, err)
|
||||
// Have we been finalizing? Should we start saving hot states to db?
|
||||
if err := s.checkSaveHotStateDB(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Send notification of the processed block to the state feed.
|
||||
s.stateNotifier.StateFeed().Send(&feed.Event{
|
||||
Type: statefeed.BlockProcessed,
|
||||
Data: &statefeed.BlockProcessedData{
|
||||
Slot: blockCopy.Block.Slot,
|
||||
BlockRoot: blockRoot,
|
||||
Verified: true,
|
||||
},
|
||||
})
|
||||
|
||||
// Reports on blockCopy and fork choice metrics.
|
||||
reportSlotMetrics(blockCopy.Block.Slot, s.headSlot(), s.CurrentSlot(), s.finalizedCheckpt)
|
||||
// Reports on block and fork choice metrics.
|
||||
reportSlotMetrics(blockCopy.Block.Slot, s.HeadSlot(), s.CurrentSlot(), s.finalizedCheckpt)
|
||||
|
||||
// Log block sync status.
|
||||
if err := logBlockSyncStatus(blockCopy.Block, blockRoot, s.finalizedCheckpt, receivedTime, uint64(s.genesisTime.Unix())); err != nil {
|
||||
return err
|
||||
}
|
||||
// Log state transition data.
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": blockCopy.Block.Slot,
|
||||
"attestations": len(blockCopy.Block.Body.Attestations),
|
||||
"deposits": len(blockCopy.Block.Body.Deposits),
|
||||
}).Debug("Finished applying state transition")
|
||||
logStateTransitionData(blockCopy.Block)
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -132,14 +109,22 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []*ethpb.SignedB
|
||||
s.stateNotifier.StateFeed().Send(&feed.Event{
|
||||
Type: statefeed.BlockProcessed,
|
||||
Data: &statefeed.BlockProcessedData{
|
||||
Slot: blockCopy.Block.Slot,
|
||||
BlockRoot: blkRoots[i],
|
||||
Verified: true,
|
||||
Slot: blockCopy.Block.Slot,
|
||||
BlockRoot: blkRoots[i],
|
||||
SignedBlock: blockCopy,
|
||||
Verified: true,
|
||||
},
|
||||
})
|
||||
|
||||
// Reports on blockCopy and fork choice metrics.
|
||||
reportSlotMetrics(blockCopy.Block.Slot, s.headSlot(), s.CurrentSlot(), s.finalizedCheckpt)
|
||||
reportSlotMetrics(blockCopy.Block.Slot, s.HeadSlot(), s.CurrentSlot(), s.finalizedCheckpt)
|
||||
}
|
||||
|
||||
if err := s.VerifyWeakSubjectivityRoot(s.ctx); err != nil {
|
||||
// log.Fatalf will prevent defer from being called
|
||||
span.End()
|
||||
// Exit run time if the node failed to verify weak subjectivity checkpoint.
|
||||
log.Fatalf("Could not verify weak subjectivity checkpoint: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -172,3 +157,21 @@ func (s *Service) handlePostBlockOperations(b *ethpb.BeaconBlock) error {
}
return nil
}

// This checks whether it's time to start saving hot state to DB.
// It's time when there's `epochsSinceFinalitySaveHotStateDB` epochs of non-finality.
func (s *Service) checkSaveHotStateDB(ctx context.Context) error {
currentEpoch := helpers.SlotToEpoch(s.CurrentSlot())
// Prevent `sinceFinality` going underflow.
var sinceFinality types.Epoch
if currentEpoch > s.finalizedCheckpt.Epoch {
sinceFinality = currentEpoch - s.finalizedCheckpt.Epoch
}

if sinceFinality >= epochsSinceFinalitySaveHotStateDB {
s.stateGen.EnableSaveHotStateToDB(ctx)
return nil
}

return s.stateGen.DisableSaveHotStateToDB(ctx)
}
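A minimal sketch of the decision checkSaveHotStateDB encodes: hot states start going to the DB once the distance from the finalized epoch reaches epochsSinceFinalitySaveHotStateDB, declared earlier in this file as 100 epochs. The helper name and plain uint64 epochs are simplifications for illustration.

// shouldSaveHotStatesToDB mirrors the threshold check above with the epoch
// arithmetic inlined.
func shouldSaveHotStatesToDB(currentEpoch, finalizedEpoch, threshold uint64) bool {
	var sinceFinality uint64
	if currentEpoch > finalizedEpoch { // guard against underflow, as in the method above
		sinceFinality = currentEpoch - finalizedEpoch
	}
	return sinceFinality >= threshold
}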
|
||||
|
||||
@@ -4,7 +4,9 @@ import (
|
||||
"context"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
blockchainTesting "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
@@ -17,13 +19,14 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
|
||||
func TestService_ReceiveBlock(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
genesis, keys := testutil.DeterministicGenesisState(t, 64)
|
||||
genFullBlock := func(t *testing.T, conf *testutil.BlockGenConfig, slot uint64) *ethpb.SignedBeaconBlock {
|
||||
genFullBlock := func(t *testing.T, conf *testutil.BlockGenConfig, slot types.Slot) *ethpb.SignedBeaconBlock {
|
||||
blk, err := testutil.GenerateFullBlock(genesis, keys, conf, slot)
|
||||
assert.NoError(t, err)
|
||||
return blk
|
||||
@@ -76,7 +79,6 @@ func TestService_ReceiveBlock(t *testing.T) {
|
||||
}
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
name: "updates exit pool",
|
||||
args: args{
|
||||
@@ -91,14 +93,13 @@ func TestService_ReceiveBlock(t *testing.T) {
|
||||
),
|
||||
},
|
||||
check: func(t *testing.T, s *Service) {
|
||||
var n int
|
||||
for i := uint64(0); int(i) < genesis.NumValidators(); i++ {
|
||||
if s.exitPool.HasBeenIncluded(i) {
|
||||
n++
|
||||
}
|
||||
}
|
||||
if n != 3 {
|
||||
t.Errorf("Did not mark the correct number of exits. Got %d but wanted %d", n, 3)
|
||||
pending := s.exitPool.PendingExits(genesis, 1, true /* no limit */)
|
||||
if len(pending) != 0 {
|
||||
t.Errorf(
|
||||
"Did not mark the correct number of exits. Got %d pending but wanted %d",
|
||||
len(pending),
|
||||
0,
|
||||
)
|
||||
}
|
||||
},
|
||||
},
|
||||
@@ -117,12 +118,12 @@ func TestService_ReceiveBlock(t *testing.T) {
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
db, stateSummaryCache := testDB.SetupDB(t)
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
genesisBlockRoot := bytesutil.ToBytes32(nil)
|
||||
require.NoError(t, db.SaveState(ctx, genesis, genesisBlockRoot))
|
||||
require.NoError(t, beaconDB.SaveState(ctx, genesis, genesisBlockRoot))
|
||||
|
||||
cfg := &Config{
|
||||
BeaconDB: db,
|
||||
BeaconDB: beaconDB,
|
||||
ForkChoiceStore: protoarray.New(
|
||||
0, // justifiedEpoch
|
||||
0, // finalizedEpoch
|
||||
@@ -131,7 +132,7 @@ func TestService_ReceiveBlock(t *testing.T) {
|
||||
AttPool: attestations.NewPool(),
|
||||
ExitPool: voluntaryexits.NewPool(),
|
||||
StateNotifier: &blockchainTesting.MockStateNotifier{RecordEvents: true},
|
||||
StateGen: stategen.New(db, stateSummaryCache),
|
||||
StateGen: stategen.New(beaconDB),
|
||||
}
|
||||
s, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
@@ -139,6 +140,7 @@ func TestService_ReceiveBlock(t *testing.T) {
|
||||
gBlk, err := s.beaconDB.GenesisBlock(ctx)
|
||||
require.NoError(t, err)
|
||||
gRoot, err := gBlk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
s.finalizedCheckpt = ðpb.Checkpoint{Root: gRoot[:]}
|
||||
root, err := tt.args.block.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
@@ -158,11 +160,11 @@ func TestService_ReceiveBlockUpdateHead(t *testing.T) {
|
||||
genesis, keys := testutil.DeterministicGenesisState(t, 64)
|
||||
b, err := testutil.GenerateFullBlock(genesis, keys, testutil.DefaultBlockGenConfig(), 1)
|
||||
assert.NoError(t, err)
|
||||
db, stateSummaryCache := testDB.SetupDB(t)
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
genesisBlockRoot := bytesutil.ToBytes32(nil)
|
||||
require.NoError(t, db.SaveState(ctx, genesis, genesisBlockRoot))
|
||||
require.NoError(t, beaconDB.SaveState(ctx, genesis, genesisBlockRoot))
|
||||
cfg := &Config{
|
||||
BeaconDB: db,
|
||||
BeaconDB: beaconDB,
|
||||
ForkChoiceStore: protoarray.New(
|
||||
0, // justifiedEpoch
|
||||
0, // finalizedEpoch
|
||||
@@ -171,7 +173,7 @@ func TestService_ReceiveBlockUpdateHead(t *testing.T) {
|
||||
AttPool: attestations.NewPool(),
|
||||
ExitPool: voluntaryexits.NewPool(),
|
||||
StateNotifier: &blockchainTesting.MockStateNotifier{RecordEvents: true},
|
||||
StateGen: stategen.New(db, stateSummaryCache),
|
||||
StateGen: stategen.New(beaconDB),
|
||||
}
|
||||
s, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
@@ -179,6 +181,7 @@ func TestService_ReceiveBlockUpdateHead(t *testing.T) {
|
||||
gBlk, err := s.beaconDB.GenesisBlock(ctx)
|
||||
require.NoError(t, err)
|
||||
gRoot, err := gBlk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
s.finalizedCheckpt = ðpb.Checkpoint{Root: gRoot[:]}
|
||||
root, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
@@ -196,91 +199,11 @@ func TestService_ReceiveBlockUpdateHead(t *testing.T) {
|
||||
assert.Equal(t, 2, len(s.forkChoiceStore.Nodes()))
|
||||
}
|
||||
|
||||
func TestService_ReceiveBlockInitialSync(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
genesis, keys := testutil.DeterministicGenesisState(t, 64)
|
||||
genFullBlock := func(t *testing.T, conf *testutil.BlockGenConfig, slot uint64) *ethpb.SignedBeaconBlock {
|
||||
blk, err := testutil.GenerateFullBlock(genesis, keys, conf, slot)
|
||||
assert.NoError(t, err)
|
||||
return blk
|
||||
}
|
||||
|
||||
type args struct {
|
||||
block *ethpb.SignedBeaconBlock
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
wantedErr string
|
||||
check func(*testing.T, *Service)
|
||||
}{
|
||||
{
|
||||
name: "applies block with state transition",
|
||||
args: args{
|
||||
block: genFullBlock(t, testutil.DefaultBlockGenConfig(), 2 /*slot*/),
|
||||
},
|
||||
check: func(t *testing.T, s *Service) {
|
||||
assert.Equal(t, uint64(2), s.head.state.Slot(), "Incorrect head state slot")
|
||||
assert.Equal(t, uint64(2), s.head.block.Block.Slot, "Incorrect head block slot")
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "notifies block processed on state feed",
|
||||
args: args{
|
||||
block: genFullBlock(t, testutil.DefaultBlockGenConfig(), 1 /*slot*/),
|
||||
},
|
||||
check: func(t *testing.T, s *Service) {
|
||||
if recvd := len(s.stateNotifier.(*blockchainTesting.MockStateNotifier).ReceivedEvents()); recvd < 1 {
|
||||
t.Errorf("Received %d state notifications, expected at least 1", recvd)
|
||||
}
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
db, stateSummaryCache := testDB.SetupDB(t)
|
||||
genesisBlockRoot := bytesutil.ToBytes32(nil)
|
||||
|
||||
cfg := &Config{
|
||||
BeaconDB: db,
|
||||
ForkChoiceStore: protoarray.New(
|
||||
0, // justifiedEpoch
|
||||
0, // finalizedEpoch
|
||||
genesisBlockRoot,
|
||||
),
|
||||
StateNotifier: &blockchainTesting.MockStateNotifier{RecordEvents: true},
|
||||
StateGen: stategen.New(db, stateSummaryCache),
|
||||
}
|
||||
s, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
err = s.saveGenesisData(ctx, genesis)
|
||||
require.NoError(t, err)
|
||||
gBlk, err := s.beaconDB.GenesisBlock(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
gRoot, err := gBlk.Block.HashTreeRoot()
|
||||
s.finalizedCheckpt = ðpb.Checkpoint{Root: gRoot[:]}
|
||||
root, err := tt.args.block.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
err = s.ReceiveBlockInitialSync(ctx, tt.args.block, root)
|
||||
if tt.wantedErr != "" {
|
||||
assert.ErrorContains(t, tt.wantedErr, err)
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
tt.check(t, s)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestService_ReceiveBlockBatch(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
genesis, keys := testutil.DeterministicGenesisState(t, 64)
|
||||
genFullBlock := func(t *testing.T, conf *testutil.BlockGenConfig, slot uint64) *ethpb.SignedBeaconBlock {
|
||||
genFullBlock := func(t *testing.T, conf *testutil.BlockGenConfig, slot types.Slot) *ethpb.SignedBeaconBlock {
|
||||
blk, err := testutil.GenerateFullBlock(genesis, keys, conf, slot)
|
||||
assert.NoError(t, err)
|
||||
return blk
|
||||
@@ -301,8 +224,8 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
|
||||
block: genFullBlock(t, testutil.DefaultBlockGenConfig(), 2 /*slot*/),
|
||||
},
|
||||
check: func(t *testing.T, s *Service) {
|
||||
assert.Equal(t, uint64(2), s.head.state.Slot(), "Incorrect head state slot")
|
||||
assert.Equal(t, uint64(2), s.head.block.Block.Slot, "Incorrect head block slot")
|
||||
assert.Equal(t, types.Slot(2), s.head.state.Slot(), "Incorrect head state slot")
|
||||
assert.Equal(t, types.Slot(2), s.head.block.Block.Slot, "Incorrect head block slot")
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -320,18 +243,18 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
db, stateSummaryCache := testDB.SetupDB(t)
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
genesisBlockRoot, err := genesis.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
cfg := &Config{
|
||||
BeaconDB: db,
|
||||
BeaconDB: beaconDB,
|
||||
ForkChoiceStore: protoarray.New(
|
||||
0, // justifiedEpoch
|
||||
0, // finalizedEpoch
|
||||
genesisBlockRoot,
|
||||
),
|
||||
StateNotifier: &blockchainTesting.MockStateNotifier{RecordEvents: true},
|
||||
StateGen: stategen.New(db, stateSummaryCache),
|
||||
StateGen: stategen.New(beaconDB),
|
||||
}
|
||||
s, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
@@ -341,6 +264,7 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
gRoot, err := gBlk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
s.finalizedCheckpt = &ethpb.Checkpoint{Root: gRoot[:]}
|
||||
root, err := tt.args.block.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
@@ -358,7 +282,7 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestService_HasInitSyncBlock(t *testing.T) {
|
||||
s, err := NewService(context.Background(), &Config{})
|
||||
s, err := NewService(context.Background(), &Config{StateNotifier: &blockchainTesting.MockStateNotifier{}})
|
||||
require.NoError(t, err)
|
||||
r := [32]byte{'a'}
|
||||
if s.HasInitSyncBlock(r) {
|
||||
@@ -369,3 +293,41 @@ func TestService_HasInitSyncBlock(t *testing.T) {
|
||||
t.Error("Should have block")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCheckSaveHotStateDB_Enabling(t *testing.T) {
	beaconDB := testDB.SetupDB(t)
	hook := logTest.NewGlobal()
	s, err := NewService(context.Background(), &Config{StateGen: stategen.New(beaconDB)})
	require.NoError(t, err)
	st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
	s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
	s.finalizedCheckpt = &ethpb.Checkpoint{}

	require.NoError(t, s.checkSaveHotStateDB(context.Background()))
	assert.LogsContain(t, hook, "Entering mode to save hot states in DB")
}

func TestCheckSaveHotStateDB_Disabling(t *testing.T) {
	beaconDB := testDB.SetupDB(t)
	hook := logTest.NewGlobal()
	s, err := NewService(context.Background(), &Config{StateGen: stategen.New(beaconDB)})
	require.NoError(t, err)
	s.finalizedCheckpt = &ethpb.Checkpoint{}
	require.NoError(t, s.checkSaveHotStateDB(context.Background()))
	s.genesisTime = time.Now()

	require.NoError(t, s.checkSaveHotStateDB(context.Background()))
	assert.LogsContain(t, hook, "Exiting mode to save hot states in DB")
}

func TestCheckSaveHotStateDB_Overflow(t *testing.T) {
	beaconDB := testDB.SetupDB(t)
	hook := logTest.NewGlobal()
	s, err := NewService(context.Background(), &Config{StateGen: stategen.New(beaconDB)})
	require.NoError(t, err)
	s.finalizedCheckpt = &ethpb.Checkpoint{Epoch: 10000000}
	s.genesisTime = time.Now()

	require.NoError(t, s.checkSaveHotStateDB(context.Background()))
	assert.LogsDoNotContain(t, hook, "Entering mode to save hot states in DB")
}
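
// The three tests above exercise a threshold toggle: hot states start being persisted to the
// DB after a long stretch without finality, and persistence is switched off again once
// finality recovers. The snippet below is an illustrative sketch of that behaviour only; the
// function name, signature and saver interface are assumptions for the example, not the
// actual checkSaveHotStateDB implementation or its real collaborators.
type hotStateSaver interface {
	EnableSaveHotStateToDB()
	DisableSaveHotStateToDB() error
}

func checkSaveHotStateDBSketch(currentEpoch, finalizedEpoch uint64, saver hotStateSaver) error {
	const threshold = 128 // assumed value of epochsSinceFinalitySaveHotStateDB
	// Guard against the finalized epoch being ahead of the clock, as in the overflow test,
	// so the subtraction below cannot wrap around.
	if finalizedEpoch >= currentEpoch {
		return saver.DisableSaveHotStateToDB()
	}
	if currentEpoch-finalizedEpoch >= threshold {
		saver.EnableSaveHotStateToDB()
		return nil
	}
	return saver.DisableSaveHotStateToDB()
}
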
@@ -11,6 +11,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
|
||||
@@ -20,7 +21,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/flags"
|
||||
f "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
|
||||
@@ -30,51 +30,55 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
|
||||
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
"github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/slotutil"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// headSyncMinEpochsAfterCheckpoint defines how many epochs should elapse after known finalization
// checkpoint for head sync to be triggered.
const headSyncMinEpochsAfterCheckpoint = 128
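
// As a rough illustration (assuming the mainnet parameters of 32 slots per epoch and 12
// seconds per slot, which this diff does not restate): 128 epochs is 128 * 32 = 4096 slots,
// i.e. 4096 * 12 s ≈ 13.7 hours without finality before head sync becomes worthwhile.
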
// Service represents a service that handles the internal
|
||||
// logic of managing the full PoS beacon chain.
|
||||
type Service struct {
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
beaconDB db.HeadAccessDatabase
|
||||
depositCache *depositcache.DepositCache
|
||||
chainStartFetcher powchain.ChainStartFetcher
|
||||
attPool attestations.Pool
|
||||
slashingPool *slashings.Pool
|
||||
exitPool *voluntaryexits.Pool
|
||||
genesisTime time.Time
|
||||
p2p p2p.Broadcaster
|
||||
maxRoutines int
|
||||
head *head
|
||||
headLock sync.RWMutex
|
||||
stateNotifier statefeed.Notifier
|
||||
genesisRoot [32]byte
|
||||
forkChoiceStore f.ForkChoicer
|
||||
justifiedCheckpt *ethpb.Checkpoint
|
||||
prevJustifiedCheckpt *ethpb.Checkpoint
|
||||
bestJustifiedCheckpt *ethpb.Checkpoint
|
||||
finalizedCheckpt *ethpb.Checkpoint
|
||||
prevFinalizedCheckpt *ethpb.Checkpoint
|
||||
nextEpochBoundarySlot uint64
|
||||
initSyncState map[[32]byte]*stateTrie.BeaconState
|
||||
boundaryRoots [][32]byte
|
||||
checkpointState *cache.CheckpointStateCache
|
||||
checkpointStateLock sync.Mutex
|
||||
stateGen *stategen.State
|
||||
opsService *attestations.Service
|
||||
initSyncBlocks map[[32]byte]*ethpb.SignedBeaconBlock
|
||||
initSyncBlocksLock sync.RWMutex
|
||||
recentCanonicalBlocks map[[32]byte]bool
|
||||
recentCanonicalBlocksLock sync.RWMutex
|
||||
justifiedBalances []uint64
|
||||
justifiedBalancesLock sync.RWMutex
|
||||
checkPtInfoCache *checkPtInfoCache
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
beaconDB db.HeadAccessDatabase
|
||||
depositCache *depositcache.DepositCache
|
||||
chainStartFetcher powchain.ChainStartFetcher
|
||||
attPool attestations.Pool
|
||||
slashingPool slashings.PoolManager
|
||||
exitPool voluntaryexits.PoolManager
|
||||
genesisTime time.Time
|
||||
p2p p2p.Broadcaster
|
||||
maxRoutines int
|
||||
head *head
|
||||
headLock sync.RWMutex
|
||||
stateNotifier statefeed.Notifier
|
||||
genesisRoot [32]byte
|
||||
forkChoiceStore f.ForkChoicer
|
||||
justifiedCheckpt *ethpb.Checkpoint
|
||||
prevJustifiedCheckpt *ethpb.Checkpoint
|
||||
bestJustifiedCheckpt *ethpb.Checkpoint
|
||||
finalizedCheckpt *ethpb.Checkpoint
|
||||
prevFinalizedCheckpt *ethpb.Checkpoint
|
||||
nextEpochBoundarySlot types.Slot
|
||||
boundaryRoots [][32]byte
|
||||
checkpointStateCache *cache.CheckpointStateCache
|
||||
stateGen *stategen.State
|
||||
opsService *attestations.Service
|
||||
initSyncBlocks map[[32]byte]*ethpb.SignedBeaconBlock
|
||||
initSyncBlocksLock sync.RWMutex
|
||||
justifiedBalances []uint64
|
||||
justifiedBalancesLock sync.RWMutex
|
||||
wsEpoch types.Epoch
|
||||
wsRoot []byte
|
||||
wsVerified bool
|
||||
}
|
||||
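
// Several fields above change from uint64 to the Slot and Epoch defined types from
// github.com/prysmaticlabs/eth2-types. A small illustrative sketch of what the typed values
// allow, limited to operations already used elsewhere in this diff (ordered comparison and
// same-type subtraction); the function itself is made up for the example:
func epochsSinceFinalitySketch(headEpoch, finalizedEpoch types.Epoch) types.Epoch {
	// Distinct defined types keep slots and epochs from being mixed up by accident while
	// still supporting ordinary arithmetic between values of the same type.
	if headEpoch > finalizedEpoch {
		return headEpoch - finalizedEpoch
	}
	return 0
}
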
|
||||
// Config options for the service.
|
||||
@@ -84,14 +88,16 @@ type Config struct {
|
||||
BeaconDB db.HeadAccessDatabase
|
||||
DepositCache *depositcache.DepositCache
|
||||
AttPool attestations.Pool
|
||||
ExitPool *voluntaryexits.Pool
|
||||
SlashingPool *slashings.Pool
|
||||
ExitPool voluntaryexits.PoolManager
|
||||
SlashingPool slashings.PoolManager
|
||||
P2p p2p.Broadcaster
|
||||
MaxRoutines int
|
||||
StateNotifier statefeed.Notifier
|
||||
ForkChoiceStore f.ForkChoicer
|
||||
OpsService *attestations.Service
|
||||
StateGen *stategen.State
|
||||
WspBlockRoot []byte
|
||||
WspEpoch types.Epoch
|
||||
}
|
||||
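
// Note: Config now takes the slashing and voluntary-exit pools as PoolManager interfaces
// rather than concrete *Pool pointers, so tests can inject lightweight fakes. The real
// method sets of slashings.PoolManager and voluntaryexits.PoolManager are not shown in this
// diff; the pattern is simply that any type satisfying those interfaces can be supplied
// through the fields above, for example by embedding the interface in a partial fake:
//
//	type fakeSlashingPool struct{ slashings.PoolManager } // override only the methods a test needs
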
|
||||
// NewService instantiates a new block service instance that will
|
||||
@@ -99,37 +105,31 @@ type Config struct {
|
||||
func NewService(ctx context.Context, cfg *Config) (*Service, error) {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
return &Service{
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
beaconDB: cfg.BeaconDB,
|
||||
depositCache: cfg.DepositCache,
|
||||
chainStartFetcher: cfg.ChainStartFetcher,
|
||||
attPool: cfg.AttPool,
|
||||
exitPool: cfg.ExitPool,
|
||||
slashingPool: cfg.SlashingPool,
|
||||
p2p: cfg.P2p,
|
||||
maxRoutines: cfg.MaxRoutines,
|
||||
stateNotifier: cfg.StateNotifier,
|
||||
forkChoiceStore: cfg.ForkChoiceStore,
|
||||
initSyncState: make(map[[32]byte]*stateTrie.BeaconState),
|
||||
boundaryRoots: [][32]byte{},
|
||||
checkpointState: cache.NewCheckpointStateCache(),
|
||||
opsService: cfg.OpsService,
|
||||
stateGen: cfg.StateGen,
|
||||
initSyncBlocks: make(map[[32]byte]*ethpb.SignedBeaconBlock),
|
||||
recentCanonicalBlocks: make(map[[32]byte]bool),
|
||||
justifiedBalances: make([]uint64, 0),
|
||||
checkPtInfoCache: newCheckPointInfoCache(),
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
beaconDB: cfg.BeaconDB,
|
||||
depositCache: cfg.DepositCache,
|
||||
chainStartFetcher: cfg.ChainStartFetcher,
|
||||
attPool: cfg.AttPool,
|
||||
exitPool: cfg.ExitPool,
|
||||
slashingPool: cfg.SlashingPool,
|
||||
p2p: cfg.P2p,
|
||||
maxRoutines: cfg.MaxRoutines,
|
||||
stateNotifier: cfg.StateNotifier,
|
||||
forkChoiceStore: cfg.ForkChoiceStore,
|
||||
boundaryRoots: [][32]byte{},
|
||||
checkpointStateCache: cache.NewCheckpointStateCache(),
|
||||
opsService: cfg.OpsService,
|
||||
stateGen: cfg.StateGen,
|
||||
initSyncBlocks: make(map[[32]byte]*ethpb.SignedBeaconBlock),
|
||||
justifiedBalances: make([]uint64, 0),
|
||||
wsEpoch: cfg.WspEpoch,
|
||||
wsRoot: cfg.WspBlockRoot,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Start a blockchain service's main event loop.
|
||||
func (s *Service) Start() {
|
||||
beaconState, err := s.beaconDB.HeadState(s.ctx)
|
||||
if err != nil {
|
||||
log.Fatalf("Could not fetch beacon state: %v", err)
|
||||
}
|
||||
|
||||
// For running initial sync with the state cache, in the event of a restart, we use the
// last finalized checkpoint as the starting point to sync instead of the head
// state. This is because we no longer save state every slot during sync.
@@ -138,11 +138,25 @@ func (s *Service) Start() {
|
||||
log.Fatalf("Could not fetch finalized cp: %v", err)
|
||||
}
|
||||
|
||||
if beaconState == nil {
|
||||
beaconState, err = s.stateGen.StateByRoot(s.ctx, bytesutil.ToBytes32(cp.Root))
|
||||
r := bytesutil.ToBytes32(cp.Root)
|
||||
// Before the first epoch has been finalized, the finalized root is stored as the zero
// hash rather than the genesis block root. In that case we use the genesis root to
// retrieve the state.
if r == params.BeaconConfig().ZeroHash {
|
||||
genesisBlock, err := s.beaconDB.GenesisBlock(s.ctx)
|
||||
if err != nil {
|
||||
log.Fatalf("Could not fetch beacon state by root: %v", err)
|
||||
log.Fatalf("Could not fetch finalized cp: %v", err)
|
||||
}
|
||||
if genesisBlock != nil {
|
||||
r, err = genesisBlock.Block.HashTreeRoot()
|
||||
if err != nil {
|
||||
log.Fatalf("Could not tree hash genesis block: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
beaconState, err := s.stateGen.StateByRoot(s.ctx, r)
|
||||
if err != nil {
|
||||
log.Fatalf("Could not fetch beacon state by root: %v", err)
|
||||
}
|
||||
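
// The zero-hash special case above mirrors what initializeChainInfo does later through
// s.ensureRootNotZeros. That helper's body is not part of this diff; conceptually it amounts
// to something like the following sketch, with genesisRoot standing in for the service's
// cached genesis block root:
//
//	func ensureRootNotZerosSketch(root, genesisRoot [32]byte) [32]byte {
//		if root == params.BeaconConfig().ZeroHash {
//			return genesisRoot
//		}
//		return root
//	}
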
|
||||
// Make sure that attestation processor is subscribed and ready for state initializing event.
|
||||
@@ -162,7 +176,11 @@ func (s *Service) Start() {
|
||||
if err != nil {
|
||||
log.Fatalf("Could not retrieve genesis state: %v", err)
|
||||
}
|
||||
go slotutil.CountdownToGenesis(s.ctx, s.genesisTime, uint64(gState.NumValidators()))
|
||||
gRoot, err := gState.HashTreeRoot(s.ctx)
|
||||
if err != nil {
|
||||
log.Fatalf("Could not hash tree root genesis state: %v", err)
|
||||
}
|
||||
go slotutil.CountdownToGenesis(s.ctx, s.genesisTime, uint64(gState.NumValidators()), gRoot)
|
||||
|
||||
justifiedCheckpoint, err := s.beaconDB.JustifiedCheckpoint(s.ctx)
|
||||
if err != nil {
|
||||
@@ -184,6 +202,26 @@ func (s *Service) Start() {
|
||||
s.prevFinalizedCheckpt = stateTrie.CopyCheckpoint(finalizedCheckpoint)
|
||||
s.resumeForkChoice(justifiedCheckpoint, finalizedCheckpoint)
|
||||
|
||||
ss, err := helpers.StartSlot(s.finalizedCheckpt.Epoch)
|
||||
if err != nil {
|
||||
log.Fatalf("Could not get start slot of finalized epoch: %v", err)
|
||||
}
|
||||
h := s.headBlock().Block
|
||||
if h.Slot > ss {
|
||||
log.WithFields(logrus.Fields{
|
||||
"startSlot": ss,
|
||||
"endSlot": h.Slot,
|
||||
}).Info("Loading blocks to fork choice store, this may take a while.")
|
||||
if err := s.fillInForkChoiceMissingBlocks(s.ctx, h, s.finalizedCheckpt, s.justifiedCheckpt); err != nil {
|
||||
log.Fatalf("Could not fill in fork choice store missing blocks: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := s.VerifyWeakSubjectivityRoot(s.ctx); err != nil {
|
||||
// Exit run time if the node failed to verify weak subjectivity checkpoint.
|
||||
log.Fatalf("Could not verify weak subjectivity checkpoint: %v", err)
|
||||
}
|
||||
|
||||
s.stateNotifier.StateFeed().Send(&feed.Event{
|
||||
Type: statefeed.Initialized,
|
||||
Data: &statefeed.InitializedData{
|
||||
@@ -226,7 +264,7 @@ func (s *Service) Start() {
|
||||
}()
|
||||
}
|
||||
|
||||
go s.processAttestation(attestationProcessorSubscribed)
|
||||
go s.processAttestationsRoutine(attestationProcessorSubscribed)
|
||||
}
|
||||
|
||||
// processChainStartTime initializes a series of deposits from the ChainStart deposits in the eth1
|
||||
@@ -238,7 +276,11 @@ func (s *Service) processChainStartTime(ctx context.Context, genesisTime time.Ti
|
||||
log.Fatalf("Could not initialize beacon chain: %v", err)
|
||||
}
|
||||
// We start a counter to genesis, if needed.
|
||||
go slotutil.CountdownToGenesis(ctx, genesisTime, uint64(initializedState.NumValidators()))
|
||||
gRoot, err := initializedState.HashTreeRoot(s.ctx)
|
||||
if err != nil {
|
||||
log.Fatalf("Could not hash tree root genesis state: %v", err)
|
||||
}
|
||||
go slotutil.CountdownToGenesis(ctx, genesisTime, uint64(initializedState.NumValidators()), gRoot)
|
||||
|
||||
// We send out a state initialized event to the rest of the services
|
||||
// running in the beacon node.
|
||||
@@ -282,7 +324,7 @@ func (s *Service) initializeBeaconChain(
|
||||
if err := helpers.UpdateCommitteeCache(genesisState, 0 /* genesis epoch */); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := helpers.UpdateProposerIndicesInCache(genesisState, 0 /* genesis epoch */); err != nil {
|
||||
if err := helpers.UpdateProposerIndicesInCache(genesisState); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -302,28 +344,21 @@ func (s *Service) Stop() error {
|
||||
}
|
||||
|
||||
// Save initial sync cached blocks to the DB before stop.
|
||||
if err := s.beaconDB.SaveBlocks(s.ctx, s.getInitSyncBlocks()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
return s.beaconDB.SaveBlocks(s.ctx, s.getInitSyncBlocks())
|
||||
}
|
||||
|
||||
// Status always returns nil unless there is an error condition that causes
|
||||
// this service to be unhealthy.
|
||||
func (s *Service) Status() error {
|
||||
if s.genesisRoot == params.BeaconConfig().ZeroHash {
|
||||
return errors.New("genesis state has not been created")
|
||||
}
|
||||
if runtime.NumGoroutine() > s.maxRoutines {
|
||||
return fmt.Errorf("too many goroutines %d", runtime.NumGoroutine())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ClearCachedStates removes all stored caches states. This is done after the node
|
||||
// is synced.
|
||||
func (s *Service) ClearCachedStates() {
|
||||
s.initSyncState = map[[32]byte]*stateTrie.BeaconState{}
|
||||
}
|
||||
|
||||
// This gets called when beacon chain is first initialized to save genesis data (state, block, and more) in db.
|
||||
func (s *Service) saveGenesisData(ctx context.Context, genesisState *stateTrie.BeaconState) error {
|
||||
stateRoot, err := genesisState.HashTreeRoot(ctx)
|
||||
@@ -401,23 +436,6 @@ func (s *Service) initializeChainInfo(ctx context.Context) error {
|
||||
}
|
||||
s.genesisRoot = genesisBlkRoot
|
||||
|
||||
if flags.Get().UnsafeSync {
|
||||
headBlock, err := s.beaconDB.HeadBlock(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not retrieve head block")
|
||||
}
|
||||
headRoot, err := headBlock.Block.HashTreeRoot()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not hash head block")
|
||||
}
|
||||
headState, err := s.beaconDB.HeadState(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not retrieve head state")
|
||||
}
|
||||
s.setHead(headRoot, headBlock, headState)
|
||||
return nil
|
||||
}
|
||||
|
||||
finalized, err := s.beaconDB.FinalizedCheckpoint(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get finalized checkpoint from db")
|
||||
@@ -427,7 +445,7 @@ func (s *Service) initializeChainInfo(ctx context.Context) error {
|
||||
// would be the genesis state and block.
|
||||
return errors.New("no finalized epoch in the database")
|
||||
}
|
||||
finalizedRoot := bytesutil.ToBytes32(finalized.Root)
|
||||
finalizedRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(finalized.Root))
|
||||
var finalizedState *stateTrie.BeaconState
|
||||
|
||||
finalizedState, err = s.stateGen.Resume(ctx)
|
||||
@@ -435,6 +453,42 @@ func (s *Service) initializeChainInfo(ctx context.Context) error {
|
||||
return errors.Wrap(err, "could not get finalized state from db")
|
||||
}
|
||||
|
||||
	if flags.Get().HeadSync {
		headBlock, err := s.beaconDB.HeadBlock(ctx)
		if err != nil {
			return errors.Wrap(err, "could not retrieve head block")
		}
		headEpoch := helpers.SlotToEpoch(headBlock.Block.Slot)
		var epochsSinceFinality types.Epoch
		if headEpoch > finalized.Epoch {
			epochsSinceFinality = headEpoch - finalized.Epoch
		}
		// Only head sync when the node is far enough beyond the known finalized epoch;
		// this becomes really useful during long periods of non-finality.
		if epochsSinceFinality >= headSyncMinEpochsAfterCheckpoint {
			headRoot, err := headBlock.Block.HashTreeRoot()
			if err != nil {
				return errors.Wrap(err, "could not hash head block")
			}
			finalizedState, err := s.stateGen.Resume(ctx)
			if err != nil {
				return errors.Wrap(err, "could not get finalized state from db")
			}
			log.Infof("Regenerating state from the last checkpoint at slot %d to current head slot of %d. "+
				"This process may take a while, please wait.", finalizedState.Slot(), headBlock.Block.Slot)
			headState, err := s.stateGen.StateByRoot(ctx, headRoot)
			if err != nil {
				return errors.Wrap(err, "could not retrieve head state")
			}
			s.setHead(headRoot, headBlock, headState)
			return nil
		} else {
			log.Warnf("Finalized checkpoint at slot %d is too close to the current head slot, "+
				"resetting head from the checkpoint ('--%s' flag is ignored).",
				finalizedState.Slot(), flags.HeadSync.Name)
		}
	}
|
||||
finalizedBlock, err := s.beaconDB.Block(ctx, finalizedRoot)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get finalized block from db")
|
||||
@@ -450,7 +504,7 @@ func (s *Service) initializeChainInfo(ctx context.Context) error {
|
||||
|
||||
// This is called when a client starts from non-genesis slot. This passes last justified and finalized
|
||||
// information to fork choice service to initializes fork choice store.
|
||||
func (s *Service) resumeForkChoice(justifiedCheckpoint *ethpb.Checkpoint, finalizedCheckpoint *ethpb.Checkpoint) {
|
||||
func (s *Service) resumeForkChoice(justifiedCheckpoint, finalizedCheckpoint *ethpb.Checkpoint) {
|
||||
store := protoarray.New(justifiedCheckpoint.Epoch, finalizedCheckpoint.Epoch, bytesutil.ToBytes32(finalizedCheckpoint.Root))
|
||||
s.forkChoiceStore = store
|
||||
}
|
||||
|
||||
@@ -16,9 +16,9 @@ func init() {
|
||||
}
|
||||
|
||||
func TestChainService_SaveHead_DataRace(t *testing.T) {
|
||||
db, _ := testDB.SetupDB(t)
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
s := &Service{
|
||||
beaconDB: db,
|
||||
beaconDB: beaconDB,
|
||||
}
|
||||
go func() {
|
||||
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
|
||||
|
||||
@@ -3,7 +3,6 @@ package blockchain
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"io/ioutil"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -11,9 +10,10 @@ import (
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/gogo/protobuf/proto"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
|
||||
b "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db"
|
||||
@@ -24,6 +24,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
|
||||
beaconstate "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
"github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
|
||||
protodb "github.com/prysmaticlabs/prysm/proto/beacon/db"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
@@ -32,15 +33,9 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
"github.com/sirupsen/logrus"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
|
||||
func init() {
|
||||
logrus.SetLevel(logrus.DebugLevel)
|
||||
logrus.SetOutput(ioutil.Discard)
|
||||
}
|
||||
|
||||
type mockBeaconNode struct {
|
||||
stateFeed *event.Feed
|
||||
}
|
||||
@@ -67,9 +62,9 @@ func (mb *mockBroadcaster) BroadcastAttestation(_ context.Context, _ uint64, _ *
|
||||
return nil
|
||||
}
|
||||
|
||||
var _ = p2p.Broadcaster(&mockBroadcaster{})
|
||||
var _ p2p.Broadcaster = (*mockBroadcaster)(nil)
|
||||
|
||||
func setupBeaconChain(t *testing.T, beaconDB db.Database, sc *cache.StateSummaryCache) *Service {
|
||||
func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
|
||||
endpoint := "http://127.0.0.1"
|
||||
ctx := context.Background()
|
||||
var web3Service *powchain.Service
|
||||
@@ -93,7 +88,7 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database, sc *cache.StateSummary
|
||||
require.NoError(t, err)
|
||||
web3Service, err = powchain.NewService(ctx, &powchain.Web3ServiceConfig{
|
||||
BeaconDB: beaconDB,
|
||||
HTTPEndPoint: endpoint,
|
||||
HTTPEndpoints: []string{endpoint},
|
||||
DepositContract: common.Address{},
|
||||
})
|
||||
require.NoError(t, err, "Unable to set up web3 service")
|
||||
@@ -101,7 +96,7 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database, sc *cache.StateSummary
|
||||
opsService, err := attestations.NewService(ctx, &attestations.Config{Pool: attestations.NewPool()})
|
||||
require.NoError(t, err)
|
||||
|
||||
depositCache, err := depositcache.NewDepositCache()
|
||||
depositCache, err := depositcache.New()
|
||||
require.NoError(t, err)
|
||||
|
||||
cfg := &Config{
|
||||
@@ -112,7 +107,7 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database, sc *cache.StateSummary
|
||||
P2p: &mockBroadcaster{},
|
||||
StateNotifier: &mockBeaconNode{},
|
||||
AttPool: attestations.NewPool(),
|
||||
StateGen: stategen.New(beaconDB, sc),
|
||||
StateGen: stategen.New(beaconDB),
|
||||
ForkChoiceStore: protoarray.New(0, 0, params.BeaconConfig().ZeroHash),
|
||||
OpsService: opsService,
|
||||
}
|
||||
@@ -130,21 +125,49 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database, sc *cache.StateSummary
|
||||
func TestChainStartStop_Initialized(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
ctx := context.Background()
|
||||
db, sc := testDB.SetupDB(t)
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
chainService := setupBeaconChain(t, db, sc)
|
||||
chainService := setupBeaconChain(t, beaconDB)
|
||||
|
||||
genesisBlk := testutil.NewBeaconBlock()
|
||||
blkRoot, err := genesisBlk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.SaveBlock(ctx, genesisBlk))
|
||||
s := testutil.NewBeaconState()
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, genesisBlk))
|
||||
s, err := testutil.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.SetSlot(1))
|
||||
require.NoError(t, db.SaveState(ctx, s, blkRoot))
|
||||
require.NoError(t, db.SaveHeadBlockRoot(ctx, blkRoot))
|
||||
require.NoError(t, db.SaveGenesisBlockRoot(ctx, blkRoot))
|
||||
require.NoError(t, db.SaveJustifiedCheckpoint(ctx, &ethpb.Checkpoint{Root: blkRoot[:]}))
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Root: blkRoot[:]}))
|
||||
require.NoError(t, beaconDB.SaveState(ctx, s, blkRoot))
|
||||
require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, blkRoot))
|
||||
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, blkRoot))
|
||||
require.NoError(t, beaconDB.SaveJustifiedCheckpoint(ctx, &ethpb.Checkpoint{Root: blkRoot[:]}))
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Root: blkRoot[:]}))
|
||||
|
||||
// Test the start function.
|
||||
chainService.Start()
|
||||
|
||||
require.NoError(t, chainService.Stop(), "Unable to stop chain service")
|
||||
|
||||
// The context should have been canceled.
|
||||
assert.Equal(t, context.Canceled, chainService.ctx.Err(), "Context was not canceled")
|
||||
require.LogsContain(t, hook, "data already exists")
|
||||
}
|
||||
|
||||
func TestChainStartStop_GenesisZeroHashes(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
chainService := setupBeaconChain(t, beaconDB)
|
||||
|
||||
genesisBlk := testutil.NewBeaconBlock()
|
||||
blkRoot, err := genesisBlk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, genesisBlk))
|
||||
s, err := testutil.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveState(ctx, s, blkRoot))
|
||||
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, blkRoot))
|
||||
require.NoError(t, beaconDB.SaveJustifiedCheckpoint(ctx, &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}))
|
||||
|
||||
// Test the start function.
|
||||
chainService.Start()
|
||||
@@ -158,10 +181,10 @@ func TestChainStartStop_Initialized(t *testing.T) {
|
||||
|
||||
func TestChainService_InitializeBeaconChain(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
db, sc := testDB.SetupDB(t)
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
bc := setupBeaconChain(t, db, sc)
|
||||
bc := setupBeaconChain(t, beaconDB)
|
||||
var err error
|
||||
|
||||
// Set up 10 deposits pre chain start for validators to register
|
||||
@@ -178,6 +201,7 @@ func TestChainService_InitializeBeaconChain(t *testing.T) {
|
||||
DepositCount: uint64(len(deposits)),
|
||||
BlockHash: make([]byte, 32),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
genState, err = b.ProcessPreGenesisDeposits(ctx, genState, deposits)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -191,27 +215,30 @@ func TestChainService_InitializeBeaconChain(t *testing.T) {
|
||||
if headBlk == nil {
|
||||
t.Error("Head state can't be nil after initialize beacon chain")
|
||||
}
|
||||
if bc.headRoot() == params.BeaconConfig().ZeroHash {
|
||||
r, err := bc.HeadRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
if bytesutil.ToBytes32(r) == params.BeaconConfig().ZeroHash {
|
||||
t.Error("Canonical root for slot 0 can't be zeros after initialize beacon chain")
|
||||
}
|
||||
}
|
||||
|
||||
func TestChainService_CorrectGenesisRoots(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db, sc := testDB.SetupDB(t)
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
chainService := setupBeaconChain(t, db, sc)
|
||||
chainService := setupBeaconChain(t, beaconDB)
|
||||
|
||||
genesisBlk := testutil.NewBeaconBlock()
|
||||
blkRoot, err := genesisBlk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.SaveBlock(ctx, genesisBlk))
|
||||
s := testutil.NewBeaconState()
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, genesisBlk))
|
||||
s, err := testutil.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.SetSlot(0))
|
||||
require.NoError(t, db.SaveState(ctx, s, blkRoot))
|
||||
require.NoError(t, db.SaveHeadBlockRoot(ctx, blkRoot))
|
||||
require.NoError(t, db.SaveGenesisBlockRoot(ctx, blkRoot))
|
||||
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Root: blkRoot[:]}))
|
||||
require.NoError(t, beaconDB.SaveState(ctx, s, blkRoot))
|
||||
require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, blkRoot))
|
||||
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, blkRoot))
|
||||
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Root: blkRoot[:]}))
|
||||
|
||||
// Test the start function.
|
||||
chainService.Start()
|
||||
@@ -224,36 +251,37 @@ func TestChainService_CorrectGenesisRoots(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestChainService_InitializeChainInfo(t *testing.T) {
|
||||
db, sc := testDB.SetupDB(t)
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
genesis := testutil.NewBeaconBlock()
|
||||
genesisRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.SaveGenesisBlockRoot(ctx, genesisRoot))
|
||||
require.NoError(t, db.SaveBlock(ctx, genesis))
|
||||
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, genesis))
|
||||
|
||||
finalizedSlot := params.BeaconConfig().SlotsPerEpoch*2 + 1
|
||||
headBlock := testutil.NewBeaconBlock()
|
||||
headBlock.Block.Slot = finalizedSlot
|
||||
headBlock.Block.ParentRoot = bytesutil.PadTo(genesisRoot[:], 32)
|
||||
headState := testutil.NewBeaconState()
|
||||
headState, err := testutil.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, headState.SetSlot(finalizedSlot))
|
||||
require.NoError(t, headState.SetGenesisValidatorRoot(params.BeaconConfig().ZeroHash[:]))
|
||||
headRoot, err := headBlock.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.SaveState(ctx, headState, headRoot))
|
||||
require.NoError(t, db.SaveState(ctx, headState, genesisRoot))
|
||||
require.NoError(t, db.SaveBlock(ctx, headBlock))
|
||||
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Epoch: helpers.SlotToEpoch(finalizedSlot), Root: headRoot[:]}))
|
||||
c := &Service{beaconDB: db, stateGen: stategen.New(db, sc)}
|
||||
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
|
||||
require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, headBlock))
|
||||
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Epoch: helpers.SlotToEpoch(finalizedSlot), Root: headRoot[:]}))
|
||||
c := &Service{beaconDB: beaconDB, stateGen: stategen.New(beaconDB)}
|
||||
require.NoError(t, c.initializeChainInfo(ctx))
|
||||
headBlk, err := c.HeadBlock(ctx)
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, headBlock, headBlk, "Head block incorrect")
|
||||
s, err := c.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, headState.InnerStateUnsafe(), s.InnerStateUnsafe(), "Head state incorrect")
|
||||
assert.DeepSSZEqual(t, headState.InnerStateUnsafe(), s.InnerStateUnsafe(), "Head state incorrect")
|
||||
assert.Equal(t, c.HeadSlot(), headBlock.Block.Slot, "Head slot incorrect")
|
||||
r, err := c.HeadRoot(context.Background())
|
||||
require.NoError(t, err)
|
||||
@@ -263,41 +291,157 @@ func TestChainService_InitializeChainInfo(t *testing.T) {
|
||||
assert.Equal(t, genesisRoot, c.genesisRoot, "Genesis block root incorrect")
|
||||
}
|
||||
|
||||
func TestChainService_InitializeChainInfo_SetHeadAtGenesis(t *testing.T) {
	beaconDB := testDB.SetupDB(t)
	ctx := context.Background()

	genesis := testutil.NewBeaconBlock()
	genesisRoot, err := genesis.Block.HashTreeRoot()
	require.NoError(t, err)
	require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
	require.NoError(t, beaconDB.SaveBlock(ctx, genesis))

	finalizedSlot := params.BeaconConfig().SlotsPerEpoch*2 + 1
	headBlock := testutil.NewBeaconBlock()
	headBlock.Block.Slot = finalizedSlot
	headBlock.Block.ParentRoot = bytesutil.PadTo(genesisRoot[:], 32)
	headState, err := testutil.NewBeaconState()
	require.NoError(t, err)
	require.NoError(t, headState.SetSlot(finalizedSlot))
	require.NoError(t, headState.SetGenesisValidatorRoot(params.BeaconConfig().ZeroHash[:]))
	headRoot, err := headBlock.Block.HashTreeRoot()
	require.NoError(t, err)
	require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
	require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
	require.NoError(t, beaconDB.SaveBlock(ctx, headBlock))
	c := &Service{beaconDB: beaconDB, stateGen: stategen.New(beaconDB)}
	require.NoError(t, c.initializeChainInfo(ctx))
	s, err := c.HeadState(ctx)
	require.NoError(t, err)
	assert.DeepSSZEqual(t, headState.InnerStateUnsafe(), s.InnerStateUnsafe(), "Head state incorrect")
	assert.Equal(t, genesisRoot, c.genesisRoot, "Genesis block root incorrect")
	assert.DeepEqual(t, genesis, c.head.block)
}

func TestChainService_InitializeChainInfo_HeadSync(t *testing.T) {
	resetFlags := flags.Get()
	flags.Init(&flags.GlobalFlags{
		HeadSync: true,
	})
	defer func() {
		flags.Init(resetFlags)
	}()

	hook := logTest.NewGlobal()
	finalizedSlot := params.BeaconConfig().SlotsPerEpoch*2 + 1
	beaconDB := testDB.SetupDB(t)
	ctx := context.Background()

	genesisBlock := testutil.NewBeaconBlock()
	genesisRoot, err := genesisBlock.Block.HashTreeRoot()
	require.NoError(t, err)
	require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
	require.NoError(t, beaconDB.SaveBlock(ctx, genesisBlock))

	finalizedBlock := testutil.NewBeaconBlock()
	finalizedBlock.Block.Slot = finalizedSlot
	finalizedBlock.Block.ParentRoot = genesisRoot[:]
	finalizedRoot, err := finalizedBlock.Block.HashTreeRoot()
	require.NoError(t, err)
	require.NoError(t, beaconDB.SaveBlock(ctx, finalizedBlock))

	// Set head slot close to the finalization point, no head sync is triggered.
	headBlock := testutil.NewBeaconBlock()
	headBlock.Block.Slot = finalizedSlot + params.BeaconConfig().SlotsPerEpoch*5
	headBlock.Block.ParentRoot = finalizedRoot[:]
	headRoot, err := headBlock.Block.HashTreeRoot()
	require.NoError(t, err)
	require.NoError(t, beaconDB.SaveBlock(ctx, headBlock))

	headState, err := testutil.NewBeaconState()
	require.NoError(t, err)
	require.NoError(t, headState.SetSlot(headBlock.Block.Slot))
	require.NoError(t, headState.SetGenesisValidatorRoot(params.BeaconConfig().ZeroHash[:]))
	require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
	require.NoError(t, beaconDB.SaveState(ctx, headState, finalizedRoot))
	require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
	require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, headRoot))
	require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{
		Epoch: helpers.SlotToEpoch(finalizedBlock.Block.Slot),
		Root: finalizedRoot[:],
	}))

	c := &Service{beaconDB: beaconDB, stateGen: stategen.New(beaconDB)}

	require.NoError(t, c.initializeChainInfo(ctx))
	s, err := c.HeadState(ctx)
	require.NoError(t, err)
	assert.DeepSSZEqual(t, headState.InnerStateUnsafe(), s.InnerStateUnsafe(), "Head state incorrect")
	assert.Equal(t, genesisRoot, c.genesisRoot, "Genesis block root incorrect")
	// Since head sync is not triggered, chain is initialized to the last finalization checkpoint.
	assert.DeepEqual(t, finalizedBlock, c.head.block)
	assert.LogsContain(t, hook, "resetting head from the checkpoint ('--head-sync' flag is ignored)")
	assert.LogsDoNotContain(t, hook, "Regenerating state from the last checkpoint at slot")

	// Set head slot far beyond the finalization point, head sync should be triggered.
	headBlock = testutil.NewBeaconBlock()
	headBlock.Block.Slot = finalizedSlot + params.BeaconConfig().SlotsPerEpoch*headSyncMinEpochsAfterCheckpoint
	headBlock.Block.ParentRoot = finalizedRoot[:]
	headRoot, err = headBlock.Block.HashTreeRoot()
	require.NoError(t, err)
	require.NoError(t, beaconDB.SaveBlock(ctx, headBlock))
	require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
	require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, headRoot))

	hook.Reset()
	require.NoError(t, c.initializeChainInfo(ctx))
	s, err = c.HeadState(ctx)
	require.NoError(t, err)
	assert.DeepSSZEqual(t, headState.InnerStateUnsafe(), s.InnerStateUnsafe(), "Head state incorrect")
	assert.Equal(t, genesisRoot, c.genesisRoot, "Genesis block root incorrect")
	// Head slot is far beyond the latest finalized checkpoint, head sync is triggered.
	assert.DeepEqual(t, headBlock, c.head.block)
	assert.LogsContain(t, hook, "Regenerating state from the last checkpoint at slot 225")
	assert.LogsDoNotContain(t, hook, "resetting head from the checkpoint ('--head-sync' flag is ignored)")
}
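
// Worked numbers for the two cases above, assuming the mainnet value of 32 slots per epoch
// (not restated in this diff): finalizedSlot = 32*2+1 = 65, i.e. finalized epoch 2. The first
// head sits at slot 65 + 32*5 = 225 (epoch 7), so epochsSinceFinality is 5, well under
// headSyncMinEpochsAfterCheckpoint (128), and the head is reset to the checkpoint instead.
// The second head sits at slot 65 + 32*128 = 4161 (epoch 130), giving epochsSinceFinality of
// 128, which meets the threshold and triggers head sync; the state is then regenerated from
// the checkpoint state at slot 225, matching the log asserted above.
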
|
||||
func TestChainService_SaveHeadNoDB(t *testing.T) {
|
||||
db, sc := testDB.SetupDB(t)
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
s := &Service{
|
||||
beaconDB: db,
|
||||
stateGen: stategen.New(db, sc),
|
||||
beaconDB: beaconDB,
|
||||
stateGen: stategen.New(beaconDB),
|
||||
}
|
||||
b := testutil.NewBeaconBlock()
|
||||
b.Block.Slot = 1
|
||||
r, err := b.HashTreeRoot()
|
||||
blk := testutil.NewBeaconBlock()
|
||||
blk.Block.Slot = 1
|
||||
r, err := blk.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
newState, err := testutil.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
newState := testutil.NewBeaconState()
|
||||
require.NoError(t, s.stateGen.SaveState(ctx, r, newState))
|
||||
require.NoError(t, s.saveHeadNoDB(ctx, b, r, newState))
|
||||
require.NoError(t, s.saveHeadNoDB(ctx, blk, r, newState))
|
||||
|
||||
newB, err := s.beaconDB.HeadBlock(ctx)
|
||||
require.NoError(t, err)
|
||||
if reflect.DeepEqual(newB, b) {
|
||||
if reflect.DeepEqual(newB, blk) {
|
||||
t.Error("head block should not be equal")
|
||||
}
|
||||
}
|
||||
|
||||
func TestHasBlock_ForkChoiceAndDB(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db, _ := testDB.SetupDB(t)
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
s := &Service{
|
||||
forkChoiceStore: protoarray.New(0, 0, [32]byte{}),
|
||||
finalizedCheckpt: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
beaconDB: db,
|
||||
beaconDB: beaconDB,
|
||||
}
|
||||
block := testutil.NewBeaconBlock()
|
||||
r, err := block.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
state := testutil.NewBeaconState()
|
||||
require.NoError(t, s.insertBlockAndAttestationsToForkChoiceStore(ctx, block.Block, r, state))
|
||||
beaconState, err := testutil.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.insertBlockAndAttestationsToForkChoiceStore(ctx, block.Block, r, beaconState))
|
||||
|
||||
assert.Equal(t, false, s.hasBlock(ctx, [32]byte{}), "Should not have block")
|
||||
assert.Equal(t, true, s.hasBlock(ctx, r), "Should have block")
|
||||
@@ -305,12 +449,13 @@ func TestHasBlock_ForkChoiceAndDB(t *testing.T) {
|
||||
|
||||
func TestServiceStop_SaveCachedBlocks(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
db, _ := testDB.SetupDB(t)
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
s := &Service{
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
beaconDB: db,
|
||||
beaconDB: beaconDB,
|
||||
initSyncBlocks: make(map[[32]byte]*ethpb.SignedBeaconBlock),
|
||||
stateGen: stategen.New(beaconDB),
|
||||
}
|
||||
b := testutil.NewBeaconBlock()
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
@@ -320,11 +465,25 @@ func TestServiceStop_SaveCachedBlocks(t *testing.T) {
|
||||
require.Equal(t, true, s.beaconDB.HasBlock(ctx, r))
|
||||
}
|
||||
|
||||
func TestProcessChainStartTime_ReceivedFeed(t *testing.T) {
	beaconDB := testDB.SetupDB(t)
	service := setupBeaconChain(t, beaconDB)
	stateChannel := make(chan *feed.Event, 1)
	stateSub := service.stateNotifier.StateFeed().Subscribe(stateChannel)
	defer stateSub.Unsubscribe()
	service.processChainStartTime(context.Background(), time.Now())

	stateEvent := <-stateChannel
	require.Equal(t, int(stateEvent.Type), statefeed.Initialized)
	_, ok := stateEvent.Data.(*statefeed.InitializedData)
	require.Equal(t, true, ok)
}
|
||||
func BenchmarkHasBlockDB(b *testing.B) {
|
||||
db, _ := testDB.SetupDB(b)
|
||||
beaconDB := testDB.SetupDB(b)
|
||||
ctx := context.Background()
|
||||
s := &Service{
|
||||
beaconDB: db,
|
||||
beaconDB: beaconDB,
|
||||
}
|
||||
block := testutil.NewBeaconBlock()
|
||||
require.NoError(b, s.beaconDB.SaveBlock(ctx, block))
|
||||
@@ -339,19 +498,19 @@ func BenchmarkHasBlockDB(b *testing.B) {
|
||||
|
||||
func BenchmarkHasBlockForkChoiceStore(b *testing.B) {
|
||||
ctx := context.Background()
|
||||
db, _ := testDB.SetupDB(b)
|
||||
beaconDB := testDB.SetupDB(b)
|
||||
s := &Service{
|
||||
forkChoiceStore: protoarray.New(0, 0, [32]byte{}),
|
||||
finalizedCheckpt: &ethpb.Checkpoint{Root: make([]byte, 32)},
|
||||
beaconDB: db,
|
||||
beaconDB: beaconDB,
|
||||
}
|
||||
block := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}}
|
||||
r, err := block.Block.HashTreeRoot()
|
||||
require.NoError(b, err)
|
||||
bs := &pb.BeaconState{FinalizedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}, CurrentJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}}
|
||||
state, err := beaconstate.InitializeFromProto(bs)
|
||||
beaconState, err := beaconstate.InitializeFromProto(bs)
|
||||
require.NoError(b, err)
|
||||
require.NoError(b, s.insertBlockAndAttestationsToForkChoiceStore(ctx, block.Block, r, state))
|
||||
require.NoError(b, s.insertBlockAndAttestationsToForkChoiceStore(ctx, block.Block, r, beaconState))
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
|
||||
@@ -20,10 +20,10 @@ go_library(
|
||||
"//beacon-chain/forkchoice/protoarray:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//proto/beacon/p2p/v1:go_default_library",
|
||||
"//shared/bytesutil:go_default_library",
|
||||
"//shared/event:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
|
||||
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
|
||||
@@ -20,7 +21,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
|
||||
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/event"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/sirupsen/logrus"
|
||||
@@ -38,6 +38,7 @@ type ChainService struct {
|
||||
Balance *precompute.Balance
|
||||
Genesis time.Time
|
||||
ValidatorsRoot [32]byte
|
||||
CanonicalRoots map[[32]byte]bool
|
||||
Fork *pb.Fork
|
||||
ETH1Data *ethpb.Eth1Data
|
||||
DB db.Database
|
||||
@@ -47,22 +48,23 @@ type ChainService struct {
|
||||
ValidAttestation bool
|
||||
ForkChoiceStore *protoarray.Store
|
||||
VerifyBlkDescendantErr error
|
||||
Slot *types.Slot // Pointer because 0 is a useful value, so checking against it can be incorrect.
|
||||
}
|
||||
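
// The Slot field above is a pointer precisely so that an explicit slot 0 can be told apart
// from "not set". A short usage sketch for a test that wants a fixed current slot from this
// mock (only the Slot field and CurrentSlot are from this file; the surrounding test code is
// assumed):
//
//	s := types.Slot(42)
//	chain := &ChainService{Slot: &s}
//	_ = chain.CurrentSlot() // returns 42 instead of deriving the slot from the Genesis time
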
|
||||
// StateNotifier mocks the same method in the chain service.
|
||||
func (ms *ChainService) StateNotifier() statefeed.Notifier {
|
||||
if ms.stateNotifier == nil {
|
||||
ms.stateNotifier = &MockStateNotifier{}
|
||||
func (s *ChainService) StateNotifier() statefeed.Notifier {
|
||||
if s.stateNotifier == nil {
|
||||
s.stateNotifier = &MockStateNotifier{}
|
||||
}
|
||||
return ms.stateNotifier
|
||||
return s.stateNotifier
|
||||
}
|
||||
|
||||
// BlockNotifier mocks the same method in the chain service.
|
||||
func (ms *ChainService) BlockNotifier() blockfeed.Notifier {
|
||||
if ms.blockNotifier == nil {
|
||||
ms.blockNotifier = &MockBlockNotifier{}
|
||||
func (s *ChainService) BlockNotifier() blockfeed.Notifier {
|
||||
if s.blockNotifier == nil {
|
||||
s.blockNotifier = &MockBlockNotifier{}
|
||||
}
|
||||
return ms.blockNotifier
|
||||
return s.blockNotifier
|
||||
}
|
||||
|
||||
// MockBlockNotifier mocks the block notifier.
|
||||
@@ -71,16 +73,17 @@ type MockBlockNotifier struct {
|
||||
}
|
||||
|
||||
// BlockFeed returns a block feed.
|
||||
func (msn *MockBlockNotifier) BlockFeed() *event.Feed {
|
||||
if msn.feed == nil {
|
||||
msn.feed = new(event.Feed)
|
||||
func (mbn *MockBlockNotifier) BlockFeed() *event.Feed {
|
||||
if mbn.feed == nil {
|
||||
mbn.feed = new(event.Feed)
|
||||
}
|
||||
return msn.feed
|
||||
return mbn.feed
|
||||
}
|
||||
|
||||
// MockStateNotifier mocks the state notifier.
|
||||
type MockStateNotifier struct {
|
||||
feed *event.Feed
|
||||
feed *event.Feed
|
||||
feedLock sync.Mutex
|
||||
|
||||
recv []*feed.Event
|
||||
recvLock sync.Mutex
|
||||
@@ -98,6 +101,9 @@ func (msn *MockStateNotifier) ReceivedEvents() []*feed.Event {
|
||||
|
||||
// StateFeed returns a state feed.
|
||||
func (msn *MockStateNotifier) StateFeed() *event.Feed {
|
||||
msn.feedLock.Lock()
|
||||
defer msn.feedLock.Unlock()
|
||||
|
||||
if msn.feed == nil && msn.recvCh == nil {
|
||||
msn.feed = new(event.Feed)
|
||||
if msn.RecordEvents {
|
||||
@@ -120,11 +126,11 @@ func (msn *MockStateNotifier) StateFeed() *event.Feed {
|
||||
}
|
||||
|
||||
// OperationNotifier mocks the same method in the chain service.
|
||||
func (ms *ChainService) OperationNotifier() opfeed.Notifier {
|
||||
if ms.opNotifier == nil {
|
||||
ms.opNotifier = &MockOperationNotifier{}
|
||||
func (s *ChainService) OperationNotifier() opfeed.Notifier {
|
||||
if s.opNotifier == nil {
|
||||
s.opNotifier = &MockOperationNotifier{}
|
||||
}
|
||||
return ms.opNotifier
|
||||
return s.opNotifier
|
||||
}
|
||||
|
||||
// MockOperationNotifier mocks the operation notifier.
|
||||
@@ -141,247 +147,237 @@ func (mon *MockOperationNotifier) OperationFeed() *event.Feed {
|
||||
}
|
||||
|
||||
// ReceiveBlockInitialSync mocks ReceiveBlockInitialSync method in chain service.
|
||||
func (ms *ChainService) ReceiveBlockInitialSync(ctx context.Context, block *ethpb.SignedBeaconBlock, blockRoot [32]byte) error {
|
||||
if ms.State == nil {
|
||||
ms.State = &stateTrie.BeaconState{}
|
||||
func (s *ChainService) ReceiveBlockInitialSync(ctx context.Context, block *ethpb.SignedBeaconBlock, _ [32]byte) error {
|
||||
if s.State == nil {
|
||||
s.State = &stateTrie.BeaconState{}
|
||||
}
|
||||
if !bytes.Equal(ms.Root, block.Block.ParentRoot) {
|
||||
return errors.Errorf("wanted %#x but got %#x", ms.Root, block.Block.ParentRoot)
|
||||
if !bytes.Equal(s.Root, block.Block.ParentRoot) {
|
||||
return errors.Errorf("wanted %#x but got %#x", s.Root, block.Block.ParentRoot)
|
||||
}
|
||||
if err := ms.State.SetSlot(block.Block.Slot); err != nil {
|
||||
if err := s.State.SetSlot(block.Block.Slot); err != nil {
|
||||
return err
|
||||
}
|
||||
ms.BlocksReceived = append(ms.BlocksReceived, block)
|
||||
s.BlocksReceived = append(s.BlocksReceived, block)
|
||||
signingRoot, err := block.Block.HashTreeRoot()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if ms.DB != nil {
|
||||
if err := ms.DB.SaveBlock(ctx, block); err != nil {
|
||||
if s.DB != nil {
|
||||
if err := s.DB.SaveBlock(ctx, block); err != nil {
|
||||
return err
|
||||
}
|
||||
logrus.Infof("Saved block with root: %#x at slot %d", signingRoot, block.Block.Slot)
|
||||
}
|
||||
ms.Root = signingRoot[:]
|
||||
ms.Block = block
|
||||
s.Root = signingRoot[:]
|
||||
s.Block = block
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReceiveBlockBatch processes blocks in batches from initial-sync.
|
||||
func (ms *ChainService) ReceiveBlockBatch(ctx context.Context, blks []*ethpb.SignedBeaconBlock, roots [][32]byte) error {
|
||||
if ms.State == nil {
|
||||
ms.State = &stateTrie.BeaconState{}
|
||||
func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []*ethpb.SignedBeaconBlock, _ [][32]byte) error {
|
||||
if s.State == nil {
|
||||
s.State = &stateTrie.BeaconState{}
|
||||
}
|
||||
for _, block := range blks {
|
||||
if !bytes.Equal(ms.Root, block.Block.ParentRoot) {
|
||||
return errors.Errorf("wanted %#x but got %#x", ms.Root, block.Block.ParentRoot)
|
||||
if !bytes.Equal(s.Root, block.Block.ParentRoot) {
|
||||
return errors.Errorf("wanted %#x but got %#x", s.Root, block.Block.ParentRoot)
|
||||
}
|
||||
if err := ms.State.SetSlot(block.Block.Slot); err != nil {
|
||||
if err := s.State.SetSlot(block.Block.Slot); err != nil {
|
||||
return err
|
||||
}
|
||||
ms.BlocksReceived = append(ms.BlocksReceived, block)
|
||||
s.BlocksReceived = append(s.BlocksReceived, block)
|
||||
signingRoot, err := block.Block.HashTreeRoot()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if ms.DB != nil {
|
||||
if err := ms.DB.SaveBlock(ctx, block); err != nil {
|
||||
if s.DB != nil {
|
||||
if err := s.DB.SaveBlock(ctx, block); err != nil {
|
||||
return err
|
||||
}
|
||||
logrus.Infof("Saved block with root: %#x at slot %d", signingRoot, block.Block.Slot)
|
||||
}
|
||||
ms.Root = signingRoot[:]
|
||||
ms.Block = block
|
||||
s.Root = signingRoot[:]
|
||||
s.Block = block
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReceiveBlock mocks ReceiveBlock method in chain service.
|
||||
func (ms *ChainService) ReceiveBlock(ctx context.Context, block *ethpb.SignedBeaconBlock, blockRoot [32]byte) error {
|
||||
if ms.State == nil {
|
||||
ms.State = &stateTrie.BeaconState{}
|
||||
func (s *ChainService) ReceiveBlock(ctx context.Context, block *ethpb.SignedBeaconBlock, _ [32]byte) error {
|
||||
if s.State == nil {
|
||||
s.State = &stateTrie.BeaconState{}
|
||||
}
|
||||
if !bytes.Equal(ms.Root, block.Block.ParentRoot) {
|
||||
return errors.Errorf("wanted %#x but got %#x", ms.Root, block.Block.ParentRoot)
|
||||
if !bytes.Equal(s.Root, block.Block.ParentRoot) {
|
||||
return errors.Errorf("wanted %#x but got %#x", s.Root, block.Block.ParentRoot)
|
||||
}
|
||||
if err := ms.State.SetSlot(block.Block.Slot); err != nil {
|
||||
if err := s.State.SetSlot(block.Block.Slot); err != nil {
|
||||
return err
|
||||
}
|
||||
ms.BlocksReceived = append(ms.BlocksReceived, block)
|
||||
s.BlocksReceived = append(s.BlocksReceived, block)
|
||||
signingRoot, err := block.Block.HashTreeRoot()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if ms.DB != nil {
|
||||
if err := ms.DB.SaveBlock(ctx, block); err != nil {
|
||||
if s.DB != nil {
|
||||
if err := s.DB.SaveBlock(ctx, block); err != nil {
|
||||
return err
|
||||
}
|
||||
logrus.Infof("Saved block with root: %#x at slot %d", signingRoot, block.Block.Slot)
|
||||
}
|
||||
ms.Root = signingRoot[:]
|
||||
ms.Block = block
|
||||
s.Root = signingRoot[:]
|
||||
s.Block = block
|
||||
return nil
|
||||
}
|
||||
|
||||
// HeadSlot mocks HeadSlot method in chain service.
|
||||
func (ms *ChainService) HeadSlot() uint64 {
|
||||
if ms.State == nil {
|
||||
func (s *ChainService) HeadSlot() types.Slot {
|
||||
if s.State == nil {
|
||||
return 0
|
||||
}
|
||||
return ms.State.Slot()
|
||||
return s.State.Slot()
|
||||
}
|
||||
|
||||
// HeadRoot mocks HeadRoot method in chain service.
|
||||
func (ms *ChainService) HeadRoot(ctx context.Context) ([]byte, error) {
|
||||
if len(ms.Root) > 0 {
|
||||
return ms.Root, nil
|
||||
func (s *ChainService) HeadRoot(_ context.Context) ([]byte, error) {
|
||||
if len(s.Root) > 0 {
|
||||
return s.Root, nil
|
||||
}
|
||||
return make([]byte, 32), nil
|
||||
}
|
||||
|
||||
// HeadBlock mocks HeadBlock method in chain service.
|
||||
func (ms *ChainService) HeadBlock(context.Context) (*ethpb.SignedBeaconBlock, error) {
|
||||
return ms.Block, nil
|
||||
func (s *ChainService) HeadBlock(context.Context) (*ethpb.SignedBeaconBlock, error) {
|
||||
return s.Block, nil
|
||||
}
|
||||
|
||||
// HeadState mocks HeadState method in chain service.
|
||||
func (ms *ChainService) HeadState(context.Context) (*stateTrie.BeaconState, error) {
|
||||
return ms.State, nil
|
||||
func (s *ChainService) HeadState(context.Context) (*stateTrie.BeaconState, error) {
|
||||
return s.State, nil
|
||||
}
|
||||
|
||||
// CurrentFork mocks HeadState method in chain service.
|
||||
func (ms *ChainService) CurrentFork() *pb.Fork {
|
||||
return ms.Fork
|
||||
func (s *ChainService) CurrentFork() *pb.Fork {
|
||||
return s.Fork
|
||||
}
|
||||
|
||||
// FinalizedCheckpt mocks FinalizedCheckpt method in chain service.
|
||||
func (ms *ChainService) FinalizedCheckpt() *ethpb.Checkpoint {
|
||||
return ms.FinalizedCheckPoint
|
||||
func (s *ChainService) FinalizedCheckpt() *ethpb.Checkpoint {
|
||||
return s.FinalizedCheckPoint
|
||||
}
|
||||
|
||||
// CurrentJustifiedCheckpt mocks CurrentJustifiedCheckpt method in chain service.
|
||||
func (ms *ChainService) CurrentJustifiedCheckpt() *ethpb.Checkpoint {
|
||||
return ms.CurrentJustifiedCheckPoint
|
||||
func (s *ChainService) CurrentJustifiedCheckpt() *ethpb.Checkpoint {
|
||||
return s.CurrentJustifiedCheckPoint
|
||||
}
|
||||
|
||||
// PreviousJustifiedCheckpt mocks PreviousJustifiedCheckpt method in chain service.
|
||||
func (ms *ChainService) PreviousJustifiedCheckpt() *ethpb.Checkpoint {
|
||||
return ms.PreviousJustifiedCheckPoint
|
||||
func (s *ChainService) PreviousJustifiedCheckpt() *ethpb.Checkpoint {
|
||||
return s.PreviousJustifiedCheckPoint
|
||||
}
|
||||
|
||||
// ReceiveAttestation mocks ReceiveAttestation method in chain service.
|
||||
func (ms *ChainService) ReceiveAttestation(context.Context, *ethpb.Attestation) error {
|
||||
func (s *ChainService) ReceiveAttestation(_ context.Context, _ *ethpb.Attestation) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReceiveAttestationNoPubsub mocks ReceiveAttestationNoPubsub method in chain service.
|
||||
func (ms *ChainService) ReceiveAttestationNoPubsub(context.Context, *ethpb.Attestation) error {
|
||||
func (s *ChainService) ReceiveAttestationNoPubsub(context.Context, *ethpb.Attestation) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// AttestationPreState mocks AttestationPreState method in chain service.
|
||||
func (ms *ChainService) AttestationPreState(ctx context.Context, att *ethpb.Attestation) (*stateTrie.BeaconState, error) {
|
||||
return ms.State, nil
|
||||
func (s *ChainService) AttestationPreState(_ context.Context, _ *ethpb.Attestation) (*stateTrie.BeaconState, error) {
|
||||
return s.State, nil
|
||||
}
|
||||
|
||||
// HeadValidatorsIndices mocks the same method in the chain service.
|
||||
func (ms *ChainService) HeadValidatorsIndices(ctx context.Context, epoch uint64) ([]uint64, error) {
|
||||
if ms.State == nil {
|
||||
return []uint64{}, nil
|
||||
func (s *ChainService) HeadValidatorsIndices(_ context.Context, epoch types.Epoch) ([]types.ValidatorIndex, error) {
|
||||
if s.State == nil {
|
||||
return []types.ValidatorIndex{}, nil
|
||||
}
|
||||
return helpers.ActiveValidatorIndices(ms.State, epoch)
|
||||
return helpers.ActiveValidatorIndices(s.State, epoch)
|
||||
}
|
||||
|
||||
// HeadSeed mocks the same method in the chain service.
|
||||
func (ms *ChainService) HeadSeed(ctx context.Context, epoch uint64) ([32]byte, error) {
|
||||
return helpers.Seed(ms.State, epoch, params.BeaconConfig().DomainBeaconAttester)
|
||||
func (s *ChainService) HeadSeed(_ context.Context, epoch types.Epoch) ([32]byte, error) {
|
||||
return helpers.Seed(s.State, epoch, params.BeaconConfig().DomainBeaconAttester)
|
||||
}
|
||||
|
||||
// HeadETH1Data provides the current ETH1Data of the head state.
|
||||
func (ms *ChainService) HeadETH1Data() *ethpb.Eth1Data {
|
||||
return ms.ETH1Data
|
||||
func (s *ChainService) HeadETH1Data() *ethpb.Eth1Data {
|
||||
return s.ETH1Data
|
||||
}
|
||||
|
||||
// ProtoArrayStore mocks the same method in the chain service.
|
||||
func (ms *ChainService) ProtoArrayStore() *protoarray.Store {
|
||||
return ms.ForkChoiceStore
|
||||
func (s *ChainService) ProtoArrayStore() *protoarray.Store {
|
||||
return s.ForkChoiceStore
|
||||
}
|
||||
|
||||
// GenesisTime mocks the same method in the chain service.
|
||||
func (ms *ChainService) GenesisTime() time.Time {
|
||||
return ms.Genesis
|
||||
func (s *ChainService) GenesisTime() time.Time {
|
||||
return s.Genesis
|
||||
}
|
||||
|
||||
// GenesisValidatorRoot mocks the same method in the chain service.
|
||||
func (ms *ChainService) GenesisValidatorRoot() [32]byte {
|
||||
return ms.ValidatorsRoot
|
||||
func (s *ChainService) GenesisValidatorRoot() [32]byte {
|
||||
return s.ValidatorsRoot
|
||||
}
|
||||
|
||||
// CurrentSlot mocks the same method in the chain service.
|
||||
func (ms *ChainService) CurrentSlot() uint64 {
|
||||
return uint64(time.Now().Unix()-ms.Genesis.Unix()) / params.BeaconConfig().SecondsPerSlot
|
||||
func (s *ChainService) CurrentSlot() types.Slot {
|
||||
if s.Slot != nil {
|
||||
return *s.Slot
|
||||
}
|
||||
return types.Slot(uint64(time.Now().Unix()-s.Genesis.Unix()) / params.BeaconConfig().SecondsPerSlot)
|
||||
}
|
||||
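When no explicit slot override is set, the mock's CurrentSlot derives the slot from wall-clock time: seconds elapsed since genesis divided by SecondsPerSlot. A minimal standalone sketch of that arithmetic, assuming mainnet's 12-second slots; the slotSince helper below is illustrative and not part of this diff.

package main

import (
	"fmt"
	"time"
)

// slotSince returns the number of whole slots elapsed since genesis.
func slotSince(genesis time.Time, secondsPerSlot uint64) uint64 {
	return uint64(time.Now().Unix()-genesis.Unix()) / secondsPerSlot
}

func main() {
	genesis := time.Now().Add(-10 * time.Minute)
	fmt.Println(slotSince(genesis, 12)) // 600s elapsed / 12s per slot = slot 50
}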
|
||||
// Participation mocks the same method in the chain service.
|
||||
func (ms *ChainService) Participation(epoch uint64) *precompute.Balance {
|
||||
return ms.Balance
|
||||
func (s *ChainService) Participation(_ uint64) *precompute.Balance {
|
||||
return s.Balance
|
||||
}
|
||||
|
||||
// IsValidAttestation always returns true.
|
||||
func (ms *ChainService) IsValidAttestation(ctx context.Context, att *ethpb.Attestation) bool {
|
||||
return ms.ValidAttestation
|
||||
func (s *ChainService) IsValidAttestation(_ context.Context, _ *ethpb.Attestation) bool {
|
||||
return s.ValidAttestation
|
||||
}
|
||||
|
||||
// IsCanonical returns and determines whether a block with the provided root is part of
|
||||
// the canonical chain.
|
||||
func (ms *ChainService) IsCanonical(ctx context.Context, blockRoot [32]byte) (bool, error) {
|
||||
func (s *ChainService) IsCanonical(_ context.Context, r [32]byte) (bool, error) {
|
||||
if s.CanonicalRoots != nil {
|
||||
_, ok := s.CanonicalRoots[r]
|
||||
return ok, nil
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// ClearCachedStates does nothing.
|
||||
func (ms *ChainService) ClearCachedStates() {}
|
||||
|
||||
// HasInitSyncBlock mocks the same method in the chain service.
|
||||
func (ms *ChainService) HasInitSyncBlock(root [32]byte) bool {
|
||||
func (s *ChainService) HasInitSyncBlock(_ [32]byte) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// HeadGenesisValidatorRoot mocks HeadGenesisValidatorRoot method in chain service.
|
||||
func (ms *ChainService) HeadGenesisValidatorRoot() [32]byte {
|
||||
func (s *ChainService) HeadGenesisValidatorRoot() [32]byte {
|
||||
return [32]byte{}
|
||||
}
|
||||
|
||||
// VerifyBlkDescendant mocks VerifyBlkDescendant and always returns nil.
|
||||
func (ms *ChainService) VerifyBlkDescendant(ctx context.Context, root [32]byte) error {
|
||||
return ms.VerifyBlkDescendantErr
|
||||
func (s *ChainService) VerifyBlkDescendant(_ context.Context, _ [32]byte) error {
|
||||
return s.VerifyBlkDescendantErr
|
||||
}
|
||||
|
||||
// AttestationCheckPtInfo mocks AttestationCheckPtInfo and always returns nil.
|
||||
func (ms *ChainService) AttestationCheckPtInfo(ctx context.Context, att *ethpb.Attestation) (*pb.CheckPtInfo, error) {
|
||||
f := ms.State.Fork()
|
||||
g := bytesutil.ToBytes32(ms.State.GenesisValidatorRoot())
|
||||
seed, err := helpers.Seed(ms.State, helpers.SlotToEpoch(att.Data.Slot), params.BeaconConfig().DomainBeaconAttester)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
// VerifyLmdFfgConsistency mocks VerifyLmdFfgConsistency and always returns nil.
|
||||
func (s *ChainService) VerifyLmdFfgConsistency(_ context.Context, a *ethpb.Attestation) error {
|
||||
if !bytes.Equal(a.Data.BeaconBlockRoot, a.Data.Target.Root) {
|
||||
return errors.New("LMD and FFG miss matched")
|
||||
}
|
||||
indices, err := helpers.ActiveValidatorIndices(ms.State, helpers.SlotToEpoch(att.Data.Slot))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
validators := ms.State.ValidatorsReadOnly()
|
||||
pks := make([][]byte, len(validators))
|
||||
for i := 0; i < len(pks); i++ {
|
||||
pk := validators[i].PublicKey()
|
||||
pks[i] = pk[:]
|
||||
}
|
||||
|
||||
info := &pb.CheckPtInfo{
|
||||
Fork: f,
|
||||
GenesisRoot: g[:],
|
||||
Seed: seed[:],
|
||||
ActiveIndices: indices,
|
||||
PubKeys: pks,
|
||||
}
|
||||
|
||||
return info, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
// VerifyFinalizedConsistency mocks VerifyFinalizedConsistency and always returns nil.
|
||||
func (s *ChainService) VerifyFinalizedConsistency(_ context.Context, r []byte) error {
|
||||
if !bytes.Equal(r, s.FinalizedCheckPoint.Root) {
|
||||
return errors.New("Root and finalized store are not consistent")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
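These mocks capture the invariants the real service enforces: an attestation's LMD vote (its beacon block root) must equal its FFG target root, and a root claimed as finalized must match the stored finalized checkpoint root. A hedged sketch of exercising the first rule, assuming the mock lives in beacon-chain/blockchain/testing as elsewhere in this tree; the roots are made-up values.

package main

import (
	"context"
	"fmt"

	ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
	mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
)

func main() {
	s := &mock.ChainService{}
	att := &ethpb.Attestation{Data: &ethpb.AttestationData{
		BeaconBlockRoot: make([]byte, 32),
		Target:          &ethpb.Checkpoint{Root: append([]byte{'x'}, make([]byte, 31)...)},
	}}
	// The block root and target root differ, so the mock reports the LMD/FFG mismatch.
	fmt.Println(s.VerifyLmdFfgConsistency(context.Background(), att))
}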
60
beacon-chain/blockchain/weak_subjectivity_checks.go
Normal file
@@ -0,0 +1,60 @@
package blockchain

import (
	"context"
	"fmt"

	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/beacon-chain/db/filters"
	"github.com/prysmaticlabs/prysm/shared/bytesutil"
	"github.com/prysmaticlabs/prysm/shared/params"
)

// VerifyWeakSubjectivityRoot verifies the weak subjectivity root in the service struct.
// Reference design: https://github.com/ethereum/eth2.0-specs/blob/master/specs/phase0/weak-subjectivity.md#weak-subjectivity-sync-procedure
func (s *Service) VerifyWeakSubjectivityRoot(ctx context.Context) error {
	// TODO(7342): Remove the following to fully use weak subjectivity in production.
	if len(s.wsRoot) == 0 || s.wsEpoch == 0 {
		return nil
	}

	// Do nothing if the weak subjectivity root has previously been verified,
	// or the weak subjectivity epoch is higher than the last finalized epoch.
	if s.wsVerified {
		return nil
	}
	if s.wsEpoch > s.finalizedCheckpt.Epoch {
		return nil
	}

	r := bytesutil.ToBytes32(s.wsRoot)
	log.Infof("Performing weak subjectivity check for root %#x in epoch %d", r, s.wsEpoch)
	// Save initial sync cached blocks to DB.
	if err := s.beaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
		return err
	}
	// A node should have the weak subjectivity block in the DB.
	if !s.beaconDB.HasBlock(ctx, r) {
		return fmt.Errorf("node does not have root in DB: %#x", r)
	}

	startSlot, err := helpers.StartSlot(s.wsEpoch)
	if err != nil {
		return err
	}
	// A node should have the weak subjectivity block corresponding to the correct epoch in the DB.
	filter := filters.NewFilter().SetStartSlot(startSlot).SetEndSlot(startSlot + params.BeaconConfig().SlotsPerEpoch)
	roots, err := s.beaconDB.BlockRoots(ctx, filter)
	if err != nil {
		return err
	}
	for _, root := range roots {
		if r == root {
			log.Info("Weak subjectivity check has passed")
			s.wsVerified = true
			return nil
		}
	}

	return fmt.Errorf("node does not have root in db corresponding to epoch: %#x %d", r, s.wsEpoch)
}
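The check above only succeeds once the configured root is stored under the configured epoch, so it is intended to run after finalization has reached wsEpoch. A call-site sketch follows; the wrapper method, its name, and the error wrapping are assumptions rather than part of this diff.

// Hypothetical wiring, not part of this diff: run the check whenever the
// finalized checkpoint advances.
func (s *Service) onFinalized(ctx context.Context) error {
	if err := s.VerifyWeakSubjectivityRoot(ctx); err != nil {
		// A failed check means the node may be following the wrong chain;
		// surfacing it as an error is safer than continuing to sync.
		return fmt.Errorf("weak subjectivity check failed: %w", err)
	}
	return nil
}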
86
beacon-chain/blockchain/weak_subjectivity_checks_test.go
Normal file
@@ -0,0 +1,86 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
)
|
||||
|
||||
func TestService_VerifyWeakSubjectivityRoot(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
b := testutil.NewBeaconBlock()
|
||||
b.Block.Slot = 32
|
||||
require.NoError(t, beaconDB.SaveBlock(context.Background(), b))
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
tests := []struct {
|
||||
wsVerified bool
|
||||
wantErr bool
|
||||
wsRoot [32]byte
|
||||
wsEpoch types.Epoch
|
||||
finalizedEpoch types.Epoch
|
||||
errString string
|
||||
name string
|
||||
}{
|
||||
{
|
||||
name: "nil root and epoch",
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "already verified",
|
||||
wsEpoch: 2,
|
||||
finalizedEpoch: 2,
|
||||
wsVerified: true,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "not yet to verify, ws epoch higher than finalized epoch",
|
||||
wsEpoch: 2,
|
||||
finalizedEpoch: 1,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "can't find the block in DB",
|
||||
wsEpoch: 1,
|
||||
wsRoot: [32]byte{'a'},
|
||||
finalizedEpoch: 3,
|
||||
wantErr: true,
|
||||
errString: "node does not have root in DB",
|
||||
},
|
||||
{
|
||||
name: "can't find the block corresponds to ws epoch in DB",
|
||||
wsEpoch: 2,
|
||||
wsRoot: r, // Root belongs in epoch 1.
|
||||
finalizedEpoch: 3,
|
||||
wantErr: true,
|
||||
errString: "node does not have root in db corresponding to epoch",
|
||||
},
|
||||
{
|
||||
name: "can verify and pass",
|
||||
wsEpoch: 1,
|
||||
wsRoot: r,
|
||||
finalizedEpoch: 3,
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
s := &Service{
|
||||
beaconDB: beaconDB,
|
||||
wsRoot: tt.wsRoot[:],
|
||||
wsEpoch: tt.wsEpoch,
|
||||
wsVerified: tt.wsVerified,
|
||||
finalizedCheckpt: ðpb.Checkpoint{Epoch: tt.finalizedEpoch},
|
||||
}
|
||||
if err := s.VerifyWeakSubjectivityRoot(context.Background()); (err != nil) != tt.wantErr {
|
||||
require.ErrorContains(t, tt.errString, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
12
beacon-chain/cache/BUILD.bazel
vendored
@@ -2,6 +2,7 @@ load("@prysm//tools/go:def.bzl", "go_library")
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_test")
|
||||
|
||||
# gazelle:ignore committee_disabled.go
|
||||
# gazelle:ignore proposer_indices_disabled.go
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
@@ -10,16 +11,17 @@ go_library(
|
||||
"committees.go",
|
||||
"common.go",
|
||||
"doc.go",
|
||||
"hot_state_cache.go",
|
||||
"skip_slot_cache.go",
|
||||
"state_summary.go",
|
||||
"subnet_ids.go",
|
||||
"proposer_indices_type.go",
|
||||
] + select({
|
||||
"//fuzz:fuzzing_enabled": [
|
||||
"committee_disabled.go",
|
||||
"proposer_indices_disabled.go"
|
||||
],
|
||||
"//conditions:default": [
|
||||
"committee.go",
|
||||
"proposer_indices.go",
|
||||
],
|
||||
}),
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/cache",
|
||||
@@ -39,6 +41,7 @@ go_library(
|
||||
"@com_github_patrickmn_go_cache//:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
|
||||
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
|
||||
"@io_k8s_client_go//tools/cache:go_default_library",
|
||||
"@io_opencensus_go//trace:go_default_library",
|
||||
@@ -53,10 +56,10 @@ go_test(
|
||||
"checkpoint_state_test.go",
|
||||
"committee_fuzz_test.go",
|
||||
"committee_test.go",
|
||||
"feature_flag_test.go",
|
||||
"hot_state_cache_test.go",
|
||||
"cache_test.go",
|
||||
"skip_slot_cache_test.go",
|
||||
"subnet_ids_test.go",
|
||||
"proposer_indices_test.go"
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
@@ -69,6 +72,7 @@ go_test(
|
||||
"//shared/testutil/require:go_default_library",
|
||||
"@com_github_gogo_protobuf//proto:go_default_library",
|
||||
"@com_github_google_gofuzz//:go_default_library",
|
||||
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
|
||||
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
13
beacon-chain/cache/attestation_data.go
vendored
@@ -12,7 +12,6 @@ import (
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/shared/featureconfig"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
@@ -99,10 +98,7 @@ func (c *AttestationCache) Get(ctx context.Context, req *ethpb.AttestationDataRe
|
||||
|
||||
if exists && item != nil && item.(*attestationReqResWrapper).res != nil {
|
||||
attestationCacheHit.Inc()
|
||||
if featureconfig.Get().ReduceAttesterStateCopy {
|
||||
return state.CopyAttestationData(item.(*attestationReqResWrapper).res), nil
|
||||
}
|
||||
return item.(*attestationReqResWrapper).res, nil
|
||||
return state.CopyAttestationData(item.(*attestationReqResWrapper).res), nil
|
||||
}
|
||||
attestationCacheMiss.Inc()
|
||||
return nil, nil
|
||||
@@ -138,7 +134,7 @@ func (c *AttestationCache) MarkNotInProgress(req *ethpb.AttestationDataRequest)
|
||||
}
|
||||
|
||||
// Put the response in the cache.
|
||||
func (c *AttestationCache) Put(ctx context.Context, req *ethpb.AttestationDataRequest, res *ethpb.AttestationData) error {
|
||||
func (c *AttestationCache) Put(_ context.Context, req *ethpb.AttestationDataRequest, res *ethpb.AttestationData) error {
|
||||
data := &attestationReqResWrapper{
|
||||
req,
|
||||
res,
|
||||
@@ -167,10 +163,7 @@ func wrapperToKey(i interface{}) (string, error) {
|
||||
}
|
||||
|
||||
func reqToKey(req *ethpb.AttestationDataRequest) (string, error) {
|
||||
if featureconfig.Get().ReduceAttesterStateCopy {
|
||||
return fmt.Sprintf("%d", req.Slot), nil
|
||||
}
|
||||
return fmt.Sprintf("%d-%d", req.CommitteeIndex, req.Slot), nil
|
||||
return fmt.Sprintf("%d", req.Slot), nil
|
||||
}
|
||||
|
||||
type attestationReqResWrapper struct {
|
||||
|
||||
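With the ReduceAttesterStateCopy feature flag gone, attestation data is always keyed by slot alone and always returned as a copy, so requests for different committee indices in the same slot share a single cache entry that callers cannot mutate. A minimal sketch of the slot-only keying rule; the key helper below is illustrative, not the cache's own code.

package main

import (
	"fmt"

	ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
)

// key mirrors reqToKey above: the committee index no longer participates.
func key(req *ethpb.AttestationDataRequest) string {
	return fmt.Sprintf("%d", req.Slot)
}

func main() {
	a := &ethpb.AttestationDataRequest{Slot: 7, CommitteeIndex: 1}
	b := &ethpb.AttestationDataRequest{Slot: 7, CommitteeIndex: 2}
	fmt.Println(key(a) == key(b)) // true: same slot, same cache entry
}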
9
beacon-chain/cache/cache_test.go
vendored
Normal file
@@ -0,0 +1,9 @@
package cache

import (
	"testing"
)

func TestMain(m *testing.M) {
	m.Run()
}
5
beacon-chain/cache/checkpoint_state.go
vendored
@@ -1,7 +1,6 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"sync"
|
||||
|
||||
lru "github.com/hashicorp/golang-lru"
|
||||
@@ -13,10 +12,6 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrNotCheckpointState will be returned when a cache object is not a pointer to
|
||||
// a CheckpointState struct.
|
||||
ErrNotCheckpointState = errors.New("object is not a state by check point struct")
|
||||
|
||||
// maxCheckpointStateSize defines the max number of entries check point to state cache can contain.
|
||||
// Choosing 10 to account for multiple forks, this allows 5 forks per epoch boundary with 2 epochs
|
||||
// window to accept attestation based on latest spec.
|
||||
|
||||
5
beacon-chain/cache/checkpoint_state_test.go
vendored
@@ -4,6 +4,7 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
@@ -60,8 +61,8 @@ func TestCheckpointStateCache_MaxSize(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
for i := uint64(0); i < uint64(maxCheckpointStateSize+100); i++ {
|
||||
require.NoError(t, st.SetSlot(i))
|
||||
require.NoError(t, c.AddCheckpointState(ðpb.Checkpoint{Epoch: i, Root: make([]byte, 32)}, st))
|
||||
require.NoError(t, st.SetSlot(types.Slot(i)))
|
||||
require.NoError(t, c.AddCheckpointState(ðpb.Checkpoint{Epoch: types.Epoch(i), Root: make([]byte, 32)}, st))
|
||||
}
|
||||
|
||||
assert.Equal(t, maxCheckpointStateSize, len(c.cache.Keys()))
|
||||
|
||||
66
beacon-chain/cache/committee.go
vendored
@@ -1,4 +1,4 @@
|
||||
// -build libfuzzer
|
||||
// +build !libfuzzer
|
||||
|
||||
package cache
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/sliceutil"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
@@ -55,7 +56,7 @@ func NewCommitteesCache() *CommitteeCache {
|
||||
|
||||
// Committee fetches the shuffled indices by slot and committee index. Every list of indices
|
||||
// represent one committee. Returns true if the list exists with slot and committee index. Otherwise returns false, nil.
|
||||
func (c *CommitteeCache) Committee(slot uint64, seed [32]byte, index uint64) ([]uint64, error) {
|
||||
func (c *CommitteeCache) Committee(slot types.Slot, seed [32]byte, index types.CommitteeIndex) ([]types.ValidatorIndex, error) {
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
|
||||
@@ -77,11 +78,11 @@ func (c *CommitteeCache) Committee(slot uint64, seed [32]byte, index uint64) ([]
|
||||
}
|
||||
|
||||
committeeCountPerSlot := uint64(1)
|
||||
if item.CommitteeCount/params.BeaconConfig().SlotsPerEpoch > 1 {
|
||||
committeeCountPerSlot = item.CommitteeCount / params.BeaconConfig().SlotsPerEpoch
|
||||
if item.CommitteeCount/uint64(params.BeaconConfig().SlotsPerEpoch) > 1 {
|
||||
committeeCountPerSlot = item.CommitteeCount / uint64(params.BeaconConfig().SlotsPerEpoch)
|
||||
}
|
||||
|
||||
indexOffSet := index + (slot%params.BeaconConfig().SlotsPerEpoch)*committeeCountPerSlot
|
||||
indexOffSet := uint64(index) + uint64(slot.ModSlot(params.BeaconConfig().SlotsPerEpoch).Mul(committeeCountPerSlot))
|
||||
start, end := startEndIndices(item, indexOffSet)
|
||||
|
||||
if end > uint64(len(item.ShuffledIndices)) || end < start {
|
||||
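The rewritten offset is the same arithmetic expressed through the typed helpers: index + (slot mod SlotsPerEpoch) * committeesPerSlot. A small sketch comparing the raw uint64 form with the typed form used in the hunk above; the concrete values are made up.

package main

import (
	"fmt"

	types "github.com/prysmaticlabs/eth2-types"
)

func main() {
	slot, slotsPerEpoch := types.Slot(70), types.Slot(32)
	index, committeesPerSlot := uint64(3), uint64(4)

	// Old form: plain uint64 arithmetic.
	rawOffset := index + (uint64(slot)%uint64(slotsPerEpoch))*committeesPerSlot
	// New form: the Slot type's ModSlot and Mul helpers.
	typedOffset := index + uint64(slot.ModSlot(slotsPerEpoch).Mul(committeesPerSlot))

	fmt.Println(rawOffset == typedOffset) // both yield the committee's offset within the epoch
}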
@@ -104,37 +105,8 @@ func (c *CommitteeCache) AddCommitteeShuffledList(committees *Committees) error
|
||||
return nil
|
||||
}
|
||||
|
||||
// AddProposerIndicesList updates the committee shuffled list with proposer indices.
|
||||
func (c *CommitteeCache) AddProposerIndicesList(seed [32]byte, indices []uint64) error {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
obj, exists, err := c.CommitteeCache.GetByKey(key(seed))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !exists {
|
||||
committees := &Committees{ProposerIndices: indices}
|
||||
if err := c.CommitteeCache.Add(committees); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
committees, ok := obj.(*Committees)
|
||||
if !ok {
|
||||
return ErrNotCommittee
|
||||
}
|
||||
committees.ProposerIndices = indices
|
||||
if err := c.CommitteeCache.Add(committees); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
trim(c.CommitteeCache, maxCommitteesCacheSize)
|
||||
return nil
|
||||
}
|
||||
|
||||
// ActiveIndices returns the active indices of a given seed stored in cache.
|
||||
func (c *CommitteeCache) ActiveIndices(seed [32]byte) ([]uint64, error) {
|
||||
func (c *CommitteeCache) ActiveIndices(seed [32]byte) ([]types.ValidatorIndex, error) {
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
obj, exists, err := c.CommitteeCache.GetByKey(key(seed))
|
||||
@@ -181,30 +153,6 @@ func (c *CommitteeCache) ActiveIndicesCount(seed [32]byte) (int, error) {
|
||||
return len(item.SortedIndices), nil
|
||||
}
|
||||
|
||||
// ProposerIndices returns the proposer indices of a given seed.
|
||||
func (c *CommitteeCache) ProposerIndices(seed [32]byte) ([]uint64, error) {
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
obj, exists, err := c.CommitteeCache.GetByKey(key(seed))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if exists {
|
||||
CommitteeCacheHit.Inc()
|
||||
} else {
|
||||
CommitteeCacheMiss.Inc()
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
item, ok := obj.(*Committees)
|
||||
if !ok {
|
||||
return nil, ErrNotCommittee
|
||||
}
|
||||
|
||||
return item.ProposerIndices, nil
|
||||
}
|
||||
|
||||
// HasEntry returns true if the committee cache has a value.
|
||||
func (c *CommitteeCache) HasEntry(seed string) bool {
|
||||
_, ok, err := c.CommitteeCache.GetByKey(seed)
|
||||
|
||||
10
beacon-chain/cache/committee_disabled.go
vendored
@@ -3,6 +3,8 @@
|
||||
// This file is used in fuzzer builds to bypass global committee caches.
|
||||
package cache
|
||||
|
||||
import types "github.com/prysmaticlabs/eth2-types"
|
||||
|
||||
// FakeCommitteeCache is a struct with 1 queue for looking up shuffled indices list by seed.
|
||||
type FakeCommitteeCache struct {
|
||||
}
|
||||
@@ -14,7 +16,7 @@ func NewCommitteesCache() *FakeCommitteeCache {
|
||||
|
||||
// Committee fetches the shuffled indices by slot and committee index. Every list of indices
|
||||
// represent one committee. Returns true if the list exists with slot and committee index. Otherwise returns false, nil.
|
||||
func (c *FakeCommitteeCache) Committee(slot uint64, seed [32]byte, index uint64) ([]uint64, error) {
|
||||
func (c *FakeCommitteeCache) Committee(slot types.Slot, seed [32]byte, index types.CommitteeIndex) ([]types.ValidatorIndex, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
@@ -25,12 +27,12 @@ func (c *FakeCommitteeCache) AddCommitteeShuffledList(committees *Committees) er
|
||||
}
|
||||
|
||||
// AddProposerIndicesList updates the committee shuffled list with proposer indices.
|
||||
func (c *FakeCommitteeCache) AddProposerIndicesList(seed [32]byte, indices []uint64) error {
|
||||
func (c *FakeCommitteeCache) AddProposerIndicesList(seed [32]byte, indices []types.ValidatorIndex) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// ActiveIndices returns the active indices of a given seed stored in cache.
|
||||
func (c *FakeCommitteeCache) ActiveIndices(seed [32]byte) ([]uint64, error) {
|
||||
func (c *FakeCommitteeCache) ActiveIndices(seed [32]byte) ([]types.ValidatorIndex, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
@@ -40,7 +42,7 @@ func (c *FakeCommitteeCache) ActiveIndicesCount(seed [32]byte) (int, error) {
|
||||
}
|
||||
|
||||
// ProposerIndices returns the proposer indices of a given seed.
|
||||
func (c *FakeCommitteeCache) ProposerIndices(seed [32]byte) ([]uint64, error) {
|
||||
func (c *FakeCommitteeCache) ProposerIndices(seed [32]byte) ([]types.ValidatorIndex, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
|
||||
51
beacon-chain/cache/committee_test.go
vendored
@@ -6,6 +6,7 @@ import (
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
|
||||
@@ -16,7 +17,7 @@ func TestCommitteeKeyFn_OK(t *testing.T) {
|
||||
item := &Committees{
|
||||
CommitteeCount: 1,
|
||||
Seed: [32]byte{'A'},
|
||||
ShuffledIndices: []uint64{1, 2, 3, 4, 5},
|
||||
ShuffledIndices: []types.ValidatorIndex{1, 2, 3, 4, 5},
|
||||
}
|
||||
|
||||
k, err := committeeKeyFn(item)
|
||||
@@ -33,13 +34,13 @@ func TestCommitteeCache_CommitteesByEpoch(t *testing.T) {
|
||||
cache := NewCommitteesCache()
|
||||
|
||||
item := &Committees{
|
||||
ShuffledIndices: []uint64{1, 2, 3, 4, 5, 6},
|
||||
ShuffledIndices: []types.ValidatorIndex{1, 2, 3, 4, 5, 6},
|
||||
Seed: [32]byte{'A'},
|
||||
CommitteeCount: 3,
|
||||
}
|
||||
|
||||
slot := params.BeaconConfig().SlotsPerEpoch
|
||||
committeeIndex := uint64(1)
|
||||
committeeIndex := types.CommitteeIndex(1)
|
||||
indices, err := cache.Committee(slot, item.Seed, committeeIndex)
|
||||
require.NoError(t, err)
|
||||
if indices != nil {
|
||||
@@ -47,18 +48,18 @@ func TestCommitteeCache_CommitteesByEpoch(t *testing.T) {
|
||||
}
|
||||
require.NoError(t, cache.AddCommitteeShuffledList(item))
|
||||
|
||||
wantedIndex := uint64(0)
|
||||
wantedIndex := types.CommitteeIndex(0)
|
||||
indices, err = cache.Committee(slot, item.Seed, wantedIndex)
|
||||
require.NoError(t, err)
|
||||
|
||||
start, end := startEndIndices(item, wantedIndex)
|
||||
start, end := startEndIndices(item, uint64(wantedIndex))
|
||||
assert.DeepEqual(t, item.ShuffledIndices[start:end], indices)
|
||||
}
|
||||
|
||||
func TestCommitteeCache_ActiveIndices(t *testing.T) {
|
||||
cache := NewCommitteesCache()
|
||||
|
||||
item := &Committees{Seed: [32]byte{'A'}, SortedIndices: []uint64{1, 2, 3, 4, 5, 6}}
|
||||
item := &Committees{Seed: [32]byte{'A'}, SortedIndices: []types.ValidatorIndex{1, 2, 3, 4, 5, 6}}
|
||||
indices, err := cache.ActiveIndices(item.Seed)
|
||||
require.NoError(t, err)
|
||||
if indices != nil {
|
||||
@@ -75,7 +76,7 @@ func TestCommitteeCache_ActiveIndices(t *testing.T) {
|
||||
func TestCommitteeCache_ActiveCount(t *testing.T) {
|
||||
cache := NewCommitteesCache()
|
||||
|
||||
item := &Committees{Seed: [32]byte{'A'}, SortedIndices: []uint64{1, 2, 3, 4, 5, 6}}
|
||||
item := &Committees{Seed: [32]byte{'A'}, SortedIndices: []types.ValidatorIndex{1, 2, 3, 4, 5, 6}}
|
||||
count, err := cache.ActiveIndicesCount(item.Seed)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 0, count, "Expected active count not to exist in empty cache")
|
||||
@@ -87,37 +88,6 @@ func TestCommitteeCache_ActiveCount(t *testing.T) {
|
||||
assert.Equal(t, len(item.SortedIndices), count)
|
||||
}
|
||||
|
||||
func TestCommitteeCache_AddProposerIndicesList(t *testing.T) {
|
||||
cache := NewCommitteesCache()
|
||||
|
||||
seed := [32]byte{'A'}
|
||||
indices := []uint64{1, 2, 3, 4, 5}
|
||||
indices, err := cache.ProposerIndices(seed)
|
||||
require.NoError(t, err)
|
||||
if indices != nil {
|
||||
t.Error("Expected committee count not to exist in empty cache")
|
||||
}
|
||||
require.NoError(t, cache.AddProposerIndicesList(seed, indices))
|
||||
|
||||
received, err := cache.ProposerIndices(seed)
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, received, indices)
|
||||
|
||||
item := &Committees{Seed: [32]byte{'B'}, SortedIndices: []uint64{1, 2, 3, 4, 5, 6}}
|
||||
require.NoError(t, cache.AddCommitteeShuffledList(item))
|
||||
|
||||
indices, err = cache.ProposerIndices(item.Seed)
|
||||
require.NoError(t, err)
|
||||
if indices != nil {
|
||||
t.Error("Expected committee count not to exist in empty cache")
|
||||
}
|
||||
require.NoError(t, cache.AddProposerIndicesList(item.Seed, indices))
|
||||
|
||||
received, err = cache.ProposerIndices(item.Seed)
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, received, indices)
|
||||
}
|
||||
|
||||
func TestCommitteeCache_CanRotate(t *testing.T) {
|
||||
cache := NewCommitteesCache()
|
||||
|
||||
@@ -150,9 +120,8 @@ func TestCommitteeCacheOutOfRange(t *testing.T) {
|
||||
err := cache.CommitteeCache.Add(&Committees{
|
||||
CommitteeCount: 1,
|
||||
Seed: seed,
|
||||
ShuffledIndices: []uint64{0},
|
||||
SortedIndices: []uint64{},
|
||||
ProposerIndices: []uint64{},
|
||||
ShuffledIndices: []types.ValidatorIndex{0},
|
||||
SortedIndices: []types.ValidatorIndex{},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
|
||||
11
beacon-chain/cache/committees.go
vendored
@@ -1,6 +1,10 @@
|
||||
package cache
|
||||
|
||||
import "errors"
|
||||
import (
|
||||
"errors"
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
)
|
||||
|
||||
// ErrNotCommittee will be returned when a cache object is not a pointer to
|
||||
// a Committee struct.
|
||||
@@ -10,7 +14,6 @@ var ErrNotCommittee = errors.New("object is not a committee struct")
|
||||
type Committees struct {
|
||||
CommitteeCount uint64
|
||||
Seed [32]byte
|
||||
ShuffledIndices []uint64
|
||||
SortedIndices []uint64
|
||||
ProposerIndices []uint64
|
||||
ShuffledIndices []types.ValidatorIndex
|
||||
SortedIndices []types.ValidatorIndex
|
||||
}
|
||||
|
||||
4
beacon-chain/cache/common.go
vendored
@@ -8,7 +8,7 @@ import (
|
||||
var (
|
||||
// maxCacheSize is 4x of the epoch length for additional cache padding.
|
||||
// Requests should be only accessing committees within defined epoch length.
|
||||
maxCacheSize = 4 * params.BeaconConfig().SlotsPerEpoch
|
||||
maxCacheSize = uint64(4 * params.BeaconConfig().SlotsPerEpoch)
|
||||
)
|
||||
|
||||
// trim the FIFO queue to the maxSize.
|
||||
@@ -24,6 +24,6 @@ func trim(queue *cache.FIFO, maxSize uint64) {
|
||||
}
|
||||
|
||||
// popProcessNoopFunc is a no-op function that never returns an error.
|
||||
func popProcessNoopFunc(obj interface{}) error {
|
||||
func popProcessNoopFunc(_ interface{}) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
1
beacon-chain/cache/depositcache/BUILD.bazel
vendored
@@ -5,6 +5,7 @@ go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"deposits_cache.go",
|
||||
"log.go",
|
||||
"pending_deposits.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache",
|
||||
|
||||
@@ -19,7 +19,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/trieutil"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
@@ -50,17 +50,15 @@ type FinalizedDeposits struct {
|
||||
// stores all the deposit related data that is required by the beacon-node.
|
||||
type DepositCache struct {
|
||||
// Beacon chain deposits in memory.
|
||||
pendingDeposits []*dbpb.DepositContainer
|
||||
deposits []*dbpb.DepositContainer
|
||||
finalizedDeposits *FinalizedDeposits
|
||||
depositsLock sync.RWMutex
|
||||
chainStartDeposits []*ethpb.Deposit
|
||||
chainStartPubkeys map[string]bool
|
||||
pendingDeposits []*dbpb.DepositContainer
|
||||
deposits []*dbpb.DepositContainer
|
||||
finalizedDeposits *FinalizedDeposits
|
||||
depositsLock sync.RWMutex
|
||||
}
|
||||
|
||||
// NewDepositCache instantiates a new deposit cache
|
||||
func NewDepositCache() (*DepositCache, error) {
|
||||
finalizedDepositsTrie, err := trieutil.NewTrie(int(params.BeaconConfig().DepositContractTreeDepth))
|
||||
// New instantiates a new deposit cache
|
||||
func New() (*DepositCache, error) {
|
||||
finalizedDepositsTrie, err := trieutil.NewTrie(params.BeaconConfig().DepositContractTreeDepth)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -68,11 +66,9 @@ func NewDepositCache() (*DepositCache, error) {
|
||||
// finalizedDeposits.MerkleTrieIndex is initialized to -1 because it represents the index of the last trie item.
|
||||
// Inserting the first item into the trie will set the value of the index to 0.
|
||||
return &DepositCache{
|
||||
pendingDeposits: []*dbpb.DepositContainer{},
|
||||
deposits: []*dbpb.DepositContainer{},
|
||||
finalizedDeposits: &FinalizedDeposits{Deposits: finalizedDepositsTrie, MerkleTrieIndex: -1},
|
||||
chainStartPubkeys: make(map[string]bool),
|
||||
chainStartDeposits: make([]*ethpb.Deposit, 0),
|
||||
pendingDeposits: []*dbpb.DepositContainer{},
|
||||
deposits: []*dbpb.DepositContainer{},
|
||||
finalizedDeposits: &FinalizedDeposits{Deposits: finalizedDepositsTrie, MerkleTrieIndex: -1},
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -82,7 +78,7 @@ func (dc *DepositCache) InsertDeposit(ctx context.Context, d *ethpb.Deposit, blo
|
||||
ctx, span := trace.StartSpan(ctx, "DepositsCache.InsertDeposit")
|
||||
defer span.End()
|
||||
if d == nil {
|
||||
log.WithFields(log.Fields{
|
||||
log.WithFields(logrus.Fields{
|
||||
"block": blockNum,
|
||||
"deposit": d,
|
||||
"index": index,
|
||||
@@ -119,7 +115,7 @@ func (dc *DepositCache) InsertFinalizedDeposits(ctx context.Context, eth1Deposit
|
||||
defer dc.depositsLock.Unlock()
|
||||
|
||||
depositTrie := dc.finalizedDeposits.Deposits
|
||||
insertIndex := dc.finalizedDeposits.MerkleTrieIndex + 1
|
||||
insertIndex := int(dc.finalizedDeposits.MerkleTrieIndex + 1)
|
||||
for _, d := range dc.deposits {
|
||||
if d.Index <= dc.finalizedDeposits.MerkleTrieIndex {
|
||||
continue
|
||||
@@ -132,7 +128,7 @@ func (dc *DepositCache) InsertFinalizedDeposits(ctx context.Context, eth1Deposit
|
||||
log.WithError(err).Error("Could not hash deposit data. Finalized deposit cache not updated.")
|
||||
return
|
||||
}
|
||||
depositTrie.Insert(depHash[:], int(insertIndex))
|
||||
depositTrie.Insert(depHash[:], insertIndex)
|
||||
insertIndex++
|
||||
}
|
||||
|
||||
@@ -152,24 +148,6 @@ func (dc *DepositCache) AllDepositContainers(ctx context.Context) []*dbpb.Deposi
|
||||
return dc.deposits
|
||||
}
|
||||
|
||||
// MarkPubkeyForChainstart sets the pubkey deposit status to true.
|
||||
func (dc *DepositCache) MarkPubkeyForChainstart(ctx context.Context, pubkey string) {
|
||||
ctx, span := trace.StartSpan(ctx, "DepositsCache.MarkPubkeyForChainstart")
|
||||
defer span.End()
|
||||
dc.chainStartPubkeys[pubkey] = true
|
||||
}
|
||||
|
||||
// PubkeyInChainstart returns bool for whether the pubkey passed in has deposited.
|
||||
func (dc *DepositCache) PubkeyInChainstart(ctx context.Context, pubkey string) bool {
|
||||
ctx, span := trace.StartSpan(ctx, "DepositsCache.PubkeyInChainstart")
|
||||
defer span.End()
|
||||
if dc.chainStartPubkeys != nil {
|
||||
return dc.chainStartPubkeys[pubkey]
|
||||
}
|
||||
dc.chainStartPubkeys = make(map[string]bool)
|
||||
return false
|
||||
}
|
||||
|
||||
// AllDeposits returns a list of historical deposits until the given block number
|
||||
// (inclusive). If no block is specified then this method returns all historical deposits.
|
||||
func (dc *DepositCache) AllDeposits(ctx context.Context, untilBlk *big.Int) []*ethpb.Deposit {
|
||||
@@ -258,3 +236,28 @@ func (dc *DepositCache) NonFinalizedDeposits(ctx context.Context, untilBlk *big.
|
||||
|
||||
return deposits
|
||||
}
|
||||
|
||||
// PruneProofs removes proofs from all deposits whose index is equal or less than untilDepositIndex.
|
||||
func (dc *DepositCache) PruneProofs(ctx context.Context, untilDepositIndex int64) error {
|
||||
ctx, span := trace.StartSpan(ctx, "DepositsCache.PruneProofs")
|
||||
defer span.End()
|
||||
dc.depositsLock.Lock()
|
||||
defer dc.depositsLock.Unlock()
|
||||
|
||||
if untilDepositIndex >= int64(len(dc.deposits)) {
|
||||
untilDepositIndex = int64(len(dc.deposits) - 1)
|
||||
}
|
||||
|
||||
for i := untilDepositIndex; i >= 0; i-- {
|
||||
if ctx.Err() != nil {
|
||||
return ctx.Err()
|
||||
}
|
||||
// Finding a nil proof means that all proofs up to this deposit have been already pruned.
|
||||
if dc.deposits[i].Deposit.Proof == nil {
|
||||
break
|
||||
}
|
||||
dc.deposits[i].Deposit.Proof = nil
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
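PruneProofs walks backwards from untilDepositIndex, clamps an out-of-range index to the last stored deposit, and stops at the first proof that is already nil, so repeated pruning stays cheap. A hedged usage sketch built around the New constructor introduced above; the index value is made up.

package main

import (
	"context"
	"log"

	"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
)

func main() {
	dc, err := depositcache.New()
	if err != nil {
		log.Fatal(err)
	}
	// Once deposits up to index 1 are finalized, their Merkle proofs are no
	// longer needed and can be dropped to reclaim memory.
	if err := dc.PruneProofs(context.Background(), 1); err != nil {
		log.Fatal(err)
	}
}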
@@ -19,11 +19,11 @@ import (
|
||||
|
||||
const nilDepositErr = "Ignoring nil deposit insertion"
|
||||
|
||||
var _ = DepositFetcher(&DepositCache{})
|
||||
var _ DepositFetcher = (*DepositCache)(nil)
|
||||
|
||||
func TestInsertDeposit_LogsOnNilDepositInsertion(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
dc, err := NewDepositCache()
|
||||
dc, err := New()
|
||||
require.NoError(t, err)
|
||||
|
||||
dc.InsertDeposit(context.Background(), nil, 1, 0, [32]byte{})
|
||||
@@ -33,7 +33,7 @@ func TestInsertDeposit_LogsOnNilDepositInsertion(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestInsertDeposit_MaintainsSortedOrderByIndex(t *testing.T) {
|
||||
dc, err := NewDepositCache()
|
||||
dc, err := New()
|
||||
require.NoError(t, err)
|
||||
|
||||
insertions := []struct {
|
||||
@@ -75,7 +75,7 @@ func TestInsertDeposit_MaintainsSortedOrderByIndex(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestAllDeposits_ReturnsAllDeposits(t *testing.T) {
|
||||
dc, err := NewDepositCache()
|
||||
dc, err := New()
|
||||
require.NoError(t, err)
|
||||
|
||||
deposits := []*dbpb.DepositContainer{
|
||||
@@ -115,7 +115,7 @@ func TestAllDeposits_ReturnsAllDeposits(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestAllDeposits_FiltersDepositUpToAndIncludingBlockNumber(t *testing.T) {
|
||||
dc, err := NewDepositCache()
|
||||
dc, err := New()
|
||||
require.NoError(t, err)
|
||||
|
||||
deposits := []*dbpb.DepositContainer{
|
||||
@@ -155,7 +155,7 @@ func TestAllDeposits_FiltersDepositUpToAndIncludingBlockNumber(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDepositsNumberAndRootAtHeight_ReturnsAppropriateCountAndRoot(t *testing.T) {
|
||||
dc, err := NewDepositCache()
|
||||
dc, err := New()
|
||||
require.NoError(t, err)
|
||||
|
||||
dc.deposits = []*dbpb.DepositContainer{
|
||||
@@ -238,7 +238,7 @@ func TestDepositsNumberAndRootAtHeight_ReturnsAppropriateCountAndRoot(t *testing
|
||||
}
|
||||
|
||||
func TestDepositsNumberAndRootAtHeight_ReturnsEmptyTrieIfBlockHeightLessThanOldestDeposit(t *testing.T) {
|
||||
dc, err := NewDepositCache()
|
||||
dc, err := New()
|
||||
require.NoError(t, err)
|
||||
|
||||
dc.deposits = []*dbpb.DepositContainer{
|
||||
@@ -272,7 +272,7 @@ func TestDepositsNumberAndRootAtHeight_ReturnsEmptyTrieIfBlockHeightLessThanOlde
|
||||
}
|
||||
|
||||
func TestDepositByPubkey_ReturnsFirstMatchingDeposit(t *testing.T) {
|
||||
dc, err := NewDepositCache()
|
||||
dc, err := New()
|
||||
require.NoError(t, err)
|
||||
|
||||
dc.deposits = []*dbpb.DepositContainer{
|
||||
@@ -329,7 +329,7 @@ func TestDepositByPubkey_ReturnsFirstMatchingDeposit(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestFinalizedDeposits_DepositsCachedCorrectly(t *testing.T) {
|
||||
dc, err := NewDepositCache()
|
||||
dc, err := New()
|
||||
require.NoError(t, err)
|
||||
|
||||
finalizedDeposits := []*dbpb.DepositContainer{
|
||||
@@ -387,13 +387,13 @@ func TestFinalizedDeposits_DepositsCachedCorrectly(t *testing.T) {
|
||||
require.NoError(t, err, "Could not hash deposit data")
|
||||
deps = append(deps, hash[:])
|
||||
}
|
||||
trie, err := trieutil.GenerateTrieFromItems(deps, int(params.BeaconConfig().DepositContractTreeDepth))
|
||||
trie, err := trieutil.GenerateTrieFromItems(deps, params.BeaconConfig().DepositContractTreeDepth)
|
||||
require.NoError(t, err, "Could not generate deposit trie")
|
||||
assert.Equal(t, trie.HashTreeRoot(), cachedDeposits.Deposits.HashTreeRoot())
|
||||
}
|
||||
|
||||
func TestFinalizedDeposits_UtilizesPreviouslyCachedDeposits(t *testing.T) {
|
||||
dc, err := NewDepositCache()
|
||||
dc, err := New()
|
||||
require.NoError(t, err)
|
||||
|
||||
oldFinalizedDeposits := []*dbpb.DepositContainer{
|
||||
@@ -445,13 +445,13 @@ func TestFinalizedDeposits_UtilizesPreviouslyCachedDeposits(t *testing.T) {
|
||||
require.NoError(t, err, "Could not hash deposit data")
|
||||
deps = append(deps, hash[:])
|
||||
}
|
||||
trie, err := trieutil.GenerateTrieFromItems(deps, int(params.BeaconConfig().DepositContractTreeDepth))
|
||||
trie, err := trieutil.GenerateTrieFromItems(deps, params.BeaconConfig().DepositContractTreeDepth)
|
||||
require.NoError(t, err, "Could not generate deposit trie")
|
||||
assert.Equal(t, trie.HashTreeRoot(), cachedDeposits.Deposits.HashTreeRoot())
|
||||
}
|
||||
|
||||
func TestFinalizedDeposits_InitializedCorrectly(t *testing.T) {
|
||||
dc, err := NewDepositCache()
|
||||
dc, err := New()
|
||||
require.NoError(t, err)
|
||||
|
||||
finalizedDeposits := dc.finalizedDeposits
|
||||
@@ -461,7 +461,7 @@ func TestFinalizedDeposits_InitializedCorrectly(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestNonFinalizedDeposits_ReturnsAllNonFinalizedDeposits(t *testing.T) {
|
||||
dc, err := NewDepositCache()
|
||||
dc, err := New()
|
||||
require.NoError(t, err)
|
||||
|
||||
finalizedDeposits := []*dbpb.DepositContainer{
|
||||
@@ -518,7 +518,7 @@ func TestNonFinalizedDeposits_ReturnsAllNonFinalizedDeposits(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestNonFinalizedDeposits_ReturnsNonFinalizedDepositsUpToBlockNumber(t *testing.T) {
|
||||
dc, err := NewDepositCache()
|
||||
dc, err := New()
|
||||
require.NoError(t, err)
|
||||
|
||||
finalizedDeposits := []*dbpb.DepositContainer{
|
||||
@@ -573,3 +573,180 @@ func TestNonFinalizedDeposits_ReturnsNonFinalizedDepositsUpToBlockNumber(t *test
|
||||
deps := dc.NonFinalizedDeposits(context.Background(), big.NewInt(10))
|
||||
assert.Equal(t, 1, len(deps))
|
||||
}
|
||||
|
||||
func TestPruneProofs_Ok(t *testing.T) {
|
||||
dc, err := New()
|
||||
require.NoError(t, err)
|
||||
|
||||
deposits := []struct {
|
||||
blkNum uint64
|
||||
deposit *ethpb.Deposit
|
||||
index int64
|
||||
}{
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof()},
|
||||
index: 0,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof()},
|
||||
index: 1,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof()},
|
||||
index: 2,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof()},
|
||||
index: 3,
|
||||
},
|
||||
}
|
||||
|
||||
for _, ins := range deposits {
|
||||
dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{})
|
||||
}
|
||||
|
||||
require.NoError(t, dc.PruneProofs(context.Background(), 1))
|
||||
|
||||
assert.DeepEqual(t, [][]byte(nil), dc.deposits[0].Deposit.Proof)
|
||||
assert.DeepEqual(t, [][]byte(nil), dc.deposits[1].Deposit.Proof)
|
||||
assert.NotNil(t, dc.deposits[2].Deposit.Proof)
|
||||
assert.NotNil(t, dc.deposits[3].Deposit.Proof)
|
||||
}
|
||||
|
||||
func TestPruneProofs_SomeAlreadyPruned(t *testing.T) {
|
||||
dc, err := New()
|
||||
require.NoError(t, err)
|
||||
|
||||
deposits := []struct {
|
||||
blkNum uint64
|
||||
deposit *ethpb.Deposit
|
||||
index int64
|
||||
}{
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: nil},
|
||||
index: 0,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: nil},
|
||||
index: 1,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof()},
|
||||
index: 2,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof()},
|
||||
index: 3,
|
||||
},
|
||||
}
|
||||
|
||||
for _, ins := range deposits {
|
||||
dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{})
|
||||
}
|
||||
|
||||
require.NoError(t, dc.PruneProofs(context.Background(), 2))
|
||||
|
||||
assert.DeepEqual(t, [][]byte(nil), dc.deposits[2].Deposit.Proof)
|
||||
}
|
||||
|
||||
func TestPruneProofs_PruneAllWhenDepositIndexTooBig(t *testing.T) {
|
||||
dc, err := New()
|
||||
require.NoError(t, err)
|
||||
|
||||
deposits := []struct {
|
||||
blkNum uint64
|
||||
deposit *ethpb.Deposit
|
||||
index int64
|
||||
}{
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof()},
|
||||
index: 0,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof()},
|
||||
index: 1,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof()},
|
||||
index: 2,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof()},
|
||||
index: 3,
|
||||
},
|
||||
}
|
||||
|
||||
for _, ins := range deposits {
|
||||
dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{})
|
||||
}
|
||||
|
||||
require.NoError(t, dc.PruneProofs(context.Background(), 99))
|
||||
|
||||
assert.DeepEqual(t, [][]byte(nil), dc.deposits[0].Deposit.Proof)
|
||||
assert.DeepEqual(t, [][]byte(nil), dc.deposits[1].Deposit.Proof)
|
||||
assert.DeepEqual(t, [][]byte(nil), dc.deposits[2].Deposit.Proof)
|
||||
assert.DeepEqual(t, [][]byte(nil), dc.deposits[3].Deposit.Proof)
|
||||
}
|
||||
|
||||
func TestPruneProofs_CorrectlyHandleLastIndex(t *testing.T) {
|
||||
dc, err := New()
|
||||
require.NoError(t, err)
|
||||
|
||||
deposits := []struct {
|
||||
blkNum uint64
|
||||
deposit *ethpb.Deposit
|
||||
index int64
|
||||
}{
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof()},
|
||||
index: 0,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof()},
|
||||
index: 1,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof()},
|
||||
index: 2,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof()},
|
||||
index: 3,
|
||||
},
|
||||
}
|
||||
|
||||
for _, ins := range deposits {
|
||||
dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{})
|
||||
}
|
||||
|
||||
require.NoError(t, dc.PruneProofs(context.Background(), 4))
|
||||
|
||||
assert.DeepEqual(t, [][]byte(nil), dc.deposits[0].Deposit.Proof)
|
||||
assert.DeepEqual(t, [][]byte(nil), dc.deposits[1].Deposit.Proof)
|
||||
assert.DeepEqual(t, [][]byte(nil), dc.deposits[2].Deposit.Proof)
|
||||
assert.DeepEqual(t, [][]byte(nil), dc.deposits[3].Deposit.Proof)
|
||||
}
|
||||
|
||||
func makeDepositProof() [][]byte {
|
||||
proof := make([][]byte, int(params.BeaconConfig().DepositContractTreeDepth)+1)
|
||||
for i := range proof {
|
||||
proof[i] = make([]byte, 32)
|
||||
}
|
||||
return proof
|
||||
}
|
||||
|
||||
5
beacon-chain/cache/depositcache/log.go
vendored
Normal file
@@ -0,0 +1,5 @@
package depositcache

import "github.com/sirupsen/logrus"

var log = logrus.WithField("prefix", "depositcache")
@@ -10,7 +10,7 @@ import (
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
dbpb "github.com/prysmaticlabs/prysm/proto/beacon/db"
|
||||
"github.com/prysmaticlabs/prysm/shared/hashutil"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
@@ -33,7 +33,7 @@ func (dc *DepositCache) InsertPendingDeposit(ctx context.Context, d *ethpb.Depos
|
||||
ctx, span := trace.StartSpan(ctx, "DepositsCache.InsertPendingDeposit")
|
||||
defer span.End()
|
||||
if d == nil {
|
||||
log.WithFields(log.Fields{
|
||||
log.WithFields(logrus.Fields{
|
||||
"block": blockNum,
|
||||
"deposit": d,
|
||||
}).Debug("Ignoring nil deposit insertion")
|
||||
|
||||
@@ -9,11 +9,10 @@ import (
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
dbpb "github.com/prysmaticlabs/prysm/proto/beacon/db"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
|
||||
)
|
||||
|
||||
var _ = PendingDepositsFetcher(&DepositCache{})
|
||||
var _ PendingDepositsFetcher = (*DepositCache)(nil)
|
||||
|
||||
func TestInsertPendingDeposit_OK(t *testing.T) {
|
||||
dc := DepositCache{}
|
||||
@@ -29,14 +28,6 @@ func TestInsertPendingDeposit_ignoresNilDeposit(t *testing.T) {
|
||||
assert.Equal(t, 0, len(dc.pendingDeposits))
|
||||
}
|
||||
|
||||
func makeDepositProof() [][]byte {
|
||||
proof := make([][]byte, int(params.BeaconConfig().DepositContractTreeDepth)+1)
|
||||
for i := range proof {
|
||||
proof[i] = make([]byte, 32)
|
||||
}
|
||||
return proof
|
||||
}
|
||||
|
||||
func TestRemovePendingDeposit_OK(t *testing.T) {
|
||||
db := DepositCache{}
|
||||
proof1 := makeDepositProof()
|
||||
|
||||
14
beacon-chain/cache/feature_flag_test.go
vendored
@@ -1,14 +0,0 @@
package cache

import (
	"os"
	"testing"

	"github.com/prysmaticlabs/prysm/shared/featureconfig"
)

func TestMain(m *testing.M) {
	resetCfg := featureconfig.InitWithReset(&featureconfig.Flags{EnableEth1DataVoteCache: true})
	defer resetCfg()
	os.Exit(m.Run())
}
100
beacon-chain/cache/proposer_indices.go
vendored
Normal file
@@ -0,0 +1,100 @@
|
||||
// +build !libfuzzer
|
||||
|
||||
package cache
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
var (
|
||||
// maxProposerIndicesCacheSize defines the max number of proposer indices on per block root basis can cache.
|
||||
// Due to reorgs and long finality, it's good to keep the old cache around for quickly switch over.
|
||||
maxProposerIndicesCacheSize = uint64(8)
|
||||
|
||||
// ProposerIndicesCacheMiss tracks the number of proposerIndices requests that aren't present in the cache.
|
||||
ProposerIndicesCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "proposer_indices_cache_miss",
|
||||
Help: "The number of proposer indices requests that aren't present in the cache.",
|
||||
})
|
||||
// ProposerIndicesCacheHit tracks the number of proposerIndices requests that are in the cache.
|
||||
ProposerIndicesCacheHit = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "proposer_indices_cache_hit",
|
||||
Help: "The number of proposer indices requests that are present in the cache.",
|
||||
})
|
||||
)
|
||||
|
||||
// ProposerIndicesCache is a struct with 1 queue for looking up proposer indices by root.
|
||||
type ProposerIndicesCache struct {
|
||||
ProposerIndicesCache *cache.FIFO
|
||||
lock sync.RWMutex
|
||||
}
|
||||
|
||||
// proposerIndicesKeyFn takes the block root as the key to retrieve proposer indices in a given epoch.
|
||||
func proposerIndicesKeyFn(obj interface{}) (string, error) {
|
||||
info, ok := obj.(*ProposerIndices)
|
||||
if !ok {
|
||||
return "", ErrNotProposerIndices
|
||||
}
|
||||
|
||||
return key(info.BlockRoot), nil
|
||||
}
|
||||
|
||||
// NewProposerIndicesCache creates a new proposer indices cache for storing/accessing proposer index assignments of an epoch.
|
||||
func NewProposerIndicesCache() *ProposerIndicesCache {
|
||||
return &ProposerIndicesCache{
|
||||
ProposerIndicesCache: cache.NewFIFO(proposerIndicesKeyFn),
|
||||
}
|
||||
}
|
||||
|
||||
// AddProposerIndices adds ProposerIndices object to the cache.
|
||||
// This method also trims the least recently list if the cache size has ready the max cache size limit.
|
||||
func (c *ProposerIndicesCache) AddProposerIndices(p *ProposerIndices) error {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
if err := c.ProposerIndicesCache.AddIfNotPresent(p); err != nil {
|
||||
return err
|
||||
}
|
||||
trim(c.ProposerIndicesCache, maxProposerIndicesCacheSize)
|
||||
return nil
|
||||
}
|
||||
|
||||
// HasProposerIndices returns the proposer indices of a block root seed.
|
||||
func (c *ProposerIndicesCache) HasProposerIndices(r [32]byte) (bool, error) {
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
_, exists, err := c.ProposerIndicesCache.GetByKey(key(r))
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return exists, nil
|
||||
}
|
||||
|
||||
// ProposerIndices returns the proposer indices of a block root seed.
|
||||
func (c *ProposerIndicesCache) ProposerIndices(r [32]byte) ([]types.ValidatorIndex, error) {
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
obj, exists, err := c.ProposerIndicesCache.GetByKey(key(r))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if exists {
|
||||
ProposerIndicesCacheHit.Inc()
|
||||
} else {
|
||||
ProposerIndicesCacheMiss.Inc()
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
item, ok := obj.(*ProposerIndices)
|
||||
if !ok {
|
||||
return nil, ErrNotProposerIndices
|
||||
}
|
||||
|
||||
return item.ProposerIndices, nil
|
||||
}
|
||||
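The new cache is keyed by block root and trimmed to eight entries, so older roots roll off FIFO-style. A short add-then-lookup sketch mirroring the tests later in this diff; the root and indices are made-up values.

package main

import (
	"fmt"
	"log"

	types "github.com/prysmaticlabs/eth2-types"
	"github.com/prysmaticlabs/prysm/beacon-chain/cache"
)

func main() {
	c := cache.NewProposerIndicesCache()
	root := [32]byte{'A'}
	if err := c.AddProposerIndices(&cache.ProposerIndices{
		BlockRoot:       root,
		ProposerIndices: []types.ValidatorIndex{1, 2, 3},
	}); err != nil {
		log.Fatal(err)
	}
	indices, err := c.ProposerIndices(root) // a nil, nil result would mean a cache miss
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(indices)
}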
31
beacon-chain/cache/proposer_indices_disabled.go
vendored
Normal file
@@ -0,0 +1,31 @@
|
||||
// +build libfuzzer
|
||||
|
||||
// This file is used in fuzzer builds to bypass proposer indices caches.
|
||||
package cache
|
||||
|
||||
import types "github.com/prysmaticlabs/eth2-types"
|
||||
|
||||
// FakeProposerIndicesCache is a struct with 1 queue for looking up proposer indices by root.
|
||||
type FakeProposerIndicesCache struct {
|
||||
}
|
||||
|
||||
// NewProposerIndicesCache creates a new proposer indices cache for storing/accessing proposer index assignments of an epoch.
|
||||
func NewProposerIndicesCache() *FakeProposerIndicesCache {
|
||||
return &FakeProposerIndicesCache{}
|
||||
}
|
||||
|
||||
// AddProposerIndices adds ProposerIndices object to the cache.
|
||||
// This method also trims the least recently list if the cache size has ready the max cache size limit.
|
||||
func (c *FakeProposerIndicesCache) AddProposerIndices(p *ProposerIndices) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// ProposerIndices returns the proposer indices of a block root seed.
|
||||
func (c *FakeProposerIndicesCache) ProposerIndices(r [32]byte) ([]types.ValidatorIndex, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// HasProposerIndices returns the proposer indices of a block root seed.
|
||||
func (c *FakeProposerIndicesCache) HasProposerIndices(r [32]byte) (bool, error) {
|
||||
return false, nil
|
||||
}
|
||||
74
beacon-chain/cache/proposer_indices_test.go
vendored
Normal file
@@ -0,0 +1,74 @@
package cache

import (
	"strconv"
	"testing"

	types "github.com/prysmaticlabs/eth2-types"
	"github.com/prysmaticlabs/prysm/shared/bytesutil"
	"github.com/prysmaticlabs/prysm/shared/testutil/assert"
	"github.com/prysmaticlabs/prysm/shared/testutil/require"
)

func TestProposerKeyFn_OK(t *testing.T) {
	item := &ProposerIndices{
		BlockRoot:       [32]byte{'A'},
		ProposerIndices: []types.ValidatorIndex{1, 2, 3, 4, 5},
	}

	k, err := proposerIndicesKeyFn(item)
	require.NoError(t, err)
	assert.Equal(t, key(item.BlockRoot), k)
}

func TestProposerKeyFn_InvalidObj(t *testing.T) {
	_, err := proposerIndicesKeyFn("bad")
	assert.Equal(t, ErrNotProposerIndices, err)
}

func TestProposerCache_AddProposerIndicesList(t *testing.T) {
	cache := NewProposerIndicesCache()
	bRoot := [32]byte{'A'}
	indices, err := cache.ProposerIndices(bRoot)
	require.NoError(t, err)
	if indices != nil {
		t.Error("Expected committee count not to exist in empty cache")
	}
	has, err := cache.HasProposerIndices(bRoot)
	require.NoError(t, err)
	assert.Equal(t, false, has)
	require.NoError(t, cache.AddProposerIndices(&ProposerIndices{
		ProposerIndices: indices,
		BlockRoot:       bRoot,
	}))

	received, err := cache.ProposerIndices(bRoot)
	require.NoError(t, err)
	assert.DeepEqual(t, received, indices)
	has, err = cache.HasProposerIndices(bRoot)
	require.NoError(t, err)
	assert.Equal(t, true, has)

	item := &ProposerIndices{BlockRoot: [32]byte{'B'}, ProposerIndices: []types.ValidatorIndex{1, 2, 3, 4, 5, 6}}
	require.NoError(t, cache.AddProposerIndices(item))

	received, err = cache.ProposerIndices(item.BlockRoot)
	require.NoError(t, err)
	assert.DeepEqual(t, item.ProposerIndices, received)
	has, err = cache.HasProposerIndices(bRoot)
	require.NoError(t, err)
	assert.Equal(t, true, has)

}

func TestProposerCache_CanRotate(t *testing.T) {
	cache := NewProposerIndicesCache()
	for i := 0; i < int(maxProposerIndicesCacheSize)+1; i++ {
		s := []byte(strconv.Itoa(i))
		item := &ProposerIndices{BlockRoot: bytesutil.ToBytes32(s)}
		require.NoError(t, cache.AddProposerIndices(item))
	}

	k := cache.ProposerIndicesCache.ListKeys()
	assert.Equal(t, maxProposerIndicesCacheSize, uint64(len(k)))
}
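TestProposerCache_CanRotate pins down the eviction property: after maxProposerIndicesCacheSize+1 inserts the key count stays at the maximum. The same bounded-cache behavior can be demonstrated in isolation with hashicorp/golang-lru; the real cache uses a different underlying store, so this is only an illustration of the rotation the test asserts:

package main

import (
	"fmt"
	"strconv"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	const maxSize = 8 // stand-in for maxProposerIndicesCacheSize
	c, err := lru.New(maxSize)
	if err != nil {
		panic(err)
	}
	// Insert one more entry than the cache can hold.
	for i := 0; i < maxSize+1; i++ {
		c.Add(strconv.Itoa(i), []uint64{uint64(i)})
	}
	// The size stays bounded and the oldest key ("0") has been evicted.
	fmt.Println(c.Len(), c.Contains("0")) // prints: 8 false
}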
beacon-chain/cache/proposer_indices_type.go (vendored, new file, 17 lines)
@@ -0,0 +1,17 @@
package cache

import (
	"errors"

	types "github.com/prysmaticlabs/eth2-types"
)

// ErrNotProposerIndices will be returned when a cache object is not a pointer to
// a ProposerIndices struct.
var ErrNotProposerIndices = errors.New("object is not a proposer indices struct")

// ProposerIndices defines the cached struct for proposer indices.
type ProposerIndices struct {
	BlockRoot       [32]byte
	ProposerIndices []types.ValidatorIndex
}
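The tests above exercise proposerIndicesKeyFn and key, which are not shown in this hunk. Given ErrNotProposerIndices and the BlockRoot field, a plausible sketch of such a key function looks like the following; the actual implementation in the repository may differ:

package cache

// key converts a block root into the string form used as the store key.
func key(r [32]byte) string {
	return string(r[:])
}

// proposerIndicesKeyFn type-asserts the cached object and keys it by block
// root, returning ErrNotProposerIndices for foreign objects, which is the
// behavior the tests above expect.
func proposerIndicesKeyFn(obj interface{}) (string, error) {
	p, ok := obj.(*ProposerIndices)
	if !ok {
		return "", ErrNotProposerIndices
	}
	return key(p.BlockRoot), nil
}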
beacon-chain/cache/skip_slot_cache.go (vendored, 24 changed lines)
@@ -30,7 +30,7 @@ type SkipSlotCache struct {
 	cache    *lru.Cache
 	lock     sync.RWMutex
 	disabled bool // Allow for programmatic toggling of the cache, useful during initial sync.
-	inProgress map[uint64]bool
+	inProgress map[[32]byte]bool
 }

 // NewSkipSlotCache initializes the map and underlying cache.
@@ -41,7 +41,7 @@ func NewSkipSlotCache() *SkipSlotCache {
 	}
 	return &SkipSlotCache{
 		cache:      cache,
-		inProgress: make(map[uint64]bool),
+		inProgress: make(map[[32]byte]bool),
 	}
 }

@@ -57,7 +57,7 @@ func (c *SkipSlotCache) Disable() {

 // Get waits for any in progress calculation to complete before returning a
 // cached response, if any.
-func (c *SkipSlotCache) Get(ctx context.Context, slot uint64) (*stateTrie.BeaconState, error) {
+func (c *SkipSlotCache) Get(ctx context.Context, r [32]byte) (*stateTrie.BeaconState, error) {
 	ctx, span := trace.StartSpan(ctx, "skipSlotCache.Get")
 	defer span.End()
 	if c.disabled {
@@ -77,7 +77,7 @@ func (c *SkipSlotCache) Get(ctx context.Context, slot uint64) (*stateTrie.Beacon
 	}

 	c.lock.RLock()
-	if !c.inProgress[slot] {
+	if !c.inProgress[r] {
 		c.lock.RUnlock()
 		break
 	}
@@ -92,7 +92,7 @@ func (c *SkipSlotCache) Get(ctx context.Context, slot uint64) (*stateTrie.Beacon
 	}
 	span.AddAttributes(trace.BoolAttribute("inProgress", inProgress))

-	item, exists := c.cache.Get(slot)
+	item, exists := c.cache.Get(r)

 	if exists && item != nil {
 		skipSlotCacheHit.Inc()
@@ -106,7 +106,7 @@ func (c *SkipSlotCache) Get(ctx context.Context, slot uint64) (*stateTrie.Beacon

 // MarkInProgress a request so that any other similar requests will block on
 // Get until MarkNotInProgress is called.
-func (c *SkipSlotCache) MarkInProgress(slot uint64) error {
+func (c *SkipSlotCache) MarkInProgress(r [32]byte) error {
 	if c.disabled {
 		return nil
 	}
@@ -114,16 +114,16 @@ func (c *SkipSlotCache) MarkInProgress(slot uint64) error {
 	c.lock.Lock()
 	defer c.lock.Unlock()

-	if c.inProgress[slot] {
+	if c.inProgress[r] {
 		return ErrAlreadyInProgress
 	}
-	c.inProgress[slot] = true
+	c.inProgress[r] = true
 	return nil
 }

 // MarkNotInProgress will release the lock on a given request. This should be
 // called after put.
-func (c *SkipSlotCache) MarkNotInProgress(slot uint64) error {
+func (c *SkipSlotCache) MarkNotInProgress(r [32]byte) error {
 	if c.disabled {
 		return nil
 	}
@@ -131,18 +131,18 @@ func (c *SkipSlotCache) MarkNotInProgress(slot uint64) error {
 	c.lock.Lock()
 	defer c.lock.Unlock()

-	delete(c.inProgress, slot)
+	delete(c.inProgress, r)
 	return nil
 }

 // Put the response in the cache.
-func (c *SkipSlotCache) Put(ctx context.Context, slot uint64, state *stateTrie.BeaconState) error {
+func (c *SkipSlotCache) Put(_ context.Context, r [32]byte, state *stateTrie.BeaconState) error {
 	if c.disabled {
 		return nil
 	}

 	// Copy state so cached value is not mutated.
-	c.cache.Add(slot, state.Copy())
+	c.cache.Add(r, state.Copy())

 	return nil
 }
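After this change the skip slot cache is keyed by a 32-byte block root instead of a slot number. A hedged sketch of the intended Get / MarkInProgress / Put / MarkNotInProgress discipline around an expensive state computation; advanceThroughSkipSlots and the computeState callback are illustrative, not part of the diff:

package cache

import (
	"context"

	stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
)

// advanceThroughSkipSlots sketches how a caller is expected to drive the cache:
// check for a cached state, claim the root while computing, publish the result,
// then release the claim so blocked Get calls can proceed.
func advanceThroughSkipSlots(ctx context.Context, c *SkipSlotCache, root [32]byte,
	computeState func(context.Context) (*stateTrie.BeaconState, error)) (*stateTrie.BeaconState, error) {
	if st, err := c.Get(ctx, root); err != nil || st != nil {
		return st, err
	}
	if err := c.MarkInProgress(root); err != nil {
		if err == ErrAlreadyInProgress {
			// Another goroutine is computing this root; Get blocks until it is done.
			return c.Get(ctx, root)
		}
		return nil, err
	}
	// Release the claim even if the computation fails.
	defer func() { _ = c.MarkNotInProgress(root) }()

	st, err := computeState(ctx)
	if err != nil {
		return nil, err
	}
	if err := c.Put(ctx, root, st); err != nil {
		return nil, err
	}
	return st, nil
}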
beacon-chain/cache/skip_slot_cache_test.go (vendored, 11 changed lines)
@@ -15,21 +15,22 @@ func TestSkipSlotCache_RoundTrip(t *testing.T) {
 	ctx := context.Background()
 	c := cache.NewSkipSlotCache()

-	state, err := c.Get(ctx, 5)
+	r := [32]byte{'a'}
+	state, err := c.Get(ctx, r)
 	require.NoError(t, err)
 	assert.Equal(t, (*stateTrie.BeaconState)(nil), state, "Empty cache returned an object")

-	require.NoError(t, c.MarkInProgress(5))
+	require.NoError(t, c.MarkInProgress(r))

 	state, err = stateTrie.InitializeFromProto(&pb.BeaconState{
 		Slot: 10,
 	})
 	require.NoError(t, err)

-	require.NoError(t, c.Put(ctx, 5, state))
-	require.NoError(t, c.MarkNotInProgress(5))
+	require.NoError(t, c.Put(ctx, r, state))
+	require.NoError(t, c.MarkNotInProgress(r))

-	res, err := c.Get(ctx, 5)
+	res, err := c.Get(ctx, r)
 	require.NoError(t, err)
 	assert.DeepEqual(t, res.CloneInnerState(), state.CloneInnerState(), "Expected equal protos to return from cache")
 }
beacon-chain/cache/state_summary.go (vendored, deleted, 65 lines)
@@ -1,65 +0,0 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
)
|
||||
|
||||
// StateSummaryCache caches state summary object.
|
||||
type StateSummaryCache struct {
|
||||
initSyncStateSummaries map[[32]byte]*pb.StateSummary
|
||||
initSyncStateSummariesLock sync.RWMutex
|
||||
}
|
||||
|
||||
// NewStateSummaryCache creates a new state summary cache.
|
||||
func NewStateSummaryCache() *StateSummaryCache {
|
||||
return &StateSummaryCache{
|
||||
initSyncStateSummaries: make(map[[32]byte]*pb.StateSummary),
|
||||
}
|
||||
}
|
||||
|
||||
// Put saves a state summary to the initial sync state summaries cache.
|
||||
func (s *StateSummaryCache) Put(r [32]byte, b *pb.StateSummary) {
|
||||
s.initSyncStateSummariesLock.Lock()
|
||||
defer s.initSyncStateSummariesLock.Unlock()
|
||||
s.initSyncStateSummaries[r] = b
|
||||
}
|
||||
|
||||
// Has checks if a state summary exists in the initial sync state summaries cache using the root
|
||||
// of the block.
|
||||
func (s *StateSummaryCache) Has(r [32]byte) bool {
|
||||
s.initSyncStateSummariesLock.RLock()
|
||||
defer s.initSyncStateSummariesLock.RUnlock()
|
||||
_, ok := s.initSyncStateSummaries[r]
|
||||
return ok
|
||||
}
|
||||
|
||||
// Get retrieves a state summary from the initial sync state summaries cache using the root of
|
||||
// the block.
|
||||
func (s *StateSummaryCache) Get(r [32]byte) *pb.StateSummary {
|
||||
s.initSyncStateSummariesLock.RLock()
|
||||
defer s.initSyncStateSummariesLock.RUnlock()
|
||||
b := s.initSyncStateSummaries[r]
|
||||
return b
|
||||
}
|
||||
|
||||
// GetAll retrieves all the beacon state summaries from the initial sync state summaries cache, the returned
|
||||
// state summaries are unordered.
|
||||
func (s *StateSummaryCache) GetAll() []*pb.StateSummary {
|
||||
s.initSyncStateSummariesLock.RLock()
|
||||
defer s.initSyncStateSummariesLock.RUnlock()
|
||||
|
||||
blks := make([]*pb.StateSummary, 0, len(s.initSyncStateSummaries))
|
||||
for _, b := range s.initSyncStateSummaries {
|
||||
blks = append(blks, b)
|
||||
}
|
||||
return blks
|
||||
}
|
||||
|
||||
// Clear clears out the initial sync state summaries cache.
|
||||
func (s *StateSummaryCache) Clear() {
|
||||
s.initSyncStateSummariesLock.Lock()
|
||||
defer s.initSyncStateSummariesLock.Unlock()
|
||||
s.initSyncStateSummaries = make(map[[32]byte]*pb.StateSummary)
|
||||
}
|
||||
beacon-chain/cache/subnet_ids.go (vendored, 92 changed lines)
@@ -6,6 +6,7 @@ import (
|
||||
|
||||
lru "github.com/hashicorp/golang-lru"
|
||||
"github.com/patrickmn/go-cache"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/sliceutil"
|
||||
)
|
||||
@@ -25,40 +26,40 @@ var SubnetIDs = newSubnetIDs()
|
||||
func newSubnetIDs() *subnetIDs {
|
||||
// Given a node can calculate committee assignments of current epoch and next epoch.
|
||||
// Max size is set to 2 epoch length.
|
||||
cacheSize := params.BeaconConfig().MaxCommitteesPerSlot * params.BeaconConfig().SlotsPerEpoch * 2
|
||||
attesterCache, err := lru.New(int(cacheSize))
|
||||
cacheSize := int(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().MaxCommitteesPerSlot * 2))
|
||||
attesterCache, err := lru.New(cacheSize)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
aggregatorCache, err := lru.New(int(cacheSize))
|
||||
aggregatorCache, err := lru.New(cacheSize)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
epochDuration := time.Duration(params.BeaconConfig().SlotsPerEpoch * params.BeaconConfig().SecondsPerSlot)
|
||||
subLength := epochDuration * time.Duration(params.BeaconNetworkConfig().EpochsPerRandomSubnetSubscription)
|
||||
epochDuration := time.Duration(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
|
||||
subLength := epochDuration * time.Duration(params.BeaconConfig().EpochsPerRandomSubnetSubscription)
|
||||
persistentCache := cache.New(subLength*time.Second, epochDuration*time.Second)
|
||||
return &subnetIDs{attester: attesterCache, aggregator: aggregatorCache, persistentSubnets: persistentCache}
|
||||
}
|
||||
|
||||
// AddAttesterSubnetID adds the subnet index for subscribing subnet for the attester of a given slot.
|
||||
func (c *subnetIDs) AddAttesterSubnetID(slot uint64, subnetID uint64) {
|
||||
c.attesterLock.Lock()
|
||||
defer c.attesterLock.Unlock()
|
||||
func (s *subnetIDs) AddAttesterSubnetID(slot types.Slot, subnetID uint64) {
|
||||
s.attesterLock.Lock()
|
||||
defer s.attesterLock.Unlock()
|
||||
|
||||
ids := []uint64{subnetID}
|
||||
val, exists := c.attester.Get(slot)
|
||||
val, exists := s.attester.Get(slot)
|
||||
if exists {
|
||||
ids = sliceutil.UnionUint64(append(val.([]uint64), ids...))
|
||||
}
|
||||
c.attester.Add(slot, ids)
|
||||
s.attester.Add(slot, ids)
|
||||
}
|
||||
|
||||
// GetAttesterSubnetIDs gets the subnet IDs for subscribed subnets for attesters of the slot.
|
||||
func (c *subnetIDs) GetAttesterSubnetIDs(slot uint64) []uint64 {
|
||||
c.attesterLock.RLock()
|
||||
defer c.attesterLock.RUnlock()
|
||||
func (s *subnetIDs) GetAttesterSubnetIDs(slot types.Slot) []uint64 {
|
||||
s.attesterLock.RLock()
|
||||
defer s.attesterLock.RUnlock()
|
||||
|
||||
val, exists := c.attester.Get(slot)
|
||||
val, exists := s.attester.Get(slot)
|
||||
if !exists {
|
||||
return nil
|
||||
}
|
||||
@@ -69,24 +70,24 @@ func (c *subnetIDs) GetAttesterSubnetIDs(slot uint64) []uint64 {
|
||||
}
|
||||
|
||||
// AddAggregatorSubnetID adds the subnet ID for subscribing subnet for the aggregator of a given slot.
|
||||
func (c *subnetIDs) AddAggregatorSubnetID(slot uint64, subnetID uint64) {
|
||||
c.aggregatorLock.Lock()
|
||||
defer c.aggregatorLock.Unlock()
|
||||
func (s *subnetIDs) AddAggregatorSubnetID(slot types.Slot, subnetID uint64) {
|
||||
s.aggregatorLock.Lock()
|
||||
defer s.aggregatorLock.Unlock()
|
||||
|
||||
ids := []uint64{subnetID}
|
||||
val, exists := c.aggregator.Get(slot)
|
||||
val, exists := s.aggregator.Get(slot)
|
||||
if exists {
|
||||
ids = sliceutil.UnionUint64(append(val.([]uint64), ids...))
|
||||
}
|
||||
c.aggregator.Add(slot, ids)
|
||||
s.aggregator.Add(slot, ids)
|
||||
}
|
||||
|
||||
// GetAggregatorSubnetIDs gets the subnet IDs for subscribing subnet for aggregator of the slot.
|
||||
func (c *subnetIDs) GetAggregatorSubnetIDs(slot uint64) []uint64 {
|
||||
c.aggregatorLock.RLock()
|
||||
defer c.aggregatorLock.RUnlock()
|
||||
func (s *subnetIDs) GetAggregatorSubnetIDs(slot types.Slot) []uint64 {
|
||||
s.aggregatorLock.RLock()
|
||||
defer s.aggregatorLock.RUnlock()
|
||||
|
||||
val, exists := c.aggregator.Get(slot)
|
||||
val, exists := s.aggregator.Get(slot)
|
||||
if !exists {
|
||||
return []uint64{}
|
||||
}
|
||||
@@ -95,11 +96,11 @@ func (c *subnetIDs) GetAggregatorSubnetIDs(slot uint64) []uint64 {
|
||||
|
||||
// GetPersistentSubnets retrieves the persistent subnet and expiration time of that validator's
|
||||
// subscription.
|
||||
func (c *subnetIDs) GetPersistentSubnets(pubkey []byte) ([]uint64, bool, time.Time) {
|
||||
c.subnetsLock.RLock()
|
||||
defer c.subnetsLock.RUnlock()
|
||||
func (s *subnetIDs) GetPersistentSubnets(pubkey []byte) ([]uint64, bool, time.Time) {
|
||||
s.subnetsLock.RLock()
|
||||
defer s.subnetsLock.RUnlock()
|
||||
|
||||
id, duration, ok := c.persistentSubnets.GetWithExpiration(string(pubkey))
|
||||
id, duration, ok := s.persistentSubnets.GetWithExpiration(string(pubkey))
|
||||
if !ok {
|
||||
return []uint64{}, ok, time.Time{}
|
||||
}
|
||||
@@ -108,12 +109,12 @@ func (c *subnetIDs) GetPersistentSubnets(pubkey []byte) ([]uint64, bool, time.Ti
|
||||
|
||||
// GetAllSubnets retrieves all the non-expired subscribed subnets of all the validators
|
||||
// in the cache.
|
||||
func (c *subnetIDs) GetAllSubnets() []uint64 {
|
||||
c.subnetsLock.RLock()
|
||||
defer c.subnetsLock.RUnlock()
|
||||
func (s *subnetIDs) GetAllSubnets() []uint64 {
|
||||
s.subnetsLock.RLock()
|
||||
defer s.subnetsLock.RUnlock()
|
||||
|
||||
itemsMap := c.persistentSubnets.Items()
|
||||
committees := []uint64{}
|
||||
itemsMap := s.persistentSubnets.Items()
|
||||
var committees []uint64
|
||||
|
||||
for _, v := range itemsMap {
|
||||
if v.Expired() {
|
||||
@@ -126,9 +127,28 @@ func (c *subnetIDs) GetAllSubnets() []uint64 {
|
||||
|
||||
// AddPersistentCommittee adds the relevant committee for that particular validator along with its
|
||||
// expiration period.
|
||||
func (c *subnetIDs) AddPersistentCommittee(pubkey []byte, comIndex []uint64, duration time.Duration) {
|
||||
c.subnetsLock.Lock()
|
||||
defer c.subnetsLock.Unlock()
|
||||
func (s *subnetIDs) AddPersistentCommittee(pubkey []byte, comIndex []uint64, duration time.Duration) {
|
||||
s.subnetsLock.Lock()
|
||||
defer s.subnetsLock.Unlock()
|
||||
|
||||
c.persistentSubnets.Set(string(pubkey), comIndex, duration)
|
||||
s.persistentSubnets.Set(string(pubkey), comIndex, duration)
|
||||
}
|
||||
|
||||
// EmptyAllCaches empties out all the related caches and flushes any stored
|
||||
// entries on them. This should only ever be used for testing, in normal
|
||||
// production, handling of the relevant subnets for each role is done
|
||||
// separately.
|
||||
func (s *subnetIDs) EmptyAllCaches() {
|
||||
// Clear the caches.
|
||||
s.attesterLock.Lock()
|
||||
s.attester.Purge()
|
||||
s.attesterLock.Unlock()
|
||||
|
||||
s.aggregatorLock.Lock()
|
||||
s.aggregator.Purge()
|
||||
s.aggregatorLock.Unlock()
|
||||
|
||||
s.subnetsLock.Lock()
|
||||
s.persistentSubnets.Flush()
|
||||
s.subnetsLock.Unlock()
|
||||
}
|
||||
|
||||
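The subnet cache sizing above now goes through the typed Slot arithmetic from eth2-types instead of raw uint64 multiplication. A small standalone sketch of the same calculation with mainnet-style constants; the literal values are illustrative defaults, not read from params.BeaconConfig():

package main

import (
	"fmt"
	"time"

	types "github.com/prysmaticlabs/eth2-types"
)

func main() {
	// Illustrative mainnet-like values; the real code reads params.BeaconConfig().
	slotsPerEpoch := types.Slot(32)
	maxCommitteesPerSlot := uint64(64)
	secondsPerSlot := uint64(12)

	// Committee assignments are known for the current and next epoch, so the
	// attester/aggregator caches are sized for two epochs of committees.
	cacheSize := int(slotsPerEpoch.Mul(maxCommitteesPerSlot * 2))

	// Epoch duration computed through the typed Slot helper.
	epochDuration := time.Duration(slotsPerEpoch.Mul(secondsPerSlot)) * time.Second

	fmt.Println(cacheSize, epochDuration) // prints: 4096 6m24s
}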
beacon-chain/cache/subnet_ids_test.go (vendored, 5 changed lines)
@@ -3,13 +3,14 @@ package cache
|
||||
import (
|
||||
"testing"
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
)
|
||||
|
||||
func TestSubnetIDsCache_RoundTrip(t *testing.T) {
|
||||
c := newSubnetIDs()
|
||||
slot := uint64(100)
|
||||
slot := types.Slot(100)
|
||||
committeeIDs := c.GetAggregatorSubnetIDs(slot)
|
||||
assert.Equal(t, 0, len(committeeIDs), "Empty cache returned an object")
|
||||
|
||||
@@ -42,12 +43,10 @@ func TestSubnetIDsCache_RoundTrip(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSubnetIDsCache_PersistentCommitteeRoundtrip(t *testing.T) {
|
||||
pubkeySet := [][48]byte{}
|
||||
c := newSubnetIDs()
|
||||
|
||||
for i := 0; i < 20; i++ {
|
||||
pubkey := [48]byte{byte(i)}
|
||||
pubkeySet = append(pubkeySet, pubkey)
|
||||
c.AddPersistentCommittee(pubkey[:], []uint64{uint64(i)}, 0)
|
||||
}
|
||||
|
||||
|
||||
@@ -21,11 +21,13 @@ go_library(
|
||||
"//beacon-chain:__subpackages__",
|
||||
"//fuzz:__pkg__",
|
||||
"//shared/testutil:__pkg__",
|
||||
"//validator/accounts:__pkg__",
|
||||
],
|
||||
deps = [
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/validators:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/interface:go_default_library",
|
||||
"//proto/beacon/p2p/v1:go_default_library",
|
||||
"//shared/attestationutil:go_default_library",
|
||||
"//shared/bls:go_default_library",
|
||||
@@ -34,12 +36,13 @@ go_library(
|
||||
"//shared/hashutil:go_default_library",
|
||||
"//shared/mathutil:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
"//shared/slashutil:go_default_library",
|
||||
"//shared/sliceutil:go_default_library",
|
||||
"//shared/trieutil:go_default_library",
|
||||
"@com_github_gogo_protobuf//proto:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
|
||||
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_ssz//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@io_opencensus_go//trace:go_default_library",
|
||||
],
|
||||
@@ -49,6 +52,7 @@ go_test(
|
||||
name = "go_default_test",
|
||||
size = "medium",
|
||||
srcs = [
|
||||
"attestation_regression_test.go",
|
||||
"attestation_test.go",
|
||||
"attester_slashing_test.go",
|
||||
"block_operations_fuzz_test.go",
|
||||
@@ -58,13 +62,16 @@ go_test(
|
||||
"exit_test.go",
|
||||
"genesis_test.go",
|
||||
"header_test.go",
|
||||
"proposer_slashing_regression_test.go",
|
||||
"proposer_slashing_test.go",
|
||||
"randao_test.go",
|
||||
],
|
||||
data = glob(["testdata/**"]),
|
||||
embed = [":go_default_library"],
|
||||
shard_count = 2,
|
||||
deps = [
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/p2p/types:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//proto/beacon/p2p/v1:go_default_library",
|
||||
"//shared/aggregation:go_default_library",
|
||||
@@ -79,6 +86,7 @@ go_test(
|
||||
"//shared/trieutil:go_default_library",
|
||||
"@com_github_gogo_protobuf//proto:go_default_library",
|
||||
"@com_github_google_gofuzz//:go_default_library",
|
||||
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
|
||||
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
@@ -1,18 +1,17 @@
|
||||
package blocks
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/attestationutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/bls"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
@@ -24,8 +23,8 @@ func ProcessAttestations(
|
||||
beaconState *stateTrie.BeaconState,
|
||||
b *ethpb.SignedBeaconBlock,
|
||||
) (*stateTrie.BeaconState, error) {
|
||||
if b.Block == nil || b.Block.Body == nil {
|
||||
return nil, errors.New("block and block body can't be nil")
|
||||
if err := helpers.VerifyNilBeaconBlock(b); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var err error
|
||||
@@ -86,8 +85,8 @@ func ProcessAttestationsNoVerifySignature(
|
||||
beaconState *stateTrie.BeaconState,
|
||||
b *ethpb.SignedBeaconBlock,
|
||||
) (*stateTrie.BeaconState, error) {
|
||||
if b.Block == nil || b.Block.Body == nil {
|
||||
return nil, errors.New("block and block body can't be nil")
|
||||
if err := helpers.VerifyNilBeaconBlock(b); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
body := b.Block.Body
|
||||
var err error
|
||||
@@ -100,27 +99,21 @@ func ProcessAttestationsNoVerifySignature(
|
||||
return beaconState, nil
|
||||
}
|
||||
|
||||
// ProcessAttestationNoVerifySignature processes the attestation without verifying the attestation signature. This
|
||||
// method is used to validate attestations whose signatures have already been verified.
|
||||
func ProcessAttestationNoVerifySignature(
|
||||
// VerifyAttestationNoVerifySignature verifies the attestation without verifying the attestation signature. This is
|
||||
// used before processing attestation with the beacon state.
|
||||
func VerifyAttestationNoVerifySignature(
|
||||
ctx context.Context,
|
||||
beaconState *stateTrie.BeaconState,
|
||||
att *ethpb.Attestation,
|
||||
) (*stateTrie.BeaconState, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "core.ProcessAttestationNoVerifySignature")
|
||||
ctx, span := trace.StartSpan(ctx, "core.VerifyAttestationNoVerifySignature")
|
||||
defer span.End()
|
||||
|
||||
if att == nil || att.Data == nil || att.Data.Target == nil {
|
||||
return nil, errors.New("nil attestation data target")
|
||||
}
|
||||
|
||||
currEpoch := helpers.SlotToEpoch(beaconState.Slot())
|
||||
var prevEpoch uint64
|
||||
if currEpoch == 0 {
|
||||
prevEpoch = 0
|
||||
} else {
|
||||
prevEpoch = currEpoch - 1
|
||||
if err := helpers.ValidateNilAttestation(att); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
currEpoch := helpers.CurrentEpoch(beaconState)
|
||||
prevEpoch := helpers.PrevEpoch(beaconState)
|
||||
data := att.Data
|
||||
if data.Target.Epoch != prevEpoch && data.Target.Epoch != currEpoch {
|
||||
return nil, fmt.Errorf(
|
||||
@@ -130,8 +123,8 @@ func ProcessAttestationNoVerifySignature(
|
||||
currEpoch,
|
||||
)
|
||||
}
|
||||
if helpers.SlotToEpoch(data.Slot) != data.Target.Epoch {
|
||||
return nil, fmt.Errorf("data slot is not in the same epoch as target %d != %d", helpers.SlotToEpoch(data.Slot), data.Target.Epoch)
|
||||
if err := helpers.ValidateSlotTargetEpoch(att.Data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
s := att.Data.Slot
|
||||
@@ -158,7 +151,7 @@ func ProcessAttestationNoVerifySignature(
|
||||
return nil, err
|
||||
}
|
||||
c := helpers.SlotCommitteeCount(activeValidatorCount)
|
||||
if att.Data.CommitteeIndex > c {
|
||||
if uint64(att.Data.CommitteeIndex) >= c {
|
||||
return nil, fmt.Errorf("committee index %d >= committee count %d", att.Data.CommitteeIndex, c)
|
||||
}
|
||||
|
||||
@@ -166,6 +159,36 @@ func ProcessAttestationNoVerifySignature(
|
||||
return nil, errors.Wrap(err, "could not verify attestation bitfields")
|
||||
}
|
||||
|
||||
// Verify attesting indices are correct.
|
||||
committee, err := helpers.BeaconCommitteeFromState(beaconState, att.Data.Slot, att.Data.CommitteeIndex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
indexedAtt, err := attestationutil.ConvertToIndexed(ctx, att, committee)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return nil, attestationutil.IsValidAttestationIndices(ctx, indexedAtt)
|
||||
}
|
||||
|
||||
// ProcessAttestationNoVerifySignature processes the attestation without verifying the attestation signature. This
|
||||
// method is used to validate attestations whose signatures have already been verified.
|
||||
func ProcessAttestationNoVerifySignature(
|
||||
ctx context.Context,
|
||||
beaconState *stateTrie.BeaconState,
|
||||
att *ethpb.Attestation,
|
||||
) (*stateTrie.BeaconState, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "core.ProcessAttestationNoVerifySignature")
|
||||
defer span.End()
|
||||
|
||||
if _, err := VerifyAttestationNoVerifySignature(ctx, beaconState, att); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
currEpoch := helpers.CurrentEpoch(beaconState)
|
||||
data := att.Data
|
||||
s := att.Data.Slot
|
||||
proposerIndex, err := helpers.BeaconProposerIndex(beaconState)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -177,112 +200,39 @@ func ProcessAttestationNoVerifySignature(
|
||||
ProposerIndex: proposerIndex,
|
||||
}
|
||||
|
||||
var ffgSourceEpoch uint64
|
||||
var ffgSourceRoot []byte
|
||||
var ffgTargetEpoch uint64
|
||||
if data.Target.Epoch == currEpoch {
|
||||
ffgSourceEpoch = beaconState.CurrentJustifiedCheckpoint().Epoch
|
||||
ffgSourceRoot = beaconState.CurrentJustifiedCheckpoint().Root
|
||||
ffgTargetEpoch = currEpoch
|
||||
if !beaconState.MatchCurrentJustifiedCheckpoint(data.Source) {
|
||||
return nil, errors.New("source check point not equal to current justified checkpoint")
|
||||
}
|
||||
if err := beaconState.AppendCurrentEpochAttestations(pendingAtt); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
ffgSourceEpoch = beaconState.PreviousJustifiedCheckpoint().Epoch
|
||||
ffgSourceRoot = beaconState.PreviousJustifiedCheckpoint().Root
|
||||
ffgTargetEpoch = prevEpoch
|
||||
if !beaconState.MatchPreviousJustifiedCheckpoint(data.Source) {
|
||||
return nil, errors.New("source check point not equal to previous justified checkpoint")
|
||||
}
|
||||
if err := beaconState.AppendPreviousEpochAttestations(pendingAtt); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if data.Source.Epoch != ffgSourceEpoch {
|
||||
return nil, fmt.Errorf("expected source epoch %d, received %d", ffgSourceEpoch, data.Source.Epoch)
|
||||
}
|
||||
if !bytes.Equal(data.Source.Root, ffgSourceRoot) {
|
||||
return nil, fmt.Errorf("expected source root %#x, received %#x", ffgSourceRoot, data.Source.Root)
|
||||
}
|
||||
if data.Target.Epoch != ffgTargetEpoch {
|
||||
return nil, fmt.Errorf("expected target epoch %d, received %d", ffgTargetEpoch, data.Target.Epoch)
|
||||
}
|
||||
|
||||
// Verify attesting indices are correct.
|
||||
committee, err := helpers.BeaconCommitteeFromState(beaconState, att.Data.Slot, att.Data.CommitteeIndex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
indexedAtt := attestationutil.ConvertToIndexed(ctx, att, committee)
|
||||
if err := attestationutil.IsValidAttestationIndices(ctx, indexedAtt); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return beaconState, nil
|
||||
}
|
||||
|
||||
// VerifyAttestationsSignatures will verify the signatures of the provided attestations. This method performs
|
||||
// a single BLS verification call to verify the signatures of all of the provided attestations. All
|
||||
// of the provided attestations must have valid signatures or this method will return an error.
|
||||
// This method does not determine which attestation signature is invalid, only that one or more
|
||||
// attestation signatures were not valid.
|
||||
func VerifyAttestationsSignatures(ctx context.Context, beaconState *stateTrie.BeaconState, b *ethpb.SignedBeaconBlock) error {
|
||||
ctx, span := trace.StartSpan(ctx, "core.VerifyAttestationsSignatures")
|
||||
defer span.End()
|
||||
atts := b.Block.Body.Attestations
|
||||
span.AddAttributes(trace.Int64Attribute("attestations", int64(len(atts))))
|
||||
|
||||
if len(atts) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
fork := beaconState.Fork()
|
||||
gvr := beaconState.GenesisValidatorRoot()
|
||||
dt := params.BeaconConfig().DomainBeaconAttester
|
||||
|
||||
// Split attestations by fork. Note: the signature domain will differ based on the fork.
|
||||
var preForkAtts []*ethpb.Attestation
|
||||
var postForkAtts []*ethpb.Attestation
|
||||
for _, a := range atts {
|
||||
if helpers.SlotToEpoch(a.Data.Slot) < fork.Epoch {
|
||||
preForkAtts = append(preForkAtts, a)
|
||||
} else {
|
||||
postForkAtts = append(postForkAtts, a)
|
||||
}
|
||||
}
|
||||
|
||||
// Check attestations from before the fork.
|
||||
if fork.Epoch > 0 { // Check to prevent underflow.
|
||||
prevDomain, err := helpers.Domain(fork, fork.Epoch-1, dt, gvr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := verifyAttestationsSigWithDomain(ctx, beaconState, preForkAtts, prevDomain); err != nil {
|
||||
return err
|
||||
}
|
||||
} else if len(preForkAtts) > 0 {
|
||||
// This is a sanity check that preForkAtts were not ignored when fork.Epoch == 0. This
|
||||
// condition is not possible, but it doesn't hurt to check anyway.
|
||||
return errors.New("some attestations were not verified from previous fork before genesis")
|
||||
}
|
||||
|
||||
// Then check attestations from after the fork.
|
||||
currDomain, err := helpers.Domain(fork, fork.Epoch, dt, gvr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return verifyAttestationsSigWithDomain(ctx, beaconState, postForkAtts, currDomain)
|
||||
}
|
||||
|
||||
// VerifyAttestationSignature converts and attestation into an indexed attestation and verifies
|
||||
// the signature in that attestation.
|
||||
func VerifyAttestationSignature(ctx context.Context, beaconState *stateTrie.BeaconState, att *ethpb.Attestation) error {
|
||||
if att == nil || att.Data == nil || att.AggregationBits.Count() == 0 {
|
||||
return fmt.Errorf("nil or missing attestation data: %v", att)
|
||||
if err := helpers.ValidateNilAttestation(att); err != nil {
|
||||
return err
|
||||
}
|
||||
committee, err := helpers.BeaconCommitteeFromState(beaconState, att.Data.Slot, att.Data.CommitteeIndex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
indexedAtt := attestationutil.ConvertToIndexed(ctx, att, committee)
|
||||
indexedAtt, err := attestationutil.ConvertToIndexed(ctx, att, committee)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return VerifyIndexedAttestation(ctx, beaconState, indexedAtt)
|
||||
}
|
||||
|
||||
@@ -314,9 +264,9 @@ func VerifyIndexedAttestation(ctx context.Context, beaconState *stateTrie.Beacon
|
||||
return err
|
||||
}
|
||||
indices := indexedAtt.AttestingIndices
|
||||
pubkeys := []bls.PublicKey{}
|
||||
var pubkeys []bls.PublicKey
|
||||
for i := 0; i < len(indices); i++ {
|
||||
pubkeyAtIdx := beaconState.PubkeyAtIndex(indices[i])
|
||||
pubkeyAtIdx := beaconState.PubkeyAtIndex(types.ValidatorIndex(indices[i]))
|
||||
pk, err := bls.PublicKeyFromBytes(pubkeyAtIdx[:])
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not deserialize validator public key")
|
||||
@@ -325,55 +275,3 @@ func VerifyIndexedAttestation(ctx context.Context, beaconState *stateTrie.Beacon
|
||||
}
|
||||
return attestationutil.VerifyIndexedAttestationSig(ctx, indexedAtt, pubkeys, domain)
|
||||
}
|
||||
|
||||
// Inner method to verify attestations. This abstraction allows for the domain to be provided as an
|
||||
// argument.
|
||||
func verifyAttestationsSigWithDomain(ctx context.Context, beaconState *stateTrie.BeaconState, atts []*ethpb.Attestation, domain []byte) error {
|
||||
if len(atts) == 0 {
|
||||
return nil
|
||||
}
|
||||
set, err := createAttestationSignatureSet(ctx, beaconState, atts, domain)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
verify, err := set.Verify()
|
||||
if err != nil {
|
||||
return errors.Errorf("got error in multiple verification: %v", err)
|
||||
}
|
||||
if !verify {
|
||||
return errors.New("one or more attestation signatures did not verify")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// VerifyAttSigUseCheckPt uses the checkpoint info object to verify attestation signature.
|
||||
func VerifyAttSigUseCheckPt(ctx context.Context, c *pb.CheckPtInfo, att *ethpb.Attestation) error {
|
||||
if att == nil || att.Data == nil || att.AggregationBits.Count() == 0 {
|
||||
return fmt.Errorf("nil or missing attestation data: %v", att)
|
||||
}
|
||||
seed := bytesutil.ToBytes32(c.Seed)
|
||||
committee, err := helpers.BeaconCommittee(c.ActiveIndices, seed, att.Data.Slot, att.Data.CommitteeIndex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
indexedAtt := attestationutil.ConvertToIndexed(ctx, att, committee)
|
||||
if err := attestationutil.IsValidAttestationIndices(ctx, indexedAtt); err != nil {
|
||||
return err
|
||||
}
|
||||
domain, err := helpers.Domain(c.Fork, indexedAtt.Data.Target.Epoch, params.BeaconConfig().DomainBeaconAttester, c.GenesisRoot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
indices := indexedAtt.AttestingIndices
|
||||
pubkeys := []bls.PublicKey{}
|
||||
for i := 0; i < len(indices); i++ {
|
||||
pubkeyAtIdx := c.PubKeys[indices[i]]
|
||||
pk, err := bls.PublicKeyFromBytes(pubkeyAtIdx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not deserialize validator public key")
|
||||
}
|
||||
pubkeys = append(pubkeys, pk)
|
||||
}
|
||||
|
||||
return attestationutil.VerifyIndexedAttestationSig(ctx, indexedAtt, pubkeys, domain)
|
||||
}
|
||||
|
||||
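This refactor splits the old ProcessAttestationNoVerifySignature into a pure check, VerifyAttestationNoVerifySignature, and a separate state-mutating step, with FFG source matching delegated to the state's MatchCurrentJustifiedCheckpoint and MatchPreviousJustifiedCheckpoint helpers. A hedged sketch of using the verification step on its own to pre-filter attestations without touching the state; filterVerifiableAttestations is illustrative, not part of the diff:

package blocks

import (
	"context"

	ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
	stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
)

// filterVerifiableAttestations keeps only the attestations that pass the
// signature-free checks, without mutating the beacon state.
func filterVerifiableAttestations(ctx context.Context, st *stateTrie.BeaconState,
	atts []*ethpb.Attestation) []*ethpb.Attestation {
	var ok []*ethpb.Attestation
	for _, a := range atts {
		// The returned state is always nil here; only the error matters.
		if _, err := VerifyAttestationNoVerifySignature(ctx, st, a); err == nil {
			ok = append(ok, a)
		}
	}
	return ok
}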
beacon-chain/core/blocks/attestation_regression_test.go (new file, 44 lines)
@@ -0,0 +1,44 @@
package blocks_test

import (
	"context"
	"io/ioutil"
	"testing"

	ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
	"github.com/prysmaticlabs/prysm/beacon-chain/state"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
	"github.com/prysmaticlabs/prysm/shared/testutil/require"
)

// Beaconfuzz discovered an off by one issue where an attestation could be produced which would pass
// validation when att.Data.CommitteeIndex is 1 and the committee count per slot is also 1. The only
// valid att.Data.CommitteeIndex would be 0, so this is an off by one error.
// See: https://github.com/sigp/beacon-fuzz/issues/78
func TestProcessAttestationNoVerifySignature_BeaconFuzzIssue78(t *testing.T) {
	attData, err := ioutil.ReadFile("testdata/beaconfuzz_78_attestation.ssz")
	if err != nil {
		t.Fatal(err)
	}
	att := &ethpb.Attestation{}
	if err := att.UnmarshalSSZ(attData); err != nil {
		t.Fatal(err)
	}
	stateData, err := ioutil.ReadFile("testdata/beaconfuzz_78_beacon.ssz")
	if err != nil {
		t.Fatal(err)
	}
	spb := &pb.BeaconState{}
	if err := spb.UnmarshalSSZ(stateData); err != nil {
		t.Fatal(err)
	}
	st, err := state.InitializeFromProtoUnsafe(spb)
	if err != nil {
		t.Fatal(err)
	}

	ctx := context.Background()
	_, err = blocks.ProcessAttestationNoVerifySignature(ctx, st, att)
	require.ErrorContains(t, "committee index 1 >= committee count 1", err)
}
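The regression covers an off-by-one in the committee index bound: with one committee per slot the only valid index is 0, so index 1 must be rejected, which is why the guard was changed from > to >=. A minimal sketch of the corrected check in isolation; validCommitteeIndex is illustrative, not the helpers call used in the repository:

package main

import "fmt"

// validCommitteeIndex mirrors the corrected guard: indices are zero-based, so
// an index equal to the committee count is already out of range.
func validCommitteeIndex(index, committeeCount uint64) error {
	if index >= committeeCount {
		return fmt.Errorf("committee index %d >= committee count %d", index, committeeCount)
	}
	return nil
}

func main() {
	fmt.Println(validCommitteeIndex(0, 1)) // <nil>
	fmt.Println(validCommitteeIndex(1, 1)) // committee index 1 >= committee count 1
}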
@@ -5,6 +5,7 @@ import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
@@ -24,12 +25,12 @@ import (
|
||||
|
||||
func TestProcessAttestations_InclusionDelayFailure(t *testing.T) {
|
||||
attestations := []*ethpb.Attestation{
|
||||
{
|
||||
testutil.HydrateAttestation(ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Target: ðpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
|
||||
Slot: 5,
|
||||
},
|
||||
},
|
||||
}),
|
||||
}
|
||||
b := testutil.NewBeaconBlock()
|
||||
b.Block = ðpb.BeaconBlock{
|
||||
@@ -50,10 +51,10 @@ func TestProcessAttestations_InclusionDelayFailure(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestProcessAttestations_NeitherCurrentNorPrevEpoch(t *testing.T) {
|
||||
att := ðpb.Attestation{
|
||||
att := testutil.HydrateAttestation(ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Epoch: 0, Root: []byte("hello-world")},
|
||||
Target: ðpb.Checkpoint{Epoch: 0}}}
|
||||
Target: ðpb.Checkpoint{Epoch: 0}}})
|
||||
|
||||
b := testutil.NewBeaconBlock()
|
||||
b.Block = ðpb.BeaconBlock{
|
||||
@@ -80,14 +81,13 @@ func TestProcessAttestations_NeitherCurrentNorPrevEpoch(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestProcessAttestations_CurrentEpochFFGDataMismatches(t *testing.T) {
|
||||
aggBits := bitfield.NewBitlist(3)
|
||||
attestations := []*ethpb.Attestation{
|
||||
{
|
||||
Data: ðpb.AttestationData{
|
||||
Target: ðpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
|
||||
Source: ðpb.Checkpoint{Epoch: 1, Root: make([]byte, 32)},
|
||||
},
|
||||
AggregationBits: aggBits,
|
||||
AggregationBits: bitfield.Bitlist{0x09},
|
||||
},
|
||||
}
|
||||
b := testutil.NewBeaconBlock()
|
||||
@@ -103,22 +103,11 @@ func TestProcessAttestations_CurrentEpochFFGDataMismatches(t *testing.T) {
|
||||
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(cfc))
|
||||
require.NoError(t, beaconState.SetCurrentEpochAttestations([]*pb.PendingAttestation{}))
|
||||
|
||||
want := fmt.Sprintf(
|
||||
"expected source epoch %d, received %d",
|
||||
helpers.CurrentEpoch(beaconState),
|
||||
attestations[0].Data.Source.Epoch,
|
||||
)
|
||||
want := "source check point not equal to current justified checkpoint"
|
||||
_, err := blocks.ProcessAttestations(context.Background(), beaconState, b)
|
||||
assert.ErrorContains(t, want, err)
|
||||
|
||||
b.Block.Body.Attestations[0].Data.Source.Epoch = helpers.CurrentEpoch(beaconState)
|
||||
b.Block.Body.Attestations[0].Data.Source.Root = []byte{}
|
||||
|
||||
want = fmt.Sprintf(
|
||||
"expected source root %#x, received %#x",
|
||||
beaconState.CurrentJustifiedCheckpoint().Root,
|
||||
attestations[0].Data.Source.Root,
|
||||
)
|
||||
_, err = blocks.ProcessAttestations(context.Background(), beaconState, b)
|
||||
assert.ErrorContains(t, want, err)
|
||||
}
|
||||
@@ -145,30 +134,19 @@ func TestProcessAttestations_PrevEpochFFGDataMismatches(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
err := beaconState.SetSlot(beaconState.Slot() + params.BeaconConfig().SlotsPerEpoch + params.BeaconConfig().MinAttestationInclusionDelay)
|
||||
err := beaconState.SetSlot(beaconState.Slot() + 2*params.BeaconConfig().SlotsPerEpoch)
|
||||
require.NoError(t, err)
|
||||
pfc := beaconState.PreviousJustifiedCheckpoint()
|
||||
pfc.Root = []byte("hello-world")
|
||||
require.NoError(t, beaconState.SetPreviousJustifiedCheckpoint(pfc))
|
||||
require.NoError(t, beaconState.SetPreviousEpochAttestations([]*pb.PendingAttestation{}))
|
||||
|
||||
want := fmt.Sprintf(
|
||||
"expected source epoch %d, received %d",
|
||||
helpers.PrevEpoch(beaconState),
|
||||
attestations[0].Data.Source.Epoch,
|
||||
)
|
||||
want := "source check point not equal to previous justified checkpoint"
|
||||
_, err = blocks.ProcessAttestations(context.Background(), beaconState, b)
|
||||
assert.ErrorContains(t, want, err)
|
||||
|
||||
b.Block.Body.Attestations[0].Data.Source.Epoch = helpers.PrevEpoch(beaconState)
|
||||
b.Block.Body.Attestations[0].Data.Target.Epoch = helpers.CurrentEpoch(beaconState)
|
||||
b.Block.Body.Attestations[0].Data.Target.Epoch = helpers.PrevEpoch(beaconState)
|
||||
b.Block.Body.Attestations[0].Data.Source.Root = []byte{}
|
||||
|
||||
want = fmt.Sprintf(
|
||||
"expected source root %#x, received %#x",
|
||||
beaconState.CurrentJustifiedCheckpoint().Root,
|
||||
attestations[0].Data.Source.Root,
|
||||
)
|
||||
_, err = blocks.ProcessAttestations(context.Background(), beaconState, b)
|
||||
assert.ErrorContains(t, want, err)
|
||||
}
|
||||
@@ -211,15 +189,13 @@ func TestProcessAttestations_OK(t *testing.T) {
|
||||
aggBits.SetBitAt(0, true)
|
||||
var mockRoot [32]byte
|
||||
copy(mockRoot[:], "hello-world")
|
||||
att := ðpb.Attestation{
|
||||
att := testutil.HydrateAttestation(ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Epoch: 0, Root: mockRoot[:]},
|
||||
Target: ðpb.Checkpoint{Epoch: 0, Root: mockRoot[:]},
|
||||
BeaconBlockRoot: make([]byte, 32),
|
||||
Source: ðpb.Checkpoint{Root: mockRoot[:]},
|
||||
Target: ðpb.Checkpoint{Root: mockRoot[:]},
|
||||
},
|
||||
AggregationBits: aggBits,
|
||||
Signature: make([]byte, 96),
|
||||
}
|
||||
})
|
||||
|
||||
cfc := beaconState.CurrentJustifiedCheckpoint()
|
||||
cfc.Root = mockRoot[:]
|
||||
@@ -228,7 +204,7 @@ func TestProcessAttestations_OK(t *testing.T) {
|
||||
|
||||
committee, err := helpers.BeaconCommitteeFromState(beaconState, att.Data.Slot, att.Data.CommitteeIndex)
|
||||
require.NoError(t, err)
|
||||
attestingIndices := attestationutil.AttestingIndices(att.AggregationBits, committee)
|
||||
attestingIndices, err := attestationutil.AttestingIndices(att.AggregationBits, committee)
|
||||
require.NoError(t, err)
|
||||
sigs := make([]bls.Signature, len(attestingIndices))
|
||||
for i, indice := range attestingIndices {
|
||||
@@ -238,7 +214,7 @@ func TestProcessAttestations_OK(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
sigs[i] = sig
|
||||
}
|
||||
att.Signature = bls.AggregateSignatures(sigs).Marshal()[:]
|
||||
att.Signature = bls.AggregateSignatures(sigs).Marshal()
|
||||
|
||||
block := testutil.NewBeaconBlock()
|
||||
block.Block.Body.Attestations = []*ethpb.Attestation{att}
|
||||
@@ -251,15 +227,13 @@ func TestProcessAttestations_OK(t *testing.T) {
|
||||
|
||||
func TestProcessAggregatedAttestation_OverlappingBits(t *testing.T) {
|
||||
beaconState, privKeys := testutil.DeterministicGenesisState(t, 100)
|
||||
data := ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Epoch: 0, Root: bytesutil.PadTo([]byte("hello-world"), 32)},
|
||||
Target: ðpb.Checkpoint{Epoch: 0, Root: bytesutil.PadTo([]byte("hello-world"), 32)},
|
||||
BeaconBlockRoot: make([]byte, 32),
|
||||
}
|
||||
aggBits1 := bitfield.NewBitlist(4)
|
||||
data := testutil.HydrateAttestationData(ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Epoch: 0, Root: bytesutil.PadTo([]byte("hello-world"), 32)},
|
||||
Target: ðpb.Checkpoint{Epoch: 0, Root: bytesutil.PadTo([]byte("hello-world"), 32)},
|
||||
})
|
||||
aggBits1 := bitfield.NewBitlist(3)
|
||||
aggBits1.SetBitAt(0, true)
|
||||
aggBits1.SetBitAt(1, true)
|
||||
aggBits1.SetBitAt(2, true)
|
||||
att1 := ðpb.Attestation{
|
||||
Data: data,
|
||||
AggregationBits: aggBits1,
|
||||
@@ -272,7 +246,7 @@ func TestProcessAggregatedAttestation_OverlappingBits(t *testing.T) {
|
||||
|
||||
committee, err := helpers.BeaconCommitteeFromState(beaconState, att1.Data.Slot, att1.Data.CommitteeIndex)
|
||||
require.NoError(t, err)
|
||||
attestingIndices1 := attestationutil.AttestingIndices(att1.AggregationBits, committee)
|
||||
attestingIndices1, err := attestationutil.AttestingIndices(att1.AggregationBits, committee)
|
||||
require.NoError(t, err)
|
||||
sigs := make([]bls.Signature, len(attestingIndices1))
|
||||
for i, indice := range attestingIndices1 {
|
||||
@@ -282,12 +256,11 @@ func TestProcessAggregatedAttestation_OverlappingBits(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
sigs[i] = sig
|
||||
}
|
||||
att1.Signature = bls.AggregateSignatures(sigs).Marshal()[:]
|
||||
att1.Signature = bls.AggregateSignatures(sigs).Marshal()
|
||||
|
||||
aggBits2 := bitfield.NewBitlist(4)
|
||||
aggBits2 := bitfield.NewBitlist(3)
|
||||
aggBits2.SetBitAt(1, true)
|
||||
aggBits2.SetBitAt(2, true)
|
||||
aggBits2.SetBitAt(3, true)
|
||||
att2 := ðpb.Attestation{
|
||||
Data: data,
|
||||
AggregationBits: aggBits2,
|
||||
@@ -295,7 +268,7 @@ func TestProcessAggregatedAttestation_OverlappingBits(t *testing.T) {
|
||||
|
||||
committee, err = helpers.BeaconCommitteeFromState(beaconState, att2.Data.Slot, att2.Data.CommitteeIndex)
|
||||
require.NoError(t, err)
|
||||
attestingIndices2 := attestationutil.AttestingIndices(att2.AggregationBits, committee)
|
||||
attestingIndices2, err := attestationutil.AttestingIndices(att2.AggregationBits, committee)
|
||||
require.NoError(t, err)
|
||||
sigs = make([]bls.Signature, len(attestingIndices2))
|
||||
for i, indice := range attestingIndices2 {
|
||||
@@ -305,7 +278,7 @@ func TestProcessAggregatedAttestation_OverlappingBits(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
sigs[i] = sig
|
||||
}
|
||||
att2.Signature = bls.AggregateSignatures(sigs).Marshal()[:]
|
||||
att2.Signature = bls.AggregateSignatures(sigs).Marshal()
|
||||
|
||||
_, err = attaggregation.AggregatePair(att1, att2)
|
||||
assert.ErrorContains(t, aggregation.ErrBitsOverlap.Error(), err)
|
||||
@@ -316,11 +289,10 @@ func TestProcessAggregatedAttestation_NoOverlappingBits(t *testing.T) {
|
||||
|
||||
var mockRoot [32]byte
|
||||
copy(mockRoot[:], "hello-world")
|
||||
data := ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Epoch: 0, Root: mockRoot[:]},
|
||||
Target: ðpb.Checkpoint{Epoch: 0, Root: mockRoot[:]},
|
||||
BeaconBlockRoot: make([]byte, 32),
|
||||
}
|
||||
data := testutil.HydrateAttestationData(ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Epoch: 0, Root: mockRoot[:]},
|
||||
Target: ðpb.Checkpoint{Epoch: 0, Root: mockRoot[:]},
|
||||
})
|
||||
aggBits1 := bitfield.NewBitlist(9)
|
||||
aggBits1.SetBitAt(0, true)
|
||||
aggBits1.SetBitAt(1, true)
|
||||
@@ -337,7 +309,7 @@ func TestProcessAggregatedAttestation_NoOverlappingBits(t *testing.T) {
|
||||
|
||||
committee, err := helpers.BeaconCommitteeFromState(beaconState, att1.Data.Slot, att1.Data.CommitteeIndex)
|
||||
require.NoError(t, err)
|
||||
attestingIndices1 := attestationutil.AttestingIndices(att1.AggregationBits, committee)
|
||||
attestingIndices1, err := attestationutil.AttestingIndices(att1.AggregationBits, committee)
|
||||
require.NoError(t, err)
|
||||
sigs := make([]bls.Signature, len(attestingIndices1))
|
||||
for i, indice := range attestingIndices1 {
|
||||
@@ -347,7 +319,7 @@ func TestProcessAggregatedAttestation_NoOverlappingBits(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
sigs[i] = sig
|
||||
}
|
||||
att1.Signature = bls.AggregateSignatures(sigs).Marshal()[:]
|
||||
att1.Signature = bls.AggregateSignatures(sigs).Marshal()
|
||||
|
||||
aggBits2 := bitfield.NewBitlist(9)
|
||||
aggBits2.SetBitAt(2, true)
|
||||
@@ -360,7 +332,7 @@ func TestProcessAggregatedAttestation_NoOverlappingBits(t *testing.T) {
|
||||
|
||||
committee, err = helpers.BeaconCommitteeFromState(beaconState, att2.Data.Slot, att2.Data.CommitteeIndex)
|
||||
require.NoError(t, err)
|
||||
attestingIndices2 := attestationutil.AttestingIndices(att2.AggregationBits, committee)
|
||||
attestingIndices2, err := attestationutil.AttestingIndices(att2.AggregationBits, committee)
|
||||
require.NoError(t, err)
|
||||
sigs = make([]bls.Signature, len(attestingIndices2))
|
||||
for i, indice := range attestingIndices2 {
|
||||
@@ -370,7 +342,7 @@ func TestProcessAggregatedAttestation_NoOverlappingBits(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
sigs[i] = sig
|
||||
}
|
||||
att2.Signature = bls.AggregateSignatures(sigs).Marshal()[:]
|
||||
att2.Signature = bls.AggregateSignatures(sigs).Marshal()
|
||||
|
||||
aggregatedAtt, err := attaggregation.AggregatePair(att1, att2)
|
||||
require.NoError(t, err)
|
||||
@@ -384,17 +356,17 @@ func TestProcessAggregatedAttestation_NoOverlappingBits(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestProcessAttestationsNoVerify_IncorrectSlotTargetEpoch(t *testing.T) {
|
||||
func TestVerifyAttestationNoVerifySignature_IncorrectSlotTargetEpoch(t *testing.T) {
|
||||
beaconState, _ := testutil.DeterministicGenesisState(t, 1)
|
||||
|
||||
att := ðpb.Attestation{
|
||||
att := testutil.HydrateAttestation(ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: params.BeaconConfig().SlotsPerEpoch,
|
||||
Target: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
},
|
||||
}
|
||||
wanted := fmt.Sprintf("data slot is not in the same epoch as target %d != %d", helpers.SlotToEpoch(att.Data.Slot), att.Data.Target.Epoch)
|
||||
_, err := blocks.ProcessAttestationNoVerifySignature(context.TODO(), beaconState, att)
|
||||
})
|
||||
wanted := "slot 32 does not match target epoch 0"
|
||||
_, err := blocks.VerifyAttestationNoVerifySignature(context.TODO(), beaconState, att)
|
||||
assert.ErrorContains(t, wanted, err)
|
||||
}
|
||||
|
||||
@@ -429,7 +401,38 @@ func TestProcessAttestationsNoVerify_OK(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestProcessAttestationsNoVerify_BadAttIdx(t *testing.T) {
|
||||
func TestVerifyAttestationNoVerifySignature_OK(t *testing.T) {
|
||||
// Attestation with an empty signature
|
||||
|
||||
beaconState, _ := testutil.DeterministicGenesisState(t, 100)
|
||||
|
||||
aggBits := bitfield.NewBitlist(3)
|
||||
aggBits.SetBitAt(1, true)
|
||||
var mockRoot [32]byte
|
||||
copy(mockRoot[:], "hello-world")
|
||||
att := ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Epoch: 0, Root: mockRoot[:]},
|
||||
Target: ðpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
|
||||
},
|
||||
AggregationBits: aggBits,
|
||||
}
|
||||
|
||||
zeroSig := [96]byte{}
|
||||
att.Signature = zeroSig[:]
|
||||
|
||||
err := beaconState.SetSlot(beaconState.Slot() + params.BeaconConfig().MinAttestationInclusionDelay)
|
||||
require.NoError(t, err)
|
||||
ckp := beaconState.CurrentJustifiedCheckpoint()
|
||||
copy(ckp.Root, "hello-world")
|
||||
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(ckp))
|
||||
require.NoError(t, beaconState.SetCurrentEpochAttestations([]*pb.PendingAttestation{}))
|
||||
|
||||
_, err = blocks.VerifyAttestationNoVerifySignature(context.TODO(), beaconState, att)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestVerifyAttestationNoVerifySignature_BadAttIdx(t *testing.T) {
|
||||
beaconState, _ := testutil.DeterministicGenesisState(t, 100)
|
||||
aggBits := bitfield.NewBitlist(3)
|
||||
aggBits.SetBitAt(1, true)
|
||||
@@ -450,7 +453,7 @@ func TestProcessAttestationsNoVerify_BadAttIdx(t *testing.T) {
|
||||
copy(ckp.Root, "hello-world")
|
||||
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(ckp))
|
||||
require.NoError(t, beaconState.SetCurrentEpochAttestations([]*pb.PendingAttestation{}))
|
||||
_, err := blocks.ProcessAttestationNoVerifySignature(context.TODO(), beaconState, att)
|
||||
_, err := blocks.VerifyAttestationNoVerifySignature(context.TODO(), beaconState, att)
|
||||
require.ErrorContains(t, "committee index 100 >= committee count 1", err)
|
||||
}
|
||||
|
||||
@@ -478,24 +481,20 @@ func TestConvertToIndexed_OK(t *testing.T) {
|
||||
wantedAttestingIndices: []uint64{43, 47},
|
||||
},
|
||||
{
|
||||
aggregationBitfield: bitfield.Bitlist{0x03},
|
||||
aggregationBitfield: bitfield.Bitlist{0x05},
|
||||
wantedAttestingIndices: []uint64{47},
|
||||
},
|
||||
{
|
||||
aggregationBitfield: bitfield.Bitlist{0x01},
|
||||
aggregationBitfield: bitfield.Bitlist{0x04},
|
||||
wantedAttestingIndices: []uint64{},
|
||||
},
|
||||
}
|
||||
|
||||
var sig [96]byte
|
||||
copy(sig[:], "signed")
|
||||
attestation := ðpb.Attestation{
|
||||
attestation := testutil.HydrateAttestation(ðpb.Attestation{
|
||||
Signature: sig[:],
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
|
||||
Target: ðpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
|
||||
},
|
||||
}
|
||||
})
|
||||
for _, tt := range tests {
|
||||
attestation.AggregationBits = tt.aggregationBitfield
|
||||
wanted := ðpb.IndexedAttestation{
|
||||
@@ -506,13 +505,14 @@ func TestConvertToIndexed_OK(t *testing.T) {
|
||||
|
||||
committee, err := helpers.BeaconCommitteeFromState(state, attestation.Data.Slot, attestation.Data.CommitteeIndex)
|
||||
require.NoError(t, err)
|
||||
ia := attestationutil.ConvertToIndexed(context.Background(), attestation, committee)
|
||||
ia, err := attestationutil.ConvertToIndexed(context.Background(), attestation, committee)
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, wanted, ia, "Convert attestation to indexed attestation didn't result as wanted")
|
||||
}
|
||||
}
|
||||
|
||||
func TestVerifyIndexedAttestation_OK(t *testing.T) {
|
||||
numOfValidators := 4 * params.BeaconConfig().SlotsPerEpoch
|
||||
numOfValidators := uint64(params.BeaconConfig().SlotsPerEpoch.Mul(4))
|
||||
validators := make([]*ethpb.Validator, numOfValidators)
|
||||
_, keys, err := testutil.DeterministicDepositsAndKeys(numOfValidators)
|
||||
require.NoError(t, err)
|
||||
@@ -539,58 +539,39 @@ func TestVerifyIndexedAttestation_OK(t *testing.T) {
|
||||
attestation *ethpb.IndexedAttestation
|
||||
}{
|
||||
{attestation: ðpb.IndexedAttestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Data: testutil.HydrateAttestationData(ðpb.AttestationData{
|
||||
Target: ðpb.Checkpoint{
|
||||
Epoch: 2,
|
||||
Root: make([]byte, 32),
|
||||
},
|
||||
Source: ðpb.Checkpoint{
|
||||
Root: make([]byte, 32),
|
||||
},
|
||||
BeaconBlockRoot: make([]byte, 32),
|
||||
},
|
||||
Source: ðpb.Checkpoint{},
|
||||
}),
|
||||
AttestingIndices: []uint64{1},
|
||||
Signature: make([]byte, 96),
|
||||
}},
|
||||
{attestation: ðpb.IndexedAttestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Data: testutil.HydrateAttestationData(ðpb.AttestationData{
|
||||
Target: ðpb.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: make([]byte, 32),
|
||||
},
|
||||
Source: ðpb.Checkpoint{
|
||||
Root: make([]byte, 32),
|
||||
},
|
||||
BeaconBlockRoot: make([]byte, 32),
|
||||
},
|
||||
}),
|
||||
AttestingIndices: []uint64{47, 99, 101},
|
||||
Signature: make([]byte, 96),
|
||||
}},
|
||||
{attestation: ðpb.IndexedAttestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Data: testutil.HydrateAttestationData(ðpb.AttestationData{
|
||||
Target: ðpb.Checkpoint{
|
||||
Epoch: 4,
|
||||
Root: make([]byte, 32),
|
||||
},
|
||||
Source: ðpb.Checkpoint{
|
||||
Root: make([]byte, 32),
|
||||
},
|
||||
BeaconBlockRoot: make([]byte, 32),
|
||||
},
|
||||
}),
|
||||
AttestingIndices: []uint64{21, 72},
|
||||
Signature: make([]byte, 96),
|
||||
}},
|
||||
{attestation: ðpb.IndexedAttestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Data: testutil.HydrateAttestationData(ðpb.AttestationData{
|
||||
Target: ðpb.Checkpoint{
|
||||
Epoch: 7,
|
||||
Root: make([]byte, 32),
|
||||
},
|
||||
Source: ðpb.Checkpoint{
|
||||
Root: make([]byte, 32),
|
||||
},
|
||||
BeaconBlockRoot: make([]byte, 32),
|
||||
},
|
||||
}),
|
||||
AttestingIndices: []uint64{100, 121, 122},
|
||||
Signature: make([]byte, 96),
|
||||
}},
|
||||
@@ -624,7 +605,7 @@ func TestValidateIndexedAttestation_AboveMaxLength(t *testing.T) {
		indexedAtt1.AttestingIndices[i] = i
		indexedAtt1.Data = &ethpb.AttestationData{
			Target: &ethpb.Checkpoint{
				Epoch: i,
				Epoch: types.Epoch(i),
			},
		}
	}
@@ -635,11 +616,11 @@ func TestValidateIndexedAttestation_AboveMaxLength(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestValidateIndexedAttestation_BadAttestationsSignatureSet(t *testing.T) {
|
||||
beaconState, keys := testutil.DeterministicGenesisState(t, 1000)
|
||||
beaconState, keys := testutil.DeterministicGenesisState(t, 128)
|
||||
|
||||
sig := keys[0].Sign([]byte{'t', 'e', 's', 't'})
|
||||
list := bitfield.Bitlist{0b11111111}
|
||||
atts := []*ethpb.Attestation{}
|
||||
list := bitfield.Bitlist{0b11111}
|
||||
var atts []*ethpb.Attestation
|
||||
for i := uint64(0); i < 1000; i++ {
|
||||
atts = append(atts, ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
@@ -656,7 +637,7 @@ func TestValidateIndexedAttestation_BadAttestationsSignatureSet(t *testing.T) {
|
||||
assert.ErrorContains(t, want, err)
|
||||
|
||||
atts = []*ethpb.Attestation{}
|
||||
list = bitfield.Bitlist{0b00000000}
|
||||
list = bitfield.Bitlist{0b10000}
|
||||
for i := uint64(0); i < 1000; i++ {
|
||||
atts = append(atts, ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
@@ -676,79 +657,9 @@ func TestValidateIndexedAttestation_BadAttestationsSignatureSet(t *testing.T) {
|
||||
assert.ErrorContains(t, want, err)
|
||||
}
|
||||
|
||||
func TestVerifyAttestations_VerifiesMultipleAttestations(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
numOfValidators := 4 * params.BeaconConfig().SlotsPerEpoch
|
||||
validators := make([]*ethpb.Validator, numOfValidators)
|
||||
_, keys, err := testutil.DeterministicDepositsAndKeys(numOfValidators)
|
||||
require.NoError(t, err)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
PublicKey: keys[i].PublicKey().Marshal(),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
}
|
||||
}
|
||||
|
||||
st := testutil.NewBeaconState()
|
||||
require.NoError(t, st.SetSlot(5))
|
||||
require.NoError(t, st.SetValidators(validators))
|
||||
|
||||
comm1, err := helpers.BeaconCommitteeFromState(st, 1 /*slot*/, 0 /*committeeIndex*/)
|
||||
require.NoError(t, err)
|
||||
att1 := ðpb.Attestation{
|
||||
AggregationBits: bitfield.NewBitlist(uint64(len(comm1))),
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 0,
|
||||
BeaconBlockRoot: make([]byte, 32),
|
||||
Target: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
Source: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
},
|
||||
Signature: make([]byte, 96),
|
||||
}
|
||||
domain, err := helpers.Domain(st.Fork(), st.Fork().Epoch, params.BeaconConfig().DomainBeaconAttester, st.GenesisValidatorRoot())
|
||||
require.NoError(t, err)
|
||||
root, err := helpers.ComputeSigningRoot(att1.Data, domain)
|
||||
require.NoError(t, err)
|
||||
var sigs []bls.Signature
|
||||
for i, u := range comm1 {
|
||||
att1.AggregationBits.SetBitAt(uint64(i), true)
|
||||
sigs = append(sigs, keys[u].Sign(root[:]))
|
||||
}
|
||||
att1.Signature = bls.AggregateSignatures(sigs).Marshal()
|
||||
|
||||
comm2, err := helpers.BeaconCommitteeFromState(st, 1 /*slot*/, 1 /*committeeIndex*/)
|
||||
require.NoError(t, err)
|
||||
att2 := ðpb.Attestation{
|
||||
AggregationBits: bitfield.NewBitlist(uint64(len(comm2))),
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 1,
|
||||
BeaconBlockRoot: make([]byte, 32),
|
||||
Target: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
Source: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
},
|
||||
Signature: make([]byte, 96),
|
||||
}
|
||||
root, err = helpers.ComputeSigningRoot(att2.Data, domain)
|
||||
require.NoError(t, err)
|
||||
sigs = nil
|
||||
for i, u := range comm2 {
|
||||
att2.AggregationBits.SetBitAt(uint64(i), true)
|
||||
sigs = append(sigs, keys[u].Sign(root[:]))
|
||||
}
|
||||
att2.Signature = bls.AggregateSignatures(sigs).Marshal()
|
||||
|
||||
b := testutil.NewBeaconBlock()
|
||||
b.Block.Body.Attestations = []*ethpb.Attestation{att1, att2}
|
||||
require.NoError(t, blocks.VerifyAttestationsSignatures(ctx, st, b))
|
||||
}
|
||||
|
||||
func TestVerifyAttestations_HandlesPlannedFork(t *testing.T) {
|
||||
// In this test, att1 is from the prior fork and att2 is from the new fork.
|
||||
ctx := context.Background()
|
||||
numOfValidators := 4 * params.BeaconConfig().SlotsPerEpoch
|
||||
numOfValidators := uint64(params.BeaconConfig().SlotsPerEpoch.Mul(4))
|
||||
validators := make([]*ethpb.Validator, numOfValidators)
|
||||
_, keys, err := testutil.DeterministicDepositsAndKeys(numOfValidators)
|
||||
require.NoError(t, err)
|
||||
@@ -760,7 +671,8 @@ func TestVerifyAttestations_HandlesPlannedFork(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
st := testutil.NewBeaconState()
|
||||
st, err := testutil.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st.SetSlot(35))
|
||||
require.NoError(t, st.SetValidators(validators))
|
||||
require.NoError(t, st.SetFork(&pb.Fork{
|
||||
@@ -771,17 +683,12 @@ func TestVerifyAttestations_HandlesPlannedFork(t *testing.T) {
|
||||
|
||||
comm1, err := helpers.BeaconCommitteeFromState(st, 1 /*slot*/, 0 /*committeeIndex*/)
|
||||
require.NoError(t, err)
|
||||
att1 := ðpb.Attestation{
|
||||
att1 := testutil.HydrateAttestation(ðpb.Attestation{
|
||||
AggregationBits: bitfield.NewBitlist(uint64(len(comm1))),
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 0,
|
||||
BeaconBlockRoot: make([]byte, 32),
|
||||
Source: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
Target: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
Slot: 1,
|
||||
},
|
||||
Signature: make([]byte, 96),
|
||||
}
|
||||
})
|
||||
prevDomain, err := helpers.Domain(st.Fork(), st.Fork().Epoch-1, params.BeaconConfig().DomainBeaconAttester, st.GenesisValidatorRoot())
|
||||
require.NoError(t, err)
|
||||
root, err := helpers.ComputeSigningRoot(att1.Data, prevDomain)
|
||||
@@ -795,18 +702,15 @@ func TestVerifyAttestations_HandlesPlannedFork(t *testing.T) {
|
||||
|
||||
comm2, err := helpers.BeaconCommitteeFromState(st, 1*params.BeaconConfig().SlotsPerEpoch+1 /*slot*/, 1 /*committeeIndex*/)
|
||||
require.NoError(t, err)
|
||||
att2 := ðpb.Attestation{
|
||||
att2 := testutil.HydrateAttestation(ðpb.Attestation{
|
||||
AggregationBits: bitfield.NewBitlist(uint64(len(comm2))),
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 1*params.BeaconConfig().SlotsPerEpoch + 1,
|
||||
CommitteeIndex: 1,
|
||||
BeaconBlockRoot: make([]byte, 32),
|
||||
Source: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
Target: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
Slot: 1*params.BeaconConfig().SlotsPerEpoch + 1,
|
||||
CommitteeIndex: 1,
|
||||
},
|
||||
Signature: make([]byte, 96),
|
||||
}
|
||||
})
|
||||
currDomain, err := helpers.Domain(st.Fork(), st.Fork().Epoch, params.BeaconConfig().DomainBeaconAttester, st.GenesisValidatorRoot())
|
||||
require.NoError(t, err)
|
||||
root, err = helpers.ComputeSigningRoot(att2.Data, currDomain)
|
||||
require.NoError(t, err)
|
||||
sigs = nil
|
||||
@@ -815,15 +719,11 @@ func TestVerifyAttestations_HandlesPlannedFork(t *testing.T) {
|
||||
sigs = append(sigs, keys[u].Sign(root[:]))
|
||||
}
|
||||
att2.Signature = bls.AggregateSignatures(sigs).Marshal()
|
||||
|
||||
b := testutil.NewBeaconBlock()
|
||||
b.Block.Body.Attestations = []*ethpb.Attestation{att1, att2}
|
||||
require.NoError(t, blocks.VerifyAttestationsSignatures(ctx, st, b))
|
||||
}
|
||||
|
||||
func TestRetrieveAttestationSignatureSet_VerifiesMultipleAttestations(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
numOfValidators := 4 * params.BeaconConfig().SlotsPerEpoch
|
||||
numOfValidators := uint64(params.BeaconConfig().SlotsPerEpoch.Mul(4))
|
||||
validators := make([]*ethpb.Validator, numOfValidators)
|
||||
_, keys, err := testutil.DeterministicDepositsAndKeys(numOfValidators)
|
||||
require.NoError(t, err)
|
||||
@@ -835,23 +735,19 @@ func TestRetrieveAttestationSignatureSet_VerifiesMultipleAttestations(t *testing
|
||||
}
|
||||
}
|
||||
|
||||
st := testutil.NewBeaconState()
|
||||
st, err := testutil.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st.SetSlot(5))
|
||||
require.NoError(t, st.SetValidators(validators))
|
||||
|
||||
comm1, err := helpers.BeaconCommitteeFromState(st, 1 /*slot*/, 0 /*committeeIndex*/)
|
||||
require.NoError(t, err)
|
||||
att1 := ðpb.Attestation{
|
||||
att1 := testutil.HydrateAttestation(ðpb.Attestation{
|
||||
AggregationBits: bitfield.NewBitlist(uint64(len(comm1))),
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 0,
|
||||
BeaconBlockRoot: make([]byte, 32),
|
||||
Target: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
Source: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
Slot: 1,
|
||||
},
|
||||
Signature: make([]byte, 96),
|
||||
}
|
||||
})
|
||||
domain, err := helpers.Domain(st.Fork(), st.Fork().Epoch, params.BeaconConfig().DomainBeaconAttester, st.GenesisValidatorRoot())
|
||||
require.NoError(t, err)
|
||||
root, err := helpers.ComputeSigningRoot(att1.Data, domain)
|
||||
@@ -865,17 +761,13 @@ func TestRetrieveAttestationSignatureSet_VerifiesMultipleAttestations(t *testing
|
||||
|
||||
comm2, err := helpers.BeaconCommitteeFromState(st, 1 /*slot*/, 1 /*committeeIndex*/)
|
||||
require.NoError(t, err)
|
||||
att2 := ðpb.Attestation{
|
||||
att2 := testutil.HydrateAttestation(ðpb.Attestation{
|
||||
AggregationBits: bitfield.NewBitlist(uint64(len(comm2))),
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 1,
|
||||
BeaconBlockRoot: make([]byte, 32),
|
||||
Target: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
Source: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
Slot: 1,
|
||||
CommitteeIndex: 1,
|
||||
},
|
||||
Signature: make([]byte, 96),
|
||||
}
|
||||
})
|
||||
root, err = helpers.ComputeSigningRoot(att2.Data, domain)
|
||||
require.NoError(t, err)
|
||||
sigs = nil
|
||||
|
||||
@@ -5,11 +5,14 @@ import (
|
||||
"sort"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
v "github.com/prysmaticlabs/prysm/beacon-chain/core/validators"
|
||||
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
iface "github.com/prysmaticlabs/prysm/beacon-chain/state/interface"
|
||||
"github.com/prysmaticlabs/prysm/shared/attestationutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/slashutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/sliceutil"
|
||||
)
|
||||
|
||||
@@ -37,8 +40,8 @@ func ProcessAttesterSlashings(
	beaconState *stateTrie.BeaconState,
	b *ethpb.SignedBeaconBlock,
) (*stateTrie.BeaconState, error) {
	if b.Block == nil || b.Block.Body == nil {
		return nil, errors.New("block and block body can't be nil")
	if err := helpers.VerifyNilBeaconBlock(b); err != nil {
		return nil, err
	}

	body := b.Block.Body
@@ -53,14 +56,14 @@ func ProcessAttesterSlashings(
	currentEpoch := helpers.SlotToEpoch(beaconState.Slot())
	var err error
	var slashedAny bool
	var val *stateTrie.ReadOnlyValidator
	var val iface.ReadOnlyValidator
	for _, validatorIndex := range slashableIndices {
		val, err = beaconState.ValidatorAtIndexReadOnly(validatorIndex)
		val, err = beaconState.ValidatorAtIndexReadOnly(types.ValidatorIndex(validatorIndex))
		if err != nil {
			return nil, err
		}
		if helpers.IsSlashableValidator(val.ActivationEpoch(), val.WithdrawableEpoch(), val.Slashed(), currentEpoch) {
			beaconState, err = v.SlashValidator(beaconState, validatorIndex)
			beaconState, err = v.SlashValidator(beaconState, types.ValidatorIndex(validatorIndex))
			if err != nil {
				return nil, errors.Wrapf(err, "could not slash validator index %d",
					validatorIndex)
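helpers.VerifyNilBeaconBlock replaces the inline nil checks here and in the other Process* functions below. A hedged sketch of what such a guard amounts to (this is not the helper's exact body; it assumes the ethpb and errors imports already used in this file):

// verifyNilBeaconBlockSketch rejects a signed block whose nested block or body
// is nil before any of their fields are dereferenced. The real helpers function
// may check additional fields.
func verifyNilBeaconBlockSketch(b *ethpb.SignedBeaconBlock) error {
	if b == nil || b.Block == nil {
		return errors.New("signed beacon block can't be nil")
	}
	if b.Block.Body == nil {
		return errors.New("block body can't be nil")
	}
	return nil
}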
@@ -115,12 +118,15 @@ func VerifyAttesterSlashing(ctx context.Context, beaconState *stateTrie.BeaconSt
// # Surround vote
// (data_1.source.epoch < data_2.source.epoch and data_2.target.epoch < data_1.target.epoch)
// )
func IsSlashableAttestationData(data1 *ethpb.AttestationData, data2 *ethpb.AttestationData) bool {
func IsSlashableAttestationData(data1, data2 *ethpb.AttestationData) bool {
	if data1 == nil || data2 == nil || data1.Target == nil || data2.Target == nil || data1.Source == nil || data2.Source == nil {
		return false
	}
	isDoubleVote := !attestationutil.AttDataIsEqual(data1, data2) && data1.Target.Epoch == data2.Target.Epoch
	isSurroundVote := data1.Source.Epoch < data2.Source.Epoch && data2.Target.Epoch < data1.Target.Epoch
	att1 := &ethpb.IndexedAttestation{Data: data1}
	att2 := &ethpb.IndexedAttestation{Data: data2}
	// Check if att1 is surrounding att2.
	isSurroundVote := slashutil.IsSurround(att1, att2)
	return isDoubleVote || isSurroundVote
}

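The surround condition that slashutil.IsSurround evaluates is the one quoted in the spec comment above; spelled out as a standalone sketch (assumes this file's ethpb import):

// isSurroundSketch reports whether att1 surrounds att2: att1's source epoch is
// strictly older and its target epoch strictly newer than att2's, matching
// data_1.source.epoch < data_2.source.epoch and data_2.target.epoch < data_1.target.epoch.
func isSurroundSketch(att1, att2 *ethpb.IndexedAttestation) bool {
	return att1.Data.Source.Epoch < att2.Data.Source.Epoch &&
		att2.Data.Target.Epoch < att1.Data.Target.Epoch
}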
|
||||
@@ -2,9 +2,9 @@ package blocks_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
@@ -19,16 +19,14 @@ import (
|
||||
)
|
||||
|
||||
func TestSlashableAttestationData_CanSlash(t *testing.T) {
|
||||
att1 := ðpb.AttestationData{
|
||||
Target: ðpb.Checkpoint{Epoch: 1, Root: make([]byte, 32)},
|
||||
Source: ðpb.Checkpoint{Root: bytesutil.PadTo([]byte{'A'}, 32)},
|
||||
BeaconBlockRoot: make([]byte, 32),
|
||||
}
|
||||
att2 := ðpb.AttestationData{
|
||||
Target: ðpb.Checkpoint{Epoch: 1, Root: make([]byte, 32)},
|
||||
Source: ðpb.Checkpoint{Root: bytesutil.PadTo([]byte{'B'}, 32)},
|
||||
BeaconBlockRoot: make([]byte, 32),
|
||||
}
|
||||
att1 := testutil.HydrateAttestationData(ðpb.AttestationData{
|
||||
Target: ðpb.Checkpoint{Epoch: 1, Root: make([]byte, 32)},
|
||||
Source: ðpb.Checkpoint{Root: bytesutil.PadTo([]byte{'A'}, 32)},
|
||||
})
|
||||
att2 := testutil.HydrateAttestationData(ðpb.AttestationData{
|
||||
Target: ðpb.Checkpoint{Epoch: 1, Root: make([]byte, 32)},
|
||||
Source: ðpb.Checkpoint{Root: bytesutil.PadTo([]byte{'B'}, 32)},
|
||||
})
|
||||
assert.Equal(t, true, blocks.IsSlashableAttestationData(att1, att2), "Atts should have been slashable")
|
||||
att1.Target.Epoch = 4
|
||||
att1.Source.Epoch = 2
|
||||
@@ -37,28 +35,16 @@ func TestSlashableAttestationData_CanSlash(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestProcessAttesterSlashings_DataNotSlashable(t *testing.T) {
|
||||
slashings := []*ethpb.AttesterSlashing{
|
||||
{
|
||||
Attestation_1: ðpb.IndexedAttestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
|
||||
Target: ðpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
|
||||
BeaconBlockRoot: make([]byte, 32),
|
||||
},
|
||||
Signature: make([]byte, 96),
|
||||
},
|
||||
Attestation_2: ðpb.IndexedAttestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Epoch: 1, Root: make([]byte, 32)},
|
||||
Target: ðpb.Checkpoint{Epoch: 1, Root: make([]byte, 32)},
|
||||
BeaconBlockRoot: make([]byte, 32),
|
||||
},
|
||||
Signature: make([]byte, 96),
|
||||
},
|
||||
},
|
||||
}
|
||||
registry := []*ethpb.Validator{}
|
||||
currentSlot := uint64(0)
|
||||
slashings := []*ethpb.AttesterSlashing{{
|
||||
Attestation_1: testutil.HydrateIndexedAttestation(ðpb.IndexedAttestation{}),
|
||||
Attestation_2: testutil.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Epoch: 1},
|
||||
Target: ðpb.Checkpoint{Epoch: 1}},
|
||||
})}}
|
||||
|
||||
var registry []*ethpb.Validator
|
||||
currentSlot := types.Slot(0)
|
||||
|
||||
beaconState, err := stateTrie.InitializeFromProto(&pb.BeaconState{
|
||||
Validators: registry,
|
||||
@@ -71,14 +57,13 @@ func TestProcessAttesterSlashings_DataNotSlashable(t *testing.T) {
|
||||
AttesterSlashings: slashings,
|
||||
},
|
||||
}
|
||||
want := fmt.Sprint("attestations are not slashable")
|
||||
_, err = blocks.ProcessAttesterSlashings(context.Background(), beaconState, b)
|
||||
assert.ErrorContains(t, want, err)
|
||||
assert.ErrorContains(t, "attestations are not slashable", err)
|
||||
}
|
||||
|
||||
func TestProcessAttesterSlashings_IndexedAttestationFailedToVerify(t *testing.T) {
|
||||
registry := []*ethpb.Validator{}
|
||||
currentSlot := uint64(0)
|
||||
var registry []*ethpb.Validator
|
||||
currentSlot := types.Slot(0)
|
||||
|
||||
beaconState, err := stateTrie.InitializeFromProto(&pb.BeaconState{
|
||||
Validators: registry,
|
||||
@@ -88,24 +73,15 @@ func TestProcessAttesterSlashings_IndexedAttestationFailedToVerify(t *testing.T)
|
||||
|
||||
slashings := []*ethpb.AttesterSlashing{
|
||||
{
|
||||
Attestation_1: ðpb.IndexedAttestation{
|
||||
Attestation_1: testutil.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Epoch: 1, Root: make([]byte, 32)},
|
||||
Target: ðpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
|
||||
BeaconBlockRoot: make([]byte, 32),
|
||||
Source: ðpb.Checkpoint{Epoch: 1},
|
||||
},
|
||||
AttestingIndices: make([]uint64, params.BeaconConfig().MaxValidatorsPerCommittee+1),
|
||||
Signature: make([]byte, 96),
|
||||
},
|
||||
Attestation_2: ðpb.IndexedAttestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
|
||||
Target: ðpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
|
||||
BeaconBlockRoot: make([]byte, 32),
|
||||
},
|
||||
}),
|
||||
Attestation_2: testutil.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
AttestingIndices: make([]uint64, params.BeaconConfig().MaxValidatorsPerCommittee+1),
|
||||
Signature: make([]byte, 96),
|
||||
},
|
||||
}),
|
||||
},
|
||||
}
|
||||
|
||||
@@ -116,25 +92,22 @@ func TestProcessAttesterSlashings_IndexedAttestationFailedToVerify(t *testing.T)
|
||||
},
|
||||
}
|
||||
|
||||
want := fmt.Sprint("validator indices count exceeds MAX_VALIDATORS_PER_COMMITTEE")
|
||||
_, err = blocks.ProcessAttesterSlashings(context.Background(), beaconState, b)
|
||||
assert.ErrorContains(t, want, err)
|
||||
assert.ErrorContains(t, "validator indices count exceeds MAX_VALIDATORS_PER_COMMITTEE", err)
|
||||
}
|
||||
|
||||
func TestProcessAttesterSlashings_AppliesCorrectStatus(t *testing.T) {
|
||||
beaconState, privKeys := testutil.DeterministicGenesisState(t, 100)
|
||||
for _, vv := range beaconState.Validators() {
|
||||
vv.WithdrawableEpoch = 1 * params.BeaconConfig().SlotsPerEpoch
|
||||
vv.WithdrawableEpoch = types.Epoch(params.BeaconConfig().SlotsPerEpoch)
|
||||
}
|
||||
|
||||
att1 := ðpb.IndexedAttestation{
|
||||
att1 := testutil.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Epoch: 1, Root: make([]byte, 32)},
|
||||
Target: ðpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
|
||||
BeaconBlockRoot: make([]byte, 32),
|
||||
Source: ðpb.Checkpoint{Epoch: 1},
|
||||
},
|
||||
AttestingIndices: []uint64{0, 1},
|
||||
}
|
||||
})
|
||||
domain, err := helpers.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorRoot())
|
||||
require.NoError(t, err)
|
||||
signingRoot, err := helpers.ComputeSigningRoot(att1.Data, domain)
|
||||
@@ -142,22 +115,17 @@ func TestProcessAttesterSlashings_AppliesCorrectStatus(t *testing.T) {
|
||||
sig0 := privKeys[0].Sign(signingRoot[:])
|
||||
sig1 := privKeys[1].Sign(signingRoot[:])
|
||||
aggregateSig := bls.AggregateSignatures([]bls.Signature{sig0, sig1})
|
||||
att1.Signature = aggregateSig.Marshal()[:]
|
||||
att1.Signature = aggregateSig.Marshal()
|
||||
|
||||
att2 := ðpb.IndexedAttestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
|
||||
Target: ðpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
|
||||
BeaconBlockRoot: make([]byte, 32),
|
||||
},
|
||||
att2 := testutil.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
AttestingIndices: []uint64{0, 1},
|
||||
}
|
||||
})
|
||||
signingRoot, err = helpers.ComputeSigningRoot(att2.Data, domain)
|
||||
assert.NoError(t, err, "Could not get signing root of beacon block header")
|
||||
sig0 = privKeys[0].Sign(signingRoot[:])
|
||||
sig1 = privKeys[1].Sign(signingRoot[:])
|
||||
aggregateSig = bls.AggregateSignatures([]bls.Signature{sig0, sig1})
|
||||
att2.Signature = aggregateSig.Marshal()[:]
|
||||
att2.Signature = aggregateSig.Marshal()
|
||||
|
||||
slashings := []*ethpb.AttesterSlashing{
|
||||
{
|
||||
|
||||
@@ -5,26 +5,25 @@ import (
|
||||
"testing"
|
||||
|
||||
fuzz "github.com/google/gofuzz"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
eth "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
|
||||
//"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
beaconstate "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
ethereum_beacon_p2p_v1 "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
)
|
||||
|
||||
func TestFuzzProcessAttestationNoVerify_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
ctx := context.Background()
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
state := &pb.BeaconState{}
|
||||
att := ð.Attestation{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(att)
|
||||
s, err := beaconstate.InitializeFromProtoUnsafe(state)
|
||||
s, err := stateTrie.InitializeFromProtoUnsafe(state)
|
||||
require.NoError(t, err)
|
||||
_, err = ProcessAttestationNoVerifySignature(ctx, s, att)
|
||||
_ = err
|
||||
}
|
||||
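Every fuzz test in this file follows the same harness shape; a condensed sketch of that pattern (the helper below is hypothetical and assumes this file's imports):

// fuzzStateSketch captures the shared shape of these tests: fill the BeaconState
// proto with pseudo-random data, wrap it, hand it to the target, and only check
// that a non-nil result never comes back together with an error.
func fuzzStateSketch(t *testing.T, rounds int, target func(s *stateTrie.BeaconState) (interface{}, error)) {
	fuzzer := fuzz.NewWithSeed(0)
	state := &pb.BeaconState{}
	for i := 0; i < rounds; i++ {
		fuzzer.Fuzz(state)
		s, err := stateTrie.InitializeFromProtoUnsafe(state)
		require.NoError(t, err)
		r, err := target(s)
		if err != nil && r != nil {
			t.Fatalf("return value should be nil on err. found: %v on error: %v", r, err)
		}
	}
}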
@@ -32,14 +31,15 @@ func TestFuzzProcessAttestationNoVerify_10000(t *testing.T) {
|
||||
|
||||
func TestFuzzProcessBlockHeader_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
state := &pb.BeaconState{}
|
||||
block := ð.SignedBeaconBlock{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(block)
|
||||
|
||||
s, err := beaconstate.InitializeFromProtoUnsafe(state)
|
||||
s, err := stateTrie.InitializeFromProtoUnsafe(state)
|
||||
require.NoError(t, err)
|
||||
_, err = ProcessBlockHeader(context.Background(), s, block)
|
||||
_ = err
|
||||
}
|
||||
@@ -47,13 +47,13 @@ func TestFuzzProcessBlockHeader_10000(t *testing.T) {
|
||||
|
||||
func TestFuzzverifyDepositDataSigningRoot_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
ba := []byte{}
|
||||
var ba []byte
|
||||
pubkey := [48]byte{}
|
||||
sig := [96]byte{}
|
||||
domain := [4]byte{}
|
||||
p := []byte{}
|
||||
s := []byte{}
|
||||
d := []byte{}
|
||||
var p []byte
|
||||
var s []byte
|
||||
var d []byte
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(&ba)
|
||||
fuzzer.Fuzz(&pubkey)
|
||||
@@ -63,6 +63,7 @@ func TestFuzzverifyDepositDataSigningRoot_10000(t *testing.T) {
|
||||
fuzzer.Fuzz(&s)
|
||||
fuzzer.Fuzz(&d)
|
||||
err := verifySignature(ba, pubkey[:], sig[:], domain[:])
|
||||
_ = err
|
||||
err = verifySignature(ba, p, s, d)
|
||||
_ = err
|
||||
}
|
||||
@@ -98,13 +99,14 @@ func TestFuzzareEth1DataEqual_10000(t *testing.T) {
|
||||
func TestFuzzEth1DataHasEnoughSupport_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
eth1data := ð.Eth1Data{}
|
||||
stateVotes := []*eth.Eth1Data{}
|
||||
var stateVotes []*eth.Eth1Data
|
||||
for i := 0; i < 100000; i++ {
|
||||
fuzzer.Fuzz(eth1data)
|
||||
fuzzer.Fuzz(&stateVotes)
|
||||
s, err := beaconstate.InitializeFromProto(ðereum_beacon_p2p_v1.BeaconState{
|
||||
s, err := stateTrie.InitializeFromProto(&pb.BeaconState{
|
||||
Eth1DataVotes: stateVotes,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
_, err = Eth1DataHasEnoughSupport(s, eth1data)
|
||||
_ = err
|
||||
}
|
||||
@@ -113,13 +115,14 @@ func TestFuzzEth1DataHasEnoughSupport_10000(t *testing.T) {
|
||||
|
||||
func TestFuzzProcessBlockHeaderNoVerify_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
state := &pb.BeaconState{}
|
||||
block := ð.BeaconBlock{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(block)
|
||||
s, err := beaconstate.InitializeFromProtoUnsafe(state)
|
||||
s, err := stateTrie.InitializeFromProtoUnsafe(state)
|
||||
require.NoError(t, err)
|
||||
_, err = ProcessBlockHeaderNoVerify(s, block)
|
||||
_ = err
|
||||
}
|
||||
@@ -127,13 +130,14 @@ func TestFuzzProcessBlockHeaderNoVerify_10000(t *testing.T) {
|
||||
|
||||
func TestFuzzProcessRandao_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
state := &pb.BeaconState{}
|
||||
b := ð.SignedBeaconBlock{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(b)
|
||||
s, err := beaconstate.InitializeFromProtoUnsafe(state)
|
||||
s, err := stateTrie.InitializeFromProtoUnsafe(state)
|
||||
require.NoError(t, err)
|
||||
r, err := ProcessRandao(context.Background(), s, b)
|
||||
if err != nil && r != nil {
|
||||
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, b)
|
||||
@@ -143,13 +147,14 @@ func TestFuzzProcessRandao_10000(t *testing.T) {
|
||||
|
||||
func TestFuzzProcessRandaoNoVerify_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
state := &pb.BeaconState{}
|
||||
blockBody := ð.BeaconBlockBody{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(blockBody)
|
||||
s, err := beaconstate.InitializeFromProtoUnsafe(state)
|
||||
s, err := stateTrie.InitializeFromProtoUnsafe(state)
|
||||
require.NoError(t, err)
|
||||
r, err := ProcessRandaoNoVerify(s, blockBody)
|
||||
if err != nil && r != nil {
|
||||
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, blockBody)
|
||||
@@ -159,13 +164,14 @@ func TestFuzzProcessRandaoNoVerify_10000(t *testing.T) {
|
||||
|
||||
func TestFuzzProcessProposerSlashings_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
state := &pb.BeaconState{}
|
||||
b := ð.SignedBeaconBlock{}
|
||||
ctx := context.Background()
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(b)
|
||||
s, err := beaconstate.InitializeFromProtoUnsafe(state)
|
||||
s, err := stateTrie.InitializeFromProtoUnsafe(state)
|
||||
require.NoError(t, err)
|
||||
r, err := ProcessProposerSlashings(ctx, s, b)
|
||||
if err != nil && r != nil {
|
||||
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, b)
|
||||
@@ -175,12 +181,13 @@ func TestFuzzProcessProposerSlashings_10000(t *testing.T) {
|
||||
|
||||
func TestFuzzVerifyProposerSlashing_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
state := &pb.BeaconState{}
|
||||
proposerSlashing := ð.ProposerSlashing{}
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(proposerSlashing)
|
||||
s, err := beaconstate.InitializeFromProtoUnsafe(state)
|
||||
s, err := stateTrie.InitializeFromProtoUnsafe(state)
|
||||
require.NoError(t, err)
|
||||
err = VerifyProposerSlashing(s, proposerSlashing)
|
||||
_ = err
|
||||
}
|
||||
@@ -188,13 +195,14 @@ func TestFuzzVerifyProposerSlashing_10000(t *testing.T) {
|
||||
|
||||
func TestFuzzProcessAttesterSlashings_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
state := &pb.BeaconState{}
|
||||
b := ð.SignedBeaconBlock{}
|
||||
ctx := context.Background()
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(b)
|
||||
s, err := beaconstate.InitializeFromProtoUnsafe(state)
|
||||
s, err := stateTrie.InitializeFromProtoUnsafe(state)
|
||||
require.NoError(t, err)
|
||||
r, err := ProcessAttesterSlashings(ctx, s, b)
|
||||
if err != nil && r != nil {
|
||||
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, b)
|
||||
@@ -204,13 +212,14 @@ func TestFuzzProcessAttesterSlashings_10000(t *testing.T) {
|
||||
|
||||
func TestFuzzVerifyAttesterSlashing_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
state := &pb.BeaconState{}
|
||||
attesterSlashing := ð.AttesterSlashing{}
|
||||
ctx := context.Background()
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(attesterSlashing)
|
||||
s, err := beaconstate.InitializeFromProtoUnsafe(state)
|
||||
s, err := stateTrie.InitializeFromProtoUnsafe(state)
|
||||
require.NoError(t, err)
|
||||
err = VerifyAttesterSlashing(ctx, s, attesterSlashing)
|
||||
_ = err
|
||||
}
|
||||
@@ -240,13 +249,14 @@ func TestFuzzslashableAttesterIndices_10000(t *testing.T) {
|
||||
|
||||
func TestFuzzProcessAttestations_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
state := &pb.BeaconState{}
|
||||
b := ð.SignedBeaconBlock{}
|
||||
ctx := context.Background()
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(b)
|
||||
s, err := beaconstate.InitializeFromProtoUnsafe(state)
|
||||
s, err := stateTrie.InitializeFromProtoUnsafe(state)
|
||||
require.NoError(t, err)
|
||||
r, err := ProcessAttestations(ctx, s, b)
|
||||
if err != nil && r != nil {
|
||||
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, b)
|
||||
@@ -256,13 +266,14 @@ func TestFuzzProcessAttestations_10000(t *testing.T) {
|
||||
|
||||
func TestFuzzProcessAttestationsNoVerify_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
state := &pb.BeaconState{}
|
||||
b := ð.SignedBeaconBlock{}
|
||||
ctx := context.Background()
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(b)
|
||||
s, err := beaconstate.InitializeFromProtoUnsafe(state)
|
||||
s, err := stateTrie.InitializeFromProtoUnsafe(state)
|
||||
require.NoError(t, err)
|
||||
r, err := ProcessAttestationsNoVerifySignature(ctx, s, b)
|
||||
if err != nil && r != nil {
|
||||
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, b)
|
||||
@@ -272,13 +283,14 @@ func TestFuzzProcessAttestationsNoVerify_10000(t *testing.T) {
|
||||
|
||||
func TestFuzzProcessAttestation_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
state := &pb.BeaconState{}
|
||||
attestation := ð.Attestation{}
|
||||
ctx := context.Background()
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(attestation)
|
||||
s, err := beaconstate.InitializeFromProtoUnsafe(state)
|
||||
s, err := stateTrie.InitializeFromProtoUnsafe(state)
|
||||
require.NoError(t, err)
|
||||
r, err := ProcessAttestation(ctx, s, attestation)
|
||||
if err != nil && r != nil {
|
||||
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, attestation)
|
||||
@@ -288,13 +300,14 @@ func TestFuzzProcessAttestation_10000(t *testing.T) {
|
||||
|
||||
func TestFuzzVerifyIndexedAttestationn_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
state := &pb.BeaconState{}
|
||||
idxAttestation := ð.IndexedAttestation{}
|
||||
ctx := context.Background()
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(idxAttestation)
|
||||
s, err := beaconstate.InitializeFromProtoUnsafe(state)
|
||||
s, err := stateTrie.InitializeFromProtoUnsafe(state)
|
||||
require.NoError(t, err)
|
||||
err = VerifyIndexedAttestation(ctx, s, idxAttestation)
|
||||
_ = err
|
||||
}
|
||||
@@ -302,13 +315,14 @@ func TestFuzzVerifyIndexedAttestationn_10000(t *testing.T) {
|
||||
|
||||
func TestFuzzVerifyAttestation_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
state := &pb.BeaconState{}
|
||||
attestation := ð.Attestation{}
|
||||
ctx := context.Background()
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(attestation)
|
||||
s, err := beaconstate.InitializeFromProtoUnsafe(state)
|
||||
s, err := stateTrie.InitializeFromProtoUnsafe(state)
|
||||
require.NoError(t, err)
|
||||
err = VerifyAttestationSignature(ctx, s, attestation)
|
||||
_ = err
|
||||
}
|
||||
@@ -316,13 +330,14 @@ func TestFuzzVerifyAttestation_10000(t *testing.T) {
|
||||
|
||||
func TestFuzzProcessDeposits_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
state := &pb.BeaconState{}
|
||||
b := ð.SignedBeaconBlock{}
|
||||
ctx := context.Background()
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(b)
|
||||
s, err := beaconstate.InitializeFromProtoUnsafe(state)
|
||||
s, err := stateTrie.InitializeFromProtoUnsafe(state)
|
||||
require.NoError(t, err)
|
||||
r, err := ProcessDeposits(ctx, s, b)
|
||||
if err != nil && r != nil {
|
||||
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, b)
|
||||
@@ -332,14 +347,15 @@ func TestFuzzProcessDeposits_10000(t *testing.T) {
|
||||
|
||||
func TestFuzzProcessPreGenesisDeposit_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
state := &pb.BeaconState{}
|
||||
deposit := ð.Deposit{}
|
||||
ctx := context.Background()
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(deposit)
|
||||
s, err := beaconstate.InitializeFromProtoUnsafe(state)
|
||||
s, err := stateTrie.InitializeFromProtoUnsafe(state)
|
||||
require.NoError(t, err)
|
||||
r, err := ProcessPreGenesisDeposits(ctx, s, []*eth.Deposit{deposit})
|
||||
if err != nil && r != nil {
|
||||
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, deposit)
|
||||
@@ -349,13 +365,14 @@ func TestFuzzProcessPreGenesisDeposit_10000(t *testing.T) {
|
||||
|
||||
func TestFuzzProcessDeposit_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
state := &pb.BeaconState{}
|
||||
deposit := ð.Deposit{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(deposit)
|
||||
s, err := beaconstate.InitializeFromProtoUnsafe(state)
|
||||
s, err := stateTrie.InitializeFromProtoUnsafe(state)
|
||||
require.NoError(t, err)
|
||||
r, err := ProcessDeposit(s, deposit, true)
|
||||
if err != nil && r != nil {
|
||||
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, deposit)
|
||||
@@ -365,12 +382,13 @@ func TestFuzzProcessDeposit_10000(t *testing.T) {
|
||||
|
||||
func TestFuzzverifyDeposit_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
state := &pb.BeaconState{}
|
||||
deposit := ð.Deposit{}
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(deposit)
|
||||
s, err := beaconstate.InitializeFromProtoUnsafe(state)
|
||||
s, err := stateTrie.InitializeFromProtoUnsafe(state)
|
||||
require.NoError(t, err)
|
||||
err = verifyDeposit(s, deposit)
|
||||
_ = err
|
||||
}
|
||||
@@ -378,13 +396,14 @@ func TestFuzzverifyDeposit_10000(t *testing.T) {
|
||||
|
||||
func TestFuzzProcessVoluntaryExits_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
state := &pb.BeaconState{}
|
||||
b := ð.SignedBeaconBlock{}
|
||||
ctx := context.Background()
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(b)
|
||||
s, err := beaconstate.InitializeFromProtoUnsafe(state)
|
||||
s, err := stateTrie.InitializeFromProtoUnsafe(state)
|
||||
require.NoError(t, err)
|
||||
r, err := ProcessVoluntaryExits(ctx, s, b)
|
||||
if err != nil && r != nil {
|
||||
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, b)
|
||||
@@ -394,12 +413,13 @@ func TestFuzzProcessVoluntaryExits_10000(t *testing.T) {
|
||||
|
||||
func TestFuzzProcessVoluntaryExitsNoVerify_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
state := &pb.BeaconState{}
|
||||
b := ð.SignedBeaconBlock{}
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(b)
|
||||
s, err := beaconstate.InitializeFromProtoUnsafe(state)
|
||||
s, err := stateTrie.InitializeFromProtoUnsafe(state)
|
||||
require.NoError(t, err)
|
||||
r, err := ProcessVoluntaryExits(context.Background(), s, b)
|
||||
if err != nil && r != nil {
|
||||
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, b)
|
||||
@@ -410,13 +430,13 @@ func TestFuzzProcessVoluntaryExitsNoVerify_10000(t *testing.T) {
|
||||
func TestFuzzVerifyExit_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
ve := ð.SignedVoluntaryExit{}
|
||||
val := &beaconstate.ReadOnlyValidator{}
|
||||
val := stateTrie.ReadOnlyValidator{}
|
||||
fork := &pb.Fork{}
|
||||
var slot uint64
|
||||
var slot types.Slot
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(ve)
|
||||
fuzzer.Fuzz(val)
|
||||
fuzzer.Fuzz(&val)
|
||||
fuzzer.Fuzz(fork)
|
||||
fuzzer.Fuzz(&slot)
|
||||
err := VerifyExitAndSignature(val, slot, fork, ve, params.BeaconConfig().ZeroHash[:])
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
@@ -18,7 +19,7 @@ func TestProcessAttesterSlashings_RegressionSlashableIndices(t *testing.T) {
|
||||
testutil.ResetCache()
|
||||
beaconState, privKeys := testutil.DeterministicGenesisState(t, 5500)
|
||||
for _, vv := range beaconState.Validators() {
|
||||
vv.WithdrawableEpoch = 1 * params.BeaconConfig().SlotsPerEpoch
|
||||
vv.WithdrawableEpoch = types.Epoch(params.BeaconConfig().SlotsPerEpoch)
|
||||
}
|
||||
// This set of indices is very similar to the one from our sapphire testnet
|
||||
// when close to 100 validators were incorrectly slashed. The set is from 0 -5500,
|
||||
@@ -38,11 +39,7 @@ func TestProcessAttesterSlashings_RegressionSlashableIndices(t *testing.T) {
|
||||
|
||||
root1 := [32]byte{'d', 'o', 'u', 'b', 'l', 'e', '1'}
|
||||
att1 := ðpb.IndexedAttestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
|
||||
Target: ðpb.Checkpoint{Epoch: 0, Root: root1[:]},
|
||||
BeaconBlockRoot: make([]byte, 32),
|
||||
},
|
||||
Data: testutil.HydrateAttestationData(ðpb.AttestationData{Target: ðpb.Checkpoint{Epoch: 0, Root: root1[:]}}),
|
||||
AttestingIndices: setA,
|
||||
Signature: make([]byte, 96),
|
||||
}
|
||||
@@ -50,21 +47,19 @@ func TestProcessAttesterSlashings_RegressionSlashableIndices(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
signingRoot, err := helpers.ComputeSigningRoot(att1.Data, domain)
|
||||
require.NoError(t, err, "Could not get signing root of beacon block header")
|
||||
aggSigs := []bls.Signature{}
|
||||
var aggSigs []bls.Signature
|
||||
for _, index := range setA {
|
||||
sig := privKeys[index].Sign(signingRoot[:])
|
||||
aggSigs = append(aggSigs, sig)
|
||||
}
|
||||
aggregateSig := bls.AggregateSignatures(aggSigs)
|
||||
att1.Signature = aggregateSig.Marshal()[:]
|
||||
att1.Signature = aggregateSig.Marshal()
|
||||
|
||||
root2 := [32]byte{'d', 'o', 'u', 'b', 'l', 'e', '2'}
|
||||
att2 := ðpb.IndexedAttestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
|
||||
Target: ðpb.Checkpoint{Epoch: 0, Root: root2[:]},
|
||||
BeaconBlockRoot: make([]byte, 32),
|
||||
},
|
||||
Data: testutil.HydrateAttestationData(ðpb.AttestationData{
|
||||
Target: ðpb.Checkpoint{Root: root2[:]},
|
||||
}),
|
||||
AttestingIndices: setB,
|
||||
Signature: make([]byte, 96),
|
||||
}
|
||||
@@ -76,7 +71,7 @@ func TestProcessAttesterSlashings_RegressionSlashableIndices(t *testing.T) {
|
||||
aggSigs = append(aggSigs, sig)
|
||||
}
|
||||
aggregateSig = bls.AggregateSignatures(aggSigs)
|
||||
att2.Signature = aggregateSig.Marshal()[:]
|
||||
att2.Signature = aggregateSig.Marshal()
|
||||
|
||||
slashings := []*ethpb.AttesterSlashing{
|
||||
{
|
||||
|
||||
@@ -6,7 +6,6 @@ import (
|
||||
|
||||
"github.com/pkg/errors"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/go-ssz"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
@@ -33,8 +32,10 @@ func ProcessPreGenesisDeposits(
	for _, deposit := range deposits {
		pubkey := deposit.Data.PublicKey
		index, ok := beaconState.ValidatorIndexByPubkey(bytesutil.ToBytes48(pubkey))
		// In the event of the pubkey not existing, we continue processing the other
		// deposits.
		if !ok {
			return beaconState, nil
			continue
		}
		balance, err := beaconState.BalanceAtIndex(index)
		if err != nil {
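The change above is a behavioral fix: with the old early return, one unknown pubkey silently dropped every later deposit in the batch; with continue, only that deposit is skipped. A compressed sketch of the corrected loop shape (assumes this file's imports):

// Sketch of the corrected control flow for pre-genesis deposits: unknown
// pubkeys are skipped individually instead of aborting the whole loop.
for _, deposit := range deposits {
	index, ok := beaconState.ValidatorIndexByPubkey(bytesutil.ToBytes48(deposit.Data.PublicKey))
	if !ok {
		continue // skip just this deposit; keep processing the rest
	}
	_ = index // balance and effective-balance updates follow in the real function
}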
@@ -69,8 +70,8 @@ func ProcessDeposits(
	beaconState *stateTrie.BeaconState,
	b *ethpb.SignedBeaconBlock,
) (*stateTrie.BeaconState, error) {
	if b.Block == nil || b.Block.Body == nil {
		return nil, errors.New("block and block body can't be nil")
	if err := helpers.VerifyNilBeaconBlock(b); err != nil {
		return nil, err
	}

	deposits := b.Block.Body.Deposits
@@ -158,8 +159,7 @@ func ProcessDeposit(beaconState *stateTrie.BeaconState, deposit *ethpb.Deposit,
	if err != nil {
		return nil, err
	}
	depositSig := deposit.Data.Signature
	if err := verifyDepositDataSigningRoot(deposit.Data, pubKey, depositSig, domain); err != nil {
	if err := verifyDepositDataSigningRoot(deposit.Data, domain); err != nil {
		// Ignore this error as in the spec pseudo code.
		log.Debugf("Skipping deposit: could not verify deposit data signature: %v", err)
		return beaconState, nil
@@ -222,8 +222,7 @@ func verifyDeposit(beaconState *stateTrie.BeaconState, deposit *ethpb.Deposit) e
	return nil
}

// Deprecated: This method uses deprecated ssz.SigningRoot.
func verifyDepositDataSigningRoot(obj *ethpb.Deposit_Data, pub []byte, signature []byte, domain []byte) error {
func verifyDepositDataSigningRoot(obj *ethpb.Deposit_Data, domain []byte) error {
	return depositutil.VerifyDepositSignature(obj, domain)
}

@@ -247,19 +246,16 @@ func verifyDepositDataWithDomain(ctx context.Context, deps []*ethpb.Deposit, dom
		}
		pks[i] = dpk
		sigs[i] = dep.Data.Signature
		root, err := ssz.SigningRoot(dep.Data)
		depositMessage := &pb.DepositMessage{
			PublicKey:             dep.Data.PublicKey,
			WithdrawalCredentials: dep.Data.WithdrawalCredentials,
			Amount:                dep.Data.Amount,
		}
		sr, err := helpers.ComputeSigningRoot(depositMessage, domain)
		if err != nil {
			return errors.Wrap(err, "could not get signing root")
			return err
		}
		signingData := &pb.SigningData{
			ObjectRoot: root[:],
			Domain:     domain,
		}
		ctrRoot, err := signingData.HashTreeRoot()
		if err != nil {
			return errors.Wrap(err, "could not get container root")
		}
		msgs[i] = ctrRoot
		msgs[i] = sr
	}
	verify, err := bls.VerifyMultipleSignatures(sigs, msgs, pks)
	if err != nil {

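The removed lines built the signing root by hand from a SigningData container; helpers.ComputeSigningRoot folds that step into one call after taking the object's hash tree root. The equivalence, sketched (assumes the pb import of this file):

// computeSigningRootByHand shows what the deleted code did for this case:
// hash-tree-root a SigningData container holding the object's root and the
// signature domain. ComputeSigningRoot performs the same combination internally.
func computeSigningRootByHand(objectRoot [32]byte, domain []byte) ([32]byte, error) {
	signingData := &pb.SigningData{
		ObjectRoot: objectRoot[:],
		Domain:     domain,
	}
	return signingData.HashTreeRoot()
}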
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
@@ -67,7 +68,7 @@ func TestProcessDeposits_MerkleBranchFailsVerification(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
// We then create a merkle branch for the test.
|
||||
depositTrie, err := trieutil.GenerateTrieFromItems([][]byte{leaf[:]}, int(params.BeaconConfig().DepositContractTreeDepth))
|
||||
depositTrie, err := trieutil.GenerateTrieFromItems([][]byte{leaf[:]}, params.BeaconConfig().DepositContractTreeDepth)
|
||||
require.NoError(t, err, "Could not generate trie")
|
||||
proof, err := depositTrie.MerkleProof(0)
|
||||
require.NoError(t, err, "Could not generate proof")
|
||||
@@ -132,7 +133,8 @@ func TestProcessDeposits_AddsNewValidatorDeposit(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestProcessDeposits_RepeatedDeposit_IncreasesValidatorBalance(t *testing.T) {
|
||||
sk := bls.RandKey()
|
||||
sk, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
deposit := ðpb.Deposit{
|
||||
Data: ðpb.Deposit_Data{
|
||||
PublicKey: sk.PublicKey().Marshal(),
|
||||
@@ -149,7 +151,7 @@ func TestProcessDeposits_RepeatedDeposit_IncreasesValidatorBalance(t *testing.T)
|
||||
require.NoError(t, err)
|
||||
|
||||
// We then create a merkle branch for the test.
|
||||
depositTrie, err := trieutil.GenerateTrieFromItems([][]byte{leaf[:]}, int(params.BeaconConfig().DepositContractTreeDepth))
|
||||
depositTrie, err := trieutil.GenerateTrieFromItems([][]byte{leaf[:]}, params.BeaconConfig().DepositContractTreeDepth)
|
||||
require.NoError(t, err, "Could not generate trie")
|
||||
proof, err := depositTrie.MerkleProof(0)
|
||||
require.NoError(t, err, "Could not generate proof")
|
||||
@@ -187,7 +189,7 @@ func TestProcessDeposits_RepeatedDeposit_IncreasesValidatorBalance(t *testing.T)
|
||||
}
|
||||
|
||||
func TestProcessDeposit_AddsNewValidatorDeposit(t *testing.T) {
|
||||
//Similar to TestProcessDeposits_AddsNewValidatorDeposit except that this test directly calls ProcessDeposit
|
||||
// Similar to TestProcessDeposits_AddsNewValidatorDeposit except that this test directly calls ProcessDeposit
|
||||
dep, _, err := testutil.DeterministicDepositsAndKeys(1)
|
||||
require.NoError(t, err)
|
||||
eth1Data, err := testutil.DeterministicEth1Data(len(dep))
|
||||
@@ -271,3 +273,69 @@ func TestProcessDeposit_SkipsInvalidDeposit(t *testing.T) {
|
||||
t.Errorf("Expected validator balance at index 0 to stay 0, received: %v", newState.Balances()[0])
|
||||
}
|
||||
}
|
||||
|
||||
func TestPreGenesisDeposits_SkipInvalidDeposit(t *testing.T) {
|
||||
testutil.ResetCache()
|
||||
dep, _, err := testutil.DeterministicDepositsAndKeys(100)
|
||||
require.NoError(t, err)
|
||||
defer testutil.ResetCache()
|
||||
dep[0].Data.Signature = make([]byte, 96)
|
||||
trie, _, err := testutil.DepositTrieFromDeposits(dep)
|
||||
require.NoError(t, err)
|
||||
|
||||
for i := range dep {
|
||||
proof, err := trie.MerkleProof(i)
|
||||
require.NoError(t, err)
|
||||
dep[i].Proof = proof
|
||||
}
|
||||
root := trie.Root()
|
||||
eth1Data := ðpb.Eth1Data{
|
||||
DepositRoot: root[:],
|
||||
DepositCount: 1,
|
||||
}
|
||||
registry := []*ethpb.Validator{
|
||||
{
|
||||
PublicKey: []byte{1},
|
||||
WithdrawalCredentials: []byte{1, 2, 3},
|
||||
},
|
||||
}
|
||||
balances := []uint64{0}
|
||||
beaconState, err := stateTrie.InitializeFromProto(&pb.BeaconState{
|
||||
Validators: registry,
|
||||
Balances: balances,
|
||||
Eth1Data: eth1Data,
|
||||
Fork: &pb.Fork{
|
||||
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
newState, err := blocks.ProcessPreGenesisDeposits(context.Background(), beaconState, dep)
|
||||
require.NoError(t, err, "Expected invalid block deposit to be ignored without error")
|
||||
|
||||
_, ok := newState.ValidatorIndexByPubkey(bytesutil.ToBytes48(dep[0].Data.PublicKey))
|
||||
require.Equal(t, false, ok, "bad pubkey should not exist in state")
|
||||
|
||||
for i := 1; i < newState.NumValidators(); i++ {
|
||||
val, err := newState.ValidatorAtIndex(types.ValidatorIndex(i))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MaxEffectiveBalance, val.EffectiveBalance, "unequal effective balance")
|
||||
require.Equal(t, types.Epoch(0), val.ActivationEpoch)
|
||||
require.Equal(t, types.Epoch(0), val.ActivationEligibilityEpoch)
|
||||
}
|
||||
if newState.Eth1DepositIndex() != 100 {
|
||||
t.Errorf(
|
||||
"Expected Eth1DepositIndex to be increased by 99 after processing an invalid deposit, received change: %v",
|
||||
newState.Eth1DepositIndex(),
|
||||
)
|
||||
}
|
||||
if len(newState.Validators()) != 100 {
|
||||
t.Errorf("Expected validator list to have length 100, received: %v", len(newState.Validators()))
|
||||
}
|
||||
if len(newState.Balances()) != 100 {
|
||||
t.Errorf("Expected validator balances list to have length 100, received: %v", len(newState.Balances()))
|
||||
}
|
||||
if newState.Balances()[0] != 0 {
|
||||
t.Errorf("Expected validator balance at index 0 to stay 0, received: %v", newState.Balances()[0])
|
||||
}
|
||||
}
|
||||
|
||||
@@ -19,7 +19,7 @@ import (
// state.eth1_data_votes.append(body.eth1_data)
// if state.eth1_data_votes.count(body.eth1_data) * 2 > EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH:
// state.latest_eth1_data = body.eth1_data
func ProcessEth1DataInBlock(ctx context.Context, beaconState *stateTrie.BeaconState, b *ethpb.SignedBeaconBlock) (*stateTrie.BeaconState, error) {
func ProcessEth1DataInBlock(_ context.Context, beaconState *stateTrie.BeaconState, b *ethpb.SignedBeaconBlock) (*stateTrie.BeaconState, error) {
	block := b.Block
	if beaconState == nil {
		return nil, errors.New("nil state")
@@ -71,6 +71,6 @@ func Eth1DataHasEnoughSupport(beaconState *stateTrie.BeaconState, data *ethpb.Et

	// If 50+% majority converged on the same eth1data, then it has enough support to update the
	// state.
	support := params.BeaconConfig().EpochsPerEth1VotingPeriod * params.BeaconConfig().SlotsPerEpoch
	return voteCount*2 > support, nil
	support := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().EpochsPerEth1VotingPeriod))
	return voteCount*2 > uint64(support), nil
}

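For concreteness, with the mainnet values SLOTS_PER_EPOCH = 32 and EPOCHS_PER_ETH1_VOTING_PERIOD = 64 (used here purely as an illustration), the majority check works out like this:

package main

import "fmt"

func main() {
	slotsPerEpoch := uint64(32)                      // mainnet SLOTS_PER_EPOCH, illustrative
	epochsPerVotingPeriod := uint64(64)              // mainnet EPOCHS_PER_ETH1_VOTING_PERIOD, illustrative
	support := slotsPerEpoch * epochsPerVotingPeriod // 2048 slots in one eth1 voting period
	// Strictly more than half of those slots must carry the same eth1 data.
	for _, voteCount := range []uint64{1024, 1025} {
		fmt.Println(voteCount, voteCount*2 > support) // 1024 false, 1025 true
	}
}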
@@ -6,6 +6,7 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
beaconstate "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
@@ -33,10 +34,10 @@ func TestEth1DataHasEnoughSupport(t *testing.T) {
|
||||
stateVotes []*ethpb.Eth1Data
|
||||
data *ethpb.Eth1Data
|
||||
hasSupport bool
|
||||
votingPeriodLength uint64
|
||||
votingPeriodLength types.Epoch
|
||||
}{
|
||||
{
|
||||
stateVotes: FakeDeposits(4 * params.BeaconConfig().SlotsPerEpoch),
|
||||
stateVotes: FakeDeposits(uint64(params.BeaconConfig().SlotsPerEpoch.Mul(4))),
|
||||
data: ðpb.Eth1Data{
|
||||
DepositCount: 1,
|
||||
DepositRoot: bytesutil.PadTo([]byte("root"), 32),
|
||||
@@ -44,7 +45,7 @@ func TestEth1DataHasEnoughSupport(t *testing.T) {
|
||||
hasSupport: true,
|
||||
votingPeriodLength: 7,
|
||||
}, {
|
||||
stateVotes: FakeDeposits(4 * params.BeaconConfig().SlotsPerEpoch),
|
||||
stateVotes: FakeDeposits(uint64(params.BeaconConfig().SlotsPerEpoch.Mul(4))),
|
||||
data: ðpb.Eth1Data{
|
||||
DepositCount: 1,
|
||||
DepositRoot: bytesutil.PadTo([]byte("root"), 32),
|
||||
@@ -52,7 +53,7 @@ func TestEth1DataHasEnoughSupport(t *testing.T) {
|
||||
hasSupport: false,
|
||||
votingPeriodLength: 8,
|
||||
}, {
|
||||
stateVotes: FakeDeposits(4 * params.BeaconConfig().SlotsPerEpoch),
|
||||
stateVotes: FakeDeposits(uint64(params.BeaconConfig().SlotsPerEpoch.Mul(4))),
|
||||
data: ðpb.Eth1Data{
|
||||
DepositCount: 1,
|
||||
DepositRoot: bytesutil.PadTo([]byte("root"), 32),
|
||||
@@ -174,7 +175,7 @@ func TestProcessEth1Data_SetsCorrectly(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
period := params.BeaconConfig().EpochsPerEth1VotingPeriod * params.BeaconConfig().SlotsPerEpoch
|
||||
period := uint64(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().EpochsPerEth1VotingPeriod)))
|
||||
for i := uint64(0); i < period; i++ {
|
||||
beaconState, err = blocks.ProcessEth1DataInBlock(context.Background(), beaconState, b)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -5,14 +5,23 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
v "github.com/prysmaticlabs/prysm/beacon-chain/core/validators"
|
||||
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
iface "github.com/prysmaticlabs/prysm/beacon-chain/state/interface"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
)
|
||||
|
||||
// ValidatorAlreadyExitedMsg defines a message saying that a validator has already exited.
|
||||
var ValidatorAlreadyExitedMsg = "has already submitted an exit, which will take place at epoch"
|
||||
|
||||
// ValidatorCannotExitYetMsg defines a message saying that a validator cannot exit
|
||||
// because it has not been active long enough.
|
||||
var ValidatorCannotExitYetMsg = "validator has not been active long enough to exit"
|
||||
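Exporting these message fragments lets callers classify exit failures without matching full error strings; a usage sketch (the caller below is hypothetical and assumes the strings package plus a blocks import):

// Hypothetical caller: distinguish "already exited" from other failures by
// checking for the exported message fragment.
if err := blocks.VerifyExitAndSignature(val, slot, fork, signedExit, genesisRoot); err != nil {
	if strings.Contains(err.Error(), blocks.ValidatorAlreadyExitedMsg) {
		// The validator already has a pending exit; callers may treat this as a no-op.
	} else {
		return err
	}
}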
|
||||
// ProcessVoluntaryExits is one of the operations performed
// on each processed beacon block to determine which validators
// should exit the state's validator registry.
@@ -37,12 +46,12 @@ import (
// # Initiate exit
// initiate_validator_exit(state, exit.validator_index)
func ProcessVoluntaryExits(
ctx context.Context,
_ context.Context,
beaconState *stateTrie.BeaconState,
b *ethpb.SignedBeaconBlock,
) (*stateTrie.BeaconState, error) {
if b.Block == nil || b.Block.Body == nil {
return nil, errors.New("block and block body can't be nil")
if err := helpers.VerifyNilBeaconBlock(b); err != nil {
return nil, err
}

body := b.Block.Body
@@ -66,39 +75,6 @@ func ProcessVoluntaryExits(
return beaconState, nil
}

// ProcessVoluntaryExitsNoVerifySignature processes all the voluntary exits in
// a block body, without verifying their BLS signatures.
// This function is here to satisfy fuzz tests.
func ProcessVoluntaryExitsNoVerifySignature(
beaconState *stateTrie.BeaconState,
body *ethpb.BeaconBlockBody,
) (*stateTrie.BeaconState, error) {
exits := body.VoluntaryExits

for idx, exit := range exits {
if exit == nil || exit.Exit == nil {
return nil, errors.New("nil exit")
}
val, err := beaconState.ValidatorAtIndexReadOnly(exit.Exit.ValidatorIndex)
if err != nil {
return nil, err
}
if err := verifyExitConditions(val, beaconState.Slot(), exit.Exit); err != nil {
return nil, err
}
// Validate that fork and genesis root are valid.
_, err = helpers.Domain(beaconState.Fork(), exit.Exit.Epoch, params.BeaconConfig().DomainVoluntaryExit, beaconState.GenesisValidatorRoot())
if err != nil {
return nil, err
}
beaconState, err = v.InitiateValidatorExit(beaconState, exit.Exit.ValidatorIndex)
if err != nil {
return nil, errors.Wrapf(err, "failed to process voluntary exit at index %d", idx)
}
}
return beaconState, nil
}

// VerifyExitAndSignature implements the spec defined validation for voluntary exits.
//
// Spec pseudocode definition:
@@ -118,7 +94,7 @@ func ProcessVoluntaryExitsNoVerifySignature(
// # Verify signature
// domain = get_domain(state, DOMAIN_VOLUNTARY_EXIT, exit.epoch)
// assert bls_verify(validator.pubkey, signing_root(exit), exit.signature, domain)
func VerifyExitAndSignature(validator *stateTrie.ReadOnlyValidator, currentSlot uint64, fork *pb.Fork, signed *ethpb.SignedVoluntaryExit, genesisRoot []byte) error {
func VerifyExitAndSignature(validator iface.ReadOnlyValidator, currentSlot types.Slot, fork *pb.Fork, signed *ethpb.SignedVoluntaryExit, genesisRoot []byte) error {
if signed == nil || signed.Exit == nil {
return errors.New("nil exit")
}
@@ -154,7 +130,7 @@ func VerifyExitAndSignature(validator *stateTrie.ReadOnlyValidator, currentSlot
// assert get_current_epoch(state) >= exit.epoch
// # Verify the validator has been active long enough
// assert get_current_epoch(state) >= validator.activation_epoch + SHARD_COMMITTEE_PERIOD
func verifyExitConditions(validator *stateTrie.ReadOnlyValidator, currentSlot uint64, exit *ethpb.VoluntaryExit) error {
func verifyExitConditions(validator iface.ReadOnlyValidator, currentSlot types.Slot, exit *ethpb.VoluntaryExit) error {
currentEpoch := helpers.SlotToEpoch(currentSlot)
// Verify the validator is active.
if !helpers.IsActiveValidatorUsingTrie(validator, currentEpoch) {
@@ -162,9 +138,7 @@ func verifyExitConditions(validator *stateTrie.ReadOnlyValidator, currentSlot ui
}
// Verify the validator has not yet submitted an exit.
if validator.ExitEpoch() != params.BeaconConfig().FarFutureEpoch {
return fmt.Errorf(
"validator has already submitted an exit, which will take place at epoch: %v",
validator.ExitEpoch())
return fmt.Errorf("validator with index %d %s: %v", exit.ValidatorIndex, ValidatorAlreadyExitedMsg, validator.ExitEpoch())
}
// Exits must specify an epoch when they become valid; they are not valid before then.
if currentEpoch < exit.Epoch {
@@ -173,7 +147,8 @@ func verifyExitConditions(validator *stateTrie.ReadOnlyValidator, currentSlot ui
// Verify the validator has been active long enough.
if currentEpoch < validator.ActivationEpoch()+params.BeaconConfig().ShardCommitteePeriod {
return fmt.Errorf(
"validator has not been active long enough to exit: %d epochs vs required %d epochs",
"%s: %d epochs vs required %d epochs",
ValidatorCannotExitYetMsg,
currentEpoch,
validator.ActivationEpoch()+params.BeaconConfig().ShardCommitteePeriod,
)
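The checks in verifyExitConditions above follow the spec order shown in the pseudocode comments: the validator must be active, must not have an exit already queued, the exit's epoch must have arrived, and the validator must have been active for at least SHARD_COMMITTEE_PERIOD epochs. A simplified standalone sketch of those four checks, with placeholder types and constants rather than the Prysm interfaces:

package main

import (
	"errors"
	"fmt"
)

const (
	farFutureEpoch       = ^uint64(0) // placeholder for FarFutureEpoch
	shardCommitteePeriod = 256        // placeholder for ShardCommitteePeriod
)

type validator struct {
	activationEpoch uint64
	exitEpoch       uint64
}

func checkExit(v validator, currentEpoch, exitEpoch uint64) error {
	// Verify the validator is active.
	if currentEpoch < v.activationEpoch || currentEpoch >= v.exitEpoch {
		return errors.New("non-active validator cannot exit")
	}
	// Verify the validator has not yet submitted an exit.
	if v.exitEpoch != farFutureEpoch {
		return errors.New("validator has already submitted an exit")
	}
	// Exits are not valid before the epoch they specify.
	if currentEpoch < exitEpoch {
		return errors.New("expected current epoch >= exit epoch")
	}
	// Verify the validator has been active long enough.
	if currentEpoch < v.activationEpoch+shardCommitteePeriod {
		return errors.New("validator has not been active long enough to exit")
	}
	return nil
}

func main() {
	v := validator{activationEpoch: 0, exitEpoch: farFutureEpoch}
	fmt.Println(checkExit(v, 300, 10)) // <nil>
}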
@@ -4,6 +4,7 @@ import (
"context"
"testing"

types "github.com/prysmaticlabs/eth2-types"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
@@ -16,73 +17,6 @@ import (
"github.com/prysmaticlabs/prysm/shared/testutil/require"
)

func TestProcessVoluntaryExits_ValidatorNotActive(t *testing.T) {
exits := []*ethpb.SignedVoluntaryExit{
{
Exit: &ethpb.VoluntaryExit{
ValidatorIndex: 0,
},
},
}
registry := []*ethpb.Validator{
{
ExitEpoch: 0,
},
}
state, err := stateTrie.InitializeFromProto(&pb.BeaconState{
Validators: registry,
})
require.NoError(t, err)
b := testutil.NewBeaconBlock()
b.Block = &ethpb.BeaconBlock{
Body: &ethpb.BeaconBlockBody{
VoluntaryExits: exits,
},
}

want := "non-active validator cannot exit"
_, err = blocks.ProcessVoluntaryExits(context.Background(), state, b)
assert.ErrorContains(t, want, err)

// Check conformance of no verify method.
_, err = blocks.ProcessVoluntaryExitsNoVerifySignature(state, b.Block.Body)
assert.ErrorContains(t, want, err)
}

func TestProcessVoluntaryExits_InvalidExitEpoch(t *testing.T) {
exits := []*ethpb.SignedVoluntaryExit{
{
Exit: &ethpb.VoluntaryExit{
Epoch: 10,
},
},
}
registry := []*ethpb.Validator{
{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
},
}
state, err := stateTrie.InitializeFromProto(&pb.BeaconState{
Validators: registry,
Slot: 0,
})
require.NoError(t, err)
b := testutil.NewBeaconBlock()
b.Block = &ethpb.BeaconBlock{
Body: &ethpb.BeaconBlockBody{
VoluntaryExits: exits,
},
}

want := "expected current epoch >= exit epoch"
_, err = blocks.ProcessVoluntaryExits(context.Background(), state, b)
assert.ErrorContains(t, want, err)

// Check conformance of no verify method.
_, err = blocks.ProcessVoluntaryExitsNoVerifySignature(state, b.Block.Body)
assert.ErrorContains(t, want, err)
}

func TestProcessVoluntaryExits_NotActiveLongEnoughToExit(t *testing.T) {
exits := []*ethpb.SignedVoluntaryExit{
{
@@ -139,7 +73,7 @@ func TestProcessVoluntaryExits_ExitAlreadySubmitted(t *testing.T) {
},
}

want := "validator has already submitted an exit, which will take place at epoch: 10"
want := "validator with index 0 has already submitted an exit, which will take place at epoch: 10"
_, err = blocks.ProcessVoluntaryExits(context.Background(), state, b)
assert.ErrorContains(t, want, err)
}
@@ -168,13 +102,15 @@ func TestProcessVoluntaryExits_AppliesCorrectStatus(t *testing.T) {
Slot: params.BeaconConfig().SlotsPerEpoch * 5,
})
require.NoError(t, err)
err = state.SetSlot(state.Slot() + (params.BeaconConfig().ShardCommitteePeriod * params.BeaconConfig().SlotsPerEpoch))
err = state.SetSlot(state.Slot() + params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().ShardCommitteePeriod)))
require.NoError(t, err)

priv, err := bls.RandKey()
require.NoError(t, err)

priv := bls.RandKey()
val, err := state.ValidatorAtIndex(0)
require.NoError(t, err)
val.PublicKey = priv.PublicKey().Marshal()[:]
val.PublicKey = priv.PublicKey().Marshal()
require.NoError(t, state.UpdateValidatorAtIndex(0, val))
exits[0].Signature, err = helpers.ComputeDomainAndSign(state, helpers.CurrentEpoch(state), exits[0].Exit, params.BeaconConfig().DomainVoluntaryExit, priv)
require.NoError(t, err)
@@ -186,21 +122,11 @@ func TestProcessVoluntaryExits_AppliesCorrectStatus(t *testing.T) {
},
}

stateCopy := state.Copy()
newState, err := blocks.ProcessVoluntaryExits(context.Background(), state, b)
require.NoError(t, err, "Could not process exits")
newRegistry := newState.Validators()
if newRegistry[0].ExitEpoch != helpers.ActivationExitEpoch(state.Slot()/params.BeaconConfig().SlotsPerEpoch) {
if newRegistry[0].ExitEpoch != helpers.ActivationExitEpoch(types.Epoch(state.Slot()/params.BeaconConfig().SlotsPerEpoch)) {
t.Errorf("Expected validator exit epoch to be %d, got %d",
helpers.ActivationExitEpoch(state.Slot()/params.BeaconConfig().SlotsPerEpoch), newRegistry[0].ExitEpoch)
}

// Check conformance with NoVerify Exit Method.
newState, err = blocks.ProcessVoluntaryExitsNoVerifySignature(stateCopy, b.Block.Body)
require.NoError(t, err, "Could not process exits")
newRegistry = newState.Validators()
if newRegistry[0].ExitEpoch != helpers.ActivationExitEpoch(stateCopy.Slot()/params.BeaconConfig().SlotsPerEpoch) {
t.Errorf("Expected validator exit epoch to be %d, got %d",
helpers.ActivationExitEpoch(stateCopy.Slot()/params.BeaconConfig().SlotsPerEpoch), newRegistry[0].ExitEpoch)
helpers.ActivationExitEpoch(types.Epoch(state.Slot()/params.BeaconConfig().SlotsPerEpoch)), newRegistry[0].ExitEpoch)
}
}

@@ -37,7 +37,7 @@ import (
// # Verify proposer signature
// assert bls_verify(proposer.pubkey, signing_root(block), block.signature, get_domain(state, DOMAIN_BEACON_PROPOSER))
func ProcessBlockHeader(
ctx context.Context,
_ context.Context,
beaconState *stateTrie.BeaconState,
block *ethpb.SignedBeaconBlock,
) (*stateTrie.BeaconState, error) {

@@ -7,16 +7,16 @@ import (

"github.com/gogo/protobuf/proto"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/sirupsen/logrus"

"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
p2ptypes "github.com/prysmaticlabs/prysm/beacon-chain/p2p/types"
"github.com/prysmaticlabs/prysm/shared/bls"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
"github.com/sirupsen/logrus"
)

func init() {
@@ -34,21 +34,20 @@ func TestProcessBlockHeader_ImproperBlockSlot(t *testing.T) {
}
}

state := testutil.NewBeaconState()
state, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, state.SetSlot(10))
require.NoError(t, state.SetValidators(validators))
require.NoError(t, state.SetLatestBlockHeader(&ethpb.BeaconBlockHeader{
Slot: 10, // Must be less than block.Slot
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
}))
require.NoError(t, state.SetLatestBlockHeader(testutil.HydrateBeaconHeader(&ethpb.BeaconBlockHeader{
Slot: 10, // Must be less than block.Slot
})))

latestBlockSignedRoot, err := state.LatestBlockHeader().HashTreeRoot()
require.NoError(t, err)

currentEpoch := helpers.CurrentEpoch(state)
priv := bls.RandKey()
priv, err := bls.RandKey()
require.NoError(t, err)
pID, err := helpers.BeaconProposerIndex(state)
require.NoError(t, err)
block := testutil.NewBeaconBlock()
@@ -73,12 +72,9 @@ func TestProcessBlockHeader_ImproperBlockSlot(t *testing.T) {
func TestProcessBlockHeader_WrongProposerSig(t *testing.T) {
testutil.ResetCache()
beaconState, privKeys := testutil.DeterministicGenesisState(t, 100)
require.NoError(t, beaconState.SetLatestBlockHeader(&ethpb.BeaconBlockHeader{
Slot: 9,
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
}))
require.NoError(t, beaconState.SetLatestBlockHeader(testutil.HydrateBeaconHeader(&ethpb.BeaconBlockHeader{
Slot: 9,
})))
require.NoError(t, beaconState.SetSlot(10))

lbhdr, err := beaconState.LatestBlockHeader().HashTreeRoot()
@@ -111,23 +107,22 @@ func TestProcessBlockHeader_DifferentSlots(t *testing.T) {
}
}

state := testutil.NewBeaconState()
state, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, state.SetValidators(validators))
require.NoError(t, state.SetSlot(10))
require.NoError(t, state.SetLatestBlockHeader(&ethpb.BeaconBlockHeader{
Slot: 9,
ProposerIndex: 0,
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
}))
require.NoError(t, state.SetLatestBlockHeader(testutil.HydrateBeaconHeader(&ethpb.BeaconBlockHeader{
Slot: 9,
})))

lbhsr, err := state.LatestBlockHeader().HashTreeRoot()
require.NoError(t, err)
currentEpoch := helpers.CurrentEpoch(state)

priv := bls.RandKey()
blockSig, err := helpers.ComputeDomainAndSign(state, currentEpoch, []byte("hello"), params.BeaconConfig().DomainBeaconProposer, priv)
priv, err := bls.RandKey()
require.NoError(t, err)
sszBytes := p2ptypes.SSZBytes("hello")
blockSig, err := helpers.ComputeDomainAndSign(state, currentEpoch, &sszBytes, params.BeaconConfig().DomainBeaconProposer, priv)
require.NoError(t, err)
validators[5896].PublicKey = priv.PublicKey().Marshal()
block := &ethpb.SignedBeaconBlock{
@@ -157,15 +152,18 @@ func TestProcessBlockHeader_PreviousBlockRootNotSignedRoot(t *testing.T) {
}
}

state := testutil.NewBeaconState()
state, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, state.SetValidators(validators))
require.NoError(t, state.SetSlot(10))
bh := state.LatestBlockHeader()
bh.Slot = 9
require.NoError(t, state.SetLatestBlockHeader(bh))
currentEpoch := helpers.CurrentEpoch(state)
priv := bls.RandKey()
blockSig, err := helpers.ComputeDomainAndSign(state, currentEpoch, []byte("hello"), params.BeaconConfig().DomainBeaconProposer, priv)
priv, err := bls.RandKey()
require.NoError(t, err)
sszBytes := p2ptypes.SSZBytes("hello")
blockSig, err := helpers.ComputeDomainAndSign(state, currentEpoch, &sszBytes, params.BeaconConfig().DomainBeaconProposer, priv)
require.NoError(t, err)
validators[5896].PublicKey = priv.PublicKey().Marshal()
pID, err := helpers.BeaconProposerIndex(state)
@@ -193,7 +191,8 @@ func TestProcessBlockHeader_SlashedProposer(t *testing.T) {
}
}

state := testutil.NewBeaconState()
state, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, state.SetValidators(validators))
require.NoError(t, state.SetSlot(10))
bh := state.LatestBlockHeader()
@@ -202,8 +201,10 @@ func TestProcessBlockHeader_SlashedProposer(t *testing.T) {
parentRoot, err := state.LatestBlockHeader().HashTreeRoot()
require.NoError(t, err)
currentEpoch := helpers.CurrentEpoch(state)
priv := bls.RandKey()
blockSig, err := helpers.ComputeDomainAndSign(state, currentEpoch, []byte("hello"), params.BeaconConfig().DomainBeaconProposer, priv)
priv, err := bls.RandKey()
require.NoError(t, err)
sszBytes := p2ptypes.SSZBytes("hello")
blockSig, err := helpers.ComputeDomainAndSign(state, currentEpoch, &sszBytes, params.BeaconConfig().DomainBeaconProposer, priv)
require.NoError(t, err)

validators[12683].PublicKey = priv.PublicKey().Marshal()
@@ -232,22 +233,20 @@ func TestProcessBlockHeader_OK(t *testing.T) {
}
}

state := testutil.NewBeaconState()
state, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, state.SetValidators(validators))
require.NoError(t, state.SetSlot(10))
require.NoError(t, state.SetLatestBlockHeader(&ethpb.BeaconBlockHeader{
Slot: 9,
ProposerIndex: 0,
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
}))
require.NoError(t, state.SetLatestBlockHeader(testutil.HydrateBeaconHeader(&ethpb.BeaconBlockHeader{
Slot: 9,
})))

latestBlockSignedRoot, err := state.LatestBlockHeader().HashTreeRoot()
require.NoError(t, err)

currentEpoch := helpers.CurrentEpoch(state)
priv := bls.RandKey()
priv, err := bls.RandKey()
require.NoError(t, err)
pID, err := helpers.BeaconProposerIndex(state)
require.NoError(t, err)
block := testutil.NewBeaconBlock()
@@ -292,22 +291,21 @@ func TestBlockSignatureSet_OK(t *testing.T) {
}
}

state := testutil.NewBeaconState()
state, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, state.SetValidators(validators))
require.NoError(t, state.SetSlot(10))
require.NoError(t, state.SetLatestBlockHeader(&ethpb.BeaconBlockHeader{
require.NoError(t, state.SetLatestBlockHeader(testutil.HydrateBeaconHeader(&ethpb.BeaconBlockHeader{
Slot: 9,
ProposerIndex: 0,
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
}))
})))

latestBlockSignedRoot, err := state.LatestBlockHeader().HashTreeRoot()
require.NoError(t, err)

currentEpoch := helpers.CurrentEpoch(state)
priv := bls.RandKey()
priv, err := bls.RandKey()
require.NoError(t, err)
pID, err := helpers.BeaconProposerIndex(state)
require.NoError(t, err)
block := testutil.NewBeaconBlock()

@@ -4,13 +4,14 @@ import (
"context"
"fmt"

"github.com/prysmaticlabs/prysm/shared/params"

"github.com/gogo/protobuf/proto"
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
v "github.com/prysmaticlabs/prysm/beacon-chain/core/validators"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/shared/params"
)

// ProcessProposerSlashings is one of the operations performed
@@ -36,12 +37,12 @@ import (
//
// slash_validator(state, proposer_slashing.proposer_index)
func ProcessProposerSlashings(
ctx context.Context,
_ context.Context,
beaconState *stateTrie.BeaconState,
b *ethpb.SignedBeaconBlock,
) (*stateTrie.BeaconState, error) {
if b.Block == nil || b.Block.Body == nil {
return nil, errors.New("block and block body can't be nil")
if err := helpers.VerifyNilBeaconBlock(b); err != nil {
return nil, err
}

body := b.Block.Body
@@ -86,7 +87,7 @@ func VerifyProposerSlashing(
if err != nil {
return err
}
if !helpers.IsSlashableValidatorUsingTrie(proposer, helpers.SlotToEpoch(beaconState.Slot())) {
if !helpers.IsSlashableValidatorUsingTrie(proposer, helpers.CurrentEpoch(beaconState)) {
return fmt.Errorf("validator with key %#x is not slashable", proposer.PublicKey())
}
headers := []*ethpb.SignedBeaconBlockHeader{slashing.Header_1, slashing.Header_2}
@@ -96,6 +97,5 @@ func VerifyProposerSlashing(
return errors.Wrap(err, "could not verify beacon block header")
}
}

return nil
}

@@ -0,0 +1,38 @@
package blocks_test

import (
"io/ioutil"
"testing"

"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"

ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
)

// Beaconfuzz discovered an issue where a proposer slashing could be produced which would pass
// validation where we use the slashing's slot instead of the current epoch of our state for validation.
// This would lead to us accepting an invalid slashing by marking the respective validator as 'slashable'
// when it was not in actuality.
// See: https://github.com/sigp/beacon-fuzz/issues/91
func TestVerifyProposerSlashing_BeaconFuzzIssue91(t *testing.T) {
file, err := ioutil.ReadFile("testdata/beaconfuzz_91_beacon.ssz")
require.NoError(t, err)
rawState := &pb.BeaconState{}
err = rawState.UnmarshalSSZ(file)
require.NoError(t, err)

st, err := stateTrie.InitializeFromProtoUnsafe(rawState)
require.NoError(t, err)

file, err = ioutil.ReadFile("testdata/beaconfuzz_91_proposer_slashing.ssz")
require.NoError(t, err)
slashing := &ethpb.ProposerSlashing{}
err = slashing.UnmarshalSSZ(file)
require.NoError(t, err)

err = blocks.VerifyProposerSlashing(st, slashing)
require.ErrorContains(t, "validator with key 0x97f1d3a73197d7942695638c4fa9ac0fc3688c4f9774b905a14e3a3f171bac586c55e83ff97a1aeffb3af00adb22c6bb is not slashable", err)
}
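The regression test above guards the class of bug described in its comment: slashability depends on which epoch the check is evaluated at, so it must use the state's current epoch rather than an epoch derived from attacker-supplied slashing data. A simplified standalone illustration of that difference, with placeholder types rather than the Prysm API:

package main

import "fmt"

const (
	slotsPerEpoch  = 32
	farFutureEpoch = ^uint64(0)
)

type validator struct {
	slashed           bool
	activationEpoch   uint64
	withdrawableEpoch uint64
}

// isSlashable mirrors the spec predicate: not slashed and
// activation_epoch <= epoch < withdrawable_epoch.
func isSlashable(v validator, epoch uint64) bool {
	return !v.slashed && v.activationEpoch <= epoch && epoch < v.withdrawableEpoch
}

func main() {
	// Not yet active at the state's current epoch.
	v := validator{activationEpoch: 2, withdrawableEpoch: farFutureEpoch}
	currentEpoch := uint64(0)                 // the state's epoch
	headerEpoch := uint64(65) / slotsPerEpoch // epoch implied by the slashing's header slot

	fmt.Println(isSlashable(v, headerEpoch))  // true: looks slashable if we trust the header
	fmt.Println(isSlashable(v, currentEpoch)) // false: not slashable at the state's epoch
}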
@@ -5,6 +5,7 @@ import (
"fmt"
"testing"

types "github.com/prysmaticlabs/eth2-types"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
@@ -21,7 +22,7 @@ import (
func TestProcessProposerSlashings_UnmatchedHeaderSlots(t *testing.T) {
testutil.ResetCache()
beaconState, _ := testutil.DeterministicGenesisState(t, 20)
currentSlot := uint64(0)
currentSlot := types.Slot(0)
slashings := []*ethpb.ProposerSlashing{
{
Header_1: &ethpb.SignedBeaconBlockHeader{
@@ -54,7 +55,7 @@ func TestProcessProposerSlashings_UnmatchedHeaderSlots(t *testing.T) {
func TestProcessProposerSlashings_SameHeaders(t *testing.T) {
testutil.ResetCache()
beaconState, _ := testutil.DeterministicGenesisState(t, 2)
currentSlot := uint64(0)
currentSlot := types.Slot(0)
slashings := []*ethpb.ProposerSlashing{
{
Header_1: &ethpb.SignedBeaconBlockHeader{
@@ -93,7 +94,7 @@ func TestProcessProposerSlashings_ValidatorNotSlashable(t *testing.T) {
WithdrawableEpoch: 0,
},
}
currentSlot := uint64(0)
currentSlot := types.Slot(0)
slashings := []*ethpb.ProposerSlashing{
{
Header_1: &ethpb.SignedBeaconBlockHeader{
@@ -138,30 +139,24 @@ func TestProcessProposerSlashings_AppliesCorrectStatus(t *testing.T) {
// We test the case when data is correct and verify the validator
// registry has been updated.
beaconState, privKeys := testutil.DeterministicGenesisState(t, 100)
proposerIdx := uint64(1)
proposerIdx := types.ValidatorIndex(1)

header1 := &ethpb.SignedBeaconBlockHeader{
Header: &ethpb.BeaconBlockHeader{
Header: testutil.HydrateBeaconHeader(&ethpb.BeaconBlockHeader{
ProposerIndex: proposerIdx,
Slot: 0,
ParentRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
StateRoot: bytesutil.PadTo([]byte("A"), 32),
},
}),
}
var err error
header1.Signature, err = helpers.ComputeDomainAndSign(beaconState, 0, header1.Header, params.BeaconConfig().DomainBeaconProposer, privKeys[proposerIdx])
require.NoError(t, err)

header2 := &ethpb.SignedBeaconBlockHeader{
header2 := testutil.HydrateSignedBeaconHeader(&ethpb.SignedBeaconBlockHeader{
Header: &ethpb.BeaconBlockHeader{
ProposerIndex: proposerIdx,
Slot: 0,
ParentRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
StateRoot: bytesutil.PadTo([]byte("B"), 32),
},
}
})
header2.Signature, err = helpers.ComputeDomainAndSign(beaconState, 0, header2.Header, params.BeaconConfig().DomainBeaconProposer, privKeys[proposerIdx])
require.NoError(t, err)

@@ -191,9 +186,16 @@ func TestVerifyProposerSlashing(t *testing.T) {
slashing *ethpb.ProposerSlashing
}

beaconState, _ := testutil.DeterministicGenesisState(t, 2)
currentSlot := uint64(0)
beaconState, sks := testutil.DeterministicGenesisState(t, 2)
currentSlot := types.Slot(0)
require.NoError(t, beaconState.SetSlot(currentSlot))
rand1, err := bls.RandKey()
require.NoError(t, err)
sig1 := rand1.Sign([]byte("foo")).Marshal()

rand2, err := bls.RandKey()
require.NoError(t, err)
sig2 := rand2.Sign([]byte("bar")).Marshal()

tests := []struct {
name string
@@ -201,21 +203,21 @@ func TestVerifyProposerSlashing(t *testing.T) {
wantErr string
}{
{
name: "same header, no signatures",
name: "same header, same slot as state",
args: args{
slashing: &ethpb.ProposerSlashing{
Header_1: &ethpb.SignedBeaconBlockHeader{
Header_1: testutil.HydrateSignedBeaconHeader(&ethpb.SignedBeaconBlockHeader{
Header: &ethpb.BeaconBlockHeader{
ProposerIndex: 1,
Slot: 0,
Slot: currentSlot,
},
},
Header_2: &ethpb.SignedBeaconBlockHeader{
}),
Header_2: testutil.HydrateSignedBeaconHeader(&ethpb.SignedBeaconBlockHeader{
Header: &ethpb.BeaconBlockHeader{
ProposerIndex: 1,
Slot: 0,
Slot: currentSlot,
},
},
}),
},
beaconState: beaconState,
},
@@ -225,29 +227,67 @@ func TestVerifyProposerSlashing(t *testing.T) {
name: "same header, different signatures",
args: args{
slashing: &ethpb.ProposerSlashing{
Header_1: &ethpb.SignedBeaconBlockHeader{
Header_1: testutil.HydrateSignedBeaconHeader(&ethpb.SignedBeaconBlockHeader{
Header: &ethpb.BeaconBlockHeader{
ProposerIndex: 1,
Slot: 0,
},
Signature: bls.RandKey().Sign([]byte("foo")).Marshal(),
},
Header_2: &ethpb.SignedBeaconBlockHeader{
Signature: sig1,
}),
Header_2: testutil.HydrateSignedBeaconHeader(&ethpb.SignedBeaconBlockHeader{
Header: &ethpb.BeaconBlockHeader{
ProposerIndex: 1,
Slot: 0,
},
Signature: bls.RandKey().Sign([]byte("bar")).Marshal(),
},
Signature: sig2,
}),
},
beaconState: beaconState,
},
wantErr: "expected slashing headers to differ",
},
{
name: "slashing in future epoch",
args: args{
slashing: &ethpb.ProposerSlashing{
Header_1: &ethpb.SignedBeaconBlockHeader{
Header: &ethpb.BeaconBlockHeader{
ProposerIndex: 1,
Slot: 65,
StateRoot: bytesutil.PadTo([]byte{}, 32),
BodyRoot: bytesutil.PadTo([]byte{}, 32),
ParentRoot: bytesutil.PadTo([]byte("foo"), 32),
},
},
Header_2: &ethpb.SignedBeaconBlockHeader{
Header: &ethpb.BeaconBlockHeader{
ProposerIndex: 1,
Slot: 65,
StateRoot: bytesutil.PadTo([]byte{}, 32),
BodyRoot: bytesutil.PadTo([]byte{}, 32),
ParentRoot: bytesutil.PadTo([]byte("bar"), 32),
},
},
},
beaconState: beaconState,
},
wantErr: "",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
testutil.ResetCache()
sk := sks[tt.args.slashing.Header_1.Header.ProposerIndex]
d, err := helpers.Domain(tt.args.beaconState.Fork(), helpers.SlotToEpoch(tt.args.slashing.Header_1.Header.Slot), params.BeaconConfig().DomainBeaconProposer, tt.args.beaconState.GenesisValidatorRoot())
require.NoError(t, err)
if tt.args.slashing.Header_1.Signature == nil {
sr, err := helpers.ComputeSigningRoot(tt.args.slashing.Header_1.Header, d)
require.NoError(t, err)
tt.args.slashing.Header_1.Signature = sk.Sign(sr[:]).Marshal()
}
if tt.args.slashing.Header_2.Signature == nil {
sr, err := helpers.ComputeSigningRoot(tt.args.slashing.Header_2.Header, d)
require.NoError(t, err)
tt.args.slashing.Header_2.Signature = sk.Sign(sr[:]).Marshal()
}
if err := blocks.VerifyProposerSlashing(tt.args.beaconState, tt.args.slashing); (err != nil || tt.wantErr != "") && err.Error() != tt.wantErr {
t.Errorf("VerifyProposerSlashing() error = %v, wantErr %v", err, tt.wantErr)
}

@@ -26,20 +26,19 @@ import (
// mix = xor(get_randao_mix(state, epoch), hash(body.randao_reveal))
// state.randao_mixes[epoch % EPOCHS_PER_HISTORICAL_VECTOR] = mix
func ProcessRandao(
ctx context.Context,
_ context.Context,
beaconState *stateTrie.BeaconState,
b *ethpb.SignedBeaconBlock,
) (*stateTrie.BeaconState, error) {
if b.Block == nil || b.Block.Body == nil {
return nil, errors.New("block and block body can't be nil")
if err := helpers.VerifyNilBeaconBlock(b); err != nil {
return nil, err
}

body := b.Block.Body
buf, proposerPub, domain, err := randaoSigningData(beaconState)
if err != nil {
return nil, err
}
if err := verifySignature(buf, proposerPub[:], body.RandaoReveal, domain); err != nil {
if err := verifySignature(buf, proposerPub, body.RandaoReveal, domain); err != nil {
return nil, errors.Wrap(err, "could not verify block randao")
}

@@ -67,7 +66,7 @@ func ProcessRandaoNoVerify(
// If block randao passed verification, we XOR the state's latest randao mix with the block's
// randao and update the state's corresponding latest randao mix value.
latestMixesLength := params.BeaconConfig().EpochsPerHistoricalVector
latestMixSlice, err := beaconState.RandaoMixAtIndex(currentEpoch % latestMixesLength)
latestMixSlice, err := beaconState.RandaoMixAtIndex(uint64(currentEpoch % latestMixesLength))
if err != nil {
return nil, err
}
@@ -78,7 +77,7 @@ func ProcessRandaoNoVerify(
for i, x := range blockRandaoReveal {
latestMixSlice[i] ^= x
}
if err := beaconState.UpdateRandaoMixesAtIndex(currentEpoch%latestMixesLength, latestMixSlice); err != nil {
if err := beaconState.UpdateRandaoMixesAtIndex(uint64(currentEpoch%latestMixesLength), latestMixSlice); err != nil {
return nil, err
}
return beaconState, nil
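The randao update above XORs the hash of the block's randao reveal into the state's current mix for the epoch, as the spec comment describes. A minimal standalone sketch of that byte-wise XOR, using SHA-256 as a stand-in for the beacon chain hash helper:

package main

import (
	"crypto/sha256"
	"fmt"
)

// xorMix returns the current mix XORed byte-wise with the hash of the reveal.
func xorMix(currentMix [32]byte, randaoReveal []byte) [32]byte {
	revealHash := sha256.Sum256(randaoReveal)
	for i := range currentMix {
		currentMix[i] ^= revealHash[i]
	}
	return currentMix
}

func main() {
	var mix [32]byte // zero mix for illustration
	updated := xorMix(mix, []byte("randao reveal bytes"))
	fmt.Printf("%x\n", updated[:4])
}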
@@ -1,11 +1,11 @@
package blocks_test

import (
"bytes"
"context"
"encoding/binary"
"testing"

types "github.com/prysmaticlabs/eth2-types"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
@@ -21,9 +21,9 @@ func TestProcessRandao_IncorrectProposerFailsVerification(t *testing.T) {
// We fetch the proposer's index as that is whom the RANDAO will be verified against.
proposerIdx, err := helpers.BeaconProposerIndex(beaconState)
require.NoError(t, err)
epoch := uint64(0)
epoch := types.Epoch(0)
buf := make([]byte, 32)
binary.LittleEndian.PutUint64(buf, epoch)
binary.LittleEndian.PutUint64(buf, uint64(epoch))
domain, err := helpers.Domain(beaconState.Fork(), epoch, params.BeaconConfig().DomainRandao, beaconState.GenesisValidatorRoot())
require.NoError(t, err)
root, err := (&pb.SigningData{ObjectRoot: buf, Domain: domain}).HashTreeRoot()
@@ -64,13 +64,7 @@ func TestProcessRandao_SignatureVerifiesAndUpdatesLatestStateMixes(t *testing.T)
require.NoError(t, err, "Unexpected error processing block randao")
currentEpoch := helpers.CurrentEpoch(beaconState)
mix := newState.RandaoMixes()[currentEpoch%params.BeaconConfig().EpochsPerHistoricalVector]

if bytes.Equal(mix, params.BeaconConfig().ZeroHash[:]) {
t.Errorf(
"Expected empty signature to be overwritten by randao reveal, received %v",
params.BeaconConfig().EmptySignature,
)
}
assert.DeepNotEqual(t, params.BeaconConfig().ZeroHash[:], mix, "Expected empty signature to be overwritten by randao reveal")
}

func TestRandaoSignatureSet_OK(t *testing.T) {
@@ -86,7 +80,7 @@ func TestRandaoSignatureSet_OK(t *testing.T) {
},
}

set, _, err := blocks.RandaoSignatureSet(beaconState, block.Body)
set, err := blocks.RandaoSignatureSet(beaconState, block.Body)
require.NoError(t, err)
verified, err := set.Verify()
require.NoError(t, err)

@@ -5,6 +5,7 @@ import (
"encoding/binary"

"github.com/pkg/errors"
types "github.com/prysmaticlabs/eth2-types"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
@@ -15,7 +16,7 @@ import (
)

// retrieves the signature set from the raw data, public key,signature and domain provided.
func retrieveSignatureSet(signedData []byte, pub []byte, signature []byte, domain []byte) (*bls.SignatureSet, error) {
func signatureSet(signedData, pub, signature, domain []byte) (*bls.SignatureSet, error) {
publicKey, err := bls.PublicKeyFromBytes(pub)
if err != nil {
return nil, errors.Wrap(err, "could not convert bytes to public key")
@@ -36,8 +37,8 @@ func retrieveSignatureSet(signedData []byte, pub []byte, signature []byte, domai
}

// verifies the signature from the raw data, public key and domain provided.
func verifySignature(signedData []byte, pub []byte, signature []byte, domain []byte) error {
set, err := retrieveSignatureSet(signedData, pub, signature, domain)
func verifySignature(signedData, pub, signature, domain []byte) error {
set, err := signatureSet(signedData, pub, signature, domain)
if err != nil {
return err
}
@@ -70,7 +71,7 @@ func VerifyBlockSignature(beaconState *stateTrie.BeaconState, block *ethpb.Signe
return err
}
proposerPubKey := proposer.PublicKey
return helpers.VerifyBlockSigningRoot(block.Block, proposerPubKey[:], block.Signature, domain)
return helpers.VerifyBlockSigningRoot(block.Block, proposerPubKey, block.Signature, domain)
}

// BlockSignatureSet retrieves the block signature set from the provided block and its corresponding state.
@@ -85,23 +86,23 @@ func BlockSignatureSet(beaconState *stateTrie.BeaconState, block *ethpb.SignedBe
return nil, err
}
proposerPubKey := proposer.PublicKey
return helpers.RetrieveBlockSignatureSet(block.Block, proposerPubKey, block.Signature, domain)
return helpers.BlockSignatureSet(block.Block, proposerPubKey, block.Signature, domain)
}

// RandaoSignatureSet retrieves the relevant randao specific signature set object
// from a block and its corresponding state.
func RandaoSignatureSet(beaconState *stateTrie.BeaconState,
body *ethpb.BeaconBlockBody,
) (*bls.SignatureSet, *stateTrie.BeaconState, error) {
) (*bls.SignatureSet, error) {
buf, proposerPub, domain, err := randaoSigningData(beaconState)
if err != nil {
return nil, nil, err
return nil, err
}
set, err := retrieveSignatureSet(buf, proposerPub[:], body.RandaoReveal, domain)
set, err := signatureSet(buf, proposerPub, body.RandaoReveal, domain)
if err != nil {
return nil, nil, err
return nil, err
}
return set, beaconState, nil
return set, nil
}
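RandaoSignatureSet now returns only the signature set, and callers batch-verify it with set.Verify() as the test above does. A conceptual standalone sketch of that collect-then-verify pattern, with simplified stand-in types rather than the prysm shared/bls API:

package main

import "fmt"

type sigEntry struct {
	message   []byte
	publicKey []byte
	signature []byte
}

// signatureSet collects triples so they can be checked in one batch later,
// instead of verifying each signature as it is encountered.
type signatureSet struct {
	entries []sigEntry
}

func (s *signatureSet) add(msg, pub, sig []byte) {
	s.entries = append(s.entries, sigEntry{message: msg, publicKey: pub, signature: sig})
}

// verify would normally hand the whole batch to a BLS backend; here it only
// checks that every entry is complete, to keep the sketch self-contained.
func (s *signatureSet) verify() (bool, error) {
	for _, e := range s.entries {
		if len(e.message) == 0 || len(e.publicKey) == 0 || len(e.signature) == 0 {
			return false, fmt.Errorf("incomplete signature set entry")
		}
	}
	return true, nil
}

func main() {
	set := &signatureSet{}
	set.add([]byte("signing root"), []byte("pubkey"), []byte("sig"))
	ok, err := set.verify()
	fmt.Println(ok, err) // true <nil>
}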
// retrieves the randao related signing data from the state.
@@ -114,7 +115,7 @@ func randaoSigningData(beaconState *stateTrie.BeaconState) ([]byte, []byte, []by

currentEpoch := helpers.SlotToEpoch(beaconState.Slot())
buf := make([]byte, 32)
binary.LittleEndian.PutUint64(buf, currentEpoch)
binary.LittleEndian.PutUint64(buf, uint64(currentEpoch))

domain, err := helpers.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainRandao, beaconState.GenesisValidatorRoot())
if err != nil {
@@ -138,14 +139,17 @@ func createAttestationSignatureSet(ctx context.Context, beaconState *stateTrie.B
if err != nil {
return nil, err
}
ia := attestationutil.ConvertToIndexed(ctx, a, c)
ia, err := attestationutil.ConvertToIndexed(ctx, a, c)
if err != nil {
return nil, err
}
if err := attestationutil.IsValidAttestationIndices(ctx, ia); err != nil {
return nil, err
}
indices := ia.AttestingIndices
pubkeys := make([][]byte, len(indices))
for i := 0; i < len(indices); i++ {
pubkeyAtIdx := beaconState.PubkeyAtIndex(indices[i])
pubkeyAtIdx := beaconState.PubkeyAtIndex(types.ValidatorIndex(indices[i]))
pubkeys[i] = pubkeyAtIdx[:]
}
aggP, err := bls.AggregatePublicKeys(pubkeys)