Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-09 21:38:05 -05:00)

Compare commits (699 commits)
(Commit table: 699 commits in this comparison, from 75521fffbd at the head back to 1be950f90c. Only bare SHA-1 links survived capture; the author, date, and message columns are empty and are omitted here.)
.bazelrc (new file, 24 lines)
@@ -0,0 +1,24 @@
# Print warnings for tests with inappropriate test size or timeout.
test --test_verbose_timeout_warnings

# Only build test targets when running bazel test //...
test --build_tests_only
test --test_output=errors

# Fix for rules_docker. See: https://github.com/bazelbuild/rules_docker/issues/842
build --host_force_python=PY2
test --host_force_python=PY2
run --host_force_python=PY2

# Networking is blocked for tests by default, add "requires-network" tag to your test if networking
# is required within the sandbox. This flag is no longer experimental after 0.29.0.
# Network sandboxing only works on linux.
--experimental_sandbox_default_allow_network=false

# Use minimal protobufs at runtime
run --define ssz=minimal

# Prevent PATH changes from rebuilding when switching from IDE to command line.
build --incompatible_strict_action_env
test --incompatible_strict_action_env
run --incompatible_strict_action_env
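These options load for every Bazel invocation in the workspace; the remote-cache configuration below imports them via `import %workspace%/.bazelrc`. The `run --define ssz=minimal` line applies the minimal-spec SSZ define to `bazel run` invocations, matching the explicit `--define ssz=minimal` flags that appear in the README's testnet build steps later on this page.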
(unnamed file: remote build cache configuration)
@@ -2,8 +2,7 @@
# across machines, developers, and workspaces.
#
# This config is loaded from https://github.com/bazelbuild/bazel-toolchains/blob/master/bazelrc/latest.bazelrc
build:remote-cache --remote_cache=remotebuildexecution.googleapis.com
build:remote-cache --tls_enabled=true
build:remote-cache --remote_cache=grpcs://remotebuildexecution.googleapis.com
build:remote-cache --remote_timeout=3600
build:remote-cache --auth_enabled=true
build:remote-cache --spawn_strategy=standalone
@@ -11,12 +10,26 @@ build:remote-cache --strategy=Javac=standalone
build:remote-cache --strategy=Closure=standalone
build:remote-cache --strategy=Genrule=standalone

# Build results backend.
build:remote-cache --bes_results_url="https://source.cloud.google.com/results/invocations/"
build:remote-cache --bes_backend=buildeventservice.googleapis.com
build:remote-cache --bes_timeout=60s
build:remote-cache --project_id=prysmaticlabs
build:remote-cache --bes_upload_mode=fully_async

# Prysm specific remote-cache properties.
build:remote-cache --disk_cache=
build:remote-cache --jobs=50
build:remote-cache --host_platform_remote_properties_override='properties:{name:\"cache-silo-key\" value:\"prysm\"}'
build:remote-cache --remote_instance_name=projects/prysmaticlabs/instances/default_instance

build:remote-cache --experimental_remote_download_outputs=minimal
build:remote-cache --experimental_inmemory_jdeps_files
build:remote-cache --experimental_inmemory_dotd_files

# Import workspace options.
import %workspace%/.bazelrc

startup --host_jvm_args=-Xmx1000m --host_jvm_args=-Xms1000m
build --experimental_strict_action_env
build --disk_cache=/tmp/bazelbuilds
build --experimental_multi_threaded_digest
@@ -28,6 +41,8 @@ build --curses=yes --color=yes
build --keep_going
build --test_output=errors
build --flaky_test_attempts=5
build --test_timeout=5,60,-1,-1
build --jobs=50
build --stamp
test --local_test_jobs=2
# Disabled race detection due to unstable test results under constrained environment build kite
# build --features=race
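Note that a named configuration such as `build:remote-cache` stays inactive until selected explicitly, e.g. `bazel build --config=remote-cache //...`, so ordinary local builds keep using the plain `.bazelrc` defaults.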
(unnamed file: code coverage configuration)
@@ -27,3 +27,7 @@ comment:
  layout: "header, diff"
  behavior: default
  require_changes: no

ignore:
  - "**/*.pb.go"
  - "**/*_mock.go"
.dockerignore (new file, 2 lines)
@@ -0,0 +1,2 @@
bazel-*
.git
.github/FUNDING.yml (new file, vendored, 1 line)
@@ -0,0 +1 @@
custom: https://gitcoin.co/grants/24/prysm-by-prysmatic-labs
.gitignore (vendored)
@@ -21,4 +21,8 @@ yarn-error.log
.vscode/

# Ignore password file
password.txt

# go dependancy
/go.mod
/go.sum
(unnamed file, deleted: Solium lint configuration)
@@ -1,18 +0,0 @@
{
  "extends": "solium:recommended",
  "plugins": [
    "security"
  ],
  "rules": {
    "quotes": [
      "error",
      "double"
    ],
    "security/no-inline-assembly": ["warning"],

    "indentation": [
      "error",
      4
    ]
  }
}
(unnamed file: CI configuration)
@@ -12,7 +12,7 @@ matrix:
- go get ${gobuild_args} -t ./...
- go get ${gobuild_args} github.com/golangci/golangci-lint/cmd/golangci-lint
script:
- golangci-lint run
- golangci-lint run --skip-dirs ./proto
email: false
after_success:
- wget https://raw.githubusercontent.com/k3rn31p4nic/travis-ci-discord-webhook/master/send.sh
.well-known/security.txt (new file, 26 lines)
@@ -0,0 +1,26 @@
-----BEGIN PGP SIGNED MESSAGE-----
Hash: SHA512

Contact: mailto:security@prysmaticlabs.com
Encryption: openpgp4fpr:0AE0051D647BA3C1A917AF4072E33E4DF1A5036E
Encryption: openpgp4fpr:341396BAFACC28C5082327F889725027FC8EC0D4
Encryption: openpgp4fpr:8B7814F1B221A8E8AA465FC7BDBF744ADE1A0033
Preferred-Languages: en
Canonical: https://github.com/prysmaticlabs/prysm/tree/master/.well-known/security.txt

-----BEGIN PGP SIGNATURE-----

iQIzBAEBCgAdFiEECuAFHWR7o8GpF69AcuM+TfGlA24FAlzi0WgACgkQcuM+TfGl
A241pw/+Ks3Hxx8eGbjRIeuncuK811FkCiofNJS+MY2p4W2/tIrk48DtLRx8/k5L
Dh1QyypZsqUgofrK7PbGVdEin6oEb2jYbTWUarAVTbhlsUdM4YcxwpgmGVslW7+C
Hm8wMasQZhCkFfakzhfKX5hIQoFaFI/OvtVKIQsodP8dAieCDaGmtfq1Bs1LgFqi
KrpeEdC2XbBQs33ADheC5SdGT1mnatP3VX8cOhLsfoPksYgTSpwK0clkoWs1eZOQ
l1ImfW/FJCpSndBWgBR503ZgaU3Ic+5qxmAIuUP4chl0DFRMlPFEM5OWC6JkkCOd
5kKrXGRmrhgtQg+pA3zqJnFItRj7gxPBA/ypxCkKPrLEkRvbdpdZEl5vAlYkeBL6
iKSLHnMswGKldiYxy7ofam5bM3myhYYNFb25boV5pRptrnoUmWOACHioBGQHwWNt
B0XktD0j7+pCCiJyyYxmOnElsk/Y/u4Tv5pYWvfFuxTF2XOg+P/EH64AIFLWgB1U
VnITxhakxqejCBxZkuVCFNSzt+TXG0NS9EIj/UOYBY+wxrBZ62ITjdA16RS/3n3z
DuIDtxOOwUumbOO32+a5zIb+ARmnocYJviI7FuENb01/U6qb+nm9hQI6oIpSCNsv
Pb4O/ZlOx70U/7mt4Xn/dTKH9bnKOOVhOw00KJWFfAce73AVnLA=
=Uhqg
-----END PGP SIGNATURE-----
BUILD.bazel
@@ -3,10 +3,14 @@ load("@com_github_atlassian_bazel_tools//gometalinter:def.bzl", "gometalinter")
load("@com_github_atlassian_bazel_tools//goimports:def.bzl", "goimports")
load("@io_kubernetes_build//defs:run_in_workspace.bzl", "workspace_binary")
load("@io_bazel_rules_go//go:def.bzl", "nogo")
load("@graknlabs_bazel_distribution//common:rules.bzl", "assemble_targz", "assemble_versioned")
load("//tools:binary_targets.bzl", "binary_targets", "determine_targets")

prefix = "github.com/prysmaticlabs/prysm"

exports_files(["genesis.json"])
exports_files([
    "LICENSE.md",
])

# gazelle:prefix github.com/prysmaticlabs/prysm
gazelle(
@@ -32,6 +36,24 @@ alias(
    ],
)

# Protobuf gRPC compiler without gogoproto. Required for gRPC gateway.
alias(
    name = "grpc_nogogo_proto_compiler",
    actual = "@io_bazel_rules_go//proto:go_grpc",
    visibility = [
        "//proto:__subpackages__",
    ],
)

# Protobuf gRPC gateway compiler
alias(
    name = "grpc_gateway_proto_compiler",
    actual = "@grpc_ecosystem_grpc_gateway//protoc-gen-grpc-gateway:go_gen_grpc_gateway",
    visibility = [
        "//proto:__subpackages__",
    ],
)

gometalinter(
    name = "gometalinter",
    config = "//:.gometalinter.json",
@@ -44,8 +66,8 @@ gometalinter(
goimports(
    name = "goimports",
    display_diffs = True,
    write = False,
    prefix = prefix,
    write = False,
)

workspace_binary(
@@ -55,6 +77,8 @@ workspace_binary(

nogo(
    name = "nogo",
    config = "nogo_config.json",
    visibility = ["//visibility:public"],
    deps = [
        "@org_golang_x_tools//go/analysis/passes/unsafeptr:go_tool_library",
        "@org_golang_x_tools//go/analysis/passes/unreachable:go_tool_library",
@@ -68,7 +92,8 @@ nogo(
        "@org_golang_x_tools//go/analysis/passes/pkgfact:go_tool_library",
        "@org_golang_x_tools//go/analysis/passes/nilness:go_tool_library",
        "@org_golang_x_tools//go/analysis/passes/nilfunc:go_tool_library",
        # "@org_golang_x_tools//go/analysis/passes/lostcancel:go_tool_library",
        # lost cancel ignore doesn't seem to work when running with coverage
        #"@org_golang_x_tools//go/analysis/passes/lostcancel:go_tool_library",
        "@org_golang_x_tools//go/analysis/passes/loopclosure:go_tool_library",
        "@org_golang_x_tools//go/analysis/passes/httpresponse:go_tool_library",
        "@org_golang_x_tools//go/analysis/passes/findcall:go_tool_library",
@@ -86,6 +111,35 @@ nogo(
        "@org_golang_x_tools//go/analysis/passes/inspect:go_tool_library",
        "@org_golang_x_tools//go/analysis/passes/asmdecl:go_tool_library",
    ],
    visibility = ["//visibility:public"],
    config = "nogo_config.json",
)

assemble_versioned(
    name = "assemble-versioned-all",
    tags = ["manual"],
    targets = [
        ":assemble-{}-{}-targz".format(
            pair[0],
            pair[1],
        )
        for pair in binary_targets
    ],
    version_file = "//:VERSION",
)

common_files = {
    "//:LICENSE.md": "LICENSE.md",
    "//:README.md": "README.md",
}

[assemble_targz(
    name = "assemble-{}-{}-targz".format(
        pair[0],
        pair[1],
    ),
    additional_files = determine_targets(pair, common_files),
    output_filename = "prysm-{}-{}".format(
        pair[0],
        pair[1],
    ),
    tags = ["manual"],
) for pair in binary_targets]
(unnamed file: contributing guide)
@@ -63,7 +63,7 @@ $ go test <file_you_are_working_on>
Changes that affect multiple files can be tested with ...

```
$ gometalinter && bazel test
$ golangci-lint run && bazel test //...
```

**10. Stage the file or files that you want to commit.**
@@ -88,10 +88,10 @@ You can use the --amend flag to include previous commits that have not yet been
$ git fetch prysm
```

**13. Rebase your branch atop of the latest version of Prysm.**
**13. Pull latest version of Prysm.**

```
$ git rebase prysm/master
$ git pull origin master
```

If there are conflicts between your edits and those made by others since you started work, Git will ask you to resolve them. To find out which files have conflicts, run ...
@@ -115,10 +115,10 @@ The code from the Prysm repo is inserted between <<< and === while the change yo

**14. Push your changes to your fork of the Prysm repo.**

Rebasing a pull request changes the history on your branch, so Git will reject a normal git push after a rebase. Use a force push to move your changes to your fork of the repo.
Use git push to move your changes to your fork of the repo.

```
$ git push myrepo feature-in-progress-branch -f
$ git push myrepo feature-in-progress-branch
```

**15. Check to be sure your fork of the Prysm repo contains your feature branch with the latest edits.**
INTEROP.md (new file, 93 lines)
@@ -0,0 +1,93 @@
# Prysm Client Interoperability Guide

This README details how to set up Prysm for interop testing for usage with other Ethereum 2.0 clients.

## Installation & Setup

1. Install [Bazel](https://docs.bazel.build/versions/master/install.html) **(Recommended)**
2. `git clone https://github.com/prysmaticlabs/prysm && cd prysm`
3. `bazel build //...`

## Starting from Genesis

Prysm supports a few ways to quickly launch a beacon node from basic configurations:

- `NumValidators + GenesisTime`: Launches a beacon node by deterministically generating a state from a num-validators flag along with a genesis time **(Recommended)**
- `SSZ Genesis`: Launches a beacon node from a .ssz file containing an SSZ-encoded genesis beacon state

## Generating a Genesis State

To set up the necessary files for these quick starts, Prysm provides a tool to generate a `genesis.ssz` from
a deterministically generated set of validator private keys following the official interop YAML format
[here](https://github.com/ethereum/eth2.0-pm/blob/master/interop/mocked_start).

You can use `bazel run //tools/genesis-state-gen` to create a deterministic genesis state for interop.

### Usage

- **--genesis-time** uint: Unix timestamp used as the genesis time in the generated genesis state (defaults to now)
- **--mainnet-config** bool: Select whether the genesis state should be generated with mainnet or minimal (default) params
- **--num-validators** int: Number of validators to deterministically include in the generated genesis state
- **--output-ssz** string: Output filename of the SSZ marshaling of the generated genesis state

The example below creates 64 validator keys, instantiates a genesis state with those 64 validators and with genesis unix timestamp 1567542540,
and finally writes an SSZ-encoded output to ~/Desktop/genesis.ssz. This file can be used to kickstart the beacon chain in the next section.

```
bazel run //tools/genesis-state-gen -- --output-ssz ~/Desktop/genesis.ssz --num-validators 64 --genesis-time 1567542540
```

## Launching a Beacon Node + Validator Client

### Launching from Pure CLI Flags

Open up two terminal windows. In the first, run:

```
bazel run //beacon-chain -- \
--no-genesis-delay \
--bootstrap-node= \
--deposit-contract 0xD775140349E6A5D12524C6ccc3d6A1d4519D4029 \
--clear-db \
--interop-num-validators 64 \
--interop-eth1data-votes
```

This will deterministically generate a beacon genesis state and start
the system with 64 validators and the genesis time set to the current unix timestamp.
Wait a bit until your beacon chain starts, and in the other window:

```
bazel run //validator -- --interop-num-validators 64
```

This will launch and kickstart the system with your 64 validators performing their duties accordingly.

### Launching from `genesis.ssz`

Assuming you generated a `genesis.ssz` file with 64 validators, open up two terminal windows. In the first, run:

```
bazel run //beacon-chain -- \
--no-genesis-delay \
--bootstrap-node= \
--deposit-contract 0xD775140349E6A5D12524C6ccc3d6A1d4519D4029 \
--clear-db \
--interop-genesis-state /path/to/genesis.ssz \
--interop-eth1data-votes
```

Wait a bit until your beacon chain starts, and in the other window:

```
bazel run //validator -- --interop-num-validators 64
```

This will launch and kickstart the system with your 64 validators performing their duties accordingly.
README.md (212 lines changed)
@@ -1,122 +1,206 @@
# Prysmatic Labs Ethereum Serenity Implementation

[](https://buildkite.com/prysmatic-labs/prysm)

This is the main repository for the Go implementation of the Ethereum 2.0 Serenity [Prysmatic Labs](https://prysmaticlabs.com).

Before you begin, check out our [official documentation portal](https://prysmaticlabs.gitbook.io/prysm/) and join our active chat room on Discord or Gitter below:
# Prysm: Ethereum 'Serenity' 2.0 Go Implementation

[](https://buildkite.com/prysmatic-labs/prysm)
[](https://github.com/ethereum/eth2.0-specs/commit/452ecf8e27c7852c7854597f2b1bb4a62b80c7ec)
[](https://discord.gg/KSA7rPr)
[](https://gitter.im/prysmaticlabs/geth-sharding?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)

Also, read our [Roadmap Reference Implementation Doc](https://github.com/prysmaticlabs/prysm/blob/master/docs/ROADMAP.md). This doc provides a background on the milestones we aim for the project to achieve.
This is the Core repository for Prysm, [Prysmatic Labs](https://prysmaticlabs.com)' [Go](https://golang.org/) implementation of the Ethereum protocol 2.0 (Serenity).

### Need assistance?
A more detailed set of installation and usage instructions as well as explanations of each component are available on our [official documentation portal](https://prysmaticlabs.gitbook.io/prysm/). If you still have questions, feel free to stop by either our [Discord](https://discord.gg/KSA7rPr) or [Gitter](https://gitter.im/prysmaticlabs/geth-sharding?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) and a member of the team or our community will be happy to assist you.

**Interested in what's next?** Be sure to read our [Roadmap Reference Implementation](https://github.com/prysmaticlabs/prysm/blob/master/docs/ROADMAP.md) document. This page outlines the basics of sharding as well as the various short-term milestones that we hope to achieve over the coming year.

### Come join the testnet!
Participation is now open to the public in our testnet release for Ethereum 2.0 phase 0. Visit [prylabs.net](https://prylabs.net) for more information on the project itself or to sign up as a validator on the network.

# Table of Contents

- [Join Our Testnet](#join-our-testnet)
- [Dependencies](#dependencies)
- [Installation](#installation)
- [Run Via Docker](#run-via-docker-recommended)
- [Run Via Bazel](#run-via-bazel)
- [Prysm Main Components](#prysm-main-components)
- [Running an Ethereum 2.0 Beacon Node](#running-an-ethereum-20-beacon-node)
- [Staking ETH: Running a Validator Client](#staking-eth-running-a-validator-client)
- [Testing](#testing)
- [Build Via Docker](#build-via-docker)
- [Build Via Bazel](#build-via-bazel)
- [Running an Ethereum 2.0 Beacon Node](#running-an-ethereum-20-beacon-node)
- [Staking ETH: Running a Validator Client](#staking-eth-running-a-validator-client)
- [Testing Prysm](#testing-prysm)
- [Contributing](#contributing)
- [License](#license)

# Join Our Testnet
## Dependencies
Prysm can be installed either with Docker **(recommended method)** or using our build tool, Bazel. The below instructions include sections for performing both.

You can now participate in our public testnet release for Ethereum 2.0 phase 0. Visit [prylabs.net](https://prylabs.net) 💎 to participate!
**For Docker installations:**
- The latest release of [Docker](https://docs.docker.com/install/)

# Installing Prysm
**For Bazel installations:**
- The latest release of [Bazel](https://docs.bazel.build/versions/master/install.html)
- A modern UNIX operating system (MacOS included)

### Installation Options
You can either choose to run our system via:
- Our latest [release](https://github.com/prysmaticlabs/prysm/releases) **(Easiest)**
- Using Docker **(Recommended)**
- Using Our Build Tool, Bazel

### Fetching via Docker (Recommended)
Docker is a convenient way to run Prysm, as all you need to do is fetch the latest images:
## Installation

### Build via Docker
1. Ensure you are running the most recent version of Docker by issuing the command:
```
docker -v
```
2. To pull the Prysm images from the server, issue the following commands:
```
docker pull gcr.io/prysmaticlabs/prysm/validator:latest
docker pull gcr.io/prysmaticlabs/prysm/beacon-chain:latest
```
This process will also install any related dependencies.

### Build Via Bazel
First, clone our repository:

```
git clone https://github.com/prysmaticlabs/prysm
```

Download the Bazel build tool by Google here and ensure it works by typing:
### Build via Bazel

1. Open a terminal window. Ensure you are running the most recent version of Bazel by issuing the command:
```
bazel version
```

Bazel manages all of the dependencies for you (including go and necessary compilers) so you are all set to build prysm. Then, build both parts of our system: a beacon chain node implementation, and a validator client:

2. Clone this repository and enter the directory:
```
git clone https://github.com/prysmaticlabs/prysm
cd prysm
```
3. Build both the beacon chain node implementation and the validator client:
```
bazel build //beacon-chain:beacon-chain
bazel build //validator:validator
```
Bazel will automatically pull and install any dependencies as well, including Go and necessary compilers.

# Prysm Main Components
Prysm ships with two important components: a beacon node and a validator client. The beacon node is the server that performs the heavy lifting of Ethereum 2.0. A validator client is another piece of software that securely connects to the beacon node and allows you to stake 3.2 Goerli ETH in order to secure the network. You'll be mostly interacting with the validator client to manage your stake.
Another critical component of Ethereum 2.0 is the Validator Deposit Contract, which is a smart contract deployed on the Ethereum 1.0 chain which can be used for current holders of ETH to do a one-way transfer into Ethereum 2.0.

### Running an Ethereum 2.0 Beacon Node
With docker:
4. Build the configuration for the Prysm testnet by issuing the commands:

```
docker run -v /tmp/prysm-data:/data -p 4000:4000 \
bazel build --define ssz=minimal //beacon-chain:beacon-chain
bazel build --define ssz=minimal //validator:validator
```

The binaries will be built in an architecture-dependent subdirectory of `bazel-bin`, and are supplied as part of Bazel's build process. To fetch the location, issue the command:

```
$ bazel build --define ssz=minimal //beacon-chain:beacon-chain
...
Target //beacon-chain:beacon-chain up-to-date:
bazel-bin/beacon-chain/linux_amd64_stripped/beacon-chain
...
```

In the example above, the beacon chain binary has been created in `bazel-bin/beacon-chain/linux_amd64_stripped/beacon-chain`.

## Running an Ethereum 2.0 Beacon Node
To understand the role that both the beacon node and validator play in Prysm, see [this section of our documentation](https://prysmaticlabs.gitbook.io/prysm/how-prysm-works/overview-technical).

### Running via Docker

**Docker on Linux/Mac:**

To start your beacon node, issue the following command:

```
docker run -v $HOME/prysm-data:/data -p 4000:4000 \
--name beacon-node \
gcr.io/prysmaticlabs/prysm/beacon-chain:latest \
--no-genesis-delay \
--datadir=/data
--clear-db
```

To start your beacon node with bazel:
(Optional) If you want to enable gRPC, then run this command instead of the one above:

```
bazel run //beacon-chain -- --clear-db --datadir=/tmp/prysm-data
docker run -v $HOME/prysm-data:/data -p 4000:4000 -p 7000:7000 \
--name beacon-node \
gcr.io/prysmaticlabs/prysm/beacon-chain:latest \
--datadir=/data \
--no-genesis-delay \
--grpc-gateway-port=7000
```

This will sync you up with the latest head block in the network, and then you'll have a ready beacon node.
The chain will then be waiting for you to deposit 3.2 Goerli ETH into the Validator Deposit Contract before your validator can become active! Now, you'll need to create a validator client to connect to this node and stake 3.2 Goerli ETH to participate as a validator in Ethereum 2.0's Proof of Stake system.
You can stop the beacon node using `Ctrl+c` or with the following command:

### Staking ETH: Running a Validator Client
Once your beacon node is up, you'll need to attach a validator client as a separate process. Each validator represents 3.2 Goerli ETH being staked in the system, so you can spin up as many as you want to have more at stake in the network.
```
docker stop beacon-node
```

**Activating Your Validator: Depositing 3.2 Goerli ETH**
To restart the beacon node, issue the command:

Using your validator deposit data from the previous step, use the instructions in https://alpha.prylabs.net/participate to deposit.
```
docker start -ai beacon-node
```

It'll take a while for the nodes in the network to process your deposit, but once you're active, your validator will begin doing its responsibility! In your validator client, you'll be able to frequently see your validator balance as it goes up. If you ever go offline for a while, you'll start gradually losing your deposit until you get kicked out of the system. Congratulations, you are now running Ethereum 2.0 Phase 0 :).
To delete a corrupted container, issue the command:

# Testing
```
docker rm beacon-node
```

To run the unit tests of our system do:
To recreate a deleted container and refresh the chain database, issue the start command with an additional `--force-clear-db` parameter:

```
docker run -it -v $HOME/prysm-data:/data -p 4000:4000 --name beacon-node \
gcr.io/prysmaticlabs/prysm/beacon-chain:latest \
--datadir=/data \
--force-clear-db
```

**Docker on Windows:**

1) You will need to share the local drive you wish to mount to the container (e.g. C:).
   1. Enter Docker settings (right click the tray icon)
   2. Click 'Shared Drives'
   3. Select a drive to share
   4. Click 'Apply'

2) You will next need to create a directory named ```/tmp/prysm-data/``` within your selected shared Drive. This folder will be used as a local data directory for Beacon Node chain data as well as account and keystore information required by the validator. Docker will **not** create this directory if it does not exist already. For the purposes of these instructions, it is assumed that ```C:``` is your prior-selected shared Drive.

3) To run the beacon node, issue the command:
```
docker run -it -v c:/tmp/prysm-data:/data -p 4000:4000 gcr.io/prysmaticlabs/prysm/beacon-chain:latest --datadir=/data
```

### Running via Bazel

1) To start your Beacon Node with Bazel, issue the command:
```
bazel run //beacon-chain -- --datadir=/tmp/prysm-data
```
This will sync up the Beacon Node with the latest head block in the network. Note that the beacon node must be **completely synced** before attempting to initialise a validator client, otherwise the validator will not be able to complete the deposit and funds will be lost.

## Staking ETH: Running a Validator Client

Once your beacon node is up, the chain will be waiting for you to deposit 3.2 Goerli ETH into the Validator Deposit Contract to activate your validator (discussed in the section below). First though, you will need to create a validator client to connect to this node in order to stake and participate. Each validator represents 3.2 Goerli ETH being staked in the system, and it is possible to spin up as many as you desire in order to have more stake in the network.

### Activating Your Validator: Depositing 3.2 Goerli ETH

Using your validator deposit data from the previous step, follow the instructions found on https://prylabs.net/participate to make a deposit.

It will take a while for the nodes in the network to process your deposit, but once your node is active, the validator will begin doing its responsibility. In your validator client, you will be able to frequently see your validator balance as it goes up over time. Note that, should your node ever go offline for a long period, you'll start gradually losing your deposit until you are removed from the system.

### Starting the validator with Bazel

1. Open another terminal window. Enter your Prysm directory and run the validator by issuing the following command:
```
cd prysm
bazel run //validator
```
**Congratulations, you are now running Ethereum 2.0 Phase 0!**

## Testing Prysm

To run the unit tests of our system, issue the command:
```
bazel test //...
```

To run our linter, make sure you have [golangci-lint](https://https://github.com/golangci/golangci-lint) installed and then run:
To run the linter, make sure you have [golangci-lint](https://github.com/golangci/golangci-lint) installed and then issue the command:
```
golangci-lint run
```

# Contributing

## Contributing
We have put all of our contribution guidelines into [CONTRIBUTING.md](https://github.com/prysmaticlabs/prysm/blob/master/CONTRIBUTING.md)! Check it out to get started.

# License

## License
[GNU General Public License v3.0](https://www.gnu.org/licenses/gpl-3.0.en.html)
TESTNET.md (new file, 43 lines)
@@ -0,0 +1,43 @@
# Testnet

The Prysmatic Labs test network is available for anyone to join. The easiest way to participate is by joining through the website, https://prylabs.net.

## Interop

For developers looking to connect a client other than Prysm to the test network, here is the relevant information for compatibility.

**Spec version** - [v0.8.3](https://github.com/ethereum/eth2.0-specs/tree/v0.8.3)

**ETH 1 Deposit Contract Address** - See https://prylabs.net/contract. This contract is deployed on the [goerli](https://goerli.net/) network.

**Genesis time** - The ETH1 block time in which the 64th deposit to start ETH2 was included. This is NOT midnight of the next day as required by spec.

### ETH 2 Configuration

Use the [minimal config](https://github.com/ethereum/eth2.0-specs/blob/v0.8.3/configs/minimal.yaml) with the following changes.

| field | value |
|-------|-------|
| MIN_DEPOSIT_AMOUNT | 100 |
| MAX_EFFECTIVE_BALANCE | 3.2 * 1e9 |
| EJECTION_BALANCE | 1.6 * 1e9 |
| EFFECTIVE_BALANCE_INCREMENT | 0.1 * 1e9 |
| ETH1_FOLLOW_DISTANCE | 16 |
| GENESIS_FORK_VERSION | See [latest code](https://github.com/prysmaticlabs/prysm/blob/master/shared/params/config.go#L236) |

These parameters reduce the minimal config to 1/10 of the required ETH.
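Inside Prysm these overrides correspond to fields of the beacon chain configuration in `shared/params` (the table's last row links into `shared/params/config.go`). Below is a minimal sketch of applying them in Go; the helper names `MinimalSpecConfig` and `OverrideBeaconConfig` and the exact field names are assumptions based on that package, not something this page confirms:

```go
package main

import "github.com/prysmaticlabs/prysm/shared/params"

// applyTestnetOverrides layers the table's values on top of the spec's
// minimal.yaml configuration. All balance values are in Gwei.
func applyTestnetOverrides() {
	c := params.MinimalSpecConfig()         // assumed helper: minimal spec defaults
	c.MinDepositAmount = 100                // MIN_DEPOSIT_AMOUNT
	c.MaxEffectiveBalance = 3.2 * 1e9       // MAX_EFFECTIVE_BALANCE
	c.EjectionBalance = 1.6 * 1e9           // EJECTION_BALANCE
	c.EffectiveBalanceIncrement = 0.1 * 1e9 // EFFECTIVE_BALANCE_INCREMENT
	c.Eth1FollowDistance = 16               // ETH1_FOLLOW_DISTANCE
	params.OverrideBeaconConfig(c)          // later params.BeaconConfig() calls observe these values
}

func main() {
	applyTestnetOverrides()
}
```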
We have a genesis.ssz file available for download [here](https://prysmaticlabs.com/uploads/genesis.ssz).

### Connecting to the network

We have a libp2p bootstrap node available at `/dns4/prylabs.net/tcp/30001/p2p/16Uiu2HAm7Qwe19vz9WzD2Mxn7fXd1vgHHp4iccuyq7TxwRXoAGfc`.

Some of the Prysmatic Labs hosted nodes are behind a libp2p relay, so your libp2p implementation should support relay functionality.
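The bootstrap address above is a standard libp2p multiaddr, so any libp2p stack can parse it directly. Here is a sketch using the current go-libp2p API (which postdates the libp2p version Prysm used at the time; everything in this snippet is illustrative):

```go
package main

import (
	"context"
	"fmt"

	"github.com/libp2p/go-libp2p"
	"github.com/libp2p/go-libp2p/core/peer"
	ma "github.com/multiformats/go-multiaddr"
)

func main() {
	// Split the published multiaddr into a peer ID and dialable addresses.
	maddr, err := ma.NewMultiaddr("/dns4/prylabs.net/tcp/30001/p2p/16Uiu2HAm7Qwe19vz9WzD2Mxn7fXd1vgHHp4iccuyq7TxwRXoAGfc")
	if err != nil {
		panic(err)
	}
	info, err := peer.AddrInfoFromP2pAddr(maddr)
	if err != nil {
		panic(err)
	}

	// A default host; a real ETH2 client would also enable relay support,
	// since some of the hosted nodes sit behind a libp2p relay.
	h, err := libp2p.New()
	if err != nil {
		panic(err)
	}
	defer h.Close()

	if err := h.Connect(context.Background(), *info); err != nil {
		fmt.Println("connect failed:", err)
		return
	}
	fmt.Println("connected to bootstrap node", info.ID)
}
```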
### Other

Undoubtedly, you will have bugs. Reach out to us on [Discord](https://discord.gg/KSA7rPr) and be sure to capture issues on GitHub at https://github.com/prysmaticlabs/prysm/issues.

If you have instructions for your client, we would love to attempt this on your behalf. Kindly send over the instructions via GitHub issue, PR, email to team@prysmaticlabs.com, or Discord.
(unnamed file: BUILD file for //beacon-chain)
@@ -1,6 +1,8 @@
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library", "go_test")
load("@io_bazel_rules_docker//go:image.bzl", "go_image")
load("@io_bazel_rules_docker//container:container.bzl", "container_push")
load("@io_bazel_rules_docker//container:container.bzl", "container_bundle")
load("//tools:binary_targets.bzl", "binary_targets")
load("@io_bazel_rules_docker//contrib:push-all.bzl", "docker_push")

go_library(
    name = "go_default_library",
@@ -11,15 +13,20 @@ go_library(
    importpath = "github.com/prysmaticlabs/prysm/beacon-chain",
    visibility = ["//beacon-chain:__subpackages__"],
    deps = [
        "//beacon-chain/flags:go_default_library",
        "//beacon-chain/node:go_default_library",
        "//beacon-chain/utils:go_default_library",
        "//shared/cmd:go_default_library",
        "//shared/debug:go_default_library",
        "//shared/featureconfig:go_default_library",
        "//shared/logutil:go_default_library",
        "//shared/version:go_default_library",
        "@com_github_ipfs_go_log//:go_default_library",
        "@com_github_joonix_log//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@com_github_urfave_cli//:go_default_library",
        "@com_github_whyrusleeping_go_logging//:go_default_library",
        "@com_github_x_cray_logrus_prefixed_formatter//:go_default_library",
        "@org_uber_go_automaxprocs//:go_default_library",
    ],
)
@@ -37,27 +44,36 @@ go_image(
    tags = ["manual"],
    visibility = ["//visibility:private"],
    deps = [
        "//beacon-chain/flags:go_default_library",
        "//beacon-chain/node:go_default_library",
        "//beacon-chain/utils:go_default_library",
        "//shared/cmd:go_default_library",
        "//shared/debug:go_default_library",
        "//shared/featureconfig:go_default_library",
        "//shared/logutil:go_default_library",
        "//shared/version:go_default_library",
        "@com_github_ipfs_go_log//:go_default_library",
        "@com_github_joonix_log//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@com_github_urfave_cli//:go_default_library",
        "@com_github_whyrusleeping_go_logging//:go_default_library",
        "@com_github_x_cray_logrus_prefixed_formatter//:go_default_library",
        "@org_uber_go_automaxprocs//:go_default_library",
    ],
)

container_push(
    name = "push_image",
    format = "Docker",
    image = ":image",
    registry = "gcr.io",
    repository = "prysmaticlabs/prysm/beacon-chain",
    tag = "latest",
container_bundle(
    name = "image_bundle",
    images = {
        "gcr.io/prysmaticlabs/prysm/beacon-chain:latest": ":image",
        "gcr.io/prysmaticlabs/prysm/beacon-chain:{DOCKER_TAG}": ":image",
    },
    tags = ["manual"],
)

docker_push(
    name = "push_images",
    bundle = ":image_bundle",
    tags = ["manual"],
    visibility = ["//visibility:private"],
)

go_binary(
@@ -65,3 +81,23 @@ go_binary(
    embed = [":go_default_library"],
    visibility = ["//beacon-chain:__subpackages__"],
)

go_test(
    name = "go_default_test",
    size = "small",
    srcs = ["usage_test.go"],
    embed = [":go_default_library"],
    deps = ["@com_github_urfave_cli//:go_default_library"],
)

[go_binary(
    name = "beacon-chain-{}-{}".format(
        pair[0],
        pair[1],
    ),
    embed = [":go_default_library"],
    goarch = pair[1],
    goos = pair[0],
    tags = ["manual"],
    visibility = ["//visibility:public"],
) for pair in binary_targets]
(unnamed file: BUILD file, //beacon-chain/attestation rewritten as //beacon-chain/archiver)
@@ -2,27 +2,19 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = [
        "service.go",
        "vote_metrics.go",
    ],
    importpath = "github.com/prysmaticlabs/prysm/beacon-chain/attestation",
    srcs = ["service.go"],
    importpath = "github.com/prysmaticlabs/prysm/beacon-chain/archiver",
    visibility = ["//beacon-chain:__subpackages__"],
    deps = [
        "//beacon-chain/cache:go_default_library",
        "//beacon-chain/blockchain:go_default_library",
        "//beacon-chain/core/epoch:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/core/state:go_default_library",
        "//beacon-chain/core/validators:go_default_library",
        "//beacon-chain/db:go_default_library",
        "//proto/beacon/p2p/v1:go_default_library",
        "//shared/bitutil:go_default_library",
        "//shared/bytesutil:go_default_library",
        "//shared/event:go_default_library",
        "//shared/hashutil:go_default_library",
        "//shared/messagehandler:go_default_library",
        "//proto/eth/v1alpha1:go_default_library",
        "//shared/params:go_default_library",
        "@com_github_gogo_protobuf//proto:go_default_library",
        "@com_github_prometheus_client_golang//prometheus:go_default_library",
        "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
    ],
)
@@ -32,13 +24,16 @@ go_test(
    srcs = ["service_test.go"],
    embed = [":go_default_library"],
    deps = [
        "//beacon-chain/cache:go_default_library",
        "//beacon-chain/internal:go_default_library",
        "//beacon-chain/blockchain/testing:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/db:go_default_library",
        "//beacon-chain/db/testing:go_default_library",
        "//proto/beacon/p2p/v1:go_default_library",
        "//shared/bytesutil:go_default_library",
        "//shared/hashutil:go_default_library",
        "//proto/eth/v1alpha1:go_default_library",
        "//shared/params:go_default_library",
        "//shared/testutil:go_default_library",
        "@com_github_gogo_protobuf//proto:go_default_library",
        "@com_github_prysmaticlabs_go_bitfield//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@com_github_sirupsen_logrus//hooks/test:go_default_library",
    ],
169
beacon-chain/archiver/service.go
Normal file
169
beacon-chain/archiver/service.go
Normal file
@@ -0,0 +1,169 @@
|
||||
package archiver
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/validators"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var log = logrus.WithField("prefix", "archiver")
|
||||
|
||||
// Service defining archiver functionality for persisting checkpointed
|
||||
// beacon chain information to a database backend for historical purposes.
|
||||
type Service struct {
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
beaconDB db.Database
|
||||
headFetcher blockchain.HeadFetcher
|
||||
newHeadNotifier blockchain.NewHeadNotifier
|
||||
newHeadRootChan chan [32]byte
|
||||
}
|
||||
|
||||
// Config options for the archiver service.
|
||||
type Config struct {
|
||||
BeaconDB db.Database
|
||||
HeadFetcher blockchain.HeadFetcher
|
||||
NewHeadNotifier blockchain.NewHeadNotifier
|
||||
}
|
||||
|
||||
// NewArchiverService initializes the service from configuration options.
|
||||
func NewArchiverService(ctx context.Context, cfg *Config) *Service {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
return &Service{
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
beaconDB: cfg.BeaconDB,
|
||||
headFetcher: cfg.HeadFetcher,
|
||||
newHeadNotifier: cfg.NewHeadNotifier,
|
||||
newHeadRootChan: make(chan [32]byte, 1),
|
||||
}
|
||||
}
|
||||
|
||||
// Start the archiver service event loop.
|
||||
func (s *Service) Start() {
|
||||
go s.run(s.ctx)
|
||||
}
|
||||
|
||||
// Stop the archiver service event loop.
|
||||
func (s *Service) Stop() error {
|
||||
defer s.cancel()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Status reports the healthy status of the archiver. Returning nil means service
|
||||
// is correctly running without error.
|
||||
func (s *Service) Status() error {
|
||||
return nil
|
||||
}
|
||||
// We archive committee information pertaining to the head state's epoch.
func (s *Service) archiveCommitteeInfo(ctx context.Context, headState *pb.BeaconState) error {
	currentEpoch := helpers.SlotToEpoch(headState.Slot)
	proposerSeed, err := helpers.Seed(headState, currentEpoch, params.BeaconConfig().DomainBeaconProposer)
	if err != nil {
		return errors.Wrap(err, "could not generate proposer seed")
	}
	attesterSeed, err := helpers.Seed(headState, currentEpoch, params.BeaconConfig().DomainBeaconAttester)
	if err != nil {
		return errors.Wrap(err, "could not generate attester seed")
	}

	info := &ethpb.ArchivedCommitteeInfo{
		ProposerSeed: proposerSeed[:],
		AttesterSeed: attesterSeed[:],
	}
	if err := s.beaconDB.SaveArchivedCommitteeInfo(ctx, currentEpoch, info); err != nil {
		return errors.Wrap(err, "could not archive committee info")
	}
	return nil
}

// We archive active validator set changes that happened during the epoch.
func (s *Service) archiveActiveSetChanges(ctx context.Context, headState *pb.BeaconState) error {
	activations := validators.ActivatedValidatorIndices(headState)
	slashings := validators.SlashedValidatorIndices(headState)
	exited, err := validators.ExitedValidatorIndices(headState)
	if err != nil {
		return errors.Wrap(err, "could not determine exited validator indices")
	}
	activeSetChanges := &ethpb.ArchivedActiveSetChanges{
		Activated: activations,
		Exited:    exited,
		Slashed:   slashings,
	}
	if err := s.beaconDB.SaveArchivedActiveValidatorChanges(ctx, helpers.CurrentEpoch(headState), activeSetChanges); err != nil {
		return errors.Wrap(err, "could not archive active validator set changes")
	}
	return nil
}

// We compute participation metrics by first retrieving the head state and
// matching validator attestations during the epoch.
func (s *Service) archiveParticipation(ctx context.Context, headState *pb.BeaconState) error {
	participation, err := epoch.ComputeValidatorParticipation(headState, helpers.SlotToEpoch(headState.Slot))
	if err != nil {
		return errors.Wrap(err, "could not compute participation")
	}
	return s.beaconDB.SaveArchivedValidatorParticipation(ctx, helpers.SlotToEpoch(headState.Slot), participation)
}

// We archive validator balances and active indices.
func (s *Service) archiveBalances(ctx context.Context, headState *pb.BeaconState) error {
	balances := headState.Balances
	currentEpoch := helpers.CurrentEpoch(headState)
	if err := s.beaconDB.SaveArchivedBalances(ctx, currentEpoch, balances); err != nil {
		return errors.Wrap(err, "could not archive balances")
	}
	return nil
}

func (s *Service) run(ctx context.Context) {
	sub := s.newHeadNotifier.HeadUpdatedFeed().Subscribe(s.newHeadRootChan)
	defer sub.Unsubscribe()
	for {
		select {
		case r := <-s.newHeadRootChan:
			log.WithField("headRoot", fmt.Sprintf("%#x", r)).Debug("New chain head event")
			headState := s.headFetcher.HeadState()
			if !helpers.IsEpochEnd(headState.Slot) {
				continue
			}
			if err := s.archiveCommitteeInfo(ctx, headState); err != nil {
				log.WithError(err).Error("Could not archive committee info")
				continue
			}
			if err := s.archiveActiveSetChanges(ctx, headState); err != nil {
				log.WithError(err).Error("Could not archive active validator set changes")
				continue
			}
			if err := s.archiveParticipation(ctx, headState); err != nil {
				log.WithError(err).Error("Could not archive validator participation")
				continue
			}
			if err := s.archiveBalances(ctx, headState); err != nil {
				log.WithError(err).Error("Could not archive validator balances and active indices")
				continue
			}
			log.WithField(
				"epoch",
				helpers.CurrentEpoch(headState),
			).Debug("Successfully archived beacon chain data during epoch")
		case <-s.ctx.Done():
			log.Debug("Context closed, exiting goroutine")
			return
		case err := <-sub.Err():
			log.WithError(err).Error("Subscription to new chain head notifier failed")
			return
		}
	}
}
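The run loop above is purely reactive: it does nothing until a [32]byte head root arrives on the subscribed channel. A sketch of the producer side, assuming the shared/event feed type returned by the notifier interface and a hypothetical implementation:

	type headNotifier struct {
		feed *event.Feed
	}

	// HeadUpdatedFeed satisfies blockchain.NewHeadNotifier.
	func (n *headNotifier) HeadUpdatedFeed() *event.Feed {
		return n.feed
	}

	// After fork choice moves the head, the producer fans the new root out
	// to every subscriber, including the archiver's newHeadRootChan:
	//   n.feed.Send(headRoot)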
291  beacon-chain/archiver/service_test.go  Normal file
@@ -0,0 +1,291 @@
package archiver

import (
	"context"
	"fmt"
	"io/ioutil"
	"reflect"
	"testing"

	"github.com/gogo/protobuf/proto"
	"github.com/prysmaticlabs/go-bitfield"
	mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/beacon-chain/db"
	dbutil "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
	ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
	"github.com/prysmaticlabs/prysm/shared/params"
	"github.com/prysmaticlabs/prysm/shared/testutil"
	"github.com/sirupsen/logrus"
	logTest "github.com/sirupsen/logrus/hooks/test"
)

func init() {
	logrus.SetLevel(logrus.DebugLevel)
	logrus.SetOutput(ioutil.Discard)
	params.OverrideBeaconConfig(params.MinimalSpecConfig())
}

func TestArchiverService_ReceivesNewChainHeadEvent(t *testing.T) {
	hook := logTest.NewGlobal()
	svc, beaconDB := setupService(t)
	defer dbutil.TeardownDB(t, beaconDB)
	svc.headFetcher = &mock.ChainService{
		State: &pb.BeaconState{Slot: 1},
	}
	headRoot := [32]byte{1, 2, 3}
	triggerNewHeadEvent(t, svc, headRoot)
	testutil.AssertLogsContain(t, hook, fmt.Sprintf("%#x", headRoot))
	testutil.AssertLogsContain(t, hook, "New chain head event")
}

func TestArchiverService_OnlyArchiveAtEpochEnd(t *testing.T) {
	hook := logTest.NewGlobal()
	svc, beaconDB := setupService(t)
	defer dbutil.TeardownDB(t, beaconDB)
	// The head state is NOT at an epoch end.
	svc.headFetcher = &mock.ChainService{
		State: &pb.BeaconState{Slot: params.BeaconConfig().SlotsPerEpoch - 3},
	}
	triggerNewHeadEvent(t, svc, [32]byte{})

	// The context should have been canceled.
	if svc.ctx.Err() != context.Canceled {
		t.Error("context was not canceled")
	}
	testutil.AssertLogsContain(t, hook, "New chain head event")
	// The service should ONLY log archival messages when it receives a
	// head slot that is an epoch end.
	testutil.AssertLogsDoNotContain(t, hook, "Successfully archived")
}
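The slot choices in these tests follow from the epoch-end guard in the run loop. A hedged reading of helpers.IsEpochEnd is that a slot is an epoch end exactly when the next slot starts a new epoch:

	func isEpochEnd(slot, slotsPerEpoch uint64) bool {
		return (slot+1)%slotsPerEpoch == 0
	}

Assuming the minimal spec config's SlotsPerEpoch of 8, the 2*8 - 1 = 15 slot used by setupState below passes the guard while 8 - 3 = 5 above does not, which is why only the states below produce archival logs.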

func TestArchiverService_ComputesAndSavesParticipation(t *testing.T) {
	hook := logTest.NewGlobal()
	validatorCount := uint64(100)
	headState := setupState(t, validatorCount)
	svc, beaconDB := setupService(t)
	defer dbutil.TeardownDB(t, beaconDB)
	svc.headFetcher = &mock.ChainService{
		State: headState,
	}
	triggerNewHeadEvent(t, svc, [32]byte{})

	attestedBalance := uint64(1)
	currentEpoch := helpers.CurrentEpoch(headState)
	wanted := &ethpb.ValidatorParticipation{
		VotedEther:              attestedBalance,
		EligibleEther:           validatorCount * params.BeaconConfig().MaxEffectiveBalance,
		GlobalParticipationRate: float32(attestedBalance) / float32(validatorCount*params.BeaconConfig().MaxEffectiveBalance),
	}

	retrieved, err := svc.beaconDB.ArchivedValidatorParticipation(svc.ctx, currentEpoch)
	if err != nil {
		t.Fatal(err)
	}

	if !proto.Equal(wanted, retrieved) {
		t.Errorf("Wanted participation for epoch %d %v, retrieved %v", currentEpoch, wanted, retrieved)
	}
	testutil.AssertLogsContain(t, hook, "Successfully archived")
}

func TestArchiverService_SavesIndicesAndBalances(t *testing.T) {
	hook := logTest.NewGlobal()
	validatorCount := uint64(100)
	headState := setupState(t, validatorCount)
	svc, beaconDB := setupService(t)
	defer dbutil.TeardownDB(t, beaconDB)
	svc.headFetcher = &mock.ChainService{
		State: headState,
	}
	triggerNewHeadEvent(t, svc, [32]byte{})

	retrieved, err := svc.beaconDB.ArchivedBalances(svc.ctx, helpers.CurrentEpoch(headState))
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(headState.Balances, retrieved) {
		t.Errorf(
			"Wanted balances for epoch %d %v, retrieved %v",
			helpers.CurrentEpoch(headState),
			headState.Balances,
			retrieved,
		)
	}
	testutil.AssertLogsContain(t, hook, "Successfully archived")
}

func TestArchiverService_SavesCommitteeInfo(t *testing.T) {
	hook := logTest.NewGlobal()
	validatorCount := uint64(100)
	headState := setupState(t, validatorCount)
	svc, beaconDB := setupService(t)
	defer dbutil.TeardownDB(t, beaconDB)
	svc.headFetcher = &mock.ChainService{
		State: headState,
	}
	triggerNewHeadEvent(t, svc, [32]byte{})

	currentEpoch := helpers.CurrentEpoch(headState)
	proposerSeed, err := helpers.Seed(headState, currentEpoch, params.BeaconConfig().DomainBeaconProposer)
	if err != nil {
		t.Fatal(err)
	}
	attesterSeed, err := helpers.Seed(headState, currentEpoch, params.BeaconConfig().DomainBeaconAttester)
	if err != nil {
		t.Fatal(err)
	}
	wanted := &ethpb.ArchivedCommitteeInfo{
		ProposerSeed: proposerSeed[:],
		AttesterSeed: attesterSeed[:],
	}

	retrieved, err := svc.beaconDB.ArchivedCommitteeInfo(svc.ctx, helpers.CurrentEpoch(headState))
	if err != nil {
		t.Fatal(err)
	}
	if !proto.Equal(wanted, retrieved) {
		t.Errorf(
			"Wanted committee info for epoch %d %v, retrieved %v",
			helpers.CurrentEpoch(headState),
			wanted,
			retrieved,
		)
	}
	testutil.AssertLogsContain(t, hook, "Successfully archived")
}

func TestArchiverService_SavesActivatedValidatorChanges(t *testing.T) {
	hook := logTest.NewGlobal()
	validatorCount := uint64(100)
	headState := setupState(t, validatorCount)
	svc, beaconDB := setupService(t)
	defer dbutil.TeardownDB(t, beaconDB)
	svc.headFetcher = &mock.ChainService{
		State: headState,
	}
	currentEpoch := helpers.CurrentEpoch(headState)
	delayedActEpoch := helpers.DelayedActivationExitEpoch(currentEpoch)
	headState.Validators[4].ActivationEpoch = delayedActEpoch
	headState.Validators[5].ActivationEpoch = delayedActEpoch
	triggerNewHeadEvent(t, svc, [32]byte{})

	retrieved, err := beaconDB.ArchivedActiveValidatorChanges(svc.ctx, currentEpoch)
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(retrieved.Activated, []uint64{4, 5}) {
		t.Errorf("Wanted indices 4 and 5 activated, received %v", retrieved.Activated)
	}
	testutil.AssertLogsContain(t, hook, "Successfully archived")
}

func TestArchiverService_SavesSlashedValidatorChanges(t *testing.T) {
	hook := logTest.NewGlobal()
	validatorCount := uint64(100)
	headState := setupState(t, validatorCount)
	svc, beaconDB := setupService(t)
	defer dbutil.TeardownDB(t, beaconDB)
	svc.headFetcher = &mock.ChainService{
		State: headState,
	}
	currentEpoch := helpers.CurrentEpoch(headState)
	headState.Validators[95].Slashed = true
	headState.Validators[96].Slashed = true
	triggerNewHeadEvent(t, svc, [32]byte{})

	retrieved, err := beaconDB.ArchivedActiveValidatorChanges(svc.ctx, currentEpoch)
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(retrieved.Slashed, []uint64{95, 96}) {
		t.Errorf("Wanted indices 95 and 96 slashed, received %v", retrieved.Slashed)
	}
	testutil.AssertLogsContain(t, hook, "Successfully archived")
}

func TestArchiverService_SavesExitedValidatorChanges(t *testing.T) {
	hook := logTest.NewGlobal()
	validatorCount := uint64(100)
	headState := setupState(t, validatorCount)
	svc, beaconDB := setupService(t)
	defer dbutil.TeardownDB(t, beaconDB)
	svc.headFetcher = &mock.ChainService{
		State: headState,
	}
	currentEpoch := helpers.CurrentEpoch(headState)
	headState.Validators[95].ExitEpoch = currentEpoch + 1
	headState.Validators[95].WithdrawableEpoch = currentEpoch + 1 + params.BeaconConfig().MinValidatorWithdrawabilityDelay
	triggerNewHeadEvent(t, svc, [32]byte{})

	retrieved, err := beaconDB.ArchivedActiveValidatorChanges(svc.ctx, currentEpoch)
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(retrieved.Exited, []uint64{95}) {
		t.Errorf("Wanted index 95 exited, received %v", retrieved.Exited)
	}
	testutil.AssertLogsContain(t, hook, "Successfully archived")
}

func setupState(t *testing.T, validatorCount uint64) *pb.BeaconState {
	validators := make([]*ethpb.Validator, validatorCount)
	balances := make([]uint64, validatorCount)
	for i := 0; i < len(validators); i++ {
		validators[i] = &ethpb.Validator{
			ExitEpoch:         params.BeaconConfig().FarFutureEpoch,
			WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
			EffectiveBalance:  params.BeaconConfig().MaxEffectiveBalance,
		}
		balances[i] = params.BeaconConfig().MaxEffectiveBalance
	}

	atts := []*pb.PendingAttestation{{Data: &ethpb.AttestationData{Target: &ethpb.Checkpoint{}}}}

	// We initialize a head state that has attestations from participated
	// validators in a simulated fashion.
	return &pb.BeaconState{
		Slot:                       (2 * params.BeaconConfig().SlotsPerEpoch) - 1,
		Validators:                 validators,
		Balances:                   balances,
		BlockRoots:                 make([][]byte, 128),
		Slashings:                  []uint64{0, 1e9, 1e9},
		RandaoMixes:                make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
		CurrentEpochAttestations:   atts,
		FinalizedCheckpoint:        &ethpb.Checkpoint{},
		JustificationBits:          bitfield.Bitvector4{0x00},
		CurrentJustifiedCheckpoint: &ethpb.Checkpoint{},
	}
}

func setupService(t *testing.T) (*Service, db.Database) {
	beaconDB := dbutil.SetupDB(t)
	ctx, cancel := context.WithCancel(context.Background())
	return &Service{
		beaconDB:        beaconDB,
		ctx:             ctx,
		cancel:          cancel,
		newHeadRootChan: make(chan [32]byte),
		newHeadNotifier: &mock.ChainService{},
	}, beaconDB
}

func triggerNewHeadEvent(t *testing.T, svc *Service, headRoot [32]byte) {
	exitRoutine := make(chan bool)
	go func() {
		svc.run(svc.ctx)
		<-exitRoutine
	}()

	svc.newHeadRootChan <- headRoot
	if err := svc.Stop(); err != nil {
		t.Fatal(err)
	}
	exitRoutine <- true

	// The context should have been canceled.
	if svc.ctx.Err() != context.Canceled {
		t.Error("context was not canceled")
	}
}
@@ -1,333 +0,0 @@
// Package attestation defines the life-cycle and status of single and aggregated attestations.
package attestation

import (
	"context"
	"fmt"
	"sort"
	"sync"

	"github.com/gogo/protobuf/proto"
	"github.com/prysmaticlabs/prysm/beacon-chain/cache"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
	"github.com/prysmaticlabs/prysm/beacon-chain/db"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
	"github.com/prysmaticlabs/prysm/shared/bitutil"
	"github.com/prysmaticlabs/prysm/shared/bytesutil"
	"github.com/prysmaticlabs/prysm/shared/event"
	"github.com/prysmaticlabs/prysm/shared/hashutil"
	handler "github.com/prysmaticlabs/prysm/shared/messagehandler"
	"github.com/prysmaticlabs/prysm/shared/params"
	"github.com/sirupsen/logrus"
)

var log = logrus.WithField("prefix", "attestation")
var committeeCache = cache.NewCommitteesCache()

// TargetHandler provides an interface for fetching latest attestation targets
// and updating attestations in batches.
type TargetHandler interface {
	LatestAttestationTarget(state *pb.BeaconState, validatorIndex uint64) (*pb.AttestationTarget, error)
	BatchUpdateLatestAttestation(ctx context.Context, atts []*pb.Attestation) error
}

type attestationStore struct {
	sync.RWMutex
	m map[[48]byte]*pb.Attestation
}

// Service represents a service that handles the internal
// logic of managing single and aggregated attestations.
type Service struct {
	ctx          context.Context
	cancel       context.CancelFunc
	beaconDB     *db.BeaconDB
	incomingFeed *event.Feed
	incomingChan chan *pb.Attestation
	// store is the mapping from an individual
	// validator's public key to its latest attestation.
	store attestationStore
}

// Config options for the service.
type Config struct {
	BeaconDB *db.BeaconDB
}

// NewAttestationService instantiates a new service instance that will
// be registered into a running beacon node.
func NewAttestationService(ctx context.Context, cfg *Config) *Service {
	ctx, cancel := context.WithCancel(ctx)
	return &Service{
		ctx:          ctx,
		cancel:       cancel,
		beaconDB:     cfg.BeaconDB,
		incomingFeed: new(event.Feed),
		incomingChan: make(chan *pb.Attestation, params.BeaconConfig().DefaultBufferSize),
		store:        attestationStore{m: make(map[[48]byte]*pb.Attestation)},
	}
}

// Start an attestation service's main event loop.
func (a *Service) Start() {
	log.Info("Starting service")
	go a.attestationPool()
}

// Stop the Attestation service's main event loop and associated goroutines.
func (a *Service) Stop() error {
	defer a.cancel()
	log.Info("Stopping service")
	return nil
}

// Status always returns nil.
// TODO(1201): Add service health checks.
func (a *Service) Status() error {
	return nil
}

// IncomingAttestationFeed returns a feed that any service can send incoming p2p attestations into.
// The attestation service will subscribe to this feed in order to relay incoming attestations.
func (a *Service) IncomingAttestationFeed() *event.Feed {
	return a.incomingFeed
}

// LatestAttestationTarget returns the target block that the validator index attested to;
// the attestation with the highest slot number in the attestation pool is returned.
//
// Spec pseudocode definition:
//	Let `get_latest_attestation_target(store: Store, validator_index: ValidatorIndex) ->
//	BeaconBlock` be the target block in the attestation
//	`get_latest_attestation(store, validator_index)`.
func (a *Service) LatestAttestationTarget(beaconState *pb.BeaconState, index uint64) (*pb.AttestationTarget, error) {
	if index >= uint64(len(beaconState.ValidatorRegistry)) {
		return nil, fmt.Errorf("invalid validator index %d", index)
	}
	validator := beaconState.ValidatorRegistry[index]

	pubKey := bytesutil.ToBytes48(validator.Pubkey)
	a.store.RLock()
	defer a.store.RUnlock()
	if _, exists := a.store.m[pubKey]; !exists {
		return nil, nil
	}

	attestation := a.store.m[pubKey]
	if attestation == nil {
		return nil, nil
	}
	targetRoot := bytesutil.ToBytes32(attestation.Data.BeaconBlockRootHash32)
	if !a.beaconDB.HasBlock(targetRoot) {
		return nil, nil
	}

	return a.beaconDB.AttestationTarget(targetRoot)
}

// attestationPool takes a newly received attestation from the sync service
// and updates the attestation pool.
func (a *Service) attestationPool() {
	incomingSub := a.incomingFeed.Subscribe(a.incomingChan)
	defer incomingSub.Unsubscribe()
	for {
		select {
		case <-a.ctx.Done():
			log.Debug("Attestation pool closed, exiting goroutine")
			return
		// Listen for a newly received incoming attestation from the sync service.
		case attestation := <-a.incomingChan:
			handler.SafelyHandleMessage(a.ctx, a.handleAttestation, attestation)
		}
	}
}

func (a *Service) handleAttestation(ctx context.Context, msg proto.Message) error {
	attestation := msg.(*pb.Attestation)
	if err := a.UpdateLatestAttestation(ctx, attestation); err != nil {
		return fmt.Errorf("could not update attestation pool: %v", err)
	}
	return nil
}

// UpdateLatestAttestation inputs a new attestation and checks whether
// the attesters who submitted this attestation with the higher slot number
// have been noted in the attestation pool. If not, it updates the
// attestation pool, mapping the attester's public key to the attestation.
func (a *Service) UpdateLatestAttestation(ctx context.Context, attestation *pb.Attestation) error {
	totalAttestationSeen.Inc()

	// Potential improvement: instead of getting the state,
	// we could get a mapping of validator index to public key.
	beaconState, err := a.beaconDB.HeadState(ctx)
	if err != nil {
		return err
	}
	head, err := a.beaconDB.ChainHead()
	if err != nil {
		return err
	}
	headRoot, err := hashutil.HashBeaconBlock(head)
	if err != nil {
		return err
	}
	return a.updateAttestation(ctx, headRoot, beaconState, attestation)
}

// BatchUpdateLatestAttestation updates multiple attestations and adds them into the attestation store
// if they are valid.
func (a *Service) BatchUpdateLatestAttestation(ctx context.Context, attestations []*pb.Attestation) error {
	if attestations == nil {
		return nil
	}
	// Potential improvement: instead of getting the state,
	// we could get a mapping of validator index to public key.
	beaconState, err := a.beaconDB.HeadState(ctx)
	if err != nil {
		return err
	}
	head, err := a.beaconDB.ChainHead()
	if err != nil {
		return err
	}
	headRoot, err := hashutil.HashBeaconBlock(head)
	if err != nil {
		return err
	}

	attestations = a.sortAttestations(attestations)

	for _, attestation := range attestations {
		if err := a.updateAttestation(ctx, headRoot, beaconState, attestation); err != nil {
			return err
		}
	}
	return nil
}

// InsertAttestationIntoStore locks the store, inserts the attestation, then
// unlocks the store again. This method may be used by external services
// in testing to populate the attestation store.
func (a *Service) InsertAttestationIntoStore(pubkey [48]byte, att *pb.Attestation) {
	a.store.Lock()
	defer a.store.Unlock()
	a.store.m[pubkey] = att
}

func (a *Service) updateAttestation(ctx context.Context, headRoot [32]byte, beaconState *pb.BeaconState,
	attestation *pb.Attestation) error {
	totalAttestationSeen.Inc()

	slot := attestation.Data.Slot
	var committee []uint64
	var cachedCommittees *cache.CommitteesInSlot
	var err error

	for beaconState.Slot < slot {
		beaconState, err = state.ExecuteStateTransition(
			ctx, beaconState, nil /* block */, headRoot, &state.TransitionConfig{},
		)
		if err != nil {
			return fmt.Errorf("could not execute head transition: %v", err)
		}
	}

	cachedCommittees, err = committeeCache.CommitteesInfoBySlot(slot)
	if err != nil {
		return err
	}
	if cachedCommittees == nil {
		crosslinkCommittees, err := helpers.CrosslinkCommitteesAtSlot(beaconState, slot, false /* registryChange */)
		if err != nil {
			return err
		}
		cachedCommittees = helpers.ToCommitteeCache(slot, crosslinkCommittees)
		if err := committeeCache.AddCommittees(cachedCommittees); err != nil {
			return err
		}
	}

	// Find the committee for the attestation's shard.
	for _, v := range cachedCommittees.Committees {
		if v.Shard == attestation.Data.Shard {
			committee = v.Committee
			break
		}
	}

	log.WithFields(logrus.Fields{
		"attestationSlot":    attestation.Data.Slot - params.BeaconConfig().GenesisSlot,
		"attestationShard":   attestation.Data.Shard,
		"committeesShard":    cachedCommittees.Committees[0].Shard,
		"committeesList":     cachedCommittees.Committees[0].Committee,
		"lengthOfCommittees": len(cachedCommittees.Committees),
	}).Debug("Updating latest attestation")

	// The participation bitfield from the attestation is represented in bytes,
	// so we multiply by 8 to get an accurate validator count in bits.
	bitfield := attestation.AggregationBitfield
	totalBits := len(bitfield) * 8

	// Check each bit of the participation bitfield to find out which
	// attesters have submitted new attestations.
	// This has O(n) run time and could be optimized down the line.
	for i := 0; i < totalBits; i++ {
		bitSet, err := bitutil.CheckBit(bitfield, i)
		if err != nil {
			return err
		}
		if !bitSet {
			continue
		}

		if i >= len(committee) {
			log.Errorf("Bitfield points to an invalid index in the committee: bitfield %08b", bitfield)
			continue
		}

		if int(committee[i]) >= len(beaconState.ValidatorRegistry) {
			log.Errorf("Index doesn't exist in validator registry: index %d", committee[i])
		}

		// The attestation came from this attester; we use the slot committee to find the
		// validator's actual index.
		pubkey := bytesutil.ToBytes48(beaconState.ValidatorRegistry[committee[i]].Pubkey)
		newAttestationSlot := attestation.Data.Slot
		currentAttestationSlot := uint64(0)
		// Lock per iteration rather than deferring the unlock to function
		// return, which would self-deadlock on the next pass of the loop.
		a.store.Lock()
		if _, exists := a.store.m[pubkey]; exists {
			currentAttestationSlot = a.store.m[pubkey].Data.Slot
		}
		// If the attestation is newer than this attester's one in the pool.
		if newAttestationSlot > currentAttestationSlot {
			a.store.m[pubkey] = attestation

			log.WithFields(
				logrus.Fields{
					"attestationSlot": attestation.Data.Slot - params.BeaconConfig().GenesisSlot,
					"justifiedEpoch":  attestation.Data.JustifiedEpoch - params.BeaconConfig().GenesisEpoch,
				},
			).Debug("Attestation store updated")

			blockRoot := bytesutil.ToBytes32(attestation.Data.BeaconBlockRootHash32)
			votedBlock, err := a.beaconDB.Block(blockRoot)
			if err != nil {
				a.store.Unlock()
				return err
			}
			reportVoteMetrics(committee[i], votedBlock)
		}
		a.store.Unlock()
	}
	return nil
}
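As a clarifying sketch (not part of the diff), the per-bit check above amounts to the following MSB-first test. The MSB-first order is an inference from the tests below, where an AggregationBitfield of 0x80 selects committee position 0:

	// bitSetMSBFirst reports whether bit i of the bitfield is set, counting
	// from the most significant bit of the first byte (assumed order of
	// bitutil.CheckBit).
	func bitSetMSBFirst(bitfield []byte, i int) bool {
		return bitfield[i/8]&(0x80>>uint(i%8)) != 0
	}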

// sortAttestations sorts attestations by their slot number in ascending order.
func (a *Service) sortAttestations(attestations []*pb.Attestation) []*pb.Attestation {
	sort.SliceStable(attestations, func(i, j int) bool {
		return attestations[i].Data.Slot < attestations[j].Data.Slot
	})

	return attestations
}
@@ -1,446 +0,0 @@
package attestation

import (
	"bytes"
	"context"
	"fmt"
	"reflect"
	"strings"
	"testing"

	"github.com/prysmaticlabs/prysm/beacon-chain/cache"
	"github.com/prysmaticlabs/prysm/beacon-chain/internal"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
	"github.com/prysmaticlabs/prysm/shared/bytesutil"
	"github.com/prysmaticlabs/prysm/shared/hashutil"
	"github.com/prysmaticlabs/prysm/shared/params"
	"github.com/prysmaticlabs/prysm/shared/testutil"
	"github.com/sirupsen/logrus"
	logTest "github.com/sirupsen/logrus/hooks/test"
)

func init() {
	logrus.SetLevel(logrus.DebugLevel)
}

var _ = TargetHandler(&Service{})

func TestUpdateLatestAttestation_UpdatesLatest(t *testing.T) {
	beaconDB := internal.SetupDB(t)
	defer internal.TeardownDB(t, beaconDB)
	ctx := context.Background()

	var validators []*pb.Validator
	for i := 0; i < 64; i++ {
		validators = append(validators, &pb.Validator{
			Pubkey:          []byte{byte(i)},
			ActivationEpoch: params.BeaconConfig().GenesisEpoch,
			ExitEpoch:       params.BeaconConfig().GenesisEpoch + 10,
		})
	}

	beaconState := &pb.BeaconState{
		Slot:              params.BeaconConfig().GenesisSlot + 1,
		ValidatorRegistry: validators,
	}
	block := &pb.BeaconBlock{
		Slot: params.BeaconConfig().GenesisSlot + 1,
	}
	if err := beaconDB.SaveBlock(block); err != nil {
		t.Fatal(err)
	}
	if err := beaconDB.UpdateChainHead(ctx, block, beaconState); err != nil {
		t.Fatal(err)
	}
	service := NewAttestationService(context.Background(), &Config{BeaconDB: beaconDB})

	attestation := &pb.Attestation{
		AggregationBitfield: []byte{0x80},
		Data: &pb.AttestationData{
			Slot:  params.BeaconConfig().GenesisSlot + 1,
			Shard: 1,
		},
	}

	if err := service.UpdateLatestAttestation(ctx, attestation); err != nil {
		t.Fatalf("could not update latest attestation: %v", err)
	}
	pubkey := bytesutil.ToBytes48([]byte{byte(3)})
	if service.store.m[pubkey].Data.Slot != attestation.Data.Slot {
		t.Errorf("Incorrect slot stored, wanted: %d, got: %d",
			attestation.Data.Slot, service.store.m[pubkey].Data.Slot)
	}

	beaconState = &pb.BeaconState{
		Slot:              params.BeaconConfig().GenesisSlot + 36,
		ValidatorRegistry: validators,
	}
	if err := beaconDB.UpdateChainHead(ctx, block, beaconState); err != nil {
		t.Fatalf("could not save state: %v", err)
	}

	attestation.Data.Slot = params.BeaconConfig().GenesisSlot + 36
	attestation.Data.Shard = 36
	if err := service.UpdateLatestAttestation(ctx, attestation); err != nil {
		t.Fatalf("could not update latest attestation: %v", err)
	}
	if service.store.m[pubkey].Data.Slot != attestation.Data.Slot {
		t.Errorf("Incorrect slot stored, wanted: %d, got: %d",
			attestation.Data.Slot, service.store.m[pubkey].Data.Slot)
	}
}

func TestAttestationPool_UpdatesAttestationPool(t *testing.T) {
	beaconDB := internal.SetupDB(t)
	defer internal.TeardownDB(t, beaconDB)
	ctx := context.Background()

	var validators []*pb.Validator
	for i := 0; i < 64; i++ {
		validators = append(validators, &pb.Validator{
			Pubkey:          []byte{byte(i)},
			ActivationEpoch: params.BeaconConfig().GenesisEpoch,
			ExitEpoch:       params.BeaconConfig().GenesisEpoch + 10,
		})
	}
	beaconState := &pb.BeaconState{
		Slot:              params.BeaconConfig().GenesisSlot + 1,
		ValidatorRegistry: validators,
	}
	block := &pb.BeaconBlock{
		Slot: params.BeaconConfig().GenesisSlot + 1,
	}
	if err := beaconDB.SaveBlock(block); err != nil {
		t.Fatal(err)
	}
	if err := beaconDB.UpdateChainHead(ctx, block, beaconState); err != nil {
		t.Fatal(err)
	}

	service := NewAttestationService(context.Background(), &Config{BeaconDB: beaconDB})
	attestation := &pb.Attestation{
		AggregationBitfield: []byte{0x80},
		Data: &pb.AttestationData{
			Slot:  params.BeaconConfig().GenesisSlot + 1,
			Shard: 1,
		},
	}

	if err := service.handleAttestation(context.Background(), attestation); err != nil {
		t.Error(err)
	}
}

func TestLatestAttestationTarget_CantGetAttestation(t *testing.T) {
	beaconDB := internal.SetupDB(t)
	defer internal.TeardownDB(t, beaconDB)
	ctx := context.Background()

	if err := beaconDB.SaveState(ctx, &pb.BeaconState{
		ValidatorRegistry: []*pb.Validator{{}},
	}); err != nil {
		t.Fatalf("could not save state: %v", err)
	}
	service := NewAttestationService(context.Background(), &Config{BeaconDB: beaconDB})
	headState, err := beaconDB.HeadState(ctx)
	if err != nil {
		t.Fatal(err)
	}

	index := uint64(100)
	want := fmt.Sprintf("invalid validator index %d", index)
	if _, err := service.LatestAttestationTarget(headState, index); !strings.Contains(err.Error(), want) {
		t.Errorf("Wanted error to contain %s, received %v", want, err)
	}
}

func TestLatestAttestationTarget_ReturnsLatestAttestedBlock(t *testing.T) {
	beaconDB := internal.SetupDB(t)
	defer internal.TeardownDB(t, beaconDB)
	ctx := context.Background()

	pubKey := []byte{'A'}
	if err := beaconDB.SaveState(ctx, &pb.BeaconState{
		ValidatorRegistry: []*pb.Validator{{Pubkey: pubKey}},
	}); err != nil {
		t.Fatalf("could not save state: %v", err)
	}

	block := &pb.BeaconBlock{Slot: 999}
	if err := beaconDB.SaveBlock(block); err != nil {
		t.Fatalf("could not save block: %v", err)
	}
	blockRoot, err := hashutil.HashBeaconBlock(block)
	if err != nil {
		t.Fatalf("could not hash block: %v", err)
	}
	if err := beaconDB.SaveAttestationTarget(ctx, &pb.AttestationTarget{
		Slot:       block.Slot,
		BlockRoot:  blockRoot[:],
		ParentRoot: []byte{},
	}); err != nil {
		t.Fatalf("could not save att target: %v", err)
	}

	service := NewAttestationService(context.Background(), &Config{BeaconDB: beaconDB})

	attestation := &pb.Attestation{
		Data: &pb.AttestationData{
			BeaconBlockRootHash32: blockRoot[:],
		}}
	pubKey48 := bytesutil.ToBytes48(pubKey)
	service.store.m[pubKey48] = attestation

	headState, err := beaconDB.HeadState(ctx)
	if err != nil {
		t.Fatal(err)
	}

	latestAttestedTarget, err := service.LatestAttestationTarget(headState, 0)
	if err != nil {
		t.Fatalf("Could not get latest attestation: %v", err)
	}

	if !bytes.Equal(blockRoot[:], latestAttestedTarget.BlockRoot) {
		t.Errorf("Wanted: %v, got: %v", blockRoot[:], latestAttestedTarget.BlockRoot)
	}
}

func TestUpdateLatestAttestation_CacheEnabledAndMiss(t *testing.T) {
	beaconDB := internal.SetupDB(t)
	defer internal.TeardownDB(t, beaconDB)
	ctx := context.Background()

	var validators []*pb.Validator
	for i := 0; i < 64; i++ {
		validators = append(validators, &pb.Validator{
			Pubkey:          []byte{byte(i)},
			ActivationEpoch: params.BeaconConfig().GenesisEpoch,
			ExitEpoch:       params.BeaconConfig().GenesisEpoch + 10,
		})
	}

	beaconState := &pb.BeaconState{
		Slot:              params.BeaconConfig().GenesisSlot + 1,
		ValidatorRegistry: validators,
	}
	block := &pb.BeaconBlock{
		Slot: params.BeaconConfig().GenesisSlot + 1,
	}
	if err := beaconDB.SaveBlock(block); err != nil {
		t.Fatal(err)
	}
	if err := beaconDB.UpdateChainHead(ctx, block, beaconState); err != nil {
		t.Fatal(err)
	}
	service := NewAttestationService(context.Background(), &Config{BeaconDB: beaconDB})

	attestation := &pb.Attestation{
		AggregationBitfield: []byte{0x80},
		Data: &pb.AttestationData{
			Slot:  params.BeaconConfig().GenesisSlot + 1,
			Shard: 1,
		},
	}

	if err := service.UpdateLatestAttestation(ctx, attestation); err != nil {
		t.Fatalf("could not update latest attestation: %v", err)
	}
	pubkey := bytesutil.ToBytes48([]byte{byte(3)})
	if service.store.m[pubkey].Data.Slot != attestation.Data.Slot {
		t.Errorf("Incorrect slot stored, wanted: %d, got: %d",
			attestation.Data.Slot, service.store.m[pubkey].Data.Slot)
	}

	attestation.Data.Slot = params.BeaconConfig().GenesisSlot + 36
	attestation.Data.Shard = 36

	beaconState = &pb.BeaconState{
		Slot:              params.BeaconConfig().GenesisSlot + 36,
		ValidatorRegistry: validators,
	}
	if err := beaconDB.UpdateChainHead(ctx, block, beaconState); err != nil {
		t.Fatalf("could not save state: %v", err)
	}

	if err := service.UpdateLatestAttestation(ctx, attestation); err != nil {
		t.Fatalf("could not update latest attestation: %v", err)
	}
	if service.store.m[pubkey].Data.Slot != attestation.Data.Slot {
		t.Errorf("Incorrect slot stored, wanted: %d, got: %d",
			attestation.Data.Slot, service.store.m[pubkey].Data.Slot)
	}

	// Verify the committee for the attestation's data slot was cached.
	fetchedCommittees, err := committeeCache.CommitteesInfoBySlot(attestation.Data.Slot)
	if err != nil {
		t.Fatal(err)
	}
	wantedCommittee := []uint64{38}
	if !reflect.DeepEqual(wantedCommittee, fetchedCommittees.Committees[0].Committee) {
		t.Errorf(
			"Result indices was an unexpected value. Wanted %d, got %d",
			wantedCommittee,
			fetchedCommittees.Committees[0].Committee,
		)
	}
}

func TestUpdateLatestAttestation_CacheEnabledAndHit(t *testing.T) {
	var validators []*pb.Validator
	for i := 0; i < 64; i++ {
		validators = append(validators, &pb.Validator{
			Pubkey:          []byte{byte(i)},
			ActivationEpoch: params.BeaconConfig().GenesisEpoch,
			ExitEpoch:       params.BeaconConfig().GenesisEpoch + 10,
		})
	}

	beaconDB := internal.SetupDB(t)
	defer internal.TeardownDB(t, beaconDB)
	ctx := context.Background()

	beaconState := &pb.BeaconState{
		Slot:              params.BeaconConfig().GenesisSlot + 2,
		ValidatorRegistry: validators,
	}
	block := &pb.BeaconBlock{
		Slot: params.BeaconConfig().GenesisSlot + 2,
	}
	if err := beaconDB.SaveBlock(block); err != nil {
		t.Fatal(err)
	}
	if err := beaconDB.UpdateChainHead(ctx, block, beaconState); err != nil {
		t.Fatal(err)
	}

	service := NewAttestationService(context.Background(), &Config{BeaconDB: beaconDB})

	slot := params.BeaconConfig().GenesisSlot + 2
	shard := uint64(3)
	index := uint64(4)
	attestation := &pb.Attestation{
		AggregationBitfield: []byte{0x80},
		Data: &pb.AttestationData{
			Slot:  slot,
			Shard: shard,
		},
	}

	csInSlot := &cache.CommitteesInSlot{
		Slot: slot,
		Committees: []*cache.CommitteeInfo{
			{Shard: shard, Committee: []uint64{index, 999}},
		}}

	if err := committeeCache.AddCommittees(csInSlot); err != nil {
		t.Fatal(err)
	}

	if err := service.UpdateLatestAttestation(ctx, attestation); err != nil {
		t.Fatalf("could not update latest attestation: %v", err)
	}
	pubkey := bytesutil.ToBytes48([]byte{byte(index)})
	if err := service.UpdateLatestAttestation(ctx, attestation); err != nil {
		t.Fatalf("could not update latest attestation: %v", err)
	}

	if service.store.m[pubkey].Data.Slot != attestation.Data.Slot {
		t.Errorf("Incorrect slot stored, wanted: %d, got: %d",
			attestation.Data.Slot, service.store.m[pubkey].Data.Slot)
	}
}

func TestUpdateLatestAttestation_InvalidIndex(t *testing.T) {
	beaconDB := internal.SetupDB(t)
	hook := logTest.NewGlobal()
	defer internal.TeardownDB(t, beaconDB)
	ctx := context.Background()

	var validators []*pb.Validator
	for i := 0; i < 64; i++ {
		validators = append(validators, &pb.Validator{
			Pubkey:          []byte{byte(i)},
			ActivationEpoch: params.BeaconConfig().GenesisEpoch,
			ExitEpoch:       params.BeaconConfig().GenesisEpoch + 10,
		})
	}

	beaconState := &pb.BeaconState{
		Slot:              params.BeaconConfig().GenesisSlot + 1,
		ValidatorRegistry: validators,
	}
	block := &pb.BeaconBlock{
		Slot: params.BeaconConfig().GenesisSlot + 1,
	}
	if err := beaconDB.SaveBlock(block); err != nil {
		t.Fatal(err)
	}
	if err := beaconDB.UpdateChainHead(ctx, block, beaconState); err != nil {
		t.Fatal(err)
	}
	service := NewAttestationService(context.Background(), &Config{BeaconDB: beaconDB})
	attestation := &pb.Attestation{
		AggregationBitfield: []byte{0xC0},
		Data: &pb.AttestationData{
			Slot:  params.BeaconConfig().GenesisSlot + 1,
			Shard: 1,
		},
	}

	if err := service.UpdateLatestAttestation(ctx, attestation); err != nil {
		t.Fatalf("could not update latest attestation: %v", err)
	}
	testutil.AssertLogsContain(t, hook, "Bitfield points to an invalid index in the committee")
}

func TestUpdateLatestAttestation_BatchUpdate(t *testing.T) {
	beaconDB := internal.SetupDB(t)
	defer internal.TeardownDB(t, beaconDB)
	ctx := context.Background()

	var validators []*pb.Validator
	for i := 0; i < 64; i++ {
		validators = append(validators, &pb.Validator{
			Pubkey:          []byte{byte(i)},
			ActivationEpoch: params.BeaconConfig().GenesisEpoch,
			ExitEpoch:       params.BeaconConfig().GenesisEpoch + 10,
		})
	}

	beaconState := &pb.BeaconState{
		Slot:              params.BeaconConfig().GenesisSlot + 1,
		ValidatorRegistry: validators,
	}
	block := &pb.BeaconBlock{
		Slot: params.BeaconConfig().GenesisSlot + 1,
	}
	if err := beaconDB.SaveBlock(block); err != nil {
		t.Fatal(err)
	}
	if err := beaconDB.UpdateChainHead(ctx, block, beaconState); err != nil {
		t.Fatal(err)
	}
	service := NewAttestationService(context.Background(), &Config{BeaconDB: beaconDB})
	attestations := make([]*pb.Attestation, 0)
	for i := 0; i < 10; i++ {
		attestations = append(attestations, &pb.Attestation{
			AggregationBitfield: []byte{0x80},
			Data: &pb.AttestationData{
				Slot:  params.BeaconConfig().GenesisSlot + 1,
				Shard: 1,
			},
		})
	}

	if err := service.BatchUpdateLatestAttestation(ctx, attestations); err != nil {
		t.Fatalf("could not update latest attestation: %v", err)
	}
}
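The update tests above rebuild the same 64-validator registry inline each time; a sketch of a shared helper they could call instead (hypothetical, not in the diff):

	func testValidators(n int) []*pb.Validator {
		validators := make([]*pb.Validator, 0, n)
		for i := 0; i < n; i++ {
			validators = append(validators, &pb.Validator{
				Pubkey:          []byte{byte(i)},
				ActivationEpoch: params.BeaconConfig().GenesisEpoch,
				ExitEpoch:       params.BeaconConfig().GenesisEpoch + 10,
			})
		}
		return validators
	}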
@@ -1,34 +0,0 @@
package attestation

import (
	"strconv"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
	"github.com/prysmaticlabs/prysm/shared/params"
)

var (
	validatorLastVoteGauge = promauto.NewGaugeVec(prometheus.GaugeOpts{
		Name: "validators_last_vote",
		Help: "Votes of validators, updated when there's a new attestation",
	}, []string{
		"validatorIndex",
	})
	totalAttestationSeen = promauto.NewGauge(prometheus.GaugeOpts{
		Name: "total_seen_attestations",
		Help: "Total number of attestations seen by the validators",
	})
)

func reportVoteMetrics(index uint64, block *pb.BeaconBlock) {
	// Don't update vote metrics if the incoming block is nil.
	if block == nil {
		return
	}

	s := params.BeaconConfig().GenesisSlot
	validatorLastVoteGauge.WithLabelValues(
		"v" + strconv.Itoa(int(index))).Set(float64(block.Slot - s))
}
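A hedged sketch of asserting the gauge in a test, assuming the promauto metrics above land in the default registry; testutil here is github.com/prometheus/client_golang/prometheus/testutil, not the prysm shared/testutil package:

	reportVoteMetrics(3, &pb.BeaconBlock{Slot: params.BeaconConfig().GenesisSlot + 7})
	if got := testutil.ToFloat64(validatorLastVoteGauge.WithLabelValues("v3")); got != 7 {
		t.Errorf("wanted last vote slot 7, got %f", got)
	}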
@@ -3,73 +3,118 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
    name = "go_default_library",
    srcs = [
        "block_processing.go",
        "fork_choice.go",
        "chain_info.go",
        "info.go",
        "log.go",
        "metrics.go",
        "receive_attestation.go",
        "receive_block.go",
        "service.go",
    ],
    importpath = "github.com/prysmaticlabs/prysm/beacon-chain/blockchain",
    visibility = ["//beacon-chain:__subpackages__"],
    deps = [
        "//beacon-chain/attestation:go_default_library",
        "//beacon-chain/cache:go_default_library",
        "//beacon-chain/blockchain/forkchoice:go_default_library",
        "//beacon-chain/cache/depositcache:go_default_library",
        "//beacon-chain/core/blocks:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/core/state:go_default_library",
        "//beacon-chain/core/validators:go_default_library",
        "//beacon-chain/db:go_default_library",
        "//beacon-chain/operations:go_default_library",
        "//beacon-chain/p2p:go_default_library",
        "//beacon-chain/powchain:go_default_library",
        "//proto/beacon/p2p/v1:go_default_library",
        "//proto/beacon/rpc/v1:go_default_library",
        "//proto/eth/v1alpha1:go_default_library",
        "//shared/bytesutil:go_default_library",
        "//shared/event:go_default_library",
        "//shared/featureconfig:go_default_library",
        "//shared/hashutil:go_default_library",
        "//shared/p2p:go_default_library",
        "//shared/params:go_default_library",
        "//shared/traceutil:go_default_library",
        "@com_github_gogo_protobuf//proto:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_prometheus_client_golang//prometheus:go_default_library",
        "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
        "@com_github_prysmaticlabs_go_ssz//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@io_opencensus_go//trace:go_default_library",
    ],
)

go_test(
test_suite(
    name = "go_default_test",
    tests = [
        ":go_raceoff_test",
        ":go_raceon_test",
    ],
)

go_test(
    name = "go_raceoff_test",
    size = "medium",
    srcs = [
        "block_processing_test.go",
        "fork_choice_reorg_test.go",
        "fork_choice_test.go",
        "chain_info_test.go",
        "receive_attestation_test.go",
        "receive_block_test.go",
        "service_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
        "//beacon-chain/attestation:go_default_library",
        "//beacon-chain/cache:go_default_library",
        "//beacon-chain/cache/depositcache:go_default_library",
        "//beacon-chain/core/blocks:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/core/state:go_default_library",
        "//beacon-chain/core/validators:go_default_library",
        "//beacon-chain/db:go_default_library",
        "//beacon-chain/internal:go_default_library",
        "//beacon-chain/db/testing:go_default_library",
        "//beacon-chain/p2p:go_default_library",
        "//beacon-chain/powchain:go_default_library",
        "//proto/beacon/p2p/v1:go_default_library",
        "//proto/beacon/rpc/v1:go_default_library",
        "//shared/bls:go_default_library",
        "//proto/eth/v1alpha1:go_default_library",
        "//shared/bytesutil:go_default_library",
        "//shared/event:go_default_library",
        "//shared/featureconfig:go_default_library",
        "//shared/forkutil:go_default_library",
        "//shared/hashutil:go_default_library",
        "//shared/p2p:go_default_library",
        "//shared/params:go_default_library",
        "//shared/testutil:go_default_library",
        "//shared/trieutil:go_default_library",
        "@com_github_ethereum_go_ethereum//:go_default_library",
        "@com_github_ethereum_go_ethereum//common:go_default_library",
        "@com_github_ethereum_go_ethereum//core/types:go_default_library",
        "@com_github_gogo_protobuf//proto:go_default_library",
        "@com_github_prysmaticlabs_go_ssz//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@com_github_sirupsen_logrus//hooks/test:go_default_library",
        "@org_golang_x_net//context:go_default_library",
    ],
)

go_test(
    name = "go_raceon_test",
    srcs = [
        "chain_info_norace_test.go",
        "service_norace_test.go",
    ],
    embed = [":go_default_library"],
    race = "on",
    tags = ["race_on"],
    deps = [
        "//beacon-chain/cache/depositcache:go_default_library",
        "//beacon-chain/core/blocks:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/core/state:go_default_library",
        "//beacon-chain/db:go_default_library",
        "//beacon-chain/db/testing:go_default_library",
        "//beacon-chain/p2p:go_default_library",
        "//beacon-chain/powchain:go_default_library",
        "//proto/beacon/p2p/v1:go_default_library",
        "//proto/eth/v1alpha1:go_default_library",
        "//shared/bytesutil:go_default_library",
        "//shared/event:go_default_library",
        "//shared/params:go_default_library",
        "//shared/testutil:go_default_library",
        "@com_github_ethereum_go_ethereum//:go_default_library",
        "@com_github_ethereum_go_ethereum//common:go_default_library",
        "@com_github_ethereum_go_ethereum//core/types:go_default_library",
        "@com_github_gogo_protobuf//proto:go_default_library",
        "@com_github_prysmaticlabs_go_ssz//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@com_github_sirupsen_logrus//hooks/test:go_default_library",
        "@org_golang_x_net//context:go_default_library",
    ],
)
@@ -1,343 +0,0 @@
package blockchain

import (
	"bytes"
	"context"
	"errors"
	"fmt"

	b "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/validators"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
	pbrpc "github.com/prysmaticlabs/prysm/proto/beacon/rpc/v1"
	"github.com/prysmaticlabs/prysm/shared/bytesutil"
	"github.com/prysmaticlabs/prysm/shared/event"
	"github.com/prysmaticlabs/prysm/shared/featureconfig"
	"github.com/prysmaticlabs/prysm/shared/hashutil"
	"github.com/prysmaticlabs/prysm/shared/params"
	"github.com/sirupsen/logrus"
	"go.opencensus.io/trace"
)

// BlockReceiver interface defines the methods in the blockchain service which
// directly receives a new block from other services and applies the full processing pipeline.
type BlockReceiver interface {
	CanonicalBlockFeed() *event.Feed
	ReceiveBlock(ctx context.Context, block *pb.BeaconBlock) (*pb.BeaconState, error)
	IsCanonical(slot uint64, hash []byte) bool
	CanonicalBlock(slot uint64) (*pb.BeaconBlock, error)
	RecentCanonicalRoots(count uint64) []*pbrpc.BlockRoot
}

// BlockProcessor defines a common interface for methods useful for directly applying state transitions
// to beacon blocks and generating a new beacon state from the Ethereum 2.0 core primitives.
type BlockProcessor interface {
	VerifyBlockValidity(ctx context.Context, block *pb.BeaconBlock, beaconState *pb.BeaconState) error
	ApplyBlockStateTransition(ctx context.Context, block *pb.BeaconBlock, beaconState *pb.BeaconState) (*pb.BeaconState, error)
	CleanupBlockOperations(ctx context.Context, block *pb.BeaconBlock) error
}

// BlockFailedProcessingErr represents a block failing a state transition function.
type BlockFailedProcessingErr struct {
	err error
}

func (b *BlockFailedProcessingErr) Error() string {
	return fmt.Sprintf("block failed processing: %v", b.err)
}

// ReceiveBlock defines the operations that are performed on any block
// received from the p2p layer or rpc. It performs the following actions:
// 1. Verify the block passes pre-processing conditions.
// 2. Save and broadcast the block via p2p to other peers.
// 3. Apply the block state transition function and account for skip slots.
// 4. Process and clean up any block operations, such as attestations and deposits, which need to be
//    either included or flushed from the beacon node's runtime.
func (c *ChainService) ReceiveBlock(ctx context.Context, block *pb.BeaconBlock) (*pb.BeaconState, error) {
	c.receiveBlockLock.Lock()
	defer c.receiveBlockLock.Unlock()
	ctx, span := trace.StartSpan(ctx, "beacon-chain.blockchain.ReceiveBlock")
	defer span.End()
	parentRoot := bytesutil.ToBytes32(block.ParentRootHash32)
	parent, err := c.beaconDB.Block(parentRoot)
	if err != nil {
		return nil, fmt.Errorf("failed to get parent block: %v", err)
	}
	if parent == nil {
		return nil, errors.New("parent does not exist in DB")
	}
	beaconState, err := c.beaconDB.HistoricalStateFromSlot(ctx, parent.Slot)
	if err != nil {
		return nil, fmt.Errorf("could not retrieve beacon state: %v", err)
	}
	saveLatestBlock := beaconState.LatestBlock

	blockRoot, err := hashutil.HashBeaconBlock(block)
	if err != nil {
		return nil, fmt.Errorf("could not hash beacon block: %v", err)
	}
	// We first verify the block's basic validity conditions.
	if err := c.VerifyBlockValidity(ctx, block, beaconState); err != nil {
		return beaconState, fmt.Errorf("block with slot %d is not ready for processing: %v", block.Slot, err)
	}

	// We save the block to the DB and broadcast it to our peers.
	if err := c.SaveAndBroadcastBlock(ctx, block); err != nil {
		return beaconState, fmt.Errorf(
			"could not save and broadcast beacon block with slot %d: %v",
			block.Slot-params.BeaconConfig().GenesisSlot, err,
		)
	}

	log.WithField("slotNumber", block.Slot-params.BeaconConfig().GenesisSlot).Info(
		"Executing state transition")

	// We then apply the block state transition accordingly to obtain the resulting beacon state.
	beaconState, err = c.ApplyBlockStateTransition(ctx, block, beaconState)
	if err != nil {
		switch err.(type) {
		case *BlockFailedProcessingErr:
			// If the block fails processing, we mark it as blacklisted and delete it from our DB.
			c.beaconDB.MarkEvilBlockHash(blockRoot)
			if err := c.beaconDB.DeleteBlock(block); err != nil {
				return nil, fmt.Errorf("could not delete bad block from db: %v", err)
			}
			return beaconState, err
		default:
			return beaconState, fmt.Errorf("could not apply block state transition: %v", err)
		}
	}

	log.WithFields(logrus.Fields{
		"slotNumber":   block.Slot - params.BeaconConfig().GenesisSlot,
		"currentEpoch": helpers.SlotToEpoch(block.Slot) - params.BeaconConfig().GenesisEpoch,
	}).Info("State transition complete")

	// Check the state root.
	if featureconfig.FeatureConfig().EnableCheckBlockStateRoot {
		// Calculate the state hash with the previous block.
		beaconState.LatestBlock = saveLatestBlock
		stateRoot, err := hashutil.HashProto(beaconState)
		if err != nil {
			return nil, fmt.Errorf("could not hash beacon state: %v", err)
		}
		beaconState.LatestBlock = block
		if !bytes.Equal(block.StateRootHash32, stateRoot[:]) {
			return nil, fmt.Errorf("beacon state root is not equal to block state root: %#x != %#x", stateRoot, block.StateRootHash32)
		}
	}

	// We process the block's contained deposits, attestations, and other operations
	// that may need to be stored or deleted from the beacon node's persistent storage.
	if err := c.CleanupBlockOperations(ctx, block); err != nil {
		return beaconState, fmt.Errorf("could not process block deposits, attestations, and other operations: %v", err)
	}

	log.WithField("slot", block.Slot-params.BeaconConfig().GenesisSlot).Info("Finished processing beacon block")
	return beaconState, nil
}
|
||||
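// Example (illustrative sketch, not part of the original source): a caller
// such as a sync service is expected to run ReceiveBlock and then apply the
// fork choice rule on the returned post state. The helper name
// processIncomingBlock is hypothetical.
//
//	func processIncomingBlock(ctx context.Context, c *ChainService, blk *pb.BeaconBlock) error {
//		postState, err := c.ReceiveBlock(ctx, blk)
//		if err != nil {
//			return fmt.Errorf("could not receive block: %v", err)
//		}
//		return c.ApplyForkChoiceRule(ctx, blk, postState)
//	}
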
// ApplyBlockStateTransition runs the Ethereum 2.0 state transition function
// to produce a new beacon state and also accounts for skip slots occurring.
//
//	def apply_block_state_transition(block):
//	    # process skipped slots
//	    while (state.slot < block.slot - 1):
//	        state = slot_state_transition(state, block=None)
//
//	    # process slot with block
//	    state = slot_state_transition(state, block)
//
//	    # check state root
//	    if block.state_root == hash(state):
//	        return state, nil
//	    else:
//	        return nil, error
func (c *ChainService) ApplyBlockStateTransition(
	ctx context.Context, block *pb.BeaconBlock, beaconState *pb.BeaconState,
) (*pb.BeaconState, error) {
	// Retrieve the last processed beacon block's hash root.
	headRoot, err := c.ChainHeadRoot()
	if err != nil {
		return beaconState, fmt.Errorf("could not retrieve chain head root: %v", err)
	}

	// Check for skipped slots.
	numSkippedSlots := 0
	for beaconState.Slot < block.Slot-1 {
		beaconState, err = c.runStateTransition(ctx, headRoot, nil, beaconState)
		if err != nil {
			return beaconState, err
		}
		numSkippedSlots++
	}
	if numSkippedSlots > 0 {
		log.Warnf("Processed %d skipped slots", numSkippedSlots)
	}

	beaconState, err = c.runStateTransition(ctx, headRoot, block, beaconState)
	if err != nil {
		return beaconState, err
	}
	return beaconState, nil
}

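// Example (hypothetical numbers): if the last processed state slot is 10 and a
// block arrives for slot 14, the skip-slot loop above advances the state
// through slots 11, 12, and 13 with block=nil (numSkippedSlots == 3), and only
// then runs the transition that includes the block itself at slot 14.
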
// VerifyBlockValidity cross-checks the block against the pre-processing conditions from
// Ethereum 2.0, namely:
//   - The parent block with root block.parent_root has been processed and accepted.
//   - The node has processed its state up to slot block.slot - 1.
//   - The Ethereum 1.0 block pointed to by state.processed_pow_receipt_root has been processed and accepted.
//   - The node's local clock time is greater than or equal to state.genesis_time + block.slot * SECONDS_PER_SLOT.
func (c *ChainService) VerifyBlockValidity(
	ctx context.Context,
	block *pb.BeaconBlock,
	beaconState *pb.BeaconState,
) error {
	if block.Slot == params.BeaconConfig().GenesisSlot {
		return fmt.Errorf("cannot process a genesis block: received block with slot %d",
			block.Slot-params.BeaconConfig().GenesisSlot)
	}
	powBlockFetcher := c.web3Service.Client().BlockByHash
	if err := b.IsValidBlock(ctx, beaconState, block,
		c.beaconDB.HasBlock, powBlockFetcher, c.genesisTime); err != nil {
		return fmt.Errorf("block does not fulfill pre-processing conditions: %v", err)
	}
	return nil
}

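// Example (hypothetical numbers): with SECONDS_PER_SLOT = 6, a block for the
// tenth slot after genesis only satisfies the clock condition above once the
// node's local time reaches genesis_time + 10*6 seconds; blocks that arrive
// earlier are rejected as not ready for processing.
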
// SaveAndBroadcastBlock stores the block in persistent storage and then broadcasts it to
// peers via p2p. Blocks which have already been saved are not processed again via p2p, which is why
// the order of operations is important in this function to prevent infinite p2p loops.
func (c *ChainService) SaveAndBroadcastBlock(ctx context.Context, block *pb.BeaconBlock) error {
	blockRoot, err := hashutil.HashBeaconBlock(block)
	if err != nil {
		return fmt.Errorf("could not tree hash incoming block: %v", err)
	}
	if err := c.beaconDB.SaveBlock(block); err != nil {
		return fmt.Errorf("failed to save block: %v", err)
	}
	if err := c.beaconDB.SaveAttestationTarget(ctx, &pb.AttestationTarget{
		Slot:       block.Slot,
		BlockRoot:  blockRoot[:],
		ParentRoot: block.ParentRootHash32,
	}); err != nil {
		return fmt.Errorf("failed to save attestation target: %v", err)
	}
	// Announce the new block to the network.
	c.p2p.Broadcast(ctx, &pb.BeaconBlockAnnounce{
		Hash:       blockRoot[:],
		SlotNumber: block.Slot,
	})
	return nil
}

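// Example (illustrative): because SaveBlock runs before Broadcast, a peer that
// echoes the announcement back finds the block already present in the DB and
// skips reprocessing it, which is what breaks the potential broadcast loop
// described above.
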
// CleanupBlockOperations processes and cleans up any block operations relevant to the beacon node,
// such as attestations, exits, and deposits. We update the latest seen attestation by validator
// in the local node's runtime, clean up and remove pending deposits which have been included in the block
// from our node's local cache, and process validator exits and more.
func (c *ChainService) CleanupBlockOperations(ctx context.Context, block *pb.BeaconBlock) error {
	// Forward the processed block to the operation pool to remove individual operations from the DB.
	if c.opsPoolService.IncomingProcessedBlockFeed().Send(block) == 0 {
		log.Error("Sent processed block to no subscribers")
	}

	if err := c.attsService.BatchUpdateLatestAttestation(ctx, block.Body.Attestations); err != nil {
		return fmt.Errorf("failed to update latest attestation for store: %v", err)
	}

	// Remove pending deposits from the deposit queue.
	for _, dep := range block.Body.Deposits {
		c.beaconDB.RemovePendingDeposit(ctx, dep)
	}
	return nil
}

// runStateTransition executes the Ethereum 2.0 core state transition for the beacon chain and
// updates important checkpoints and local persistent data during epoch transitions. It serves as a wrapper
// around the more low-level, core state transition function primitive.
func (c *ChainService) runStateTransition(
	ctx context.Context,
	headRoot [32]byte,
	block *pb.BeaconBlock,
	beaconState *pb.BeaconState,
) (*pb.BeaconState, error) {
	newState, err := state.ExecuteStateTransition(
		ctx,
		beaconState,
		block,
		headRoot,
		&state.TransitionConfig{
			VerifySignatures: false, // We disable signature verification for now.
			Logging:          true,  // We enable logging in this state transition call.
		},
	)
	if err != nil {
		return beaconState, &BlockFailedProcessingErr{err}
	}
	log.WithField(
		"slotsSinceGenesis", newState.Slot-params.BeaconConfig().GenesisSlot,
	).Info("Slot transition successfully processed")

	if block != nil {
		log.WithField(
			"slotsSinceGenesis", newState.Slot-params.BeaconConfig().GenesisSlot,
		).Info("Block transition successfully processed")

		// Save historical states.
		if err := c.beaconDB.SaveHistoricalState(ctx, beaconState); err != nil {
			return nil, fmt.Errorf("could not save historical state: %v", err)
		}
	}

	if helpers.IsEpochEnd(newState.Slot) {
		// Save this epoch's activated validators to the public key -> index DB.
		if err := c.saveValidatorIdx(newState); err != nil {
			return newState, fmt.Errorf("could not save validator index: %v", err)
		}
		// Delete this epoch's exited validators from the public key -> index DB.
		if err := c.deleteValidatorIdx(newState); err != nil {
			return newState, fmt.Errorf("could not delete validator index: %v", err)
		}
		// Update FFG checkpoints in DB.
		if err := c.updateFFGCheckPts(ctx, newState); err != nil {
			return newState, fmt.Errorf("could not update FFG checkpts: %v", err)
		}
		log.WithField(
			"slotsSinceGenesis", newState.Slot-params.BeaconConfig().GenesisSlot,
		).Info("Epoch transition successfully processed")
	}
	return newState, nil
}

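// Example (illustrative, assuming helpers.IsEpochEnd reports the final slot of
// an epoch): the validator-index and FFG checkpoint bookkeeping above runs
// once per epoch, on its last slot; on every other slot runStateTransition
// only executes the core transition and the slot/block logging.
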
// saveValidatorIdx saves the validators' public key to index mapping in DB. These
// validators were activated in the current epoch. After the save, the current epoch key
// is deleted from the ActivatedValidators mapping.
func (c *ChainService) saveValidatorIdx(state *pb.BeaconState) error {
	activatedValidators := validators.ActivatedValFromEpoch(helpers.CurrentEpoch(state) + 1)
	for _, idx := range activatedValidators {
		pubKey := state.ValidatorRegistry[idx].Pubkey
		if err := c.beaconDB.SaveValidatorIndex(pubKey, int(idx)); err != nil {
			return fmt.Errorf("could not save validator index: %v", err)
		}
	}
	validators.DeleteActivatedVal(helpers.CurrentEpoch(state))
	return nil
}

// deleteValidatorIdx deletes the validators' public key to index mapping in DB. These
// validators exited in the current epoch. After the deletion, the current epoch key
// is deleted from the ExitedValidators mapping.
func (c *ChainService) deleteValidatorIdx(state *pb.BeaconState) error {
	exitedValidators := validators.ExitedValFromEpoch(helpers.CurrentEpoch(state) + 1)
	for _, idx := range exitedValidators {
		pubKey := state.ValidatorRegistry[idx].Pubkey
		if err := c.beaconDB.DeleteValidatorIndex(pubKey); err != nil {
			return fmt.Errorf("could not delete validator index: %v", err)
		}
	}
	validators.DeleteExitedVal(helpers.CurrentEpoch(state))
	return nil
}

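// Example (illustrative sketch, variable names hypothetical): once
// saveValidatorIdx has run at an epoch boundary, a public key can be resolved
// back to its registry index through the DB:
//
//	idx, err := c.beaconDB.ValidatorIndex(pubKey)
//	if err != nil {
//		return err
//	}
//	validator := state.ValidatorRegistry[idx]
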
@@ -1,840 +0,0 @@
package blockchain

import (
	"context"
	"encoding/binary"
	"math/big"
	"strings"
	"testing"
	"time"

	"github.com/prysmaticlabs/prysm/beacon-chain/attestation"
	b "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
	v "github.com/prysmaticlabs/prysm/beacon-chain/core/validators"
	"github.com/prysmaticlabs/prysm/beacon-chain/internal"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
	"github.com/prysmaticlabs/prysm/shared/bytesutil"
	"github.com/prysmaticlabs/prysm/shared/featureconfig"
	"github.com/prysmaticlabs/prysm/shared/hashutil"
	"github.com/prysmaticlabs/prysm/shared/params"
	"github.com/prysmaticlabs/prysm/shared/testutil"
	"github.com/prysmaticlabs/prysm/shared/trieutil"
	logTest "github.com/sirupsen/logrus/hooks/test"
)

// Ensure ChainService implements interfaces.
var _ = BlockProcessor(&ChainService{})

func initBlockStateRoot(t *testing.T, block *pb.BeaconBlock, chainService *ChainService) {
	parentRoot := bytesutil.ToBytes32(block.ParentRootHash32)
	parent, err := chainService.beaconDB.Block(parentRoot)
	if err != nil {
		t.Fatal(err)
	}
	beaconState, err := chainService.beaconDB.HistoricalStateFromSlot(context.Background(), parent.Slot)
	if err != nil {
		t.Fatalf("Unable to retrieve state: %v", err)
	}
	saveLatestBlock := beaconState.LatestBlock

	computedState, err := chainService.ApplyBlockStateTransition(context.Background(), block, beaconState)
	if err != nil {
		t.Fatalf("could not apply block state transition: %v", err)
	}

	computedState.LatestBlock = saveLatestBlock
	stateRoot, err := hashutil.HashProto(computedState)
	if err != nil {
		t.Fatalf("could not tree hash state: %v", err)
	}
	block.StateRootHash32 = stateRoot[:]
	t.Logf("state root after block: %#x", stateRoot)
}

func TestReceiveBlock_FaultyPOWChain(t *testing.T) {
	db := internal.SetupDB(t)
	defer internal.TeardownDB(t, db)
	chainService := setupBeaconChain(t, db, nil)
	unixTime := uint64(time.Now().Unix())
	deposits, _ := setupInitialDeposits(t, 100)
	if err := db.InitializeState(context.Background(), unixTime, deposits, &pb.Eth1Data{}); err != nil {
		t.Fatalf("Could not initialize beacon state to disk: %v", err)
	}

	if err := SetSlotInState(chainService, 1); err != nil {
		t.Fatal(err)
	}

	parentBlock := &pb.BeaconBlock{
		Slot: 1,
	}

	parentRoot, err := hashutil.HashBeaconBlock(parentBlock)
	if err != nil {
		t.Fatalf("Unable to tree hash block: %v", err)
	}

	if err := chainService.beaconDB.SaveBlock(parentBlock); err != nil {
		t.Fatalf("Unable to save block: %v", err)
	}

	block := &pb.BeaconBlock{
		Slot:             2,
		ParentRootHash32: parentRoot[:],
		Eth1Data: &pb.Eth1Data{
			DepositRootHash32: []byte("a"),
			BlockHash32:       []byte("b"),
		},
	}

	if err := chainService.beaconDB.SaveBlock(block); err != nil {
		t.Fatal(err)
	}
	if _, err := chainService.ReceiveBlock(context.Background(), block); err == nil {
		t.Error("Expected receive block to fail, received a nil error")
	}
}

func TestReceiveBlock_ProcessCorrectly(t *testing.T) {
	hook := logTest.NewGlobal()
	db := internal.SetupDB(t)
	defer internal.TeardownDB(t, db)
	ctx := context.Background()

	chainService := setupBeaconChain(t, db, nil)
	deposits, privKeys := setupInitialDeposits(t, 100)
	eth1Data := &pb.Eth1Data{
		DepositRootHash32: []byte{},
		BlockHash32:       []byte{},
	}
	beaconState, err := state.GenesisBeaconState(deposits, 0, eth1Data)
	if err != nil {
		t.Fatalf("Can't generate genesis state: %v", err)
	}
	stateRoot, err := hashutil.HashProto(beaconState)
	if err != nil {
		t.Fatalf("Could not tree hash state: %v", err)
	}
	if err := db.SaveHistoricalState(ctx, beaconState); err != nil {
		t.Fatal(err)
	}

	genesis := b.NewGenesisBlock([]byte{})
	if err := chainService.beaconDB.SaveBlock(genesis); err != nil {
		t.Fatalf("Could not save block to db: %v", err)
	}
	parentHash, err := hashutil.HashBeaconBlock(genesis)
	if err != nil {
		t.Fatalf("Unable to get tree hash root of canonical head: %v", err)
	}
	if err := chainService.beaconDB.UpdateChainHead(ctx, genesis, beaconState); err != nil {
		t.Fatal(err)
	}

	beaconState.Slot++
	randaoReveal := createRandaoReveal(t, beaconState, privKeys)

	block := &pb.BeaconBlock{
		Slot:             beaconState.Slot,
		StateRootHash32:  stateRoot[:],
		ParentRootHash32: parentHash[:],
		RandaoReveal:     randaoReveal,
		Eth1Data: &pb.Eth1Data{
			DepositRootHash32: []byte("a"),
			BlockHash32:       []byte("b"),
		},
		Body: &pb.BeaconBlockBody{
			Attestations: nil,
		},
	}

	initBlockStateRoot(t, block, chainService)

	if err := chainService.beaconDB.SaveJustifiedBlock(block); err != nil {
		t.Fatal(err)
	}
	if err := chainService.beaconDB.SaveFinalizedBlock(block); err != nil {
		t.Fatal(err)
	}
	if err := chainService.beaconDB.SaveBlock(block); err != nil {
		t.Fatal(err)
	}
	if _, err := chainService.ReceiveBlock(context.Background(), block); err != nil {
		t.Errorf("Block failed processing: %v", err)
	}

	testutil.AssertLogsContain(t, hook, "Finished processing beacon block")
}

func TestReceiveBlock_UsesParentBlockState(t *testing.T) {
	hook := logTest.NewGlobal()
	db := internal.SetupDB(t)
	defer internal.TeardownDB(t, db)
	ctx := context.Background()

	chainService := setupBeaconChain(t, db, nil)
	deposits, _ := setupInitialDeposits(t, 100)
	eth1Data := &pb.Eth1Data{
		DepositRootHash32: []byte{},
		BlockHash32:       []byte{},
	}
	beaconState, err := state.GenesisBeaconState(deposits, 0, eth1Data)
	if err != nil {
		t.Fatalf("Can't generate genesis state: %v", err)
	}
	if err := chainService.beaconDB.SaveHistoricalState(ctx, beaconState); err != nil {
		t.Fatal(err)
	}
	stateRoot, err := hashutil.HashProto(beaconState)
	if err != nil {
		t.Fatalf("Could not tree hash state: %v", err)
	}

	parentHash, genesisBlock := setupGenesisBlock(t, chainService)
	if err := chainService.beaconDB.UpdateChainHead(ctx, genesisBlock, beaconState); err != nil {
		t.Fatal(err)
	}

	// We ensure the block uses the right state parent if its ancestor is not block.Slot-1.
	block := &pb.BeaconBlock{
		Slot:             beaconState.Slot + 4,
		StateRootHash32:  stateRoot[:],
		ParentRootHash32: parentHash[:],
		RandaoReveal:     []byte{},
		Eth1Data: &pb.Eth1Data{
			DepositRootHash32: []byte("a"),
			BlockHash32:       []byte("b"),
		},
		Body: &pb.BeaconBlockBody{
			Attestations: nil,
		},
	}
	initBlockStateRoot(t, block, chainService)
	if err := chainService.beaconDB.SaveBlock(block); err != nil {
		t.Fatal(err)
	}
	if _, err := chainService.ReceiveBlock(context.Background(), block); err != nil {
		t.Errorf("Block failed processing: %v", err)
	}
	testutil.AssertLogsContain(t, hook, "Finished processing beacon block")
}

func TestReceiveBlock_DeletesBadBlock(t *testing.T) {
	featureconfig.InitFeatureConfig(&featureconfig.FeatureFlagConfig{
		EnableCheckBlockStateRoot: false,
	})
	db := internal.SetupDB(t)
	defer internal.TeardownDB(t, db)
	ctx := context.Background()

	chainService := setupBeaconChain(t, db, nil)
	deposits, _ := setupInitialDeposits(t, 100)
	eth1Data := &pb.Eth1Data{
		DepositRootHash32: []byte{},
		BlockHash32:       []byte{},
	}
	beaconState, err := state.GenesisBeaconState(deposits, 0, eth1Data)
	if err != nil {
		t.Fatalf("Can't generate genesis state: %v", err)
	}
	stateRoot, err := hashutil.HashProto(beaconState)
	if err != nil {
		t.Fatalf("Could not tree hash state: %v", err)
	}
	if err := chainService.beaconDB.SaveHistoricalState(ctx, beaconState); err != nil {
		t.Fatal(err)
	}

	parentHash, genesisBlock := setupGenesisBlock(t, chainService)
	if err := chainService.beaconDB.UpdateChainHead(ctx, genesisBlock, beaconState); err != nil {
		t.Fatal(err)
	}

	beaconState.Slot++

	block := &pb.BeaconBlock{
		Slot:             beaconState.Slot,
		StateRootHash32:  stateRoot[:],
		ParentRootHash32: parentHash[:],
		RandaoReveal:     []byte{},
		Eth1Data: &pb.Eth1Data{
			DepositRootHash32: []byte("a"),
			BlockHash32:       []byte("b"),
		},
		Body: &pb.BeaconBlockBody{
			Attestations: []*pb.Attestation{
				{
					Data: &pb.AttestationData{
						JustifiedEpoch: params.BeaconConfig().GenesisSlot * 100,
					},
				},
			},
		},
	}

	blockRoot, err := hashutil.HashBeaconBlock(block)
	if err != nil {
		t.Fatal(err)
	}

	_, err = chainService.ReceiveBlock(context.Background(), block)
	switch err.(type) {
	case *BlockFailedProcessingErr:
		t.Log("Block failed processing as expected")
	default:
		t.Errorf("Unexpected block processing error: %v", err)
	}

	savedBlock, err := db.Block(blockRoot)
	if err != nil {
		t.Fatal(err)
	}
	if savedBlock != nil {
		t.Errorf("Expected bad block to have been deleted, received: %v", savedBlock)
	}
	// We also verify the block has been blacklisted.
	if !db.IsEvilBlockHash(blockRoot) {
		t.Error("Expected block root to have been blacklisted")
	}
	featureconfig.InitFeatureConfig(&featureconfig.FeatureFlagConfig{
		EnableCheckBlockStateRoot: true,
	})
}

func TestReceiveBlock_CheckBlockStateRoot_GoodState(t *testing.T) {
	hook := logTest.NewGlobal()
	db := internal.SetupDB(t)
	defer internal.TeardownDB(t, db)
	ctx := context.Background()

	attsService := attestation.NewAttestationService(
		context.Background(),
		&attestation.Config{BeaconDB: db})
	chainService := setupBeaconChain(t, db, attsService)
	deposits, privKeys := setupInitialDeposits(t, 100)
	eth1Data := &pb.Eth1Data{
		DepositRootHash32: []byte{},
		BlockHash32:       []byte{},
	}
	beaconState, err := state.GenesisBeaconState(deposits, 0, eth1Data)
	if err != nil {
		t.Fatalf("Can't generate genesis state: %v", err)
	}
	if err := chainService.beaconDB.SaveHistoricalState(ctx, beaconState); err != nil {
		t.Fatal(err)
	}
	parentHash, genesisBlock := setupGenesisBlock(t, chainService)
	beaconState.Slot++
	if err := chainService.beaconDB.UpdateChainHead(ctx, genesisBlock, beaconState); err != nil {
		t.Fatal(err)
	}

	beaconState.Slot++
	goodStateBlock := &pb.BeaconBlock{
		Slot:             beaconState.Slot,
		ParentRootHash32: parentHash[:],
		RandaoReveal:     createRandaoReveal(t, beaconState, privKeys),
		Body:             &pb.BeaconBlockBody{},
	}
	beaconState.Slot--
	initBlockStateRoot(t, goodStateBlock, chainService)

	if err := chainService.beaconDB.SaveBlock(goodStateBlock); err != nil {
		t.Fatal(err)
	}

	_, err = chainService.ReceiveBlock(context.Background(), goodStateBlock)
	if err != nil {
		t.Fatalf("error exists for good block: %v", err)
	}
	testutil.AssertLogsContain(t, hook, "Executing state transition")
}

func TestReceiveBlock_CheckBlockStateRoot_BadState(t *testing.T) {
	db := internal.SetupDB(t)
	defer internal.TeardownDB(t, db)
	chainService := setupBeaconChain(t, db, nil)
	deposits, privKeys := setupInitialDeposits(t, 100)
	ctx := context.Background()
	eth1Data := &pb.Eth1Data{
		DepositRootHash32: []byte{},
		BlockHash32:       []byte{},
	}
	beaconState, err := state.GenesisBeaconState(deposits, 0, eth1Data)
	if err != nil {
		t.Fatalf("Can't generate genesis state: %v", err)
	}
	if err := chainService.beaconDB.SaveHistoricalState(ctx, beaconState); err != nil {
		t.Fatal(err)
	}
	parentHash, genesisBlock := setupGenesisBlock(t, chainService)
	beaconState.Slot++
	if err := chainService.beaconDB.UpdateChainHead(ctx, genesisBlock, beaconState); err != nil {
		t.Fatal(err)
	}

	beaconState.Slot++
	invalidStateBlock := &pb.BeaconBlock{
		Slot:             beaconState.Slot,
		StateRootHash32:  []byte{'b', 'a', 'd', ' ', 'h', 'a', 's', 'h'},
		ParentRootHash32: parentHash[:],
		RandaoReveal:     createRandaoReveal(t, beaconState, privKeys),
		Body:             &pb.BeaconBlockBody{},
	}
	beaconState.Slot--

	_, err = chainService.ReceiveBlock(context.Background(), invalidStateBlock)
	if err == nil {
		t.Fatal("no error for wrong block state root")
	}
	if !strings.Contains(err.Error(), "beacon state root is not equal to block state root: ") {
		t.Fatal(err)
	}
}

func TestReceiveBlock_RemovesPendingDeposits(t *testing.T) {
	hook := logTest.NewGlobal()
	db := internal.SetupDB(t)
	defer internal.TeardownDB(t, db)
	ctx := context.Background()

	attsService := attestation.NewAttestationService(
		context.Background(),
		&attestation.Config{BeaconDB: db})
	chainService := setupBeaconChain(t, db, attsService)
	deposits, privKeys := setupInitialDeposits(t, 100)
	eth1Data := &pb.Eth1Data{
		DepositRootHash32: []byte{},
		BlockHash32:       []byte{},
	}
	beaconState, err := state.GenesisBeaconState(deposits, 0, eth1Data)
	if err != nil {
		t.Fatalf("Can't generate genesis state: %v", err)
	}
	if err := chainService.beaconDB.SaveJustifiedState(beaconState); err != nil {
		t.Fatal(err)
	}
	if err := db.SaveFinalizedState(beaconState); err != nil {
		t.Fatal(err)
	}

	stateRoot, err := hashutil.HashProto(beaconState)
	if err != nil {
		t.Fatalf("Could not tree hash state: %v", err)
	}
	parentHash, genesisBlock := setupGenesisBlock(t, chainService)
	beaconState.Slot++
	if err := chainService.beaconDB.UpdateChainHead(ctx, genesisBlock, beaconState); err != nil {
		t.Fatal(err)
	}

	currentSlot := params.BeaconConfig().GenesisSlot
	randaoReveal := createRandaoReveal(t, beaconState, privKeys)

	pendingDeposits := []*pb.Deposit{
		createPreChainStartDeposit(t, []byte{'F'}, beaconState.DepositIndex),
	}
	pendingDepositsData := make([][]byte, len(pendingDeposits))
	for i, pd := range pendingDeposits {
		pendingDepositsData[i] = pd.DepositData
	}
	depositTrie, err := trieutil.GenerateTrieFromItems(pendingDepositsData, int(params.BeaconConfig().DepositContractTreeDepth))
	if err != nil {
		t.Fatalf("Could not generate deposit trie: %v", err)
	}
	for i := range pendingDeposits {
		pendingDeposits[i].MerkleTreeIndex = 0
		proof, err := depositTrie.MerkleProof(int(pendingDeposits[i].MerkleTreeIndex))
		if err != nil {
			t.Fatalf("Could not generate proof: %v", err)
		}
		pendingDeposits[i].MerkleProofHash32S = proof
	}
	depositRoot := depositTrie.Root()
	beaconState.LatestEth1Data.DepositRootHash32 = depositRoot[:]
	if err := db.SaveHistoricalState(context.Background(), beaconState); err != nil {
		t.Fatal(err)
	}

	block := &pb.BeaconBlock{
		Slot:             currentSlot + 1,
		StateRootHash32:  stateRoot[:],
		ParentRootHash32: parentHash[:],
		RandaoReveal:     randaoReveal,
		Eth1Data: &pb.Eth1Data{
			DepositRootHash32: []byte("a"),
			BlockHash32:       []byte("b"),
		},
		Body: &pb.BeaconBlockBody{
			Deposits: pendingDeposits,
		},
	}

	beaconState.Slot--
	beaconState.DepositIndex = 0
	if err := chainService.beaconDB.SaveState(ctx, beaconState); err != nil {
		t.Fatal(err)
	}
	initBlockStateRoot(t, block, chainService)

	blockRoot, err := hashutil.HashBeaconBlock(block)
	if err != nil {
		t.Fatalf("could not hash block: %v", err)
	}

	if err := chainService.beaconDB.SaveJustifiedBlock(block); err != nil {
		t.Fatal(err)
	}
	if err := chainService.beaconDB.SaveFinalizedBlock(block); err != nil {
		t.Fatal(err)
	}

	for _, dep := range pendingDeposits {
		db.InsertPendingDeposit(chainService.ctx, dep, big.NewInt(0))
	}

	if len(db.PendingDeposits(chainService.ctx, nil)) != len(pendingDeposits) || len(pendingDeposits) == 0 {
		t.Fatalf("Expected %d pending deposits", len(pendingDeposits))
	}

	beaconState.Slot--
	if err := chainService.beaconDB.SaveState(ctx, beaconState); err != nil {
		t.Fatal(err)
	}
	if err := db.SaveHistoricalState(context.Background(), beaconState); err != nil {
		t.Fatal(err)
	}
	computedState, err := chainService.ReceiveBlock(context.Background(), block)
	if err != nil {
		t.Fatal(err)
	}
	for i := 0; i < len(beaconState.ValidatorRegistry); i++ {
		pubKey := bytesutil.ToBytes48(beaconState.ValidatorRegistry[i].Pubkey)
		attsService.InsertAttestationIntoStore(pubKey, &pb.Attestation{
			Data: &pb.AttestationData{
				BeaconBlockRootHash32: blockRoot[:],
			}},
		)
	}
	if err := chainService.ApplyForkChoiceRule(context.Background(), block, computedState); err != nil {
		t.Fatal(err)
	}

	if len(db.PendingDeposits(chainService.ctx, nil)) != 0 {
		t.Fatalf("Expected 0 pending deposits, but there are %+v", db.PendingDeposits(chainService.ctx, nil))
	}
	testutil.AssertLogsContain(t, hook, "Executing state transition")
}

// Scenario graph: http://bit.ly/2K1k2KZ
//
// digraph G {
//     rankdir=LR;
//     node [shape="none"];
//
//     subgraph blocks {
//         rankdir=LR;
//         node [shape="box"];
//         a->b;
//         b->c;
//         c->e;
//         c->f;
//         f->g;
//         e->h;
//     }
//
//     { rank=same; 1; a;}
//     { rank=same; 2; b;}
//     { rank=same; 3; c;}
//     { rank=same; 5; e;}
//     { rank=same; 6; f;}
//     { rank=same; 7; g;}
//     { rank=same; 8; h;}
//
//     1->2->3->4->5->6->7->8->9[arrowhead=none];
// }
func TestReceiveBlock_OnChainSplit(t *testing.T) {
	// The scenario to test is that we think that the canonical head is block H
	// and then we receive block G. We don't have block F, so we request it. Then
	// we process F, then G. The expected behavior is that we load the historical
	// state from slot 3, where the common ancestor block C is present.

	db := internal.SetupDB(t)
	defer internal.TeardownDB(t, db)
	ctx := context.Background()

	chainService := setupBeaconChain(t, db, nil)
	deposits, privKeys := setupInitialDeposits(t, 100)
	eth1Data := &pb.Eth1Data{
		DepositRootHash32: []byte{},
		BlockHash32:       []byte{},
	}
	beaconState, err := state.GenesisBeaconState(deposits, 0, eth1Data)
	if err != nil {
		t.Fatalf("Can't generate genesis state: %v", err)
	}
	stateRoot, err := hashutil.HashProto(beaconState)
	if err != nil {
		t.Fatalf("Could not tree hash state: %v", err)
	}
	parentHash, genesisBlock := setupGenesisBlock(t, chainService)
	if err := db.UpdateChainHead(ctx, genesisBlock, beaconState); err != nil {
		t.Fatal(err)
	}
	if err := db.SaveFinalizedState(beaconState); err != nil {
		t.Fatal(err)
	}
	genesisSlot := params.BeaconConfig().GenesisSlot

	// Top chain slots (see graph)
	blockSlots := []uint64{1, 2, 3, 5, 8}
	for _, slot := range blockSlots {
		block := &pb.BeaconBlock{
			Slot:             genesisSlot + slot,
			StateRootHash32:  stateRoot[:],
			ParentRootHash32: parentHash[:],
			RandaoReveal:     createRandaoReveal(t, beaconState, privKeys),
			Body:             &pb.BeaconBlockBody{},
		}
		initBlockStateRoot(t, block, chainService)
		computedState, err := chainService.ReceiveBlock(ctx, block)
		if err != nil {
			t.Fatal(err)
		}
		stateRoot, err = hashutil.HashProto(computedState)
		if err != nil {
			t.Fatal(err)
		}
		if err = db.SaveBlock(block); err != nil {
			t.Fatal(err)
		}
		if err = db.UpdateChainHead(ctx, block, computedState); err != nil {
			t.Fatal(err)
		}
		parentHash, err = hashutil.HashBeaconBlock(block)
		if err != nil {
			t.Fatal(err)
		}
	}

	// Common ancestor is the block at slot 3.
	commonAncestor, err := db.BlockBySlot(ctx, genesisSlot+3)
	if err != nil {
		t.Fatal(err)
	}

	parentHash, err = hashutil.HashBeaconBlock(commonAncestor)
	if err != nil {
		t.Fatal(err)
	}

	beaconState, err = db.HistoricalStateFromSlot(ctx, commonAncestor.Slot)
	if err != nil {
		t.Fatal(err)
	}
	stateRoot, err = hashutil.HashProto(beaconState)
	if err != nil {
		t.Fatal(err)
	}
	// Then we receive the block `f` from slot 6.
	blockF := &pb.BeaconBlock{
		Slot:             genesisSlot + 6,
		ParentRootHash32: parentHash[:],
		StateRootHash32:  stateRoot[:],
		RandaoReveal:     createRandaoReveal(t, beaconState, privKeys),
		Body:             &pb.BeaconBlockBody{},
	}
	initBlockStateRoot(t, blockF, chainService)

	computedState, err := chainService.ReceiveBlock(ctx, blockF)
	if err != nil {
		t.Fatal(err)
	}

	stateRoot, err = hashutil.HashProto(computedState)
	if err != nil {
		t.Fatal(err)
	}

	if err := db.SaveBlock(blockF); err != nil {
		t.Fatal(err)
	}

	parentHash, err = hashutil.HashBeaconBlock(blockF)
	if err != nil {
		t.Fatal(err)
	}

	// Then we apply block `g` from slot 7.
	blockG := &pb.BeaconBlock{
		Slot:             genesisSlot + 7,
		ParentRootHash32: parentHash[:],
		StateRootHash32:  stateRoot[:],
		RandaoReveal:     createRandaoReveal(t, computedState, privKeys),
		Body:             &pb.BeaconBlockBody{},
	}
	initBlockStateRoot(t, blockG, chainService)

	computedState, err = chainService.ReceiveBlock(ctx, blockG)
	if err != nil {
		t.Fatal(err)
	}

	if computedState.Slot != blockG.Slot {
		t.Errorf("Unexpected state slot %d, wanted %d", computedState.Slot, blockG.Slot)
	}
}

func TestIsBlockReadyForProcessing_ValidBlock(t *testing.T) {
	db := internal.SetupDB(t)
	defer internal.TeardownDB(t, db)
	ctx := context.Background()

	chainService := setupBeaconChain(t, db, nil)
	unixTime := uint64(time.Now().Unix())
	deposits, privKeys := setupInitialDeposits(t, 100)
	if err := db.InitializeState(context.Background(), unixTime, deposits, &pb.Eth1Data{}); err != nil {
		t.Fatalf("Could not initialize beacon state to disk: %v", err)
	}
	beaconState, err := db.HeadState(ctx)
	if err != nil {
		t.Fatalf("Can't get genesis state: %v", err)
	}
	block := &pb.BeaconBlock{
		ParentRootHash32: []byte{'a'},
	}

	if err := chainService.VerifyBlockValidity(ctx, block, beaconState); err == nil {
		t.Fatal("block processing succeeded despite block having no parent saved")
	}

	beaconState.Slot = params.BeaconConfig().GenesisSlot + 10

	stateRoot, err := hashutil.HashProto(beaconState)
	if err != nil {
		t.Fatalf("Could not tree hash state: %v", err)
	}
	genesis := b.NewGenesisBlock([]byte{})
	if err := chainService.beaconDB.SaveBlock(genesis); err != nil {
		t.Fatalf("cannot save block: %v", err)
	}
	parentRoot, err := hashutil.HashBeaconBlock(genesis)
	if err != nil {
		t.Fatalf("unable to get root of canonical head: %v", err)
	}

	beaconState.LatestEth1Data = &pb.Eth1Data{
		DepositRootHash32: []byte{2},
		BlockHash32:       []byte{3},
	}
	beaconState.Slot = params.BeaconConfig().GenesisSlot

	currentSlot := params.BeaconConfig().GenesisSlot + 1
	attestationSlot := params.BeaconConfig().GenesisSlot

	randaoReveal := createRandaoReveal(t, beaconState, privKeys)
	block2 := &pb.BeaconBlock{
		Slot:             currentSlot,
		StateRootHash32:  stateRoot[:],
		ParentRootHash32: parentRoot[:],
		RandaoReveal:     randaoReveal,
		Eth1Data: &pb.Eth1Data{
			DepositRootHash32: []byte("a"),
			BlockHash32:       []byte("b"),
		},
		Body: &pb.BeaconBlockBody{
			Attestations: []*pb.Attestation{{
				AggregationBitfield: []byte{128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
					0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
				Data: &pb.AttestationData{
					Slot:                     attestationSlot,
					JustifiedBlockRootHash32: parentRoot[:],
				},
			}},
		},
	}

	if err := chainService.VerifyBlockValidity(ctx, block2, beaconState); err != nil {
		t.Fatalf("block processing failed despite being a valid block: %v", err)
	}
}

func TestDeleteValidatorIdx_DeleteWorks(t *testing.T) {
	db := internal.SetupDB(t)
	defer internal.TeardownDB(t, db)
	epoch := uint64(2)
	v.InsertActivatedVal(epoch+1, []uint64{0, 1, 2})
	v.InsertExitedVal(epoch+1, []uint64{0, 2})
	var validators []*pb.Validator
	for i := 0; i < 3; i++ {
		pubKeyBuf := make([]byte, params.BeaconConfig().BLSPubkeyLength)
		binary.PutUvarint(pubKeyBuf, uint64(i))
		validators = append(validators, &pb.Validator{
			Pubkey: pubKeyBuf,
		})
	}
	state := &pb.BeaconState{
		ValidatorRegistry: validators,
		Slot:              epoch * params.BeaconConfig().SlotsPerEpoch,
	}
	chainService := setupBeaconChain(t, db, nil)
	if err := chainService.saveValidatorIdx(state); err != nil {
		t.Fatalf("Could not save validator idx: %v", err)
	}
	if err := chainService.deleteValidatorIdx(state); err != nil {
		t.Fatalf("Could not delete validator idx: %v", err)
	}
	wantedIdx := uint64(1)
	idx, err := chainService.beaconDB.ValidatorIndex(validators[wantedIdx].Pubkey)
	if err != nil {
		t.Fatalf("Could not get validator index: %v", err)
	}
	if wantedIdx != idx {
		t.Errorf("Wanted: %d, got: %d", wantedIdx, idx)
	}

	wantedIdx = uint64(2)
	if chainService.beaconDB.HasValidator(validators[wantedIdx].Pubkey) {
		t.Errorf("Validator index %d should have been deleted", wantedIdx)
	}
	if v.ExitedValFromEpoch(epoch) != nil {
		t.Errorf("Exited validators mapping for epoch %d still there", epoch)
	}
}

func TestSaveValidatorIdx_SaveRetrieveWorks(t *testing.T) {
	db := internal.SetupDB(t)
	defer internal.TeardownDB(t, db)
	epoch := uint64(1)
	v.InsertActivatedVal(epoch+1, []uint64{0, 1, 2})
	var validators []*pb.Validator
	for i := 0; i < 3; i++ {
		pubKeyBuf := make([]byte, params.BeaconConfig().BLSPubkeyLength)
		binary.PutUvarint(pubKeyBuf, uint64(i))
		validators = append(validators, &pb.Validator{
			Pubkey: pubKeyBuf,
		})
	}
	state := &pb.BeaconState{
		ValidatorRegistry: validators,
		Slot:              epoch * params.BeaconConfig().SlotsPerEpoch,
	}
	chainService := setupBeaconChain(t, db, nil)
	if err := chainService.saveValidatorIdx(state); err != nil {
		t.Fatalf("Could not save validator idx: %v", err)
	}

	wantedIdx := uint64(2)
	idx, err := chainService.beaconDB.ValidatorIndex(validators[wantedIdx].Pubkey)
	if err != nil {
		t.Fatalf("Could not get validator index: %v", err)
	}
	if wantedIdx != idx {
		t.Errorf("Wanted: %d, got: %d", wantedIdx, idx)
	}

	if v.ActivatedValFromEpoch(epoch) != nil {
		t.Errorf("Activated validators mapping for epoch %d still there", epoch)
	}
}

120
beacon-chain/blockchain/chain_info.go
Normal file
@@ -0,0 +1,120 @@
package blockchain

import (
	"time"

	"github.com/gogo/protobuf/proto"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
	ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
	"github.com/prysmaticlabs/prysm/shared/params"
)

// ChainInfoFetcher defines a common interface for methods in blockchain service which
// directly retrieve chain info related data.
type ChainInfoFetcher interface {
	HeadFetcher
	CanonicalRootFetcher
	FinalizationFetcher
}

// GenesisTimeFetcher retrieves the Eth2 genesis timestamp.
type GenesisTimeFetcher interface {
	GenesisTime() time.Time
}

// HeadFetcher defines a common interface for methods in blockchain service which
// directly retrieve head related data.
type HeadFetcher interface {
	HeadSlot() uint64
	HeadRoot() []byte
	HeadBlock() *ethpb.BeaconBlock
	HeadState() *pb.BeaconState
}

// CanonicalRootFetcher defines a common interface for methods in blockchain service which
// directly retrieve canonical roots related data.
type CanonicalRootFetcher interface {
	CanonicalRoot(slot uint64) []byte
}

// ForkFetcher retrieves the current fork information of the Ethereum beacon chain.
type ForkFetcher interface {
	CurrentFork() *pb.Fork
}

// FinalizationFetcher defines a common interface for methods in blockchain service which
// directly retrieve finalization related data.
type FinalizationFetcher interface {
	FinalizedCheckpt() *ethpb.Checkpoint
}

// FinalizedCheckpt returns the latest finalized checkpoint tracked in the fork choice service.
func (s *Service) FinalizedCheckpt() *ethpb.Checkpoint {
	cp := s.forkChoiceStore.FinalizedCheckpt()
	if cp != nil {
		return cp
	}

	return &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
}

// HeadSlot returns the slot of the head of the chain.
func (s *Service) HeadSlot() uint64 {
	s.headLock.RLock()
	defer s.headLock.RUnlock()

	return s.headSlot
}

// HeadRoot returns the root of the head of the chain.
func (s *Service) HeadRoot() []byte {
	s.headLock.RLock()
	defer s.headLock.RUnlock()

	root := s.canonicalRoots[s.headSlot]
	if len(root) != 0 {
		return root
	}

	return params.BeaconConfig().ZeroHash[:]
}

// HeadBlock returns the head block of the chain.
func (s *Service) HeadBlock() *ethpb.BeaconBlock {
	s.headLock.RLock()
	defer s.headLock.RUnlock()

	return proto.Clone(s.headBlock).(*ethpb.BeaconBlock)
}

// HeadState returns the head state of the chain.
func (s *Service) HeadState() *pb.BeaconState {
	s.headLock.RLock()
	defer s.headLock.RUnlock()

	return proto.Clone(s.headState).(*pb.BeaconState)
}

// CanonicalRoot returns the canonical root of a given slot.
func (s *Service) CanonicalRoot(slot uint64) []byte {
	s.headLock.RLock()
	defer s.headLock.RUnlock()

	return s.canonicalRoots[slot]
}

// GenesisTime returns the genesis time of the beacon chain.
func (s *Service) GenesisTime() time.Time {
	return s.genesisTime
}

// CurrentFork retrieves the latest fork information of the beacon chain.
func (s *Service) CurrentFork() *pb.Fork {
	if s.headState == nil {
		return &pb.Fork{
			PreviousVersion: params.BeaconConfig().GenesisForkVersion,
			CurrentVersion:  params.BeaconConfig().GenesisForkVersion,
		}
	}
	return proto.Clone(s.headState.Fork).(*pb.Fork)
}

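// Example (illustrative, names hypothetical): consumers are meant to depend on
// the narrowest interface they need rather than on *Service directly; an RPC
// handler that only reads the head could take a HeadFetcher:
//
//	type headHandler struct {
//		chain HeadFetcher
//	}
//
//	func (h *headHandler) currentSlot() uint64 {
//		return h.chain.HeadSlot()
//	}
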
77
beacon-chain/blockchain/chain_info_norace_test.go
Normal file
@@ -0,0 +1,77 @@
package blockchain

import (
	"context"
	"testing"

	testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
	ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
)

func TestHeadSlot_DataRace(t *testing.T) {
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)
	s := &Service{
		beaconDB:       db,
		canonicalRoots: make(map[uint64][]byte),
	}
	go func() {
		s.saveHead(
			context.Background(),
			&ethpb.BeaconBlock{Slot: 777},
			[32]byte{},
		)
	}()
	s.HeadSlot()
}

func TestHeadRoot_DataRace(t *testing.T) {
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)
	s := &Service{
		beaconDB:       db,
		canonicalRoots: make(map[uint64][]byte),
	}
	go func() {
		s.saveHead(
			context.Background(),
			&ethpb.BeaconBlock{Slot: 777},
			[32]byte{},
		)
	}()
	s.HeadRoot()
}

func TestHeadBlock_DataRace(t *testing.T) {
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)
	s := &Service{
		beaconDB:       db,
		canonicalRoots: make(map[uint64][]byte),
	}
	go func() {
		s.saveHead(
			context.Background(),
			&ethpb.BeaconBlock{Slot: 777},
			[32]byte{},
		)
	}()
	s.HeadBlock()
}

func TestHeadState_DataRace(t *testing.T) {
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)
	s := &Service{
		beaconDB:       db,
		canonicalRoots: make(map[uint64][]byte),
	}
	go func() {
		s.saveHead(
			context.Background(),
			&ethpb.BeaconBlock{Slot: 777},
			[32]byte{},
		)
	}()
	s.HeadState()
}

109
beacon-chain/blockchain/chain_info_test.go
Normal file
@@ -0,0 +1,109 @@
package blockchain

import (
	"bytes"
	"context"
	"reflect"
	"testing"
	"time"

	testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
	ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
	"github.com/prysmaticlabs/prysm/shared/params"
)

// Ensure Service implements chain info interfaces.
var _ = ChainInfoFetcher(&Service{})
var _ = GenesisTimeFetcher(&Service{})
var _ = ForkFetcher(&Service{})

func TestFinalizedCheckpt_Nil(t *testing.T) {
	c := setupBeaconChain(t, nil)
	if !bytes.Equal(c.FinalizedCheckpt().Root, params.BeaconConfig().ZeroHash[:]) {
		t.Error("Incorrect pre chain start value")
	}
}

func TestHeadRoot_Nil(t *testing.T) {
	c := setupBeaconChain(t, nil)
	if !bytes.Equal(c.HeadRoot(), params.BeaconConfig().ZeroHash[:]) {
		t.Error("Incorrect pre chain start value")
	}
}

func TestFinalizedCheckpt_CanRetrieve(t *testing.T) {
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)
	ctx := context.Background()

	c := setupBeaconChain(t, db)

	if err := c.forkChoiceStore.GenesisStore(ctx, &ethpb.Checkpoint{}, &ethpb.Checkpoint{}); err != nil {
		t.Fatal(err)
	}

	if c.FinalizedCheckpt().Epoch != 0 {
		t.Errorf("Finalized epoch at genesis should be 0, got: %d", c.FinalizedCheckpt().Epoch)
	}
}

func TestHeadSlot_CanRetrieve(t *testing.T) {
	c := &Service{}
	c.headSlot = 100
	if c.HeadSlot() != 100 {
		t.Errorf("Wanted head slot: %d, got: %d", 100, c.HeadSlot())
	}
}

func TestHeadRoot_CanRetrieve(t *testing.T) {
	c := &Service{canonicalRoots: make(map[uint64][]byte)}
	c.headSlot = 100
	c.canonicalRoots[c.headSlot] = []byte{'A'}
	if !bytes.Equal([]byte{'A'}, c.HeadRoot()) {
		t.Errorf("Wanted head root: %v, got: %v", []byte{'A'}, c.HeadRoot())
	}
}

func TestHeadBlock_CanRetrieve(t *testing.T) {
	b := &ethpb.BeaconBlock{Slot: 1}
	c := &Service{headBlock: b}
	if !reflect.DeepEqual(b, c.HeadBlock()) {
		t.Error("incorrect head block received")
	}
}

func TestHeadState_CanRetrieve(t *testing.T) {
	s := &pb.BeaconState{Slot: 2}
	c := &Service{headState: s}
	if !reflect.DeepEqual(s, c.HeadState()) {
		t.Error("incorrect head state received")
	}
}

func TestGenesisTime_CanRetrieve(t *testing.T) {
	c := &Service{genesisTime: time.Unix(999, 0)}
	wanted := time.Unix(999, 0)
	if c.GenesisTime() != wanted {
		t.Error("Did not get wanted genesis time")
	}
}

func TestCurrentFork_CanRetrieve(t *testing.T) {
	f := &pb.Fork{Epoch: 999}
	s := &pb.BeaconState{Fork: f}
	c := &Service{headState: s}
	if !reflect.DeepEqual(c.CurrentFork(), f) {
		t.Error("Received incorrect fork version")
	}
}

func TestCanonicalRoot_CanRetrieve(t *testing.T) {
	c := &Service{canonicalRoots: make(map[uint64][]byte)}
	slot := uint64(123)
	r := []byte{'B'}
	c.canonicalRoots[slot] = r
	if !bytes.Equal(r, c.CanonicalRoot(slot)) {
		t.Errorf("Wanted canonical root: %v, got: %v", r, c.CanonicalRoot(slot))
	}
}

@@ -1,470 +0,0 @@
package blockchain

import (
	"bytes"
	"context"
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/prysmaticlabs/prysm/beacon-chain/cache"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/beacon-chain/db"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
	"github.com/prysmaticlabs/prysm/shared/bytesutil"
	"github.com/prysmaticlabs/prysm/shared/hashutil"
	"github.com/prysmaticlabs/prysm/shared/params"
	"github.com/sirupsen/logrus"
	"go.opencensus.io/trace"
)

var (
	reorgCount = promauto.NewCounter(prometheus.CounterOpts{
		Name: "reorg_counter",
		Help: "The number of chain reorganization events that have happened in the fork choice rule",
	})
)
var blkAncestorCache = cache.NewBlockAncestorCache()

// ForkChoice interface defines the methods for applying fork choice rule
// operations to the blockchain.
type ForkChoice interface {
	ApplyForkChoiceRule(ctx context.Context, block *pb.BeaconBlock, computedState *pb.BeaconState) error
}

// updateFFGCheckPts checks whether the existing FFG check points saved in DB
// are older than the ones just processed in state. If they are older, we update
// the DB with the latest FFG check points, both justification and finalization.
func (c *ChainService) updateFFGCheckPts(ctx context.Context, state *pb.BeaconState) error {
	lastJustifiedSlot := helpers.StartSlot(state.JustifiedEpoch)
	savedJustifiedBlock, err := c.beaconDB.JustifiedBlock()
	if err != nil {
		return err
	}
	// If the last processed justification slot in state is greater than
	// the slot of the justified block saved in DB.
	if lastJustifiedSlot > savedJustifiedBlock.Slot {
		// Retrieve the new justified block from DB using the new justified slot and save it.
		newJustifiedBlock, err := c.beaconDB.BlockBySlot(ctx, lastJustifiedSlot)
		if err != nil {
			return err
		}
		// If the new justified slot is a skip slot in DB then we keep getting its ancestors
		// until we can get a block.
		lastAvailBlkSlot := lastJustifiedSlot
		for newJustifiedBlock == nil {
			log.WithField("slot", lastAvailBlkSlot-params.BeaconConfig().GenesisSlot).Debug("Missing block in DB, looking one slot back")
			lastAvailBlkSlot--
			newJustifiedBlock, err = c.beaconDB.BlockBySlot(ctx, lastAvailBlkSlot)
			if err != nil {
				return err
			}
		}

		// Fetch the justified state from the historical states DB.
		newJustifiedState, err := c.beaconDB.HistoricalStateFromSlot(ctx, newJustifiedBlock.Slot)
		if err != nil {
			return err
		}
		if err := c.beaconDB.SaveJustifiedBlock(newJustifiedBlock); err != nil {
			return err
		}
		if err := c.beaconDB.SaveJustifiedState(newJustifiedState); err != nil {
			return err
		}
	}

	lastFinalizedSlot := helpers.StartSlot(state.FinalizedEpoch)
	savedFinalizedBlock, err := c.beaconDB.FinalizedBlock()
	// If the last processed finalized slot in state is greater than
	// the slot of the finalized block saved in DB.
	if err != nil {
		return err
	}
	if lastFinalizedSlot > savedFinalizedBlock.Slot {
		// Retrieve the new finalized block from DB using the new finalized slot and save it.
		newFinalizedBlock, err := c.beaconDB.BlockBySlot(ctx, lastFinalizedSlot)
		if err != nil {
			return err
		}
		// If the new finalized slot is a skip slot in DB then we keep getting its ancestors
		// until we can get a block.
		lastAvailBlkSlot := lastFinalizedSlot
		for newFinalizedBlock == nil {
			log.WithField("slot", lastAvailBlkSlot-params.BeaconConfig().GenesisSlot).Debug("Missing block in DB, looking one slot back")
			lastAvailBlkSlot--
			newFinalizedBlock, err = c.beaconDB.BlockBySlot(ctx, lastAvailBlkSlot)
			if err != nil {
				return err
			}
		}

		// Generate the new finalized state using the new finalized block and
		// save it.
		newFinalizedState, err := c.beaconDB.HistoricalStateFromSlot(ctx, lastFinalizedSlot)
		if err != nil {
			return err
		}
		if err := c.beaconDB.SaveFinalizedBlock(newFinalizedBlock); err != nil {
			return err
		}
		if err := c.beaconDB.SaveFinalizedState(newFinalizedState); err != nil {
			return err
		}
	}
	return nil
}

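// Example (hypothetical numbers): if the state's justified epoch maps to slot
// 128 but slots 128 and 127 were skipped, the walk-back loop above queries
// BlockBySlot for 128, then 127, then 126, stopping at the first slot that
// actually holds a block; that block becomes the new justified (or, in the
// second half, finalized) block.
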
// ApplyForkChoiceRule determines the current beacon chain head using LMD
// GHOST as a block-vote weighted function to select a canonical head in
// Ethereum Serenity. The inputs are the recently processed block and its
// associated state.
func (c *ChainService) ApplyForkChoiceRule(
	ctx context.Context,
	block *pb.BeaconBlock,
	postState *pb.BeaconState,
) error {
	ctx, span := trace.StartSpan(ctx, "beacon-chain.blockchain.ApplyForkChoiceRule")
	defer span.End()
	log.Info("Applying LMD-GHOST Fork Choice Rule")

	justifiedState, err := c.beaconDB.JustifiedState()
	if err != nil {
		return fmt.Errorf("could not retrieve justified state: %v", err)
	}
	attestationTargets, err := c.attestationTargets(justifiedState)
	if err != nil {
		return fmt.Errorf("could not retrieve attestation target: %v", err)
	}
	justifiedHead, err := c.beaconDB.JustifiedBlock()
	if err != nil {
		return fmt.Errorf("could not retrieve justified head: %v", err)
	}

	newHead, err := c.lmdGhost(ctx, justifiedHead, justifiedState, attestationTargets)
	if err != nil {
		return fmt.Errorf("could not run fork choice: %v", err)
	}
	newHeadRoot, err := hashutil.HashBeaconBlock(newHead)
	if err != nil {
		return fmt.Errorf("could not hash head block: %v", err)
	}
	c.canonicalBlocksLock.Lock()
	defer c.canonicalBlocksLock.Unlock()
	c.canonicalBlocks[newHead.Slot] = newHeadRoot[:]

	currentHead, err := c.beaconDB.ChainHead()
	if err != nil {
		return fmt.Errorf("could not retrieve chain head: %v", err)
	}

	isDescendant, err := c.isDescendant(currentHead, newHead)
	if err != nil {
		return fmt.Errorf("could not check if block is descendant: %v", err)
	}

	newState := postState
	if !isDescendant {
		log.Warnf("Reorg happened, last head at slot %d, new head block at slot %d",
			currentHead.Slot-params.BeaconConfig().GenesisSlot, newHead.Slot-params.BeaconConfig().GenesisSlot)

		// Only regenerate the head state if there was a reorg.
		newState, err = c.beaconDB.HistoricalStateFromSlot(ctx, newHead.Slot)
		if err != nil {
			return fmt.Errorf("could not gen state: %v", err)
		}

		for revertedSlot := currentHead.Slot; revertedSlot > newHead.Slot; revertedSlot-- {
			delete(c.canonicalBlocks, revertedSlot)
		}
		reorgCount.Inc()
	}

	// If we receive forked blocks.
	if newHead.Slot != newState.Slot {
		newState, err = c.beaconDB.HistoricalStateFromSlot(ctx, newHead.Slot)
		if err != nil {
			return fmt.Errorf("could not gen state: %v", err)
		}
	}

	if err := c.beaconDB.UpdateChainHead(ctx, newHead, newState); err != nil {
		return fmt.Errorf("failed to update chain: %v", err)
	}
	h, err := hashutil.HashBeaconBlock(newHead)
	if err != nil {
		return fmt.Errorf("could not hash head: %v", err)
	}
	log.WithFields(logrus.Fields{
		"headRoot":  fmt.Sprintf("%#x", bytesutil.Trunc(h[:])),
		"headSlot":  newHead.Slot - params.BeaconConfig().GenesisSlot,
		"stateSlot": newState.Slot - params.BeaconConfig().GenesisSlot,
	}).Info("Chain head block and state updated")

	return nil
}

// lmdGhost applies the Latest Message Driven, Greediest Heaviest Observed Sub-Tree
|
||||
// fork-choice rule defined in the Ethereum Serenity specification for the beacon chain.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
// def lmd_ghost(store: Store, start_state: BeaconState, start_block: BeaconBlock) -> BeaconBlock:
|
||||
// """
|
||||
// Execute the LMD-GHOST algorithm to find the head ``BeaconBlock``.
|
||||
// """
|
||||
// validators = start_state.validator_registry
|
||||
// active_validator_indices = get_active_validator_indices(validators, slot_to_epoch(start_state.slot))
|
||||
// attestation_targets = [
|
||||
// (validator_index, get_latest_attestation_target(store, validator_index))
|
||||
// for validator_index in active_validator_indices
|
||||
// ]
|
||||
//
|
||||
// def get_vote_count(block: BeaconBlock) -> int:
|
||||
// return sum(
|
||||
// get_effective_balance(start_state.validator_balances[validator_index]) // FORK_CHOICE_BALANCE_INCREMENT
|
||||
// for validator_index, target in attestation_targets
|
||||
// if get_ancestor(store, target, block.slot) == block
|
||||
// )
|
||||
//
|
||||
// head = start_block
|
||||
// while 1:
|
||||
// children = get_children(store, head)
|
||||
// if len(children) == 0:
|
||||
// return head
|
||||
// head = max(children, key=get_vote_count)
|
||||
func (c *ChainService) lmdGhost(
|
||||
ctx context.Context,
|
||||
startBlock *pb.BeaconBlock,
|
||||
startState *pb.BeaconState,
|
||||
voteTargets map[uint64]*pb.AttestationTarget,
|
||||
) (*pb.BeaconBlock, error) {
|
||||
highestSlot := c.beaconDB.HighestBlockSlot()
|
||||
head := startBlock
|
||||
for {
|
||||
children, err := c.blockChildren(ctx, head, highestSlot)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not fetch block children: %v", err)
|
||||
}
|
||||
if len(children) == 0 {
|
||||
return head, nil
|
||||
}
|
||||
maxChild := children[0]
|
||||
|
||||
maxChildVotes, err := VoteCount(maxChild, startState, voteTargets, c.beaconDB)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to determine vote count for block: %v", err)
|
||||
}
|
||||
for i := 1; i < len(children); i++ {
|
||||
candidateChildVotes, err := VoteCount(children[i], startState, voteTargets, c.beaconDB)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to determine vote count for block: %v", err)
|
||||
}
|
||||
maxChildRoot, err := hashutil.HashBeaconBlock(maxChild)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
candidateChildRoot, err := hashutil.HashBeaconBlock(children[i])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if candidateChildVotes > maxChildVotes ||
|
||||
(candidateChildVotes == maxChildVotes && bytesutil.LowerThan(maxChildRoot[:], candidateChildRoot[:])) {
|
||||
maxChild = children[i]
|
||||
}
|
||||
}
|
||||
head = maxChild
|
||||
}
|
||||
}
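
// candidateWinsOverBest is an editor's illustrative sketch (not part of the
// original change set): it isolates the head-selection rule used in lmdGhost
// above. A candidate replaces the current best child when it carries strictly
// more votes, or, on a vote tie, when its root is lexicographically higher.
// The function and parameter names are hypothetical.
func candidateWinsOverBest(bestVotes, candidateVotes int, bestRoot, candidateRoot []byte) bool {
	if candidateVotes != bestVotes {
		return candidateVotes > bestVotes
	}
	// Tie-break mirrors bytesutil.LowerThan(maxChildRoot[:], candidateChildRoot[:])
	// above: the lexicographically higher root wins, so all honest nodes break
	// ties identically.
	return bytes.Compare(candidateRoot, bestRoot) > 0
}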

// blockChildren returns the child blocks of the given block up to a given
// highest slot.
//
// ex:
//        /- C - E
//   A - B - D - F
//        \- G
// Input: B. Output: [C, D, G]
//
// Spec pseudocode definition:
//   get_children(store: Store, block: BeaconBlock) -> List[BeaconBlock]
//       returns the child blocks of the given block.
func (c *ChainService) blockChildren(ctx context.Context, block *pb.BeaconBlock, highestSlot uint64) ([]*pb.BeaconBlock, error) {
	var children []*pb.BeaconBlock

	currentRoot, err := hashutil.HashBeaconBlock(block)
	if err != nil {
		return nil, fmt.Errorf("could not tree hash incoming block: %v", err)
	}
	startSlot := block.Slot + 1
	for i := startSlot; i <= highestSlot; i++ {
		block, err := c.beaconDB.BlockBySlot(ctx, i)
		if err != nil {
			return nil, fmt.Errorf("could not get block by slot: %v", err)
		}
		// Continue if there's a skip block.
		if block == nil {
			continue
		}

		parentRoot := bytesutil.ToBytes32(block.ParentRootHash32)
		if currentRoot == parentRoot {
			children = append(children, block)
		}
	}
	return children, nil
}
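
// Editor's note (assumption): blockChildren above scans every slot in
// (block.Slot, highestSlot], so each level of an lmdGhost descent costs on the
// order of highestSlot block lookups; the block ancestor cache further below
// exists to soften the matching per-target cost inside VoteCount.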

// isDescendant checks if the new head block is a descendant block of the current head.
func (c *ChainService) isDescendant(currentHead *pb.BeaconBlock, newHead *pb.BeaconBlock) (bool, error) {
	currentHeadRoot, err := hashutil.HashBeaconBlock(currentHead)
	if err != nil {
		return false, err
	}
	for newHead.Slot > currentHead.Slot {
		if bytesutil.ToBytes32(newHead.ParentRootHash32) == currentHeadRoot {
			return true, nil
		}
		newHead, err = c.beaconDB.Block(bytesutil.ToBytes32(newHead.ParentRootHash32))
		if err != nil {
			return false, err
		}
		if newHead == nil {
			return false, nil
		}
	}
	return false, nil
}
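
// Editor's note (assumption): the loop above only walks while the new head is
// at a strictly higher slot, so a competing head at the same slot as the
// current head is reported as a non-descendant and handled through the reorg
// path in ApplyForkChoiceRule.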

// attestationTargets retrieves the list of attestation targets since the last finalized epoch.
// Each attestation target consists of a validator index and its attestation target (i.e. the
// block the validator attested to).
func (c *ChainService) attestationTargets(state *pb.BeaconState) (map[uint64]*pb.AttestationTarget, error) {
	indices := helpers.ActiveValidatorIndices(state.ValidatorRegistry, helpers.CurrentEpoch(state))
	attestationTargets := make(map[uint64]*pb.AttestationTarget)
	for i, index := range indices {
		target, err := c.attsService.LatestAttestationTarget(state, index)
		if err != nil {
			return nil, fmt.Errorf("could not retrieve attestation target: %v", err)
		}
		if target == nil {
			continue
		}
		attestationTargets[uint64(i)] = target
	}
	return attestationTargets, nil
}
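
// Editor's note (assumption): the targets map above is keyed by the position
// of each validator within the active-indices slice, and VoteCount reads
// effective balances with that same key; the two line up only while active
// indices are dense from zero, which holds for the genesis registries used in
// the tests below. This mirrors the original code unchanged.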

// VoteCount determines the number of votes on a beacon block by counting the number
// of target blocks that have the given beacon block as a common ancestor.
//
// Spec pseudocode definition:
//   def get_vote_count(block: BeaconBlock) -> int:
//       return sum(
//           get_effective_balance(start_state.validator_balances[validator_index]) // FORK_CHOICE_BALANCE_INCREMENT
//           for validator_index, target in attestation_targets
//           if get_ancestor(store, target, block.slot) == block
//       )
func VoteCount(block *pb.BeaconBlock, state *pb.BeaconState, targets map[uint64]*pb.AttestationTarget, beaconDB *db.BeaconDB) (int, error) {
	balances := 0
	var ancestorRoot []byte
	var err error

	blockRoot, err := hashutil.HashBeaconBlock(block)
	if err != nil {
		return 0, err
	}

	for validatorIndex, target := range targets {
		ancestorRoot, err = cachedAncestor(target, block.Slot, beaconDB)
		if err != nil {
			return 0, err
		}
		// This covers the following case: we start at B5 and want to process B6 and B7.
		// B6 can be processed; B7 cannot, because it points to a block older than the
		// current block B5.
		//   B4 - B5 - B6
		//     \ - - - - - B7
		if ancestorRoot == nil {
			continue
		}

		if bytes.Equal(blockRoot[:], ancestorRoot) {
			balances += int(helpers.EffectiveBalance(state, validatorIndex))
		}
	}
	return balances, nil
}
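
// Editor's illustrative example (hypothetical numbers): with two targets whose
// chains resolve to this block as the ancestor at block.Slot, and effective
// balances of 32e9 and 16e9 Gwei, VoteCount returns 48e9; a target whose chain
// never reaches block.Slot is skipped rather than treated as an error.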

// BlockAncestor obtains the ancestor of a block at a certain slot.
//
// Spec pseudocode definition:
//   def get_ancestor(store: Store, block: BeaconBlock, slot: Slot) -> BeaconBlock:
//       """
//       Get the ancestor of ``block`` with slot number ``slot``; return ``None`` if not found.
//       """
//       if block.slot == slot:
//           return block
//       elif block.slot < slot:
//           return None
//       else:
//           return get_ancestor(store, store.get_parent(block), slot)
func BlockAncestor(targetBlock *pb.AttestationTarget, slot uint64, beaconDB *db.BeaconDB) ([]byte, error) {
	if targetBlock.Slot == slot {
		return targetBlock.BlockRoot[:], nil
	}
	if targetBlock.Slot < slot {
		return nil, nil
	}
	parentRoot := bytesutil.ToBytes32(targetBlock.ParentRoot)
	parent, err := beaconDB.Block(parentRoot)
	if err != nil {
		return nil, fmt.Errorf("could not get parent block: %v", err)
	}
	if parent == nil {
		return nil, fmt.Errorf("parent block does not exist")
	}
	newTarget := &pb.AttestationTarget{
		Slot:       parent.Slot,
		BlockRoot:  parentRoot[:],
		ParentRoot: parent.ParentRootHash32,
	}
	return BlockAncestor(newTarget, slot, beaconDB)
}
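
// blockAncestorIter is an editor's sketch (not part of the original change
// set) of the same lookup without recursion; the name is hypothetical and the
// behavior is intended to match BlockAncestor above.
func blockAncestorIter(target *pb.AttestationTarget, slot uint64, beaconDB *db.BeaconDB) ([]byte, error) {
	for target.Slot > slot {
		parent, err := beaconDB.Block(bytesutil.ToBytes32(target.ParentRoot))
		if err != nil {
			return nil, fmt.Errorf("could not get parent block: %v", err)
		}
		if parent == nil {
			return nil, fmt.Errorf("parent block does not exist")
		}
		// Step one block up the chain: the parent becomes the new target.
		target = &pb.AttestationTarget{
			Slot:       parent.Slot,
			BlockRoot:  target.ParentRoot,
			ParentRoot: parent.ParentRootHash32,
		}
	}
	if target.Slot < slot {
		// The chain skipped past the requested slot; no ancestor at that height.
		return nil, nil
	}
	return target.BlockRoot, nil
}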

// cachedAncestor retrieves the cached ancestor target from the block ancestor cache;
// if it is not there, it looks it up in the block tree and caches it.
func cachedAncestor(target *pb.AttestationTarget, height uint64, beaconDB *db.BeaconDB) ([]byte, error) {
	// Check if the ancestor block at the given height was cached.
	cachedAncestorInfo, err := blkAncestorCache.AncestorBySlot(target.BlockRoot, height)
	if err != nil {
		return nil, err
	}
	if cachedAncestorInfo != nil {
		return cachedAncestorInfo.Target.BlockRoot, nil
	}

	ancestorRoot, err := BlockAncestor(target, height, beaconDB)
	if err != nil {
		return nil, err
	}
	ancestor, err := beaconDB.Block(bytesutil.ToBytes32(ancestorRoot))
	if err != nil {
		return nil, err
	}
	if ancestor == nil {
		return nil, nil
	}
	ancestorTarget := &pb.AttestationTarget{
		Slot:       ancestor.Slot,
		BlockRoot:  ancestorRoot,
		ParentRoot: ancestor.ParentRootHash32,
	}
	if err := blkAncestorCache.AddBlockAncestor(&cache.AncestorInfo{
		Height: height,
		Hash:   target.BlockRoot,
		Target: ancestorTarget,
	}); err != nil {
		return nil, err
	}
	return ancestorRoot, nil
}
@@ -1,225 +0,0 @@
package blockchain

import (
	"context"
	"fmt"
	"testing"

	"github.com/gogo/protobuf/proto"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
	"github.com/prysmaticlabs/prysm/beacon-chain/internal"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
	"github.com/prysmaticlabs/prysm/shared/hashutil"
	"github.com/prysmaticlabs/prysm/shared/params"
	"github.com/prysmaticlabs/prysm/shared/testutil"
	logTest "github.com/sirupsen/logrus/hooks/test"
)

type mockAttestationHandler struct {
	targets map[uint64]*pb.AttestationTarget
}

func (m *mockAttestationHandler) LatestAttestationTarget(beaconState *pb.BeaconState, idx uint64) (*pb.AttestationTarget, error) {
	return m.targets[idx], nil
}

func (m *mockAttestationHandler) BatchUpdateLatestAttestation(ctx context.Context, atts []*pb.Attestation) error {
	return nil
}

func TestApplyForkChoice_ChainSplitReorg(t *testing.T) {
	hook := logTest.NewGlobal()
	beaconDB := internal.SetupDB(t)
	defer internal.TeardownDB(t, beaconDB)

	ctx := context.Background()
	deposits, _ := setupInitialDeposits(t, 100)
	eth1Data := &pb.Eth1Data{
		DepositRootHash32: []byte{},
		BlockHash32:       []byte{},
	}
	justifiedState, err := state.GenesisBeaconState(deposits, 0, eth1Data)
	if err != nil {
		t.Fatalf("Can't generate genesis state: %v", err)
	}

	chainService := setupBeaconChain(t, beaconDB, nil)

	// Construct a forked chain that looks as follows:
	//    /------B1 ----B3 ----- B5 (current head)
	//   B0 --B2 -------------B4
	blocks, roots := constructForkedChain(t, justifiedState)

	// We then set up a canonical chain of the following blocks:
	// B0->B1->B3->B5.
	if err := chainService.beaconDB.SaveBlock(blocks[0]); err != nil {
		t.Fatal(err)
	}
	justifiedState.LatestBlock = blocks[0]
	if err := chainService.beaconDB.SaveJustifiedState(justifiedState); err != nil {
		t.Fatal(err)
	}
	if err := chainService.beaconDB.SaveJustifiedBlock(blocks[0]); err != nil {
		t.Fatal(err)
	}
	if err := chainService.beaconDB.UpdateChainHead(ctx, blocks[0], justifiedState); err != nil {
		t.Fatal(err)
	}
	canonicalBlockIndices := []int{1, 3, 5}
	postState := proto.Clone(justifiedState).(*pb.BeaconState)
	for _, canonicalIndex := range canonicalBlockIndices {
		postState, err = chainService.ApplyBlockStateTransition(ctx, blocks[canonicalIndex], postState)
		if err != nil {
			t.Fatal(err)
		}
		if err := chainService.beaconDB.SaveBlock(blocks[canonicalIndex]); err != nil {
			t.Fatal(err)
		}
		if err := chainService.beaconDB.UpdateChainHead(ctx, blocks[canonicalIndex], postState); err != nil {
			t.Fatal(err)
		}
	}

	chainHead, err := chainService.beaconDB.ChainHead()
	if err != nil {
		t.Fatal(err)
	}
	if chainHead.Slot != justifiedState.Slot+5 {
		t.Errorf(
			"Expected chain head with slot %d, received %d",
			justifiedState.Slot+5-params.BeaconConfig().GenesisSlot,
			chainHead.Slot-params.BeaconConfig().GenesisSlot,
		)
	}

	// We then save forked blocks and their historical states (but do not update chain head).
	// The fork is from B0->B2->B4.
	forkedBlockIndices := []int{2, 4}
	forkState := proto.Clone(justifiedState).(*pb.BeaconState)
	for _, forkIndex := range forkedBlockIndices {
		forkState, err = chainService.ApplyBlockStateTransition(ctx, blocks[forkIndex], forkState)
		if err != nil {
			t.Fatal(err)
		}
		if err := chainService.beaconDB.SaveBlock(blocks[forkIndex]); err != nil {
			t.Fatal(err)
		}
		if err := chainService.beaconDB.SaveHistoricalState(ctx, forkState); err != nil {
			t.Fatal(err)
		}
	}

	// Give the block from the forked chain, B4, the most votes.
	voteTargets := make(map[uint64]*pb.AttestationTarget)
	voteTargets[0] = &pb.AttestationTarget{
		Slot:       blocks[5].Slot,
		BlockRoot:  roots[5][:],
		ParentRoot: blocks[5].ParentRootHash32,
	}
	for i := 1; i < len(deposits); i++ {
		voteTargets[uint64(i)] = &pb.AttestationTarget{
			Slot:       blocks[4].Slot,
			BlockRoot:  roots[4][:],
			ParentRoot: blocks[4].ParentRootHash32,
		}
	}
	attHandler := &mockAttestationHandler{
		targets: voteTargets,
	}
	chainService.attsService = attHandler

	block4State, err := chainService.beaconDB.HistoricalStateFromSlot(ctx, blocks[4].Slot)
	if err != nil {
		t.Fatal(err)
	}
	// Applying the fork choice rule should reorg to B4 successfully.
	if err := chainService.ApplyForkChoiceRule(ctx, blocks[4], block4State); err != nil {
		t.Fatal(err)
	}

	newHead, err := chainService.beaconDB.ChainHead()
	if err != nil {
		t.Fatal(err)
	}
	if !proto.Equal(newHead, blocks[4]) {
		t.Errorf(
			"Expected chain head %v, received %v",
			blocks[4],
			newHead,
		)
	}
	want := fmt.Sprintf(
		"Reorg happened, last head at slot %d, new head block at slot %d",
		blocks[5].Slot-params.BeaconConfig().GenesisSlot, blocks[4].Slot-params.BeaconConfig().GenesisSlot,
	)
	testutil.AssertLogsContain(t, hook, want)
}

func constructForkedChain(t *testing.T, beaconState *pb.BeaconState) ([]*pb.BeaconBlock, [][32]byte) {
	// Construct the following chain:
	//    /------B1 ----B3 ----- B5 (current head)
	//   B0 --B2 -------------B4
	blocks := make([]*pb.BeaconBlock, 6)
	roots := make([][32]byte, 6)
	var err error
	blocks[0] = &pb.BeaconBlock{
		Slot:             beaconState.Slot,
		ParentRootHash32: []byte{'A'},
		Body:             &pb.BeaconBlockBody{},
	}
	roots[0], err = hashutil.HashBeaconBlock(blocks[0])
	if err != nil {
		t.Fatalf("Could not hash block: %v", err)
	}

	blocks[1] = &pb.BeaconBlock{
		Slot:             beaconState.Slot + 2,
		ParentRootHash32: roots[0][:],
		Body:             &pb.BeaconBlockBody{},
	}
	roots[1], err = hashutil.HashBeaconBlock(blocks[1])
	if err != nil {
		t.Fatalf("Could not hash block: %v", err)
	}

	blocks[2] = &pb.BeaconBlock{
		Slot:             beaconState.Slot + 1,
		ParentRootHash32: roots[0][:],
		Body:             &pb.BeaconBlockBody{},
	}
	roots[2], err = hashutil.HashBeaconBlock(blocks[2])
	if err != nil {
		t.Fatalf("Could not hash block: %v", err)
	}

	blocks[3] = &pb.BeaconBlock{
		Slot:             beaconState.Slot + 3,
		ParentRootHash32: roots[1][:],
		Body:             &pb.BeaconBlockBody{},
	}
	roots[3], err = hashutil.HashBeaconBlock(blocks[3])
	if err != nil {
		t.Fatalf("Could not hash block: %v", err)
	}

	blocks[4] = &pb.BeaconBlock{
		Slot:             beaconState.Slot + 4,
		ParentRootHash32: roots[2][:],
		Body:             &pb.BeaconBlockBody{},
	}
	roots[4], err = hashutil.HashBeaconBlock(blocks[4])
	if err != nil {
		t.Fatalf("Could not hash block: %v", err)
	}

	blocks[5] = &pb.BeaconBlock{
		Slot:             beaconState.Slot + 5,
		ParentRootHash32: roots[3][:],
		Body:             &pb.BeaconBlockBody{},
	}
	roots[5], err = hashutil.HashBeaconBlock(blocks[5])
	if err != nil {
		t.Fatalf("Could not hash block: %v", err)
	}
	return blocks, roots
}
File diff suppressed because it is too large

70	beacon-chain/blockchain/forkchoice/BUILD.bazel	Normal file
@@ -0,0 +1,70 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = [
        "doc.go",
        "log.go",
        "metrics.go",
        "process_attestation.go",
        "process_block.go",
        "service.go",
    ],
    importpath = "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/forkchoice",
    visibility = ["//beacon-chain:__subpackages__"],
    deps = [
        "//beacon-chain/cache:go_default_library",
        "//beacon-chain/core/blocks:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/core/state:go_default_library",
        "//beacon-chain/db:go_default_library",
        "//beacon-chain/db/filters:go_default_library",
        "//proto/beacon/p2p/v1:go_default_library",
        "//proto/eth/v1alpha1:go_default_library",
        "//shared/bytesutil:go_default_library",
        "//shared/featureconfig:go_default_library",
        "//shared/hashutil:go_default_library",
        "//shared/params:go_default_library",
        "//shared/traceutil:go_default_library",
        "@com_github_gogo_protobuf//proto:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_prometheus_client_golang//prometheus:go_default_library",
        "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
        "@com_github_prysmaticlabs_go_ssz//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@io_opencensus_go//trace:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = [
        "benchmark_test.go",
        "lmd_ghost_yaml_test.go",
        "process_attestation_test.go",
        "process_block_test.go",
        "service_test.go",
        "tree_test.go",
    ],
    data = ["lmd_ghost_test.yaml"],
    embed = [":go_default_library"],
    deps = [
        "//beacon-chain/cache:go_default_library",
        "//beacon-chain/core/blocks:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/core/state:go_default_library",
        "//beacon-chain/db:go_default_library",
        "//beacon-chain/db/filters:go_default_library",
        "//beacon-chain/db/testing:go_default_library",
        "//proto/beacon/p2p/v1:go_default_library",
        "//proto/eth/v1alpha1:go_default_library",
        "//shared/bytesutil:go_default_library",
        "//shared/featureconfig:go_default_library",
        "//shared/hashutil:go_default_library",
        "//shared/params:go_default_library",
        "//shared/testutil:go_default_library",
        "@com_github_prysmaticlabs_go_bitfield//:go_default_library",
        "@com_github_prysmaticlabs_go_ssz//:go_default_library",
        "@in_gopkg_yaml_v2//:go_default_library",
    ],
)
178	beacon-chain/blockchain/forkchoice/benchmark_test.go	Normal file
@@ -0,0 +1,178 @@
package forkchoice

import (
	"context"
	"testing"

	"github.com/prysmaticlabs/prysm/beacon-chain/cache"
	testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
	ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
	"github.com/prysmaticlabs/prysm/shared/bytesutil"
)

func BenchmarkForkChoiceTree1(b *testing.B) {
	ctx := context.Background()
	db := testDB.SetupDB(b)
	defer testDB.TeardownDB(b, db)

	store := NewForkChoiceService(ctx, db)

	roots, err := blockTree1(db)
	if err != nil {
		b.Fatal(err)
	}

	// Benchmark fork choice with 1024 validators.
	validators := make([]*ethpb.Validator, 1024)
	for i := 0; i < len(validators); i++ {
		validators[i] = &ethpb.Validator{ExitEpoch: 2, EffectiveBalance: 1e9}
	}
	s := &pb.BeaconState{Validators: validators}

	if err := store.GenesisStore(ctx, &ethpb.Checkpoint{}, &ethpb.Checkpoint{}); err != nil {
		b.Fatal(err)
	}

	store.justifiedCheckpt.Root = roots[0]
	if err := store.db.SaveState(ctx, s, bytesutil.ToBytes32(roots[0])); err != nil {
		b.Fatal(err)
	}

	if err := store.checkpointState.AddCheckpointState(&cache.CheckpointState{
		Checkpoint: store.justifiedCheckpt,
		State:      s,
	}); err != nil {
		b.Fatal(err)
	}

	// Spread out the votes across all 3 leaf nodes.
	for i := 0; i < len(validators); i++ {
		switch {
		case i < 256:
			if err := store.db.SaveValidatorLatestVote(ctx, uint64(i), &pb.ValidatorLatestVote{Root: roots[1]}); err != nil {
				b.Fatal(err)
			}
		case i > 768:
			if err := store.db.SaveValidatorLatestVote(ctx, uint64(i), &pb.ValidatorLatestVote{Root: roots[7]}); err != nil {
				b.Fatal(err)
			}
		default:
			if err := store.db.SaveValidatorLatestVote(ctx, uint64(i), &pb.ValidatorLatestVote{Root: roots[8]}); err != nil {
				b.Fatal(err)
			}
		}
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, err := store.Head(ctx)
		if err != nil {
			b.Fatal(err)
		}
	}
}

func BenchmarkForkChoiceTree2(b *testing.B) {
	ctx := context.Background()
	db := testDB.SetupDB(b)
	defer testDB.TeardownDB(b, db)

	store := NewForkChoiceService(ctx, db)

	roots, err := blockTree2(db)
	if err != nil {
		b.Fatal(err)
	}

	// Benchmark fork choice with 1024 validators.
	validators := make([]*ethpb.Validator, 1024)
	for i := 0; i < len(validators); i++ {
		validators[i] = &ethpb.Validator{ExitEpoch: 2, EffectiveBalance: 1e9}
	}
	s := &pb.BeaconState{Validators: validators}
	if err := store.GenesisStore(ctx, &ethpb.Checkpoint{}, &ethpb.Checkpoint{}); err != nil {
		b.Fatal(err)
	}

	store.justifiedCheckpt.Root = roots[0]
	if err := store.db.SaveState(ctx, s, bytesutil.ToBytes32(roots[0])); err != nil {
		b.Fatal(err)
	}

	if err := store.checkpointState.AddCheckpointState(&cache.CheckpointState{
		Checkpoint: store.justifiedCheckpt,
		State:      s,
	}); err != nil {
		b.Fatal(err)
	}

	// Spread out the votes evenly across the leaf nodes, 8 to 15.
	nodeIndex := 8
	for i := 0; i < len(validators); i++ {
		if err := store.db.SaveValidatorLatestVote(ctx, uint64(i), &pb.ValidatorLatestVote{Root: roots[nodeIndex]}); err != nil {
			b.Fatal(err)
		}
		if i%155 == 0 {
			nodeIndex++
		}
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, err := store.Head(ctx)
		if err != nil {
			b.Fatal(err)
		}
	}
}

func BenchmarkForkChoiceTree3(b *testing.B) {
	ctx := context.Background()
	db := testDB.SetupDB(b)
	defer testDB.TeardownDB(b, db)

	store := NewForkChoiceService(ctx, db)

	roots, err := blockTree3(db)
	if err != nil {
		b.Fatal(err)
	}

	// Benchmark fork choice with 1024 validators.
	validators := make([]*ethpb.Validator, 1024)
	for i := 0; i < len(validators); i++ {
		validators[i] = &ethpb.Validator{ExitEpoch: 2, EffectiveBalance: 1e9}
	}
	s := &pb.BeaconState{Validators: validators}
	if err := store.GenesisStore(ctx, &ethpb.Checkpoint{}, &ethpb.Checkpoint{}); err != nil {
		b.Fatal(err)
	}

	store.justifiedCheckpt.Root = roots[0]
	if err := store.db.SaveState(ctx, s, bytesutil.ToBytes32(roots[0])); err != nil {
		b.Fatal(err)
	}

	if err := store.checkpointState.AddCheckpointState(&cache.CheckpointState{
		Checkpoint: store.justifiedCheckpt,
		State:      s,
	}); err != nil {
		b.Fatal(err)
	}

	// All validators vote on the same head.
	for i := 0; i < len(validators); i++ {
		if err := store.db.SaveValidatorLatestVote(ctx, uint64(i), &pb.ValidatorLatestVote{Root: roots[len(roots)-1]}); err != nil {
			b.Fatal(err)
		}
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, err := store.Head(ctx)
		if err != nil {
			b.Fatal(err)
		}
	}
}
9	beacon-chain/blockchain/forkchoice/doc.go	Normal file
@@ -0,0 +1,9 @@
/*
Package forkchoice implements the Latest Message Driven GHOST (Greediest Heaviest Observed
Sub-Tree) algorithm as the Ethereum Serenity beacon chain fork choice rule. This algorithm is designed to
properly detect the canonical chain based on validator votes even in the presence of high network
latency, network partitions, and many conflicting blocks. To read more about fork choice, read the
official accompanying document:
https://github.com/ethereum/eth2.0-specs/blob/v0.8.3/specs/core/0_fork-choice.md
*/
package forkchoice
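
// Editor's note: a hypothetical usage sketch, not part of the original change
// set; it strings together the exported pieces added elsewhere in this change
// (NewForkChoiceService, GenesisStore, OnAttestation, Head):
//
//	store := NewForkChoiceService(ctx, db)
//	if err := store.GenesisStore(ctx, justifiedChkpt, finalizedChkpt); err != nil {
//		// handle error
//	}
//	// feed attestations (and blocks) as they arrive...
//	if _, err := store.OnAttestation(ctx, att); err != nil {
//		// handle error
//	}
//	headRoot, err := store.Head(ctx)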
59	beacon-chain/blockchain/forkchoice/lmd_ghost_test.yaml	Normal file
@@ -0,0 +1,59 @@
test_cases:
  # GHOST chooses b3 with the heaviest weight
  - blocks:
      - id: 'b0'
        parent: 'b0'
      - id: 'b1'
        parent: 'b0'
      - id: 'b2'
        parent: 'b1'
      - id: 'b3'
        parent: 'b1'
    weights:
      b0: 0
      b1: 0
      b2: 5
      b3: 10
    head: 'b3'
  # GHOST chooses b1 with the heaviest weight
  - blocks:
      - id: 'b0'
        parent: 'b0'
      - id: 'b1'
        parent: 'b0'
      - id: 'b2'
        parent: 'b0'
      - id: 'b3'
        parent: 'b0'
    weights:
      b1: 5
      b2: 4
      b3: 3
    head: 'b1'
  # Equal-weight children: GHOST breaks the tie by block root and chooses b3
  - blocks:
      - id: 'b0'
        parent: 'b0'
      - id: 'b1'
        parent: 'b0'
      - id: 'b2'
        parent: 'b0'
      - id: 'b3'
        parent: 'b0'
    weights:
      b1: 5
      b2: 6
      b3: 6
    head: 'b3'
  # Zero-weight children: GHOST breaks the tie by block root and chooses b2
  - blocks:
      - id: 'b0'
        parent: 'b0'
      - id: 'b1'
        parent: 'b0'
      - id: 'b2'
        parent: 'b0'
    weights:
      b1: 0
      b2: 0
    head: 'b2'
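# Editor's note (assumption): block ids encode slots ('b3' -> slot 3), and a
# block's weight is realized in the accompanying Go test as that many validator
# latest-votes pointing at the block's root.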
140	beacon-chain/blockchain/forkchoice/lmd_ghost_yaml_test.go	Normal file
@@ -0,0 +1,140 @@
package forkchoice

import (
	"bytes"
	"context"
	"io/ioutil"
	"path/filepath"
	"strconv"
	"testing"

	"github.com/prysmaticlabs/go-ssz"
	"github.com/prysmaticlabs/prysm/beacon-chain/cache"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
	ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
	"github.com/prysmaticlabs/prysm/shared/bytesutil"
	"gopkg.in/yaml.v2"
)

type Config struct {
	TestCases []struct {
		Blocks []struct {
			ID     string `yaml:"id"`
			Parent string `yaml:"parent"`
		} `yaml:"blocks"`
		Weights map[string]int `yaml:"weights"`
		Head    string         `yaml:"head"`
	} `yaml:"test_cases"`
}

func TestGetHeadFromYaml(t *testing.T) {
	ctx := context.Background()
	filename, _ := filepath.Abs("./lmd_ghost_test.yaml")
	yamlFile, err := ioutil.ReadFile(filename)
	if err != nil {
		t.Fatal(err)
	}
	var c *Config
	if err := yaml.Unmarshal(yamlFile, &c); err != nil {
		t.Fatal(err)
	}

	for _, test := range c.TestCases {
		db := testDB.SetupDB(t)
		defer testDB.TeardownDB(t, db)

		blksRoot := make(map[int][]byte)
		// Construct block tree from yaml.
		for _, blk := range test.Blocks {
			// Genesis block condition.
			if blk.ID == blk.Parent {
				b := &ethpb.BeaconBlock{Slot: 0, ParentRoot: []byte{'g'}}
				if err := db.SaveBlock(ctx, b); err != nil {
					t.Fatal(err)
				}
				root, err := ssz.SigningRoot(b)
				if err != nil {
					t.Fatal(err)
				}
				blksRoot[0] = root[:]
			} else {
				slot, err := strconv.Atoi(blk.ID[1:])
				if err != nil {
					t.Fatal(err)
				}
				parentSlot, err := strconv.Atoi(blk.Parent[1:])
				if err != nil {
					t.Fatal(err)
				}
				b := &ethpb.BeaconBlock{Slot: uint64(slot), ParentRoot: blksRoot[parentSlot]}
				if err := db.SaveBlock(ctx, b); err != nil {
					t.Fatal(err)
				}
				root, err := ssz.SigningRoot(b)
				if err != nil {
					t.Fatal(err)
				}
				blksRoot[slot] = root[:]
			}
		}

		// Assign validator votes to the blocks as weights.
		count := 0
		for blk, votes := range test.Weights {
			slot, err := strconv.Atoi(blk[1:])
			if err != nil {
				t.Fatal(err)
			}
			max := count + votes
			for i := count; i < max; i++ {
				if err := db.SaveValidatorLatestVote(ctx, uint64(i), &pb.ValidatorLatestVote{Root: blksRoot[slot]}); err != nil {
					t.Fatal(err)
				}
				count++
			}
		}

		store := NewForkChoiceService(ctx, db)
		validators := make([]*ethpb.Validator, count)
		for i := 0; i < len(validators); i++ {
			validators[i] = &ethpb.Validator{ExitEpoch: 2, EffectiveBalance: 1e9}
		}

		s := &pb.BeaconState{Validators: validators}

		if err := store.GenesisStore(ctx, &ethpb.Checkpoint{}, &ethpb.Checkpoint{}); err != nil {
			t.Fatal(err)
		}

		store.justifiedCheckpt.Root = blksRoot[0]
		if err := store.db.SaveState(ctx, s, bytesutil.ToBytes32(blksRoot[0])); err != nil {
			t.Fatal(err)
		}

		if err := store.checkpointState.AddCheckpointState(&cache.CheckpointState{
			Checkpoint: store.justifiedCheckpt,
			State:      s,
		}); err != nil {
			t.Fatal(err)
		}

		head, err := store.Head(ctx)
		if err != nil {
			t.Fatal(err)
		}

		headSlot, err := strconv.Atoi(test.Head[1:])
		if err != nil {
			t.Fatal(err)
		}
		wantedHead := blksRoot[headSlot]

		if !bytes.Equal(head, wantedHead) {
			t.Errorf("wanted root %#x, got root %#x", wantedHead, head)
		}

		helpers.ClearAllCaches()
		testDB.TeardownDB(t, db)

	}
}
40	beacon-chain/blockchain/forkchoice/log.go	Normal file
@@ -0,0 +1,40 @@
package forkchoice

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
	"github.com/prysmaticlabs/prysm/shared/params"
	"github.com/sirupsen/logrus"
)

var log = logrus.WithField("prefix", "forkchoice")

// logEpochData logs epoch-related data at the epoch boundary.
func logEpochData(beaconState *pb.BeaconState) {
	log.WithFields(logrus.Fields{
		"epoch":                  helpers.CurrentEpoch(beaconState),
		"finalizedEpoch":         beaconState.FinalizedCheckpoint.Epoch,
		"justifiedEpoch":         beaconState.CurrentJustifiedCheckpoint.Epoch,
		"previousJustifiedEpoch": beaconState.PreviousJustifiedCheckpoint.Epoch,
	}).Info("Starting next epoch")
	activeVals, err := helpers.ActiveValidatorIndices(beaconState, helpers.CurrentEpoch(beaconState))
	if err != nil {
		log.WithError(err).Error("Could not get active validator indices")
		return
	}
	log.WithFields(logrus.Fields{
		"totalValidators":  len(beaconState.Validators),
		"activeValidators": len(activeVals),
		"averageBalance":   fmt.Sprintf("%.5f ETH", averageBalance(beaconState.Balances)),
	}).Info("Validator registry information")
}

func averageBalance(balances []uint64) float64 {
	total := uint64(0)
	for i := 0; i < len(balances); i++ {
		total += balances[i]
	}
	return float64(total) / float64(len(balances)) / float64(params.BeaconConfig().GweiPerEth)
}
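
// Editor's note (assumption): averageBalance returns NaN when the balances
// slice is empty; logEpochData above is only invoked with a populated
// validator registry, so no guard is added here.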
92	beacon-chain/blockchain/forkchoice/metrics.go	Normal file
@@ -0,0 +1,92 @@
package forkchoice

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
	"github.com/prysmaticlabs/prysm/shared/bytesutil"
	"github.com/prysmaticlabs/prysm/shared/params"
)

var (
	beaconFinalizedEpoch = promauto.NewGauge(prometheus.GaugeOpts{
		Name: "beacon_finalized_epoch",
		Help: "Last finalized epoch of the processed state",
	})
	beaconFinalizedRoot = promauto.NewGauge(prometheus.GaugeOpts{
		Name: "beacon_finalized_root",
		Help: "Last finalized root of the processed state",
	})
	beaconCurrentJustifiedEpoch = promauto.NewGauge(prometheus.GaugeOpts{
		Name: "beacon_current_justified_epoch",
		Help: "Current justified epoch of the processed state",
	})
	beaconCurrentJustifiedRoot = promauto.NewGauge(prometheus.GaugeOpts{
		Name: "beacon_current_justified_root",
		Help: "Current justified root of the processed state",
	})
	beaconPrevJustifiedEpoch = promauto.NewGauge(prometheus.GaugeOpts{
		Name: "beacon_previous_justified_epoch",
		Help: "Previous justified epoch of the processed state",
	})
	beaconPrevJustifiedRoot = promauto.NewGauge(prometheus.GaugeOpts{
		Name: "beacon_previous_justified_root",
		Help: "Previous justified root of the processed state",
	})
	activeValidatorsGauge = promauto.NewGauge(prometheus.GaugeOpts{
		Name: "state_active_validators",
		Help: "Total number of active validators",
	})
	slashedValidatorsGauge = promauto.NewGauge(prometheus.GaugeOpts{
		Name: "state_slashed_validators",
		Help: "Total slashed validators",
	})
	withdrawnValidatorsGauge = promauto.NewGauge(prometheus.GaugeOpts{
		Name: "state_withdrawn_validators",
		Help: "Total withdrawn validators",
	})
	totalValidatorsGauge = promauto.NewGauge(prometheus.GaugeOpts{
		Name: "beacon_current_validators",
		Help: "Number of status=pending|active|exited|withdrawable validators in current epoch",
	})
)

func reportEpochMetrics(state *pb.BeaconState) {
	currentEpoch := state.Slot / params.BeaconConfig().SlotsPerEpoch

	// Validator counts.
	var active float64
	var slashed float64
	var withdrawn float64
	for _, v := range state.Validators {
		if v.ActivationEpoch <= currentEpoch && currentEpoch < v.ExitEpoch {
			active++
		}
		if v.Slashed {
			slashed++
		}
		if currentEpoch >= v.ExitEpoch {
			withdrawn++
		}
	}
	activeValidatorsGauge.Set(active)
	slashedValidatorsGauge.Set(slashed)
	withdrawnValidatorsGauge.Set(withdrawn)
	totalValidatorsGauge.Set(float64(len(state.Validators)))

	// Current justified checkpoint.
	if state.CurrentJustifiedCheckpoint != nil {
		beaconCurrentJustifiedEpoch.Set(float64(state.CurrentJustifiedCheckpoint.Epoch))
		beaconCurrentJustifiedRoot.Set(float64(bytesutil.ToLowInt64(state.CurrentJustifiedCheckpoint.Root)))
	}
	// Previous justified checkpoint.
	if state.PreviousJustifiedCheckpoint != nil {
		beaconPrevJustifiedEpoch.Set(float64(state.PreviousJustifiedCheckpoint.Epoch))
		beaconPrevJustifiedRoot.Set(float64(bytesutil.ToLowInt64(state.PreviousJustifiedCheckpoint.Root)))
	}
	// Finalized checkpoint.
	if state.FinalizedCheckpoint != nil {
		beaconFinalizedEpoch.Set(float64(state.FinalizedCheckpoint.Epoch))
		beaconFinalizedRoot.Set(float64(bytesutil.ToLowInt64(state.FinalizedCheckpoint.Root)))
	}
}
293	beacon-chain/blockchain/forkchoice/process_attestation.go	Normal file
@@ -0,0 +1,293 @@
package forkchoice

import (
	"context"
	"fmt"
	"time"

	"github.com/gogo/protobuf/proto"
	"github.com/pkg/errors"
	"github.com/prysmaticlabs/go-ssz"
	"github.com/prysmaticlabs/prysm/beacon-chain/cache"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
	ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
	"github.com/prysmaticlabs/prysm/shared/bytesutil"
	"github.com/prysmaticlabs/prysm/shared/hashutil"
	"github.com/prysmaticlabs/prysm/shared/params"
	"github.com/sirupsen/logrus"
	"go.opencensus.io/trace"
)

// OnAttestation is called whenever an attestation is received. It updates each
// validator's latest vote, as well as the fork choice store struct.
//
// Spec pseudocode definition:
//   def on_attestation(store: Store, attestation: Attestation) -> None:
//       target = attestation.data.target
//
//       # Cannot calculate the current shuffling if have not seen the target
//       assert target.root in store.blocks
//
//       # Attestations cannot be from future epochs. If they are, delay consideration until the epoch arrives
//       base_state = store.block_states[target.root].copy()
//       assert store.time >= base_state.genesis_time + compute_start_slot_of_epoch(target.epoch) * SECONDS_PER_SLOT
//
//       # Store target checkpoint state if not yet seen
//       if target not in store.checkpoint_states:
//           process_slots(base_state, compute_start_slot_of_epoch(target.epoch))
//           store.checkpoint_states[target] = base_state
//       target_state = store.checkpoint_states[target]
//
//       # Attestations can only affect the fork choice of subsequent slots.
//       # Delay consideration in the fork choice until their slot is in the past.
//       attestation_slot = get_attestation_data_slot(target_state, attestation.data)
//       assert store.time >= (attestation_slot + 1) * SECONDS_PER_SLOT
//
//       # Get state at the `target` to validate attestation and calculate the committees
//       indexed_attestation = get_indexed_attestation(target_state, attestation)
//       assert is_valid_indexed_attestation(target_state, indexed_attestation)
//
//       # Update latest messages
//       for i in indexed_attestation.custody_bit_0_indices + indexed_attestation.custody_bit_1_indices:
//           if i not in store.latest_messages or target.epoch > store.latest_messages[i].epoch:
//               store.latest_messages[i] = LatestMessage(epoch=target.epoch, root=attestation.data.beacon_block_root)
func (s *Store) OnAttestation(ctx context.Context, a *ethpb.Attestation) (uint64, error) {
	ctx, span := trace.StartSpan(ctx, "forkchoice.onAttestation")
	defer span.End()

	tgt := proto.Clone(a.Data.Target).(*ethpb.Checkpoint)
	tgtSlot := helpers.StartSlot(tgt.Epoch)

	// Verify the beacon node has seen the target block before.
	if !s.db.HasBlock(ctx, bytesutil.ToBytes32(tgt.Root)) {
		return 0, fmt.Errorf("target root %#x does not exist in db", bytesutil.Trunc(tgt.Root))
	}

	// Verify the attestation target has a valid pre state produced by the target block.
	baseState, err := s.verifyAttPreState(ctx, tgt)
	if err != nil {
		return 0, err
	}

	// Verify attestations cannot be from future epochs.
	if err := helpers.VerifySlotTime(baseState.GenesisTime, tgtSlot); err != nil {
		return 0, errors.Wrap(err, "could not verify attestation target slot")
	}

	// Store target checkpoint state if not yet seen.
	baseState, err = s.saveCheckpointState(ctx, baseState, tgt)
	if err != nil {
		return 0, err
	}

	// Delay attestation processing until the subsequent slot.
	if err := s.waitForAttInclDelay(ctx, a, baseState); err != nil {
		return 0, err
	}

	// Verify attestations can only affect the fork choice of subsequent slots.
	if err := helpers.VerifySlotTime(baseState.GenesisTime, a.Data.Slot+1); err != nil {
		return 0, err
	}

	s.attsQueueLock.Lock()
	defer s.attsQueueLock.Unlock()
	atts := make([]*ethpb.Attestation, 0, len(s.attsQueue))
	for root, a := range s.attsQueue {
		log := log.WithFields(logrus.Fields{
			"AggregatedBitfield": fmt.Sprintf("%08b", a.AggregationBits),
			"Root":               fmt.Sprintf("%#x", root),
		})
		log.Debug("Updating latest votes")

		// Use the target state to validate the attestation and calculate the committees.
		indexedAtt, err := s.verifyAttestation(ctx, baseState, a)
		if err != nil {
			log.WithError(err).Warn("Removing attestation from queue.")
			delete(s.attsQueue, root)
			continue
		}

		// Update every validator's latest vote.
		if err := s.updateAttVotes(ctx, indexedAtt, tgt.Root, tgt.Epoch); err != nil {
			return 0, err
		}

		// Mark the attestation as seen so we don't update votes again when it appears in a block.
		if err := s.setSeenAtt(a); err != nil {
			return 0, err
		}
		delete(s.attsQueue, root)
		att, err := s.aggregatedAttestations(ctx, a)
		if err != nil {
			return 0, err
		}
		atts = append(atts, att...)
	}

	if err := s.db.SaveAttestations(ctx, atts); err != nil {
		return 0, err
	}

	return tgtSlot, nil
}
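
// Editor's note (assumption): the loop above intentionally drains the whole
// pending queue once the inclusion delay for the triggering attestation has
// passed, and the returned value is the start slot of the target epoch, which
// callers are assumed to use for bookkeeping such as logging and metrics.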

// verifyAttPreState validates that the input attested checkpoint has a valid pre-state.
func (s *Store) verifyAttPreState(ctx context.Context, c *ethpb.Checkpoint) (*pb.BeaconState, error) {
	baseState, err := s.db.State(ctx, bytesutil.ToBytes32(c.Root))
	if err != nil {
		return nil, errors.Wrapf(err, "could not get pre state for slot %d", helpers.StartSlot(c.Epoch))
	}
	if baseState == nil {
		return nil, fmt.Errorf("pre state of target block %d does not exist", helpers.StartSlot(c.Epoch))
	}
	return baseState, nil
}

// saveCheckpointState saves and returns the processed state with the associated checkpoint.
func (s *Store) saveCheckpointState(ctx context.Context, baseState *pb.BeaconState, c *ethpb.Checkpoint) (*pb.BeaconState, error) {
	s.checkpointStateLock.Lock()
	defer s.checkpointStateLock.Unlock()
	cachedState, err := s.checkpointState.StateByCheckpoint(c)
	if err != nil {
		return nil, errors.Wrap(err, "could not get cached checkpoint state")
	}
	if cachedState != nil {
		return cachedState, nil
	}

	// Advance slots only when the checkpoint is higher than the current state slot.
	if helpers.StartSlot(c.Epoch) > baseState.Slot {
		stateCopy := proto.Clone(baseState).(*pb.BeaconState)
		baseState, err = state.ProcessSlots(ctx, stateCopy, helpers.StartSlot(c.Epoch))
		if err != nil {
			return nil, errors.Wrapf(err, "could not process slots up to %d", helpers.StartSlot(c.Epoch))
		}
	}

	if err := s.checkpointState.AddCheckpointState(&cache.CheckpointState{
		Checkpoint: c,
		State:      baseState,
	}); err != nil {
		return nil, errors.Wrap(err, "could not save checkpoint state to cache")
	}

	return baseState, nil
}

// waitForAttInclDelay waits until the next slot because an attestation can only affect
// the fork choice of the subsequent slot. This delays attestation inclusion for fork
// choice until the attested slot is in the past.
func (s *Store) waitForAttInclDelay(ctx context.Context, a *ethpb.Attestation, targetState *pb.BeaconState) error {
	ctx, span := trace.StartSpan(ctx, "beacon-chain.forkchoice.waitForAttInclDelay")
	defer span.End()

	nextSlot := a.Data.Slot + 1
	duration := time.Duration(nextSlot*params.BeaconConfig().SecondsPerSlot) * time.Second
	timeToInclude := time.Unix(int64(targetState.GenesisTime), 0).Add(duration)

	if err := s.aggregateAttestation(ctx, a); err != nil {
		return errors.Wrap(err, "could not aggregate attestation")
	}

	time.Sleep(time.Until(timeToInclude))
	return nil
}

// aggregateAttestation aggregates the attestations in the pending queue.
func (s *Store) aggregateAttestation(ctx context.Context, att *ethpb.Attestation) error {
	s.attsQueueLock.Lock()
	defer s.attsQueueLock.Unlock()
	root, err := ssz.HashTreeRoot(att.Data)
	if err != nil {
		return err
	}
	if a, ok := s.attsQueue[root]; ok {
		a, err := helpers.AggregateAttestation(a, att)
		if err != nil {
			return err
		}
		s.attsQueue[root] = a
		return nil
	}
	s.attsQueue[root] = proto.Clone(att).(*ethpb.Attestation)
	return nil
}

// verifyAttestation validates that the input attestation is valid.
func (s *Store) verifyAttestation(ctx context.Context, baseState *pb.BeaconState, a *ethpb.Attestation) (*ethpb.IndexedAttestation, error) {
	indexedAtt, err := blocks.ConvertToIndexed(ctx, baseState, a)
	if err != nil {
		return nil, errors.Wrap(err, "could not convert attestation to indexed attestation")
	}
	if err := blocks.VerifyIndexedAttestation(ctx, baseState, indexedAtt); err != nil {
		return nil, errors.Wrap(err, "could not verify indexed attestation")
	}
	return indexedAtt, nil
}

// updateAttVotes updates validators' latest votes based on the incoming attestation.
func (s *Store) updateAttVotes(
	ctx context.Context,
	indexedAtt *ethpb.IndexedAttestation,
	tgtRoot []byte,
	tgtEpoch uint64) error {

	indices := append(indexedAtt.CustodyBit_0Indices, indexedAtt.CustodyBit_1Indices...)
	newVoteIndices := make([]uint64, 0, len(indices))
	newVotes := make([]*pb.ValidatorLatestVote, 0, len(indices))
	for _, i := range indices {
		vote, err := s.db.ValidatorLatestVote(ctx, i)
		if err != nil {
			return errors.Wrapf(err, "could not get latest vote for validator %d", i)
		}
		if vote == nil || tgtEpoch > vote.Epoch {
			newVotes = append(newVotes, &pb.ValidatorLatestVote{
				Epoch: tgtEpoch,
				Root:  tgtRoot,
			})
			newVoteIndices = append(newVoteIndices, i)
		}
	}
	return s.db.SaveValidatorLatestVotes(ctx, newVoteIndices, newVotes)
}

// setSeenAtt marks the attestation hash as seen in the seen-attestation map.
func (s *Store) setSeenAtt(a *ethpb.Attestation) error {
	s.seenAttsLock.Lock()
	defer s.seenAttsLock.Unlock()

	r, err := hashutil.HashProto(a)
	if err != nil {
		return err
	}
	s.seenAtts[r] = true

	return nil
}

// aggregatedAttestations returns the aggregated attestations after merging the incoming
// attestation with any matching attestations saved in the db.
func (s *Store) aggregatedAttestations(ctx context.Context, att *ethpb.Attestation) ([]*ethpb.Attestation, error) {
	r, err := ssz.HashTreeRoot(att.Data)
	if err != nil {
		return nil, err
	}
	saved, err := s.db.AttestationsByDataRoot(ctx, r)
	if err != nil {
		return nil, err
	}

	if saved == nil {
		return []*ethpb.Attestation{att}, nil
	}

	aggregated, err := helpers.AggregateAttestations(append(saved, att))
	if err != nil {
		return nil, err
	}

	return aggregated, nil
}
260	beacon-chain/blockchain/forkchoice/process_attestation_test.go	Normal file
@@ -0,0 +1,260 @@
|
||||
package forkchoice
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/go-ssz"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
)
|
||||
|
||||
func TestStore_OnAttestation(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
|
||||
_, err := blockTree1(db)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
BlkWithOutState := ðpb.BeaconBlock{Slot: 0}
|
||||
if err := db.SaveBlock(ctx, BlkWithOutState); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
BlkWithOutStateRoot, _ := ssz.SigningRoot(BlkWithOutState)
|
||||
|
||||
BlkWithStateBadAtt := ðpb.BeaconBlock{Slot: 1}
|
||||
if err := db.SaveBlock(ctx, BlkWithStateBadAtt); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
BlkWithStateBadAttRoot, _ := ssz.SigningRoot(BlkWithStateBadAtt)
|
||||
if err := store.db.SaveState(ctx, &pb.BeaconState{}, BlkWithStateBadAttRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
BlkWithValidState := ðpb.BeaconBlock{Slot: 2}
|
||||
if err := db.SaveBlock(ctx, BlkWithValidState); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
BlkWithValidStateRoot, _ := ssz.SigningRoot(BlkWithValidState)
|
||||
if err := store.db.SaveState(ctx, &pb.BeaconState{
|
||||
Fork: &pb.Fork{
|
||||
Epoch: 0,
|
||||
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
},
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
}, BlkWithValidStateRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
a *ethpb.Attestation
|
||||
s *pb.BeaconState
|
||||
wantErr bool
|
||||
wantErrString string
|
||||
}{
|
||||
{
|
||||
name: "attestation's target root not in db",
|
||||
a: ðpb.Attestation{Data: ðpb.AttestationData{Target: ðpb.Checkpoint{Root: []byte{'A'}}}},
|
||||
s: &pb.BeaconState{},
|
||||
wantErr: true,
|
||||
wantErrString: "target root 0x41 does not exist in db",
|
||||
},
|
||||
{
|
||||
name: "no pre state for attestations's target block",
|
||||
a: ðpb.Attestation{Data: ðpb.AttestationData{Target: ðpb.Checkpoint{Root: BlkWithOutStateRoot[:]}}},
|
||||
s: &pb.BeaconState{},
|
||||
wantErr: true,
|
||||
wantErrString: "pre state of target block 0 does not exist",
|
||||
},
|
||||
{
|
||||
name: "process attestation from future epoch",
|
||||
a: ðpb.Attestation{Data: ðpb.AttestationData{Target: ðpb.Checkpoint{Epoch: params.BeaconConfig().FarFutureEpoch,
|
||||
Root: BlkWithStateBadAttRoot[:]}}},
|
||||
s: &pb.BeaconState{},
|
||||
wantErr: true,
|
||||
wantErrString: "could not process slot from the future",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if err := store.GenesisStore(ctx, ðpb.Checkpoint{}, ðpb.Checkpoint{}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
_, err := store.OnAttestation(ctx, tt.a)
|
||||
if tt.wantErr {
|
||||
if !strings.Contains(err.Error(), tt.wantErrString) {
|
||||
t.Errorf("Store.OnAttestation() error = %v, wantErr = %v", err, tt.wantErrString)
|
||||
}
|
||||
} else {
|
||||
t.Error(err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestStore_SaveCheckpointState(t *testing.T) {
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)
	params.UseDemoBeaconConfig()

	store := NewForkChoiceService(ctx, db)

	s := &pb.BeaconState{
		Fork: &pb.Fork{
			Epoch:           0,
			CurrentVersion:  params.BeaconConfig().GenesisForkVersion,
			PreviousVersion: params.BeaconConfig().GenesisForkVersion,
		},
		RandaoMixes:         make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
		StateRoots:          make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
		BlockRoots:          make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot),
		LatestBlockHeader:   &ethpb.BeaconBlockHeader{},
		JustificationBits:   []byte{0},
		Slashings:           make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector),
		FinalizedCheckpoint: &ethpb.Checkpoint{},
	}
	if err := store.GenesisStore(ctx, &ethpb.Checkpoint{}, &ethpb.Checkpoint{}); err != nil {
		t.Fatal(err)
	}

	cp1 := &ethpb.Checkpoint{Epoch: 1, Root: []byte{'A'}}
	s1, err := store.saveCheckpointState(ctx, s, cp1)
	if err != nil {
		t.Fatal(err)
	}
	if s1.Slot != 1*params.BeaconConfig().SlotsPerEpoch {
		t.Errorf("Wanted state slot: %d, got: %d", 1*params.BeaconConfig().SlotsPerEpoch, s1.Slot)
	}

	cp2 := &ethpb.Checkpoint{Epoch: 2, Root: []byte{'B'}}
	s2, err := store.saveCheckpointState(ctx, s, cp2)
	if err != nil {
		t.Fatal(err)
	}
	if s2.Slot != 2*params.BeaconConfig().SlotsPerEpoch {
		t.Errorf("Wanted state slot: %d, got: %d", 2*params.BeaconConfig().SlotsPerEpoch, s2.Slot)
	}

	s1, err = store.saveCheckpointState(ctx, nil, cp1)
	if err != nil {
		t.Fatal(err)
	}
	if s1.Slot != 1*params.BeaconConfig().SlotsPerEpoch {
		t.Errorf("Wanted state slot: %d, got: %d", 1*params.BeaconConfig().SlotsPerEpoch, s1.Slot)
	}

	s1, err = store.checkpointState.StateByCheckpoint(cp1)
	if err != nil {
		t.Fatal(err)
	}
	if s1.Slot != 1*params.BeaconConfig().SlotsPerEpoch {
		t.Errorf("Wanted state slot: %d, got: %d", 1*params.BeaconConfig().SlotsPerEpoch, s1.Slot)
	}

	s2, err = store.checkpointState.StateByCheckpoint(cp2)
	if err != nil {
		t.Fatal(err)
	}
	if s2.Slot != 2*params.BeaconConfig().SlotsPerEpoch {
		t.Errorf("Wanted state slot: %d, got: %d", 2*params.BeaconConfig().SlotsPerEpoch, s2.Slot)
	}

	s.Slot = params.BeaconConfig().SlotsPerEpoch + 1
	if err := store.GenesisStore(ctx, &ethpb.Checkpoint{}, &ethpb.Checkpoint{}); err != nil {
		t.Fatal(err)
	}
	cp3 := &ethpb.Checkpoint{Epoch: 1, Root: []byte{'C'}}
	s3, err := store.saveCheckpointState(ctx, s, cp3)
	if err != nil {
		t.Fatal(err)
	}
	if s3.Slot != s.Slot {
		t.Errorf("Wanted state slot: %d, got: %d", s.Slot, s3.Slot)
	}
}

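// checkpointStartSlot is an illustrative helper (not part of the original
// change) spelling out the slot math the assertions above rely on: epoch N
// begins at slot N*SlotsPerEpoch, so a state advanced to cp1 (epoch 1) lands
// on 1*SlotsPerEpoch and one advanced to cp2 (epoch 2) on 2*SlotsPerEpoch.
func checkpointStartSlot(cp *ethpb.Checkpoint) uint64 {
	return cp.Epoch * params.BeaconConfig().SlotsPerEpoch
}
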
func TestStore_AggregateAttestation(t *testing.T) {
	_, _, privKeys := testutil.SetupInitialDeposits(t, 100)
	f := &pb.Fork{
		PreviousVersion: params.BeaconConfig().GenesisForkVersion,
		CurrentVersion:  params.BeaconConfig().GenesisForkVersion,
		Epoch:           0,
	}
	domain := helpers.Domain(f, 0, params.BeaconConfig().DomainBeaconAttester)
	sig := privKeys[0].Sign([]byte{}, domain)

	store := &Store{attsQueue: make(map[[32]byte]*ethpb.Attestation)}

	b1 := bitfield.NewBitlist(8)
	b1.SetBitAt(0, true)
	a := &ethpb.Attestation{Data: &ethpb.AttestationData{}, AggregationBits: b1, Signature: sig.Marshal()}

	if err := store.aggregateAttestation(context.Background(), a); err != nil {
		t.Fatal(err)
	}
	r, _ := ssz.HashTreeRoot(a.Data)
	if !bytes.Equal(store.attsQueue[r].AggregationBits, b1) {
		t.Error("Received incorrect aggregation bitfield")
	}

	b2 := bitfield.NewBitlist(8)
	b2.SetBitAt(1, true)
	a = &ethpb.Attestation{Data: &ethpb.AttestationData{}, AggregationBits: b2, Signature: sig.Marshal()}
	if err := store.aggregateAttestation(context.Background(), a); err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(store.attsQueue[r].AggregationBits, []byte{3, 1}) {
		t.Error("Received incorrect aggregation bitfield")
	}

	b3 := bitfield.NewBitlist(8)
	b3.SetBitAt(7, true)
	a = &ethpb.Attestation{Data: &ethpb.AttestationData{}, AggregationBits: b3, Signature: sig.Marshal()}
	if err := store.aggregateAttestation(context.Background(), a); err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(store.attsQueue[r].AggregationBits, []byte{131, 1}) {
		t.Error("Received incorrect aggregation bitfield")
	}
}

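// exampleMergedBits is an illustrative helper (not part of the original
// change) showing where the expected byte values above come from. A
// bitfield.Bitlist of length 8 is stored as one data byte plus a length byte
// (0x01 marking bit 8): setting bits 0 and 1 gives 0b00000011 -> []byte{3, 1},
// and additionally setting bit 7 gives 0b10000011 = 131 -> []byte{131, 1}.
func exampleMergedBits() bitfield.Bitlist {
	merged := bitfield.NewBitlist(8)
	for _, i := range []uint64{0, 1, 7} {
		merged.SetBitAt(i, true) // merged now equals []byte{131, 1}
	}
	return merged
}
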
func TestStore_ReturnAggregatedAttestation(t *testing.T) {
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)

	store := NewForkChoiceService(ctx, db)
	a1 := &ethpb.Attestation{Data: &ethpb.AttestationData{}, AggregationBits: bitfield.Bitlist{0x02}}
	err := store.db.SaveAttestation(ctx, a1)
	if err != nil {
		t.Fatal(err)
	}

	a2 := &ethpb.Attestation{Data: &ethpb.AttestationData{}, AggregationBits: bitfield.Bitlist{0x03}}
	saved, err := store.aggregatedAttestations(ctx, a2)
	if err != nil {
		t.Fatal(err)
	}

	if !reflect.DeepEqual([]*ethpb.Attestation{a2}, saved) {
		t.Error("did not retrieve saved attestation")
	}
}
442	beacon-chain/blockchain/forkchoice/process_block.go	Normal file
@@ -0,0 +1,442 @@
package forkchoice

import (
	"bytes"
	"context"
	"encoding/hex"
	"fmt"

	"github.com/pkg/errors"
	"github.com/prysmaticlabs/go-ssz"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
	"github.com/prysmaticlabs/prysm/beacon-chain/db/filters"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
	ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
	"github.com/prysmaticlabs/prysm/shared/bytesutil"
	"github.com/prysmaticlabs/prysm/shared/featureconfig"
	"github.com/prysmaticlabs/prysm/shared/hashutil"
	"github.com/prysmaticlabs/prysm/shared/params"
	"github.com/prysmaticlabs/prysm/shared/traceutil"
	"github.com/sirupsen/logrus"
	"go.opencensus.io/trace"
)

// OnBlock is called when a gossip block is received. It runs the regular state transition on the block and
// updates the fork choice store.
//
// Spec pseudocode definition:
//   def on_block(store: Store, block: BeaconBlock) -> None:
//     # Make a copy of the state to avoid mutability issues
//     assert block.parent_root in store.block_states
//     pre_state = store.block_states[block.parent_root].copy()
//     # Blocks cannot be in the future. If they are, their consideration must be delayed until they are in the past.
//     assert store.time >= pre_state.genesis_time + block.slot * SECONDS_PER_SLOT
//     # Add new block to the store
//     store.blocks[signing_root(block)] = block
//     # Check block is a descendant of the finalized block
//     assert (
//         get_ancestor(store, signing_root(block), store.blocks[store.finalized_checkpoint.root].slot) ==
//         store.finalized_checkpoint.root
//     )
//     # Check that block is later than the finalized epoch slot
//     assert block.slot > compute_start_slot_of_epoch(store.finalized_checkpoint.epoch)
//     # Check the block is valid and compute the post-state
//     state = state_transition(pre_state, block)
//     # Add new state for this block to the store
//     store.block_states[signing_root(block)] = state
//
//     # Update justified checkpoint
//     if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch:
//         store.justified_checkpoint = state.current_justified_checkpoint
//
//     # Update finalized checkpoint
//     if state.finalized_checkpoint.epoch > store.finalized_checkpoint.epoch:
//         store.finalized_checkpoint = state.finalized_checkpoint
func (s *Store) OnBlock(ctx context.Context, b *ethpb.BeaconBlock) error {
	ctx, span := trace.StartSpan(ctx, "forkchoice.onBlock")
	defer span.End()

	// Retrieve incoming block's pre state.
	preState, err := s.getBlockPreState(ctx, b)
	if err != nil {
		return err
	}
	preStateValidatorCount := len(preState.Validators)

	root, err := ssz.SigningRoot(b)
	if err != nil {
		return errors.Wrapf(err, "could not get signing root of block %d", b.Slot)
	}
	log.WithFields(logrus.Fields{
		"slot": b.Slot,
		"root": fmt.Sprintf("0x%s...", hex.EncodeToString(root[:])[:8]),
	}).Info("Executing state transition on block")
	postState, err := state.ExecuteStateTransition(ctx, preState, b)
	if err != nil {
		return errors.Wrap(err, "could not execute state transition")
	}
	if err := s.updateBlockAttestationsVotes(ctx, b.Body.Attestations); err != nil {
		return errors.Wrap(err, "could not update votes for attestations in block")
	}

	if err := s.db.SaveBlock(ctx, b); err != nil {
		return errors.Wrapf(err, "could not save block from slot %d", b.Slot)
	}
	if err := s.db.SaveState(ctx, postState, root); err != nil {
		return errors.Wrap(err, "could not save state")
	}

	// Update justified check point.
	if postState.CurrentJustifiedCheckpoint.Epoch > s.JustifiedCheckpt().Epoch {
		s.justifiedCheckpt = postState.CurrentJustifiedCheckpoint
		if err := s.db.SaveJustifiedCheckpoint(ctx, postState.CurrentJustifiedCheckpoint); err != nil {
			return errors.Wrap(err, "could not save justified checkpoint")
		}
	}

	// Update finalized check point.
	// Prune the block cache and helper caches on every new finalized epoch.
	if postState.FinalizedCheckpoint.Epoch > s.finalizedCheckpt.Epoch {
		s.clearSeenAtts()
		helpers.ClearAllCaches()
		if err := s.db.SaveFinalizedCheckpoint(ctx, postState.FinalizedCheckpoint); err != nil {
			return errors.Wrap(err, "could not save finalized checkpoint")
		}

		startSlot := helpers.StartSlot(s.prevFinalizedCheckpt.Epoch) + 1
		endSlot := helpers.StartSlot(s.finalizedCheckpt.Epoch)
		if err := s.rmStatesOlderThanLastFinalized(ctx, startSlot, endSlot); err != nil {
			return errors.Wrapf(err, "could not delete states prior to finalized check point, range: %d, %d",
				startSlot, endSlot+params.BeaconConfig().SlotsPerEpoch)
		}

		s.prevFinalizedCheckpt = s.finalizedCheckpt
		s.finalizedCheckpt = postState.FinalizedCheckpoint
	}

	// Update validator indices in database as needed.
	if err := s.saveNewValidators(ctx, preStateValidatorCount, postState); err != nil {
		return errors.Wrap(err, "could not save new validators")
	}
	// Save the unseen attestations from block to db.
	if err := s.saveNewBlockAttestations(ctx, b.Body.Attestations); err != nil {
		return errors.Wrap(err, "could not save attestations")
	}

	// Epoch boundary bookkeeping such as logging epoch summaries.
	if helpers.IsEpochStart(postState.Slot) {
		logEpochData(postState)
		reportEpochMetrics(postState)

		// Update committee shuffled indices at the end of every epoch.
		if featureconfig.Get().EnableNewCache {
			if err := helpers.UpdateCommitteeCache(postState); err != nil {
				return err
			}
		}
	}

	return nil
}

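// receiveGossipBlock is a minimal usage sketch (hypothetical, not part of the
// original change): a caller feeds each gossip block through OnBlock, treats
// any returned error as grounds for rejecting the block, and then recomputes
// the head.
func receiveGossipBlock(ctx context.Context, s *Store, b *ethpb.BeaconBlock) ([]byte, error) {
	if err := s.OnBlock(ctx, b); err != nil {
		return nil, errors.Wrap(err, "block failed fork choice processing")
	}
	// With the store updated, LMD-GHOST can resolve the new head.
	return s.Head(ctx)
}
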
// OnBlockNoVerifyStateTransition is called when an initial sync block is received.
// It runs the state transition on the block without any BLS verification. The BLS verification
// includes the proposer signature, randao, and the attestations' aggregated signatures.
func (s *Store) OnBlockNoVerifyStateTransition(ctx context.Context, b *ethpb.BeaconBlock) error {
	ctx, span := trace.StartSpan(ctx, "forkchoice.onBlock")
	defer span.End()

	// Retrieve incoming block's pre state.
	preState, err := s.getBlockPreState(ctx, b)
	if err != nil {
		return err
	}
	preStateValidatorCount := len(preState.Validators)

	log.WithField("slot", b.Slot).Debug("Executing state transition on block")

	postState, err := state.ExecuteStateTransitionNoVerify(ctx, preState, b)
	if err != nil {
		return errors.Wrap(err, "could not execute state transition")
	}

	if err := s.db.SaveBlock(ctx, b); err != nil {
		return errors.Wrapf(err, "could not save block from slot %d", b.Slot)
	}
	root, err := ssz.SigningRoot(b)
	if err != nil {
		return errors.Wrapf(err, "could not get signing root of block %d", b.Slot)
	}
	if err := s.db.SaveState(ctx, postState, root); err != nil {
		return errors.Wrap(err, "could not save state")
	}

	// Update justified check point.
	if postState.CurrentJustifiedCheckpoint.Epoch > s.JustifiedCheckpt().Epoch {
		s.justifiedCheckpt = postState.CurrentJustifiedCheckpoint
		if err := s.db.SaveJustifiedCheckpoint(ctx, postState.CurrentJustifiedCheckpoint); err != nil {
			return errors.Wrap(err, "could not save justified checkpoint")
		}
	}

	// Update finalized check point.
	// Prune the block cache and helper caches on every new finalized epoch.
	if postState.FinalizedCheckpoint.Epoch > s.finalizedCheckpt.Epoch {
		s.clearSeenAtts()
		helpers.ClearAllCaches()

		startSlot := helpers.StartSlot(s.prevFinalizedCheckpt.Epoch) + 1
		endSlot := helpers.StartSlot(s.finalizedCheckpt.Epoch)
		if err := s.rmStatesOlderThanLastFinalized(ctx, startSlot, endSlot); err != nil {
			return errors.Wrapf(err, "could not delete states prior to finalized check point, range: %d, %d",
				startSlot, endSlot+params.BeaconConfig().SlotsPerEpoch)
		}

		if err := s.db.SaveFinalizedCheckpoint(ctx, postState.FinalizedCheckpoint); err != nil {
			return errors.Wrap(err, "could not save finalized checkpoint")
		}

		s.prevFinalizedCheckpt = s.finalizedCheckpt
		s.finalizedCheckpt = postState.FinalizedCheckpoint
	}

	// Update validator indices in database as needed.
	if err := s.saveNewValidators(ctx, preStateValidatorCount, postState); err != nil {
		return errors.Wrap(err, "could not save new validators")
	}
	// Save the unseen attestations from block to db.
	if err := s.saveNewBlockAttestations(ctx, b.Body.Attestations); err != nil {
		return errors.Wrap(err, "could not save attestations")
	}

	// Epoch boundary bookkeeping such as logging epoch summaries.
	if helpers.IsEpochStart(postState.Slot) {
		reportEpochMetrics(postState)

		// Update committee shuffled indices at the end of every epoch.
		if featureconfig.Get().EnableNewCache {
			if err := helpers.UpdateCommitteeCache(postState); err != nil {
				return err
			}
		}
	}

	return nil
}

// getBlockPreState returns the pre state of an incoming block. It uses the parent root of the block
// to retrieve the state in DB. It verifies the pre state's validity and that the incoming block
// is in the correct time window.
func (s *Store) getBlockPreState(ctx context.Context, b *ethpb.BeaconBlock) (*pb.BeaconState, error) {
	ctx, span := trace.StartSpan(ctx, "forkchoice.getBlockPreState")
	defer span.End()

	// Verify incoming block has a valid pre state.
	preState, err := s.verifyBlkPreState(ctx, b)
	if err != nil {
		return nil, err
	}

	// Verify block slot time is not from the future.
	if err := helpers.VerifySlotTime(preState.GenesisTime, b.Slot); err != nil {
		return nil, err
	}

	// Verify block is a descendant of a finalized block.
	if err := s.verifyBlkDescendant(ctx, bytesutil.ToBytes32(b.ParentRoot), b.Slot); err != nil {
		return nil, err
	}

	// Verify block is later than the finalized epoch slot.
	if err := s.verifyBlkFinalizedSlot(b); err != nil {
		return nil, err
	}

	return preState, nil
}

// updateBlockAttestationsVotes checks the attestations in a block, filters out the seen ones,
// and passes the unseen ones to updateBlockAttestationVote to update fork choice votes.
func (s *Store) updateBlockAttestationsVotes(ctx context.Context, atts []*ethpb.Attestation) error {
	s.seenAttsLock.Lock()
	defer s.seenAttsLock.Unlock()

	for _, att := range atts {
		// Skip the attestation if we have already seen it.
		r, err := hashutil.HashProto(att)
		if err != nil {
			return err
		}
		if s.seenAtts[r] {
			continue
		}
		if err := s.updateBlockAttestationVote(ctx, att); err != nil {
			log.WithError(err).Warn("Attestation failed to update vote")
		}
		s.seenAtts[r] = true
	}
	return nil
}

// updateBlockAttestationVote uses the attestation to update the attesting validators' latest votes.
func (s *Store) updateBlockAttestationVote(ctx context.Context, att *ethpb.Attestation) error {
	tgt := att.Data.Target
	baseState, err := s.db.State(ctx, bytesutil.ToBytes32(tgt.Root))
	if err != nil {
		return errors.Wrap(err, "could not get state for attestation tgt root")
	}
	if baseState == nil {
		return errors.New("no state found in db with attestation tgt root")
	}
	indexedAtt, err := blocks.ConvertToIndexed(ctx, baseState, att)
	if err != nil {
		return errors.Wrap(err, "could not convert attestation to indexed attestation")
	}
	for _, i := range append(indexedAtt.CustodyBit_0Indices, indexedAtt.CustodyBit_1Indices...) {
		vote, err := s.db.ValidatorLatestVote(ctx, i)
		if err != nil {
			return errors.Wrapf(err, "could not get latest vote for validator %d", i)
		}
		if vote == nil || tgt.Epoch > vote.Epoch {
			if err := s.db.SaveValidatorLatestVote(ctx, i, &pb.ValidatorLatestVote{
				Epoch: tgt.Epoch,
				Root:  tgt.Root,
			}); err != nil {
				return errors.Wrapf(err, "could not save latest vote for validator %d", i)
			}
		}
	}
	return nil
}

// verifyBlkPreState validates input block has a valid pre-state.
func (s *Store) verifyBlkPreState(ctx context.Context, b *ethpb.BeaconBlock) (*pb.BeaconState, error) {
	preState, err := s.db.State(ctx, bytesutil.ToBytes32(b.ParentRoot))
	if err != nil {
		return nil, errors.Wrapf(err, "could not get pre state for slot %d", b.Slot)
	}
	if preState == nil {
		return nil, fmt.Errorf("pre state of slot %d does not exist", b.Slot)
	}
	return preState, nil
}

// verifyBlkDescendant validates that the input block root is a descendant of the
// current finalized block root.
func (s *Store) verifyBlkDescendant(ctx context.Context, root [32]byte, slot uint64) error {
	ctx, span := trace.StartSpan(ctx, "forkchoice.verifyBlkDescendant")
	defer span.End()

	finalizedBlk, err := s.db.Block(ctx, bytesutil.ToBytes32(s.finalizedCheckpt.Root))
	if err != nil || finalizedBlk == nil {
		return errors.Wrap(err, "could not get finalized block")
	}

	bFinalizedRoot, err := s.ancestor(ctx, root[:], finalizedBlk.Slot)
	if err != nil {
		return errors.Wrap(err, "could not get finalized block root")
	}
	if !bytes.Equal(bFinalizedRoot, s.finalizedCheckpt.Root) {
		err := fmt.Errorf("block from slot %d is not a descendant of the current finalized block slot %d, %#x != %#x",
			slot, finalizedBlk.Slot, bytesutil.Trunc(bFinalizedRoot), bytesutil.Trunc(s.finalizedCheckpt.Root))
		traceutil.AnnotateError(span, err)
		return err
	}
	return nil
}

// verifyBlkFinalizedSlot validates that the input block's slot is not less than or equal
// to the current finalized slot.
func (s *Store) verifyBlkFinalizedSlot(b *ethpb.BeaconBlock) error {
	finalizedSlot := helpers.StartSlot(s.finalizedCheckpt.Epoch)
	if finalizedSlot >= b.Slot {
		return fmt.Errorf("block is equal or earlier than finalized block, slot %d < slot %d", b.Slot, finalizedSlot)
	}
	return nil
}

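// exampleFinalizedSlotGate restates the check above as a predicate
// (illustrative only, not part of the original change). Assuming the minimal
// config's 8 slots per epoch, a finalized checkpoint at epoch 1 gives
// helpers.StartSlot(1) == 8, so slots 0 through 8 are rejected and slot 9 is
// the first block slot that passes.
func exampleFinalizedSlotGate(s *Store, b *ethpb.BeaconBlock) bool {
	return b.Slot > helpers.StartSlot(s.finalizedCheckpt.Epoch)
}
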
// saveNewValidators saves newly added validator indices from state to db. Does nothing if the validator count has not
// changed.
func (s *Store) saveNewValidators(ctx context.Context, preStateValidatorCount int, postState *pb.BeaconState) error {
	postStateValidatorCount := len(postState.Validators)
	if preStateValidatorCount != postStateValidatorCount {
		for i := preStateValidatorCount; i < postStateValidatorCount; i++ {
			pubKey := postState.Validators[i].PublicKey
			if err := s.db.SaveValidatorIndex(ctx, bytesutil.ToBytes48(pubKey), uint64(i)); err != nil {
				return errors.Wrapf(err, "could not save activated validator: %d", i)
			}
			log.WithFields(logrus.Fields{
				"index":               i,
				"pubKey":              hex.EncodeToString(bytesutil.Trunc(pubKey)),
				"totalValidatorCount": i + 1,
			}).Info("New validator index saved in DB")
		}
	}
	return nil
}

// saveNewBlockAttestations saves the new attestations in a block to DB.
func (s *Store) saveNewBlockAttestations(ctx context.Context, atts []*ethpb.Attestation) error {
	attestations := make([]*ethpb.Attestation, 0, len(atts))
	for _, att := range atts {
		aggregated, err := s.aggregatedAttestations(ctx, att)
		if err != nil {
			continue
		}
		attestations = append(attestations, aggregated...)
	}
	if err := s.db.SaveAttestations(ctx, atts); err != nil {
		return err
	}
	return nil
}

// clearSeenAtts clears the seen attestations map; it gets called upon a new finalization.
func (s *Store) clearSeenAtts() {
	s.seenAttsLock.Lock()
	defer s.seenAttsLock.Unlock()
	s.seenAtts = make(map[[32]byte]bool)
}

// rmStatesOlderThanLastFinalized deletes the states in db since the last finalized check point.
func (s *Store) rmStatesOlderThanLastFinalized(ctx context.Context, startSlot uint64, endSlot uint64) error {
	ctx, span := trace.StartSpan(ctx, "forkchoice.rmStatesBySlots")
	defer span.End()

	if !featureconfig.Get().PruneFinalizedStates {
		return nil
	}

	// Make sure the finalized slot is not a skipped slot.
	for i := endSlot; i > 0; i-- {
		filter := filters.NewFilter().SetStartSlot(i).SetEndSlot(i)
		b, err := s.db.Blocks(ctx, filter)
		if err != nil {
			return err
		}
		if len(b) > 0 {
			endSlot = i - 1
			break
		}
	}

	// Do not remove the genesis state.
	if startSlot == 0 {
		startSlot++
	}

	// Do not remove a finalized state that's in the middle of slot ranges.
	filter := filters.NewFilter().SetStartSlot(startSlot).SetEndSlot(endSlot)
	roots, err := s.db.BlockRoots(ctx, filter)
	if err != nil {
		return err
	}

	if err := s.db.DeleteStates(ctx, roots); err != nil {
		return err
	}

	return nil
}
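// lastSlotWithBlock condenses the backward scan above into a standalone
// sketch (illustrative only, not part of the original change). The scan
// matters because the finalized epoch's start slot may be a skipped slot:
// deleting purely by slot range could then remove the state of the last
// block before finalization.
func lastSlotWithBlock(ctx context.Context, s *Store, endSlot uint64) (uint64, error) {
	for i := endSlot; i > 0; i-- {
		blks, err := s.db.Blocks(ctx, filters.NewFilter().SetStartSlot(i).SetEndSlot(i))
		if err != nil {
			return 0, err
		}
		if len(blks) > 0 {
			return i, nil
		}
	}
	return 0, nil
}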
343	beacon-chain/blockchain/forkchoice/process_block_test.go	Normal file
@@ -0,0 +1,343 @@
package forkchoice

import (
	"context"
	"reflect"
	"strings"
	"testing"

	"github.com/prysmaticlabs/go-bitfield"
	"github.com/prysmaticlabs/go-ssz"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
	testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
	ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
	"github.com/prysmaticlabs/prysm/shared/bytesutil"
	"github.com/prysmaticlabs/prysm/shared/featureconfig"
	"github.com/prysmaticlabs/prysm/shared/hashutil"
	"github.com/prysmaticlabs/prysm/shared/params"
	"github.com/prysmaticlabs/prysm/shared/testutil"
)

func init() {
	fc := featureconfig.Get()
	fc.PruneFinalizedStates = true
	featureconfig.Init(fc)
}

func TestStore_OnBlock(t *testing.T) {
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)

	store := NewForkChoiceService(ctx, db)

	roots, err := blockTree1(db)
	if err != nil {
		t.Fatal(err)
	}

	randomParentRoot := []byte{'a'}
	if err := store.db.SaveState(ctx, &pb.BeaconState{}, bytesutil.ToBytes32(randomParentRoot)); err != nil {
		t.Fatal(err)
	}
	randomParentRoot2 := roots[1]
	if err := store.db.SaveState(ctx, &pb.BeaconState{}, bytesutil.ToBytes32(randomParentRoot2)); err != nil {
		t.Fatal(err)
	}
	validGenesisRoot := []byte{'g'}
	if err := store.db.SaveState(ctx, &pb.BeaconState{}, bytesutil.ToBytes32(validGenesisRoot)); err != nil {
		t.Fatal(err)
	}

	tests := []struct {
		name          string
		blk           *ethpb.BeaconBlock
		s             *pb.BeaconState
		time          uint64
		wantErrString string
	}{
		{
			name:          "parent block root does not have a state",
			blk:           &ethpb.BeaconBlock{},
			s:             &pb.BeaconState{},
			wantErrString: "pre state of slot 0 does not exist",
		},
		{
			name:          "block is from the future",
			blk:           &ethpb.BeaconBlock{ParentRoot: randomParentRoot, Slot: params.BeaconConfig().FarFutureEpoch},
			s:             &pb.BeaconState{},
			wantErrString: "could not process slot from the future",
		},
		{
			name:          "could not get finalized block",
			blk:           &ethpb.BeaconBlock{ParentRoot: randomParentRoot},
			s:             &pb.BeaconState{},
			wantErrString: "block from slot 0 is not a descendant of the current finalized block",
		},
		{
			name:          "same slot as finalized block",
			blk:           &ethpb.BeaconBlock{Slot: 0, ParentRoot: randomParentRoot2},
			s:             &pb.BeaconState{},
			wantErrString: "block is equal or earlier than finalized block, slot 0 < slot 0",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if err := store.GenesisStore(ctx, &ethpb.Checkpoint{}, &ethpb.Checkpoint{}); err != nil {
				t.Fatal(err)
			}
			store.finalizedCheckpt.Root = roots[0]

			err := store.OnBlock(ctx, tt.blk)
			if !strings.Contains(err.Error(), tt.wantErrString) {
				t.Errorf("Store.OnBlock() error = %v, wantErr = %v", err, tt.wantErrString)
			}
		})
	}
}

func TestStore_SaveNewValidators(t *testing.T) {
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)

	store := NewForkChoiceService(ctx, db)
	preCount := 2 // validators 0 and 1
	s := &pb.BeaconState{Validators: []*ethpb.Validator{
		{PublicKey: []byte{0}}, {PublicKey: []byte{1}},
		{PublicKey: []byte{2}}, {PublicKey: []byte{3}},
	}}
	if err := store.saveNewValidators(ctx, preCount, s); err != nil {
		t.Fatal(err)
	}

	if !db.HasValidatorIndex(ctx, bytesutil.ToBytes48([]byte{2})) {
		t.Error("Wanted validator saved in db")
	}
	if !db.HasValidatorIndex(ctx, bytesutil.ToBytes48([]byte{3})) {
		t.Error("Wanted validator saved in db")
	}
	if db.HasValidatorIndex(ctx, bytesutil.ToBytes48([]byte{1})) {
		t.Error("Validator was not supposed to be saved in db")
	}
}

func TestStore_UpdateBlockAttestationVote(t *testing.T) {
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)
	params.UseMinimalConfig()

	deposits, _, _ := testutil.SetupInitialDeposits(t, 100)
	beaconState, err := state.GenesisBeaconState(deposits, uint64(0), &ethpb.Eth1Data{BlockHash: make([]byte, 32)})
	if err != nil {
		t.Fatal(err)
	}

	store := NewForkChoiceService(ctx, db)
	r := [32]byte{'A'}
	att := &ethpb.Attestation{
		Data: &ethpb.AttestationData{
			Source: &ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]},
			Target: &ethpb.Checkpoint{Epoch: 0, Root: r[:]},
		},
		AggregationBits: []byte{255},
		CustodyBits:     []byte{255},
	}
	if err := store.db.SaveState(ctx, beaconState, r); err != nil {
		t.Fatal(err)
	}

	indices, err := blocks.ConvertToIndexed(ctx, beaconState, att)
	if err != nil {
		t.Fatal(err)
	}

	var attestedIndices []uint64
	for _, k := range append(indices.CustodyBit_0Indices, indices.CustodyBit_1Indices...) {
		attestedIndices = append(attestedIndices, k)
	}

	if err := store.updateBlockAttestationVote(ctx, att); err != nil {
		t.Fatal(err)
	}
	for _, i := range attestedIndices {
		v, err := store.db.ValidatorLatestVote(ctx, i)
		if err != nil {
			t.Fatal(err)
		}
		if !reflect.DeepEqual(v.Root, r[:]) {
			t.Error("Attested roots don't match")
		}
	}
}

func TestStore_UpdateBlockAttestationsVote(t *testing.T) {
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)
	params.UseMinimalConfig()

	deposits, _, _ := testutil.SetupInitialDeposits(t, 100)
	beaconState, err := state.GenesisBeaconState(deposits, uint64(0), &ethpb.Eth1Data{BlockHash: make([]byte, 32)})
	if err != nil {
		t.Fatal(err)
	}

	store := NewForkChoiceService(ctx, db)
	r := [32]byte{'A'}
	atts := make([]*ethpb.Attestation, 5)
	hashes := make([][32]byte, 5)
	for i := 0; i < len(atts); i++ {
		atts[i] = &ethpb.Attestation{
			Data: &ethpb.AttestationData{
				Source: &ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]},
				Target: &ethpb.Checkpoint{Epoch: 0, Root: r[:]},
			},
			AggregationBits: []byte{255},
			CustodyBits:     []byte{255},
		}
		h, _ := hashutil.HashProto(atts[i])
		hashes[i] = h
	}

	if err := store.db.SaveState(ctx, beaconState, r); err != nil {
		t.Fatal(err)
	}

	if err := store.updateBlockAttestationsVotes(ctx, atts); err != nil {
		t.Fatal(err)
	}

	for _, h := range hashes {
		if !store.seenAtts[h] {
			t.Error("Seen attestation did not get recorded")
		}
	}
}

func TestStore_SavesNewBlockAttestations(t *testing.T) {
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)

	store := NewForkChoiceService(ctx, db)
	a1 := &ethpb.Attestation{Data: &ethpb.AttestationData{}, AggregationBits: bitfield.Bitlist{0b101}, CustodyBits: bitfield.NewBitlist(2)}
	a2 := &ethpb.Attestation{Data: &ethpb.AttestationData{BeaconBlockRoot: []byte{'A'}}, AggregationBits: bitfield.Bitlist{0b110}, CustodyBits: bitfield.NewBitlist(2)}
	r1, _ := ssz.HashTreeRoot(a1.Data)
	r2, _ := ssz.HashTreeRoot(a2.Data)

	if err := store.saveNewBlockAttestations(ctx, []*ethpb.Attestation{a1, a2}); err != nil {
		t.Fatal(err)
	}

	saved, err := store.db.AttestationsByDataRoot(ctx, r1)
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual([]*ethpb.Attestation{a1}, saved) {
		t.Error("did not retrieve saved attestation")
	}

	saved, err = store.db.AttestationsByDataRoot(ctx, r2)
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual([]*ethpb.Attestation{a2}, saved) {
		t.Error("did not retrieve saved attestation")
	}

	a1 = &ethpb.Attestation{Data: &ethpb.AttestationData{}, AggregationBits: bitfield.Bitlist{0b111}, CustodyBits: bitfield.NewBitlist(2)}
	a2 = &ethpb.Attestation{Data: &ethpb.AttestationData{BeaconBlockRoot: []byte{'A'}}, AggregationBits: bitfield.Bitlist{0b111}, CustodyBits: bitfield.NewBitlist(2)}

	if err := store.saveNewBlockAttestations(ctx, []*ethpb.Attestation{a1, a2}); err != nil {
		t.Fatal(err)
	}

	saved, err = store.db.AttestationsByDataRoot(ctx, r1)
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual([]*ethpb.Attestation{a1}, saved) {
		t.Error("did not retrieve saved attestation")
	}

	saved, err = store.db.AttestationsByDataRoot(ctx, r2)
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual([]*ethpb.Attestation{a2}, saved) {
		t.Error("did not retrieve saved attestation")
	}
}

func TestRemoveStateSinceLastFinalized(t *testing.T) {
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)
	params.UseMinimalConfig()
	defer params.UseMainnetConfig()

	store := NewForkChoiceService(ctx, db)

	// Save 100 blocks in DB, each with a state.
	numBlocks := 100
	totalBlocks := make([]*ethpb.BeaconBlock, numBlocks)
	blockRoots := make([][32]byte, 0)
	for i := 0; i < len(totalBlocks); i++ {
		totalBlocks[i] = &ethpb.BeaconBlock{
			Slot: uint64(i),
		}
		r, err := ssz.SigningRoot(totalBlocks[i])
		if err != nil {
			t.Fatal(err)
		}
		if err := store.db.SaveState(ctx, &pb.BeaconState{Slot: uint64(i)}, r); err != nil {
			t.Fatal(err)
		}
		if err := store.db.SaveBlock(ctx, totalBlocks[i]); err != nil {
			t.Fatal(err)
		}
		blockRoots = append(blockRoots, r)
	}

	// New finalized epoch: 1
	finalizedEpoch := uint64(1)
	finalizedSlot := finalizedEpoch * params.BeaconConfig().SlotsPerEpoch
	endSlot := helpers.StartSlot(finalizedEpoch+1) - 1 // Inclusive
	if err := store.rmStatesOlderThanLastFinalized(ctx, 0, endSlot); err != nil {
		t.Fatal(err)
	}
	for _, r := range blockRoots {
		s, err := store.db.State(ctx, r)
		if err != nil {
			t.Fatal(err)
		}
		// Also verifies the genesis state didn't get deleted.
		if s != nil && s.Slot != finalizedSlot && s.Slot != 0 && s.Slot < endSlot {
			t.Errorf("State with slot %d should not be in DB", s.Slot)
		}
	}

	// New finalized epoch: 5
	newFinalizedEpoch := uint64(5)
	newFinalizedSlot := newFinalizedEpoch * params.BeaconConfig().SlotsPerEpoch
	endSlot = helpers.StartSlot(newFinalizedEpoch+1) - 1 // Inclusive
	if err := store.rmStatesOlderThanLastFinalized(ctx, helpers.StartSlot(finalizedEpoch+1)-1, endSlot); err != nil {
		t.Fatal(err)
	}
	for _, r := range blockRoots {
		s, err := store.db.State(ctx, r)
		if err != nil {
			t.Fatal(err)
		}
		// Also verifies the genesis state didn't get deleted.
		if s != nil && s.Slot != newFinalizedSlot && s.Slot != finalizedSlot && s.Slot != 0 && s.Slot < endSlot {
			t.Errorf("State with slot %d should not be in DB", s.Slot)
		}
	}
}
257	beacon-chain/blockchain/forkchoice/service.go	Normal file
@@ -0,0 +1,257 @@
package forkchoice

import (
	"bytes"
	"context"
	"sync"

	"github.com/gogo/protobuf/proto"
	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/beacon-chain/cache"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/beacon-chain/db"
	"github.com/prysmaticlabs/prysm/beacon-chain/db/filters"
	ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
	"github.com/prysmaticlabs/prysm/shared/bytesutil"
	"github.com/prysmaticlabs/prysm/shared/params"
	"go.opencensus.io/trace"
)

// ForkChoicer defines a common interface for methods useful for directly applying fork choice
// to beacon blocks to compute the head.
type ForkChoicer interface {
	Head(ctx context.Context) ([]byte, error)
	OnBlock(ctx context.Context, b *ethpb.BeaconBlock) error
	OnBlockNoVerifyStateTransition(ctx context.Context, b *ethpb.BeaconBlock) error
	OnAttestation(ctx context.Context, a *ethpb.Attestation) (uint64, error)
	GenesisStore(ctx context.Context, justifiedCheckpoint *ethpb.Checkpoint, finalizedCheckpoint *ethpb.Checkpoint) error
	FinalizedCheckpt() *ethpb.Checkpoint
}

// Store represents a service struct that handles the fork choice
// logic of managing the full PoS beacon chain.
type Store struct {
	ctx                  context.Context
	cancel               context.CancelFunc
	db                   db.Database
	justifiedCheckpt     *ethpb.Checkpoint
	finalizedCheckpt     *ethpb.Checkpoint
	prevFinalizedCheckpt *ethpb.Checkpoint
	checkpointState      *cache.CheckpointStateCache
	checkpointStateLock  sync.Mutex
	attsQueue            map[[32]byte]*ethpb.Attestation
	attsQueueLock        sync.Mutex
	seenAtts             map[[32]byte]bool
	seenAttsLock         sync.Mutex
}

// NewForkChoiceService instantiates a new service instance that will
// be registered into a running beacon node.
func NewForkChoiceService(ctx context.Context, db db.Database) *Store {
	ctx, cancel := context.WithCancel(ctx)
	return &Store{
		ctx:             ctx,
		cancel:          cancel,
		db:              db,
		checkpointState: cache.NewCheckpointStateCache(),
		attsQueue:       make(map[[32]byte]*ethpb.Attestation),
		seenAtts:        make(map[[32]byte]bool),
	}
}

// GenesisStore initializes the store struct before the beacon chain
// starts to advance.
//
// Spec pseudocode definition:
//   def get_genesis_store(genesis_state: BeaconState) -> Store:
//     genesis_block = BeaconBlock(state_root=hash_tree_root(genesis_state))
//     root = signing_root(genesis_block)
//     justified_checkpoint = Checkpoint(epoch=GENESIS_EPOCH, root=root)
//     finalized_checkpoint = Checkpoint(epoch=GENESIS_EPOCH, root=root)
//     return Store(
//         time=genesis_state.genesis_time,
//         justified_checkpoint=justified_checkpoint,
//         finalized_checkpoint=finalized_checkpoint,
//         blocks={root: genesis_block},
//         block_states={root: genesis_state.copy()},
//         checkpoint_states={justified_checkpoint: genesis_state.copy()},
//     )
func (s *Store) GenesisStore(
	ctx context.Context,
	justifiedCheckpoint *ethpb.Checkpoint,
	finalizedCheckpoint *ethpb.Checkpoint) error {

	s.justifiedCheckpt = proto.Clone(justifiedCheckpoint).(*ethpb.Checkpoint)
	s.finalizedCheckpt = proto.Clone(finalizedCheckpoint).(*ethpb.Checkpoint)
	s.prevFinalizedCheckpt = proto.Clone(finalizedCheckpoint).(*ethpb.Checkpoint)

	justifiedState, err := s.db.State(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root))
	if err != nil {
		return errors.Wrap(err, "could not retrieve last justified state")
	}

	if err := s.checkpointState.AddCheckpointState(&cache.CheckpointState{
		Checkpoint: s.justifiedCheckpt,
		State:      justifiedState,
	}); err != nil {
		return errors.Wrap(err, "could not save genesis state in check point cache")
	}

	return nil
}

// ancestor returns the block root of an ancestor block from the input block root.
//
// Spec pseudocode definition:
//   def get_ancestor(store: Store, root: Hash, slot: Slot) -> Hash:
//     block = store.blocks[root]
//     if block.slot > slot:
//       return get_ancestor(store, block.parent_root, slot)
//     elif block.slot == slot:
//       return root
//     else:
//       return Bytes32()  # root is older than queried slot: no results.
func (s *Store) ancestor(ctx context.Context, root []byte, slot uint64) ([]byte, error) {
	ctx, span := trace.StartSpan(ctx, "forkchoice.ancestor")
	defer span.End()

	b, err := s.db.Block(ctx, bytesutil.ToBytes32(root))
	if err != nil {
		return nil, errors.Wrap(err, "could not get ancestor block")
	}

	// If we don't have the ancestor in the DB, simply return nil so the rest of the fork choice
	// operation can proceed. This is not an error condition.
	if b == nil || b.Slot < slot {
		return nil, nil
	}

	if b.Slot == slot {
		return root, nil
	}

	return s.ancestor(ctx, b.ParentRoot, slot)
}

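// ancestorIterative is an equivalent, loop-based rendering of ancestor
// (illustrative only, not part of the original change); it makes the three
// termination cases of the recursion explicit.
func ancestorIterative(ctx context.Context, s *Store, root []byte, slot uint64) ([]byte, error) {
	for {
		b, err := s.db.Block(ctx, bytesutil.ToBytes32(root))
		if err != nil {
			return nil, err
		}
		if b == nil || b.Slot < slot {
			return nil, nil // Missing block, or root older than the queried slot: no result.
		}
		if b.Slot == slot {
			return root, nil
		}
		root = b.ParentRoot // Walk one block toward genesis.
	}
}
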
// latestAttestingBalance returns the staked balance of a block from the input block root.
//
// Spec pseudocode definition:
//   def get_latest_attesting_balance(store: Store, root: Hash) -> Gwei:
//     state = store.checkpoint_states[store.justified_checkpoint]
//     active_indices = get_active_validator_indices(state, get_current_epoch(state))
//     return Gwei(sum(
//         state.validators[i].effective_balance for i in active_indices
//         if (i in store.latest_messages
//             and get_ancestor(store, store.latest_messages[i].root, store.blocks[root].slot) == root)
//     ))
func (s *Store) latestAttestingBalance(ctx context.Context, root []byte) (uint64, error) {
	ctx, span := trace.StartSpan(ctx, "forkchoice.latestAttestingBalance")
	defer span.End()

	lastJustifiedState, err := s.checkpointState.StateByCheckpoint(s.JustifiedCheckpt())
	if err != nil {
		return 0, errors.Wrap(err, "could not retrieve cached state via last justified check point")
	}
	if lastJustifiedState == nil {
		// errors.Errorf is used here because wrapping the nil err would itself return nil.
		return 0, errors.Errorf("could not get justified state at epoch %d", s.JustifiedCheckpt().Epoch)
	}

	lastJustifiedEpoch := helpers.CurrentEpoch(lastJustifiedState)
	activeIndices, err := helpers.ActiveValidatorIndices(lastJustifiedState, lastJustifiedEpoch)
	if err != nil {
		return 0, errors.Wrap(err, "could not get active indices for last justified checkpoint")
	}

	wantedBlk, err := s.db.Block(ctx, bytesutil.ToBytes32(root))
	if err != nil {
		return 0, errors.Wrap(err, "could not get target block")
	}

	balances := uint64(0)
	for _, i := range activeIndices {
		vote, err := s.db.ValidatorLatestVote(ctx, i)
		if err != nil {
			return 0, errors.Wrapf(err, "could not get validator %d's latest vote", i)
		}
		if vote == nil {
			continue
		}

		wantedRoot, err := s.ancestor(ctx, vote.Root, wantedBlk.Slot)
		if err != nil {
			return 0, errors.Wrapf(err, "could not get ancestor root for slot %d", wantedBlk.Slot)
		}
		if bytes.Equal(wantedRoot, root) {
			balances += lastJustifiedState.Validators[i].EffectiveBalance
		}
	}
	return balances, nil
}

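// A worked example of the weighting above, mirroring
// TestStore_LatestAttestingBalance (not part of the original change): with
// 100 active validators at an effective balance of 1e9 each, 33 latest votes
// on B1 give it 33 * 1e9, while B3 accumulates 67 * 1e9 because both the 34
// votes on B8 and the 33 votes on B7 descend from it.
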
// Head returns the head of the beacon chain.
//
// Spec pseudocode definition:
//   def get_head(store: Store) -> Hash:
//     # Execute the LMD-GHOST fork choice
//     head = store.justified_checkpoint.root
//     justified_slot = compute_start_slot_of_epoch(store.justified_checkpoint.epoch)
//     while True:
//         children = [
//             root for root in store.blocks.keys()
//             if store.blocks[root].parent_root == head and store.blocks[root].slot > justified_slot
//         ]
//         if len(children) == 0:
//             return head
//         # Sort by latest attesting balance with ties broken lexicographically
//         head = max(children, key=lambda root: (get_latest_attesting_balance(store, root), root))
func (s *Store) Head(ctx context.Context) ([]byte, error) {
	ctx, span := trace.StartSpan(ctx, "forkchoice.head")
	defer span.End()

	head := s.JustifiedCheckpt().Root

	for {
		startSlot := s.JustifiedCheckpt().Epoch * params.BeaconConfig().SlotsPerEpoch
		filter := filters.NewFilter().SetParentRoot(head).SetStartSlot(startSlot)
		children, err := s.db.BlockRoots(ctx, filter)
		if err != nil {
			return nil, errors.Wrap(err, "could not retrieve children info")
		}

		if len(children) == 0 {
			return head, nil
		}

		// If a block has one child, then we don't have to look up anything to
		// know that this child will be the best child.
		head = children[0][:]
		if len(children) > 1 {
			highest, err := s.latestAttestingBalance(ctx, head)
			if err != nil {
				return nil, errors.Wrap(err, "could not get latest balance")
			}
			for _, child := range children[1:] {
				balance, err := s.latestAttestingBalance(ctx, child[:])
				if err != nil {
					return nil, errors.Wrap(err, "could not get latest balance")
				}
				// When there's a tie, it's broken lexicographically to favor the higher root.
				if balance > highest ||
					(balance == highest && bytes.Compare(child[:], head) > 0) {
					highest = balance
					head = child[:]
				}
			}
		}
	}
}

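// preferChild isolates the fork choice comparison above (illustrative only,
// not part of the original change): the child with the larger attesting
// balance wins, and an exact tie goes to the lexicographically larger root.
func preferChild(balanceA, balanceB uint64, a, b []byte) []byte {
	if balanceA > balanceB || (balanceA == balanceB && bytes.Compare(a, b) > 0) {
		return a
	}
	return b
}
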
// JustifiedCheckpt returns the latest justified check point from the fork choice store.
func (s *Store) JustifiedCheckpt() *ethpb.Checkpoint {
	return proto.Clone(s.justifiedCheckpt).(*ethpb.Checkpoint)
}

// FinalizedCheckpt returns the latest finalized check point from the fork choice store.
func (s *Store) FinalizedCheckpt() *ethpb.Checkpoint {
	return proto.Clone(s.finalizedCheckpt).(*ethpb.Checkpoint)
}
346	beacon-chain/blockchain/forkchoice/service_test.go	Normal file
@@ -0,0 +1,346 @@
package forkchoice

import (
	"bytes"
	"context"
	"reflect"
	"testing"
	"time"

	"github.com/prysmaticlabs/go-ssz"
	"github.com/prysmaticlabs/prysm/beacon-chain/cache"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/beacon-chain/db/filters"
	testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
	ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
	"github.com/prysmaticlabs/prysm/shared/bytesutil"
)

func TestStore_GenesisStoreOk(t *testing.T) {
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)

	store := NewForkChoiceService(ctx, db)

	genesisTime := time.Unix(9999, 0)
	genesisState := &pb.BeaconState{GenesisTime: uint64(genesisTime.Unix())}
	genesisStateRoot, err := ssz.HashTreeRoot(genesisState)
	if err != nil {
		t.Fatal(err)
	}
	genesisBlk := blocks.NewGenesisBlock(genesisStateRoot[:])
	genesisBlkRoot, err := ssz.SigningRoot(genesisBlk)
	if err != nil {
		t.Fatal(err)
	}
	if err := db.SaveState(ctx, genesisState, genesisBlkRoot); err != nil {
		t.Fatal(err)
	}

	checkPoint := &ethpb.Checkpoint{Root: genesisBlkRoot[:]}
	if err := store.GenesisStore(ctx, checkPoint, checkPoint); err != nil {
		t.Fatal(err)
	}

	if !reflect.DeepEqual(store.justifiedCheckpt, checkPoint) {
		t.Error("Justified check point from genesis store did not match")
	}
	if !reflect.DeepEqual(store.finalizedCheckpt, checkPoint) {
		t.Error("Finalized check point from genesis store did not match")
	}

	cachedState, err := store.checkpointState.StateByCheckpoint(checkPoint)
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(cachedState, genesisState) {
		t.Error("Incorrect genesis state cached")
	}
}

func TestStore_AncestorOk(t *testing.T) {
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)

	store := NewForkChoiceService(ctx, db)

	roots, err := blockTree1(db)
	if err != nil {
		t.Fatal(err)
	}
	type args struct {
		root []byte
		slot uint64
	}

	//    /- B1
	// B0           /- B5 - B7
	//    \- B3 - B4 - B6 - B8
	tests := []struct {
		args *args
		want []byte
	}{
		{args: &args{roots[1], 0}, want: roots[0]},
		{args: &args{roots[8], 0}, want: roots[0]},
		{args: &args{roots[8], 4}, want: roots[4]},
		{args: &args{roots[7], 4}, want: roots[4]},
		{args: &args{roots[7], 0}, want: roots[0]},
	}
	for _, tt := range tests {
		got, err := store.ancestor(ctx, tt.args.root, tt.args.slot)
		if err != nil {
			t.Fatal(err)
		}
		if !reflect.DeepEqual(got, tt.want) {
			t.Errorf("Store.ancestor() = %v, want %v", got, tt.want)
		}
	}
}

func TestStore_AncestorNotPartOfTheChain(t *testing.T) {
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)

	store := NewForkChoiceService(ctx, db)

	roots, err := blockTree1(db)
	if err != nil {
		t.Fatal(err)
	}

	//    /- B1
	// B0           /- B5 - B7
	//    \- B3 - B4 - B6 - B8
	root, err := store.ancestor(ctx, roots[8], 1)
	if err != nil {
		t.Fatal(err)
	}
	if root != nil {
		t.Error("block at slot 1 is not part of the chain")
	}
	root, err = store.ancestor(ctx, roots[8], 2)
	if err != nil {
		t.Fatal(err)
	}
	if root != nil {
		t.Error("block at slot 2 is not part of the chain")
	}
}

func TestStore_LatestAttestingBalance(t *testing.T) {
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)

	store := NewForkChoiceService(ctx, db)

	roots, err := blockTree1(db)
	if err != nil {
		t.Fatal(err)
	}

	validators := make([]*ethpb.Validator, 100)
	for i := 0; i < len(validators); i++ {
		validators[i] = &ethpb.Validator{ExitEpoch: 2, EffectiveBalance: 1e9}
	}

	s := &pb.BeaconState{Validators: validators}
	stateRoot, err := ssz.HashTreeRoot(s)
	if err != nil {
		t.Fatal(err)
	}
	b := blocks.NewGenesisBlock(stateRoot[:])
	blkRoot, err := ssz.SigningRoot(b)
	if err != nil {
		t.Fatal(err)
	}
	if err := db.SaveState(ctx, s, blkRoot); err != nil {
		t.Fatal(err)
	}

	checkPoint := &ethpb.Checkpoint{Root: blkRoot[:]}
	if err := store.GenesisStore(ctx, checkPoint, checkPoint); err != nil {
		t.Fatal(err)
	}

	//    /- B1 (33 votes)
	// B0           /- B5 - B7 (33 votes)
	//    \- B3 - B4 - B6 - B8 (34 votes)
	for i := 0; i < len(validators); i++ {
		switch {
		case i < 33:
			if err := store.db.SaveValidatorLatestVote(ctx, uint64(i), &pb.ValidatorLatestVote{Root: roots[1]}); err != nil {
				t.Fatal(err)
			}
		case i > 66:
			if err := store.db.SaveValidatorLatestVote(ctx, uint64(i), &pb.ValidatorLatestVote{Root: roots[7]}); err != nil {
				t.Fatal(err)
			}
		default:
			if err := store.db.SaveValidatorLatestVote(ctx, uint64(i), &pb.ValidatorLatestVote{Root: roots[8]}); err != nil {
				t.Fatal(err)
			}
		}
	}

	tests := []struct {
		root []byte
		want uint64
	}{
		{root: roots[0], want: 100 * 1e9},
		{root: roots[1], want: 33 * 1e9},
		{root: roots[3], want: 67 * 1e9},
		{root: roots[4], want: 67 * 1e9},
		{root: roots[7], want: 33 * 1e9},
		{root: roots[8], want: 34 * 1e9},
	}
	for _, tt := range tests {
		got, err := store.latestAttestingBalance(ctx, tt.root)
		if err != nil {
			t.Fatal(err)
		}
		if got != tt.want {
			t.Errorf("Store.latestAttestingBalance() = %v, want %v", got, tt.want)
		}
	}
}

func TestStore_ChildrenBlocksFromParentRoot(t *testing.T) {
	helpers.ClearAllCaches()

	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)

	store := NewForkChoiceService(ctx, db)

	roots, err := blockTree1(db)
	if err != nil {
		t.Fatal(err)
	}

	filter := filters.NewFilter().SetParentRoot(roots[0]).SetStartSlot(0)
	children, err := store.db.BlockRoots(ctx, filter)
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(children, [][32]byte{bytesutil.ToBytes32(roots[1]), bytesutil.ToBytes32(roots[3])}) {
		t.Error("Did not receive correct children roots")
	}

	filter = filters.NewFilter().SetParentRoot(roots[0]).SetStartSlot(2)
	children, err = store.db.BlockRoots(ctx, filter)
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(children, [][32]byte{bytesutil.ToBytes32(roots[3])}) {
		t.Error("Did not receive correct children roots")
	}
}

func TestStore_GetHead(t *testing.T) {
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)

	store := NewForkChoiceService(ctx, db)

	roots, err := blockTree1(db)
	if err != nil {
		t.Fatal(err)
	}

	validators := make([]*ethpb.Validator, 100)
	for i := 0; i < len(validators); i++ {
		validators[i] = &ethpb.Validator{ExitEpoch: 2, EffectiveBalance: 1e9}
	}

	s := &pb.BeaconState{Validators: validators}
	stateRoot, err := ssz.HashTreeRoot(s)
	if err != nil {
		t.Fatal(err)
	}
	b := blocks.NewGenesisBlock(stateRoot[:])
	blkRoot, err := ssz.SigningRoot(b)
	if err != nil {
		t.Fatal(err)
	}

	checkPoint := &ethpb.Checkpoint{Root: blkRoot[:]}

	if err := store.GenesisStore(ctx, checkPoint, checkPoint); err != nil {
		t.Fatal(err)
	}
	if err := store.db.SaveState(ctx, s, bytesutil.ToBytes32(roots[0])); err != nil {
		t.Fatal(err)
	}
	store.justifiedCheckpt.Root = roots[0]
	if err := store.checkpointState.AddCheckpointState(&cache.CheckpointState{
		Checkpoint: store.justifiedCheckpt,
		State:      s,
	}); err != nil {
		t.Fatal(err)
	}

	//    /- B1 (33 votes)
	// B0           /- B5 - B7 (33 votes)
	//    \- B3 - B4 - B6 - B8 (34 votes)
	for i := 0; i < len(validators); i++ {
		switch {
		case i < 33:
			if err := store.db.SaveValidatorLatestVote(ctx, uint64(i), &pb.ValidatorLatestVote{Root: roots[1]}); err != nil {
				t.Fatal(err)
			}
		case i > 66:
			if err := store.db.SaveValidatorLatestVote(ctx, uint64(i), &pb.ValidatorLatestVote{Root: roots[7]}); err != nil {
				t.Fatal(err)
			}
		default:
			if err := store.db.SaveValidatorLatestVote(ctx, uint64(i), &pb.ValidatorLatestVote{Root: roots[8]}); err != nil {
				t.Fatal(err)
			}
		}
	}

	// Default head is B8.
	head, err := store.Head(ctx)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(head, roots[8]) {
		t.Error("Incorrect head")
	}

	// 1 validator switches its vote to B7 to reach 34 votes, enough to switch the head.
	if err := store.db.SaveValidatorLatestVote(ctx, 50, &pb.ValidatorLatestVote{Root: roots[7]}); err != nil {
		t.Fatal(err)
	}
	head, err = store.Head(ctx)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(head, roots[7]) {
		t.Error("Incorrect head")
	}

	// 18 validators switch their votes to B1 to reach 51 votes, enough to switch the head.
	for i := 0; i < 18; i++ {
		idx := 50 + uint64(i)
		if err := store.db.SaveValidatorLatestVote(ctx, idx, &pb.ValidatorLatestVote{Root: roots[1]}); err != nil {
			t.Fatal(err)
		}
	}
	head, err = store.Head(ctx)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(head, roots[1]) {
		t.Log(head)
		t.Error("Incorrect head")
	}
}
144	beacon-chain/blockchain/forkchoice/tree_test.go	Normal file
@@ -0,0 +1,144 @@
package forkchoice

import (
	"context"

	"github.com/prysmaticlabs/go-ssz"
	"github.com/prysmaticlabs/prysm/beacon-chain/db"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
	ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
	"github.com/prysmaticlabs/prysm/shared/bytesutil"
)

// blockTree1 constructs the following tree:
//    /- B1
// B0           /- B5 - B7
//    \- B3 - B4 - B6 - B8
// (B1 and B3 are siblings with the same parent, B0; index 2 of the returned
// roots is nil because there is no B2.)
func blockTree1(db db.Database) ([][]byte, error) {
	b0 := &ethpb.BeaconBlock{Slot: 0, ParentRoot: []byte{'g'}}
	r0, _ := ssz.SigningRoot(b0)
	b1 := &ethpb.BeaconBlock{Slot: 1, ParentRoot: r0[:]}
	r1, _ := ssz.SigningRoot(b1)
	b3 := &ethpb.BeaconBlock{Slot: 3, ParentRoot: r0[:]}
	r3, _ := ssz.SigningRoot(b3)
	b4 := &ethpb.BeaconBlock{Slot: 4, ParentRoot: r3[:]}
	r4, _ := ssz.SigningRoot(b4)
	b5 := &ethpb.BeaconBlock{Slot: 5, ParentRoot: r4[:]}
	r5, _ := ssz.SigningRoot(b5)
	b6 := &ethpb.BeaconBlock{Slot: 6, ParentRoot: r4[:]}
	r6, _ := ssz.SigningRoot(b6)
	b7 := &ethpb.BeaconBlock{Slot: 7, ParentRoot: r5[:]}
	r7, _ := ssz.SigningRoot(b7)
	b8 := &ethpb.BeaconBlock{Slot: 8, ParentRoot: r6[:]}
	r8, _ := ssz.SigningRoot(b8)
	for _, b := range []*ethpb.BeaconBlock{b0, b1, b3, b4, b5, b6, b7, b8} {
		if err := db.SaveBlock(context.Background(), b); err != nil {
			return nil, err
		}
		if err := db.SaveState(context.Background(), &pb.BeaconState{}, bytesutil.ToBytes32(b.ParentRoot)); err != nil {
			return nil, err
		}
	}
	return [][]byte{r0[:], r1[:], nil, r3[:], r4[:], r5[:], r6[:], r7[:], r8[:]}, nil
}

// blockTree2 constructs the following tree:
|
||||
// Scenario graph: shorturl.at/loyP6
|
||||
//
|
||||
//digraph G {
|
||||
// rankdir=LR;
|
||||
// node [shape="none"];
|
||||
//
|
||||
// subgraph blocks {
|
||||
// rankdir=LR;
|
||||
// node [shape="box"];
|
||||
// a->b;
|
||||
// a->c;
|
||||
// b->d;
|
||||
// b->e;
|
||||
// c->f;
|
||||
// c->g;
|
||||
// d->h
|
||||
// d->i
|
||||
// d->j
|
||||
// d->k
|
||||
// h->l
|
||||
// h->m
|
||||
// g->n
|
||||
// g->o
|
||||
// e->p
|
||||
// }
|
||||
//}
|
||||
func blockTree2(db db.Database) ([][]byte, error) {
|
||||
b0 := ðpb.BeaconBlock{Slot: 0, ParentRoot: []byte{'g'}}
|
||||
r0, _ := ssz.SigningRoot(b0)
|
||||
b1 := ðpb.BeaconBlock{Slot: 1, ParentRoot: r0[:]}
|
||||
r1, _ := ssz.SigningRoot(b1)
|
||||
b2 := ðpb.BeaconBlock{Slot: 2, ParentRoot: r0[:]}
|
||||
r2, _ := ssz.SigningRoot(b2)
|
||||
b3 := ðpb.BeaconBlock{Slot: 3, ParentRoot: r1[:]}
|
||||
r3, _ := ssz.SigningRoot(b3)
|
||||
b4 := ðpb.BeaconBlock{Slot: 4, ParentRoot: r1[:]}
|
||||
r4, _ := ssz.SigningRoot(b4)
|
||||
b5 := ðpb.BeaconBlock{Slot: 5, ParentRoot: r2[:]}
|
||||
r5, _ := ssz.SigningRoot(b5)
|
||||
b6 := ðpb.BeaconBlock{Slot: 6, ParentRoot: r2[:]}
|
||||
r6, _ := ssz.SigningRoot(b6)
|
||||
b7 := ðpb.BeaconBlock{Slot: 7, ParentRoot: r3[:]}
|
||||
r7, _ := ssz.SigningRoot(b7)
|
||||
b8 := ðpb.BeaconBlock{Slot: 8, ParentRoot: r3[:]}
|
||||
r8, _ := ssz.SigningRoot(b8)
|
||||
b9 := ðpb.BeaconBlock{Slot: 9, ParentRoot: r3[:]}
|
||||
r9, _ := ssz.SigningRoot(b9)
|
||||
b10 := ðpb.BeaconBlock{Slot: 10, ParentRoot: r3[:]}
|
||||
r10, _ := ssz.SigningRoot(b10)
|
||||
b11 := ðpb.BeaconBlock{Slot: 11, ParentRoot: r4[:]}
|
||||
r11, _ := ssz.SigningRoot(b11)
|
||||
b12 := ðpb.BeaconBlock{Slot: 12, ParentRoot: r6[:]}
|
||||
r12, _ := ssz.SigningRoot(b12)
|
||||
b13 := ðpb.BeaconBlock{Slot: 13, ParentRoot: r6[:]}
|
||||
r13, _ := ssz.SigningRoot(b13)
|
||||
b14 := ðpb.BeaconBlock{Slot: 14, ParentRoot: r7[:]}
|
||||
r14, _ := ssz.SigningRoot(b14)
|
||||
b15 := ðpb.BeaconBlock{Slot: 15, ParentRoot: r7[:]}
|
||||
r15, _ := ssz.SigningRoot(b15)
|
||||
for _, b := range []*ethpb.BeaconBlock{b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15} {
|
||||
if err := db.SaveBlock(context.Background(), b); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := db.SaveState(context.Background(), &pb.BeaconState{}, bytesutil.ToBytes32(b.ParentRoot)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return [][]byte{r0[:], r1[:], r2[:], r3[:], r4[:], r5[:], r6[:], r7[:], r8[:], r9[:], r10[:], r11[:], r12[:], r13[:], r14[:], r15[:]}, nil
|
||||
}
|
||||
|
||||
// blockTree3 constructs a tree that is 512 blocks in a row.
|
||||
// B0 - B1 - B2 - B3 - .... - B512
|
||||
func blockTree3(db db.Database) ([][]byte, error) {
|
||||
blkCount := 512
|
||||
roots := make([][]byte, 0, blkCount)
|
||||
blks := make([]*ethpb.BeaconBlock, 0, blkCount)
|
||||
b0 := ðpb.BeaconBlock{Slot: 0, ParentRoot: []byte{'g'}}
|
||||
r0, _ := ssz.SigningRoot(b0)
|
||||
roots = append(roots, r0[:])
|
||||
blks = append(blks, b0)
|
||||
|
||||
for i := 1; i < blkCount; i++ {
|
||||
b := ðpb.BeaconBlock{Slot: uint64(i), ParentRoot: roots[len(roots)-1]}
|
||||
r, _ := ssz.SigningRoot(b)
|
||||
roots = append(roots, r[:])
|
||||
blks = append(blks, b)
|
||||
}
|
||||
|
||||
for _, b := range blks {
|
||||
if err := db.SaveBlock(context.Background(), b); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := db.SaveState(context.Background(), &pb.BeaconState{}, bytesutil.ToBytes32(b.ParentRoot)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return roots, nil
|
||||
}
|
||||
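As a reading aid, a hypothetical test built on these helpers might look like the sketch below. It is not part of the diff; it assumes only the testDB.SetupDB/TeardownDB helpers and the db.Block accessor already used by the other tests in this changeset.

package forkchoice

import (
	"context"
	"testing"

	testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
	"github.com/prysmaticlabs/prysm/shared/bytesutil"
)

// Sketch only: builds tree 1 and reads one of the saved blocks back.
func TestBlockTree1_RoundTrip(t *testing.T) {
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)

	roots, err := blockTree1(db)
	if err != nil {
		t.Fatal(err)
	}
	// Note that roots[2] is nil by construction: tree 1 has no B2.
	blk, err := db.Block(context.Background(), bytesutil.ToBytes32(roots[8]))
	if err != nil {
		t.Fatal(err)
	}
	if blk == nil || blk.Slot != 8 {
		t.Errorf("expected B8 at slot 8, got %v", blk)
	}
}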
57	beacon-chain/blockchain/info.go	Normal file
@@ -0,0 +1,57 @@
package blockchain

import (
	"bytes"
	"encoding/hex"
	"fmt"
	"net/http"
	"sort"

	"github.com/prysmaticlabs/prysm/shared/bytesutil"
	"github.com/sirupsen/logrus"
)

const latestSlotCount = 10

// HeadsHandler is a handler to serve the /heads page in metrics.
func (s *Service) HeadsHandler(w http.ResponseWriter, _ *http.Request) {
	buf := new(bytes.Buffer)

	// Render into the buffer first so the status header is written before the body.
	if _, err := fmt.Fprintf(buf, "\n %s\t%s\t", "Head slot", "Head root"); err != nil {
		logrus.WithError(err).Error("Failed to render chain heads page")
		return
	}

	if _, err := fmt.Fprintf(buf, "\n %s\t%s\t", "---------", "---------"); err != nil {
		logrus.WithError(err).Error("Failed to render chain heads page")
		return
	}

	slots := s.latestHeadSlots()
	for _, slot := range slots {
		r := hex.EncodeToString(bytesutil.Trunc(s.canonicalRoots[uint64(slot)]))
		if _, err := fmt.Fprintf(buf, "\n %d\t\t%s\t", slot, r); err != nil {
			logrus.WithError(err).Error("Failed to render chain heads page")
			return
		}
	}

	w.WriteHeader(http.StatusOK)
	if _, err := w.Write(buf.Bytes()); err != nil {
		log.WithError(err).Error("Failed to render chain heads page")
	}
}

// latestHeadSlots returns the latest head slots as a sorted slice, capped at latestSlotCount entries.
func (s *Service) latestHeadSlots() []int {
	slots := make([]int, 0, len(s.canonicalRoots))
	for k := range s.canonicalRoots {
		slots = append(slots, int(k))
	}
	sort.Ints(slots)
	if len(slots) > latestSlotCount {
		return slots[len(slots)-latestSlotCount:]
	}
	return slots
}
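A handler like this only does something once it is mounted on a server. The snippet below is a minimal wiring sketch, not part of the diff; the mux, address, and serveHeads name are illustrative.

package main

import (
	"log"
	"net/http"
)

// serveHeads mounts a HeadsHandler-style http.HandlerFunc on a debug mux.
// The listen address is an assumption for illustration.
func serveHeads(headsHandler http.HandlerFunc) {
	mux := http.NewServeMux()
	mux.HandleFunc("/heads", headsHandler)
	go func() {
		if err := http.ListenAndServe("127.0.0.1:8080", mux); err != nil {
			log.Printf("heads page server stopped: %v", err)
		}
	}()
}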
17	beacon-chain/blockchain/log.go	Normal file
@@ -0,0 +1,17 @@
package blockchain

import (
	ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
	"github.com/sirupsen/logrus"
)

var log = logrus.WithField("prefix", "blockchain")

// logStateTransitionData logs state-transition-related data every slot.
func logStateTransitionData(b *ethpb.BeaconBlock, r []byte) {
	log.WithFields(logrus.Fields{
		"slot":         b.Slot,
		"attestations": len(b.Body.Attestations),
		"deposits":     len(b.Body.Deposits),
	}).Info("Finished applying state transition")
}
56	beacon-chain/blockchain/metrics.go	Normal file
@@ -0,0 +1,56 @@
package blockchain

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/prysmaticlabs/prysm/shared/bytesutil"
)

var (
	beaconSlot = promauto.NewGauge(prometheus.GaugeOpts{
		Name: "beacon_slot",
		Help: "Latest slot of the beacon chain state",
	})
	beaconHeadSlot = promauto.NewGauge(prometheus.GaugeOpts{
		Name: "beacon_head_slot",
		Help: "Slot of the head block of the beacon chain",
	})
	beaconHeadRoot = promauto.NewGauge(prometheus.GaugeOpts{
		Name: "beacon_head_root",
		Help: "Root of the head block of the beacon chain, exposed as the lowest 8 bytes interpreted as little endian",
	})
	competingAtts = promauto.NewCounter(prometheus.CounterOpts{
		Name: "competing_attestations",
		Help: "The # of attestations received and processed from a competing chain",
	})
	competingBlks = promauto.NewCounter(prometheus.CounterOpts{
		Name: "competing_blocks",
		Help: "The # of blocks received and processed from a competing chain",
	})
	processedBlkNoPubsub = promauto.NewCounter(prometheus.CounterOpts{
		Name: "processed_no_pubsub_block_counter",
		Help: "The # of blocks processed without pubsub, which usually means blocks from sync",
	})
	processedBlkNoPubsubForkchoice = promauto.NewCounter(prometheus.CounterOpts{
		Name: "processed_no_pubsub_forkchoice_block_counter",
		Help: "The # of blocks processed without pubsub and fork choice, which usually indicates blocks from initial sync",
	})
	processedBlk = promauto.NewCounter(prometheus.CounterOpts{
		Name: "processed_block_counter",
		Help: "The total # of blocks processed in the blockchain service, with fork choice and pubsub",
	})
	processedAttNoPubsub = promauto.NewCounter(prometheus.CounterOpts{
		Name: "processed_no_pubsub_attestation_counter",
		Help: "The # of attestations processed without pubsub, which usually means attestations from sync",
	})
	processedAtt = promauto.NewCounter(prometheus.CounterOpts{
		Name: "processed_attestation_counter",
		Help: "The # of attestations processed with pubsub and fork choice, which usually means attestations from rpc",
	})
)

func (s *Service) reportSlotMetrics(currentSlot uint64) {
	beaconSlot.Set(float64(currentSlot))
	beaconHeadSlot.Set(float64(s.HeadSlot()))
	beaconHeadRoot.Set(float64(bytesutil.ToLowInt64(s.HeadRoot())))
}
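Because a Prometheus gauge holds a float64, the 32-byte head root cannot be exported whole; only its low 8 bytes survive, as the beacon_head_root help string notes. A small in-package sketch of that encoding (not part of the diff; the function name is illustrative):

package blockchain

import "github.com/prysmaticlabs/prysm/shared/bytesutil"

// recordHeadRootSketch shows what the beacon_head_root gauge actually stores:
// only the lowest 8 bytes of the root, interpreted as little endian.
func recordHeadRootSketch(root [32]byte) {
	beaconHeadRoot.Set(float64(bytesutil.ToLowInt64(root[:])))
}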
112	beacon-chain/blockchain/receive_attestation.go	Normal file
@@ -0,0 +1,112 @@
package blockchain

import (
	"bytes"
	"context"
	"encoding/hex"
	"fmt"

	"github.com/pkg/errors"
	"github.com/prysmaticlabs/go-ssz"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
	"github.com/prysmaticlabs/prysm/shared/bytesutil"
	"github.com/sirupsen/logrus"
	"go.opencensus.io/trace"
)

// AttestationReceiver interface defines the methods of the chain service for receiving and processing new attestations.
type AttestationReceiver interface {
	ReceiveAttestation(ctx context.Context, att *ethpb.Attestation) error
	ReceiveAttestationNoPubsub(ctx context.Context, att *ethpb.Attestation) error
}

// ReceiveAttestation defines the operations that are performed on an attestation
// received from regular sync. The operations consist of:
// 1. Gossip the attestation to other peers
// 2. Validate the attestation and update the validator's latest vote
// 3. Apply fork choice to the processed attestation
// 4. Save the latest head info
func (s *Service) ReceiveAttestation(ctx context.Context, att *ethpb.Attestation) error {
	ctx, span := trace.StartSpan(ctx, "beacon-chain.blockchain.ReceiveAttestation")
	defer span.End()

	// Broadcast the new attestation to the network.
	if err := s.p2p.Broadcast(ctx, att); err != nil {
		return errors.Wrap(err, "could not broadcast attestation")
	}

	attDataRoot, err := ssz.HashTreeRoot(att.Data)
	if err != nil {
		log.WithError(err).Error("Failed to hash attestation")
	}

	log.WithFields(logrus.Fields{
		"attRoot":   fmt.Sprintf("%#x", attDataRoot),
		"blockRoot": fmt.Sprintf("%#x", att.Data.BeaconBlockRoot),
	}).Debug("Broadcasting attestation")

	if err := s.ReceiveAttestationNoPubsub(ctx, att); err != nil {
		return err
	}

	processedAtt.Inc()
	return nil
}

// ReceiveAttestationNoPubsub defines the operations that are performed on an attestation
// received from regular sync, minus pubsub. The operations consist of:
// 1. Validate the attestation and update the validator's latest vote
// 2. Apply fork choice to the processed attestation
// 3. Save the latest head info
func (s *Service) ReceiveAttestationNoPubsub(ctx context.Context, att *ethpb.Attestation) error {
	ctx, span := trace.StartSpan(ctx, "beacon-chain.blockchain.ReceiveAttestationNoPubsub")
	defer span.End()

	// Update the fork choice store with the new attestation.
	attSlot, err := s.forkChoiceStore.OnAttestation(ctx, att)
	if err != nil {
		return errors.Wrap(err, "could not process attestation from fork choice service")
	}

	// Run fork choice for the head block after updating the fork choice store.
	headRoot, err := s.forkChoiceStore.Head(ctx)
	if err != nil {
		return errors.Wrap(err, "could not get head from fork choice service")
	}
	// Only save the head if it differs from the current head.
	if !bytes.Equal(headRoot, s.HeadRoot()) {
		headBlk, err := s.beaconDB.Block(ctx, bytesutil.ToBytes32(headRoot))
		if err != nil {
			return errors.Wrap(err, "could not compute state from block head")
		}
		if err := s.saveHead(ctx, headBlk, bytesutil.ToBytes32(headRoot)); err != nil {
			return errors.Wrap(err, "could not save head")
		}
	}

	// Skip checking for competing attestation target roots at the epoch boundary.
	if !helpers.IsEpochStart(attSlot) {
		s.headLock.RLock()
		defer s.headLock.RUnlock()
		targetRoot, err := helpers.BlockRoot(s.headState, att.Data.Target.Epoch)
		if err != nil {
			return errors.Wrapf(err, "could not get target root for epoch %d", att.Data.Target.Epoch)
		}
		isCompetingAtts(targetRoot, att.Data.Target.Root[:])
	}

	processedAttNoPubsub.Inc()
	return nil
}

// This checks if the attestation is from a competing chain, emits a warning, and updates metrics.
func isCompetingAtts(headTargetRoot []byte, attTargetRoot []byte) {
	if !bytes.Equal(attTargetRoot, headTargetRoot) {
		log.WithFields(logrus.Fields{
			"attTargetRoot":  hex.EncodeToString(attTargetRoot),
			"headTargetRoot": hex.EncodeToString(headTargetRoot),
		}).Warn("target heads different from new attestation")
		competingAtts.Inc()
	}
}
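Taken together, the two entry points give callers a simple routing rule. The sketch below is illustrative only and not part of the diff; handleIncomingAtt and fromPubsub are hypothetical names.

package blockchain

import (
	"context"

	ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
)

// handleIncomingAtt routes an attestation to the right AttestationReceiver
// entry point depending on where it came from.
func handleIncomingAtt(ctx context.Context, recv AttestationReceiver, att *ethpb.Attestation, fromPubsub bool) error {
	if fromPubsub {
		// The network has already seen this attestation; skip re-broadcasting.
		return recv.ReceiveAttestationNoPubsub(ctx, att)
	}
	// Locally produced or RPC-submitted: broadcast first, then process.
	return recv.ReceiveAttestation(ctx, att)
}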
113	beacon-chain/blockchain/receive_attestation_test.go	Normal file
@@ -0,0 +1,113 @@
package blockchain

import (
	"testing"

	"github.com/prysmaticlabs/go-ssz"
	testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
	ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
	"github.com/prysmaticlabs/prysm/shared/testutil"
	logTest "github.com/sirupsen/logrus/hooks/test"
	"golang.org/x/net/context"
)

func TestReceiveAttestation_ProcessCorrectly(t *testing.T) {
	hook := logTest.NewGlobal()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)
	ctx := context.Background()

	chainService := setupBeaconChain(t, db)
	r, _ := ssz.SigningRoot(&ethpb.BeaconBlock{})
	chainService.forkChoiceStore = &store{headRoot: r[:]}

	b := &ethpb.BeaconBlock{}
	if err := chainService.beaconDB.SaveBlock(ctx, b); err != nil {
		t.Fatal(err)
	}
	root, err := ssz.SigningRoot(b)
	if err != nil {
		t.Fatal(err)
	}
	if err := chainService.beaconDB.SaveState(ctx, &pb.BeaconState{}, root); err != nil {
		t.Fatal(err)
	}

	a := &ethpb.Attestation{Data: &ethpb.AttestationData{
		Target: &ethpb.Checkpoint{Root: root[:]},
	}}
	if err := chainService.ReceiveAttestation(ctx, a); err != nil {
		t.Fatal(err)
	}

	testutil.AssertLogsContain(t, hook, "Saved new head info")
	testutil.AssertLogsContain(t, hook, "Broadcasting attestation")
}

func TestReceiveAttestation_SameHead(t *testing.T) {
	hook := logTest.NewGlobal()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)
	ctx := context.Background()

	chainService := setupBeaconChain(t, db)
	r, _ := ssz.SigningRoot(&ethpb.BeaconBlock{})
	chainService.forkChoiceStore = &store{headRoot: r[:]}
	chainService.canonicalRoots[0] = r[:]

	b := &ethpb.BeaconBlock{}
	if err := chainService.beaconDB.SaveBlock(ctx, b); err != nil {
		t.Fatal(err)
	}
	root, err := ssz.SigningRoot(b)
	if err != nil {
		t.Fatal(err)
	}
	if err := chainService.beaconDB.SaveState(ctx, &pb.BeaconState{}, root); err != nil {
		t.Fatal(err)
	}

	a := &ethpb.Attestation{Data: &ethpb.AttestationData{
		Target: &ethpb.Checkpoint{Root: root[:]},
	}}
	if err := chainService.ReceiveAttestation(ctx, a); err != nil {
		t.Fatal(err)
	}

	testutil.AssertLogsDoNotContain(t, hook, "Saved new head info")
	testutil.AssertLogsContain(t, hook, "Broadcasting attestation")
}

func TestReceiveAttestationNoPubsub_ProcessCorrectly(t *testing.T) {
	hook := logTest.NewGlobal()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)
	ctx := context.Background()

	chainService := setupBeaconChain(t, db)
	r, _ := ssz.SigningRoot(&ethpb.BeaconBlock{})
	chainService.forkChoiceStore = &store{headRoot: r[:]}

	b := &ethpb.BeaconBlock{}
	if err := chainService.beaconDB.SaveBlock(ctx, b); err != nil {
		t.Fatal(err)
	}
	root, err := ssz.SigningRoot(b)
	if err != nil {
		t.Fatal(err)
	}
	if err := chainService.beaconDB.SaveState(ctx, &pb.BeaconState{}, root); err != nil {
		t.Fatal(err)
	}

	a := &ethpb.Attestation{Data: &ethpb.AttestationData{
		Target: &ethpb.Checkpoint{Root: root[:]},
	}}
	if err := chainService.ReceiveAttestationNoPubsub(ctx, a); err != nil {
		t.Fatal(err)
	}

	testutil.AssertLogsContain(t, hook, "Saved new head info")
	testutil.AssertLogsDoNotContain(t, hook, "Broadcasting attestation")
}
227	beacon-chain/blockchain/receive_block.go	Normal file
@@ -0,0 +1,227 @@
package blockchain

import (
	"bytes"
	"context"
	"encoding/hex"

	"github.com/gogo/protobuf/proto"
	"github.com/pkg/errors"
	"github.com/prysmaticlabs/go-ssz"
	ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
	"github.com/prysmaticlabs/prysm/shared/bytesutil"
	"github.com/prysmaticlabs/prysm/shared/traceutil"
	"github.com/sirupsen/logrus"
	"go.opencensus.io/trace"
)

// BlockReceiver interface defines the methods of the chain service for receiving and processing new blocks.
type BlockReceiver interface {
	ReceiveBlock(ctx context.Context, block *ethpb.BeaconBlock) error
	ReceiveBlockNoPubsub(ctx context.Context, block *ethpb.BeaconBlock) error
	ReceiveBlockNoPubsubForkchoice(ctx context.Context, block *ethpb.BeaconBlock) error
	ReceiveBlockNoVerify(ctx context.Context, block *ethpb.BeaconBlock) error
}

// ReceiveBlock defines the operations that are performed on a block
// received from the rpc service. The operations consist of:
// 1. Gossip the block to other peers
// 2. Validate the block, apply the state transition and update checkpoints
// 3. Apply fork choice to the processed block
// 4. Save the latest head info
func (s *Service) ReceiveBlock(ctx context.Context, block *ethpb.BeaconBlock) error {
	ctx, span := trace.StartSpan(ctx, "beacon-chain.blockchain.ReceiveBlock")
	defer span.End()

	root, err := ssz.SigningRoot(block)
	if err != nil {
		return errors.Wrap(err, "could not get signing root on received block")
	}

	// Broadcast the new block to the network.
	if err := s.p2p.Broadcast(ctx, block); err != nil {
		return errors.Wrap(err, "could not broadcast block")
	}
	log.WithFields(logrus.Fields{
		"blockRoot": hex.EncodeToString(root[:]),
	}).Debug("Broadcasting block")

	if err := s.ReceiveBlockNoPubsub(ctx, block); err != nil {
		return err
	}

	processedBlk.Inc()
	return nil
}

// ReceiveBlockNoPubsub defines the operations (minus pubsub)
// that are performed on a block received from the regular sync service. The operations consist of:
// 1. Validate the block, apply the state transition and update checkpoints
// 2. Apply fork choice to the processed block
// 3. Save the latest head info
func (s *Service) ReceiveBlockNoPubsub(ctx context.Context, block *ethpb.BeaconBlock) error {
	ctx, span := trace.StartSpan(ctx, "beacon-chain.blockchain.ReceiveBlockNoPubsub")
	defer span.End()
	blockCopy := proto.Clone(block).(*ethpb.BeaconBlock)

	// Apply the state transition on the new block.
	if err := s.forkChoiceStore.OnBlock(ctx, blockCopy); err != nil {
		err := errors.Wrap(err, "could not process block from fork choice service")
		traceutil.AnnotateError(span, err)
		return err
	}
	root, err := ssz.SigningRoot(blockCopy)
	if err != nil {
		return errors.Wrap(err, "could not get signing root on received block")
	}

	// Run fork choice after applying the state transition on the new block.
	headRoot, err := s.forkChoiceStore.Head(ctx)
	if err != nil {
		return errors.Wrap(err, "could not get head from fork choice service")
	}
	headBlk, err := s.beaconDB.Block(ctx, bytesutil.ToBytes32(headRoot))
	if err != nil {
		return errors.Wrap(err, "could not compute state from block head")
	}

	// Only save the head if it differs from the current head.
	if !bytes.Equal(headRoot, s.HeadRoot()) {
		if err := s.saveHead(ctx, headBlk, bytesutil.ToBytes32(headRoot)); err != nil {
			return errors.Wrap(err, "could not save head")
		}
	}

	// Remove the block's contained deposits, attestations, and other operations from persistent storage.
	if err := s.cleanupBlockOperations(ctx, blockCopy); err != nil {
		return errors.Wrap(err, "could not clean up block deposits, attestations, and other operations")
	}

	// Report block and fork choice metrics.
	s.reportSlotMetrics(blockCopy.Slot)

	// Log if the block is a competing block.
	isCompetingBlock(root[:], blockCopy.Slot, headRoot, headBlk.Slot)

	// Log state transition data.
	logStateTransitionData(blockCopy, root[:])

	processedBlkNoPubsub.Inc()

	// We write the latest saved head root to a feed for consumption by other services.
	s.headUpdatedFeed.Send(bytesutil.ToBytes32(headRoot))
	return nil
}

// ReceiveBlockNoPubsubForkchoice defines all the operations (minus pubsub and fork choice)
// that are performed on a block received from the initial sync service. The operations consist of:
// 1. Validate the block, apply the state transition and update checkpoints
// 2. Save the latest head info
func (s *Service) ReceiveBlockNoPubsubForkchoice(ctx context.Context, block *ethpb.BeaconBlock) error {
	ctx, span := trace.StartSpan(ctx, "beacon-chain.blockchain.ReceiveBlockNoForkchoice")
	defer span.End()
	blockCopy := proto.Clone(block).(*ethpb.BeaconBlock)

	// Apply the state transition on the incoming newly received block.
	if err := s.forkChoiceStore.OnBlock(ctx, blockCopy); err != nil {
		err := errors.Wrap(err, "could not process block from fork choice service")
		traceutil.AnnotateError(span, err)
		return err
	}
	root, err := ssz.SigningRoot(blockCopy)
	if err != nil {
		return errors.Wrap(err, "could not get signing root on received block")
	}

	if !bytes.Equal(root[:], s.HeadRoot()) {
		if err := s.saveHead(ctx, blockCopy, root); err != nil {
			return errors.Wrap(err, "could not save head")
		}
	}

	// Remove the block's contained deposits, attestations, and other operations from persistent storage.
	if err := s.cleanupBlockOperations(ctx, blockCopy); err != nil {
		return errors.Wrap(err, "could not clean up block deposits, attestations, and other operations")
	}

	// Report block and fork choice metrics.
	s.reportSlotMetrics(blockCopy.Slot)

	// Log state transition data.
	logStateTransitionData(blockCopy, root[:])

	// We write the latest saved head root to a feed for consumption by other services.
	s.headUpdatedFeed.Send(root)
	processedBlkNoPubsubForkchoice.Inc()
	return nil
}

// ReceiveBlockNoVerify runs the state transition on an input block without verifying the block's BLS contents.
// Depending on the security model, this is the "minimal" work a node can do to sync the chain.
// It simulates light client behavior and assumes 100% trust in the syncing peer.
func (s *Service) ReceiveBlockNoVerify(ctx context.Context, block *ethpb.BeaconBlock) error {
	ctx, span := trace.StartSpan(ctx, "beacon-chain.blockchain.ReceiveBlockNoVerify")
	defer span.End()
	blockCopy := proto.Clone(block).(*ethpb.BeaconBlock)

	// Apply the state transition on the incoming newly received blockCopy without verifying its BLS contents.
	if err := s.forkChoiceStore.OnBlockNoVerifyStateTransition(ctx, blockCopy); err != nil {
		return errors.Wrap(err, "could not process blockCopy from fork choice service")
	}
	root, err := ssz.SigningRoot(blockCopy)
	if err != nil {
		return errors.Wrap(err, "could not get signing root on received blockCopy")
	}

	if !bytes.Equal(root[:], s.HeadRoot()) {
		if err := s.saveHead(ctx, blockCopy, root); err != nil {
			err := errors.Wrap(err, "could not save head")
			traceutil.AnnotateError(span, err)
			return err
		}
	}

	// Report blockCopy and fork choice metrics.
	s.reportSlotMetrics(blockCopy.Slot)

	// Log state transition data.
	log.WithFields(logrus.Fields{
		"slot":         blockCopy.Slot,
		"attestations": len(blockCopy.Body.Attestations),
		"deposits":     len(blockCopy.Body.Deposits),
	}).Debug("Finished applying state transition")

	// We write the latest saved head root to a feed for consumption by other services.
	s.headUpdatedFeed.Send(root)
	return nil
}

// cleanupBlockOperations processes and cleans up any block operations relevant to the beacon node
// such as attestations, exits, and deposits. We update the latest seen attestation by validator
// in the local node's runtime, clean up and remove pending deposits which have been included in the block
// from our node's local cache, and process validator exits and more.
func (s *Service) cleanupBlockOperations(ctx context.Context, block *ethpb.BeaconBlock) error {
	// Forward the processed block to the operation pool to remove individual operations from the DB.
	if s.opsPoolService.IncomingProcessedBlockFeed().Send(block) == 0 {
		log.Error("Sent processed block to no subscribers")
	}

	// Remove pending deposits from the deposit queue.
	for _, dep := range block.Body.Deposits {
		s.depositCache.RemovePendingDeposit(ctx, dep)
	}
	return nil
}

// This checks if the block is from a competing chain, emits a warning, and updates metrics.
func isCompetingBlock(root []byte, slot uint64, headRoot []byte, headSlot uint64) {
	if !bytes.Equal(root[:], headRoot) {
		log.WithFields(logrus.Fields{
			"blkSlot":  slot,
			"blkRoot":  hex.EncodeToString(root[:]),
			"headSlot": headSlot,
			"headRoot": hex.EncodeToString(headRoot),
		}).Warn("Calculated head diffs from new block")
		competingBlks.Inc()
	}
}
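The four entry points differ only in how much work they skip. The sketch below (not part of the diff; the blockOrigin type, constants, and routeBlock name are illustrative) shows how a caller might pick among the first three:

package blockchain

import (
	"context"

	ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
)

// blockOrigin labels where a block came from; illustrative only.
type blockOrigin int

const (
	fromRPC blockOrigin = iota
	fromRegularSync
	fromInitialSync
)

// routeBlock dispatches a block to the matching BlockReceiver entry point.
func routeBlock(ctx context.Context, recv BlockReceiver, blk *ethpb.BeaconBlock, origin blockOrigin) error {
	switch origin {
	case fromRPC:
		return recv.ReceiveBlock(ctx, blk) // gossip, state transition, fork choice
	case fromRegularSync:
		return recv.ReceiveBlockNoPubsub(ctx, blk) // peers already have the block
	default:
		return recv.ReceiveBlockNoPubsubForkchoice(ctx, blk) // initial sync: fork choice deferred
	}
}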
275	beacon-chain/blockchain/receive_block_test.go	Normal file
@@ -0,0 +1,275 @@
package blockchain

import (
	"bytes"
	"context"
	"reflect"
	"testing"

	"github.com/prysmaticlabs/go-ssz"
	b "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
	testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
	ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
	"github.com/prysmaticlabs/prysm/shared/testutil"
	logTest "github.com/sirupsen/logrus/hooks/test"
)

func TestReceiveBlock_ProcessCorrectly(t *testing.T) {
	hook := logTest.NewGlobal()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)
	ctx := context.Background()

	chainService := setupBeaconChain(t, db)
	deposits, _, privKeys := testutil.SetupInitialDeposits(t, 100)
	beaconState, err := state.GenesisBeaconState(deposits, 0, &ethpb.Eth1Data{BlockHash: make([]byte, 32)})
	if err != nil {
		t.Fatal(err)
	}
	beaconState.Eth1Data.BlockHash = nil
	beaconState.Eth1DepositIndex = 100
	stateRoot, err := ssz.HashTreeRoot(beaconState)
	if err != nil {
		t.Fatal(err)
	}
	genesis := b.NewGenesisBlock(stateRoot[:])
	bodyRoot, err := ssz.HashTreeRoot(genesis.Body)
	if err != nil {
		t.Fatal(err)
	}

	genesisBlkRoot, err := ssz.SigningRoot(genesis)
	if err != nil {
		t.Fatal(err)
	}
	cp := &ethpb.Checkpoint{Root: genesisBlkRoot[:]}
	if err := chainService.forkChoiceStore.GenesisStore(ctx, cp, cp); err != nil {
		t.Fatal(err)
	}

	beaconState.LatestBlockHeader = &ethpb.BeaconBlockHeader{
		Slot:       genesis.Slot,
		ParentRoot: genesis.ParentRoot,
		BodyRoot:   bodyRoot[:],
		StateRoot:  genesis.StateRoot,
	}
	if err := chainService.beaconDB.SaveBlock(ctx, genesis); err != nil {
		t.Fatalf("Could not save block to db: %v", err)
	}
	parentRoot, err := ssz.SigningRoot(genesis)
	if err != nil {
		t.Fatal(err)
	}

	if err := db.SaveState(ctx, beaconState, parentRoot); err != nil {
		t.Fatal(err)
	}

	slot := beaconState.Slot + 1
	epoch := helpers.SlotToEpoch(slot)
	beaconState.Slot++
	randaoReveal, err := testutil.CreateRandaoReveal(beaconState, epoch, privKeys)
	if err != nil {
		t.Fatal(err)
	}
	beaconState.Slot--

	block := &ethpb.BeaconBlock{
		Slot:       slot,
		ParentRoot: parentRoot[:],
		Body: &ethpb.BeaconBlockBody{
			Eth1Data: &ethpb.Eth1Data{
				DepositCount: uint64(len(deposits)),
				DepositRoot:  []byte("a"),
				BlockHash:    []byte("b"),
			},
			RandaoReveal: randaoReveal[:],
			Attestations: nil,
		},
	}

	stateRootCandidate, err := state.ExecuteStateTransitionNoVerify(context.Background(), beaconState, block)
	if err != nil {
		t.Fatal(err)
	}

	stateRoot, err = ssz.HashTreeRoot(stateRootCandidate)
	if err != nil {
		t.Fatal(err)
	}
	block.StateRoot = stateRoot[:]

	block, err = testutil.SignBlock(beaconState, block, privKeys)
	if err != nil {
		t.Error(err)
	}

	if err := chainService.beaconDB.SaveBlock(ctx, block); err != nil {
		t.Fatal(err)
	}
	if err := chainService.ReceiveBlock(context.Background(), block); err != nil {
		t.Errorf("Block failed processing: %v", err)
	}
	testutil.AssertLogsContain(t, hook, "Finished applying state transition")
}

func TestReceiveReceiveBlockNoPubsub_CanSaveHeadInfo(t *testing.T) {
	hook := logTest.NewGlobal()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)
	ctx := context.Background()

	chainService := setupBeaconChain(t, db)

	headBlk := &ethpb.BeaconBlock{Slot: 100}
	if err := db.SaveBlock(ctx, headBlk); err != nil {
		t.Fatal(err)
	}
	r, err := ssz.SigningRoot(headBlk)
	if err != nil {
		t.Fatal(err)
	}
	chainService.forkChoiceStore = &store{headRoot: r[:]}

	if err := chainService.ReceiveBlockNoPubsub(ctx, &ethpb.BeaconBlock{
		Slot: 1,
		Body: &ethpb.BeaconBlockBody{}}); err != nil {
		t.Fatal(err)
	}

	if !bytes.Equal(r[:], chainService.HeadRoot()) {
		t.Error("Incorrect head root saved")
	}

	if !reflect.DeepEqual(headBlk, chainService.HeadBlock()) {
		t.Error("Incorrect head block saved")
	}

	testutil.AssertLogsContain(t, hook, "Saved new head info")
}

func TestReceiveReceiveBlockNoPubsub_SameHead(t *testing.T) {
	hook := logTest.NewGlobal()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)
	ctx := context.Background()

	chainService := setupBeaconChain(t, db)

	headBlk := &ethpb.BeaconBlock{}
	if err := db.SaveBlock(ctx, headBlk); err != nil {
		t.Fatal(err)
	}
	newBlk := &ethpb.BeaconBlock{
		Slot: 1,
		Body: &ethpb.BeaconBlockBody{}}
	newRoot, _ := ssz.SigningRoot(newBlk)
	if err := db.SaveBlock(ctx, newBlk); err != nil {
		t.Fatal(err)
	}

	chainService.forkChoiceStore = &store{headRoot: newRoot[:]}
	chainService.canonicalRoots[0] = newRoot[:]

	if err := chainService.ReceiveBlockNoPubsub(ctx, newBlk); err != nil {
		t.Fatal(err)
	}

	testutil.AssertLogsDoNotContain(t, hook, "Saved new head info")
}

func TestReceiveBlockNoPubsubForkchoice_ProcessCorrectly(t *testing.T) {
	hook := logTest.NewGlobal()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)
	ctx := context.Background()

	chainService := setupBeaconChain(t, db)
	deposits, _, privKeys := testutil.SetupInitialDeposits(t, 100)
	beaconState, err := state.GenesisBeaconState(deposits, 0, &ethpb.Eth1Data{BlockHash: make([]byte, 32)})
	if err != nil {
		t.Fatal(err)
	}
	beaconState.Eth1DepositIndex = 100
	stateRoot, err := ssz.HashTreeRoot(beaconState)
	if err != nil {
		t.Fatal(err)
	}

	genesis := b.NewGenesisBlock(stateRoot[:])
	bodyRoot, err := ssz.HashTreeRoot(genesis.Body)
	if err != nil {
		t.Fatal(err)
	}
	if err := chainService.forkChoiceStore.GenesisStore(ctx, &ethpb.Checkpoint{}, &ethpb.Checkpoint{}); err != nil {
		t.Fatal(err)
	}

	beaconState.LatestBlockHeader = &ethpb.BeaconBlockHeader{
		Slot:       genesis.Slot,
		ParentRoot: genesis.ParentRoot,
		BodyRoot:   bodyRoot[:],
		StateRoot:  genesis.StateRoot,
	}
	if err := chainService.beaconDB.SaveBlock(ctx, genesis); err != nil {
		t.Fatalf("Could not save block to db: %v", err)
	}
	parentRoot, err := ssz.SigningRoot(genesis)
	if err != nil {
		t.Fatal(err)
	}

	if err := db.SaveState(ctx, beaconState, parentRoot); err != nil {
		t.Fatal(err)
	}

	slot := beaconState.Slot + 1
	epoch := helpers.SlotToEpoch(slot)
	beaconState.Slot++
	randaoReveal, err := testutil.CreateRandaoReveal(beaconState, epoch, privKeys)
	if err != nil {
		t.Fatal(err)
	}
	beaconState.Slot--

	block := &ethpb.BeaconBlock{
		Slot:       slot,
		ParentRoot: parentRoot[:],
		Body: &ethpb.BeaconBlockBody{
			Eth1Data: &ethpb.Eth1Data{
				DepositCount: uint64(len(deposits)),
				DepositRoot:  []byte("a"),
				BlockHash:    []byte("b"),
			},
			RandaoReveal: randaoReveal[:],
			Attestations: nil,
		},
	}

	stateRootCandidate, err := state.ExecuteStateTransitionNoVerify(context.Background(), beaconState, block)
	if err != nil {
		t.Fatal(err)
	}

	stateRoot, err = ssz.HashTreeRoot(stateRootCandidate)
	if err != nil {
		t.Fatal(err)
	}
	block.StateRoot = stateRoot[:]

	block, err = testutil.SignBlock(beaconState, block, privKeys)
	if err != nil {
		t.Error(err)
	}

	if err := chainService.beaconDB.SaveBlock(ctx, block); err != nil {
		t.Fatal(err)
	}
	if err := chainService.ReceiveBlockNoPubsubForkchoice(context.Background(), block); err != nil {
		t.Errorf("Block failed processing: %v", err)
	}
	testutil.AssertLogsContain(t, hook, "Finished applying state transition")
	testutil.AssertLogsDoNotContain(t, hook, "Finished fork choice")
}
@@ -4,107 +4,136 @@
 package blockchain
 
 import (
-	"bytes"
 	"context"
 	"fmt"
-	"sort"
+	"runtime"
 	"sync"
 	"time"
 
-	"github.com/prysmaticlabs/prysm/beacon-chain/attestation"
-	b "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
+	"github.com/pkg/errors"
+	"github.com/prysmaticlabs/go-ssz"
+	"github.com/prysmaticlabs/prysm/beacon-chain/blockchain/forkchoice"
+	"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
+	"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
+	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
+	"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
 	"github.com/prysmaticlabs/prysm/beacon-chain/db"
 	"github.com/prysmaticlabs/prysm/beacon-chain/operations"
+	"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
 	"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
 	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
-	pbrpc "github.com/prysmaticlabs/prysm/proto/beacon/rpc/v1"
+	ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
 	"github.com/prysmaticlabs/prysm/shared/bytesutil"
 	"github.com/prysmaticlabs/prysm/shared/event"
-	"github.com/prysmaticlabs/prysm/shared/hashutil"
-	"github.com/prysmaticlabs/prysm/shared/p2p"
+	"github.com/prysmaticlabs/prysm/shared/featureconfig"
 	"github.com/sirupsen/logrus"
 	"go.opencensus.io/trace"
 )
 
-var log = logrus.WithField("prefix", "blockchain")
 
-// ChainFeeds interface defines the methods of the ChainService which provide
-// information feeds.
+// ChainFeeds interface defines the methods of the Service which provide state related
+// information feeds to consumers.
 type ChainFeeds interface {
 	StateInitializedFeed() *event.Feed
 }
 
-// ChainService represents a service that handles the internal
+// NewHeadNotifier defines a struct which can notify many consumers of a new,
+// canonical chain head event occurring in the node.
+type NewHeadNotifier interface {
+	HeadUpdatedFeed() *event.Feed
+}
+
+// Service represents a service that handles the internal
 // logic of managing the full PoS beacon chain.
-type ChainService struct {
+type Service struct {
 	ctx    context.Context
 	cancel context.CancelFunc
-	beaconDB             *db.BeaconDB
-	web3Service          *powchain.Web3Service
-	attsService          attestation.TargetHandler
+	beaconDB             db.Database
+	depositCache         *depositcache.DepositCache
+	chainStartFetcher    powchain.ChainStartFetcher
 	opsPoolService       operations.OperationFeeds
+	forkChoiceStore      forkchoice.ForkChoicer
 	chainStartChan       chan time.Time
-	canonicalBlockFeed   *event.Feed
 	genesisTime          time.Time
-	finalizedEpoch       uint64
 	stateInitializedFeed *event.Feed
+	headUpdatedFeed      *event.Feed
 	p2p                  p2p.Broadcaster
-	canonicalBlocks      map[uint64][]byte
-	canonicalBlocksLock  sync.RWMutex
-	receiveBlockLock     sync.Mutex
+	maxRoutines          int64
+	headSlot             uint64
+	headBlock            *ethpb.BeaconBlock
+	headState            *pb.BeaconState
+	canonicalRoots       map[uint64][]byte
+	headLock             sync.RWMutex
 }
 
 // Config options for the service.
 type Config struct {
-	BeaconBlockBuf int
-	Web3Service    *powchain.Web3Service
-	AttsService    attestation.TargetHandler
-	BeaconDB       *db.BeaconDB
-	OpsPoolService operations.OperationFeeds
-	DevMode        bool
-	P2p            p2p.Broadcaster
+	BeaconBlockBuf    int
+	ChainStartFetcher powchain.ChainStartFetcher
+	BeaconDB          db.Database
+	DepositCache      *depositcache.DepositCache
+	OpsPoolService    operations.OperationFeeds
+	P2p               p2p.Broadcaster
+	MaxRoutines       int64
 }
 
-// NewChainService instantiates a new service instance that will
+// NewService instantiates a new block service instance that will
 // be registered into a running beacon node.
-func NewChainService(ctx context.Context, cfg *Config) (*ChainService, error) {
+func NewService(ctx context.Context, cfg *Config) (*Service, error) {
 	ctx, cancel := context.WithCancel(ctx)
-	return &ChainService{
+	store := forkchoice.NewForkChoiceService(ctx, cfg.BeaconDB)
+	return &Service{
 		ctx:    ctx,
 		cancel: cancel,
 		beaconDB: cfg.BeaconDB,
-		web3Service:          cfg.Web3Service,
+		depositCache:         cfg.DepositCache,
+		chainStartFetcher:    cfg.ChainStartFetcher,
 		opsPoolService:       cfg.OpsPoolService,
-		attsService:          cfg.AttsService,
-		canonicalBlockFeed:   new(event.Feed),
+		forkChoiceStore:      store,
 		chainStartChan:       make(chan time.Time),
 		stateInitializedFeed: new(event.Feed),
+		headUpdatedFeed:      new(event.Feed),
 		p2p:                  cfg.P2p,
-		canonicalBlocks:      make(map[uint64][]byte),
+		canonicalRoots:       make(map[uint64][]byte),
+		maxRoutines:          cfg.MaxRoutines,
 	}, nil
 }
 
 // Start a blockchain service's main event loop.
-func (c *ChainService) Start() {
-	beaconState, err := c.beaconDB.HeadState(c.ctx)
+func (s *Service) Start() {
+	ctx := context.TODO()
+	beaconState, err := s.beaconDB.HeadState(ctx)
 	if err != nil {
 		log.Fatalf("Could not fetch beacon state: %v", err)
 	}
 	// If the chain has already been initialized, simply start the block processing routine.
 	if beaconState != nil {
-		log.Info("Beacon chain data already exists, starting service")
-		c.genesisTime = time.Unix(int64(beaconState.GenesisTime), 0)
-		c.finalizedEpoch = beaconState.FinalizedEpoch
+		log.Info("Blockchain data already exists in DB, initializing...")
+		s.genesisTime = time.Unix(int64(beaconState.GenesisTime), 0)
+		if err := s.initializeChainInfo(ctx); err != nil {
+			log.Fatalf("Could not set up chain info: %v", err)
+		}
+		justifiedCheckpoint, err := s.beaconDB.JustifiedCheckpoint(ctx)
+		if err != nil {
+			log.Fatalf("Could not get justified checkpoint: %v", err)
+		}
+		finalizedCheckpoint, err := s.beaconDB.FinalizedCheckpoint(ctx)
+		if err != nil {
+			log.Fatalf("Could not get finalized checkpoint: %v", err)
+		}
+		if err := s.forkChoiceStore.GenesisStore(ctx, justifiedCheckpoint, finalizedCheckpoint); err != nil {
+			log.Fatalf("Could not start fork choice service: %v", err)
+		}
+		s.stateInitializedFeed.Send(s.genesisTime)
 	} else {
-		log.Info("Waiting for ChainStart log from the Validator Deposit Contract to start the beacon chain...")
-		if c.web3Service == nil {
+		log.Info("Waiting to reach the validator deposit threshold to start the beacon chain...")
+		if s.chainStartFetcher == nil {
 			log.Fatal("Not configured web3Service for POW chain")
 			return // return need for TestStartUninitializedChainWithoutConfigPOWChain.
 		}
-		subChainStart := c.web3Service.ChainStartFeed().Subscribe(c.chainStartChan)
+		subChainStart := s.chainStartFetcher.ChainStartFeed().Subscribe(s.chainStartChan)
 		go func() {
-			genesisTime := <-c.chainStartChan
-			c.processChainStartTime(genesisTime, subChainStart)
+			genesisTime := <-s.chainStartChan
+			s.processChainStartTime(ctx, genesisTime, subChainStart)
 			return
 		}()
 	}
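For reference, wiring the renamed constructor at node startup might look like the sketch below. It is not part of the diff; every dependency value and the MaxRoutines number are placeholders supplied by a real node's service registry.

package main

import (
	"context"
	"log"

	"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
	"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
	"github.com/prysmaticlabs/prysm/beacon-chain/db"
	"github.com/prysmaticlabs/prysm/beacon-chain/operations"
	"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
	"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
)

// startChain constructs and starts the renamed Service; arguments are placeholders.
func startChain(ctx context.Context, d db.Database, dc *depositcache.DepositCache,
	f powchain.ChainStartFetcher, ops operations.OperationFeeds, b p2p.Broadcaster) {
	svc, err := blockchain.NewService(ctx, &blockchain.Config{
		BeaconDB:          d,
		DepositCache:      dc,
		ChainStartFetcher: f,
		OpsPoolService:    ops,
		P2p:               b,
		MaxRoutines:       100000, // illustrative goroutine budget
	})
	if err != nil {
		log.Fatalf("Could not create blockchain service: %v", err)
	}
	go svc.Start()
}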
@@ -112,171 +141,180 @@ func (c *ChainService) Start() {
|
||||
|
||||
// processChainStartTime initializes a series of deposits from the ChainStart deposits in the eth1
|
||||
// deposit contract, initializes the beacon chain's state, and kicks off the beacon chain.
|
||||
func (c *ChainService) processChainStartTime(genesisTime time.Time, chainStartSub event.Subscription) {
|
||||
initialDepositsData := c.web3Service.ChainStartDeposits()
|
||||
initialDeposits := make([]*pb.Deposit, len(initialDepositsData))
|
||||
for i := range initialDepositsData {
|
||||
initialDeposits[i] = &pb.Deposit{DepositData: initialDepositsData[i]}
|
||||
}
|
||||
|
||||
beaconState, err := c.initializeBeaconChain(genesisTime, initialDeposits, c.web3Service.ChainStartETH1Data())
|
||||
if err != nil {
|
||||
func (s *Service) processChainStartTime(ctx context.Context, genesisTime time.Time, chainStartSub event.Subscription) {
|
||||
initialDeposits := s.chainStartFetcher.ChainStartDeposits()
|
||||
if err := s.initializeBeaconChain(ctx, genesisTime, initialDeposits, s.chainStartFetcher.ChainStartEth1Data()); err != nil {
|
||||
log.Fatalf("Could not initialize beacon chain: %v", err)
|
||||
}
|
||||
c.finalizedEpoch = beaconState.FinalizedEpoch
|
||||
c.stateInitializedFeed.Send(genesisTime)
|
||||
s.stateInitializedFeed.Send(genesisTime)
|
||||
chainStartSub.Unsubscribe()
|
||||
}
|
||||
|
||||
// initializes the state and genesis block of the beacon chain to persistent storage
|
||||
// based on a genesis timestamp value obtained from the ChainStart event emitted
|
||||
// by the ETH1.0 Deposit Contract and the POWChain service of the node.
|
||||
func (c *ChainService) initializeBeaconChain(genesisTime time.Time, deposits []*pb.Deposit,
|
||||
eth1data *pb.Eth1Data) (*pb.BeaconState, error) {
|
||||
ctx, span := trace.StartSpan(context.Background(), "beacon-chain.ChainService.initializeBeaconChain")
|
||||
func (s *Service) initializeBeaconChain(
|
||||
ctx context.Context,
|
||||
genesisTime time.Time,
|
||||
deposits []*ethpb.Deposit,
|
||||
eth1data *ethpb.Eth1Data) error {
|
||||
_, span := trace.StartSpan(context.Background(), "beacon-chain.Service.initializeBeaconChain")
|
||||
defer span.End()
|
||||
log.Info("ChainStart time reached, starting the beacon chain!")
|
||||
c.genesisTime = genesisTime
|
||||
log.Info("Genesis time reached, starting the beacon chain")
|
||||
s.genesisTime = genesisTime
|
||||
unixTime := uint64(genesisTime.Unix())
|
||||
if err := c.beaconDB.InitializeState(c.ctx, unixTime, deposits, eth1data); err != nil {
|
||||
return nil, fmt.Errorf("could not initialize beacon state to disk: %v", err)
|
||||
}
|
||||
beaconState, err := c.beaconDB.HeadState(c.ctx)
|
||||
|
||||
genesisState, err := state.GenesisBeaconState(deposits, unixTime, eth1data)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not attempt fetch beacon state: %v", err)
|
||||
return errors.Wrap(err, "could not initialize genesis state")
|
||||
}
|
||||
|
||||
stateRoot, err := hashutil.HashProto(beaconState)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not hash beacon state: %v", err)
|
||||
}
|
||||
genBlock := b.NewGenesisBlock(stateRoot[:])
|
||||
genBlockRoot, err := hashutil.HashBeaconBlock(genBlock)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not hash beacon block: %v", err)
|
||||
if err := s.saveGenesisData(ctx, genesisState); err != nil {
|
||||
return errors.Wrap(err, "could not save genesis data")
|
||||
}
|
||||
|
||||
// TODO(#2011): Remove this in state caching.
|
||||
beaconState.LatestBlock = genBlock
|
||||
// Update committee shuffled indices for genesis epoch.
|
||||
if featureconfig.Get().EnableNewCache {
|
||||
if err := helpers.UpdateCommitteeCache(genesisState); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err := c.beaconDB.SaveBlock(genBlock); err != nil {
|
||||
return nil, fmt.Errorf("could not save genesis block to disk: %v", err)
|
||||
}
|
||||
if err := c.beaconDB.SaveAttestationTarget(ctx, &pb.AttestationTarget{
|
||||
Slot: genBlock.Slot,
|
||||
BlockRoot: genBlockRoot[:],
|
||||
ParentRoot: genBlock.ParentRootHash32,
|
||||
}); err != nil {
|
||||
return nil, fmt.Errorf("failed to save attestation target: %v", err)
|
||||
}
|
||||
if err := c.beaconDB.UpdateChainHead(ctx, genBlock, beaconState); err != nil {
|
||||
return nil, fmt.Errorf("could not set chain head, %v", err)
|
||||
}
|
||||
if err := c.beaconDB.SaveJustifiedBlock(genBlock); err != nil {
|
||||
return nil, fmt.Errorf("could not save gensis block as justified block: %v", err)
|
||||
}
|
||||
if err := c.beaconDB.SaveFinalizedBlock(genBlock); err != nil {
|
||||
return nil, fmt.Errorf("could not save gensis block as finalized block: %v", err)
|
||||
}
|
||||
if err := c.beaconDB.SaveJustifiedState(beaconState); err != nil {
|
||||
return nil, fmt.Errorf("could not save gensis state as justified state: %v", err)
|
||||
}
|
||||
if err := c.beaconDB.SaveFinalizedState(beaconState); err != nil {
|
||||
return nil, fmt.Errorf("could not save gensis state as finalized state: %v", err)
|
||||
}
|
||||
return beaconState, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop the blockchain service's main event loop and associated goroutines.
|
||||
func (c *ChainService) Stop() error {
|
||||
defer c.cancel()
|
||||
|
||||
log.Info("Stopping service")
|
||||
func (s *Service) Stop() error {
|
||||
defer s.cancel()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Status always returns nil.
|
||||
// TODO(1202): Add service health checks.
|
||||
func (c *ChainService) Status() error {
|
||||
// Status always returns nil unless there is an error condition that causes
|
||||
// this service to be unhealthy.
|
||||
func (s *Service) Status() error {
|
||||
if runtime.NumGoroutine() > int(s.maxRoutines) {
|
||||
return fmt.Errorf("too many goroutines %d", runtime.NumGoroutine())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CanonicalBlockFeed returns a channel that is written to
|
||||
// whenever a new block is determined to be canonical in the chain.
|
||||
func (c *ChainService) CanonicalBlockFeed() *event.Feed {
|
||||
return c.canonicalBlockFeed
|
||||
}
|
||||
|
||||
// StateInitializedFeed returns a feed that is written to
|
||||
// when the beacon state is first initialized.
|
||||
func (c *ChainService) StateInitializedFeed() *event.Feed {
|
||||
return c.stateInitializedFeed
|
||||
func (s *Service) StateInitializedFeed() *event.Feed {
|
||||
return s.stateInitializedFeed
|
||||
}
|
||||
|
||||
// ChainHeadRoot returns the hash root of the last beacon block processed by the
|
||||
// block chain service.
|
||||
func (c *ChainService) ChainHeadRoot() ([32]byte, error) {
|
||||
head, err := c.beaconDB.ChainHead()
|
||||
// HeadUpdatedFeed is a feed containing the head block root and
|
||||
// is written to when a new head block is saved to DB.
|
||||
func (s *Service) HeadUpdatedFeed() *event.Feed {
|
||||
return s.headUpdatedFeed
|
||||
}
|
||||
|
||||
// This gets called to update canonical root mapping.
|
||||
func (s *Service) saveHead(ctx context.Context, b *ethpb.BeaconBlock, r [32]byte) error {
|
||||
s.headLock.Lock()
|
||||
defer s.headLock.Unlock()
|
||||
|
||||
s.headSlot = b.Slot
|
||||
|
||||
s.canonicalRoots[b.Slot] = r[:]
|
||||
|
||||
if err := s.beaconDB.SaveHeadBlockRoot(ctx, r); err != nil {
|
||||
return errors.Wrap(err, "could not save head root in DB")
|
||||
}
|
||||
s.headBlock = b
|
||||
|
||||
headState, err := s.beaconDB.State(ctx, r)
|
||||
if err != nil {
|
||||
return [32]byte{}, fmt.Errorf("could not retrieve chain head: %v", err)
|
||||
return errors.Wrap(err, "could not retrieve head state in DB")
|
||||
}
|
||||
s.headState = headState
|
||||
|
||||
root, err := hashutil.HashBeaconBlock(head)
|
||||
if err != nil {
|
||||
return [32]byte{}, fmt.Errorf("could not tree hash parent block: %v", err)
|
||||
}
|
||||
return root, nil
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": b.Slot,
|
||||
"headRoot": fmt.Sprintf("%#x", r),
|
||||
}).Debug("Saved new head info")
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsCanonical returns true if the input block hash of the corresponding slot
|
||||
// is part of the canonical chain. False otherwise.
|
||||
func (c *ChainService) IsCanonical(slot uint64, hash []byte) bool {
|
||||
c.canonicalBlocksLock.RLock()
|
||||
defer c.canonicalBlocksLock.RUnlock()
|
||||
if canonicalHash, ok := c.canonicalBlocks[slot]; ok {
|
||||
return bytes.Equal(canonicalHash, hash)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// CanonicalBlock returns canonical block of a given slot, it returns nil
|
||||
// if there's no canonical block saved of a given slot.
|
||||
func (c *ChainService) CanonicalBlock(slot uint64) (*pb.BeaconBlock, error) {
|
||||
c.canonicalBlocksLock.RLock()
|
||||
defer c.canonicalBlocksLock.RUnlock()
|
||||
root, exists := c.canonicalBlocks[slot]
|
||||
if !exists {
|
||||
return nil, nil
|
||||
}
|
||||
return c.beaconDB.Block(bytesutil.ToBytes32(root))
|
||||
}
|
||||
|
||||
// RecentCanonicalRoots returns the latest block slot and root of the canonical block chain,
|
||||
// the block slots and roots are sorted and in descending order. Input count determines
|
||||
// the number of block slots and roots to return.
|
||||
func (c *ChainService) RecentCanonicalRoots(count uint64) []*pbrpc.BlockRoot {
|
||||
c.canonicalBlocksLock.RLock()
|
||||
defer c.canonicalBlocksLock.RUnlock()
|
||||
var slots []int
|
||||
for s := range c.canonicalBlocks {
|
||||
slots = append(slots, int(s))
|
||||
}
|
||||
|
||||
// Return the all the canonical blocks if the input count is greater than
|
||||
// the depth of the block tree.
|
||||
totalRoots := uint64(len(slots))
|
||||
if count > totalRoots {
|
||||
count = totalRoots
|
||||
}
|
||||
|
||||
sort.Sort(sort.Reverse(sort.IntSlice(slots)))
|
||||
blockRoots := make([]*pbrpc.BlockRoot, count)
|
||||
for i := 0; i < int(count); i++ {
|
||||
slot := uint64(slots[i])
|
||||
blockRoots[i] = &pbrpc.BlockRoot{
|
||||
Slot: slot,
|
||||
Root: c.canonicalBlocks[slot],
|
||||
// This gets called when beacon chain is first initialized to save validator indices and pubkeys in db
|
||||
func (s *Service) saveGenesisValidators(ctx context.Context, state *pb.BeaconState) error {
|
||||
for i, v := range state.Validators {
|
||||
if err := s.beaconDB.SaveValidatorIndex(ctx, bytesutil.ToBytes48(v.PublicKey), uint64(i)); err != nil {
|
||||
return errors.Wrapf(err, "could not save validator index: %d", i)
|
||||
}
|
||||
}
|
||||
return blockRoots
|
||||
return nil
|
||||
}
|

// This gets called when beacon chain is first initialized to save genesis data (state, block, and more) in db
func (s *Service) saveGenesisData(ctx context.Context, genesisState *pb.BeaconState) error {
	s.headLock.Lock()
	defer s.headLock.Unlock()

	stateRoot, err := ssz.HashTreeRoot(genesisState)
	if err != nil {
		return errors.Wrap(err, "could not tree hash genesis state")
	}
	genesisBlk := blocks.NewGenesisBlock(stateRoot[:])
	genesisBlkRoot, err := ssz.SigningRoot(genesisBlk)
	if err != nil {
		return errors.Wrap(err, "could not get genesis block root")
	}

	if err := s.beaconDB.SaveBlock(ctx, genesisBlk); err != nil {
		return errors.Wrap(err, "could not save genesis block")
	}
	if err := s.beaconDB.SaveHeadBlockRoot(ctx, genesisBlkRoot); err != nil {
		return errors.Wrap(err, "could not save head block root")
	}
	if err := s.beaconDB.SaveGenesisBlockRoot(ctx, genesisBlkRoot); err != nil {
		return errors.Wrap(err, "could not save genesis block root")
	}
	if err := s.beaconDB.SaveState(ctx, genesisState, genesisBlkRoot); err != nil {
		return errors.Wrap(err, "could not save genesis state")
	}
	if err := s.saveGenesisValidators(ctx, genesisState); err != nil {
		return errors.Wrap(err, "could not save genesis validators")
	}

	genesisCheckpoint := &ethpb.Checkpoint{Root: genesisBlkRoot[:]}
	if err := s.forkChoiceStore.GenesisStore(ctx, genesisCheckpoint, genesisCheckpoint); err != nil {
		return errors.Wrap(err, "could not start fork choice service")
	}

	s.headBlock = genesisBlk
	s.headState = genesisState
	s.canonicalRoots[genesisState.Slot] = genesisBlkRoot[:]

	return nil
}

// This gets called to initialize chain info variables using the finalized checkpoint stored in DB
func (s *Service) initializeChainInfo(ctx context.Context) error {
	s.headLock.Lock()
	defer s.headLock.Unlock()

	finalized, err := s.beaconDB.FinalizedCheckpoint(ctx)
	if err != nil {
		return errors.Wrap(err, "could not get finalized checkpoint from db")
	}
	if finalized == nil {
		// This should never happen. At chain start, the finalized checkpoint
		// would be the genesis state and block.
		return errors.New("no finalized epoch in the database")
	}
	s.headState, err = s.beaconDB.State(ctx, bytesutil.ToBytes32(finalized.Root))
	if err != nil {
		return errors.Wrap(err, "could not get finalized state from db")
	}
	s.headBlock, err = s.beaconDB.Block(ctx, bytesutil.ToBytes32(finalized.Root))
	if err != nil {
		return errors.Wrap(err, "could not get finalized block from db")
	}

	s.headSlot = s.headState.Slot
	s.canonicalRoots[s.headSlot] = finalized.Root

	return nil
}
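The split between saveGenesisData and initializeChainInfo suggests a startup flow that branches on whether the database already holds a finalized checkpoint. A minimal sketch of that decision (startChain is a hypothetical name for illustration only, not the actual Start() code in this diff):

	// Illustrative only: pick cold-start vs. warm-start initialization.
	func (s *Service) startChain(ctx context.Context, genesisState *pb.BeaconState) error {
		finalized, err := s.beaconDB.FinalizedCheckpoint(ctx)
		if err != nil {
			return err
		}
		if finalized == nil {
			// Fresh database: persist genesis block, state, and validators.
			return s.saveGenesisData(ctx, genesisState)
		}
		// Existing database: restore head block/state from the finalized checkpoint.
		return s.initializeChainInfo(ctx)
	}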

beacon-chain/blockchain/service_norace_test.go | 37 (new file)
@@ -0,0 +1,37 @@
package blockchain

import (
	"context"
	"io/ioutil"
	"testing"

	testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
	ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
	"github.com/sirupsen/logrus"
)

func init() {
	logrus.SetLevel(logrus.DebugLevel)
	logrus.SetOutput(ioutil.Discard)
}

func TestChainService_SaveHead_DataRace(t *testing.T) {
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)
	s := &Service{
		beaconDB:       db,
		canonicalRoots: make(map[uint64][]byte),
	}
	go func() {
		s.saveHead(
			context.Background(),
			&ethpb.BeaconBlock{Slot: 777},
			[32]byte{},
		)
	}()
	s.saveHead(
		context.Background(),
		&ethpb.BeaconBlock{Slot: 888},
		[32]byte{},
	)
}
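This test is only meaningful under the Go race detector. A typical invocation with standard Go tooling (not shown in this diff) would be:

	go test -race -run TestChainService_SaveHead_DataRace ./beacon-chain/blockchain/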
@@ -1,9 +1,9 @@
package blockchain

import (
	"bytes"
	"context"
	"crypto/rand"
	"encoding/binary"
	"encoding/hex"
	"errors"
	"io/ioutil"
	"math/big"
@@ -11,40 +11,63 @@ import (
	"testing"
	"time"

	"github.com/ethereum/go-ethereum"
	ethereum "github.com/ethereum/go-ethereum"
	"github.com/ethereum/go-ethereum/common"
	gethTypes "github.com/ethereum/go-ethereum/core/types"
	"github.com/gogo/protobuf/proto"
	"github.com/prysmaticlabs/prysm/beacon-chain/attestation"
	ssz "github.com/prysmaticlabs/go-ssz"
	"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
	b "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/beacon-chain/db"
	"github.com/prysmaticlabs/prysm/beacon-chain/internal"
	testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
	"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
	"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
	pbrpc "github.com/prysmaticlabs/prysm/proto/beacon/rpc/v1"
	"github.com/prysmaticlabs/prysm/shared/bls"
	ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
	"github.com/prysmaticlabs/prysm/shared/bytesutil"
	"github.com/prysmaticlabs/prysm/shared/event"
	"github.com/prysmaticlabs/prysm/shared/featureconfig"
	"github.com/prysmaticlabs/prysm/shared/forkutil"
	"github.com/prysmaticlabs/prysm/shared/hashutil"
	"github.com/prysmaticlabs/prysm/shared/p2p"
	"github.com/prysmaticlabs/prysm/shared/params"
	"github.com/prysmaticlabs/prysm/shared/testutil"
	"github.com/sirupsen/logrus"
	logTest "github.com/sirupsen/logrus/hooks/test"
)

// Ensure ChainService implements interfaces.
var _ = ChainFeeds(&ChainService{})
// Ensure Service implements interfaces.
var _ = ChainFeeds(&Service{})
var _ = NewHeadNotifier(&Service{})

func init() {
	logrus.SetLevel(logrus.DebugLevel)
	logrus.SetOutput(ioutil.Discard)
	featureconfig.InitFeatureConfig(&featureconfig.FeatureFlagConfig{
		EnableCrosslinks:          true,
		EnableCheckBlockStateRoot: true,
	})
}

type store struct {
	headRoot []byte
}

func (s *store) OnBlock(ctx context.Context, b *ethpb.BeaconBlock) error {
	return nil
}

func (s *store) OnBlockNoVerifyStateTransition(ctx context.Context, b *ethpb.BeaconBlock) error {
	return nil
}

func (s *store) OnAttestation(ctx context.Context, a *ethpb.Attestation) (uint64, error) {
	return 0, nil
}

func (s *store) GenesisStore(ctx context.Context, justifiedCheckpoint *ethpb.Checkpoint, finalizedCheckpoint *ethpb.Checkpoint) error {
	return nil
}

func (s *store) FinalizedCheckpt() *ethpb.Checkpoint {
	return nil
}

func (s *store) Head(ctx context.Context) ([]byte, error) {
	return s.headRoot, nil
}

type mockOperationService struct{}
@@ -152,103 +175,50 @@ type mockBroadcaster struct {
	broadcastCalled bool
}

func (mb *mockBroadcaster) Broadcast(_ context.Context, _ proto.Message) {
func (mb *mockBroadcaster) Broadcast(_ context.Context, _ proto.Message) error {
	mb.broadcastCalled = true
	return nil
}

var _ = p2p.Broadcaster(&mockBroadcaster{})

func setupInitialDeposits(t *testing.T, numDeposits int) ([]*pb.Deposit, []*bls.SecretKey) {
	privKeys := make([]*bls.SecretKey, numDeposits)
	deposits := make([]*pb.Deposit, numDeposits)
	for i := 0; i < len(deposits); i++ {
		priv, err := bls.RandKey(rand.Reader)
		if err != nil {
			t.Fatal(err)
		}
		depositInput := &pb.DepositInput{
			Pubkey: priv.PublicKey().Marshal(),
		}
		balance := params.BeaconConfig().MaxDepositAmount
		depositData, err := helpers.EncodeDepositData(depositInput, balance, time.Now().Unix())
		if err != nil {
			t.Fatalf("Cannot encode data: %v", err)
		}
		deposits[i] = &pb.Deposit{
			DepositData:     depositData,
			MerkleTreeIndex: uint64(i),
		}
		privKeys[i] = priv
	}
	return deposits, privKeys
}

func createPreChainStartDeposit(t *testing.T, pk []byte, index uint64) *pb.Deposit {
	depositInput := &pb.DepositInput{Pubkey: pk}
	balance := params.BeaconConfig().MaxDepositAmount
	depositData, err := helpers.EncodeDepositData(depositInput, balance, time.Now().Unix())
	if err != nil {
		t.Fatalf("Cannot encode data: %v", err)
	}
	return &pb.Deposit{DepositData: depositData, MerkleTreeIndex: index}
}

func createRandaoReveal(t *testing.T, beaconState *pb.BeaconState, privKeys []*bls.SecretKey) []byte {
	// We fetch the proposer's index as that is whom the RANDAO will be verified against.
	proposerIdx, err := helpers.BeaconProposerIndex(beaconState, beaconState.Slot)
	if err != nil {
		t.Fatal(err)
	}
	epoch := helpers.SlotToEpoch(beaconState.Slot)
	buf := make([]byte, 32)
	binary.LittleEndian.PutUint64(buf, epoch)
	domain := forkutil.DomainVersion(beaconState.Fork, epoch, params.BeaconConfig().DomainRandao)
	// We make the previous validator's index sign the message instead of the proposer.
	epochSignature := privKeys[proposerIdx].Sign(buf, domain)
	return epochSignature.Marshal()
}

func setupGenesisBlock(t *testing.T, cs *ChainService) ([32]byte, *pb.BeaconBlock) {
func setupGenesisBlock(t *testing.T, cs *Service) ([32]byte, *ethpb.BeaconBlock) {
	genesis := b.NewGenesisBlock([]byte{})
	if err := cs.beaconDB.SaveBlock(genesis); err != nil {
	if err := cs.beaconDB.SaveBlock(context.Background(), genesis); err != nil {
		t.Fatalf("could not save block to db: %v", err)
	}
	parentHash, err := hashutil.HashBeaconBlock(genesis)
	parentHash, err := ssz.SigningRoot(genesis)
	if err != nil {
		t.Fatalf("unable to get tree hash root of canonical head: %v", err)
	}
	return parentHash, genesis
}

func setupBeaconChain(t *testing.T, beaconDB *db.BeaconDB, attsService *attestation.Service) *ChainService {
func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
	endpoint := "ws://127.0.0.1"
	ctx := context.Background()
	var web3Service *powchain.Web3Service
	var web3Service *powchain.Service
	var err error
	client := &mockClient{}
	web3Service, err = powchain.NewWeb3Service(ctx, &powchain.Web3ServiceConfig{
		Endpoint:        endpoint,
	web3Service, err = powchain.NewService(ctx, &powchain.Web3ServiceConfig{
		ETH1Endpoint:    endpoint,
		DepositContract: common.Address{},
		Reader:          client,
		Client:          client,
		Logger:          client,
	})
	if err != nil {
		t.Fatalf("unable to set up web3 service: %v", err)
	}

	cfg := &Config{
		BeaconBlockBuf: 0,
		BeaconDB:       beaconDB,
		Web3Service:    web3Service,
		OpsPoolService: &mockOperationService{},
		AttsService:    attsService,
		P2p:            &mockBroadcaster{},
		BeaconBlockBuf:    0,
		BeaconDB:          beaconDB,
		DepositCache:      depositcache.NewDepositCache(),
		ChainStartFetcher: web3Service,
		OpsPoolService:    &mockOperationService{},
		P2p:               &mockBroadcaster{},
	}
	if err != nil {
		t.Fatalf("could not register blockchain service: %v", err)
	}
	chainService, err := NewChainService(ctx, cfg)
	chainService, err := NewService(ctx, cfg)
	if err != nil {
		t.Fatalf("unable to setup chain service: %v", err)
	}
@@ -256,21 +226,12 @@ func setupBeaconChain(t *testing.T, beaconDB *db.BeaconDB, attsService *attestat
	return chainService
}

func SetSlotInState(service *ChainService, slot uint64) error {
	bState, err := service.beaconDB.HeadState(context.Background())
	if err != nil {
		return err
	}

	bState.Slot = slot
	return service.beaconDB.SaveState(context.Background(), bState)
}

func TestChainStartStop_Uninitialized(t *testing.T) {
	helpers.ClearAllCaches()
	hook := logTest.NewGlobal()
	db := internal.SetupDB(t)
	defer internal.TeardownDB(t, db)
	chainService := setupBeaconChain(t, db, nil)
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)
	chainService := setupBeaconChain(t, db)

	// Test the start function.
	genesisChan := make(chan time.Time, 0)
@@ -291,7 +252,7 @@ func TestChainStartStop_Uninitialized(t *testing.T) {
	if err != nil {
		t.Fatal(err)
	}
	if beaconState == nil || beaconState.Slot != params.BeaconConfig().GenesisSlot {
	if beaconState == nil || beaconState.Slot != 0 {
		t.Error("Expected canonical state feed to send a state with genesis block")
	}
	if err := chainService.Stop(); err != nil {
@@ -301,23 +262,39 @@ func TestChainStartStop_Uninitialized(t *testing.T) {
	if chainService.ctx.Err() != context.Canceled {
		t.Error("Context was not canceled")
	}
	testutil.AssertLogsContain(t, hook, "Waiting for ChainStart log from the Validator Deposit Contract to start the beacon chain...")
	testutil.AssertLogsContain(t, hook, "ChainStart time reached, starting the beacon chain!")
	testutil.AssertLogsContain(t, hook, "Waiting")
	testutil.AssertLogsContain(t, hook, "Genesis time reached")
}

func TestChainStartStop_Initialized(t *testing.T) {
	hook := logTest.NewGlobal()
	db := internal.SetupDB(t)
	defer internal.TeardownDB(t, db)
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)

	chainService := setupBeaconChain(t, db, nil)
	chainService := setupBeaconChain(t, db)

	unixTime := uint64(time.Now().Unix())
	deposits, _ := setupInitialDeposits(t, 100)
	if err := db.InitializeState(context.Background(), unixTime, deposits, &pb.Eth1Data{}); err != nil {
		t.Fatalf("Could not initialize beacon state to disk: %v", err)
	genesisBlk := b.NewGenesisBlock([]byte{})
	blkRoot, err := ssz.SigningRoot(genesisBlk)
	if err != nil {
		t.Fatal(err)
	}
	setupGenesisBlock(t, chainService)
	if err := db.SaveBlock(ctx, genesisBlk); err != nil {
		t.Fatal(err)
	}
	if err := db.SaveHeadBlockRoot(ctx, blkRoot); err != nil {
		t.Fatal(err)
	}
	if err := db.SaveGenesisBlockRoot(ctx, blkRoot); err != nil {
		t.Fatal(err)
	}
	if err := db.SaveState(ctx, &pb.BeaconState{Slot: 1}, blkRoot); err != nil {
		t.Fatal(err)
	}
	if err := db.SaveJustifiedCheckpoint(ctx, &ethpb.Checkpoint{Root: blkRoot[:]}); err != nil {
		t.Fatal(err)
	}

	// Test the start function.
	chainService.Start()

@@ -329,98 +306,95 @@ func TestChainStartStop_Initialized(t *testing.T) {
	if chainService.ctx.Err() != context.Canceled {
		t.Error("context was not canceled")
	}
	testutil.AssertLogsContain(t, hook, "Beacon chain data already exists, starting service")
	testutil.AssertLogsContain(t, hook, "data already exists")
}

func TestRecentCanonicalRoots_CanFilter(t *testing.T) {
	service := setupBeaconChain(t, nil, nil)
	blks := map[uint64][]byte{
		1:  {'A'},
		50: {'E'},
		2:  {'B'},
		99: {'F'},
		30: {'D'},
		3:  {'C'},
	}
	service.canonicalBlocks = blks
func TestChainService_InitializeBeaconChain(t *testing.T) {
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)
	ctx := context.Background()

	want := []*pbrpc.BlockRoot{{Slot: 99, Root: []byte{'F'}}}
	roots := service.RecentCanonicalRoots(1)
	if !reflect.DeepEqual(want, roots) {
		t.Log("Incorrect block roots received")
	bc := setupBeaconChain(t, db)

	// Set up 10 deposits pre chain start for validators to register
	count := uint64(10)
	deposits, _, _ := testutil.SetupInitialDeposits(t, count)
	if err := bc.initializeBeaconChain(ctx, time.Unix(0, 0), deposits, &ethpb.Eth1Data{}); err != nil {
		t.Fatal(err)
	}

	want = []*pbrpc.BlockRoot{
		{Slot: 99, Root: []byte{'F'}},
		{Slot: 50, Root: []byte{'E'}},
		{Slot: 30, Root: []byte{'D'}},
	}
	roots = service.RecentCanonicalRoots(3)
	if !reflect.DeepEqual(want, roots) {
		t.Log("Incorrect block roots received")
	s, err := bc.beaconDB.State(ctx, bytesutil.ToBytes32(bc.canonicalRoots[0]))
	if err != nil {
		t.Fatal(err)
	}

	want = []*pbrpc.BlockRoot{
		{Slot: 99, Root: []byte{'F'}},
		{Slot: 50, Root: []byte{'E'}},
		{Slot: 30, Root: []byte{'D'}},
		{Slot: 3, Root: []byte{'C'}},
		{Slot: 2, Root: []byte{'B'}},
		{Slot: 1, Root: []byte{'A'}},
	}
	roots = service.RecentCanonicalRoots(100)
	if !reflect.DeepEqual(want, roots) {
		t.Log("Incorrect block roots received")
	for _, v := range s.Validators {
		if !db.HasValidatorIndex(ctx, bytesutil.ToBytes48(v.PublicKey)) {
			t.Errorf("Validator %s missing from db", hex.EncodeToString(v.PublicKey))
		}
	}

	if bc.HeadState() == nil {
		t.Error("Head state can't be nil after initialize beacon chain")
	}
	if bc.HeadBlock() == nil {
		t.Error("Head block can't be nil after initialize beacon chain")
	}
	if bc.CanonicalRoot(0) == nil {
		t.Error("Canonical root for slot 0 can't be nil after initialize beacon chain")
	}
}

func TestCanonicalBlock_CanGet(t *testing.T) {
	db := internal.SetupDB(t)
	defer internal.TeardownDB(t, db)
	service := setupBeaconChain(t, db, nil)
func TestChainService_InitializeChainInfo(t *testing.T) {
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)
	ctx := context.Background()

	blk1 := &pb.BeaconBlock{Slot: 500}
	blk1Root, err := hashutil.HashBeaconBlock(blk1)
	genesis := b.NewGenesisBlock([]byte{})
	genesisRoot, err := ssz.SigningRoot(genesis)
	if err != nil {
		t.Fatal(err)
	}
	if err := db.SaveBlock(blk1); err != nil {
	if err := db.SaveGenesisBlockRoot(ctx, genesisRoot); err != nil {
		t.Fatal(err)
	}
	blk2 := &pb.BeaconBlock{Slot: 600}
	blk2Root, _ := hashutil.HashBeaconBlock(blk2)
	if err != nil {
	if err := db.SaveBlock(ctx, genesis); err != nil {
		t.Fatal(err)
	}
	if err := db.SaveBlock(blk2); err != nil {

	finalizedSlot := params.BeaconConfig().SlotsPerEpoch*2 + 1
	headBlock := &ethpb.BeaconBlock{Slot: finalizedSlot, ParentRoot: genesisRoot[:]}
	headState := &pb.BeaconState{Slot: finalizedSlot}
	headRoot, _ := ssz.SigningRoot(headBlock)
	if err := db.SaveState(ctx, headState, headRoot); err != nil {
		t.Fatal(err)
	}
	cMap := map[uint64][]byte{
		blk1.Slot: blk1Root[:],
		blk2.Slot: blk2Root[:],
		700:       {'A'},
	}
	service.canonicalBlocks = cMap
	blk1Db, err := service.CanonicalBlock(blk1.Slot)
	if err != nil {
	if err := db.SaveBlock(ctx, headBlock); err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(blk1, blk1Db) {
		t.Error("block 1 doesn't match")
	}
	blk2Db, err := service.CanonicalBlock(blk2.Slot)
	if err != nil {
	if err := db.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{
		Epoch: helpers.SlotToEpoch(finalizedSlot),
		Root:  headRoot[:],
	}); err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(blk2, blk2Db) {
		t.Error("block 2 doesn't match")
	}
	blk3Db, err := service.CanonicalBlock(999)
	if err != nil {
	if err := db.SaveBlock(ctx, headBlock); err != nil {
		t.Fatal(err)
	}
	if blk3Db != nil {
		t.Error("block 3 is supposed to be nil")
	c := &Service{beaconDB: db, canonicalRoots: make(map[uint64][]byte)}
	if err := c.initializeChainInfo(ctx); err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(c.HeadBlock(), headBlock) {
		t.Error("head block incorrect")
	}
	if !reflect.DeepEqual(c.HeadState(), headState) {
		t.Error("head state incorrect")
	}
	if headBlock.Slot != c.HeadSlot() {
		t.Error("head slot incorrect")
	}
	if !bytes.Equal(headRoot[:], c.HeadRoot()) {
		t.Error("head root incorrect")
	}
}

@@ -1,32 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = ["state_generator.go"],
    importpath = "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/stategenerator",
    visibility = ["//beacon-chain:__subpackages__"],
    deps = [
        "//beacon-chain/core/state:go_default_library",
        "//beacon-chain/db:go_default_library",
        "//proto/beacon/p2p/v1:go_default_library",
        "//shared/bytesutil:go_default_library",
        "//shared/hashutil:go_default_library",
        "//shared/params:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@io_opencensus_go//trace:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["state_generator_test.go"],
    embed = [":go_default_library"],
    deps = [
        "//beacon-chain/chaintest/backend:go_default_library",
        "//beacon-chain/db:go_default_library",
        "//proto/beacon/p2p/v1:go_default_library",
        "//shared/featureconfig:go_default_library",
        "//shared/params:go_default_library",
        "@com_github_gogo_protobuf//proto:go_default_library",
    ],
)
@@ -1,177 +0,0 @@
package stategenerator

import (
	"context"
	"fmt"

	"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
	"github.com/prysmaticlabs/prysm/beacon-chain/db"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
	"github.com/prysmaticlabs/prysm/shared/bytesutil"
	"github.com/prysmaticlabs/prysm/shared/hashutil"
	"github.com/prysmaticlabs/prysm/shared/params"
	"github.com/sirupsen/logrus"
	"go.opencensus.io/trace"
)

var log = logrus.WithField("prefix", "stategenerator")

// GenerateStateFromBlock generates state from the last finalized state to the input slot.
// Ex:
//   1A - 2B(finalized) - 3C - 4 - 5D - 6 - 7F (letters mean there's a block).
//   Input: slot 6.
//   Output: the resulting state of the state transition function after applying blocks C and D,
//   along with skipped slots 4 and 6.
func GenerateStateFromBlock(ctx context.Context, db *db.BeaconDB, slot uint64) (*pb.BeaconState, error) {
	ctx, span := trace.StartSpan(ctx, "beacon-chain.blockchain.stategenerator.GenerateStateFromBlock")
	defer span.End()
	fState, err := db.HistoricalStateFromSlot(ctx, slot)
	if err != nil {
		return nil, err
	}

	// Return the finalized state if its slot is the same as the input slot.
	if fState.Slot == slot {
		return fState, nil
	}

	// The input slot can't be smaller than the last finalized state's slot.
	if fState.Slot > slot {
		return nil, fmt.Errorf(
			"requested slot %d < current slot %d in the finalized beacon state",
			slot-params.BeaconConfig().GenesisSlot,
			fState.Slot-params.BeaconConfig().GenesisSlot,
		)
	}

	if fState.LatestBlock == nil {
		return nil, fmt.Errorf("latest head in state is nil %v", err)
	}

	fRoot, err := hashutil.HashBeaconBlock(fState.LatestBlock)
	if err != nil {
		return nil, fmt.Errorf("unable to get block root %v", err)
	}

	// From the input slot, retrieve its corresponding block and call that the most recent block.
	mostRecentBlock, err := db.BlockBySlot(ctx, slot)
	if err != nil {
		return nil, err
	}

	// If the most recent block is a skip block, we get its parent block.
	// Ex:
	//   1A - 2B - 3C - 4 - 5 (letters mean there's a block).
	//   Input slot is 5, but slots 4 and 5 are skipped, so we get block C from slot 3.
	lastSlot := slot
	for mostRecentBlock == nil {
		lastSlot--
		mostRecentBlock, err = db.BlockBySlot(ctx, lastSlot)
		if err != nil {
			return nil, err
		}
	}

	// Retrieve the block list to recompute state of the input slot.
	blocks, err := blocksSinceFinalized(ctx, db, mostRecentBlock, fRoot)
	if err != nil {
		return nil, fmt.Errorf("unable to look up block ancestors %v", err)
	}

	log.Infof("Recompute state starting last finalized slot %d and ending slot %d",
		fState.Slot-params.BeaconConfig().GenesisSlot, slot-params.BeaconConfig().GenesisSlot)
	postState := fState
	root := fRoot
	// This recomputes state up to the last available block.
	// Ex: 1A - 2B (finalized) - 3C - 4 - 5 - 6C - 7 - 8 (C is the last block).
	//   Input slot 8, this recomputes state to slot 6.
	for i := len(blocks); i > 0; i-- {
		block := blocks[i-1]
		if block.Slot <= postState.Slot {
			continue
		}
		// Running state transitions for skipped slots.
		for block.Slot != fState.Slot+1 {
			postState, err = state.ExecuteStateTransition(
				ctx,
				postState,
				nil,
				root,
				&state.TransitionConfig{
					VerifySignatures: false,
					Logging:          false,
				},
			)
			if err != nil {
				return nil, fmt.Errorf("could not execute state transition %v", err)
			}
		}
		postState, err = state.ExecuteStateTransition(
			ctx,
			postState,
			block,
			root,
			&state.TransitionConfig{
				VerifySignatures: false,
				Logging:          false,
			},
		)
		if err != nil {
			return nil, fmt.Errorf("could not execute state transition %v", err)
		}

		root, err = hashutil.HashBeaconBlock(block)
		if err != nil {
			return nil, fmt.Errorf("unable to get block root %v", err)
		}
	}

	// This recomputes state from the last block to the last slot if there are skipped slots after.
	// Ex: 1A - 2B (finalized) - 3C - 4 - 5 - 6C - 7 - 8 (7 and 8 are skipped slots).
	//   Input slot 8, this recomputes state from 6C to 8.
	for i := postState.Slot; i < slot; i++ {
		postState, err = state.ExecuteStateTransition(
			ctx,
			postState,
			nil,
			root,
			&state.TransitionConfig{
				VerifySignatures: false,
				Logging:          false,
			},
		)
		if err != nil {
			return nil, fmt.Errorf("could not execute state transition %v", err)
		}
	}

	log.Infof("Finished recompute state with slot %d and finalized epoch %d",
		postState.Slot-params.BeaconConfig().GenesisSlot, postState.FinalizedEpoch-params.BeaconConfig().GenesisEpoch)

	return postState, nil
}

// blocksSinceFinalized will return a list of linked blocks that's
// between the input block and the last finalized block in the db.
// The input block is also returned in the list.
// Ex:
//   A -> B(finalized) -> C -> D -> E.
//   Input: E, output: [E, D, C, B].
func blocksSinceFinalized(ctx context.Context, db *db.BeaconDB, block *pb.BeaconBlock,
	finalizedBlockRoot [32]byte) ([]*pb.BeaconBlock, error) {
	ctx, span := trace.StartSpan(ctx, "beacon-chain.blockchain.stategenerator.blocksSinceFinalized")
	defer span.End()
	blockAncestors := make([]*pb.BeaconBlock, 0)
	blockAncestors = append(blockAncestors, block)
	parentRoot := bytesutil.ToBytes32(block.ParentRootHash32)
	// Look up ancestors until the finalized block.
	for parentRoot != finalizedBlockRoot {
		retblock, err := db.Block(parentRoot)
		if err != nil {
			return nil, err
		}
		blockAncestors = append(blockAncestors, retblock)
		parentRoot = bytesutil.ToBytes32(retblock.ParentRootHash32)
	}
	return blockAncestors, nil
}
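For reference while reading this removal: callers regenerated a historical state by handing the helper a target slot, roughly as below. This is a sketch against the deleted API shown above; the wrapper function and the slot value are illustrative only.

	// Illustrative: recompute the state 40 slots past genesis.
	func regenerate(ctx context.Context, beaconDb *db.BeaconDB) (*pb.BeaconState, error) {
		target := params.BeaconConfig().GenesisSlot + 40
		return stategenerator.GenerateStateFromBlock(ctx, beaconDb, target)
	}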
@@ -1,189 +0,0 @@
package stategenerator_test

import (
	"context"
	"strings"
	"testing"

	"github.com/gogo/protobuf/proto"
	"github.com/prysmaticlabs/prysm/beacon-chain/blockchain/stategenerator"
	"github.com/prysmaticlabs/prysm/beacon-chain/chaintest/backend"
	"github.com/prysmaticlabs/prysm/beacon-chain/db"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
	"github.com/prysmaticlabs/prysm/shared/featureconfig"
	"github.com/prysmaticlabs/prysm/shared/params"
)

func init() {
	featureconfig.InitFeatureConfig(&featureconfig.FeatureFlagConfig{
		CacheTreeHash: false,
	})
}

func TestGenerateState_OK(t *testing.T) {
	b, err := backend.NewSimulatedBackend()
	if err != nil {
		t.Fatalf("Could not create a new simulated backend %v", err)
	}
	privKeys, err := b.SetupBackend(100)
	if err != nil {
		t.Fatalf("Could not set up backend %v", err)
	}
	beaconDb := b.DB()
	defer b.Shutdown()
	defer db.TeardownDB(beaconDb)
	ctx := context.Background()

	slotLimit := uint64(30)

	// Run the simulated chain for 30 slots, to get a state that we can save as finalized.
	for i := uint64(0); i < slotLimit; i++ {
		if err := b.GenerateBlockAndAdvanceChain(&backend.SimulatedObjects{}, privKeys); err != nil {
			t.Fatalf("Could not generate block and transition state successfully %v for slot %d", err, b.State().Slot+1)
		}
		inMemBlocks := b.InMemoryBlocks()
		if err := beaconDb.SaveBlock(inMemBlocks[len(inMemBlocks)-1]); err != nil {
			t.Fatalf("Unable to save block %v", err)
		}
		if err := beaconDb.UpdateChainHead(ctx, inMemBlocks[len(inMemBlocks)-1], b.State()); err != nil {
			t.Fatalf("Unable to save block %v", err)
		}
		if err := beaconDb.SaveFinalizedBlock(inMemBlocks[len(inMemBlocks)-1]); err != nil {
			t.Fatalf("Unable to save finalized state: %v", err)
		}
	}

	if err := beaconDb.SaveFinalizedState(b.State()); err != nil {
		t.Fatalf("Unable to save finalized state: %v", err)
	}

	// Run the chain for another 30 slots so that we can have this at the current head.
	for i := uint64(0); i < slotLimit; i++ {
		if err := b.GenerateBlockAndAdvanceChain(&backend.SimulatedObjects{}, privKeys); err != nil {
			t.Fatalf("Could not generate block and transition state successfully %v for slot %d", err, b.State().Slot+1)
		}
		inMemBlocks := b.InMemoryBlocks()
		if err := beaconDb.SaveBlock(inMemBlocks[len(inMemBlocks)-1]); err != nil {
			t.Fatalf("Unable to save block %v", err)
		}
		if err := beaconDb.UpdateChainHead(ctx, inMemBlocks[len(inMemBlocks)-1], b.State()); err != nil {
			t.Fatalf("Unable to save block %v", err)
		}
	}

	// Ran 30 slots to save finalized slot then ran another 30 slots.
	slotToGenerateTill := params.BeaconConfig().GenesisSlot + slotLimit*2
	newState, err := stategenerator.GenerateStateFromBlock(context.Background(), beaconDb, slotToGenerateTill)
	if err != nil {
		t.Fatalf("Unable to generate new state from previous finalized state %v", err)
	}

	if newState.Slot != b.State().Slot {
		t.Fatalf("The generated state and the current state do not have the same slot, expected: %d but got %d",
			b.State().Slot, newState.Slot)
	}

	if !proto.Equal(newState, b.State()) {
		t.Error("Generated and saved states are unequal")
	}
}

func TestGenerateState_WithNilBlocksOK(t *testing.T) {
	b, err := backend.NewSimulatedBackend()
	if err != nil {
		t.Fatalf("Could not create a new simulated backend %v", err)
	}
	privKeys, err := b.SetupBackend(100)
	if err != nil {
		t.Fatalf("Could not set up backend %v", err)
	}
	beaconDb := b.DB()
	defer b.Shutdown()
	defer db.TeardownDB(beaconDb)
	ctx := context.Background()

	slotLimit := uint64(30)

	// Run the simulated chain for 30 slots, to get a state that we can save as finalized.
	for i := uint64(0); i < slotLimit; i++ {
		if err := b.GenerateBlockAndAdvanceChain(&backend.SimulatedObjects{}, privKeys); err != nil {
			t.Fatalf("Could not generate block and transition state successfully %v for slot %d", err, b.State().Slot+1)
		}
		inMemBlocks := b.InMemoryBlocks()
		if err := beaconDb.SaveBlock(inMemBlocks[len(inMemBlocks)-1]); err != nil {
			t.Fatalf("Unable to save block %v", err)
		}
		if err := beaconDb.UpdateChainHead(ctx, inMemBlocks[len(inMemBlocks)-1], b.State()); err != nil {
			t.Fatalf("Unable to save block %v", err)
		}
		if err := beaconDb.SaveFinalizedBlock(inMemBlocks[len(inMemBlocks)-1]); err != nil {
			t.Fatalf("Unable to save finalized state: %v", err)
		}
	}

	if err := beaconDb.SaveFinalizedState(b.State()); err != nil {
		t.Fatalf("Unable to save finalized state")
	}

	slotsWithNil := uint64(10)

	// Run the chain for 10 slots with nil blocks.
	for i := uint64(0); i < slotsWithNil; i++ {
		if err := b.GenerateNilBlockAndAdvanceChain(); err != nil {
			t.Fatalf("Could not generate block and transition state successfully %v for slot %d", err, b.State().Slot+1)
		}
	}

	for i := uint64(0); i < slotLimit-slotsWithNil; i++ {
		if err := b.GenerateBlockAndAdvanceChain(&backend.SimulatedObjects{}, privKeys); err != nil {
			t.Fatalf("Could not generate block and transition state successfully %v for slot %d", err, b.State().Slot+1)
		}
		inMemBlocks := b.InMemoryBlocks()
		if err := beaconDb.SaveBlock(inMemBlocks[len(inMemBlocks)-1]); err != nil {
			t.Fatalf("Unable to save block %v", err)
		}
		if err := beaconDb.UpdateChainHead(ctx, inMemBlocks[len(inMemBlocks)-1], b.State()); err != nil {
			t.Fatalf("Unable to save block %v", err)
		}
	}

	// Ran 30 slots to save finalized slot then ran another 10 slots w/o blocks and 20 slots w/ blocks.
	slotToGenerateTill := params.BeaconConfig().GenesisSlot + slotLimit*2
	newState, err := stategenerator.GenerateStateFromBlock(context.Background(), beaconDb, slotToGenerateTill)
	if err != nil {
		t.Fatalf("Unable to generate new state from previous finalized state %v", err)
	}

	if newState.Slot != b.State().Slot {
		t.Fatalf("The generated state and the current state do not have the same slot, expected: %d but got %d",
			b.State().Slot, newState.Slot)
	}

	if !proto.Equal(newState, b.State()) {
		t.Error("generated and saved states are unequal")
	}
}

func TestGenerateState_NilLatestFinalizedBlock(t *testing.T) {
	b, err := backend.NewSimulatedBackend()
	if err != nil {
		t.Fatalf("Could not create a new simulated backend %v", err)
	}
	beaconDB := b.DB()
	defer b.Shutdown()
	defer db.TeardownDB(beaconDB)
	beaconState := &pb.BeaconState{
		Slot: params.BeaconConfig().GenesisSlot + params.BeaconConfig().SlotsPerEpoch*4,
	}
	if err := beaconDB.SaveFinalizedState(beaconState); err != nil {
		t.Fatalf("Unable to save finalized state")
	}
	if err := beaconDB.SaveHistoricalState(context.Background(), beaconState); err != nil {
		t.Fatalf("Unable to save historical state")
	}

	slot := params.BeaconConfig().GenesisSlot + 1 + params.BeaconConfig().SlotsPerEpoch*4
	want := "latest head in state is nil"
	if _, err := stategenerator.GenerateStateFromBlock(context.Background(), beaconDB, slot); !strings.Contains(err.Error(), want) {
		t.Errorf("Expected %v, received %v", want, err)
	}
}

beacon-chain/blockchain/testing/BUILD.bazel | 18 (new file)
@@ -0,0 +1,18 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    testonly = True,
    srcs = ["mock.go"],
    importpath = "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing",
    visibility = ["//beacon-chain:__subpackages__"],
    deps = [
        "//beacon-chain/db:go_default_library",
        "//proto/beacon/p2p/v1:go_default_library",
        "//proto/eth/v1alpha1:go_default_library",
        "//shared/event:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_prysmaticlabs_go_ssz//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
    ],
)

beacon-chain/blockchain/testing/mock.go | 129 (new file)
@@ -0,0 +1,129 @@
package testing

import (
	"bytes"
	"context"
	"time"

	"github.com/pkg/errors"
	"github.com/prysmaticlabs/go-ssz"
	"github.com/prysmaticlabs/prysm/beacon-chain/db"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
	ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
	"github.com/prysmaticlabs/prysm/shared/event"
	"github.com/sirupsen/logrus"
)

// ChainService defines the mock interface for testing
type ChainService struct {
	State               *pb.BeaconState
	Root                []byte
	Block               *ethpb.BeaconBlock
	FinalizedCheckPoint *ethpb.Checkpoint
	StateFeed           *event.Feed
	BlocksReceived      []*ethpb.BeaconBlock
	Genesis             time.Time
	Fork                *pb.Fork
	DB                  db.Database
}

// ReceiveBlock mocks ReceiveBlock method in chain service.
func (ms *ChainService) ReceiveBlock(ctx context.Context, block *ethpb.BeaconBlock) error {
	return nil
}

// ReceiveBlockNoVerify mocks ReceiveBlockNoVerify method in chain service.
func (ms *ChainService) ReceiveBlockNoVerify(ctx context.Context, block *ethpb.BeaconBlock) error {
	return nil
}

// ReceiveBlockNoPubsub mocks ReceiveBlockNoPubsub method in chain service.
func (ms *ChainService) ReceiveBlockNoPubsub(ctx context.Context, block *ethpb.BeaconBlock) error {
	return nil
}

// ReceiveBlockNoPubsubForkchoice mocks ReceiveBlockNoPubsubForkchoice method in chain service.
func (ms *ChainService) ReceiveBlockNoPubsubForkchoice(ctx context.Context, block *ethpb.BeaconBlock) error {
	if ms.State == nil {
		ms.State = &pb.BeaconState{}
	}
	if !bytes.Equal(ms.Root, block.ParentRoot) {
		return errors.Errorf("wanted %#x but got %#x", ms.Root, block.ParentRoot)
	}
	ms.State.Slot = block.Slot
	ms.BlocksReceived = append(ms.BlocksReceived, block)
	signingRoot, err := ssz.SigningRoot(block)
	if err != nil {
		return err
	}
	if ms.DB != nil {
		if err := ms.DB.SaveBlock(ctx, block); err != nil {
			return err
		}
		logrus.Infof("Saved block with root: %#x at slot %d", signingRoot, block.Slot)
	}
	ms.Root = signingRoot[:]
	ms.Block = block
	return nil
}

// HeadSlot mocks HeadSlot method in chain service.
func (ms *ChainService) HeadSlot() uint64 {
	return ms.State.Slot
}

// HeadRoot mocks HeadRoot method in chain service.
func (ms *ChainService) HeadRoot() []byte {
	return ms.Root
}

// HeadBlock mocks HeadBlock method in chain service.
func (ms *ChainService) HeadBlock() *ethpb.BeaconBlock {
	return ms.Block
}

// HeadState mocks HeadState method in chain service.
func (ms *ChainService) HeadState() *pb.BeaconState {
	return ms.State
}

// CurrentFork mocks CurrentFork method in chain service.
func (ms *ChainService) CurrentFork() *pb.Fork {
	return ms.Fork
}

// FinalizedCheckpt mocks FinalizedCheckpt method in chain service.
func (ms *ChainService) FinalizedCheckpt() *ethpb.Checkpoint {
	return ms.FinalizedCheckPoint
}

// ReceiveAttestation mocks ReceiveAttestation method in chain service.
func (ms *ChainService) ReceiveAttestation(context.Context, *ethpb.Attestation) error {
	return nil
}

// ReceiveAttestationNoPubsub mocks ReceiveAttestationNoPubsub method in chain service.
func (ms *ChainService) ReceiveAttestationNoPubsub(context.Context, *ethpb.Attestation) error {
	return nil
}

// StateInitializedFeed mocks the same method in the chain service.
func (ms *ChainService) StateInitializedFeed() *event.Feed {
	if ms.StateFeed != nil {
		return ms.StateFeed
	}
	ms.StateFeed = new(event.Feed)
	return ms.StateFeed
}

// HeadUpdatedFeed mocks the same method in the chain service.
func (ms *ChainService) HeadUpdatedFeed() *event.Feed {
	return new(event.Feed)
}

// GenesisTime mocks the same method in the chain service.
func (ms *ChainService) GenesisTime() time.Time {
	return ms.Genesis
}
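Downstream tests consume this mock in place of the real chain service; a minimal sketch of such a consumer (the import alias and the state/root values are illustrative, not taken from this diff):

	import (
		mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
		pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
	)

	chain := &mock.ChainService{
		State: &pb.BeaconState{Slot: 5},
		Root:  []byte{0xAB},
	}
	// Components under test read a deterministic head without running a real chain.
	_ = chain.HeadSlot() // returns 5
	_ = chain.HeadRoot() // returns []byte{0xAB}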

beacon-chain/cache/BUILD.bazel | 33 (vendored)
@@ -3,14 +3,25 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
    name = "go_default_library",
    srcs = [
        "block.go",
        "active_count.go",
        "active_indices.go",
        "attestation_data.go",
        "checkpoint_state.go",
        "committee.go",
        "common.go",
        "eth1_data.go",
    ],
    importpath = "github.com/prysmaticlabs/prysm/beacon-chain/cache",
    visibility = ["//beacon-chain:__subpackages__"],
    deps = [
        "//proto/beacon/p2p/v1:go_default_library",
        "//proto/beacon/rpc/v1:go_default_library",
        "//proto/eth/v1alpha1:go_default_library",
        "//shared/featureconfig:go_default_library",
        "//shared/hashutil:go_default_library",
        "//shared/params:go_default_library",
        "//shared/sliceutil:go_default_library",
        "@com_github_gogo_protobuf//proto:go_default_library",
        "@com_github_prometheus_client_golang//prometheus:go_default_library",
        "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
        "@io_k8s_client_go//tools/cache:go_default_library",
@@ -19,10 +30,26 @@ go_library(

go_test(
    name = "go_default_test",
    size = "small",
    srcs = [
        "block_test.go",
        "active_count_test.go",
        "active_indices_test.go",
        "attestation_data_test.go",
        "benchmarks_test.go",
        "checkpoint_state_test.go",
        "committee_test.go",
        "eth1_data_test.go",
        "feature_flag_test.go",
    ],
    embed = [":go_default_library"],
    deps = ["//proto/beacon/p2p/v1:go_default_library"],
    race = "on",
    deps = [
        "//proto/beacon/p2p/v1:go_default_library",
        "//proto/beacon/rpc/v1:go_default_library",
        "//proto/eth/v1alpha1:go_default_library",
        "//shared/featureconfig:go_default_library",
        "//shared/hashutil:go_default_library",
        "//shared/params:go_default_library",
        "@com_github_gogo_protobuf//proto:go_default_library",
    ],
)

beacon-chain/cache/active_count.go | 102 (new file, vendored)
@@ -0,0 +1,102 @@
package cache

import (
	"errors"
	"strconv"
	"sync"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/prysmaticlabs/prysm/shared/featureconfig"
	"github.com/prysmaticlabs/prysm/shared/params"
	"k8s.io/client-go/tools/cache"
)

var (
	// ErrNotActiveCountInfo will be returned when a cache object is not a pointer to
	// an ActiveCountByEpoch struct.
	ErrNotActiveCountInfo = errors.New("object is not an active count obj")

	// maxActiveCountListSize defines the max number of active counts the cache can hold.
	maxActiveCountListSize = 1000

	// Metrics.
	activeCountCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
		Name: "active_validator_count_cache_miss",
		Help: "The number of active validator count requests that aren't present in the cache.",
	})
	activeCountCacheHit = promauto.NewCounter(prometheus.CounterOpts{
		Name: "active_validator_count_cache_hit",
		Help: "The number of active validator count requests that are present in the cache.",
	})
)

// ActiveCountByEpoch defines the active validator count per epoch.
type ActiveCountByEpoch struct {
	Epoch       uint64
	ActiveCount uint64
}

// ActiveCountCache is a struct with 1 queue for looking up active count by epoch.
type ActiveCountCache struct {
	activeCountCache *cache.FIFO
	lock             sync.RWMutex
}

// activeCountKeyFn takes the epoch as the key for the active count of a given epoch.
func activeCountKeyFn(obj interface{}) (string, error) {
	aInfo, ok := obj.(*ActiveCountByEpoch)
	if !ok {
		return "", ErrNotActiveCountInfo
	}

	return strconv.Itoa(int(aInfo.Epoch)), nil
}

// NewActiveCountCache creates a new active count cache for storing/accessing active validator count.
func NewActiveCountCache() *ActiveCountCache {
	return &ActiveCountCache{
		activeCountCache: cache.NewFIFO(activeCountKeyFn),
	}
}

// ActiveCountInEpoch fetches the active validator count by epoch. Returns the count if it
// exists in the cache. Otherwise it returns FarFutureEpoch as a sentinel miss value, and nil error.
func (c *ActiveCountCache) ActiveCountInEpoch(epoch uint64) (uint64, error) {
	if !featureconfig.Get().EnableActiveCountCache {
		return params.BeaconConfig().FarFutureEpoch, nil
	}
	c.lock.RLock()
	defer c.lock.RUnlock()
	obj, exists, err := c.activeCountCache.GetByKey(strconv.Itoa(int(epoch)))
	if err != nil {
		return params.BeaconConfig().FarFutureEpoch, err
	}

	if exists {
		activeCountCacheHit.Inc()
	} else {
		activeCountCacheMiss.Inc()
		return params.BeaconConfig().FarFutureEpoch, nil
	}

	aInfo, ok := obj.(*ActiveCountByEpoch)
	if !ok {
		return params.BeaconConfig().FarFutureEpoch, ErrNotActiveCountInfo
	}

	return aInfo.ActiveCount, nil
}

// AddActiveCount adds an ActiveCountByEpoch object to the cache. This method also trims the least
// recently added ActiveCountByEpoch object if the cache size has reached the max cache size limit.
func (c *ActiveCountCache) AddActiveCount(activeCount *ActiveCountByEpoch) error {
	c.lock.Lock()
	defer c.lock.Unlock()
	if err := c.activeCountCache.AddIfNotPresent(activeCount); err != nil {
		return err
	}

	trim(c.activeCountCache, maxActiveCountListSize)
	return nil
}
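From the caller's side this cache is read-through: a FarFutureEpoch return signals a miss (including when the feature flag is off), and the caller recomputes and repopulates. A short sketch (computeActiveCount is a hypothetical fallback, not part of this diff):

	c := cache.NewActiveCountCache()
	count, err := c.ActiveCountInEpoch(epoch)
	if err != nil {
		return err
	}
	if count == params.BeaconConfig().FarFutureEpoch {
		count = computeActiveCount(state, epoch) // hypothetical recompute from state
		if err := c.AddActiveCount(&cache.ActiveCountByEpoch{Epoch: epoch, ActiveCount: count}); err != nil {
			return err
		}
	}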

beacon-chain/cache/active_count_test.go | 83 (new file, vendored)
@@ -0,0 +1,83 @@
package cache

import (
	"reflect"
	"strconv"
	"testing"

	"github.com/prysmaticlabs/prysm/shared/params"
)

func TestActiveCountKeyFn_OK(t *testing.T) {
	aInfo := &ActiveCountByEpoch{
		Epoch:       999,
		ActiveCount: 10,
	}

	key, err := activeCountKeyFn(aInfo)
	if err != nil {
		t.Fatal(err)
	}
	if key != strconv.Itoa(int(aInfo.Epoch)) {
		t.Errorf("Incorrect hash key: %s, expected %s", key, strconv.Itoa(int(aInfo.Epoch)))
	}
}

func TestActiveCountKeyFn_InvalidObj(t *testing.T) {
	_, err := activeCountKeyFn("bad")
	if err != ErrNotActiveCountInfo {
		t.Errorf("Expected error %v, got %v", ErrNotActiveCountInfo, err)
	}
}

func TestActiveCountCache_ActiveCountByEpoch(t *testing.T) {
	cache := NewActiveCountCache()

	aInfo := &ActiveCountByEpoch{
		Epoch:       99,
		ActiveCount: 11,
	}
	activeCount, err := cache.ActiveCountInEpoch(aInfo.Epoch)
	if err != nil {
		t.Fatal(err)
	}
	if activeCount != params.BeaconConfig().FarFutureEpoch {
		t.Error("Expected active count not to exist in empty cache")
	}

	if err := cache.AddActiveCount(aInfo); err != nil {
		t.Fatal(err)
	}
	activeCount, err = cache.ActiveCountInEpoch(aInfo.Epoch)
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(activeCount, aInfo.ActiveCount) {
		t.Errorf(
			"Expected fetched active count to be %v, got %v",
			aInfo.ActiveCount,
			activeCount,
		)
	}
}

func TestActiveCount_MaxSize(t *testing.T) {
	cache := NewActiveCountCache()

	for i := uint64(0); i < 1001; i++ {
		aInfo := &ActiveCountByEpoch{
			Epoch: i,
		}
		if err := cache.AddActiveCount(aInfo); err != nil {
			t.Fatal(err)
		}
	}

	if len(cache.activeCountCache.ListKeys()) != maxActiveCountListSize {
		t.Errorf(
			"Expected hash cache key size to be %d, got %d",
			maxActiveCountListSize,
			len(cache.activeCountCache.ListKeys()),
		)
	}
}

beacon-chain/cache/active_indices.go | 106 (new file, vendored)
@@ -0,0 +1,106 @@
package cache

import (
	"errors"
	"strconv"
	"sync"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/prysmaticlabs/prysm/shared/featureconfig"
	"k8s.io/client-go/tools/cache"
)

var (
	// ErrNotActiveIndicesInfo will be returned when a cache object is not a pointer to
	// an ActiveIndicesByEpoch struct.
	ErrNotActiveIndicesInfo = errors.New("object is not an active indices list")

	// maxActiveIndicesListSize defines the max number of active indices lists the cache can hold.
	maxActiveIndicesListSize = 4

	// Metrics.
	activeIndicesCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
		Name: "active_validator_indices_cache_miss",
		Help: "The number of active validator indices requests that aren't present in the cache.",
	})
	activeIndicesCacheHit = promauto.NewCounter(prometheus.CounterOpts{
		Name: "active_validator_indices_cache_hit",
		Help: "The number of active validator indices requests that are present in the cache.",
	})
)

// ActiveIndicesByEpoch defines the active validator indices per epoch.
type ActiveIndicesByEpoch struct {
	Epoch         uint64
	ActiveIndices []uint64
}

// ActiveIndicesCache is a struct with 1 queue for looking up active indices by epoch.
type ActiveIndicesCache struct {
	activeIndicesCache *cache.FIFO
	lock               sync.RWMutex
}

// activeIndicesKeyFn takes the epoch as the key for the active indices of a given epoch.
func activeIndicesKeyFn(obj interface{}) (string, error) {
	aInfo, ok := obj.(*ActiveIndicesByEpoch)
	if !ok {
		return "", ErrNotActiveIndicesInfo
	}

	return strconv.Itoa(int(aInfo.Epoch)), nil
}

// NewActiveIndicesCache creates a new active indices cache for storing/accessing active validator indices.
func NewActiveIndicesCache() *ActiveIndicesCache {
	return &ActiveIndicesCache{
		activeIndicesCache: cache.NewFIFO(activeIndicesKeyFn),
	}
}

// ActiveIndicesInEpoch fetches the active validator indices by epoch. Returns the indices if they
// exist in the cache. Otherwise it returns nil, nil to signal a miss.
func (c *ActiveIndicesCache) ActiveIndicesInEpoch(epoch uint64) ([]uint64, error) {
	if !featureconfig.Get().EnableActiveIndicesCache {
		return nil, nil
	}
	c.lock.RLock()
	defer c.lock.RUnlock()
	obj, exists, err := c.activeIndicesCache.GetByKey(strconv.Itoa(int(epoch)))
	if err != nil {
		return nil, err
	}

	if exists {
		activeIndicesCacheHit.Inc()
	} else {
		activeIndicesCacheMiss.Inc()
		return nil, nil
	}

	aInfo, ok := obj.(*ActiveIndicesByEpoch)
	if !ok {
		return nil, ErrNotActiveIndicesInfo
	}

	return aInfo.ActiveIndices, nil
}

// AddActiveIndicesList adds an ActiveIndicesByEpoch object to the cache. This method also trims the least
// recently added ActiveIndicesByEpoch object if the cache size has reached the max cache size limit.
func (c *ActiveIndicesCache) AddActiveIndicesList(activeIndices *ActiveIndicesByEpoch) error {
	c.lock.Lock()
	defer c.lock.Unlock()
	if err := c.activeIndicesCache.AddIfNotPresent(activeIndices); err != nil {
		return err
	}

	trim(c.activeIndicesCache, maxActiveIndicesListSize)
	return nil
}

// ActiveIndicesKeys returns the keys of the active indices cache.
func (c *ActiveIndicesCache) ActiveIndicesKeys() []string {
	return c.activeIndicesCache.ListKeys()
}
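The indices cache follows the same miss-then-populate pattern as the count cache, except that a nil slice (rather than a sentinel epoch) signals the miss, and the FIFO keeps only the 4 most recently added epochs. Sketch (fetchActiveIndices is a hypothetical fallback):

	c := cache.NewActiveIndicesCache()
	indices, err := c.ActiveIndicesInEpoch(epoch)
	if err != nil {
		return err
	}
	if indices == nil { // miss, or cache disabled by feature flag
		indices = fetchActiveIndices(state, epoch) // hypothetical recompute from state
		if err := c.AddActiveIndicesList(&cache.ActiveIndicesByEpoch{Epoch: epoch, ActiveIndices: indices}); err != nil {
			return err
		}
	}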

beacon-chain/cache/active_indices_test.go | 82 (new file, vendored)
@@ -0,0 +1,82 @@
package cache

import (
	"reflect"
	"strconv"
	"testing"
)

func TestActiveIndicesKeyFn_OK(t *testing.T) {
	aInfo := &ActiveIndicesByEpoch{
		Epoch:         999,
		ActiveIndices: []uint64{1, 2, 3, 4, 5},
	}

	key, err := activeIndicesKeyFn(aInfo)
	if err != nil {
		t.Fatal(err)
	}
	if key != strconv.Itoa(int(aInfo.Epoch)) {
		t.Errorf("Incorrect hash key: %s, expected %s", key, strconv.Itoa(int(aInfo.Epoch)))
	}
}

func TestActiveIndicesKeyFn_InvalidObj(t *testing.T) {
	_, err := activeIndicesKeyFn("bad")
	if err != ErrNotActiveIndicesInfo {
		t.Errorf("Expected error %v, got %v", ErrNotActiveIndicesInfo, err)
	}
}

func TestActiveIndicesCache_ActiveIndicesByEpoch(t *testing.T) {
	cache := NewActiveIndicesCache()

	aInfo := &ActiveIndicesByEpoch{
		Epoch:         99,
		ActiveIndices: []uint64{1, 2, 3, 4},
	}

	activeIndices, err := cache.ActiveIndicesInEpoch(aInfo.Epoch)
	if err != nil {
		t.Fatal(err)
	}
	if activeIndices != nil {
		t.Error("Expected active indices not to exist in empty cache")
	}

	if err := cache.AddActiveIndicesList(aInfo); err != nil {
		t.Fatal(err)
	}
	activeIndices, err = cache.ActiveIndicesInEpoch(aInfo.Epoch)
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(activeIndices, aInfo.ActiveIndices) {
		t.Errorf(
			"Expected fetched active indices to be %v, got %v",
			aInfo.ActiveIndices,
			activeIndices,
		)
	}
}

func TestActiveIndices_MaxSize(t *testing.T) {
	cache := NewActiveIndicesCache()

	for i := uint64(0); i < 100; i++ {
		aInfo := &ActiveIndicesByEpoch{
			Epoch: i,
		}
		if err := cache.AddActiveIndicesList(aInfo); err != nil {
			t.Fatal(err)
		}
	}

	if len(cache.activeIndicesCache.ListKeys()) != maxActiveIndicesListSize {
		t.Errorf(
			"Expected hash cache key size to be %d, got %d",
			maxActiveIndicesListSize,
			len(cache.activeIndicesCache.ListKeys()),
		)
	}
}
||||
beacon-chain/cache/attestation_data.go (vendored, new file, 190 lines)
@@ -0,0 +1,190 @@
package cache

import (
	"context"
	"errors"
	"fmt"
	"math"
	"sync"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/rpc/v1"
	ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
	"github.com/prysmaticlabs/prysm/shared/featureconfig"
	"k8s.io/client-go/tools/cache"
)

var (
	// Delay parameters
	minDelay    = float64(10)        // 10 nanoseconds
	maxDelay    = float64(100000000) // 0.1 second
	delayFactor = 1.1

	// Metrics
	attestationCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
		Name: "attestation_cache_miss",
		Help: "The number of attestation data requests that aren't present in the cache.",
	})
	attestationCacheHit = promauto.NewCounter(prometheus.CounterOpts{
		Name: "attestation_cache_hit",
		Help: "The number of attestation data requests that are present in the cache.",
	})
	attestationCacheSize = promauto.NewGauge(prometheus.GaugeOpts{
		Name: "attestation_cache_size",
		Help: "The number of attestation data in the attestations cache",
	})
)

// ErrAlreadyInProgress appears when attempting to mark a cache as in progress while it is
// already in progress. The client should handle this error and wait for the in progress
// data to resolve via Get.
var ErrAlreadyInProgress = errors.New("already in progress")

// AttestationCache is used to store the cached results of an AttestationData request.
type AttestationCache struct {
	cache      *cache.FIFO
	lock       sync.RWMutex
	inProgress map[string]bool
}

// NewAttestationCache initializes the map and underlying cache.
func NewAttestationCache() *AttestationCache {
	return &AttestationCache{
		cache:      cache.NewFIFO(wrapperToKey),
		inProgress: make(map[string]bool),
	}
}

// Get waits for any in progress calculation to complete before returning a
// cached response, if any.
func (c *AttestationCache) Get(ctx context.Context, req *pb.AttestationRequest) (*ethpb.AttestationData, error) {
	if !featureconfig.Get().EnableAttestationCache {
		// Return a miss result if cache is not enabled.
		attestationCacheMiss.Inc()
		return nil, nil
	}

	if req == nil {
		return nil, errors.New("nil attestation data request")
	}

	s, e := reqToKey(req)
	if e != nil {
		return nil, e
	}

	delay := minDelay

	// Another identical request may be in progress already. Let's wait until
	// any in progress request resolves or our timeout is exceeded.
	for {
		if ctx.Err() != nil {
			return nil, ctx.Err()
		}

		c.lock.RLock()
		if !c.inProgress[s] {
			c.lock.RUnlock()
			break
		}
		c.lock.RUnlock()

		// This increasing backoff is to decrease the CPU cycles while waiting
		// for the in progress boolean to flip to false.
		time.Sleep(time.Duration(delay) * time.Nanosecond)
		delay *= delayFactor
		delay = math.Min(delay, maxDelay)
	}

	item, exists, err := c.cache.GetByKey(s)
	if err != nil {
		return nil, err
	}

	if exists && item != nil && item.(*attestationReqResWrapper).res != nil {
		attestationCacheHit.Inc()
		return item.(*attestationReqResWrapper).res, nil
	}
	attestationCacheMiss.Inc()
	return nil, nil
}

// MarkInProgress marks a request as in progress so that any other identical request
// will block on Get until MarkNotInProgress is called.
func (c *AttestationCache) MarkInProgress(req *pb.AttestationRequest) error {
	if !featureconfig.Get().EnableAttestationCache {
		return nil
	}

	c.lock.Lock()
	defer c.lock.Unlock()
	s, e := reqToKey(req)
	if e != nil {
		return e
	}
	if c.inProgress[s] {
		return ErrAlreadyInProgress
	}
	if featureconfig.Get().EnableAttestationCache {
		c.inProgress[s] = true
	}
	return nil
}

// MarkNotInProgress will release the lock on a given request. This should be
// called after Put.
func (c *AttestationCache) MarkNotInProgress(req *pb.AttestationRequest) error {
	if !featureconfig.Get().EnableAttestationCache {
		return nil
	}

	c.lock.Lock()
	defer c.lock.Unlock()
	s, e := reqToKey(req)
	if e != nil {
		return e
	}
	delete(c.inProgress, s)
	return nil
}

// Put the response in the cache.
func (c *AttestationCache) Put(ctx context.Context, req *pb.AttestationRequest, res *ethpb.AttestationData) error {
	if !featureconfig.Get().EnableAttestationCache {
		return nil
	}

	data := &attestationReqResWrapper{
		req,
		res,
	}
	if err := c.cache.AddIfNotPresent(data); err != nil {
		return err
	}
	trim(c.cache, maxCacheSize)

	attestationCacheSize.Set(float64(len(c.cache.List())))
	return nil
}

func wrapperToKey(i interface{}) (string, error) {
	w := i.(*attestationReqResWrapper)
	if w == nil {
		return "", errors.New("nil wrapper")
	}
	if w.req == nil {
		return "", errors.New("nil wrapper.request")
	}
	return reqToKey(w.req)
}

func reqToKey(req *pb.AttestationRequest) (string, error) {
	return fmt.Sprintf("%d-%d", req.CommitteeIndex, req.Slot), nil
}

type attestationReqResWrapper struct {
	req *pb.AttestationRequest
	res *ethpb.AttestationData
}
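The in-progress marking above implements a compute-once pattern for identical attestation requests. The following is a hedged sketch of how a caller might wire it together; computeAttestationData and the package name are hypothetical stand-ins, and the cache is a pass-through unless the EnableAttestationCache feature flag is set.

package example

import (
	"context"

	"github.com/prysmaticlabs/prysm/beacon-chain/cache"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/rpc/v1"
	ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
)

// computeAttestationData is a hypothetical stand-in for the expensive work
// the cache is guarding.
func computeAttestationData() *ethpb.AttestationData {
	return &ethpb.AttestationData{Target: &ethpb.Checkpoint{Epoch: 1}}
}

// attestationData shows the intended compute-once flow around the cache.
func attestationData(ctx context.Context, c *cache.AttestationCache, req *pb.AttestationRequest) (*ethpb.AttestationData, error) {
	// Get blocks while an identical request is marked in progress, then
	// returns the cached result if one was stored.
	if res, err := c.Get(ctx, req); err != nil || res != nil {
		return res, err
	}
	// Cache miss: try to claim the request. ErrAlreadyInProgress means
	// another goroutine claimed it first, so wait on Get again.
	if err := c.MarkInProgress(req); err != nil {
		if err == cache.ErrAlreadyInProgress {
			return c.Get(ctx, req)
		}
		return nil, err
	}
	// Release waiters once the result is stored, per the MarkNotInProgress doc.
	defer func() { _ = c.MarkNotInProgress(req) }()

	res := computeAttestationData()
	if err := c.Put(ctx, req, res); err != nil {
		return nil, err
	}
	return res, nil
}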
beacon-chain/cache/attestation_data_test.go (vendored, new file, 55 lines)
@@ -0,0 +1,55 @@
package cache_test

import (
	"context"
	"testing"

	"github.com/gogo/protobuf/proto"
	"github.com/prysmaticlabs/prysm/beacon-chain/cache"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/rpc/v1"
	ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
)

func TestAttestationCache_RoundTrip(t *testing.T) {
	ctx := context.Background()
	c := cache.NewAttestationCache()

	req := &pb.AttestationRequest{
		CommitteeIndex: 0,
		Slot:           1,
	}

	response, err := c.Get(ctx, req)
	if err != nil {
		t.Error(err)
	}

	if response != nil {
		t.Errorf("Empty cache returned an object: %v", response)
	}

	if err := c.MarkInProgress(req); err != nil {
		t.Error(err)
	}

	res := &ethpb.AttestationData{
		Target: &ethpb.Checkpoint{Epoch: 5},
	}

	if err = c.Put(ctx, req, res); err != nil {
		t.Error(err)
	}

	if err := c.MarkNotInProgress(req); err != nil {
		t.Error(err)
	}

	response, err = c.Get(ctx, req)
	if err != nil {
		t.Error(err)
	}

	if !proto.Equal(response, res) {
		t.Error("Expected equal protos to return from cache")
	}
}
beacon-chain/cache/benchmarks_test.go (vendored, new file, 45 lines)
@@ -0,0 +1,45 @@
package cache

import (
	"testing"
)

var indices300k = createIndices(300000)
var epoch = uint64(1)

func createIndices(count int) *ActiveIndicesByEpoch {
	indices := make([]uint64, 0, count)
	for i := 0; i < count; i++ {
		indices = append(indices, uint64(i))
	}
	return &ActiveIndicesByEpoch{
		Epoch:         epoch,
		ActiveIndices: indices,
	}
}

func BenchmarkCachingAddRetrieve(b *testing.B) {
	c := NewActiveIndicesCache()

	b.Run("ADD300K", func(b *testing.B) {
		b.N = 10
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			if err := c.AddActiveIndicesList(indices300k); err != nil {
				b.Fatal(err)
			}
		}
	})

	b.Run("RETR300K", func(b *testing.B) {
		b.N = 10
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			if _, err := c.ActiveIndicesInEpoch(epoch); err != nil {
				b.Fatal(err)
			}
		}
	})
}
beacon-chain/cache/block.go (vendored, deleted, 104 lines)
@@ -1,104 +0,0 @@
package cache

import (
	"errors"
	"strconv"
	"sync"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
	"k8s.io/client-go/tools/cache"
)

var (
	// ErrNotAncestorCacheObj will be returned when a cache object is not a pointer to
	// a block ancestor cache obj.
	ErrNotAncestorCacheObj = errors.New("object is not an ancestor object for cache")
	// Metrics
	ancestorBlockCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
		Name: "ancestor_block_cache_miss",
		Help: "The number of ancestor block requests that aren't present in the cache.",
	})
	ancestorBlockCacheHit = promauto.NewCounter(prometheus.CounterOpts{
		Name: "ancestor_block_cache_hit",
		Help: "The number of ancestor block requests that are present in the cache.",
	})
	ancestorBlockCacheSize = promauto.NewGauge(prometheus.GaugeOpts{
		Name: "ancestor_block_cache_size",
		Help: "The number of ancestor blocks in the ancestorBlock cache",
	})
)

// AncestorInfo defines the cached ancestor block object for height.
type AncestorInfo struct {
	Height uint64
	Hash   []byte
	Target *pb.AttestationTarget
}

// AncestorBlockCache structs with 1 queue for looking up block ancestor by height.
type AncestorBlockCache struct {
	ancestorBlockCache *cache.FIFO
	lock               sync.RWMutex
}

// heightKeyFn takes the string representation of the block hash + height as the key
// for the ancestor of a given block (AncestorInfo).
func heightKeyFn(obj interface{}) (string, error) {
	aInfo, ok := obj.(*AncestorInfo)
	if !ok {
		return "", ErrNotAncestorCacheObj
	}

	return string(aInfo.Hash) + strconv.Itoa(int(aInfo.Height)), nil
}

// NewBlockAncestorCache creates a new block ancestor cache for storing/accessing block ancestor
// from memory.
func NewBlockAncestorCache() *AncestorBlockCache {
	return &AncestorBlockCache{
		ancestorBlockCache: cache.NewFIFO(heightKeyFn),
	}
}

// AncestorBySlot fetches block's ancestor by height. Returns true with a
// reference to the ancestor block, if exists. Otherwise returns false, nil.
func (a *AncestorBlockCache) AncestorBySlot(blockHash []byte, height uint64) (*AncestorInfo, error) {
	a.lock.RLock()
	defer a.lock.RUnlock()

	obj, exists, err := a.ancestorBlockCache.GetByKey(string(blockHash) + strconv.Itoa(int(height)))
	if err != nil {
		return nil, err
	}

	if exists {
		ancestorBlockCacheHit.Inc()
	} else {
		ancestorBlockCacheMiss.Inc()
		return nil, nil
	}

	aInfo, ok := obj.(*AncestorInfo)
	if !ok {
		return nil, ErrNotAncestorCacheObj
	}

	return aInfo, nil
}

// AddBlockAncestor adds block ancestor object to the cache. This method also trims the least
// recently added ancestor if the cache size has reached the max cache size limit.
func (a *AncestorBlockCache) AddBlockAncestor(ancestorInfo *AncestorInfo) error {
	a.lock.Lock()
	defer a.lock.Unlock()

	if err := a.ancestorBlockCache.AddIfNotPresent(ancestorInfo); err != nil {
		return err
	}

	trim(a.ancestorBlockCache, maxCacheSize)
	ancestorBlockCacheSize.Set(float64(len(a.ancestorBlockCache.ListKeys())))
	return nil
}
beacon-chain/cache/block_test.go (vendored, deleted, 111 lines)
@@ -1,111 +0,0 @@
package cache

import (
	"reflect"
	"strconv"
	"testing"

	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
)

func TestHeightKeyFn_OK(t *testing.T) {
	height := uint64(999)
	hash := []byte{'A'}
	aInfo := &AncestorInfo{
		Height: height,
		Hash:   hash,
		Target: &pb.AttestationTarget{
			Slot:      height,
			BlockRoot: hash,
		},
	}

	key, err := heightKeyFn(aInfo)
	if err != nil {
		t.Fatal(err)
	}

	strHeightKey := string(aInfo.Target.BlockRoot) + strconv.Itoa(int(aInfo.Target.Slot))
	if key != strHeightKey {
		t.Errorf("Incorrect hash key: %s, expected %s", key, strHeightKey)
	}
}

func TestHeightKeyFn_InvalidObj(t *testing.T) {
	_, err := heightKeyFn("bad")
	if err != ErrNotAncestorCacheObj {
		t.Errorf("Expected error %v, got %v", ErrNotAncestorCacheObj, err)
	}
}

func TestAncestorCache_AncestorInfoByHeight(t *testing.T) {
	cache := NewBlockAncestorCache()

	height := uint64(123)
	hash := []byte{'B'}
	aInfo := &AncestorInfo{
		Height: height,
		Hash:   hash,
		Target: &pb.AttestationTarget{
			Slot:      height,
			BlockRoot: hash,
		},
	}

	fetchedInfo, err := cache.AncestorBySlot(hash, height)
	if err != nil {
		t.Fatal(err)
	}
	if fetchedInfo != nil {
		t.Error("Expected ancestor info not to exist in empty cache")
	}

	if err := cache.AddBlockAncestor(aInfo); err != nil {
		t.Fatal(err)
	}
	fetchedInfo, err = cache.AncestorBySlot(hash, height)
	if err != nil {
		t.Fatal(err)
	}
	if fetchedInfo == nil {
		t.Error("Expected ancestor info to exist")
	}
	if fetchedInfo.Height != height {
		t.Errorf(
			"Expected fetched height to be %d, got %d",
			aInfo.Height,
			fetchedInfo.Height,
		)
	}
	if !reflect.DeepEqual(fetchedInfo.Target, aInfo.Target) {
		t.Errorf(
			"Expected fetched info target to be %v, got %v",
			aInfo.Target,
			fetchedInfo.Target,
		)
	}
}

func TestBlockAncestor_maxSize(t *testing.T) {
	cache := NewBlockAncestorCache()

	for i := 0; i < maxCacheSize+10; i++ {
		aInfo := &AncestorInfo{
			Height: uint64(i),
			Target: &pb.AttestationTarget{
				Slot: uint64(i),
			},
		}
		if err := cache.AddBlockAncestor(aInfo); err != nil {
			t.Fatal(err)
		}
	}

	if len(cache.ancestorBlockCache.ListKeys()) != maxCacheSize {
		t.Errorf(
			"Expected hash cache key size to be %d, got %d",
			maxCacheSize,
			len(cache.ancestorBlockCache.ListKeys()),
		)
	}
}
beacon-chain/cache/checkpoint_state.go (vendored, new file, 114 lines)
@@ -0,0 +1,114 @@
package cache

import (
	"errors"
	"sync"

	"github.com/gogo/protobuf/proto"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
	ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
	"github.com/prysmaticlabs/prysm/shared/hashutil"
	"k8s.io/client-go/tools/cache"
)

var (
	// ErrNotCheckpointState will be returned when a cache object is not a pointer to
	// a CheckpointState struct.
	ErrNotCheckpointState = errors.New("object is not a state by check point struct")

	// maxCheckpointStateSize defines the max number of entries the checkpoint to state cache can contain.
	maxCheckpointStateSize = 4

	// Metrics.
	checkpointStateMiss = promauto.NewCounter(prometheus.CounterOpts{
		Name: "check_point_state_cache_miss",
		Help: "The number of check point state requests that aren't present in the cache.",
	})
	checkpointStateHit = promauto.NewCounter(prometheus.CounterOpts{
		Name: "check_point_state_cache_hit",
		Help: "The number of check point state requests that are present in the cache.",
	})
)

// CheckpointState defines the cached processed beacon state for a given checkpoint.
type CheckpointState struct {
	Checkpoint *ethpb.Checkpoint
	State      *pb.BeaconState
}

// CheckpointStateCache is a struct with 1 queue for looking up state by checkpoint.
type CheckpointStateCache struct {
	cache *cache.FIFO
	lock  sync.RWMutex
}

// checkpointState takes the hash of the checkpoint as the key for the cached state.
func checkpointState(obj interface{}) (string, error) {
	info, ok := obj.(*CheckpointState)
	if !ok {
		return "", ErrNotCheckpointState
	}

	h, err := hashutil.HashProto(info.Checkpoint)
	if err != nil {
		return "", err
	}
	return string(h[:]), nil
}

// NewCheckpointStateCache creates a new checkpoint state cache for storing/accessing processed state.
func NewCheckpointStateCache() *CheckpointStateCache {
	return &CheckpointStateCache{
		cache: cache.NewFIFO(checkpointState),
	}
}

// StateByCheckpoint fetches state by checkpoint. Returns true with a
// reference to the CheckpointState info, if exists. Otherwise returns false, nil.
func (c *CheckpointStateCache) StateByCheckpoint(cp *ethpb.Checkpoint) (*pb.BeaconState, error) {
	c.lock.RLock()
	defer c.lock.RUnlock()
	h, err := hashutil.HashProto(cp)
	if err != nil {
		return nil, err
	}

	obj, exists, err := c.cache.GetByKey(string(h[:]))
	if err != nil {
		return nil, err
	}

	if exists {
		checkpointStateHit.Inc()
	} else {
		checkpointStateMiss.Inc()
		return nil, nil
	}

	info, ok := obj.(*CheckpointState)
	if !ok {
		return nil, ErrNotCheckpointState
	}

	return proto.Clone(info.State).(*pb.BeaconState), nil
}

// AddCheckpointState adds a CheckpointState object to the cache. This method also trims the least
// recently added CheckpointState object if the cache size has reached the max cache size limit.
func (c *CheckpointStateCache) AddCheckpointState(cp *CheckpointState) error {
	c.lock.Lock()
	defer c.lock.Unlock()
	if err := c.cache.AddIfNotPresent(cp); err != nil {
		return err
	}

	trim(c.cache, maxCheckpointStateSize)
	return nil
}

// CheckpointStateKeys returns the keys of the state in cache.
func (c *CheckpointStateCache) CheckpointStateKeys() []string {
	return c.cache.ListKeys()
}
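A short usage sketch of the checkpoint-to-state cache above (values are illustrative). The design choice worth noting is that StateByCheckpoint returns a proto.Clone of the stored state, so a caller can mutate the result without corrupting the cached copy.

package example

import (
	"github.com/prysmaticlabs/prysm/beacon-chain/cache"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
	ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
)

// cachedState is a hypothetical caller of the checkpoint state cache.
func cachedState() (*pb.BeaconState, error) {
	c := cache.NewCheckpointStateCache()
	cp := &ethpb.Checkpoint{Epoch: 1, Root: []byte{'A'}}

	// Store the processed state for this checkpoint.
	if err := c.AddCheckpointState(&cache.CheckpointState{
		Checkpoint: cp,
		State:      &pb.BeaconState{Slot: 64},
	}); err != nil {
		return nil, err
	}

	// The key is the hash of the checkpoint proto, so any equal checkpoint
	// value hits the cache. The result is a clone of the stored state.
	return c.StateByCheckpoint(cp)
}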
beacon-chain/cache/checkpoint_state_test.go (vendored, new file, 110 lines)
@@ -0,0 +1,110 @@
package cache

import (
	"reflect"
	"testing"

	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
	ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
	"github.com/prysmaticlabs/prysm/shared/hashutil"
)

func TestCheckpointStateCacheKeyFn_OK(t *testing.T) {
	cp := &ethpb.Checkpoint{Epoch: 1, Root: []byte{'A'}}
	info := &CheckpointState{
		Checkpoint: cp,
		State:      &pb.BeaconState{Slot: 64},
	}
	key, err := checkpointState(info)
	if err != nil {
		t.Fatal(err)
	}
	wantedKey, err := hashutil.HashProto(cp)
	if err != nil {
		t.Fatal(err)
	}
	if key != string(wantedKey[:]) {
		t.Errorf("Incorrect hash key: %s, expected %s", key, string(wantedKey[:]))
	}
}

func TestCheckpointStateCacheKeyFn_InvalidObj(t *testing.T) {
	_, err := checkpointState("bad")
	if err != ErrNotCheckpointState {
		t.Errorf("Expected error %v, got %v", ErrNotCheckpointState, err)
	}
}

func TestCheckpointStateCache_StateByCheckpoint(t *testing.T) {
	cache := NewCheckpointStateCache()

	cp1 := &ethpb.Checkpoint{Epoch: 1, Root: []byte{'A'}}
	info1 := &CheckpointState{
		Checkpoint: cp1,
		State:      &pb.BeaconState{Slot: 64},
	}
	state, err := cache.StateByCheckpoint(cp1)
	if err != nil {
		t.Fatal(err)
	}
	if state != nil {
		t.Error("Expected state not to exist in empty cache")
	}

	if err := cache.AddCheckpointState(info1); err != nil {
		t.Fatal(err)
	}
	state, err = cache.StateByCheckpoint(cp1)
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(state, info1.State) {
		t.Error("incorrectly cached state")
	}

	cp2 := &ethpb.Checkpoint{Epoch: 2, Root: []byte{'B'}}
	info2 := &CheckpointState{
		Checkpoint: cp2,
		State:      &pb.BeaconState{Slot: 128},
	}
	if err := cache.AddCheckpointState(info2); err != nil {
		t.Fatal(err)
	}
	state, err = cache.StateByCheckpoint(cp2)
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(state, info2.State) {
		t.Error("incorrectly cached state")
	}

	state, err = cache.StateByCheckpoint(cp1)
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(state, info1.State) {
		t.Error("incorrectly cached state")
	}
}

func TestCheckpointStateCache_MaxSize(t *testing.T) {
	c := NewCheckpointStateCache()

	for i := 0; i < maxCheckpointStateSize+100; i++ {
		info := &CheckpointState{
			Checkpoint: &ethpb.Checkpoint{Epoch: uint64(i)},
			State:      &pb.BeaconState{Slot: uint64(i)},
		}
		if err := c.AddCheckpointState(info); err != nil {
			t.Fatal(err)
		}
	}

	if len(c.cache.ListKeys()) != maxCheckpointStateSize {
		t.Errorf(
			"Expected hash cache key size to be %d, got %d",
			maxCheckpointStateSize,
			len(c.cache.ListKeys()),
		)
	}
}
beacon-chain/cache/committee.go (vendored, 219 lines)
@@ -7,121 +7,214 @@ import (
 
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promauto"
+	"github.com/prysmaticlabs/prysm/shared/featureconfig"
 	"github.com/prysmaticlabs/prysm/shared/params"
+	"github.com/prysmaticlabs/prysm/shared/sliceutil"
 	"k8s.io/client-go/tools/cache"
 )
 
 var (
-	// ErrNotACommitteeInfo will be returned when a cache object is not a pointer to
-	// a committeeInfo struct.
-	ErrNotACommitteeInfo = errors.New("object is not an committee info")
+	// ErrNotCommittee will be returned when a cache object is not a pointer to
+	// a Committee struct.
+	ErrNotCommittee = errors.New("object is not a committee struct")
 
-	// maxCacheSize is 4x of the epoch length for additional cache padding.
-	// Requests should be only accessing committees within defined epoch length.
-	maxCacheSize = int(4 * params.BeaconConfig().SlotsPerEpoch)
+	// maxShuffledIndicesSize defines the max number of shuffled indices lists the cache can contain:
+	// 3 for the previous, current and next epoch.
+	maxShuffledIndicesSize = 3
 
 	// Metrics
-	committeeCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
+	// CommitteeCacheMiss tracks the number of committee requests that aren't present in the cache.
+	CommitteeCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
 		Name: "committee_cache_miss",
 		Help: "The number of committee requests that aren't present in the cache.",
 	})
-	committeeCacheHit = promauto.NewCounter(prometheus.CounterOpts{
+	// CommitteeCacheHit tracks the number of committee requests that are in the cache.
+	CommitteeCacheHit = promauto.NewCounter(prometheus.CounterOpts{
 		Name: "committee_cache_hit",
 		Help: "The number of committee requests that are present in the cache.",
 	})
-	committeeCacheSize = promauto.NewGauge(prometheus.GaugeOpts{
-		Name: "committee_cache_size",
-		Help: "The number of committees in the committee cache",
-	})
 )
 
-// CommitteeInfo defines the validator committee of slot and shard combinations.
-type CommitteeInfo struct {
-	Committee []uint64
-	Shard     uint64
+// Committee defines the committee per epoch and index.
+type Committee struct {
+	CommitteeCount uint64
+	Epoch          uint64
+	Committee      []uint64
 }
 
-// CommitteesInSlot specifies how many CommitteeInfos are in a given slot.
-type CommitteesInSlot struct {
-	Slot       uint64
-	Committees []*CommitteeInfo
+// CommitteeCache is a struct with 1 queue for looking up shuffled indices list by epoch and committee index.
+type CommitteeCache struct {
+	CommitteeCache *cache.FIFO
+	lock           sync.RWMutex
 }
 
-// CommitteesCache structs with 1 queue for looking up committees by slot.
-type CommitteesCache struct {
-	committeesCache *cache.FIFO
-	lock            sync.RWMutex
-}
-
-// slotKeyFn takes the string representation of the slot number as the key
-// for the committees of a given slot (CommitteesInSlot).
-func slotKeyFn(obj interface{}) (string, error) {
-	cInfo, ok := obj.(*CommitteesInSlot)
+// committeeKeyFn takes the epoch as the key to retrieve shuffled indices of a committee in a given epoch.
+func committeeKeyFn(obj interface{}) (string, error) {
+	info, ok := obj.(*Committee)
 	if !ok {
-		return "", ErrNotACommitteeInfo
+		return "", ErrNotCommittee
 	}
 
-	return strconv.Itoa(int(cInfo.Slot)), nil
+	return strconv.Itoa(int(info.Epoch)), nil
 }
 
-// NewCommitteesCache creates a new committee cache for storing/accessing blockInfo from
-// memory.
-func NewCommitteesCache() *CommitteesCache {
-	return &CommitteesCache{
-		committeesCache: cache.NewFIFO(slotKeyFn),
+// NewCommitteeCache creates a new committee cache for storing/accessing shuffled indices of a committee.
+func NewCommitteeCache() *CommitteeCache {
+	return &CommitteeCache{
+		CommitteeCache: cache.NewFIFO(committeeKeyFn),
 	}
 }
 
-// CommitteesInfoBySlot fetches CommitteesInSlot by slot. Returns true with a
-// reference to the committees info, if exists. Otherwise returns false, nil.
-func (c *CommitteesCache) CommitteesInfoBySlot(slot uint64) (*CommitteesInSlot, error) {
+// ShuffledIndices fetches the shuffled indices by slot and committee index. Every list of indices
+// represents one committee. Returns true if the list exists for the slot and committee index. Otherwise returns false, nil.
+func (c *CommitteeCache) ShuffledIndices(slot uint64, index uint64) ([]uint64, error) {
+	if !featureconfig.Get().EnableShuffledIndexCache && !featureconfig.Get().EnableNewCache {
+		return nil, nil
+	}
 	c.lock.RLock()
 	defer c.lock.RUnlock()
-
-	obj, exists, err := c.committeesCache.GetByKey(strconv.Itoa(int(slot)))
+	epoch := int(slot / params.BeaconConfig().SlotsPerEpoch)
+	obj, exists, err := c.CommitteeCache.GetByKey(strconv.Itoa(epoch))
 	if err != nil {
 		return nil, err
 	}
 
 	if exists {
-		committeeCacheHit.Inc()
+		CommitteeCacheHit.Inc()
 	} else {
-		committeeCacheMiss.Inc()
+		CommitteeCacheMiss.Inc()
 		return nil, nil
 	}
 
-	cInfo, ok := obj.(*CommitteesInSlot)
+	item, ok := obj.(*Committee)
 	if !ok {
-		return nil, ErrNotACommitteeInfo
+		return nil, ErrNotCommittee
 	}
 
-	return cInfo, nil
+	committeeCountPerSlot := uint64(1)
+	if item.CommitteeCount/params.BeaconConfig().SlotsPerEpoch > 1 {
+		committeeCountPerSlot = item.CommitteeCount / params.BeaconConfig().SlotsPerEpoch
+	}
+
+	indexOffSet := index + (slot%params.BeaconConfig().SlotsPerEpoch)*committeeCountPerSlot
+	start, end := startEndIndices(item, indexOffSet)
+	return item.Committee[start:end], nil
 }
 
-// AddCommittees adds CommitteesInSlot object to the cache. This method also trims the least
-// recently added committeeInfo object if the cache size has reached the max cache size limit.
-func (c *CommitteesCache) AddCommittees(committees *CommitteesInSlot) error {
+// AddCommitteeShuffledList adds a Committee shuffled list object to the cache. This method also trims
+// the least recently added list if the cache size has reached the max cache size limit.
+func (c *CommitteeCache) AddCommitteeShuffledList(committee *Committee) error {
+	if !featureconfig.Get().EnableShuffledIndexCache && !featureconfig.Get().EnableNewCache {
+		return nil
+	}
 	c.lock.Lock()
 	defer c.lock.Unlock()
-
-	if err := c.committeesCache.AddIfNotPresent(committees); err != nil {
+	if err := c.CommitteeCache.AddIfNotPresent(committee); err != nil {
 		return err
 	}
 
-	trim(c.committeesCache, maxCacheSize)
-	committeeCacheSize.Set(float64(len(c.committeesCache.ListKeys())))
+	trim(c.CommitteeCache, maxShuffledIndicesSize)
 	return nil
 }
 
-// trim the FIFO queue to the maxSize.
-func trim(queue *cache.FIFO, maxSize int) {
-	for s := len(queue.ListKeys()); s > maxSize; s-- {
-		// #nosec G104 popProcessNoopFunc never returns an error
-		_, _ = queue.Pop(popProcessNoopFunc)
+// Epochs returns the epochs stored in the committee cache. These are the keys to the cache.
+func (c *CommitteeCache) Epochs() ([]uint64, error) {
+	if !featureconfig.Get().EnableShuffledIndexCache {
+		return nil, nil
 	}
+	c.lock.RLock()
+	defer c.lock.RUnlock()
+
+	epochs := make([]uint64, len(c.CommitteeCache.ListKeys()))
+	for i, s := range c.CommitteeCache.ListKeys() {
+		epoch, err := strconv.Atoi(s)
+		if err != nil {
+			return nil, err
+		}
+		epochs[i] = uint64(epoch)
+	}
+	return epochs, nil
 }
 
-// popProcessNoopFunc is a no-op function that never returns an error.
-func popProcessNoopFunc(obj interface{}) error {
-	return nil
+// EpochInCache returns true if an input epoch is part of keys in cache.
+func (c *CommitteeCache) EpochInCache(wantedEpoch uint64) (bool, error) {
+	if !featureconfig.Get().EnableShuffledIndexCache && !featureconfig.Get().EnableNewCache {
+		return false, nil
+	}
+	c.lock.RLock()
+	defer c.lock.RUnlock()
+
+	for _, s := range c.CommitteeCache.ListKeys() {
+		epoch, err := strconv.Atoi(s)
+		if err != nil {
+			return false, err
+		}
+		if wantedEpoch == uint64(epoch) {
+			return true, nil
+		}
+	}
+	return false, nil
 }
+
+// CommitteeCountPerSlot returns the number of committees in a given slot as stored in cache.
+func (c *CommitteeCache) CommitteeCountPerSlot(slot uint64) (uint64, bool, error) {
+	if !featureconfig.Get().EnableShuffledIndexCache && !featureconfig.Get().EnableNewCache {
+		return 0, false, nil
+	}
+	c.lock.RLock()
+	defer c.lock.RUnlock()
+	epoch := int(slot / params.BeaconConfig().SlotsPerEpoch)
+	obj, exists, err := c.CommitteeCache.GetByKey(strconv.Itoa(int(epoch)))
+	if err != nil {
+		return 0, false, err
+	}
+
+	if exists {
+		CommitteeCacheHit.Inc()
+	} else {
+		CommitteeCacheMiss.Inc()
+		return 0, false, nil
+	}
+
+	item, ok := obj.(*Committee)
+	if !ok {
+		return 0, false, ErrNotCommittee
+	}
+
+	return item.CommitteeCount / params.BeaconConfig().SlotsPerEpoch, true, nil
+}
+
+// ActiveIndices returns the active indices of a given epoch stored in cache.
+func (c *CommitteeCache) ActiveIndices(epoch uint64) ([]uint64, error) {
+	if !featureconfig.Get().EnableShuffledIndexCache && !featureconfig.Get().EnableNewCache {
+		return nil, nil
+	}
+
+	c.lock.RLock()
+	defer c.lock.RUnlock()
+	obj, exists, err := c.CommitteeCache.GetByKey(strconv.Itoa(int(epoch)))
+	if err != nil {
+		return nil, err
+	}
+
+	if exists {
+		CommitteeCacheHit.Inc()
+	} else {
+		CommitteeCacheMiss.Inc()
+		return nil, nil
+	}
+
+	item, ok := obj.(*Committee)
+	if !ok {
+		return nil, ErrNotCommittee
+	}
+
+	return item.Committee, nil
+}
+
+func startEndIndices(c *Committee, index uint64) (uint64, uint64) {
+	validatorCount := uint64(len(c.Committee))
+	start := sliceutil.SplitOffset(validatorCount, c.CommitteeCount, index)
+	end := sliceutil.SplitOffset(validatorCount, c.CommitteeCount, index+1)
+
+	return start, end
+}
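The slicing in ShuffledIndices and startEndIndices leans on sliceutil.SplitOffset to cut one shuffled validator list into near-equal committees. As a hedged, dependency-free illustration, the sketch below assumes SplitOffset follows the eth2 get_split_offset formula, (listSize * index) / chunks; treat that as an assumption rather than a statement about the sliceutil package.

package main

import "fmt"

// splitOffset is assumed here to match the eth2 get_split_offset helper
// that sliceutil.SplitOffset is understood to implement: the boundary of
// the index-th of `chunks` near-equal pieces of a list.
func splitOffset(listSize, chunks, index uint64) uint64 {
	return (listSize * index) / chunks
}

func main() {
	// A 6-validator shuffled list split into 3 committees, matching the
	// Committee{CommitteeCount: 3, Committee: []uint64{1, 2, 3, 4, 5, 6}}
	// fixture used in the tests below.
	committee := []uint64{1, 2, 3, 4, 5, 6}
	for idx := uint64(0); idx < 3; idx++ {
		start := splitOffset(uint64(len(committee)), 3, idx)
		end := splitOffset(uint64(len(committee)), 3, idx+1)
		fmt.Println(idx, committee[start:end]) // 0 [1 2], 1 [3 4], 2 [5 6]
	}
}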
beacon-chain/cache/committee_test.go (vendored, 193 lines)
@@ -4,93 +4,176 @@ import (
 	"reflect"
 	"strconv"
 	"testing"
+
+	"github.com/prysmaticlabs/prysm/shared/params"
 )
 
-func TestSlotKeyFn_OK(t *testing.T) {
-	cInfo := &CommitteesInSlot{
-		Slot: 999,
-		Committees: []*CommitteeInfo{
-			{Shard: 1, Committee: []uint64{1, 2, 3}},
-			{Shard: 1, Committee: []uint64{4, 5, 6}},
-		},
+func TestCommitteeKeyFn_OK(t *testing.T) {
+	item := &Committee{
+		Epoch:          999,
+		CommitteeCount: 1,
+		Committee:      []uint64{1, 2, 3, 4, 5},
 	}
 
-	key, err := slotKeyFn(cInfo)
+	key, err := committeeKeyFn(item)
 	if err != nil {
 		t.Fatal(err)
 	}
-	strSlot := strconv.Itoa(int(cInfo.Slot))
-	if key != strSlot {
-		t.Errorf("Incorrect hash key: %s, expected %s", key, strSlot)
+	if key != strconv.Itoa(int(item.Epoch)) {
+		t.Errorf("Incorrect hash key: %s, expected %s", key, strconv.Itoa(int(item.Epoch)))
 	}
 }
 
-func TestSlotKeyFn_InvalidObj(t *testing.T) {
-	_, err := slotKeyFn("bad")
-	if err != ErrNotACommitteeInfo {
-		t.Errorf("Expected error %v, got %v", ErrNotACommitteeInfo, err)
+func TestCommitteeKeyFn_InvalidObj(t *testing.T) {
+	_, err := committeeKeyFn("bad")
+	if err != ErrNotCommittee {
+		t.Errorf("Expected error %v, got %v", ErrNotCommittee, err)
 	}
 }
 
-func TestCommitteesCache_CommitteesInfoBySlot(t *testing.T) {
-	cache := NewCommitteesCache()
+func TestCommitteeCache_CommitteesByEpoch(t *testing.T) {
+	cache := NewCommitteeCache()
 
-	cInfo := &CommitteesInSlot{
-		Slot:       123,
-		Committees: []*CommitteeInfo{{Shard: 456}},
+	item := &Committee{
+		Epoch:          1,
+		Committee:      []uint64{1, 2, 3, 4, 5, 6},
+		CommitteeCount: 3,
 	}
 
-	fetchedInfo, err := cache.CommitteesInfoBySlot(cInfo.Slot)
+	slot := uint64(item.Epoch * params.BeaconConfig().SlotsPerEpoch)
+	committeeIndex := uint64(1)
+	indices, err := cache.ShuffledIndices(slot, committeeIndex)
 	if err != nil {
 		t.Fatal(err)
 	}
-	if fetchedInfo != nil {
-		t.Error("Expected committees info not to exist in empty cache")
+	if indices != nil {
+		t.Error("Expected committee not to exist in empty cache")
 	}
 
-	if err := cache.AddCommittees(cInfo); err != nil {
+	if err := cache.AddCommitteeShuffledList(item); err != nil {
 		t.Fatal(err)
 	}
-	fetchedInfo, err = cache.CommitteesInfoBySlot(cInfo.Slot)
+	wantedIndex := uint64(0)
+	indices, err = cache.ShuffledIndices(slot, wantedIndex)
 	if err != nil {
 		t.Fatal(err)
 	}
-	if fetchedInfo == nil {
-		t.Error("Expected committee info to exist")
-	}
-	if fetchedInfo.Slot != cInfo.Slot {
-		t.Errorf(
-			"Expected fetched slot number to be %d, got %d",
-			cInfo.Slot,
-			fetchedInfo.Slot,
-		)
-	}
-	if !reflect.DeepEqual(fetchedInfo.Committees, cInfo.Committees) {
+
+	start, end := startEndIndices(item, wantedIndex)
+	if !reflect.DeepEqual(indices, item.Committee[start:end]) {
 		t.Errorf(
-			"Expected fetched info committee to be %v, got %v",
-			cInfo.Committees,
-			fetchedInfo.Committees,
+			"Expected fetched active indices to be %v, got %v",
+			indices,
+			item.Committee[start:end],
 		)
 	}
 }
 
-func TestBlockCache_maxSize(t *testing.T) {
-	cache := NewCommitteesCache()
-
-	for i := 0; i < maxCacheSize+10; i++ {
-		cInfo := &CommitteesInSlot{
-			Slot: uint64(i),
-		}
-		if err := cache.AddCommittees(cInfo); err != nil {
-			t.Fatal(err)
-		}
+func TestCommitteeCache_CanRotate(t *testing.T) {
+	cache := NewCommitteeCache()
+	item1 := &Committee{Epoch: 1}
+	if err := cache.AddCommitteeShuffledList(item1); err != nil {
+		t.Fatal(err)
+	}
+	item2 := &Committee{Epoch: 2}
+	if err := cache.AddCommitteeShuffledList(item2); err != nil {
+		t.Fatal(err)
+	}
+	epochs, err := cache.Epochs()
+	if err != nil {
+		t.Fatal(err)
+	}
+	wanted := item1.Epoch + item2.Epoch
+	if sum(epochs) != wanted {
+		t.Errorf("Wanted: %v, got: %v", wanted, sum(epochs))
 	}
 
-	if len(cache.committeesCache.ListKeys()) != maxCacheSize {
-		t.Errorf(
-			"Expected hash cache key size to be %d, got %d",
-			maxCacheSize,
-			len(cache.committeesCache.ListKeys()),
-		)
+	item3 := &Committee{Epoch: 4}
+	if err := cache.AddCommitteeShuffledList(item3); err != nil {
+		t.Fatal(err)
+	}
+	epochs, err = cache.Epochs()
+	if err != nil {
+		t.Fatal(err)
+	}
+	wanted = item1.Epoch + item2.Epoch + item3.Epoch
+	if sum(epochs) != wanted {
+		t.Errorf("Wanted: %v, got: %v", wanted, sum(epochs))
+	}
+
+	item4 := &Committee{Epoch: 6}
+	if err := cache.AddCommitteeShuffledList(item4); err != nil {
+		t.Fatal(err)
+	}
+	epochs, err = cache.Epochs()
+	if err != nil {
+		t.Fatal(err)
+	}
+	wanted = item2.Epoch + item3.Epoch + item4.Epoch
+	if sum(epochs) != wanted {
+		t.Errorf("Wanted: %v, got: %v", wanted, sum(epochs))
+	}
+}
+
+func TestCommitteeCache_EpochInCache(t *testing.T) {
+	cache := NewCommitteeCache()
+	if err := cache.AddCommitteeShuffledList(&Committee{Epoch: 1}); err != nil {
+		t.Fatal(err)
+	}
+	if err := cache.AddCommitteeShuffledList(&Committee{Epoch: 2}); err != nil {
+		t.Fatal(err)
+	}
+	if err := cache.AddCommitteeShuffledList(&Committee{Epoch: 99}); err != nil {
+		t.Fatal(err)
+	}
+	if err := cache.AddCommitteeShuffledList(&Committee{Epoch: 100}); err != nil {
+		t.Fatal(err)
+	}
+	inCache, err := cache.EpochInCache(1)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if inCache {
+		t.Error("Epoch shouldn't be in cache")
+	}
+	inCache, err = cache.EpochInCache(100)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !inCache {
+		t.Error("Epoch should be in cache")
+	}
+}
+
+func TestCommitteeCache_ActiveIndices(t *testing.T) {
+	cache := NewCommitteeCache()
+
+	item := &Committee{Epoch: 1, Committee: []uint64{1, 2, 3, 4, 5, 6}}
+	indices, err := cache.ActiveIndices(1)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if indices != nil {
+		t.Error("Expected committee count not to exist in empty cache")
+	}
+
+	if err := cache.AddCommitteeShuffledList(item); err != nil {
+		t.Fatal(err)
+	}
+
+	indices, err = cache.ActiveIndices(1)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !reflect.DeepEqual(indices, item.Committee) {
+		t.Error("Did not receive correct active indices from cache")
+	}
+}
+
+func sum(values []uint64) uint64 {
+	sum := uint64(0)
+	for _, v := range values {
+		sum = v + sum
+	}
+	return sum
 }
beacon-chain/cache/common.go (vendored, new file, 25 lines)
@@ -0,0 +1,25 @@
package cache

import (
	"github.com/prysmaticlabs/prysm/shared/params"
	"k8s.io/client-go/tools/cache"
)

var (
	// maxCacheSize is 4x of the epoch length for additional cache padding.
	// Requests should be only accessing committees within defined epoch length.
	maxCacheSize = int(4 * params.BeaconConfig().SlotsPerEpoch)
)

// trim the FIFO queue to the maxSize.
func trim(queue *cache.FIFO, maxSize int) {
	for s := len(queue.ListKeys()); s > maxSize; s-- {
		// #nosec G104 popProcessNoopFunc never returns an error
		_, _ = queue.Pop(popProcessNoopFunc)
	}
}

// popProcessNoopFunc is a no-op function that never returns an error.
func popProcessNoopFunc(obj interface{}) error {
	return nil
}
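The eviction behavior of trim above is FIFO: it pops the oldest entries until the queue is back under maxSize. Below is a hedged, dependency-free analogue of that loop on a plain slice of keys, for illustration only.

package main

import "fmt"

// trimKeys is a plain-slice analogue of trim above: the FIFO's Pop with a
// no-op process function removes the least recently added entry, so the
// loop discards the oldest keys until the size bound holds.
func trimKeys(queue []string, maxSize int) []string {
	for len(queue) > maxSize {
		queue = queue[1:] // drop the oldest key, as Pop does on the FIFO
	}
	return queue
}

func main() {
	q := []string{"0", "1", "2", "3", "4"}
	fmt.Println(trimKeys(q, 3)) // [2 3 4]
}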
beacon-chain/cache/depositcache/BUILD.bazel (vendored, new file, 34 lines)
@@ -0,0 +1,34 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = [
        "deposits_cache.go",
        "pending_deposits.go",
    ],
    importpath = "github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache",
    visibility = ["//beacon-chain:__subpackages__"],
    deps = [
        "//proto/eth/v1alpha1:go_default_library",
        "//shared/hashutil:go_default_library",
        "@com_github_prometheus_client_golang//prometheus:go_default_library",
        "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@io_opencensus_go//trace:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = [
        "deposits_test.go",
        "pending_deposits_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
        "//proto/eth/v1alpha1:go_default_library",
        "//shared/bytesutil:go_default_library",
        "@com_github_gogo_protobuf//proto:go_default_library",
        "@com_github_sirupsen_logrus//hooks/test:go_default_library",
    ],
)
beacon-chain/cache/depositcache/deposits_cache.go (vendored, new file, 159 lines)
@@ -0,0 +1,159 @@
package depositcache

import (
	"bytes"
	"context"
	"encoding/hex"
	"math/big"
	"sort"
	"sync"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
	log "github.com/sirupsen/logrus"
	"go.opencensus.io/trace"
)

var (
	historicalDepositsCount = promauto.NewCounter(prometheus.CounterOpts{
		Name: "beacondb_all_deposits",
		Help: "The number of total deposits in the beaconDB in-memory database",
	})
)

// DepositFetcher defines a struct which can retrieve deposit information from a store.
type DepositFetcher interface {
	AllDeposits(ctx context.Context, beforeBlk *big.Int) []*ethpb.Deposit
	DepositByPubkey(ctx context.Context, pubKey []byte) (*ethpb.Deposit, *big.Int)
	DepositsNumberAndRootAtHeight(ctx context.Context, blockHeight *big.Int) (uint64, [32]byte)
}

// DepositCache stores all in-memory deposit objects. This
// stores all the deposit related data that is required by the beacon-node.
type DepositCache struct {
	// Beacon chain deposits in memory.
	pendingDeposits       []*DepositContainer
	deposits              []*DepositContainer
	depositsLock          sync.RWMutex
	chainStartDeposits    []*ethpb.Deposit
	chainstartPubkeys     map[string]bool
	chainstartPubkeysLock sync.RWMutex
}

// DepositContainer object for holding the deposit and a reference to the block in
// which the deposit transaction was included in the proof of work chain.
type DepositContainer struct {
	Deposit     *ethpb.Deposit
	Block       *big.Int
	Index       int
	depositRoot [32]byte
}

// NewDepositCache instantiates a new deposit cache.
func NewDepositCache() *DepositCache {
	return &DepositCache{
		pendingDeposits:    []*DepositContainer{},
		deposits:           []*DepositContainer{},
		chainstartPubkeys:  make(map[string]bool),
		chainStartDeposits: make([]*ethpb.Deposit, 0),
	}
}

// InsertDeposit into the database. If deposit or block number are nil
// then this method does nothing.
func (dc *DepositCache) InsertDeposit(ctx context.Context, d *ethpb.Deposit, blockNum *big.Int, index int, depositRoot [32]byte) {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.InsertDeposit")
	defer span.End()
	if d == nil || blockNum == nil {
		log.WithFields(log.Fields{
			"block":        blockNum,
			"deposit":      d,
			"index":        index,
			"deposit root": hex.EncodeToString(depositRoot[:]),
		}).Warn("Ignoring nil deposit insertion")
		return
	}
	dc.depositsLock.Lock()
	defer dc.depositsLock.Unlock()
	// Keep the slice sorted on insertion in order to avoid costly sorting on retrieval.
	heightIdx := sort.Search(len(dc.deposits), func(i int) bool { return dc.deposits[i].Index >= index })
	newDeposits := append([]*DepositContainer{{Deposit: d, Block: blockNum, depositRoot: depositRoot, Index: index}}, dc.deposits[heightIdx:]...)
	dc.deposits = append(dc.deposits[:heightIdx], newDeposits...)
	historicalDepositsCount.Inc()
}

// MarkPubkeyForChainstart sets the pubkey deposit status to true.
func (dc *DepositCache) MarkPubkeyForChainstart(ctx context.Context, pubkey string) {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.MarkPubkeyForChainstart")
	defer span.End()
	dc.chainstartPubkeysLock.Lock()
	defer dc.chainstartPubkeysLock.Unlock()
	dc.chainstartPubkeys[pubkey] = true
}

// PubkeyInChainstart returns bool for whether the pubkey passed in has deposited.
func (dc *DepositCache) PubkeyInChainstart(ctx context.Context, pubkey string) bool {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.PubkeyInChainstart")
	defer span.End()
	dc.chainstartPubkeysLock.Lock()
	defer dc.chainstartPubkeysLock.Unlock()
	if dc.chainstartPubkeys != nil {
		return dc.chainstartPubkeys[pubkey]
	}
	dc.chainstartPubkeys = make(map[string]bool)
	return false
}

// AllDeposits returns all historical deposits until the given block number
// (inclusive). If no block is specified then this method returns all historical deposits.
func (dc *DepositCache) AllDeposits(ctx context.Context, beforeBlk *big.Int) []*ethpb.Deposit {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.AllDeposits")
	defer span.End()
	dc.depositsLock.RLock()
	defer dc.depositsLock.RUnlock()

	var deposits []*ethpb.Deposit
	for _, ctnr := range dc.deposits {
		if beforeBlk == nil || beforeBlk.Cmp(ctnr.Block) > -1 {
			deposits = append(deposits, ctnr.Deposit)
		}
	}
	return deposits
}

// DepositsNumberAndRootAtHeight returns the number of deposits made prior to the blockheight and the
// root that corresponds to the latest deposit at that blockheight.
func (dc *DepositCache) DepositsNumberAndRootAtHeight(ctx context.Context, blockHeight *big.Int) (uint64, [32]byte) {
	ctx, span := trace.StartSpan(ctx, "Beacondb.DepositsNumberAndRootAtHeight")
	defer span.End()
	dc.depositsLock.RLock()
	defer dc.depositsLock.RUnlock()
	heightIdx := sort.Search(len(dc.deposits), func(i int) bool { return dc.deposits[i].Block.Cmp(blockHeight) > 0 })
	// Send the deposit root of the empty trie if the eth1 follow distance is greater than the time of the earliest
	// deposit.
	if heightIdx == 0 {
		return 0, [32]byte{}
	}
	return uint64(heightIdx), dc.deposits[heightIdx-1].depositRoot
}

// DepositByPubkey looks through historical deposits and finds one which contains
// a certain public key within its deposit data.
func (dc *DepositCache) DepositByPubkey(ctx context.Context, pubKey []byte) (*ethpb.Deposit, *big.Int) {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.DepositByPubkey")
	defer span.End()
	dc.depositsLock.RLock()
	defer dc.depositsLock.RUnlock()

	var deposit *ethpb.Deposit
	var blockNum *big.Int
	for _, ctnr := range dc.deposits {
		if bytes.Equal(ctnr.Deposit.Data.PublicKey, pubKey) {
			deposit = ctnr.Deposit
			blockNum = ctnr.Block
			break
		}
	}
	return deposit, blockNum
}
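InsertDeposit above keeps dc.deposits ordered by Index using sort.Search, so reads never need to re-sort. The following standalone sketch mirrors that insertion step on a plain []int, reproducing the order checked by the sorted-order test that follows.

package main

import (
	"fmt"
	"sort"
)

// insertSorted mirrors DepositCache.InsertDeposit's ordering step:
// sort.Search finds the first position whose value is >= the new index,
// and the new element is spliced in there, keeping the slice sorted
// without a full re-sort on every insertion.
func insertSorted(indices []int, index int) []int {
	i := sort.Search(len(indices), func(j int) bool { return indices[j] >= index })
	tail := append([]int{index}, indices[i:]...)
	return append(indices[:i], tail...)
}

func main() {
	var idxs []int
	for _, ins := range []int{0, 3, 1, 4} { // same insertion order as the test below
		idxs = insertSorted(idxs, ins)
	}
	fmt.Println(idxs) // [0 1 3 4]
}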
288
beacon-chain/cache/depositcache/deposits_test.go
vendored
Normal file
288
beacon-chain/cache/depositcache/deposits_test.go
vendored
Normal file
@@ -0,0 +1,288 @@
|
||||
package depositcache
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
|
||||
const nilDepositErr = "Ignoring nil deposit insertion"
|
||||
|
||||
var _ = DepositFetcher(&DepositCache{})
|
||||
|
||||
func TestBeaconDB_InsertDeposit_LogsOnNilDepositInsertion(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
dc := DepositCache{}
|
||||
|
||||
dc.InsertDeposit(context.Background(), nil, big.NewInt(1), 0, [32]byte{})
|
||||
|
||||
if len(dc.deposits) != 0 {
|
||||
t.Fatal("Number of deposits changed")
|
||||
}
|
||||
if hook.LastEntry().Message != nilDepositErr {
|
||||
t.Errorf("Did not log correct message, wanted \"Ignoring nil deposit insertion\", got \"%s\"", hook.LastEntry().Message)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBeaconDB_InsertDeposit_LogsOnNilBlockNumberInsertion(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
dc := DepositCache{}
|
||||
|
||||
dc.InsertDeposit(context.Background(), ðpb.Deposit{}, nil, 0, [32]byte{})
|
||||
|
||||
if len(dc.deposits) != 0 {
|
||||
t.Fatal("Number of deposits changed")
|
||||
}
|
||||
if hook.LastEntry().Message != nilDepositErr {
|
||||
t.Errorf("Did not log correct message, wanted \"Ignoring nil deposit insertion\", got \"%s\"", hook.LastEntry().Message)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBeaconDB_InsertDeposit_MaintainsSortedOrderByIndex(t *testing.T) {
|
||||
dc := DepositCache{}
|
||||
|
||||
insertions := []struct {
|
||||
blkNum *big.Int
|
||||
deposit *ethpb.Deposit
|
||||
index int
|
||||
	}{
		{
			blkNum:  big.NewInt(0),
			deposit: &ethpb.Deposit{},
			index:   0,
		},
		{
			blkNum:  big.NewInt(0),
			deposit: &ethpb.Deposit{},
			index:   3,
		},
		{
			blkNum:  big.NewInt(0),
			deposit: &ethpb.Deposit{},
			index:   1,
		},
		{
			blkNum:  big.NewInt(0),
			deposit: &ethpb.Deposit{},
			index:   4,
		},
	}

	for _, ins := range insertions {
		dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{})
	}

	expectedIndices := []int{0, 1, 3, 4}
	for i, ei := range expectedIndices {
		if dc.deposits[i].Index != ei {
			t.Errorf("dc.deposits[%d].Index = %d, wanted %d", i, dc.deposits[i].Index, ei)
		}
	}
}

func TestBeaconDB_AllDeposits_ReturnsAllDeposits(t *testing.T) {
	dc := DepositCache{}

	deposits := []*DepositContainer{
		{
			Block:   big.NewInt(10),
			Deposit: &ethpb.Deposit{},
		},
		{
			Block:   big.NewInt(10),
			Deposit: &ethpb.Deposit{},
		},
		{
			Block:   big.NewInt(10),
			Deposit: &ethpb.Deposit{},
		},
		{
			Block:   big.NewInt(11),
			Deposit: &ethpb.Deposit{},
		},
		{
			Block:   big.NewInt(11),
			Deposit: &ethpb.Deposit{},
		},
		{
			Block:   big.NewInt(12),
			Deposit: &ethpb.Deposit{},
		},
		{
			Block:   big.NewInt(12),
			Deposit: &ethpb.Deposit{},
		},
	}
	dc.deposits = deposits

	d := dc.AllDeposits(context.Background(), nil)
	if len(d) != len(deposits) {
		t.Errorf("Returned the wrong number of deposits (%d), wanted %d", len(d), len(deposits))
	}
}

func TestBeaconDB_AllDeposits_FiltersDepositUpToAndIncludingBlockNumber(t *testing.T) {
	dc := DepositCache{}

	deposits := []*DepositContainer{
		{
			Block:   big.NewInt(10),
			Deposit: &ethpb.Deposit{},
		},
		{
			Block:   big.NewInt(10),
			Deposit: &ethpb.Deposit{},
		},
		{
			Block:   big.NewInt(10),
			Deposit: &ethpb.Deposit{},
		},
		{
			Block:   big.NewInt(11),
			Deposit: &ethpb.Deposit{},
		},
		{
			Block:   big.NewInt(11),
			Deposit: &ethpb.Deposit{},
		},
		{
			Block:   big.NewInt(12),
			Deposit: &ethpb.Deposit{},
		},
		{
			Block:   big.NewInt(12),
			Deposit: &ethpb.Deposit{},
		},
	}
	dc.deposits = deposits

	d := dc.AllDeposits(context.Background(), big.NewInt(11))
	expected := 5
	if len(d) != expected {
		t.Errorf("Returned the wrong number of deposits (%d), wanted %d", len(d), expected)
	}
}

func TestBeaconDB_DepositsNumberAndRootAtHeight_ReturnsAppropriateCountAndRoot(t *testing.T) {
	dc := DepositCache{}

	dc.deposits = []*DepositContainer{
		{
			Block:   big.NewInt(10),
			Deposit: &ethpb.Deposit{},
		},
		{
			Block:   big.NewInt(10),
			Deposit: &ethpb.Deposit{},
		},
		{
			Block:   big.NewInt(10),
			Deposit: &ethpb.Deposit{},
		},
		{
			Block:   big.NewInt(11),
			Deposit: &ethpb.Deposit{},
		},
		{
			Block:       big.NewInt(11),
			Deposit:     &ethpb.Deposit{},
			depositRoot: bytesutil.ToBytes32([]byte("root")),
		},
		{
			Block:   big.NewInt(12),
			Deposit: &ethpb.Deposit{},
		},
		{
			Block:   big.NewInt(12),
			Deposit: &ethpb.Deposit{},
		},
	}

	n, root := dc.DepositsNumberAndRootAtHeight(context.Background(), big.NewInt(11))
	if int(n) != 5 {
		t.Errorf("Returned unexpected deposits number %d, wanted %d", n, 5)
	}

	if root != bytesutil.ToBytes32([]byte("root")) {
		t.Errorf("Returned unexpected root: %v", root)
	}
}

func TestBeaconDB_DepositsNumberAndRootAtHeight_ReturnsEmptyTrieIfBlockHeightLessThanOldestDeposit(t *testing.T) {
	dc := DepositCache{}

	dc.deposits = []*DepositContainer{
		{
			Block:       big.NewInt(10),
			Deposit:     &ethpb.Deposit{},
			depositRoot: bytesutil.ToBytes32([]byte("root")),
		},
		{
			Block:       big.NewInt(11),
			Deposit:     &ethpb.Deposit{},
			depositRoot: bytesutil.ToBytes32([]byte("root")),
		},
	}

	n, root := dc.DepositsNumberAndRootAtHeight(context.Background(), big.NewInt(2))
	if int(n) != 0 {
		t.Errorf("Returned unexpected deposits number %d, wanted %d", n, 0)
	}

	if root != [32]byte{} {
		t.Errorf("Returned unexpected root: %v", root)
	}
}

func TestBeaconDB_DepositByPubkey_ReturnsFirstMatchingDeposit(t *testing.T) {
	dc := DepositCache{}

	dc.deposits = []*DepositContainer{
		{
			Block: big.NewInt(9),
			Deposit: &ethpb.Deposit{
				Data: &ethpb.Deposit_Data{
					PublicKey: []byte("pk0"),
				},
			},
		},
		{
			Block: big.NewInt(10),
			Deposit: &ethpb.Deposit{
				Data: &ethpb.Deposit_Data{
					PublicKey: []byte("pk1"),
				},
			},
		},
		{
			Block: big.NewInt(11),
			Deposit: &ethpb.Deposit{
				Data: &ethpb.Deposit_Data{
					PublicKey: []byte("pk1"),
				},
			},
		},
		{
			Block: big.NewInt(12),
			Deposit: &ethpb.Deposit{
				Data: &ethpb.Deposit_Data{
					PublicKey: []byte("pk2"),
				},
			},
		},
	}

	dep, blkNum := dc.DepositByPubkey(context.Background(), []byte("pk1"))

	if !bytes.Equal(dep.Data.PublicKey, []byte("pk1")) {
		t.Error("Returned wrong deposit")
	}
	if blkNum.Cmp(big.NewInt(10)) != 0 {
		t.Errorf("Returned wrong block number %v", blkNum)
	}
}
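The first test in this hunk checks that `InsertDeposit` keeps `dc.deposits` ordered by `Index` even when insertions arrive out of order (0, 3, 1, 4 becomes 0, 1, 3, 4). Below is a minimal sketch of one way to maintain that invariant with a slice-backed cache; it illustrates the invariant the test asserts, not the vendored implementation:

```go
// Sketch of an order-preserving insert over a slice. The container type
// here is a simplified stand-in; illustrative only.
package main

import (
	"fmt"
	"sort"
)

type container struct{ Index int }

// insertSorted places c at the first position whose Index is >= c.Index.
func insertSorted(deposits []*container, c *container) []*container {
	i := sort.Search(len(deposits), func(j int) bool {
		return deposits[j].Index >= c.Index
	})
	deposits = append(deposits, nil) // grow by one
	copy(deposits[i+1:], deposits[i:])
	deposits[i] = c
	return deposits
}

func main() {
	var ds []*container
	for _, idx := range []int{0, 3, 1, 4} {
		ds = insertSorted(ds, &container{Index: idx})
	}
	for _, d := range ds {
		fmt.Print(d.Index, " ") // prints: 0 1 3 4
	}
}
```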
163 beacon-chain/cache/depositcache/pending_deposits.go (vendored, new file)
@@ -0,0 +1,163 @@
package depositcache

import (
	"context"
	"math/big"
	"sort"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
	"github.com/prysmaticlabs/prysm/shared/hashutil"
	log "github.com/sirupsen/logrus"
	"go.opencensus.io/trace"
)

var (
	pendingDepositsCount = promauto.NewGauge(prometheus.GaugeOpts{
		Name: "beacondb_pending_deposits",
		Help: "The number of pending deposits in the beaconDB in-memory database",
	})
)

// PendingDepositsFetcher specifically outlines a struct that can retrieve deposits
// which have not yet been included in the chain.
type PendingDepositsFetcher interface {
	PendingContainers(ctx context.Context, beforeBlk *big.Int) []*DepositContainer
}

// InsertPendingDeposit into the database. If the deposit or block number is nil,
// then this method does nothing.
func (dc *DepositCache) InsertPendingDeposit(ctx context.Context, d *ethpb.Deposit, blockNum *big.Int, index int, depositRoot [32]byte) {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.InsertPendingDeposit")
	defer span.End()
	if d == nil || blockNum == nil {
		log.WithFields(log.Fields{
			"block":   blockNum,
			"deposit": d,
		}).Debug("Ignoring nil deposit insertion")
		return
	}
	dc.depositsLock.Lock()
	defer dc.depositsLock.Unlock()
	dc.pendingDeposits = append(dc.pendingDeposits, &DepositContainer{Deposit: d, Block: blockNum, Index: index, depositRoot: depositRoot})
	pendingDepositsCount.Inc()
	span.AddAttributes(trace.Int64Attribute("count", int64(len(dc.pendingDeposits))))
}

// PendingDeposits returns a list of deposits until the given block number
// (inclusive). If no block is specified then this method returns all pending
// deposits.
func (dc *DepositCache) PendingDeposits(ctx context.Context, beforeBlk *big.Int) []*ethpb.Deposit {
	ctx, span := trace.StartSpan(ctx, "DepositsCache.PendingDeposits")
	defer span.End()
	dc.depositsLock.RLock()
	defer dc.depositsLock.RUnlock()

	var depositCntrs []*DepositContainer
	for _, ctnr := range dc.pendingDeposits {
		if beforeBlk == nil || beforeBlk.Cmp(ctnr.Block) > -1 {
			depositCntrs = append(depositCntrs, ctnr)
		}
	}
	// Sort the deposits by Merkle index.
	sort.SliceStable(depositCntrs, func(i, j int) bool {
		return depositCntrs[i].Index < depositCntrs[j].Index
	})

	var deposits []*ethpb.Deposit
	for _, dep := range depositCntrs {
		deposits = append(deposits, dep.Deposit)
	}

	span.AddAttributes(trace.Int64Attribute("count", int64(len(deposits))))

	return deposits
}

// PendingContainers returns a list of deposit containers until the given block number
// (inclusive).
func (dc *DepositCache) PendingContainers(ctx context.Context, beforeBlk *big.Int) []*DepositContainer {
	ctx, span := trace.StartSpan(ctx, "DepositsCache.PendingContainers")
	defer span.End()
	dc.depositsLock.RLock()
	defer dc.depositsLock.RUnlock()

	var depositCntrs []*DepositContainer
	for _, ctnr := range dc.pendingDeposits {
		if beforeBlk == nil || beforeBlk.Cmp(ctnr.Block) > -1 {
			depositCntrs = append(depositCntrs, ctnr)
		}
	}
	// Sort the deposits by Merkle index.
	sort.SliceStable(depositCntrs, func(i, j int) bool {
		return depositCntrs[i].Index < depositCntrs[j].Index
	})

	span.AddAttributes(trace.Int64Attribute("count", int64(len(depositCntrs))))

	return depositCntrs
}

// RemovePendingDeposit from the database. The deposit to remove is matched by
// its proto hash. This method does nothing if the deposit ptr is nil.
func (dc *DepositCache) RemovePendingDeposit(ctx context.Context, d *ethpb.Deposit) {
	ctx, span := trace.StartSpan(ctx, "DepositsCache.RemovePendingDeposit")
	defer span.End()

	if d == nil {
		log.Debug("Ignoring nil deposit removal")
		return
	}

	depRoot, err := hashutil.HashProto(d)
	if err != nil {
		log.Errorf("Could not remove deposit %v", err)
		return
	}

	dc.depositsLock.Lock()
	defer dc.depositsLock.Unlock()

	idx := -1
	for i, ctnr := range dc.pendingDeposits {
		hash, err := hashutil.HashProto(ctnr.Deposit)
		if err != nil {
			log.Errorf("Could not hash deposit %v", err)
			continue
		}
		if hash == depRoot {
			idx = i
			break
		}
	}

	if idx >= 0 {
		dc.pendingDeposits = append(dc.pendingDeposits[:idx], dc.pendingDeposits[idx+1:]...)
		pendingDepositsCount.Dec()
	}
}

// PrunePendingDeposits removes any deposit which is older than the given deposit Merkle tree index.
func (dc *DepositCache) PrunePendingDeposits(ctx context.Context, merkleTreeIndex int) {
	ctx, span := trace.StartSpan(ctx, "DepositsCache.PrunePendingDeposits")
	defer span.End()

	if merkleTreeIndex == 0 {
		log.Debug("Ignoring 0 deposit removal")
		return
	}

	dc.depositsLock.Lock()
	defer dc.depositsLock.Unlock()

	var cleanDeposits []*DepositContainer
	for _, dp := range dc.pendingDeposits {
		if dp.Index >= merkleTreeIndex {
			cleanDeposits = append(cleanDeposits, dp)
		}
	}

	dc.pendingDeposits = cleanDeposits
	pendingDepositsCount.Set(float64(len(dc.pendingDeposits)))
}
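Before the vendored tests below, here is a condensed sketch of the lifecycle these methods implement: insert a pending deposit when it is observed, read pending deposits back sorted by Merkle index, then prune once deposits are included in the chain. Written as a package-local test; the block numbers and indices are illustrative:

```go
package depositcache

import (
	"context"
	"math/big"
	"testing"

	ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
)

// Sketch: exercises the pending-deposit lifecycle end to end.
func TestPendingDepositLifecycle_Sketch(t *testing.T) {
	dc := DepositCache{}
	ctx := context.Background()

	// Insert two deposits out of index order; PendingDeposits returns them sorted.
	dc.InsertPendingDeposit(ctx, &ethpb.Deposit{}, big.NewInt(100), 2, [32]byte{})
	dc.InsertPendingDeposit(ctx, &ethpb.Deposit{}, big.NewInt(100), 1, [32]byte{})

	if got := dc.PendingDeposits(ctx, nil); len(got) != 2 {
		t.Fatalf("expected 2 pending deposits, got %d", len(got))
	}

	// Prune everything below Merkle index 2; only the Index=2 container survives.
	dc.PrunePendingDeposits(ctx, 2)
	if got := dc.PendingContainers(ctx, nil); len(got) != 1 || got[0].Index != 2 {
		t.Fatalf("expected a single container with Index 2 after prune, got %+v", got)
	}
}
```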
162 beacon-chain/cache/depositcache/pending_deposits_test.go (vendored, new file)
@@ -0,0 +1,162 @@
package depositcache

import (
	"context"
	"math/big"
	"reflect"
	"testing"

	"github.com/gogo/protobuf/proto"
	ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
)

var _ = PendingDepositsFetcher(&DepositCache{})

func TestInsertPendingDeposit_OK(t *testing.T) {
	dc := DepositCache{}
	dc.InsertPendingDeposit(context.Background(), &ethpb.Deposit{}, big.NewInt(111), 100, [32]byte{})

	if len(dc.pendingDeposits) != 1 {
		t.Error("Deposit not inserted")
	}
}

func TestInsertPendingDeposit_IgnoresNilDeposit(t *testing.T) {
	dc := DepositCache{}
	dc.InsertPendingDeposit(context.Background(), nil /*deposit*/, nil /*blockNum*/, 0, [32]byte{})

	if len(dc.pendingDeposits) > 0 {
		t.Error("Unexpected deposit insertion")
	}
}

func TestRemovePendingDeposit_OK(t *testing.T) {
	db := DepositCache{}
	depToRemove := &ethpb.Deposit{Proof: [][]byte{[]byte("A")}}
	otherDep := &ethpb.Deposit{Proof: [][]byte{[]byte("B")}}
	db.pendingDeposits = []*DepositContainer{
		{Deposit: depToRemove, Index: 1},
		{Deposit: otherDep, Index: 5},
	}
	db.RemovePendingDeposit(context.Background(), depToRemove)

	if len(db.pendingDeposits) != 1 || !proto.Equal(db.pendingDeposits[0].Deposit, otherDep) {
		t.Error("Failed to remove deposit")
	}
}

func TestRemovePendingDeposit_IgnoresNilDeposit(t *testing.T) {
	dc := DepositCache{}
	dc.pendingDeposits = []*DepositContainer{{Deposit: &ethpb.Deposit{}}}
	dc.RemovePendingDeposit(context.Background(), nil /*deposit*/)
	if len(dc.pendingDeposits) != 1 {
		t.Errorf("Deposit unexpectedly removed")
	}
}

func TestPendingDeposit_RoundTrip(t *testing.T) {
	dc := DepositCache{}
	dep := &ethpb.Deposit{Proof: [][]byte{[]byte("A")}}
	dc.InsertPendingDeposit(context.Background(), dep, big.NewInt(111), 100, [32]byte{})
	dc.RemovePendingDeposit(context.Background(), dep)
	if len(dc.pendingDeposits) != 0 {
		t.Error("Failed to insert & delete a pending deposit")
	}
}

func TestPendingDeposits_OK(t *testing.T) {
	dc := DepositCache{}

	dc.pendingDeposits = []*DepositContainer{
		{Block: big.NewInt(2), Deposit: &ethpb.Deposit{Proof: [][]byte{[]byte("A")}}},
		{Block: big.NewInt(4), Deposit: &ethpb.Deposit{Proof: [][]byte{[]byte("B")}}},
		{Block: big.NewInt(6), Deposit: &ethpb.Deposit{Proof: [][]byte{[]byte("c")}}},
	}

	deposits := dc.PendingDeposits(context.Background(), big.NewInt(4))
	expected := []*ethpb.Deposit{
		{Proof: [][]byte{[]byte("A")}},
		{Proof: [][]byte{[]byte("B")}},
	}

	if !reflect.DeepEqual(deposits, expected) {
		t.Errorf("Unexpected deposits. got=%+v want=%+v", deposits, expected)
	}

	all := dc.PendingDeposits(context.Background(), nil)
	if len(all) != len(dc.pendingDeposits) {
		t.Error("PendingDeposits(ctx, nil) did not return all deposits")
	}
}

func TestPrunePendingDeposits_ZeroMerkleIndex(t *testing.T) {
	dc := DepositCache{}

	dc.pendingDeposits = []*DepositContainer{
		{Block: big.NewInt(2), Index: 2},
		{Block: big.NewInt(4), Index: 4},
		{Block: big.NewInt(6), Index: 6},
		{Block: big.NewInt(8), Index: 8},
		{Block: big.NewInt(10), Index: 10},
		{Block: big.NewInt(12), Index: 12},
	}

	dc.PrunePendingDeposits(context.Background(), 0)
	expected := []*DepositContainer{
		{Block: big.NewInt(2), Index: 2},
		{Block: big.NewInt(4), Index: 4},
		{Block: big.NewInt(6), Index: 6},
		{Block: big.NewInt(8), Index: 8},
		{Block: big.NewInt(10), Index: 10},
		{Block: big.NewInt(12), Index: 12},
	}

	if !reflect.DeepEqual(dc.pendingDeposits, expected) {
		t.Errorf("Unexpected deposits. got=%+v want=%+v", dc.pendingDeposits, expected)
	}
}

func TestPrunePendingDeposits_OK(t *testing.T) {
	dc := DepositCache{}

	dc.pendingDeposits = []*DepositContainer{
		{Block: big.NewInt(2), Index: 2},
		{Block: big.NewInt(4), Index: 4},
		{Block: big.NewInt(6), Index: 6},
		{Block: big.NewInt(8), Index: 8},
		{Block: big.NewInt(10), Index: 10},
		{Block: big.NewInt(12), Index: 12},
	}

	dc.PrunePendingDeposits(context.Background(), 6)
	expected := []*DepositContainer{
		{Block: big.NewInt(6), Index: 6},
		{Block: big.NewInt(8), Index: 8},
		{Block: big.NewInt(10), Index: 10},
		{Block: big.NewInt(12), Index: 12},
	}

	if !reflect.DeepEqual(dc.pendingDeposits, expected) {
		t.Errorf("Unexpected deposits. got=%+v want=%+v", dc.pendingDeposits, expected)
	}

	dc.pendingDeposits = []*DepositContainer{
		{Block: big.NewInt(2), Index: 2},
		{Block: big.NewInt(4), Index: 4},
		{Block: big.NewInt(6), Index: 6},
		{Block: big.NewInt(8), Index: 8},
		{Block: big.NewInt(10), Index: 10},
		{Block: big.NewInt(12), Index: 12},
	}

	dc.PrunePendingDeposits(context.Background(), 10)
	expected = []*DepositContainer{
		{Block: big.NewInt(10), Index: 10},
		{Block: big.NewInt(12), Index: 12},
	}

	if !reflect.DeepEqual(dc.pendingDeposits, expected) {
		t.Errorf("Unexpected deposits. got=%+v want=%+v", dc.pendingDeposits, expected)
	}
}
136 beacon-chain/cache/eth1_data.go (vendored, new file)
@@ -0,0 +1,136 @@
package cache

import (
	"errors"
	"sync"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/prysmaticlabs/prysm/shared/featureconfig"
	"k8s.io/client-go/tools/cache"
)

var (
	// ErrNotEth1DataVote will be returned when a cache object is not a pointer to
	// an Eth1DataVote struct.
	ErrNotEth1DataVote = errors.New("object is not an eth1 data vote obj")

	// maxEth1DataVoteSize defines the max number of eth1 data votes the cache can hold.
	maxEth1DataVoteSize = 1000

	// Metrics.
	eth1DataVoteCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
		Name: "eth1_data_vote_cache_miss",
		Help: "The number of eth1 data vote count requests that aren't present in the cache.",
	})
	eth1DataVoteCacheHit = promauto.NewCounter(prometheus.CounterOpts{
		Name: "eth1_data_vote_cache_hit",
		Help: "The number of eth1 data vote count requests that are present in the cache.",
	})
)

// Eth1DataVote defines the struct which keeps track of the vote count of an individual deposit root.
type Eth1DataVote struct {
	Eth1DataHash [32]byte
	VoteCount    uint64
}

// Eth1DataVoteCache is a struct with 1 queue for looking up the eth1 data vote count by deposit root.
type Eth1DataVoteCache struct {
	eth1DataVoteCache *cache.FIFO
	lock              sync.RWMutex
}

// eth1DataVoteKeyFn takes the eth1data hash as the key for the eth1 data vote count of a given eth1data object.
func eth1DataVoteKeyFn(obj interface{}) (string, error) {
	eInfo, ok := obj.(*Eth1DataVote)
	if !ok {
		return "", ErrNotEth1DataVote
	}

	return string(eInfo.Eth1DataHash[:]), nil
}

// NewEth1DataVoteCache creates a new eth1 data vote count cache for storing/accessing Eth1DataVote.
func NewEth1DataVoteCache() *Eth1DataVoteCache {
	return &Eth1DataVoteCache{
		eth1DataVoteCache: cache.NewFIFO(eth1DataVoteKeyFn),
	}
}

// Eth1DataVote fetches the eth1 data vote count by the eth1data hash. Returns
// the vote count if it exists; otherwise returns 0, nil.
func (c *Eth1DataVoteCache) Eth1DataVote(eth1DataHash [32]byte) (uint64, error) {
	if !featureconfig.Get().EnableEth1DataVoteCache {
		// Return a miss result if the cache is not enabled.
		eth1DataVoteCacheMiss.Inc()
		return 0, nil
	}

	c.lock.RLock()
	defer c.lock.RUnlock()
	obj, exists, err := c.eth1DataVoteCache.GetByKey(string(eth1DataHash[:]))
	if err != nil {
		return 0, err
	}

	if exists {
		eth1DataVoteCacheHit.Inc()
	} else {
		eth1DataVoteCacheMiss.Inc()
		return 0, nil
	}

	eInfo, ok := obj.(*Eth1DataVote)
	if !ok {
		return 0, ErrNotEth1DataVote
	}

	return eInfo.VoteCount, nil
}

// AddEth1DataVote adds an eth1 data vote object to the cache. This method also trims the least
// recently added Eth1DataVote object if the cache size has reached the max cache size limit.
func (c *Eth1DataVoteCache) AddEth1DataVote(eth1DataVote *Eth1DataVote) error {
	if !featureconfig.Get().EnableEth1DataVoteCache {
		return nil
	}

	c.lock.Lock()
	defer c.lock.Unlock()
	if err := c.eth1DataVoteCache.Add(eth1DataVote); err != nil {
		return err
	}

	trim(c.eth1DataVoteCache, maxEth1DataVoteSize)
	return nil
}

// IncrementEth1DataVote increments the existing eth1 data object's vote count by 1,
// and returns the vote count.
func (c *Eth1DataVoteCache) IncrementEth1DataVote(eth1DataHash [32]byte) (uint64, error) {
	if !featureconfig.Get().EnableEth1DataVoteCache {
		return 0, nil
	}

	// Take the write lock: the stored vote object is mutated below.
	c.lock.Lock()
	defer c.lock.Unlock()
	obj, exists, err := c.eth1DataVoteCache.GetByKey(string(eth1DataHash[:]))
	if err != nil {
		return 0, err
	}
	if !exists {
		return 0, errors.New("eth1 data vote object does not exist")
	}

	eth1DataVoteCacheHit.Inc()

	eInfo, _ := obj.(*Eth1DataVote)
	eInfo.VoteCount++

	if err := c.eth1DataVoteCache.Add(eInfo); err != nil {
		return 0, err
	}

	return eInfo.VoteCount, nil
}
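Condensing the tests that follow, the intended access pattern looks like the sketch below. It assumes the `EnableEth1DataVoteCache` feature flag is on (the package's test init enables it); otherwise every call short-circuits to a cache miss. The hash value is illustrative:

```go
package cache

import "fmt"

// Sketch: count votes for a deposit root via the FIFO-backed cache.
func ExampleEth1DataVoteCache() {
	c := NewEth1DataVoteCache()
	hash := [32]byte{'d', 'e', 'p'}

	// Seed the cache with an initial vote for this eth1 data hash.
	if err := c.AddEth1DataVote(&Eth1DataVote{Eth1DataHash: hash, VoteCount: 1}); err != nil {
		panic(err)
	}

	// Each repeat sighting of the same eth1 data increments the count.
	count, err := c.IncrementEth1DataVote(hash)
	if err != nil {
		panic(err)
	}
	fmt.Println(count)
	// Output: 2
}
```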
110 beacon-chain/cache/eth1_data_test.go (vendored, new file)
@@ -0,0 +1,110 @@
package cache

import (
	"strconv"
	"testing"
)

func TestEth1DataVoteKeyFn_OK(t *testing.T) {
	eInfo := &Eth1DataVote{
		VoteCount:    44,
		Eth1DataHash: [32]byte{'A'},
	}

	key, err := eth1DataVoteKeyFn(eInfo)
	if err != nil {
		t.Fatal(err)
	}
	if key != string(eInfo.Eth1DataHash[:]) {
		t.Errorf("Incorrect hash key: %s, expected %s", key, string(eInfo.Eth1DataHash[:]))
	}
}

func TestEth1DataVoteKeyFn_InvalidObj(t *testing.T) {
	_, err := eth1DataVoteKeyFn("bad")
	if err != ErrNotEth1DataVote {
		t.Errorf("Expected error %v, got %v", ErrNotEth1DataVote, err)
	}
}

func TestEth1DataVoteCache_CanAdd(t *testing.T) {
	cache := NewEth1DataVoteCache()

	eInfo := &Eth1DataVote{
		VoteCount:    55,
		Eth1DataHash: [32]byte{'B'},
	}
	count, err := cache.Eth1DataVote(eInfo.Eth1DataHash)
	if err != nil {
		t.Fatal(err)
	}
	if count != 0 {
		t.Error("Expected vote not to exist in empty cache")
	}

	if err := cache.AddEth1DataVote(eInfo); err != nil {
		t.Fatal(err)
	}
	count, err = cache.Eth1DataVote(eInfo.Eth1DataHash)
	if err != nil {
		t.Fatal(err)
	}
	if count != eInfo.VoteCount {
		t.Errorf(
			"Expected vote count to be %d, got %d",
			eInfo.VoteCount,
			count,
		)
	}
}

func TestEth1DataVoteCache_CanIncrement(t *testing.T) {
	cache := NewEth1DataVoteCache()

	eInfo := &Eth1DataVote{
		VoteCount:    55,
		Eth1DataHash: [32]byte{'B'},
	}

	if err := cache.AddEth1DataVote(eInfo); err != nil {
		t.Fatal(err)
	}

	_, err := cache.IncrementEth1DataVote(eInfo.Eth1DataHash)
	if err != nil {
		t.Fatal(err)
	}
	_, _ = cache.IncrementEth1DataVote(eInfo.Eth1DataHash)
	count, _ := cache.IncrementEth1DataVote(eInfo.Eth1DataHash)

	if count != 58 {
		t.Errorf(
			"Expected vote count to be %d, got %d",
			58,
			count,
		)
	}
}

func TestEth1Data_MaxSize(t *testing.T) {
	cache := NewEth1DataVoteCache()

	for i := 0; i < maxEth1DataVoteSize+1; i++ {
		var hash [32]byte
		copy(hash[:], []byte(strconv.Itoa(i)))
		eInfo := &Eth1DataVote{
			Eth1DataHash: hash,
		}
		if err := cache.AddEth1DataVote(eInfo); err != nil {
			t.Fatal(err)
		}
	}

	if len(cache.eth1DataVoteCache.ListKeys()) != maxEth1DataVoteSize {
		t.Errorf(
			"Expected hash cache key size to be %d, got %d",
			maxEth1DataVoteSize,
			len(cache.eth1DataVoteCache.ListKeys()),
		)
	}
}
14 beacon-chain/cache/feature_flag_test.go (vendored, new file)
@@ -0,0 +1,14 @@
package cache

import "github.com/prysmaticlabs/prysm/shared/featureconfig"

func init() {
	featureconfig.Init(&featureconfig.Flag{
		EnableAttestationCache:   true,
		EnableEth1DataVoteCache:  true,
		EnableShuffledIndexCache: true,
		EnableCommitteeCache:     true,
		EnableActiveCountCache:   true,
		EnableActiveIndicesCache: true,
	})
}
beacon-chain/chaintest/BUILD.bazel (deleted)
@@ -1,32 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = ["main.go"],
    importpath = "github.com/prysmaticlabs/prysm/beacon-chain/chaintest",
    visibility = ["//visibility:private"],
    deps = [
        "//beacon-chain/chaintest/backend:go_default_library",
        "//shared/featureconfig:go_default_library",
        "@com_github_go_yaml_yaml//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@com_github_x_cray_logrus_prefixed_formatter//:go_default_library",
    ],
)

go_binary(
    name = "chaintest",
    embed = [":go_default_library"],
    visibility = ["//visibility:private"],
)

go_test(
    name = "go_default_test",
    srcs = ["yaml_test.go"],
    data = glob(["tests/**"]),
    embed = [":go_default_library"],
    deps = [
        "//beacon-chain/chaintest/backend:go_default_library",
        "//shared/featureconfig:go_default_library",
    ],
)
beacon-chain/chaintest/README.md (deleted)
@@ -1,238 +0,0 @@
# Ethereum 2.0 E2E Test Suite

This is a suite of end-to-end conformity tests for Prysm's implementation of the Ethereum 2.0 specification. Implementation teams have decided to use YAML as a general conformity test format for the current beacon chain's runtime functionality.

The suite opts for YAML due to its wide language support and its support for inline comments.

# Testing Format

The testing format follows the official ETH 2.0 specification test format defined [here](https://github.com/ethereum/eth2.0-specs/blob/master/specs/test-format.md).

## Stateful Tests

Chain tests check a client's conformity to the beacon chain specification for items such as the fork choice rule and Casper FFG validator rewards & penalties. Stateful tests need to specify a certain beacon chain configuration, such as the number of validators, in the YAML file. Sample tests with all required fields are shown below.

### State Transition

The most important use case for this test format is to verify the ins and outs of the Ethereum Phase 0 Beacon Chain state advancement. The specification details very strict guidelines for blocks to successfully trigger a state transition, including items such as Casper Proof of Stake slashing conditions of validators, pseudorandomness in the form of RANDAO, and attestations on shard blocks being processed inside each incoming beacon block. The YAML configuration for this test type allows for configuring a state transition run over N slots, triggering slashing conditions, processing deposits of new validators, and more.

An example state transition test for testing slot and block processing looks as follows:

```yaml
title: Sample Ethereum Serenity State Transition Tests
summary: Testing full state transition block processing
test_suite: prysm
fork: sapphire
version: 1.0
test_cases:
  - config:
      epoch_length: 64
      deposits_for_chain_start: 1000
      num_slots: 32 # Testing advancing state to slot < SlotsPerEpoch
    results:
      slot: 32
      num_validators: 1000
  - config:
      epoch_length: 64
      deposits_for_chain_start: 16384
      num_slots: 64
      deposits:
        - slot: 1
          amount: 32
          merkle_index: 0
          pubkey: !!binary |
            SlAAbShSkUg7PLiPHZI/rTS1uAvKiieOrifPN6Moso0=
        - slot: 15
          amount: 32
          merkle_index: 1
          pubkey: !!binary |
            Oklajsjdkaklsdlkajsdjlajslkdjlkasjlkdjlajdsd
        - slot: 55
          amount: 32
          merkle_index: 2
          pubkey: !!binary |
            LkmqmqoodLKAslkjdkajsdljasdkajlksjdasldjasdd
      proposer_slashings:
        - slot: 16 # At slot 16, we trigger a proposer slashing
          proposer_index: 16385 # We penalize the proposer that was just added from slot 15
          proposal_1_shard: 0
          proposal_1_slot: 15
          proposal_1_root: !!binary |
            LkmqmqoodLKAslkjdkajsdljasdkajlksjdasldjasdd
          proposal_2_shard: 0
          proposal_2_slot: 15
          proposal_2_root: !!binary |
            LkmqmqoodLKAslkjdkajsdljasdkajlksjdasldjasdd
      attester_slashings:
        - slot: 59 # At slot 59, we trigger an attester slashing
          slashable_vote_data_1_slot: 55
          slashable_vote_data_2_slot: 55
          slashable_vote_data_1_justified_slot: 0
          slashable_vote_data_2_justified_slot: 1
          slashable_vote_data_1_custody_0_indices: [16386]
          slashable_vote_data_1_custody_1_indices: []
          slashable_vote_data_2_custody_0_indices: []
          slashable_vote_data_2_custody_1_indices: [16386]
    results:
      slot: 64
      num_validators: 16387
      penalized_validators: [16385, 16386] # We test that the validators at indices 16385, 16386 were indeed penalized
  - config:
      skip_slots: [10, 20]
      epoch_length: 64
      deposits_for_chain_start: 1000
      num_slots: 128 # Testing advancing the state's slot to 2*SlotsPerEpoch
      deposits:
        - slot: 10
          amount: 32
          merkle_index: 0
          pubkey: !!binary |
            SlAAbShSkUg7PLiPHZI/rTS1uAvKiieOrifPN6Moso0=
        - slot: 20
          amount: 32
          merkle_index: 1
          pubkey: !!binary |
            Oklajsjdkaklsdlkajsdjlajslkdjlkasjlkdjlajdsd
    results:
      slot: 128
      num_validators: 1000 # Validator registry should not have grown if slots 10 and 20 were skipped
```

#### Test Configuration Options

The following configuration options are available for state transition tests:

**Config**

- **skip_slots**: `[int]` determines which slot numbers simulate a proposer not submitting a block during the state transition TODO
- **epoch_length**: `int` the number of slots in an epoch
- **deposits_for_chain_start**: `int` the number of ETH deposits needed for the beacon chain to initialize (this simulates an initial validator registry based on this number in the test)
- **num_slots**: `int` the number of times we run a state transition in the test
- **deposits**: `[Deposit Config]` trigger a new validator deposit into the beacon state based on configuration options
- **proposer_slashings**: `[Proposer Slashing Config]` trigger a proposer slashing at a certain slot for a certain proposer index
- **attester_slashings**: `[Casper Slashing Config]` trigger an attester slashing at a certain slot
- **validator_exits**: `[Validator Exit Config]` trigger a voluntary validator exit at a certain slot for a validator index

**Deposit Config**

- **slot**: `int` a slot in which to trigger a deposit during a state transition test
- **amount**: `int` the ETH deposit amount to trigger
- **merkle_index**: `int` the index of the deposit in the validator deposit contract's Merkle trie
- **pubkey**: `!!binary` the public key of the validator in the triggered deposit object

**Proposer Slashing Config**

- **slot**: `int` a slot in which to trigger a proposer slashing during a state transition test
- **proposer_index**: `int` the proposer to penalize
- **proposal_1_shard**: `int` the first proposal data's shard id
- **proposal_1_slot**: `int` the first proposal data's slot
- **proposal_1_root**: `!!binary` the first proposal data's block root
- **proposal_2_shard**: `int` the second proposal data's shard id
- **proposal_2_slot**: `int` the second proposal data's slot
- **proposal_2_root**: `!!binary` the second proposal data's block root

**Casper Slashing Config**

- **slot**: `int` a slot in which to trigger an attester slashing during a state transition test
- **slashable_vote_data_1_slot**: `int` the slot of the attestation data of slashableVoteData1
- **slashable_vote_data_2_slot**: `int` the slot of the attestation data of slashableVoteData2
- **slashable_vote_data_1_justified_slot**: `int` the justified slot of the attestation data of slashableVoteData1
- **slashable_vote_data_2_justified_slot**: `int` the justified slot of the attestation data of slashableVoteData2
- **slashable_vote_data_1_custody_0_indices**: `[int]` the custody indices 0 for slashableVoteData1
- **slashable_vote_data_1_custody_1_indices**: `[int]` the custody indices 1 for slashableVoteData1
- **slashable_vote_data_2_custody_0_indices**: `[int]` the custody indices 0 for slashableVoteData2
- **slashable_vote_data_2_custody_1_indices**: `[int]` the custody indices 1 for slashableVoteData2

**Validator Exit Config**

- **slot**: `int` the slot at which a validator wants to voluntarily exit the validator registry
- **validator_index**: `int` the index of the validator in the registry that is exiting

#### Test Results

The following are **mandatory** fields, as they correspond to checks done at the end of the test run; an example block follows this list.

- **slot**: `int` check the slot of the state resulting from applying N state transitions in the test
- **num_validators**: `int` check the number of validators in the validator registry after applying N state transitions
- **penalized_validators**: `[int]` the list of validator indices we verify were penalized during the test
- **exited_validators**: `[int]` the list of validator indices we verify voluntarily exited the registry during the test
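For instance, a results block asserting slashings and exits together might read as follows (the values are illustrative):

```yaml
results:
  slot: 64
  num_validators: 16387
  penalized_validators: [16385, 16386]
  exited_validators: [16380]
```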
## Stateless Tests

Stateless tests represent simple unit test definitions for important invariants in the ETH 2.0 runtime. In particular, these tests check conformity across clients for items such as Simple Serialize (SSZ), Signature Aggregation (BLS), and Validator Shuffling.

**Simple Serialize**

TODO

**Signature Aggregation**

TODO

**Validator Shuffling**

```yaml
title: Shuffling Algorithm Tests
summary: Test vectors for shuffling a list based upon a seed using `shuffle`
test_suite: shuffle
fork: tchaikovsky
version: 1.0

test_cases:
  - input: []
    output: []
    seed: !!binary ""
  - name: boring_list
    description: List with a single element, 0
    input: [0]
    output: [0]
    seed: !!binary ""
  - input: [255]
    output: [255]
    seed: !!binary ""
  - input: [4, 6, 2, 6, 1, 4, 6, 2, 1, 5]
    output: [1, 6, 4, 1, 6, 6, 2, 2, 4, 5]
    seed: !!binary ""
  - input: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
    output: [4, 7, 10, 13, 3, 1, 2, 9, 12, 6, 11, 8, 5]
    seed: !!binary ""
  - input: [65, 6, 2, 6, 1, 4, 6, 2, 1, 5]
    output: [6, 65, 2, 5, 4, 2, 6, 6, 1, 1]
    seed: !!binary |
      JlAYJ5H2j8g7PLiPHZI/rTS1uAvKiieOrifPN6Moso0=
```

# Using the Runner

First, create a directory containing the YAML files you wish to test (or use the default `./sampletests` directory included with Prysm).
Then, make sure you have the following folder structure for the directory:

```
yourtestdir/
  fork-choice-tests/
    *.yaml
    ...
  shuffle-tests/
    *.yaml
    ...
  state-tests/
    *.yaml
    ...
```

Then, navigate to the test runner's directory and use the go tool as follows:

```bash
go run main.go -tests-dir /path/to/your/testsdir
```

The runner will then start up a simulated backend and run all your specified YAML tests.

```bash
[2018-11-06 15:01:44] INFO ----Running Chain Tests----
[2018-11-06 15:01:44] INFO Running 4 YAML Tests
[2018-11-06 15:01:44] INFO Title: Sample Ethereum 2.0 Beacon Chain Test
[2018-11-06 15:01:44] INFO Summary: Basic, functioning fork choice rule for Ethereum 2.0
[2018-11-06 15:01:44] INFO Test Suite: prysm
[2018-11-06 15:01:44] INFO Test Runs Finished In: 0.000643545 Seconds
```
beacon-chain/chaintest/backend/BUILD.bazel (deleted)
@@ -1,42 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = [
        "fork_choice_test_format.go",
        "helpers.go",
        "shuffle_test_format.go",
        "simulated_backend.go",
        "state_test_format.go",
    ],
    importpath = "github.com/prysmaticlabs/prysm/beacon-chain/chaintest/backend",
    visibility = ["//beacon-chain:__subpackages__"],
    deps = [
        "//beacon-chain/blockchain:go_default_library",
        "//beacon-chain/core/blocks:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/core/state:go_default_library",
        "//beacon-chain/db:go_default_library",
        "//beacon-chain/utils:go_default_library",
        "//proto/beacon/p2p/v1:go_default_library",
        "//shared/bls:go_default_library",
        "//shared/forkutil:go_default_library",
        "//shared/hashutil:go_default_library",
        "//shared/params:go_default_library",
        "//shared/sliceutil:go_default_library",
        "//shared/trieutil:go_default_library",
        "@com_github_ethereum_go_ethereum//common:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["simulated_backend_test.go"],
    embed = [":go_default_library"],
    deps = [
        "//beacon-chain/db:go_default_library",
        "//shared/featureconfig:go_default_library",
        "//shared/params:go_default_library",
    ],
)
beacon-chain/chaintest/backend/fork_choice_test_format.go (deleted)
@@ -1,51 +0,0 @@
package backend

// ForkChoiceTest --
type ForkChoiceTest struct {
	Title     string
	Summary   string
	TestSuite string                `yaml:"test_suite"`
	TestCases []*ForkChoiceTestCase `yaml:"test_cases"`
}

// ForkChoiceTestCase --
type ForkChoiceTestCase struct {
	Config  *ForkChoiceTestConfig `yaml:"config"`
	Slots   []*ForkChoiceTestSlot `yaml:"slots,flow"`
	Results *ForkChoiceTestResult `yaml:"results"`
}

// ForkChoiceTestConfig --
type ForkChoiceTestConfig struct {
	ValidatorCount   uint64 `yaml:"validator_count"`
	CycleLength      uint64 `yaml:"cycle_length"`
	ShardCount       uint64 `yaml:"shard_count"`
	MinCommitteeSize uint64 `yaml:"min_committee_size"`
}

// ForkChoiceTestSlot --
type ForkChoiceTestSlot struct {
	SlotNumber   uint64             `yaml:"slot_number"`
	NewBlock     *TestBlock         `yaml:"new_block"`
	Attestations []*TestAttestation `yaml:",flow"`
}

// ForkChoiceTestResult --
type ForkChoiceTestResult struct {
	Head               string
	LastJustifiedBlock string `yaml:"last_justified_block"`
	LastFinalizedBlock string `yaml:"last_finalized_block"`
}

// TestBlock --
type TestBlock struct {
	ID     string `yaml:"ID"`
	Parent string `yaml:"parent"`
}

// TestAttestation --
type TestAttestation struct {
	Block             string `yaml:"block"`
	ValidatorRegistry string `yaml:"validators"`
	CommitteeSlot     uint64 `yaml:"committee_slot"`
}
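These structs define the fork-choice YAML test language field by field. A minimal test case that would unmarshal into them might look as follows; the block IDs and counts are illustrative, not taken from the repository's sample tests:

```yaml
title: Minimal fork choice example
summary: Illustrative only
test_suite: prysm
test_cases:
  - config:
      validator_count: 100
      cycle_length: 64
      shard_count: 8
      min_committee_size: 4
    slots:
      - slot_number: 1
        new_block:
          ID: b1
          parent: genesis
        attestations:
          - block: b1
            validators: "0-49"
            committee_slot: 1
    results:
      head: b1
      last_justified_block: genesis
      last_finalized_block: genesis
```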
beacon-chain/chaintest/backend/helpers.go (deleted)
@@ -1,170 +0,0 @@
package backend

import (
	"crypto/rand"
	"encoding/binary"
	"fmt"
	"time"

	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
	"github.com/prysmaticlabs/prysm/shared/bls"
	"github.com/prysmaticlabs/prysm/shared/forkutil"
	"github.com/prysmaticlabs/prysm/shared/hashutil"
	"github.com/prysmaticlabs/prysm/shared/params"
	"github.com/prysmaticlabs/prysm/shared/trieutil"
)

// Generates a simulated beacon block to use
// in the next state transition given the current state,
// the previous beacon block, and previous beacon block root.
func generateSimulatedBlock(
	beaconState *pb.BeaconState,
	prevBlockRoot [32]byte,
	historicalDeposits []*pb.Deposit,
	simObjects *SimulatedObjects,
	privKeys []*bls.SecretKey,
) (*pb.BeaconBlock, [32]byte, error) {
	stateRoot, err := hashutil.HashProto(beaconState)
	if err != nil {
		return nil, [32]byte{}, fmt.Errorf("could not tree hash state: %v", err)
	}
	proposerIdx, err := helpers.BeaconProposerIndex(beaconState, beaconState.Slot+1)
	if err != nil {
		return nil, [32]byte{}, err
	}
	epoch := helpers.SlotToEpoch(beaconState.Slot + 1)
	buf := make([]byte, 32)
	binary.LittleEndian.PutUint64(buf, epoch)
	domain := forkutil.DomainVersion(beaconState.Fork, epoch, params.BeaconConfig().DomainRandao)
	// The proposer of the next slot signs the RANDAO reveal for the new epoch.
	epochSignature := privKeys[proposerIdx].Sign(buf, domain)
	block := &pb.BeaconBlock{
		Slot:             beaconState.Slot + 1,
		RandaoReveal:     epochSignature.Marshal(),
		ParentRootHash32: prevBlockRoot[:],
		StateRootHash32:  stateRoot[:],
		Eth1Data: &pb.Eth1Data{
			DepositRootHash32: []byte{1},
			BlockHash32:       []byte{2},
		},
		Body: &pb.BeaconBlockBody{
			ProposerSlashings: []*pb.ProposerSlashing{},
			AttesterSlashings: []*pb.AttesterSlashing{},
			Attestations:      []*pb.Attestation{},
			Deposits:          []*pb.Deposit{},
			VoluntaryExits:    []*pb.VoluntaryExit{},
		},
	}
	if simObjects.simDeposit != nil {
		depositInput := &pb.DepositInput{
			Pubkey:                      []byte(simObjects.simDeposit.Pubkey),
			WithdrawalCredentialsHash32: make([]byte, 32),
			ProofOfPossession:           make([]byte, 96),
		}

		data, err := helpers.EncodeDepositData(depositInput, simObjects.simDeposit.Amount, time.Now().Unix())
		if err != nil {
			return nil, [32]byte{}, fmt.Errorf("could not encode deposit data: %v", err)
		}

		// We then update the deposits Merkle trie with the deposit data and return
		// its Merkle branch leading up to the root of the trie.
		historicalDepositData := make([][]byte, len(historicalDeposits))
		for i := range historicalDeposits {
			historicalDepositData[i] = historicalDeposits[i].DepositData
		}
		newTrie, err := trieutil.GenerateTrieFromItems(append(historicalDepositData, data), int(params.BeaconConfig().DepositContractTreeDepth))
		if err != nil {
			return nil, [32]byte{}, fmt.Errorf("could not regenerate trie: %v", err)
		}
		proof, err := newTrie.MerkleProof(int(simObjects.simDeposit.MerkleIndex))
		if err != nil {
			return nil, [32]byte{}, fmt.Errorf("could not generate proof: %v", err)
		}

		root := newTrie.Root()
		block.Eth1Data.DepositRootHash32 = root[:]
		block.Body.Deposits = append(block.Body.Deposits, &pb.Deposit{
			DepositData:        data,
			MerkleProofHash32S: proof,
			MerkleTreeIndex:    simObjects.simDeposit.MerkleIndex,
		})
	}
	if simObjects.simProposerSlashing != nil {
		block.Body.ProposerSlashings = append(block.Body.ProposerSlashings, &pb.ProposerSlashing{
			ProposerIndex: simObjects.simProposerSlashing.ProposerIndex,
			ProposalData_1: &pb.ProposalSignedData{
				Slot:            simObjects.simProposerSlashing.Proposal1Slot,
				Shard:           simObjects.simProposerSlashing.Proposal1Shard,
				BlockRootHash32: []byte(simObjects.simProposerSlashing.Proposal1Root),
			},
			ProposalData_2: &pb.ProposalSignedData{
				Slot:            simObjects.simProposerSlashing.Proposal2Slot,
				Shard:           simObjects.simProposerSlashing.Proposal2Shard,
				BlockRootHash32: []byte(simObjects.simProposerSlashing.Proposal2Root),
			},
		})
	}
	if simObjects.simAttesterSlashing != nil {
		block.Body.AttesterSlashings = append(block.Body.AttesterSlashings, &pb.AttesterSlashing{
			SlashableAttestation_1: &pb.SlashableAttestation{
				Data: &pb.AttestationData{
					Slot:           simObjects.simAttesterSlashing.SlashableAttestation1Slot,
					JustifiedEpoch: simObjects.simAttesterSlashing.SlashableAttestation1JustifiedEpoch,
				},
				CustodyBitfield:  []byte(simObjects.simAttesterSlashing.SlashableAttestation1CustodyBitField),
				ValidatorIndices: simObjects.simAttesterSlashing.SlashableAttestation1ValidatorIndices,
			},
			SlashableAttestation_2: &pb.SlashableAttestation{
				Data: &pb.AttestationData{
					Slot:           simObjects.simAttesterSlashing.SlashableAttestation2Slot,
					JustifiedEpoch: simObjects.simAttesterSlashing.SlashableAttestation2JustifiedEpoch,
				},
				CustodyBitfield:  []byte(simObjects.simAttesterSlashing.SlashableAttestation2CustodyBitField),
				ValidatorIndices: simObjects.simAttesterSlashing.SlashableAttestation2ValidatorIndices,
			},
		})
	}
	if simObjects.simValidatorExit != nil {
		block.Body.VoluntaryExits = append(block.Body.VoluntaryExits, &pb.VoluntaryExit{
			Epoch:          simObjects.simValidatorExit.Epoch,
			ValidatorIndex: simObjects.simValidatorExit.ValidatorIndex,
		})
	}
	blockRoot, err := hashutil.HashBeaconBlock(block)
	if err != nil {
		return nil, [32]byte{}, fmt.Errorf("could not tree hash new block: %v", err)
	}
	return block, blockRoot, nil
}

// generateInitialSimulatedDeposits generates initial deposits for creating a beacon state in the simulated
// backend based on the yaml configuration.
func generateInitialSimulatedDeposits(numDeposits uint64) ([]*pb.Deposit, []*bls.SecretKey, error) {
	genesisTime := time.Date(2018, 9, 0, 0, 0, 0, 0, time.UTC).Unix()
	deposits := make([]*pb.Deposit, numDeposits)
	privKeys := make([]*bls.SecretKey, numDeposits)
	for i := 0; i < len(deposits); i++ {
		priv, err := bls.RandKey(rand.Reader)
		if err != nil {
			return nil, nil, fmt.Errorf("could not initialize key: %v", err)
		}
		depositInput := &pb.DepositInput{
			Pubkey:                      priv.PublicKey().Marshal(),
			WithdrawalCredentialsHash32: make([]byte, 32),
			ProofOfPossession:           make([]byte, 96),
		}
		depositData, err := helpers.EncodeDepositData(
			depositInput,
			params.BeaconConfig().MaxDepositAmount,
			genesisTime,
		)
		if err != nil {
			return nil, nil, fmt.Errorf("could not encode genesis block deposits: %v", err)
		}
		deposits[i] = &pb.Deposit{DepositData: depositData, MerkleTreeIndex: uint64(i)}
		privKeys[i] = priv
	}
	return deposits, privKeys, nil
}
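The deposit branch of `generateSimulatedBlock` folds all accumulated deposit data into a Merkle trie and commits to its root in `Eth1Data`. As a toy illustration of that folding (the real `trieutil` pads the trie to `DepositContractTreeDepth` and also produces Merkle branches; this sketch only shows pairwise hashing up to a root):

```go
// Toy Merkle-root computation over deposit data leaves; illustrative only.
package main

import (
	"crypto/sha256"
	"fmt"
)

func merkleRoot(leaves [][]byte) [32]byte {
	layer := make([][32]byte, len(leaves))
	for i, l := range leaves {
		layer[i] = sha256.Sum256(l)
	}
	for len(layer) > 1 {
		if len(layer)%2 == 1 { // duplicate the last node on odd layers
			layer = append(layer, layer[len(layer)-1])
		}
		next := make([][32]byte, len(layer)/2)
		for i := 0; i < len(next); i++ {
			concat := append(layer[2*i][:], layer[2*i+1][:]...)
			next[i] = sha256.Sum256(concat)
		}
		layer = next
	}
	return layer[0]
}

func main() {
	root := merkleRoot([][]byte{[]byte("deposit0"), []byte("deposit1")})
	fmt.Printf("%x\n", root)
}
```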
beacon-chain/chaintest/backend/shuffle_test_format.go (deleted)
@@ -1,18 +0,0 @@
package backend

// ShuffleTest --
type ShuffleTest struct {
	Title     string             `yaml:"title"`
	Summary   string             `yaml:"summary"`
	TestSuite string             `yaml:"test_suite"`
	Fork      string             `yaml:"fork"`
	Version   string             `yaml:"version"`
	TestCases []*ShuffleTestCase `yaml:"test_cases"`
}

// ShuffleTestCase --
type ShuffleTestCase struct {
	Input  []uint64 `yaml:"input,flow"`
	Output []uint64 `yaml:"output,flow"`
	Seed   string
}
@@ -1,393 +0,0 @@
|
||||
// Package backend contains utilities for simulating an entire
|
||||
// ETH 2.0 beacon chain for e2e tests and benchmarking
|
||||
// purposes.
|
||||
package backend
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
|
||||
b "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/utils"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bls"
|
||||
"github.com/prysmaticlabs/prysm/shared/hashutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/sliceutil"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// SimulatedBackend allowing for a programmatic advancement
|
||||
// of an in-memory beacon chain for client test runs
|
||||
// and other e2e use cases.
|
||||
type SimulatedBackend struct {
|
||||
chainService *blockchain.ChainService
|
||||
beaconDB *db.BeaconDB
|
||||
state *pb.BeaconState
|
||||
prevBlockRoots [][32]byte
|
||||
inMemoryBlocks []*pb.BeaconBlock
|
||||
historicalDeposits []*pb.Deposit
|
||||
}
|
||||
|
||||
// SimulatedObjects is a container to hold the
|
||||
// required primitives for generation of a beacon
|
||||
// block.
|
||||
type SimulatedObjects struct {
|
||||
simDeposit *StateTestDeposit
|
||||
simProposerSlashing *StateTestProposerSlashing
|
||||
simAttesterSlashing *StateTestAttesterSlashing
|
||||
simValidatorExit *StateTestValidatorExit
|
||||
}
|
||||
|
||||
// NewSimulatedBackend creates an instance by initializing a chain service
|
||||
// utilizing a mockDB which will act according to test run parameters specified
|
||||
// in the common ETH 2.0 client test YAML format.
|
||||
func NewSimulatedBackend() (*SimulatedBackend, error) {
|
||||
db, err := db.SetupDB()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not setup simulated backend db: %v", err)
|
||||
}
|
||||
cs, err := blockchain.NewChainService(context.Background(), &blockchain.Config{
|
||||
BeaconDB: db,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &SimulatedBackend{
|
||||
chainService: cs,
|
||||
beaconDB: db,
|
||||
inMemoryBlocks: make([]*pb.BeaconBlock, 0),
|
||||
historicalDeposits: make([]*pb.Deposit, 0),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// SetupBackend sets up the simulated backend with simulated deposits, and initializes the
|
||||
// state and genesis block.
|
||||
func (sb *SimulatedBackend) SetupBackend(numOfDeposits uint64) ([]*bls.SecretKey, error) {
|
||||
initialDeposits, privKeys, err := generateInitialSimulatedDeposits(numOfDeposits)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not simulate initial validator deposits: %v", err)
|
||||
}
|
||||
if err := sb.setupBeaconStateAndGenesisBlock(initialDeposits); err != nil {
|
||||
return nil, fmt.Errorf("could not set up beacon state and initialize genesis block %v", err)
|
||||
}
|
||||
return privKeys, nil
|
||||
}
|
||||
|
||||
// DB returns the underlying db instance in the simulated
|
||||
// backend.
|
||||
func (sb *SimulatedBackend) DB() *db.BeaconDB {
|
||||
return sb.beaconDB
|
||||
}
|
||||
|
||||
// GenerateBlockAndAdvanceChain generates a simulated block and runs that block though
|
||||
// state transition.
|
||||
func (sb *SimulatedBackend) GenerateBlockAndAdvanceChain(objects *SimulatedObjects, privKeys []*bls.SecretKey) error {
|
||||
prevBlockRoot := sb.prevBlockRoots[len(sb.prevBlockRoots)-1]
|
||||
// We generate a new block to pass into the state transition.
|
||||
newBlock, newBlockRoot, err := generateSimulatedBlock(
|
||||
sb.state,
|
||||
prevBlockRoot,
|
||||
sb.historicalDeposits,
|
||||
objects,
|
||||
privKeys,
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not generate simulated beacon block %v", err)
|
||||
}
|
||||
newState := sb.state
|
||||
newState.LatestEth1Data = newBlock.Eth1Data
|
||||
newState, err = state.ExecuteStateTransition(
|
||||
context.Background(),
|
||||
sb.state,
|
||||
newBlock,
|
||||
prevBlockRoot,
|
||||
state.DefaultConfig(),
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not execute state transition: %v", err)
|
||||
}
|
||||
|
||||
sb.state = newState
|
||||
sb.prevBlockRoots = append(sb.prevBlockRoots, newBlockRoot)
|
||||
sb.inMemoryBlocks = append(sb.inMemoryBlocks, newBlock)
|
||||
if len(newBlock.Body.Deposits) > 0 {
|
||||
sb.historicalDeposits = append(sb.historicalDeposits, newBlock.Body.Deposits...)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GenerateNilBlockAndAdvanceChain would trigger a state transition with a nil block.
|
||||
func (sb *SimulatedBackend) GenerateNilBlockAndAdvanceChain() error {
|
||||
prevBlockRoot := sb.prevBlockRoots[len(sb.prevBlockRoots)-1]
|
||||
newState, err := state.ExecuteStateTransition(
|
||||
context.Background(),
|
||||
sb.state,
|
||||
nil,
|
||||
prevBlockRoot,
|
||||
state.DefaultConfig(),
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not execute state transition: %v", err)
|
||||
}
|
||||
sb.state = newState
|
||||
return nil
|
||||
}
|
||||
|
||||
// Shutdown closes the db associated with the simulated backend.
|
||||
func (sb *SimulatedBackend) Shutdown() error {
|
||||
return sb.beaconDB.Close()
|
||||
}
|
||||
|
||||
// State is a getter to return the current beacon state
|
||||
// of the backend.
|
||||
func (sb *SimulatedBackend) State() *pb.BeaconState {
|
||||
return sb.state
|
||||
}
|
||||
|
||||
// InMemoryBlocks returns the blocks that have been processed by the simulated
|
||||
// backend.
|
||||
func (sb *SimulatedBackend) InMemoryBlocks() []*pb.BeaconBlock {
|
||||
return sb.inMemoryBlocks
|
||||
}
|
||||
|
||||
// RunForkChoiceTest uses a parsed set of chaintests from a YAML file
|
||||
// according to the ETH 2.0 client chain test specification and runs them
|
||||
// against the simulated backend.
|
||||
func (sb *SimulatedBackend) RunForkChoiceTest(testCase *ForkChoiceTestCase) error {
|
||||
defer db.TeardownDB(sb.beaconDB)
|
||||
// Utilize the config parameters in the test case to setup
|
||||
// the DB and set global config parameters accordingly.
|
||||
// Config parameters include: ValidatorCount, ShardCount,
|
||||
// CycleLength, MinCommitteeSize, and more based on the YAML
|
||||
// test language specification.
|
||||
c := params.BeaconConfig()
|
||||
c.ShardCount = testCase.Config.ShardCount
|
||||
c.SlotsPerEpoch = testCase.Config.CycleLength
|
||||
c.TargetCommitteeSize = testCase.Config.MinCommitteeSize
|
||||
params.OverrideBeaconConfig(c)
|
||||
|
||||
// Then, we create the validators based on the custom test config.
|
||||
validators := make([]*pb.Validator, testCase.Config.ValidatorCount)
|
||||
for i := uint64(0); i < testCase.Config.ValidatorCount; i++ {
|
||||
validators[i] = &pb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().ActivationExitDelay,
|
||||
Pubkey: []byte{},
|
||||
}
|
||||
}
|
||||
// TODO(#718): Next step is to update and save the blocks specified
|
||||
// in the case case into the DB.
|
||||
//
|
||||
// Then, we call the updateHead routine and confirm the
|
||||
// chain's head is the expected result from the test case.
|
||||
return nil
|
||||
}
|
||||
|
||||
// RunShuffleTest uses validator set specified from a YAML file, runs the validator shuffle
|
||||
// algorithm, then compare the output with the expected output from the YAML file.
|
||||
func (sb *SimulatedBackend) RunShuffleTest(testCase *ShuffleTestCase) error {
|
||||
defer db.TeardownDB(sb.beaconDB)
|
||||
seed := common.BytesToHash([]byte(testCase.Seed))
|
||||
output, err := utils.ShuffleIndices(seed, testCase.Input)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !reflect.DeepEqual(output, testCase.Output) {
|
||||
return fmt.Errorf("shuffle result error: expected %v, actual %v", testCase.Output, output)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// RunStateTransitionTest advances a beacon chain state transition an N amount of
|
||||
// slots from a genesis state, with a block being processed at every iteration
|
||||
// of the state transition function.
|
||||
func (sb *SimulatedBackend) RunStateTransitionTest(testCase *StateTestCase) error {
|
||||
defer db.TeardownDB(sb.beaconDB)
|
||||
setTestConfig(testCase)
|
||||
|
||||
privKeys, err := sb.initializeStateTest(testCase)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not initialize state test %v", err)
|
||||
}
|
||||
averageTimesPerTransition := []time.Duration{}
|
||||
startSlot := params.BeaconConfig().GenesisSlot
|
||||
for i := startSlot; i < startSlot+testCase.Config.NumSlots; i++ {
|
||||
|
||||
// If the slot is marked as skipped in the configuration options,
|
||||
// we simply run the state transition with a nil block argument.
|
||||
if sliceutil.IsInUint64(i, testCase.Config.SkipSlots) {
|
||||
if err := sb.GenerateNilBlockAndAdvanceChain(); err != nil {
|
||||
return fmt.Errorf("could not advance the chain with a nil block %v", err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
simulatedObjects := sb.generateSimulatedObjects(testCase, i)
|
||||
startTime := time.Now()
|
||||
|
||||
if err := sb.GenerateBlockAndAdvanceChain(simulatedObjects, privKeys); err != nil {
|
||||
return fmt.Errorf("could not generate the block and advance the chain %v", err)
|
||||
}
|
||||
|
||||
endTime := time.Now()
|
||||
averageTimesPerTransition = append(averageTimesPerTransition, endTime.Sub(startTime))
|
||||
}
|
||||
|
||||
log.Infof(
|
||||
"with %d initial deposits, each state transition took average time = %v",
|
||||
testCase.Config.DepositsForChainStart,
|
||||
averageDuration(averageTimesPerTransition),
|
||||
)
|
||||
|
||||
if err := sb.compareTestCase(testCase); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}

// initializeStateTest sets up the environment by generating all the required objects in order
// to proceed with the state test.
func (sb *SimulatedBackend) initializeStateTest(testCase *StateTestCase) ([]*bls.SecretKey, error) {
	initialDeposits, privKeys, err := generateInitialSimulatedDeposits(testCase.Config.DepositsForChainStart)
	if err != nil {
		return nil, fmt.Errorf("could not simulate initial validator deposits: %v", err)
	}
	if err := sb.setupBeaconStateAndGenesisBlock(initialDeposits); err != nil {
		return nil, fmt.Errorf("could not set up beacon state and initialize genesis block: %v", err)
	}
	return privKeys, nil
}

// setupBeaconStateAndGenesisBlock creates the initial beacon state and genesis block in order to
// proceed with the test.
func (sb *SimulatedBackend) setupBeaconStateAndGenesisBlock(initialDeposits []*pb.Deposit) error {
	var err error
	// time.Date normalizes a day value of 0, so this resolves to August 31, 2018 UTC.
	genesisTime := time.Date(2018, 9, 0, 0, 0, 0, 0, time.UTC).Unix()
	sb.state, err = state.GenesisBeaconState(initialDeposits, uint64(genesisTime), nil)
	if err != nil {
		return fmt.Errorf("could not initialize simulated beacon state: %v", err)
	}
	sb.historicalDeposits = initialDeposits

	// Hashing the initial beacon state and genesis block is not expected to
	// fail, but any error is still surfaced to the caller.
	// #nosec G104
	stateRoot, err := hashutil.HashProto(sb.state)
	if err != nil {
		return fmt.Errorf("could not tree hash state: %v", err)
	}
	genesisBlock := b.NewGenesisBlock(stateRoot[:])
	genesisBlockRoot, err := hashutil.HashBeaconBlock(genesisBlock)
	if err != nil {
		return fmt.Errorf("could not tree hash genesis block: %v", err)
	}

	// We now keep track of generated blocks for each state transition in
	// a slice.
	sb.prevBlockRoots = [][32]byte{genesisBlockRoot}
	sb.inMemoryBlocks = append(sb.inMemoryBlocks, genesisBlock)
	return nil
}

// generateSimulatedObjects generates the simulated objects depending on the test case and current slot.
func (sb *SimulatedBackend) generateSimulatedObjects(testCase *StateTestCase, slotNumber uint64) *SimulatedObjects {
	// If the slot is not skipped, we check if we are simulating a deposit at the current slot.
	var simulatedDeposit *StateTestDeposit
	for _, deposit := range testCase.Config.Deposits {
		if deposit.Slot == slotNumber {
			simulatedDeposit = deposit
			break
		}
	}
	var simulatedProposerSlashing *StateTestProposerSlashing
	for _, pSlashing := range testCase.Config.ProposerSlashings {
		if pSlashing.Slot == slotNumber {
			simulatedProposerSlashing = pSlashing
			break
		}
	}
	var simulatedAttesterSlashing *StateTestAttesterSlashing
	for _, aSlashing := range testCase.Config.AttesterSlashings {
		if aSlashing.Slot == slotNumber {
			simulatedAttesterSlashing = aSlashing
			break
		}
	}
	var simulatedValidatorExit *StateTestValidatorExit
	for _, exit := range testCase.Config.ValidatorExits {
		if exit.Epoch == slotNumber/params.BeaconConfig().SlotsPerEpoch {
			simulatedValidatorExit = exit
			break
		}
	}

	return &SimulatedObjects{
		simDeposit:          simulatedDeposit,
		simProposerSlashing: simulatedProposerSlashing,
		simAttesterSlashing: simulatedAttesterSlashing,
		simValidatorExit:    simulatedValidatorExit,
	}
}
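
// Behavior note with a hypothetical example (not in the original file): for a
// test case configured with a single deposit at slot GenesisSlot+1, calling
//
//	objs := sb.generateSimulatedObjects(testCase, params.BeaconConfig().GenesisSlot+1)
//
// yields a SimulatedObjects whose simDeposit points at that deposit while the
// other sim* fields remain nil, since only the first match per category is used.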

// compareTestCase compares the state in the simulated backend against the values in the inputted
// test case. If there are any discrepancies, it returns an error.
func (sb *SimulatedBackend) compareTestCase(testCase *StateTestCase) error {
	if sb.state.Slot != testCase.Results.Slot {
		return fmt.Errorf(
			"incorrect state slot after %d state transitions without blocks, wanted %d, received %d",
			testCase.Config.NumSlots,
			testCase.Results.Slot,
			sb.state.Slot,
		)
	}
	if len(sb.state.ValidatorRegistry) != testCase.Results.NumValidators {
		return fmt.Errorf(
			"incorrect num validators after %d state transitions without blocks, wanted %d, received %d",
			testCase.Config.NumSlots,
			testCase.Results.NumValidators,
			len(sb.state.ValidatorRegistry),
		)
	}
	for _, slashed := range testCase.Results.SlashedValidators {
		if sb.state.ValidatorRegistry[slashed].SlashedEpoch == params.BeaconConfig().FarFutureEpoch {
			return fmt.Errorf(
				"expected validator at index %d to have been slashed",
				slashed,
			)
		}
	}
	for _, exited := range testCase.Results.ExitedValidators {
		if sb.state.ValidatorRegistry[exited].StatusFlags != pb.Validator_INITIATED_EXIT {
			return fmt.Errorf(
				"expected validator at index %d to have exited",
				exited,
			)
		}
	}
	return nil
}

func setTestConfig(testCase *StateTestCase) {
	// We set up the initial configuration for running state
	// transition tests below. Note that this overrides the package-level
	// beacon config, so the values persist for the rest of the process.
	c := params.BeaconConfig()
	c.SlotsPerEpoch = testCase.Config.SlotsPerEpoch
	c.DepositsForChainStart = testCase.Config.DepositsForChainStart
	params.OverrideBeaconConfig(c)
}

func averageDuration(times []time.Duration) time.Duration {
	// Guard against an empty slice (e.g. when every slot in a test case is
	// skipped) to avoid dividing by zero below.
	if len(times) == 0 {
		return 0
	}
	sum := int64(0)
	for _, t := range times {
		sum += t.Nanoseconds()
	}
	return time.Duration(sum / int64(len(times)))
}
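
// Illustrative usage (not part of the original file):
//
//	avg := averageDuration([]time.Duration{
//		2 * time.Millisecond,
//		4 * time.Millisecond,
//	})
//	// avg == 3 * time.Millisecond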

@@ -1,85 +0,0 @@
package backend

import (
	"testing"

	"github.com/prysmaticlabs/prysm/beacon-chain/db"
	"github.com/prysmaticlabs/prysm/shared/featureconfig"
	"github.com/prysmaticlabs/prysm/shared/params"
)

func init() {
	featureconfig.InitFeatureConfig(&featureconfig.FeatureFlagConfig{
		EnableCrosslinks: true,
	})
}

func TestSimulatedBackendStop_ShutsDown(t *testing.T) {
	backend, err := NewSimulatedBackend()
	if err != nil {
		t.Fatalf("Could not create a new simulated backend: %v", err)
	}
	if err := backend.Shutdown(); err != nil {
		t.Errorf("Could not successfully shut down simulated backend: %v", err)
	}

	db.TeardownDB(backend.beaconDB)
}

func TestGenerateBlockAndAdvanceChain_IncreasesSlot(t *testing.T) {
	backend, err := NewSimulatedBackend()
	if err != nil {
		t.Fatalf("Could not create a new simulated backend: %v", err)
	}

	privKeys, err := backend.SetupBackend(100)
	if err != nil {
		t.Fatalf("Could not set up backend: %v", err)
	}
	defer backend.Shutdown()
	defer db.TeardownDB(backend.beaconDB)

	slotLimit := params.BeaconConfig().SlotsPerEpoch + uint64(1)

	for i := uint64(0); i < slotLimit; i++ {
		if err := backend.GenerateBlockAndAdvanceChain(&SimulatedObjects{}, privKeys); err != nil {
			t.Fatalf("Could not generate block and transition state for slot %d: %v", backend.state.Slot+1, err)
		}
		if backend.inMemoryBlocks[len(backend.inMemoryBlocks)-1].Slot != backend.state.Slot {
			t.Errorf("In-memory blocks do not have the same last slot as the state, expected %d but got %v",
				backend.state.Slot, backend.inMemoryBlocks[len(backend.inMemoryBlocks)-1])
		}
	}

	if backend.state.Slot != params.BeaconConfig().GenesisSlot+slotLimit {
		t.Errorf("State slot %d does not match expected slot %d", backend.state.Slot, params.BeaconConfig().GenesisSlot+slotLimit)
	}
}

func TestGenerateNilBlockAndAdvanceChain_IncreasesSlot(t *testing.T) {
	backend, err := NewSimulatedBackend()
	if err != nil {
		t.Fatalf("Could not create a new simulated backend: %v", err)
	}

	if _, err := backend.SetupBackend(100); err != nil {
		t.Fatalf("Could not set up backend: %v", err)
	}
	defer backend.Shutdown()
	defer db.TeardownDB(backend.beaconDB)

	slotLimit := params.BeaconConfig().SlotsPerEpoch + uint64(1)

	for i := uint64(0); i < slotLimit; i++ {
		if err := backend.GenerateNilBlockAndAdvanceChain(); err != nil {
			t.Fatalf("Could not generate nil block and transition state for slot %d: %v", backend.state.Slot+1, err)
		}
	}

	if backend.state.Slot != params.BeaconConfig().GenesisSlot+slotLimit {
		t.Errorf("State slot %d does not match expected slot %d", backend.state.Slot, params.BeaconConfig().GenesisSlot+slotLimit)
	}
}

@@ -1,78 +0,0 @@
package backend

// StateTest --
type StateTest struct {
	Title     string
	Summary   string
	Fork      string           `yaml:"fork"`
	Version   string           `yaml:"version"`
	TestSuite string           `yaml:"test_suite"`
	TestCases []*StateTestCase `yaml:"test_cases"`
}

// StateTestCase --
type StateTestCase struct {
	Config  *StateTestConfig  `yaml:"config"`
	Results *StateTestResults `yaml:"results"`
}

// StateTestConfig --
type StateTestConfig struct {
	SkipSlots             []uint64                     `yaml:"skip_slots"`
	DepositSlots          []uint64                     `yaml:"deposit_slots"`
	Deposits              []*StateTestDeposit          `yaml:"deposits"`
	ProposerSlashings     []*StateTestProposerSlashing `yaml:"proposer_slashings"`
	AttesterSlashings     []*StateTestAttesterSlashing `yaml:"attester_slashings"`
	ValidatorExits        []*StateTestValidatorExit    `yaml:"validator_exits"`
	SlotsPerEpoch         uint64                       `yaml:"slots_per_epoch"`
	ShardCount            uint64                       `yaml:"shard_count"`
	DepositsForChainStart uint64                       `yaml:"deposits_for_chain_start"`
	NumSlots              uint64                       `yaml:"num_slots"`
}

// StateTestDeposit --
type StateTestDeposit struct {
	Slot        uint64 `yaml:"slot"`
	Amount      uint64 `yaml:"amount"`
	MerkleIndex uint64 `yaml:"merkle_index"`
	Pubkey      string `yaml:"pubkey"`
}

// StateTestProposerSlashing --
type StateTestProposerSlashing struct {
	Slot           uint64 `yaml:"slot"`
	ProposerIndex  uint64 `yaml:"proposer_index"`
	Proposal1Shard uint64 `yaml:"proposal_1_shard"`
	Proposal2Shard uint64 `yaml:"proposal_2_shard"`
	Proposal1Slot  uint64 `yaml:"proposal_1_slot"`
	Proposal2Slot  uint64 `yaml:"proposal_2_slot"`
	Proposal1Root  string `yaml:"proposal_1_root"`
	Proposal2Root  string `yaml:"proposal_2_root"`
}

// StateTestAttesterSlashing --
type StateTestAttesterSlashing struct {
	Slot                                  uint64   `yaml:"slot"`
	SlashableAttestation1Slot             uint64   `yaml:"slashable_attestation_1_slot"`
	SlashableAttestation1JustifiedEpoch   uint64   `yaml:"slashable_attestation_1_justified_epoch"`
	SlashableAttestation1ValidatorIndices []uint64 `yaml:"slashable_attestation_1_validator_indices"`
	SlashableAttestation1CustodyBitField  string   `yaml:"slashable_attestation_1_custody_bitfield"`
	SlashableAttestation2Slot             uint64   `yaml:"slashable_attestation_2_slot"`
	SlashableAttestation2JustifiedEpoch   uint64   `yaml:"slashable_attestation_2_justified_epoch"`
	SlashableAttestation2ValidatorIndices []uint64 `yaml:"slashable_attestation_2_validator_indices"`
	SlashableAttestation2CustodyBitField  string   `yaml:"slashable_attestation_2_custody_bitfield"`
}

// StateTestValidatorExit --
type StateTestValidatorExit struct {
	Epoch          uint64 `yaml:"epoch"`
	ValidatorIndex uint64 `yaml:"validator_index"`
}

// StateTestResults --
type StateTestResults struct {
	Slot              uint64
	NumValidators     int      `yaml:"num_validators"`
	SlashedValidators []uint64 `yaml:"slashed_validators"`
	ExitedValidators  []uint64 `yaml:"exited_validators"`
}
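
// For reference (not part of the original file), a minimal YAML document that
// unmarshals into StateTest. Keys follow the yaml tags above; the untagged
// Results.Slot field maps to the lowercased key "slot" by default. Values are
// copied from the sample state-tests YAML later in this diff.
//
//	title: Sample Ethereum Serenity State Transition Tests
//	test_suite: prysm
//	test_cases:
//	  - config:
//	      slots_per_epoch: 64
//	      deposits_for_chain_start: 64
//	      num_slots: 32
//	    results:
//	      slot: 9223372036854775840
//	      num_validators: 64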

@@ -1,145 +0,0 @@
package main

import (
	"flag"
	"fmt"
	"io/ioutil"
	"path"
	"time"

	"github.com/go-yaml/yaml"
	"github.com/prysmaticlabs/prysm/beacon-chain/chaintest/backend"
	"github.com/prysmaticlabs/prysm/shared/featureconfig"
	log "github.com/sirupsen/logrus"
	prefixed "github.com/x-cray/logrus-prefixed-formatter"
)

func init() {
	featureconfig.InitFeatureConfig(&featureconfig.FeatureFlagConfig{
		EnableCrosslinks: false,
	})
}

func readTestsFromYaml(yamlDir string) ([]interface{}, error) {
	const forkChoiceTestsFolderName = "fork-choice-tests"
	const shuffleTestsFolderName = "shuffle-tests"
	const stateTestsFolderName = "state-tests"

	var tests []interface{}

	dirs, err := ioutil.ReadDir(yamlDir)
	if err != nil {
		return nil, fmt.Errorf("could not read YAML tests directory: %v", err)
	}
	for _, dir := range dirs {
		files, err := ioutil.ReadDir(path.Join(yamlDir, dir.Name()))
		if err != nil {
			return nil, fmt.Errorf("could not read YAML tests directory: %v", err)
		}
		for _, file := range files {
			filePath := path.Join(yamlDir, dir.Name(), file.Name())
			// #nosec G304
			data, err := ioutil.ReadFile(filePath)
			if err != nil {
				return nil, fmt.Errorf("could not read YAML file: %v", err)
			}
			// Files in directories other than the three known folders are
			// intentionally skipped.
			switch dir.Name() {
			case forkChoiceTestsFolderName:
				decoded := &backend.ForkChoiceTest{}
				if err := yaml.Unmarshal(data, decoded); err != nil {
					return nil, fmt.Errorf("could not unmarshal YAML file into test struct: %v", err)
				}
				tests = append(tests, decoded)
			case shuffleTestsFolderName:
				decoded := &backend.ShuffleTest{}
				if err := yaml.Unmarshal(data, decoded); err != nil {
					return nil, fmt.Errorf("could not unmarshal YAML file into test struct: %v", err)
				}
				tests = append(tests, decoded)
			case stateTestsFolderName:
				decoded := &backend.StateTest{}
				if err := yaml.Unmarshal(data, decoded); err != nil {
					return nil, fmt.Errorf("could not unmarshal YAML file into test struct: %v", err)
				}
				tests = append(tests, decoded)
			}
		}
	}
	return tests, nil
}
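
// Expected on-disk layout (illustrative; the file names are hypothetical, only
// the three folder names above are significant):
//
//	tests/
//	  fork-choice-tests/
//	    sample.yaml
//	  shuffle-tests/
//	    sample.yaml
//	  state-tests/
//	    sample.yaml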

func runTests(tests []interface{}, sb *backend.SimulatedBackend) error {
	for _, tt := range tests {
		switch typedTest := tt.(type) {
		case *backend.ForkChoiceTest:
			log.Infof("Title: %v", typedTest.Title)
			log.Infof("Summary: %v", typedTest.Summary)
			log.Infof("Test Suite: %v", typedTest.TestSuite)
			for _, testCase := range typedTest.TestCases {
				if err := sb.RunForkChoiceTest(testCase); err != nil {
					return fmt.Errorf("chain test failed: %v", err)
				}
			}
			log.Info("Test PASSED")
		case *backend.ShuffleTest:
			log.Infof("Title: %v", typedTest.Title)
			log.Infof("Summary: %v", typedTest.Summary)
			log.Infof("Test Suite: %v", typedTest.TestSuite)
			log.Infof("Fork: %v", typedTest.Fork)
			log.Infof("Version: %v", typedTest.Version)
			for _, testCase := range typedTest.TestCases {
				if err := sb.RunShuffleTest(testCase); err != nil {
					return fmt.Errorf("chain test failed: %v", err)
				}
			}
			log.Info("Test PASSED")
		case *backend.StateTest:
			log.Infof("Title: %v", typedTest.Title)
			log.Infof("Summary: %v", typedTest.Summary)
			log.Infof("Test Suite: %v", typedTest.TestSuite)
			log.Infof("Fork: %v", typedTest.Fork)
			log.Infof("Version: %v", typedTest.Version)
			for _, testCase := range typedTest.TestCases {
				if err := sb.RunStateTransitionTest(testCase); err != nil {
					return fmt.Errorf("chain test failed: %v", err)
				}
			}
			log.Info("Test PASSED")
		default:
			return fmt.Errorf("received unknown test type: %T", typedTest)
		}
		log.Info("-----------------------------")
	}
	return nil
}

func main() {
	var yamlDir = flag.String("tests-dir", "", "path to directory of yaml tests")
	flag.Parse()

	customFormatter := new(prefixed.TextFormatter)
	customFormatter.TimestampFormat = "2006-01-02 15:04:05"
	customFormatter.FullTimestamp = true
	log.SetFormatter(customFormatter)

	tests, err := readTestsFromYaml(*yamlDir)
	if err != nil {
		log.Fatalf("Failed to load tests from YAML: %v", err)
	}

	sb, err := backend.NewSimulatedBackend()
	if err != nil {
		log.Fatalf("Could not create backend: %v", err)
	}

	log.Info("----Running Tests----")
	startTime := time.Now()

	if err := runTests(tests, sb); err != nil {
		log.Fatalf("Test failed: %v", err)
	}

	endTime := time.Now()
	log.Infof("Test Runs Finished In: %v", endTime.Sub(startTime))
}
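
// Illustrative invocation (the package path is assumed from the import paths
// in this file; adjust to the actual location of this main package):
//
//	go run ./beacon-chain/chaintest --tests-dir ./beacon-chain/chaintest/tests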

@@ -1,63 +0,0 @@
# Credits to Danny Ryan (Ethereum Foundation)
---

title: Sample Ethereum 2.0 Beacon Chain Test
summary: Basic, functioning fork choice rule for Ethereum 2.0
test_suite: prysm
test_cases:
  - config:
      validator_count: 100
      cycle_length: 8
      shard_count: 64
      min_committee_size: 8
    slots:
      # "slot_number" has a minimum of 1
      - slot_number: 1
        new_block:
          id: A
          # "*" is used for the genesis block
          parent: "*"
        attestations:
          - block: A
            # the following is a shorthand string for [0, 1, 2, 3, 4, 5]
            validators: "0-5"
      - slot_number: 2
        new_block:
          id: B
          parent: A
        attestations:
          - block: B
            validators: "0-5"
      - slot_number: 3
        new_block:
          id: C
          parent: A
        attestations:
          # attestation "committee_slot" defaults to the slot during which the attestation occurs
          - block: C
            validators: "2-7"
          # default "committee_slot" can be directly overridden
          - block: C
            committee_slot: 2
            validators: "6, 7"
      - slot_number: 4
        new_block:
          id: D
          parent: C
        attestations:
          - block: D
            validators: "1-4"
      # slots can be skipped entirely (5 in this case)
      - slot_number: 6
        new_block:
          id: E
          parent: D
        attestations:
          - block: E
            validators: "0-4"
          - block: B
            validators: "5, 6, 7"
    results:
      head: E
      last_justified_block: "*"
      last_finalized_block: "*"
@@ -1,44 +0,0 @@
# Credits to Danny Ryan (Ethereum Foundation)
---

title: Shuffling Algorithm Tests
summary: Test vectors for shuffling a list based upon a seed using `shuffle`
test_suite: shuffle
fork: tchaikovsky
version: 1.0

test_cases:
  - config:
      validator_count: 100
      cycle_length: 8
      shard_count: 32
      min_committee_size: 8
  - input: []
    output: []
    seed: !!binary ""
  - name: boring_list
    description: List with a single element, 0
    input: [0]
    output: [0]
    seed: !!binary ""
  - input: [255]
    output: [255]
    seed: !!binary ""
  - input: [4, 6, 2, 6, 1, 4, 6, 2, 1, 5]
    output: [2, 1, 6, 1, 4, 5, 6, 4, 6, 2]
    seed: !!binary ""
  - input: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
    output: [4, 9, 1, 13, 8, 3, 5, 10, 7, 6, 11, 2, 12]
    seed: !!binary ""
  - input: [65, 6, 2, 6, 1, 4, 6, 2, 1, 5]
    output: [6, 1, 2, 2, 6, 6, 1, 5, 65, 4]
    seed: !!binary |
      JlAYJ5H2j8g7PLiPHZI/rTS1uAvKiieOrifPN6Moso0=
  - input: [35, 6, 2, 6, 1, 4, 6, 2, 1, 5, 7, 98, 3, 2, 11]
    output: [35, 1, 6, 4, 6, 6, 5, 11, 2, 3, 7, 1, 2, 2, 98]
    seed: !!binary |
      VGhlIHF1aWNrIGJyb3duIGZveCBqdW1wcyBvdmVyIDEzIGxhenkgZG9ncy4=
  - input: [35, 6, 2, 6, 1, 4, 6, 2, 1, 5, 7, 98, 3, 2, 11]
    output: [98, 6, 6, 11, 5, 35, 2, 7, 2, 6, 4, 2, 1, 3, 1]
    seed: !!binary |
      rDTbe23J4UA0yLIurjbJqk49VcavAC0Nysas+l5MlwvLc0B/JqQ=
@@ -1,82 +0,0 @@
title: Sample Ethereum Serenity State Transition Tests
summary: Testing full state transition block processing
test_suite: prysm
fork: sapphire
version: 1.0
test_cases:
  - config:
      slots_per_epoch: 64
      deposits_for_chain_start: 64
      num_slots: 32 # Testing advancing state to slot < SlotsPerEpoch
    results:
      slot: 9223372036854775840
      num_validators: 64
  - config:
      slots_per_epoch: 64
      deposits_for_chain_start: 64
      num_slots: 64 # Testing advancing state to exactly slot == SlotsPerEpoch
      deposits:
        - slot: 9223372036854775809
          amount: 32
          merkle_index: 64
          pubkey: !!binary |
            SlAAbShSkUg7PLiPHZI/rTS1uAvKiieOrifPN6Moso0=
        - slot: 9223372036854775823
          amount: 32
          merkle_index: 65
          pubkey: !!binary |
            Oklajsjdkaklsdlkajsdjlajslkdjlkasjlkdjlajdsd
        - slot: 9223372036854775863
          amount: 32
          merkle_index: 66
          pubkey: !!binary |
            LkmqmqoodLKAslkjdkajsdljasdkajlksjdasldjasdd
      proposer_slashings:
        - slot: 9223372036854775824 # At slot 9223372036854775824, a proposer slashing is triggered
          proposer_index: 50 # We penalize the proposer that was just added from slot 15
          proposal_1_shard: 0
          proposal_1_slot: 15
          proposal_1_root: !!binary |
            LkmqmqoodLKAslkjdkajsdljasdkajlksjdasldjasdd
          proposal_2_shard: 0
          proposal_2_slot: 15
          proposal_2_root: !!binary |
            LkmqmqoodLKAslkjdkajsdljasdkajlksjdasldjasdd
      attester_slashings:
        - slot: 9223372036854775868 # At slot 9223372036854775868 (genesis + 60), an attester slashing is triggered
          slashable_attestation_1_slot: 9223372036854775864
          slashable_attestation_2_slot: 9223372036854775864
          slashable_attestation_1_justified_epoch: 0
          slashable_attestation_2_justified_epoch: 1
          slashable_attestation_1_custody_bitfield: !!binary "F"
          slashable_attestation_1_validator_indices: [1, 2, 3, 4, 5, 6, 7, 51]
          slashable_attestation_2_custody_bitfield: !!binary "F"
          slashable_attestation_2_validator_indices: [1, 2, 3, 4, 5, 6, 7, 51]
      validator_exits:
        - epoch: 144115188075855872
          validator_index: 45 # At slot 9223372036854775868, validator at index 45 triggers a voluntary exit
    results:
      slot: 9223372036854775872
      num_validators: 67
      slashed_validators: [50, 51] # We test that the validators at these indices were indeed slashed
      exited_validators: [45] # We confirm the indices of validators that willingly exited the registry
# TODO(1387): Waiting for the spec to stabilize before proceeding with this test case
#  - config:
#      skip_slots: [10, 20]
#      slots_per_epoch: 64
#      deposits_for_chain_start: 1000
#      num_slots: 128 # Testing advancing state's slot == 2*SlotsPerEpoch
#      deposits:
#        - slot: 10
#          amount: 32
#          merkle_index: 0
#          pubkey: !!binary |
#            SlAAbShSkUg7PLiPHZI/rTS1uAvKiieOrifPN6Moso0=
#        - slot: 20
#          amount: 32
#          merkle_index: 1
#          pubkey: !!binary |
#            Oklajsjdkaklsdlkajsdjlajslkdjlkasjlkdjlajdsd
#    results:
#      slot: 128
#      num_validators: 1000 # Validator registry should not have grown if slots 10 and 20 were skipped
@@ -1,49 +0,0 @@
package main

import (
	"testing"

	"github.com/prysmaticlabs/prysm/beacon-chain/chaintest/backend"
	"github.com/prysmaticlabs/prysm/shared/featureconfig"
)

func init() {
	featureconfig.InitFeatureConfig(&featureconfig.FeatureFlagConfig{
		EnableCrosslinks: true,
	})
}

func TestFromYaml_Pass(t *testing.T) {
	tests, err := readTestsFromYaml("./tests")
	if err != nil {
		t.Fatalf("Failed to read yaml files: %v", err)
	}

	sb, err := backend.NewSimulatedBackend()
	if err != nil {
		t.Fatalf("Could not create backend: %v", err)
	}

	if err := runTests(tests, sb); err != nil {
		t.Errorf("Failed to run yaml tests: %v", err)
	}
}

func BenchmarkStateTestFromYaml(b *testing.B) {
	tests, err := readTestsFromYaml("./tests")
	if err != nil {
		b.Fatalf("Failed to read yaml files: %v", err)
	}

	sb, err := backend.NewSimulatedBackend()
	if err != nil {
		b.Fatalf("Could not create backend: %v", err)
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if err := runTests(tests, sb); err != nil {
			b.Errorf("Failed to run yaml tests: %v", err)
		}
	}
}
@@ -1,26 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = ["rewards_penalties.go"],
    importpath = "github.com/prysmaticlabs/prysm/beacon-chain/core/balances",
    visibility = ["//beacon-chain:__subpackages__"],
    deps = [
        "//beacon-chain/core/epoch:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
        "//proto/beacon/p2p/v1:go_default_library",
        "//shared/params:go_default_library",
        "//shared/sliceutil:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["rewards_penalties_test.go"],
    embed = [":go_default_library"],
    deps = [
        "//beacon-chain/core/blocks:go_default_library",
        "//proto/beacon/p2p/v1:go_default_library",
        "//shared/params:go_default_library",
    ],
)
Some files were not shown because too many files have changed in this diff