mirror of
https://github.com/OffchainLabs/prysm.git
synced 2026-01-11 06:18:05 -05:00
Compare commits
822 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
89eedd2123 | ||
|
|
2182e1cdc9 | ||
|
|
6d2a2ebadf | ||
|
|
9aed0034ec | ||
|
|
f764522cbe | ||
|
|
c7ae03e1b2 | ||
|
|
4efc0f5286 | ||
|
|
9052620453 | ||
|
|
0174397f6e | ||
|
|
9f5caf8fea | ||
|
|
3b3f2c78e2 | ||
|
|
242e4bccbf | ||
|
|
59ab89c98a | ||
|
|
ac768207ac | ||
|
|
77d41024dc | ||
|
|
f03083f6c8 | ||
|
|
5ff9ae2108 | ||
|
|
5fa03edb29 | ||
|
|
ebe4c9c971 | ||
|
|
6efe5ef496 | ||
|
|
fbbf5514d1 | ||
|
|
220af25bce | ||
|
|
c9252c06c4 | ||
|
|
2c565f5d59 | ||
|
|
1cb58e859e | ||
|
|
d26839c1f2 | ||
|
|
2cb8430ad4 | ||
|
|
03356fc7b5 | ||
|
|
bdc4045e23 | ||
|
|
dc1bd1ef62 | ||
|
|
35380dd9bf | ||
|
|
9674575892 | ||
|
|
b7d0d7cbb6 | ||
|
|
28eadac172 | ||
|
|
d5181496c4 | ||
|
|
b337a5720c | ||
|
|
53b8eb57ee | ||
|
|
30b4b045f5 | ||
|
|
ec1e7ae005 | ||
|
|
a949673e33 | ||
|
|
996f4c7f5a | ||
|
|
3915a6e15a | ||
|
|
961dd21554 | ||
|
|
2e4908e7c4 | ||
|
|
da637668a8 | ||
|
|
20168ad729 | ||
|
|
0b07a9f227 | ||
|
|
5dca662d01 | ||
|
|
8c28d1080c | ||
|
|
6a54a430e1 | ||
|
|
908d220eb2 | ||
|
|
ff1fd77425 | ||
|
|
e27bc8312f | ||
|
|
78968c1e29 | ||
|
|
fb431c11c1 | ||
|
|
30ed59e9c8 | ||
|
|
2e2d5199e8 | ||
|
|
4fe31cf1b3 | ||
|
|
e82e582cdf | ||
|
|
0b2d9d8576 | ||
|
|
65e3f3e007 | ||
|
|
2c28e4e7a3 | ||
|
|
642254daa6 | ||
|
|
c41140e15a | ||
|
|
23a6c20dd4 | ||
|
|
514f5f904f | ||
|
|
5844436716 | ||
|
|
5879b26b4b | ||
|
|
566efaef89 | ||
|
|
d9062a7e30 | ||
|
|
3f344aee55 | ||
|
|
fd93751bf7 | ||
|
|
325a2503f7 | ||
|
|
2179ac683e | ||
|
|
0f4dabfad8 | ||
|
|
8724dcd41b | ||
|
|
89e1200b73 | ||
|
|
0f677a09b6 | ||
|
|
c5dcf49ded | ||
|
|
a5881f924f | ||
|
|
d93ec64b21 | ||
|
|
a9a5973b98 | ||
|
|
570efe3d04 | ||
|
|
2e9c3895f4 | ||
|
|
9033f6801b | ||
|
|
c0b3767757 | ||
|
|
e72ff1bb4f | ||
|
|
0cb59bb018 | ||
|
|
6e549c90ba | ||
|
|
813233373e | ||
|
|
5757ce8894 | ||
|
|
7c11367cd8 | ||
|
|
6d2c37caf1 | ||
|
|
812311f6f7 | ||
|
|
22d81ef0ed | ||
|
|
414fcda9a2 | ||
|
|
bb2fc4cd5e | ||
|
|
5fd6a92052 | ||
|
|
7ccbe48f54 | ||
|
|
7a46cc0681 | ||
|
|
92d21c72b8 | ||
|
|
0cb681476e | ||
|
|
fa7b8ab60d | ||
|
|
bdb80271a3 | ||
|
|
1b8eb16fc7 | ||
|
|
1222ebb6db | ||
|
|
3e15e2fc1e | ||
|
|
667466020e | ||
|
|
f63ab1e136 | ||
|
|
6841d96f36 | ||
|
|
cae24068d4 | ||
|
|
dc0b8fad4f | ||
|
|
d3375d98a8 | ||
|
|
9d4c7cb4f7 | ||
|
|
ae2b2e74ca | ||
|
|
83179376d4 | ||
|
|
c36a852329 | ||
|
|
650a278fee | ||
|
|
6816337589 | ||
|
|
2950e4aeb4 | ||
|
|
746cc142d0 | ||
|
|
261428118e | ||
|
|
544e5309ad | ||
|
|
23dd951e59 | ||
|
|
498417a8fc | ||
|
|
617325b726 | ||
|
|
9e5cc81340 | ||
|
|
f75a5a5df8 | ||
|
|
ae8df9c32b | ||
|
|
90cbe49496 | ||
|
|
c31f46d973 | ||
|
|
83781d0b74 | ||
|
|
6488b0527c | ||
|
|
eeb8779cfc | ||
|
|
f40bbb92d1 | ||
|
|
4f0bef929f | ||
|
|
81a83cf100 | ||
|
|
8bbc589edd | ||
|
|
32245a9062 | ||
|
|
28c4f28d32 | ||
|
|
a2d4701f6e | ||
|
|
8e4022f8aa | ||
|
|
42e766e909 | ||
|
|
a686be8bd0 | ||
|
|
e3c3dea5d2 | ||
|
|
7754cfb6c6 | ||
|
|
f55a380ade | ||
|
|
9a317ffc0f | ||
|
|
3be4894b8a | ||
|
|
646411b881 | ||
|
|
0e99e4af4f | ||
|
|
e87337a97a | ||
|
|
53523b3eef | ||
|
|
5ec02b28a5 | ||
|
|
1620290305 | ||
|
|
fc171434c5 | ||
|
|
b08f3f760d | ||
|
|
7495961d6b | ||
|
|
4dbf68b50c | ||
|
|
e24b060eb6 | ||
|
|
e90358cd8e | ||
|
|
80865ff3f2 | ||
|
|
60469ec7ee | ||
|
|
67be8bd4f0 | ||
|
|
3682bf1cda | ||
|
|
e203f66fe0 | ||
|
|
04df922ac9 | ||
|
|
0326be86b5 | ||
|
|
a7ccd52a95 | ||
|
|
1ced4754db | ||
|
|
b872f74fd3 | ||
|
|
c1c48a8af5 | ||
|
|
b88e6dc918 | ||
|
|
3868837471 | ||
|
|
60b1596c4d | ||
|
|
4f0dcd5e6e | ||
|
|
ac405c714f | ||
|
|
7d0e5a9dc4 | ||
|
|
feb1267fee | ||
|
|
7a9c297206 | ||
|
|
21deed0fb7 | ||
|
|
627791c54e | ||
|
|
3358bde42d | ||
|
|
9e45cffabc | ||
|
|
2c8ff7b36f | ||
|
|
a7ec0679b5 | ||
|
|
f717c5d852 | ||
|
|
0cec0ee6c3 | ||
|
|
2f392544a6 | ||
|
|
75ce8359eb | ||
|
|
f5cb04012e | ||
|
|
f461d1e024 | ||
|
|
bdbd0aaeb8 | ||
|
|
715d06a215 | ||
|
|
976a3af637 | ||
|
|
8f8d2d36c0 | ||
|
|
a264a097cc | ||
|
|
4330839bc1 | ||
|
|
835418d1e3 | ||
|
|
ae07dc7962 | ||
|
|
d071a0a90a | ||
|
|
2d7802c637 | ||
|
|
fcb663acde | ||
|
|
858dbbf038 | ||
|
|
49c2dd2cfc | ||
|
|
7a22e98c0f | ||
|
|
26da7c4114 | ||
|
|
7acb45d186 | ||
|
|
24a5000e47 | ||
|
|
65d920e13a | ||
|
|
d27d18b192 | ||
|
|
0e88085661 | ||
|
|
3f6435ac80 | ||
|
|
64b69d9216 | ||
|
|
13207a9de5 | ||
|
|
ab756ec094 | ||
|
|
499f05f34b | ||
|
|
0077654fb5 | ||
|
|
f8cac0fb41 | ||
|
|
607f086de9 | ||
|
|
3b18aee181 | ||
|
|
f43a7c67f2 | ||
|
|
199ddc6cdb | ||
|
|
023dfebc73 | ||
|
|
53c4a26184 | ||
|
|
5acc362f7e | ||
|
|
68edad13bc | ||
|
|
bb2f329562 | ||
|
|
5169209360 | ||
|
|
c4ca8a47b3 | ||
|
|
904898e405 | ||
|
|
7f96fcc51b | ||
|
|
24583864b4 | ||
|
|
db9153e8e4 | ||
|
|
cd6e3e8a09 | ||
|
|
fc7c530696 | ||
|
|
8f05f14b36 | ||
|
|
3b8701296b | ||
|
|
48f69c0762 | ||
|
|
75521fffbd | ||
|
|
8ba6c84d6b | ||
|
|
6ae829a555 | ||
|
|
89f4053c33 | ||
|
|
3332abbb5a | ||
|
|
76e9111833 | ||
|
|
81c53c26fb | ||
|
|
62aaec1e20 | ||
|
|
cc18b2f4d3 | ||
|
|
a938c305b4 | ||
|
|
b87d0abc6c | ||
|
|
768c2bd812 | ||
|
|
c8b8c6165d | ||
|
|
db866e6580 | ||
|
|
b50f1583f3 | ||
|
|
0be4e6fed8 | ||
|
|
e9bd530221 | ||
|
|
67cf86ad5e | ||
|
|
5a789fca4a | ||
|
|
295b3a74e9 | ||
|
|
d35c5db260 | ||
|
|
9c1e3c260a | ||
|
|
371f808aa4 | ||
|
|
8df65c1bcc | ||
|
|
6b6273fec1 | ||
|
|
6e90931837 | ||
|
|
bf49fa3c26 | ||
|
|
d4e7e15e50 | ||
|
|
7d1633230d | ||
|
|
db26c0d012 | ||
|
|
2b0acffe7f | ||
|
|
485fc538c3 | ||
|
|
be1b90d511 | ||
|
|
8977e5088e | ||
|
|
cc16a10a33 | ||
|
|
103bdfc688 | ||
|
|
ab92326dfb | ||
|
|
da6c270d46 | ||
|
|
86cd873e67 | ||
|
|
49e0ddf861 | ||
|
|
9ca95530fa | ||
|
|
a29032c2bf | ||
|
|
82de66bb90 | ||
|
|
b2b48c2a4d | ||
|
|
5f79abd828 | ||
|
|
1b1e994a80 | ||
|
|
fbc31dc99b | ||
|
|
2785a6d5ee | ||
|
|
2b444ea954 | ||
|
|
ae89cce593 | ||
|
|
749f4b776b | ||
|
|
0481eb4872 | ||
|
|
f1627c0b67 | ||
|
|
3d1b69e945 | ||
|
|
f8c870aa91 | ||
|
|
094f1974be | ||
|
|
c4d47faae5 | ||
|
|
582c382771 | ||
|
|
c5b0b3c326 | ||
|
|
57036d16f9 | ||
|
|
1c4b4c8393 | ||
|
|
0a3825e79e | ||
|
|
acf11262de | ||
|
|
2d98902eed | ||
|
|
ae1e435231 | ||
|
|
d5547355d5 | ||
|
|
544ce2b4ed | ||
|
|
9a0fb5dca1 | ||
|
|
32271aeae1 | ||
|
|
2fefe6d14b | ||
|
|
27bd188ea8 | ||
|
|
c2e7aa7a39 | ||
|
|
be5451abef | ||
|
|
e4dafd8475 | ||
|
|
7b8331c607 | ||
|
|
552baf1c21 | ||
|
|
e37e757226 | ||
|
|
f86b7ac62d | ||
|
|
a440c32155 | ||
|
|
1138c2cb51 | ||
|
|
c9a7a9c709 | ||
|
|
97905c3e79 | ||
|
|
d8e70fe83c | ||
|
|
635e20529a | ||
|
|
053fa5e616 | ||
|
|
5000535907 | ||
|
|
04113baf9d | ||
|
|
1433fab0d4 | ||
|
|
23be8419fe | ||
|
|
2437a0e33c | ||
|
|
e42af4f11d | ||
|
|
a05dca18c7 | ||
|
|
a41ac6b498 | ||
|
|
fb510a3510 | ||
|
|
aedd38092f | ||
|
|
89ef6d6648 | ||
|
|
fef6b95fed | ||
|
|
d4db7a68aa | ||
|
|
921d0a6e7e | ||
|
|
6bf14dedcd | ||
|
|
cde87ae39b | ||
|
|
4130c78be7 | ||
|
|
2d863a1e63 | ||
|
|
a62ac97a35 | ||
|
|
86a8ec035c | ||
|
|
f0944d205d | ||
|
|
b0eccd24a2 | ||
|
|
9d441011d7 | ||
|
|
f63c12b7b2 | ||
|
|
00a5a25323 | ||
|
|
0d1aeeeaf4 | ||
|
|
c5d4d5dfce | ||
|
|
2bd1e54d92 | ||
|
|
9e6b4d1f29 | ||
|
|
1dbb67af81 | ||
|
|
aa07843157 | ||
|
|
707dfca62c | ||
|
|
d4001a8b29 | ||
|
|
964c54f911 | ||
|
|
df80a7d949 | ||
|
|
9bf55e53e7 | ||
|
|
1c4ea5c471 | ||
|
|
1a94ef12b9 | ||
|
|
46ecbdc997 | ||
|
|
384fd5336e | ||
|
|
4e22f52ab3 | ||
|
|
9254ebf3ba | ||
|
|
cbeedeb5a7 | ||
|
|
093c32e229 | ||
|
|
23764c4640 | ||
|
|
750bc83369 | ||
|
|
14d9a83cda | ||
|
|
66dcf2b80d | ||
|
|
91cb081b7e | ||
|
|
73ffde869f | ||
|
|
24cbcc552f | ||
|
|
aa819bf5ba | ||
|
|
273871940c | ||
|
|
20e97bc6c3 | ||
|
|
fddb51fc45 | ||
|
|
50b1d209ab | ||
|
|
fb0e504856 | ||
|
|
58dbdfb6f5 | ||
|
|
a6f6bb12fa | ||
|
|
4bee60826d | ||
|
|
6d2ce49c06 | ||
|
|
7a04ff6368 | ||
|
|
f046c77499 | ||
|
|
f39f4336a0 | ||
|
|
1064f6ebaf | ||
|
|
33b746e025 | ||
|
|
4daf62fc28 | ||
|
|
8bab55d88e | ||
|
|
c632b96454 | ||
|
|
323ee8dfac | ||
|
|
42a2d5c1ee | ||
|
|
d5e02eaa43 | ||
|
|
2d9550e55c | ||
|
|
d9c0e65cef | ||
|
|
f78d6e66b3 | ||
|
|
87f0581742 | ||
|
|
3d37a4e038 | ||
|
|
2a5046fbc9 | ||
|
|
98f3efffea | ||
|
|
628da919a4 | ||
|
|
944d3b16fd | ||
|
|
8d215feb25 | ||
|
|
6a203dce81 | ||
|
|
4f1d2868f8 | ||
|
|
a2a66e7cb7 | ||
|
|
8ece8fb44b | ||
|
|
22ddcb253d | ||
|
|
23c3138c57 | ||
|
|
2dd71c076e | ||
|
|
7c6270143f | ||
|
|
5675038e5d | ||
|
|
571efc11d1 | ||
|
|
1c51b509ad | ||
|
|
0e8828abd3 | ||
|
|
7fe65bb53b | ||
|
|
5a92725329 | ||
|
|
508fac65be | ||
|
|
4c8269aca3 | ||
|
|
00e68c6cc7 | ||
|
|
877f596c54 | ||
|
|
d02e73c5fe | ||
|
|
707a816f2b | ||
|
|
59b4ade50b | ||
|
|
24df2d3e44 | ||
|
|
ee837ecbb9 | ||
|
|
4bd2730c5e | ||
|
|
e1e36e1424 | ||
|
|
14bc8d7637 | ||
|
|
b089cdd216 | ||
|
|
f681bc6867 | ||
|
|
1600217eb1 | ||
|
|
ddf6f7d4d9 | ||
|
|
9dc1674417 | ||
|
|
4a73bc13b5 | ||
|
|
90a02a035b | ||
|
|
6a4b46ab0e | ||
|
|
af1301ddcb | ||
|
|
156e3ca65a | ||
|
|
d7891fca88 | ||
|
|
b8bd28cca2 | ||
|
|
32ffb70a1a | ||
|
|
2690c2080d | ||
|
|
9b008522b8 | ||
|
|
8f0b131631 | ||
|
|
a683f4652f | ||
|
|
5a533f8e4a | ||
|
|
82ac56d0e7 | ||
|
|
73938876b1 | ||
|
|
0a2dfedf0f | ||
|
|
3ef681e649 | ||
|
|
f52bac7d06 | ||
|
|
fb74dae835 | ||
|
|
3a890e70f7 | ||
|
|
ef6f2a196e | ||
|
|
ba4f45b180 | ||
|
|
f6a3fcb778 | ||
|
|
b5984af17c | ||
|
|
5345ddf686 | ||
|
|
8ce96428b1 | ||
|
|
5398faea44 | ||
|
|
6c892dc376 | ||
|
|
7c9ddfeb58 | ||
|
|
3dcaeabb3e | ||
|
|
2335b5eae7 | ||
|
|
e64287773c | ||
|
|
0e329fc115 | ||
|
|
0db690df75 | ||
|
|
41631c2e3a | ||
|
|
07360bcc07 | ||
|
|
cbcbb487ac | ||
|
|
ad47817bcd | ||
|
|
305d0299dd | ||
|
|
5d33514001 | ||
|
|
f5aa25821d | ||
|
|
b8bdf71d5b | ||
|
|
9577e2c123 | ||
|
|
b015dc793a | ||
|
|
4432c88f73 | ||
|
|
b5b10a8d35 | ||
|
|
5294a6c5af | ||
|
|
ab2d4e8ad6 | ||
|
|
64795bd231 | ||
|
|
4e6ed2744d | ||
|
|
41ea8a18a0 | ||
|
|
71098b6ed8 | ||
|
|
b7853f1fa8 | ||
|
|
762f108ea5 | ||
|
|
041735ef54 | ||
|
|
315d4f0549 | ||
|
|
2b2ef4f37c | ||
|
|
fb8d6a4046 | ||
|
|
37596ac188 | ||
|
|
9fcc6fc201 | ||
|
|
c29a7be0ec | ||
|
|
cf2ad1f21c | ||
|
|
a2aa142b90 | ||
|
|
44e5e5de65 | ||
|
|
4bc2d628b1 | ||
|
|
ac176a5078 | ||
|
|
4ffef61e1d | ||
|
|
cba44e5151 | ||
|
|
8179ed57b9 | ||
|
|
e8b6951591 | ||
|
|
33ef5f9150 | ||
|
|
495621e99b | ||
|
|
e1861bdb31 | ||
|
|
f69195f211 | ||
|
|
36e3a9f82a | ||
|
|
6f25e4ce81 | ||
|
|
b919429801 | ||
|
|
26af4496c0 | ||
|
|
8701ccfe87 | ||
|
|
d9664d3b6b | ||
|
|
037c01f4d7 | ||
|
|
9ab08e6998 | ||
|
|
bdb1b472b6 | ||
|
|
0d318b394e | ||
|
|
b9f9cf0b2c | ||
|
|
b1b76ac87c | ||
|
|
b63e938cfb | ||
|
|
31eae719b9 | ||
|
|
b863004b2a | ||
|
|
7eba8da9d2 | ||
|
|
7e7941b0af | ||
|
|
49a529388b | ||
|
|
9683a83750 | ||
|
|
c9f48373cb | ||
|
|
bf07cfcdab | ||
|
|
bef58620fc | ||
|
|
c5b4cf7f7d | ||
|
|
a2685245f2 | ||
|
|
d597410d9b | ||
|
|
86d4eb5868 | ||
|
|
c6236df603 | ||
|
|
9d62e542e5 | ||
|
|
d36061d62f | ||
|
|
8887ccdd51 | ||
|
|
1e086b63e8 | ||
|
|
8c7ef61238 | ||
|
|
0a0d579822 | ||
|
|
91f824fe10 | ||
|
|
bee3aff6c5 | ||
|
|
b04b542e64 | ||
|
|
3e8a94516d | ||
|
|
273b917319 | ||
|
|
e0e3dada7b | ||
|
|
8ff289fe1a | ||
|
|
14ed36a41e | ||
|
|
ccece73483 | ||
|
|
798bbbdc82 | ||
|
|
b4975f2b9d | ||
|
|
1edeb8ec4c | ||
|
|
3708a8f476 | ||
|
|
af07c13730 | ||
|
|
8d234014a4 | ||
|
|
4dad28d1f6 | ||
|
|
5e939378d0 | ||
|
|
d94522510f | ||
|
|
adc27a0bc2 | ||
|
|
a3c3a72e72 | ||
|
|
4235980511 | ||
|
|
fb20fc7881 | ||
|
|
171e5007c5 | ||
|
|
56a395a297 | ||
|
|
b133bced26 | ||
|
|
14c59b2ff9 | ||
|
|
75bce9b7e1 | ||
|
|
c383b6a30c | ||
|
|
75c0b01932 | ||
|
|
b0e6d7215c | ||
|
|
b6e0d700ec | ||
|
|
0a61c379a5 | ||
|
|
6614816061 | ||
|
|
60c048a0ec | ||
|
|
5ec629af71 | ||
|
|
399f704bf5 | ||
|
|
8f342cc5bb | ||
|
|
8ce8717676 | ||
|
|
90b2a880c6 | ||
|
|
d23ba8e69d | ||
|
|
b52f32d17c | ||
|
|
b1a102fd1d | ||
|
|
da630f349f | ||
|
|
c412dde3bd | ||
|
|
250e911faa | ||
|
|
510184c9cc | ||
|
|
b32c19a004 | ||
|
|
34a163b110 | ||
|
|
876e0ea84d | ||
|
|
25dbc5ea85 | ||
|
|
a4ac23160a | ||
|
|
146b611dc8 | ||
|
|
ca2a55874c | ||
|
|
8e2dcb81ae | ||
|
|
f131585041 | ||
|
|
9a6410ec15 | ||
|
|
314bc513af | ||
|
|
95c528f0bc | ||
|
|
205fe1baa5 | ||
|
|
c425bf2c31 | ||
|
|
538babb7e9 | ||
|
|
f0332e1131 | ||
|
|
1f0aad31d2 | ||
|
|
f49469a820 | ||
|
|
d8fd7e502a | ||
|
|
206222c5bc | ||
|
|
816aac82d5 | ||
|
|
9e5864fc61 | ||
|
|
5d7c33a8dc | ||
|
|
d84ae95309 | ||
|
|
e8f030977a | ||
|
|
14f77449ce | ||
|
|
cbb66dab50 | ||
|
|
2ee4f00b81 | ||
|
|
7bb5ac0dde | ||
|
|
9f2c2f0197 | ||
|
|
323bbe10ed | ||
|
|
3a138b9e77 | ||
|
|
ca0f61bf24 | ||
|
|
701c70ae3b | ||
|
|
7beafa159d | ||
|
|
f188609137 | ||
|
|
aca775e405 | ||
|
|
64d0826469 | ||
|
|
a1020585fd | ||
|
|
53d9fca201 | ||
|
|
f99e2bd7c9 | ||
|
|
0b5b3865ef | ||
|
|
5828278807 | ||
|
|
9ad00ffafb | ||
|
|
6bcb68f862 | ||
|
|
045badc5f3 | ||
|
|
919877f301 | ||
|
|
122166b317 | ||
|
|
8870bcea64 | ||
|
|
06c97256bc | ||
|
|
9d15196bed | ||
|
|
111f225177 | ||
|
|
a31057de83 | ||
|
|
5294caf5e8 | ||
|
|
a852d610e2 | ||
|
|
3b422cb9c6 | ||
|
|
b04bfb87a8 | ||
|
|
0353cc533e | ||
|
|
0c0ec97343 | ||
|
|
4484558d87 | ||
|
|
ce65b11801 | ||
|
|
2e8a06d6d4 | ||
|
|
02ca2290e1 | ||
|
|
15f052c48d | ||
|
|
74df2aa0c3 | ||
|
|
22f4807e0b | ||
|
|
7f475bee00 | ||
|
|
ebb0e398d3 | ||
|
|
f342224410 | ||
|
|
c47598514c | ||
|
|
0d64f7b80e | ||
|
|
b59b3ec09c | ||
|
|
8f01b76366 | ||
|
|
4e25f6d78f | ||
|
|
ce28feea45 | ||
|
|
2e352cf5ff | ||
|
|
e0d3e78746 | ||
|
|
bb542d2032 | ||
|
|
36c9a5665d | ||
|
|
83083b9c65 | ||
|
|
c09a6b87c3 | ||
|
|
b91639a32e | ||
|
|
4b17711702 | ||
|
|
de82956088 | ||
|
|
3ca4d6fd91 | ||
|
|
01de412956 | ||
|
|
8fef74ab25 | ||
|
|
bfbff885fe | ||
|
|
b440891aea | ||
|
|
79e57e8e8e | ||
|
|
acb20e269c | ||
|
|
0f123ae562 | ||
|
|
3cb32c3792 | ||
|
|
8fc3c55199 | ||
|
|
e146bc35c0 | ||
|
|
6195a0bfa1 | ||
|
|
e330fa5733 | ||
|
|
1c4b7329f2 | ||
|
|
121a277726 | ||
|
|
900b550864 | ||
|
|
3f0d1c1d41 | ||
|
|
01bbc552cd | ||
|
|
8f967d26d7 | ||
|
|
a7d336a7d0 | ||
|
|
1c8ac6658e | ||
|
|
0b8cbd06b6 | ||
|
|
b7b62e24ad | ||
|
|
e88bbaf614 | ||
|
|
6ac0d12f5b | ||
|
|
4c1ff2a897 | ||
|
|
5f2e0493eb | ||
|
|
16c5d96e6a | ||
|
|
a26ef9b44f | ||
|
|
b8e550b1e9 | ||
|
|
68210eb733 | ||
|
|
78bf39aff7 | ||
|
|
81f868bd48 | ||
|
|
6ec9d7e6e2 | ||
|
|
eb192049b8 | ||
|
|
65ee6eb3af | ||
|
|
c0627e29a8 | ||
|
|
df65a8d118 | ||
|
|
11ac9585ad | ||
|
|
d0bdbe5a33 | ||
|
|
072bb4be27 | ||
|
|
5b7182cf18 | ||
|
|
1eb29a2394 | ||
|
|
8ea586a3e6 | ||
|
|
27319a8990 | ||
|
|
d2186726a3 | ||
|
|
82efca9b6f | ||
|
|
e31792f999 | ||
|
|
4e886a84f9 | ||
|
|
a3ac250ac1 | ||
|
|
655f5830f4 | ||
|
|
856dde497b | ||
|
|
8d8849feed | ||
|
|
fa0ef76561 | ||
|
|
551ed1d335 | ||
|
|
0ab969a87d | ||
|
|
6bd8ae8f67 | ||
|
|
715b9cd5ba | ||
|
|
212f8d6c3f | ||
|
|
22df351e89 | ||
|
|
06950907c8 | ||
|
|
4c62b0410f | ||
|
|
a938274d57 | ||
|
|
dce9c41094 | ||
|
|
bb2d79be85 | ||
|
|
830a0a4bca | ||
|
|
d153abd992 | ||
|
|
3d63bca127 | ||
|
|
e1dfe73525 | ||
|
|
d860dbbb60 | ||
|
|
32c426ed1b | ||
|
|
b3e29399aa | ||
|
|
2ec8a46cb2 | ||
|
|
ccc7d8d7b7 | ||
|
|
4e041c852b | ||
|
|
cb5c920502 | ||
|
|
9ec54ae432 | ||
|
|
dec694916b | ||
|
|
64f7569894 | ||
|
|
7d2bb5878f | ||
|
|
bccd2f95cc | ||
|
|
d97b691f7d | ||
|
|
d59800210a | ||
|
|
7e819990f6 | ||
|
|
d6b311ab84 | ||
|
|
ea09a918d8 | ||
|
|
b1fcaa03ae | ||
|
|
0beb919fc0 | ||
|
|
953c59a302 | ||
|
|
7eca7ba43b | ||
|
|
f019e54ebb | ||
|
|
474fd20123 | ||
|
|
08ac1c3c35 | ||
|
|
b504d3beb8 | ||
|
|
da551f688d | ||
|
|
57d60d681a | ||
|
|
f70d94675b | ||
|
|
3c3e4a2cb5 | ||
|
|
5f4cdd6095 | ||
|
|
63cf0f07a2 | ||
|
|
68f29967f3 | ||
|
|
f72f7677b3 | ||
|
|
3fe0933936 | ||
|
|
ad82e84503 | ||
|
|
fc907261e9 | ||
|
|
96c32c3865 | ||
|
|
8cbd1097d7 | ||
|
|
956b07f5c1 | ||
|
|
4ebe2fb5b5 | ||
|
|
40fca7bb2c | ||
|
|
ed78f1f406 | ||
|
|
b80d9f4f7f | ||
|
|
c1eeeef853 | ||
|
|
88b715d8f6 | ||
|
|
e452b46873 | ||
|
|
59253afb96 | ||
|
|
94a73e847f | ||
|
|
7d47be84ed | ||
|
|
f58afa62af | ||
|
|
9f2543267e | ||
|
|
590aaaf370 | ||
|
|
17576af752 | ||
|
|
9d0e9fa77d | ||
|
|
41e55a6902 | ||
|
|
930e992e85 | ||
|
|
a2caba9956 | ||
|
|
be514076c1 | ||
|
|
5374350a1c | ||
|
|
d42fab070d | ||
|
|
b06876d698 | ||
|
|
6a930ba175 | ||
|
|
1d71398b7c | ||
|
|
8cfbf0309d | ||
|
|
d5dcc25472 | ||
|
|
1b5b8a57e0 | ||
|
|
c8e8e84c60 | ||
|
|
4bb5160817 | ||
|
|
365580706b | ||
|
|
fd22f73d1f | ||
|
|
db5549f143 | ||
|
|
4a422b13f0 | ||
|
|
acf1ebff2d | ||
|
|
4a4316eb95 | ||
|
|
cc696d90e3 | ||
|
|
dfc64121c6 | ||
|
|
e744d1a07e | ||
|
|
d8e24af4c3 | ||
|
|
ddff0f1c51 |
21
.bazelrc
21
.bazelrc
@@ -1,5 +1,26 @@
|
||||
# Print warnings for tests with inappropriate test size or timeout.
|
||||
test --test_verbose_timeout_warnings
|
||||
|
||||
# Only build test targets when running bazel test //...
|
||||
test --build_tests_only
|
||||
test --test_output=errors
|
||||
|
||||
# Fix for rules_docker. See: https://github.com/bazelbuild/rules_docker/issues/842
|
||||
build --host_force_python=PY2
|
||||
test --host_force_python=PY2
|
||||
run --host_force_python=PY2
|
||||
|
||||
# Networking is blocked for tests by default, add "requires-network" tag to your test if networking
|
||||
# is required within the sandbox. This flag is no longer experimental after 0.29.0.
|
||||
# Network sandboxing only works on linux.
|
||||
--experimental_sandbox_default_allow_network=false
|
||||
|
||||
# Use minimal protobufs at runtime
|
||||
run --define ssz=mainnet
|
||||
test --define ssz=mainnet
|
||||
build --define ssz=mainnet
|
||||
|
||||
# Prevent PATH changes from rebuilding when switching from IDE to command line.
|
||||
build --incompatible_strict_action_env
|
||||
test --incompatible_strict_action_env
|
||||
run --incompatible_strict_action_env
|
||||
|
||||
@@ -2,8 +2,7 @@
|
||||
# across machines, developers, and workspaces.
|
||||
#
|
||||
# This config is loaded from https://github.com/bazelbuild/bazel-toolchains/blob/master/bazelrc/latest.bazelrc
|
||||
build:remote-cache --remote_cache=remotebuildexecution.googleapis.com
|
||||
build:remote-cache --tls_enabled=true
|
||||
build:remote-cache --remote_cache=grpcs://remotebuildexecution.googleapis.com
|
||||
build:remote-cache --remote_timeout=3600
|
||||
build:remote-cache --auth_enabled=true
|
||||
build:remote-cache --spawn_strategy=standalone
|
||||
@@ -11,15 +10,25 @@ build:remote-cache --strategy=Javac=standalone
|
||||
build:remote-cache --strategy=Closure=standalone
|
||||
build:remote-cache --strategy=Genrule=standalone
|
||||
|
||||
# Build results backend.
|
||||
#build:remote-cache --bes_results_url="https://source.cloud.google.com/results/invocations/"
|
||||
#build:remote-cache --bes_backend=buildeventservice.googleapis.com
|
||||
#build:remote-cache --bes_timeout=60s
|
||||
#build:remote-cache --project_id=prysmaticlabs
|
||||
|
||||
# Prysm specific remote-cache properties.
|
||||
build:remote-cache --disk_cache=
|
||||
build:remote-cache --jobs=50
|
||||
build:remote-cache --host_platform_remote_properties_override='properties:{name:\"cache-silo-key\" value:\"prysm\"}'
|
||||
build:remote-cache --remote_instance_name=projects/prysmaticlabs/instances/default_instance
|
||||
|
||||
build:remote-cache --experimental_remote_download_outputs=minimal
|
||||
build:remote-cache --experimental_inmemory_jdeps_files
|
||||
build:remote-cache --experimental_inmemory_dotd_files
|
||||
|
||||
# Import workspace options.
|
||||
import %workspace%/.bazelrc
|
||||
|
||||
startup --host_jvm_args=-Xmx1000m --host_jvm_args=-Xms1000m
|
||||
build --experimental_strict_action_env
|
||||
build --disk_cache=/tmp/bazelbuilds
|
||||
build --experimental_multi_threaded_digest
|
||||
@@ -31,5 +40,8 @@ build --curses=yes --color=yes
|
||||
build --keep_going
|
||||
build --test_output=errors
|
||||
build --flaky_test_attempts=5
|
||||
build --jobs=50
|
||||
build --stamp
|
||||
test --local_test_jobs=2
|
||||
# Disabled race detection due to unstable test results under constrained environment build kite
|
||||
# build --features=race
|
||||
|
||||
2
.dockerignore
Normal file
2
.dockerignore
Normal file
@@ -0,0 +1,2 @@
|
||||
bazel-*
|
||||
.git
|
||||
6
.gitignore
vendored
6
.gitignore
vendored
@@ -21,4 +21,8 @@ yarn-error.log
|
||||
.vscode/
|
||||
|
||||
# Ignore password file
|
||||
password.txt
|
||||
password.txt
|
||||
|
||||
# go dependancy
|
||||
/go.mod
|
||||
/go.sum
|
||||
|
||||
@@ -1,18 +0,0 @@
|
||||
{
|
||||
"extends": "solium:recommended",
|
||||
"plugins": [
|
||||
"security"
|
||||
],
|
||||
"rules": {
|
||||
"quotes": [
|
||||
"error",
|
||||
"double"
|
||||
],
|
||||
"security/no-inline-assembly": ["warning"],
|
||||
|
||||
"indentation": [
|
||||
"error",
|
||||
4
|
||||
]
|
||||
}
|
||||
}
|
||||
@@ -12,7 +12,7 @@ matrix:
|
||||
- go get ${gobuild_args} -t ./...
|
||||
- go get ${gobuild_args} github.com/golangci/golangci-lint/cmd/golangci-lint
|
||||
script:
|
||||
- golangci-lint run
|
||||
- golangci-lint run --skip-dirs ./proto
|
||||
email: false
|
||||
after_success:
|
||||
- wget https://raw.githubusercontent.com/k3rn31p4nic/travis-ci-discord-webhook/master/send.sh
|
||||
|
||||
55
BUILD.bazel
55
BUILD.bazel
@@ -3,10 +3,14 @@ load("@com_github_atlassian_bazel_tools//gometalinter:def.bzl", "gometalinter")
|
||||
load("@com_github_atlassian_bazel_tools//goimports:def.bzl", "goimports")
|
||||
load("@io_kubernetes_build//defs:run_in_workspace.bzl", "workspace_binary")
|
||||
load("@io_bazel_rules_go//go:def.bzl", "nogo")
|
||||
load("@graknlabs_bazel_distribution//common:rules.bzl", "assemble_targz", "assemble_versioned")
|
||||
load("//tools:binary_targets.bzl", "binary_targets", "determine_targets")
|
||||
|
||||
prefix = "github.com/prysmaticlabs/prysm"
|
||||
|
||||
exports_files(["genesis.json"])
|
||||
exports_files([
|
||||
"LICENSE.md",
|
||||
])
|
||||
|
||||
# gazelle:prefix github.com/prysmaticlabs/prysm
|
||||
gazelle(
|
||||
@@ -27,27 +31,21 @@ alias(
|
||||
alias(
|
||||
name = "grpc_proto_compiler",
|
||||
actual = "@io_bazel_rules_go//proto:gogofast_grpc",
|
||||
visibility = [
|
||||
"//proto:__subpackages__",
|
||||
],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
|
||||
# Protobuf gRPC compiler without gogoproto. Required for gRPC gateway.
|
||||
alias(
|
||||
name = "grpc_nogogo_proto_compiler",
|
||||
actual = "@io_bazel_rules_go//proto:go_grpc",
|
||||
visibility = [
|
||||
"//proto:__subpackages__",
|
||||
],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
|
||||
# Protobuf gRPC gateway compiler
|
||||
alias(
|
||||
name = "grpc_gateway_proto_compiler",
|
||||
actual = "@grpc_ecosystem_grpc_gateway//protoc-gen-grpc-gateway:go_gen_grpc_gateway",
|
||||
visibility = [
|
||||
"//proto:__subpackages__",
|
||||
],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
|
||||
gometalinter(
|
||||
@@ -108,3 +106,40 @@ nogo(
|
||||
"@org_golang_x_tools//go/analysis/passes/asmdecl:go_tool_library",
|
||||
],
|
||||
)
|
||||
|
||||
assemble_versioned(
|
||||
name = "assemble-versioned-all",
|
||||
tags = ["manual"],
|
||||
targets = [
|
||||
":assemble-{}-{}-targz".format(
|
||||
pair[0],
|
||||
pair[1],
|
||||
)
|
||||
for pair in binary_targets
|
||||
],
|
||||
version_file = "//:VERSION",
|
||||
)
|
||||
|
||||
common_files = {
|
||||
"//:LICENSE.md": "LICENSE.md",
|
||||
"//:README.md": "README.md",
|
||||
}
|
||||
|
||||
[assemble_targz(
|
||||
name = "assemble-{}-{}-targz".format(
|
||||
pair[0],
|
||||
pair[1],
|
||||
),
|
||||
additional_files = determine_targets(pair, common_files),
|
||||
output_filename = "prysm-{}-{}".format(
|
||||
pair[0],
|
||||
pair[1],
|
||||
),
|
||||
tags = ["manual"],
|
||||
) for pair in binary_targets]
|
||||
|
||||
toolchain(
|
||||
name = "built_cmake_toolchain",
|
||||
toolchain = "@rules_foreign_cc//tools/build_defs/native_tools:built_cmake",
|
||||
toolchain_type = "@rules_foreign_cc//tools/build_defs:cmake_toolchain",
|
||||
)
|
||||
|
||||
90
INTEROP.md
Normal file
90
INTEROP.md
Normal file
@@ -0,0 +1,90 @@
|
||||
# Prysm Client Interoperability Guide
|
||||
|
||||
This README details how to setup Prysm for interop testing for usage with other Ethereum 2.0 clients.
|
||||
|
||||
## Installation & Setup
|
||||
|
||||
1. Install [Bazel](https://docs.bazel.build/versions/master/install.html) **(Recommended)**
|
||||
2. `git clone https://github.com/prysmaticlabs/prysm && cd prysm`
|
||||
3. `bazel build //...`
|
||||
|
||||
## Starting from Genesis
|
||||
|
||||
Prysm supports a few ways to quickly launch a beacon node from basic configurations:
|
||||
|
||||
- `NumValidators + GenesisTime`: Launches a beacon node by deterministically generating a state from a num-validators flag along with a genesis time **(Recommended)**
|
||||
- `SSZ Genesis`: Launches a beacon node from a .ssz file containing a SSZ-encoded, genesis beacon state
|
||||
|
||||
## Generating a Genesis State
|
||||
|
||||
To setup the necessary files for these quick starts, Prysm provides a tool to generate a `genesis.ssz` from
|
||||
a deterministically generated set of validator private keys following the official interop YAML format
|
||||
[here](https://github.com/ethereum/eth2.0-pm/blob/master/interop/mocked_start).
|
||||
|
||||
You can use `bazel run //tools/genesis-state-gen` to create a deterministic genesis state for interop.
|
||||
|
||||
### Usage
|
||||
|
||||
- **--genesis-time** uint: Unix timestamp used as the genesis time in the generated genesis state (defaults to now)
|
||||
- **--mainnet-config** bool: Select whether genesis state should be generated with mainnet or minimal (default) params
|
||||
- **--num-validators** int: Number of validators to deterministically include in the generated genesis state
|
||||
- **--output-ssz** string: Output filename of the SSZ marshaling of the generated genesis state
|
||||
|
||||
The example below creates 64 validator keys, instantiates a genesis state with those 64 validators and with genesis unix timestamp 1567542540,
|
||||
and finally writes a ssz encoded output to ~/Desktop/genesis.ssz. This file can be used to kickstart the beacon chain in the next section.
|
||||
|
||||
```
|
||||
bazel run //tools/genesis-state-gen -- --output-ssz ~/Desktop/genesis.ssz --num-validators 64 --genesis-time 1567542540
|
||||
```
|
||||
|
||||
## Launching a Beacon Node + Validator Client
|
||||
|
||||
### Launching from Pure CLI Flags
|
||||
|
||||
Open up two terminal windows, run:
|
||||
|
||||
```
|
||||
bazel run //beacon-chain -- \
|
||||
--bootstrap-node= \
|
||||
--deposit-contract $(curl -s https://prylabs.net/contract) \
|
||||
--force-clear-db \
|
||||
--interop-num-validators 64 \
|
||||
--interop-eth1data-votes
|
||||
```
|
||||
|
||||
This will deterministically generate a beacon genesis state and start
|
||||
the system with 64 validators and the genesis time set to the current unix timestamp.
|
||||
Wait a bit until your beacon chain starts, and in the other window:
|
||||
|
||||
```
|
||||
bazel run //validator -- --interop-num-validators 64
|
||||
```
|
||||
|
||||
This will launch and kickstart the system with your 64 validators performing their duties accordingly.
|
||||
|
||||
### Launching from `genesis.ssz`
|
||||
|
||||
Assuming you generated a `genesis.ssz` file with 64 validators, open up two terminal windows, run:
|
||||
|
||||
```
|
||||
bazel run //beacon-chain -- \
|
||||
--bootstrap-node= \
|
||||
--deposit-contract $(curl -s https://prylabs.net/contract) \
|
||||
--force-clear-db \
|
||||
--interop-genesis-state /path/to/genesis.ssz \
|
||||
--interop-eth1data-votes
|
||||
```
|
||||
|
||||
Wait a bit until your beacon chain starts, and in the other window:
|
||||
|
||||
```
|
||||
bazel run //validator -- --interop-num-validators 64
|
||||
```
|
||||
|
||||
This will launch and kickstart the system with your 64 validators performing their duties accordingly.
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
218
README.md
218
README.md
@@ -1,144 +1,236 @@
|
||||
# Prysm: Ethereum 'Serenity' 2.0 Go Implementation
|
||||
# Prysm: An Ethereum 2.0 Client Written in Go
|
||||
|
||||
[](https://buildkite.com/prysmatic-labs/prysm)
|
||||
[](https://github.com/ethereum/eth2.0-specs/tree/v0.9.3)
|
||||
[](https://discord.gg/KSA7rPr)
|
||||
[](https://gitter.im/prysmaticlabs/geth-sharding?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)
|
||||
|
||||
This is the Core repository for Prysm, [Prysmatic Labs](https://prysmaticlabs.com)' [Go](https://golang.org/) implementation of the Ethereum protocol 2.0 (Serenity).
|
||||
This is the core repository for Prysm, a [Golang](https://golang.org/) implementation of the Ethereum 2.0 client specifications developed by [Prysmatic Labs](https://prysmaticlabs.com).
|
||||
|
||||
### Need assistance?
|
||||
A more detailed set of installation and usage instructions as well as explanations of each component are available on our [official documentation portal](https://prysmaticlabs.gitbook.io/prysm/). If you still have questions, feel free to stop by either our [Discord](https://discord.gg/KSA7rPr) or [Gitter](https://gitter.im/prysmaticlabs/geth-sharding?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) and a member of the team or our community will be happy to assist you.
|
||||
|
||||
**Interested in what's next?** Be sure to read our [Roadmap Reference Implementation](https://github.com/prysmaticlabs/prysm/blob/master/docs/ROADMAP.md) document. This page outlines the basics of sharding as well as the various short-term milestones that we hope to achieve over the coming year.
|
||||
A more detailed set of installation and usage instructions as well as breakdowns of each individual component are available in the [official documentation portal](https://prysmaticlabs.gitbook.io/prysm/). If you still have questions, feel free to stop by either our [Discord](https://discord.gg/KSA7rPr) or [Gitter](https://gitter.im/prysmaticlabs/geth-sharding?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) and a member of the team or our community will be happy to assist you.
|
||||
|
||||
### Come join the testnet!
|
||||
Participation is now open to the public in our testnet release for Ethereum 2.0 phase 0. Visit [prylabs.net](https://prylabs.net) for more information on the project itself or to sign up as a validator on the network.
|
||||
Participation is now open to the public for our Ethereum 2.0 phase 0 testnet release. Visit [prylabs.net](https://prylabs.net) for more information on the project or to sign up as a validator on the network.
|
||||
|
||||
# Table of Contents
|
||||
|
||||
- [Dependencies](#dependencies)
|
||||
- [Installation](#installation)
|
||||
- [Build Via Docker](#build-via-docker)
|
||||
- [Build Via Bazel](#build-via-bazel)
|
||||
- [Running an Ethereum 2.0 Beacon Node](#running-an-ethereum-20-beacon-node)
|
||||
- [Staking ETH: Running a Validator Client](#staking-eth-running-a-validator-client)
|
||||
- [Installation](#installing-prysm)
|
||||
- [Build via Docker](#build-via-docker)
|
||||
- [Build via Bazel](#build-via-bazel)
|
||||
- [Connecting to the public testnet: running a beacon node](#connecting-to-the-testnet-running-a-beacon-node)
|
||||
- [Running via Docker](#running-via-docker)
|
||||
- [Running via Bazel](#running-via-bazel)
|
||||
- [Staking ETH: running a validator client](#staking-eth-running-a-validator-client)
|
||||
- [Activating your validator: depositing 3.2 Goerli ETH](#activating-your-validator-depositing-32-göerli-eth)
|
||||
- [Starting the validator with Bazel](#starting-the-validator-with-bazel)
|
||||
- [Setting up a local ETH2 development chain](#setting-up-a-local-eth2-development-chain)
|
||||
- [Installation and dependencies](#installation-and-dependencies)
|
||||
- [Running a local beacon node and validator client](#running-a-local-beacon-node-and-validator-client)
|
||||
- [Testing Prysm](#testing-prysm)
|
||||
- [Contributing](#contributing)
|
||||
- [License](#license)
|
||||
|
||||
## Dependencies
|
||||
Prysm can be installed either with Docker **(recommended method)** or using our build tool, Bazel. The below instructions include sections for performing both.
|
||||
|
||||
**For Docker installations:**
|
||||
- The latest release of [Docker](https://docs.docker.com/install/)
|
||||
Prysm can be installed either with Docker **\(recommended\)** or using our build tool, Bazel. The below instructions include sections for performing both.
|
||||
|
||||
**For Bazel installations:**
|
||||
- The latest release of [Bazel](https://docs.bazel.build/versions/master/install.html)
|
||||
- A modern GNU/Linux operating system
|
||||
#### **For Docker installations:**
|
||||
|
||||
## Installation
|
||||
* The latest release of [Docker](https://docs.docker.com/install/)
|
||||
|
||||
#### **For Bazel installations:**
|
||||
|
||||
* The latest release of [Bazel](https://docs.bazel.build/versions/master/install.html)
|
||||
* The latest release of `cmake`
|
||||
* The latest release of `git`
|
||||
* A modern UNIX operating system \(macOS included\)
|
||||
|
||||
## Installing Prysm
|
||||
|
||||
### Build via Docker
|
||||
|
||||
1. Ensure you are running the most recent version of Docker by issuing the command:
|
||||
```
|
||||
|
||||
```text
|
||||
docker -v
|
||||
```
|
||||
2. To pull the Prysm images from the server, issue the following commands:
|
||||
```
|
||||
|
||||
2. To pull the Prysm images, issue the following commands:
|
||||
|
||||
```text
|
||||
docker pull gcr.io/prysmaticlabs/prysm/validator:latest
|
||||
docker pull gcr.io/prysmaticlabs/prysm/beacon-chain:latest
|
||||
```
|
||||
|
||||
This process will also install any related dependencies.
|
||||
|
||||
### Build via Bazel
|
||||
|
||||
1. Open a terminal window. Ensure you are running the most recent version of Bazel by issuing the command:
|
||||
```
|
||||
|
||||
```text
|
||||
bazel version
|
||||
```
|
||||
2. Clone this repository and enter the directory:
|
||||
```
|
||||
|
||||
2. Clone Prysm's [main repository](https://github.com/prysmaticlabs/prysm) and enter the directory:
|
||||
|
||||
```text
|
||||
git clone https://github.com/prysmaticlabs/prysm
|
||||
cd prysm
|
||||
```
|
||||
3. Build both the beacon chain node implementation and the validator client:
|
||||
```
|
||||
|
||||
3. Build both the beacon chain node and the validator client:
|
||||
|
||||
```text
|
||||
bazel build //beacon-chain:beacon-chain
|
||||
bazel build //validator:validator
|
||||
```
|
||||
|
||||
Bazel will automatically pull and install any dependencies as well, including Go and necessary compilers.
|
||||
|
||||
## Running an Ethereum 2.0 Beacon Node
|
||||
To understand the role that both the beacon node and validator play in Prysm, see [this section of our documentation](https://prysmaticlabs.gitbook.io/prysm/how-prysm-works/basic-architecture-overview).
|
||||
## Connecting to the testnet: running a beacon node
|
||||
|
||||
Below are instructions for initialising a beacon node and connecting to the public testnet. To further understand the role that the beacon node plays in Prysm, see [this section of the documentation.](https://prysmaticlabs.gitbook.io/prysm/how-prysm-works/overview-technical)
|
||||
|
||||
|
||||
**NOTE:** It is recommended to open up port 13000 on your local router to improve connectivity and receive more peers from the network. To do so, navigate to `192.168.0.1` in your browser and log in if required. Follow along with the interface to modify your router's firewall settings. When this task is completed, append the parameter `--p2p-host-ip=$(curl -s ident.me)` to your selected beacon startup command presented in this section to use the newly opened port.
|
||||
|
||||
### Running via Docker
|
||||
|
||||
**Docker on Linux/Mac:**
|
||||
1. To start your beacon node, issue the following command:
|
||||
```
|
||||
docker run -v /tmp/prysm-data:/data -p 4000:4000 \
|
||||
#### **Docker on Linux/macOS:**
|
||||
|
||||
To start your beacon node, issue the following command:
|
||||
|
||||
```text
|
||||
docker run -it -v $HOME/prysm:/data -p 4000:4000 --name beacon-node \
|
||||
gcr.io/prysmaticlabs/prysm/beacon-chain:latest \
|
||||
--datadir=/data
|
||||
--datadir=/data \
|
||||
--init-sync-no-verify
|
||||
```
|
||||
|
||||
The beacon node can be halted by either using `Ctrl+c` or with the command:
|
||||
|
||||
```text
|
||||
docker stop beacon-node
|
||||
```
|
||||
|
||||
To restart the beacon node, issue the following command:
|
||||
|
||||
```text
|
||||
docker start -ai beacon-node
|
||||
```
|
||||
|
||||
To delete a corrupted container, issue the following command:
|
||||
|
||||
```text
|
||||
docker rm beacon-node
|
||||
```
|
||||
|
||||
To recreate a deleted container and refresh the chain database, issue the start command with an additional `--clear-db` parameter:
|
||||
|
||||
```text
|
||||
docker run -it -v $HOME/prysm:/data -p 4000:4000 --name beacon-node \
|
||||
gcr.io/prysmaticlabs/prysm/beacon-chain:latest \
|
||||
--datadir=/data \
|
||||
--clear-db
|
||||
```
|
||||
**Docker on Windows:**
|
||||
|
||||
1) You will need to share the local drive you wish to mount to the container (e.g. C:).
|
||||
1. Enter Docker settings (right click the tray icon)
|
||||
2. Click 'Shared Drives'
|
||||
3. Select a drive to share
|
||||
4. Click 'Apply'
|
||||
#### **Docker on Windows:**
|
||||
|
||||
2) You will next need to create a directory named ```/tmp/prysm-data/``` within your selected shared Drive. This folder will be used as a local data directory for Beacon Node chain data as well as account and keystore information required by the validator. Docker will **not** create this directory if it does not exist already. For the purposes of these instructions, it is assumed that ```C:``` is your prior-selected shared Drive.
|
||||
1. You will need to 'share' the local drive you wish to mount to \(e.g. C:\).
|
||||
1. Enter Docker settings \(right click the tray icon\)
|
||||
2. Click 'Shared Drives'
|
||||
3. Select a drive to share
|
||||
4. Click 'Apply'
|
||||
|
||||
2. You will next need to create a directory named `/prysm/` within your selected shared Drive. This folder will be used as a local data directory for Beacon Node chain data as well as account and keystore information required by the validator. Docker will **not** create this directory if it does not exist already. For the purposes of these instructions, it is assumed that `C:` is your prior-selected shared Drive.
|
||||
3. To run the beacon node, issue the following command:
|
||||
|
||||
4) To run the beacon node, issue the following command:
|
||||
```
|
||||
docker run -it -v c:/tmp/prysm-data:/data -p 4000:4000 gcr.io/prysmaticlabs/prysm/beacon-chain:latest --datadir=/data --clear-db
|
||||
```text
|
||||
docker run -it -v c:/prysm/:/data -p 4000:4000 gcr.io/prysmaticlabs/prysm/beacon-chain:latest --datadir=/data --init-sync-no-verify --clear-db
|
||||
```
|
||||
|
||||
### Running via Bazel
|
||||
|
||||
1) To start your Beacon Node with Bazel, issue the following command:
|
||||
```
|
||||
bazel run //beacon-chain -- --clear-db --datadir=/tmp/prysm-data
|
||||
To start your Beacon Node with Bazel, issue the following command:
|
||||
|
||||
```text
|
||||
bazel run //beacon-chain -- --clear-db --datadir=$HOME/prysm
|
||||
```
|
||||
|
||||
This will sync up the Beacon Node with the latest head block in the network.
|
||||
This will sync up the beacon node with the latest head block in the network.
|
||||
|
||||
|
||||
## Staking ETH: Running a Validator Client
|
||||
**NOTE:** The beacon node must be **completely synced** before attempting to initialise a validator client, otherwise the validator will not be able to complete the deposit and **funds will be lost**.
|
||||
|
||||
Once your beacon node is up, the chain will be waiting for you to deposit 3.2 Goerli ETH into the Validator Deposit Contract to activate your validator (discussed in the section below). First though, you will need to create a *validator client* to connect to this node in order to stake and participate. Each validator represents 3.2 Goerli ETH being staked in the system, and it is possible to spin up as many as you desire in order to have more stake in the network.
|
||||
|
||||
### Activating Your Validator: Depositing 3.2 Goerli ETH
|
||||
## Staking ETH: Running a validator client
|
||||
|
||||
Using your validator deposit data from the previous step, follow the instructions found on https://alpha.prylabs.net/participate to make a deposit.
|
||||
Once your beacon node is up, the chain will be waiting for you to deposit 3.2 Goerli ETH into a [validator deposit contract](how-prysm-works/validator-deposit-contract.md) in order to activate your validator \(discussed in the section below\). First though, you will need to create this validator and connect to this node to participate in consensus.
|
||||
|
||||
It will take a while for the nodes in the network to process your deposit, but once your node is active, the validator will begin doing its responsibility. In your validator client, you will be able to frequently see your validator balance as it goes up over time. Note that, should your node ever go offline for a long period, you'll start gradually losing your deposit until you are removed from the system.
|
||||
Each validator represents 3.2 Goerli ETH being staked in the system, and it is possible to spin up as many as you desire in order to have more stake in the network.
|
||||
|
||||
### Starting the validator with Bazel
|
||||
### Activating your validator: depositing 3.2 Göerli ETH
|
||||
|
||||
To begin setting up a validator, follow the instructions found on [prylabs.net](https://prylabs.net) to use the Göerli ETH faucet and make a deposit. For step-by-step assistance with the deposit page, see the [Activating a Validator ](activating-a-validator.md)section of this documentation.
|
||||
|
||||
It will take a while for the nodes in the network to process a deposit. Once the node is active, the validator will immediately begin performing its responsibilities.
|
||||
|
||||
In your validator client, you will be able to frequently see your validator balance as it goes up over time. Note that, should your node ever go offline for a long period, a validator will start gradually losing its deposit until it is removed from the network entirely.
|
||||
|
||||
1. Open another terminal window. Enter your Prysm directory and run the validator by issuing the following command:
|
||||
```
|
||||
cd prysm
|
||||
bazel run //validator
|
||||
```
|
||||
**Congratulations, you are now running Ethereum 2.0 Phase 0!**
|
||||
|
||||
## Setting up a local ETH2 development chain
|
||||
|
||||
This section outlines the process of setting up Prysm for local testing with other Ethereum 2.0 client implementations. See the [INTEROP.md](https://github.com/prysmaticlabs/prysm/blob/master/INTEROP.md) file for advanced configuration options. For more background information on interoperability development, see [this blog post](https://blog.ethereum.org/2019/09/19/eth2-interop-in-review/).
|
||||
|
||||
### Installation and dependencies
|
||||
|
||||
To begin setting up a local ETH2 development chain, follow the **Bazel** instructions found in the [dependencies](https://github.com/prysmaticlabs/prysm#dependencies) and [installation](https://github.com/prysmaticlabs/prysm#installation) sections respectively.
|
||||
|
||||
### Running a local beacon node and validator client
|
||||
|
||||
The example below will generate a beacon genesis state and initiate Prysm with 64 validators with the genesis time set to your machine's UNIX time.
|
||||
|
||||
Open up two terminal windows. In the first, issue the command:
|
||||
|
||||
```text
|
||||
bazel run //beacon-chain -- \
|
||||
--no-genesis-delay \
|
||||
--bootstrap-node= \
|
||||
--deposit-contract $(curl https://prylabs.net/contract) \
|
||||
--clear-db \
|
||||
--interop-num-validators 64 \
|
||||
--interop-eth1data-votes
|
||||
```
|
||||
|
||||
Wait a moment for the beacon chain to start. In the other terminal, issue the command:
|
||||
|
||||
```text
|
||||
bazel run //validator -- --interop-num-validators 64
|
||||
```
|
||||
|
||||
This command will kickstart the system with your 64 validators performing their duties accordingly.
|
||||
|
||||
## Testing Prysm
|
||||
|
||||
**To run the unit tests of our system**, issue the command:
|
||||
```
|
||||
To run the unit tests of our system, issue the command:
|
||||
|
||||
```text
|
||||
bazel test //...
|
||||
```
|
||||
|
||||
**To run our linter**, make sure you have [golangci-lint](https://github.com/golangci/golangci-lint) installed and then issue the command:
|
||||
```
|
||||
To run our linter, make sure you have [golangci-lint](https://github.com/golangci/golangci-lint) installed and then issue the command:
|
||||
|
||||
```text
|
||||
golangci-lint run
|
||||
```
|
||||
|
||||
|
||||
## Contributing
|
||||
We have put all of our contribution guidelines into [CONTRIBUTING.md](https://github.com/prysmaticlabs/prysm/blob/master/CONTRIBUTING.md)! Check it out to get started.
|
||||
Want to get involved? Check out our [Contribution Guide](https://prysmaticlabs.gitbook.io/prysm/getting-involved/contribution-guidelines) to learn more!
|
||||
|
||||
## License
|
||||
[GNU General Public License v3.0](https://www.gnu.org/licenses/gpl-3.0.en.html)
|
||||
|
||||
43
TESTNET.md
Normal file
43
TESTNET.md
Normal file
@@ -0,0 +1,43 @@
|
||||
# Testnet
|
||||
|
||||
The Prysmatic Labs test network is available for anyone to join. The easiest way to participate is by joining through the website, https://prylabs.net.
|
||||
|
||||
## Interop
|
||||
|
||||
For developers looking to connect a client other than Prysm to the test network, here is the relevant information for compatibility.
|
||||
|
||||
|
||||
**Spec version** - [v0.8.3](https://github.com/ethereum/eth2.0-specs/tree/v0.8.3)
|
||||
|
||||
**ETH 1 Deposit Contract Address** - See https://prylabs.net/contract. This contract is deployed on the [goerli](https://goerli.net/) network.
|
||||
|
||||
**Genesis time** - The ETH1 block time in which the 64th deposit to start ETH2 was included. This is NOT midnight of the next day as required by spec.
|
||||
|
||||
### ETH 2 Configuration
|
||||
|
||||
Use the [minimal config](https://github.com/ethereum/eth2.0-specs/blob/v0.8.3/configs/minimal.yaml) with the following changes.
|
||||
|
||||
| field | value |
|
||||
|-------|-------|
|
||||
| MIN_DEPOSIT_AMOUNT | 100 |
|
||||
| MAX_EFFECTIVE_BALANCE | 3.2 * 1e9 |
|
||||
| EJECTION_BALANCE | 1.6 * 1e9 |
|
||||
| EFFECTIVE_BALANCE_INCREMENT | 0.1 * 1e9 |
|
||||
| ETH1_FOLLOW_DISTANCE | 16 |
|
||||
| GENESIS_FORK_VERSION | See [latest code](https://github.com/prysmaticlabs/prysm/blob/master/shared/params/config.go#L236) |
|
||||
|
||||
These parameters reduce the minimal config to 1/10 of the required ETH.
|
||||
|
||||
We have a genesis.ssz file available for download [here](https://prysmaticlabs.com/uploads/genesis.ssz)
|
||||
|
||||
### Connecting to the network
|
||||
|
||||
We have a libp2p bootstrap node available at `/dns4/prylabs.net/tcp/30001/p2p/16Uiu2HAm7Qwe19vz9WzD2Mxn7fXd1vgHHp4iccuyq7TxwRXoAGfc`.
|
||||
|
||||
Some of the Prysmatic Labs hosted nodes are behind a libp2p relay, so your libp2p implementation protocol should understand this functionality.
|
||||
|
||||
### Other
|
||||
|
||||
Undoubtedly, you will have bugs. Reach out to us on [Discord](https://discord.gg/KSA7rPr) and be sure to capture issues on GitHub at https://github.com/prysmaticlabs/prysm/issues.
|
||||
|
||||
If you have instructions for your client, we would love to attempt this on your behalf. Kindly send over the instructions via GitHub issue, PR, email to team@prysmaticlabs.com, or Discord.
|
||||
@@ -1,6 +1,8 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library", "go_test")
|
||||
load("@io_bazel_rules_docker//go:image.bzl", "go_image")
|
||||
load("@io_bazel_rules_docker//container:container.bzl", "container_push")
|
||||
load("@io_bazel_rules_docker//container:container.bzl", "container_bundle")
|
||||
load("//tools:binary_targets.bzl", "binary_targets")
|
||||
load("@io_bazel_rules_docker//contrib:push-all.bzl", "docker_push")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
@@ -8,22 +10,21 @@ go_library(
|
||||
"main.go",
|
||||
"usage.go",
|
||||
],
|
||||
data = [
|
||||
"//proto/beacon/rpc/v1:swagger",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain",
|
||||
visibility = ["//beacon-chain:__subpackages__"],
|
||||
deps = [
|
||||
"//beacon-chain/flags:go_default_library",
|
||||
"//beacon-chain/node:go_default_library",
|
||||
"//beacon-chain/utils:go_default_library",
|
||||
"//shared/cmd:go_default_library",
|
||||
"//shared/debug:go_default_library",
|
||||
"//shared/featureconfig:go_default_library",
|
||||
"//shared/logutil:go_default_library",
|
||||
"//shared/version:go_default_library",
|
||||
"@com_github_ipfs_go_log//:go_default_library",
|
||||
"@com_github_joonix_log//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_urfave_cli//:go_default_library",
|
||||
"@com_github_whyrusleeping_go_logging//:go_default_library",
|
||||
"@com_github_x_cray_logrus_prefixed_formatter//:go_default_library",
|
||||
"@org_uber_go_automaxprocs//:go_default_library",
|
||||
],
|
||||
@@ -35,6 +36,7 @@ go_image(
|
||||
"main.go",
|
||||
"usage.go",
|
||||
],
|
||||
base = "//tools:cc_image",
|
||||
goarch = "amd64",
|
||||
goos = "linux",
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain",
|
||||
@@ -43,36 +45,45 @@ go_image(
|
||||
tags = ["manual"],
|
||||
visibility = ["//visibility:private"],
|
||||
deps = [
|
||||
"//beacon-chain/flags:go_default_library",
|
||||
"//beacon-chain/node:go_default_library",
|
||||
"//beacon-chain/utils:go_default_library",
|
||||
"//shared/cmd:go_default_library",
|
||||
"//shared/debug:go_default_library",
|
||||
"//shared/featureconfig:go_default_library",
|
||||
"//shared/logutil:go_default_library",
|
||||
"//shared/version:go_default_library",
|
||||
"@com_github_ipfs_go_log//:go_default_library",
|
||||
"@com_github_joonix_log//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_urfave_cli//:go_default_library",
|
||||
"@com_github_whyrusleeping_go_logging//:go_default_library",
|
||||
"@com_github_x_cray_logrus_prefixed_formatter//:go_default_library",
|
||||
"@org_uber_go_automaxprocs//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
container_push(
|
||||
name = "push_image",
|
||||
format = "Docker",
|
||||
image = ":image",
|
||||
registry = "gcr.io",
|
||||
repository = "prysmaticlabs/prysm/beacon-chain",
|
||||
tag = "latest",
|
||||
container_bundle(
|
||||
name = "image_bundle",
|
||||
images = {
|
||||
"gcr.io/prysmaticlabs/prysm/beacon-chain:latest": ":image",
|
||||
"gcr.io/prysmaticlabs/prysm/beacon-chain:{DOCKER_TAG}": ":image",
|
||||
},
|
||||
tags = ["manual"],
|
||||
)
|
||||
|
||||
docker_push(
|
||||
name = "push_images",
|
||||
bundle = ":image_bundle",
|
||||
tags = ["manual"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
go_binary(
|
||||
name = "beacon-chain",
|
||||
embed = [":go_default_library"],
|
||||
visibility = ["//beacon-chain:__subpackages__"],
|
||||
visibility = [
|
||||
"//beacon-chain:__subpackages__",
|
||||
"//endtoend:__pkg__",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
@@ -82,3 +93,15 @@ go_test(
|
||||
embed = [":go_default_library"],
|
||||
deps = ["@com_github_urfave_cli//:go_default_library"],
|
||||
)
|
||||
|
||||
[go_binary(
|
||||
name = "beacon-chain-{}-{}".format(
|
||||
pair[0],
|
||||
pair[1],
|
||||
),
|
||||
embed = [":go_default_library"],
|
||||
goarch = pair[1],
|
||||
goos = pair[0],
|
||||
tags = ["manual"],
|
||||
visibility = ["//visibility:public"],
|
||||
) for pair in binary_targets]
|
||||
|
||||
44
beacon-chain/archiver/BUILD.bazel
Normal file
44
beacon-chain/archiver/BUILD.bazel
Normal file
@@ -0,0 +1,44 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
|
||||
|
||||
# Library target for the beacon chain archiver service, which persists
# checkpointed chain data (committee seeds, validator set changes,
# participation metrics, and balances) to the beacon node database.
go_library(
    name = "go_default_library",
    srcs = ["service.go"],
    importpath = "github.com/prysmaticlabs/prysm/beacon-chain/archiver",
    # Only other beacon-chain packages may depend on this service.
    visibility = ["//beacon-chain:__subpackages__"],
    deps = [
        "//beacon-chain/blockchain:go_default_library",
        "//beacon-chain/core/feed:go_default_library",
        "//beacon-chain/core/feed/state:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/core/validators:go_default_library",
        "//beacon-chain/db:go_default_library",
        "//proto/beacon/p2p/v1:go_default_library",
        "//shared/params:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
    ],
)
|
||||
|
||||
# Unit tests for the archiver service; embeds the library so tests can
# exercise unexported functions directly.
go_test(
    name = "go_default_test",
    srcs = ["service_test.go"],
    embed = [":go_default_library"],
    deps = [
        "//beacon-chain/blockchain/testing:go_default_library",
        "//beacon-chain/core/epoch/precompute:go_default_library",
        "//beacon-chain/core/feed:go_default_library",
        "//beacon-chain/core/feed/state:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/db:go_default_library",
        "//beacon-chain/db/testing:go_default_library",
        "//proto/beacon/p2p/v1:go_default_library",
        "//shared/params:go_default_library",
        "//shared/testutil:go_default_library",
        "@com_github_gogo_protobuf//proto:go_default_library",
        "@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
        "@com_github_prysmaticlabs_go_bitfield//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@com_github_sirupsen_logrus//hooks/test:go_default_library",
    ],
)
|
||||
194
beacon-chain/archiver/service.go
Normal file
194
beacon-chain/archiver/service.go
Normal file
@@ -0,0 +1,194 @@
|
||||
package archiver
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/validators"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// log is the package-scoped logger, tagged with an "archiver" prefix so
// entries emitted by this service are easy to filter.
var log = logrus.WithField("prefix", "archiver")
// Service defining archiver functionality for persisting checkpointed
// beacon chain information to a database backend for historical purposes.
type Service struct {
	ctx                  context.Context                 // governs the lifetime of the run loop
	cancel               context.CancelFunc              // cancels ctx; invoked by Stop
	beaconDB             db.Database                     // backend where archived data is persisted
	headFetcher          blockchain.HeadFetcher          // provides the current head state
	participationFetcher blockchain.ParticipationFetcher // provides per-epoch participation metrics
	stateNotifier        statefeed.Notifier              // feed of state events driving the archive loop
	lastArchivedEpoch    uint64                          // most recent epoch successfully archived
}
|
||||
|
||||
// Config options for the archiver service.
type Config struct {
	BeaconDB             db.Database                     // database backend for persisting archived data
	HeadFetcher          blockchain.HeadFetcher          // source of the current head state
	ParticipationFetcher blockchain.ParticipationFetcher // source of per-epoch participation metrics
	StateNotifier        statefeed.Notifier              // feed of chain state events the run loop subscribes to
}
|
||||
|
||||
// NewArchiverService initializes the service from configuration options.
|
||||
func NewArchiverService(ctx context.Context, cfg *Config) *Service {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
return &Service{
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
beaconDB: cfg.BeaconDB,
|
||||
headFetcher: cfg.HeadFetcher,
|
||||
participationFetcher: cfg.ParticipationFetcher,
|
||||
stateNotifier: cfg.StateNotifier,
|
||||
}
|
||||
}
|
||||
|
||||
// Start the archiver service event loop. The loop runs in its own
// goroutine and terminates when the service context is cancelled via Stop.
func (s *Service) Start() {
	go s.run(s.ctx)
}
|
||||
|
||||
// Stop the archiver service event loop by cancelling the service context,
// which signals the run goroutine to exit. Always returns nil.
func (s *Service) Stop() error {
	defer s.cancel()
	return nil
}
|
||||
|
||||
// Status reports the healthy status of the archiver. Returning nil means service
// is correctly running without error. The archiver currently has no failure
// modes to surface, so this always reports healthy.
func (s *Service) Status() error {
	return nil
}
|
||||
|
||||
// We archive committee information pertaining to the head state's epoch.
|
||||
func (s *Service) archiveCommitteeInfo(ctx context.Context, headState *pb.BeaconState, epoch uint64) error {
|
||||
proposerSeed, err := helpers.Seed(headState, epoch, params.BeaconConfig().DomainBeaconProposer)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not generate seed")
|
||||
}
|
||||
attesterSeed, err := helpers.Seed(headState, epoch, params.BeaconConfig().DomainBeaconAttester)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not generate seed")
|
||||
}
|
||||
|
||||
info := &pb.ArchivedCommitteeInfo{
|
||||
ProposerSeed: proposerSeed[:],
|
||||
AttesterSeed: attesterSeed[:],
|
||||
}
|
||||
if err := s.beaconDB.SaveArchivedCommitteeInfo(ctx, epoch, info); err != nil {
|
||||
return errors.Wrap(err, "could not archive committee info")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// We archive active validator set changes that happened during the previous epoch.
|
||||
func (s *Service) archiveActiveSetChanges(ctx context.Context, headState *pb.BeaconState, epoch uint64) error {
|
||||
prevEpoch := epoch - 1
|
||||
activations := validators.ActivatedValidatorIndices(prevEpoch, headState.Validators)
|
||||
slashings := validators.SlashedValidatorIndices(prevEpoch, headState.Validators)
|
||||
activeValidatorCount, err := helpers.ActiveValidatorCount(headState, prevEpoch)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get active validator count")
|
||||
}
|
||||
exited, err := validators.ExitedValidatorIndices(prevEpoch, headState.Validators, activeValidatorCount)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not determine exited validator indices")
|
||||
}
|
||||
activeSetChanges := &pb.ArchivedActiveSetChanges{
|
||||
Activated: activations,
|
||||
Exited: exited,
|
||||
Slashed: slashings,
|
||||
}
|
||||
if err := s.beaconDB.SaveArchivedActiveValidatorChanges(ctx, prevEpoch, activeSetChanges); err != nil {
|
||||
return errors.Wrap(err, "could not archive active validator set changes")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// We compute participation metrics by first retrieving the head state and
|
||||
// matching validator attestations during the epoch.
|
||||
func (s *Service) archiveParticipation(ctx context.Context, epoch uint64) error {
|
||||
p := s.participationFetcher.Participation(epoch)
|
||||
participation := ðpb.ValidatorParticipation{}
|
||||
if p != nil {
|
||||
participation = ðpb.ValidatorParticipation{
|
||||
EligibleEther: p.PrevEpoch,
|
||||
VotedEther: p.PrevEpochTargetAttesters,
|
||||
GlobalParticipationRate: float32(p.PrevEpochTargetAttesters) / float32(p.PrevEpoch),
|
||||
}
|
||||
}
|
||||
return s.beaconDB.SaveArchivedValidatorParticipation(ctx, epoch, participation)
|
||||
}
|
||||
|
||||
// We archive validator balances and active indices.
|
||||
func (s *Service) archiveBalances(ctx context.Context, headState *pb.BeaconState, epoch uint64) error {
|
||||
balances := headState.Balances
|
||||
if err := s.beaconDB.SaveArchivedBalances(ctx, epoch, balances); err != nil {
|
||||
return errors.Wrap(err, "could not archive balances")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) run(ctx context.Context) {
|
||||
stateChannel := make(chan *feed.Event, 1)
|
||||
stateSub := s.stateNotifier.StateFeed().Subscribe(stateChannel)
|
||||
defer stateSub.Unsubscribe()
|
||||
for {
|
||||
select {
|
||||
case event := <-stateChannel:
|
||||
if event.Type == statefeed.BlockProcessed {
|
||||
data := event.Data.(*statefeed.BlockProcessedData)
|
||||
log.WithField("headRoot", fmt.Sprintf("%#x", data.BlockRoot)).Debug("Received block processed event")
|
||||
headState, err := s.headFetcher.HeadState(ctx)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Head state is not available")
|
||||
continue
|
||||
}
|
||||
currentEpoch := helpers.CurrentEpoch(headState)
|
||||
if !helpers.IsEpochEnd(headState.Slot) && currentEpoch <= s.lastArchivedEpoch {
|
||||
continue
|
||||
}
|
||||
epochToArchive := currentEpoch
|
||||
if !helpers.IsEpochEnd(headState.Slot) {
|
||||
epochToArchive--
|
||||
}
|
||||
if err := s.archiveCommitteeInfo(ctx, headState, epochToArchive); err != nil {
|
||||
log.WithError(err).Error("Could not archive committee info")
|
||||
continue
|
||||
}
|
||||
if err := s.archiveActiveSetChanges(ctx, headState, epochToArchive); err != nil {
|
||||
log.WithError(err).Error("Could not archive active validator set changes")
|
||||
continue
|
||||
}
|
||||
if err := s.archiveParticipation(ctx, epochToArchive); err != nil {
|
||||
log.WithError(err).Error("Could not archive validator participation")
|
||||
continue
|
||||
}
|
||||
if err := s.archiveBalances(ctx, headState, epochToArchive); err != nil {
|
||||
log.WithError(err).Error("Could not archive validator balances and active indices")
|
||||
continue
|
||||
}
|
||||
log.WithField(
|
||||
"epoch",
|
||||
epochToArchive,
|
||||
).Debug("Successfully archived beacon chain data during epoch")
|
||||
s.lastArchivedEpoch = epochToArchive
|
||||
}
|
||||
case <-s.ctx.Done():
|
||||
log.Debug("Context closed, exiting goroutine")
|
||||
return
|
||||
case err := <-stateSub.Err():
|
||||
log.WithError(err).Error("Subscription to state feed notifier failed")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
416
beacon-chain/archiver/service_test.go
Normal file
416
beacon-chain/archiver/service_test.go
Normal file
@@ -0,0 +1,416 @@
|
||||
package archiver
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db"
|
||||
dbutil "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
"github.com/sirupsen/logrus"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
|
||||
// init configures the test run: debug-level logs are produced but discarded to
// keep output quiet, and the minimal spec config keeps epochs short and fast.
func init() {
	logrus.SetLevel(logrus.DebugLevel)
	logrus.SetOutput(ioutil.Discard)
	params.OverrideBeaconConfig(params.MinimalSpecConfig())
}
|
||||
|
||||
func TestArchiverService_ReceivesBlockProcessedEvent(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
svc, beaconDB := setupService(t)
|
||||
defer dbutil.TeardownDB(t, beaconDB)
|
||||
svc.headFetcher = &mock.ChainService{
|
||||
State: &pb.BeaconState{Slot: 1},
|
||||
}
|
||||
|
||||
event := &feed.Event{
|
||||
Type: statefeed.BlockProcessed,
|
||||
Data: &statefeed.BlockProcessedData{
|
||||
BlockRoot: [32]byte{1, 2, 3},
|
||||
Verified: true,
|
||||
},
|
||||
}
|
||||
triggerStateEvent(t, svc, event)
|
||||
testutil.AssertLogsContain(t, hook, fmt.Sprintf("%#x", event.Data.(*statefeed.BlockProcessedData).BlockRoot))
|
||||
testutil.AssertLogsContain(t, hook, "Received block processed event")
|
||||
}
|
||||
|
||||
func TestArchiverService_OnlyArchiveAtEpochEnd(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
svc, beaconDB := setupService(t)
|
||||
defer dbutil.TeardownDB(t, beaconDB)
|
||||
// The head state is NOT an epoch end.
|
||||
svc.headFetcher = &mock.ChainService{
|
||||
State: &pb.BeaconState{Slot: params.BeaconConfig().SlotsPerEpoch - 2},
|
||||
}
|
||||
event := &feed.Event{
|
||||
Type: statefeed.BlockProcessed,
|
||||
Data: &statefeed.BlockProcessedData{
|
||||
BlockRoot: [32]byte{1, 2, 3},
|
||||
Verified: true,
|
||||
},
|
||||
}
|
||||
triggerStateEvent(t, svc, event)
|
||||
|
||||
// The context should have been canceled.
|
||||
if svc.ctx.Err() != context.Canceled {
|
||||
t.Error("context was not canceled")
|
||||
}
|
||||
testutil.AssertLogsContain(t, hook, "Received block processed event")
|
||||
// The service should ONLY log any archival logs if we receive a
|
||||
// head slot that is an epoch end.
|
||||
testutil.AssertLogsDoNotContain(t, hook, "Successfully archived")
|
||||
}
|
||||
|
||||
// Drives the service across a full epoch of block events while deliberately
// skipping the epoch-end slot, asserting that archival still happens on the
// first event of the following epoch (the catch-up branch in run).
func TestArchiverService_ArchivesEvenThroughSkipSlot(t *testing.T) {
	hook := logTest.NewGlobal()
	svc, beaconDB := setupService(t)
	validatorCount := uint64(100)
	headState := setupState(t, validatorCount)
	defer dbutil.TeardownDB(t, beaconDB)
	event := &feed.Event{
		Type: statefeed.BlockProcessed,
		Data: &statefeed.BlockProcessedData{
			BlockRoot: [32]byte{1, 2, 3},
			Verified:  true,
		},
	}

	// Run the event loop in the background; exitRoutine keeps the goroutine
	// alive until we have stopped the service.
	exitRoutine := make(chan bool)
	go func() {
		svc.run(svc.ctx)
		<-exitRoutine
	}()

	// Send out an event every slot, skipping the end slot of the epoch.
	for i := uint64(0); i < params.BeaconConfig().SlotsPerEpoch+1; i++ {
		headState.Slot = i
		svc.headFetcher = &mock.ChainService{
			State: headState,
		}
		if helpers.IsEpochEnd(i) {
			continue
		}
		// Send in a loop to ensure it is delivered (busy wait for the service to subscribe to the state feed).
		for sent := 0; sent == 0; {
			sent = svc.stateNotifier.StateFeed().Send(event)
		}
	}
	if err := svc.Stop(); err != nil {
		t.Fatal(err)
	}
	exitRoutine <- true

	// The context should have been canceled.
	if svc.ctx.Err() != context.Canceled {
		t.Error("context was not canceled")
	}

	testutil.AssertLogsContain(t, hook, "Received block processed event")
	// Even though there was a skip slot, we should still be able to archive
	// upon the next block event afterwards.
	testutil.AssertLogsContain(t, hook, "Successfully archived")
}
|
||||
|
||||
func TestArchiverService_ComputesAndSavesParticipation(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
validatorCount := uint64(100)
|
||||
headState := setupState(t, validatorCount)
|
||||
svc, beaconDB := setupService(t)
|
||||
defer dbutil.TeardownDB(t, beaconDB)
|
||||
svc.headFetcher = &mock.ChainService{
|
||||
State: headState,
|
||||
}
|
||||
event := &feed.Event{
|
||||
Type: statefeed.BlockProcessed,
|
||||
Data: &statefeed.BlockProcessedData{
|
||||
BlockRoot: [32]byte{1, 2, 3},
|
||||
Verified: true,
|
||||
},
|
||||
}
|
||||
triggerStateEvent(t, svc, event)
|
||||
|
||||
attestedBalance := uint64(1)
|
||||
|
||||
currentEpoch := helpers.CurrentEpoch(headState)
|
||||
wanted := ðpb.ValidatorParticipation{
|
||||
VotedEther: attestedBalance,
|
||||
EligibleEther: validatorCount * params.BeaconConfig().MaxEffectiveBalance,
|
||||
GlobalParticipationRate: float32(attestedBalance) / float32(validatorCount*params.BeaconConfig().MaxEffectiveBalance),
|
||||
}
|
||||
|
||||
retrieved, err := svc.beaconDB.ArchivedValidatorParticipation(svc.ctx, currentEpoch)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !proto.Equal(wanted, retrieved) {
|
||||
t.Errorf("Wanted participation for epoch %d %v, retrieved %v", currentEpoch-1, wanted, retrieved)
|
||||
}
|
||||
testutil.AssertLogsContain(t, hook, "Successfully archived")
|
||||
}
|
||||
|
||||
func TestArchiverService_SavesIndicesAndBalances(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
validatorCount := uint64(100)
|
||||
headState := setupState(t, validatorCount)
|
||||
svc, beaconDB := setupService(t)
|
||||
defer dbutil.TeardownDB(t, beaconDB)
|
||||
svc.headFetcher = &mock.ChainService{
|
||||
State: headState,
|
||||
}
|
||||
event := &feed.Event{
|
||||
Type: statefeed.BlockProcessed,
|
||||
Data: &statefeed.BlockProcessedData{
|
||||
BlockRoot: [32]byte{1, 2, 3},
|
||||
Verified: true,
|
||||
},
|
||||
}
|
||||
triggerStateEvent(t, svc, event)
|
||||
|
||||
retrieved, err := svc.beaconDB.ArchivedBalances(svc.ctx, helpers.CurrentEpoch(headState))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(headState.Balances, retrieved) {
|
||||
t.Errorf(
|
||||
"Wanted balances for epoch %d %v, retrieved %v",
|
||||
helpers.CurrentEpoch(headState),
|
||||
headState.Balances,
|
||||
retrieved,
|
||||
)
|
||||
}
|
||||
testutil.AssertLogsContain(t, hook, "Successfully archived")
|
||||
}
|
||||
|
||||
func TestArchiverService_SavesCommitteeInfo(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
validatorCount := uint64(100)
|
||||
headState := setupState(t, validatorCount)
|
||||
svc, beaconDB := setupService(t)
|
||||
defer dbutil.TeardownDB(t, beaconDB)
|
||||
svc.headFetcher = &mock.ChainService{
|
||||
State: headState,
|
||||
}
|
||||
event := &feed.Event{
|
||||
Type: statefeed.BlockProcessed,
|
||||
Data: &statefeed.BlockProcessedData{
|
||||
BlockRoot: [32]byte{1, 2, 3},
|
||||
Verified: true,
|
||||
},
|
||||
}
|
||||
triggerStateEvent(t, svc, event)
|
||||
|
||||
currentEpoch := helpers.CurrentEpoch(headState)
|
||||
proposerSeed, err := helpers.Seed(headState, currentEpoch, params.BeaconConfig().DomainBeaconProposer)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
attesterSeed, err := helpers.Seed(headState, currentEpoch, params.BeaconConfig().DomainBeaconAttester)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
wanted := &pb.ArchivedCommitteeInfo{
|
||||
ProposerSeed: proposerSeed[:],
|
||||
AttesterSeed: attesterSeed[:],
|
||||
}
|
||||
|
||||
retrieved, err := svc.beaconDB.ArchivedCommitteeInfo(svc.ctx, helpers.CurrentEpoch(headState))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !proto.Equal(wanted, retrieved) {
|
||||
t.Errorf(
|
||||
"Wanted committee info for epoch %d %v, retrieved %v",
|
||||
helpers.CurrentEpoch(headState),
|
||||
wanted,
|
||||
retrieved,
|
||||
)
|
||||
}
|
||||
testutil.AssertLogsContain(t, hook, "Successfully archived")
|
||||
}
|
||||
|
||||
func TestArchiverService_SavesActivatedValidatorChanges(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
validatorCount := uint64(100)
|
||||
headState := setupState(t, validatorCount)
|
||||
svc, beaconDB := setupService(t)
|
||||
defer dbutil.TeardownDB(t, beaconDB)
|
||||
svc.headFetcher = &mock.ChainService{
|
||||
State: headState,
|
||||
}
|
||||
prevEpoch := helpers.PrevEpoch(headState)
|
||||
delayedActEpoch := helpers.DelayedActivationExitEpoch(prevEpoch)
|
||||
headState.Validators[4].ActivationEpoch = delayedActEpoch
|
||||
headState.Validators[5].ActivationEpoch = delayedActEpoch
|
||||
event := &feed.Event{
|
||||
Type: statefeed.BlockProcessed,
|
||||
Data: &statefeed.BlockProcessedData{
|
||||
BlockRoot: [32]byte{1, 2, 3},
|
||||
Verified: true,
|
||||
},
|
||||
}
|
||||
triggerStateEvent(t, svc, event)
|
||||
|
||||
retrieved, err := beaconDB.ArchivedActiveValidatorChanges(svc.ctx, prevEpoch)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if retrieved == nil {
|
||||
t.Fatal("Retrieved indices are nil")
|
||||
}
|
||||
if !reflect.DeepEqual(retrieved.Activated, []uint64{4, 5}) {
|
||||
t.Errorf("Wanted indices 4 5 activated, received %v", retrieved.Activated)
|
||||
}
|
||||
testutil.AssertLogsContain(t, hook, "Successfully archived")
|
||||
}
|
||||
|
||||
func TestArchiverService_SavesSlashedValidatorChanges(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
validatorCount := uint64(100)
|
||||
headState := setupState(t, validatorCount)
|
||||
svc, beaconDB := setupService(t)
|
||||
defer dbutil.TeardownDB(t, beaconDB)
|
||||
svc.headFetcher = &mock.ChainService{
|
||||
State: headState,
|
||||
}
|
||||
prevEpoch := helpers.PrevEpoch(headState)
|
||||
headState.Validators[95].Slashed = true
|
||||
headState.Validators[96].Slashed = true
|
||||
event := &feed.Event{
|
||||
Type: statefeed.BlockProcessed,
|
||||
Data: &statefeed.BlockProcessedData{
|
||||
BlockRoot: [32]byte{1, 2, 3},
|
||||
Verified: true,
|
||||
},
|
||||
}
|
||||
triggerStateEvent(t, svc, event)
|
||||
|
||||
retrieved, err := beaconDB.ArchivedActiveValidatorChanges(svc.ctx, prevEpoch)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if retrieved == nil {
|
||||
t.Fatal("Retrieved indices are nil")
|
||||
}
|
||||
if !reflect.DeepEqual(retrieved.Slashed, []uint64{95, 96}) {
|
||||
t.Errorf("Wanted indices 95, 96 slashed, received %v", retrieved.Slashed)
|
||||
}
|
||||
testutil.AssertLogsContain(t, hook, "Successfully archived")
|
||||
}
|
||||
|
||||
func TestArchiverService_SavesExitedValidatorChanges(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
validatorCount := uint64(100)
|
||||
headState := setupState(t, validatorCount)
|
||||
svc, beaconDB := setupService(t)
|
||||
defer dbutil.TeardownDB(t, beaconDB)
|
||||
svc.headFetcher = &mock.ChainService{
|
||||
State: headState,
|
||||
}
|
||||
prevEpoch := helpers.PrevEpoch(headState)
|
||||
headState.Validators[95].ExitEpoch = prevEpoch
|
||||
headState.Validators[95].WithdrawableEpoch = prevEpoch + params.BeaconConfig().MinValidatorWithdrawabilityDelay
|
||||
event := &feed.Event{
|
||||
Type: statefeed.BlockProcessed,
|
||||
Data: &statefeed.BlockProcessedData{
|
||||
BlockRoot: [32]byte{1, 2, 3},
|
||||
Verified: true,
|
||||
},
|
||||
}
|
||||
triggerStateEvent(t, svc, event)
|
||||
testutil.AssertLogsContain(t, hook, "Successfully archived")
|
||||
retrieved, err := beaconDB.ArchivedActiveValidatorChanges(svc.ctx, prevEpoch)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if retrieved == nil {
|
||||
t.Fatal("Retrieved indices are nil")
|
||||
}
|
||||
if !reflect.DeepEqual(retrieved.Exited, []uint64{95}) {
|
||||
t.Errorf("Wanted indices 95 exited, received %v", retrieved.Exited)
|
||||
}
|
||||
}
|
||||
|
||||
func setupState(t *testing.T, validatorCount uint64) *pb.BeaconState {
|
||||
validators := make([]*ethpb.Validator, validatorCount)
|
||||
balances := make([]uint64, validatorCount)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
}
|
||||
balances[i] = params.BeaconConfig().MaxEffectiveBalance
|
||||
}
|
||||
|
||||
atts := []*pb.PendingAttestation{{Data: ðpb.AttestationData{Target: ðpb.Checkpoint{}}}}
|
||||
|
||||
// We initialize a head state that has attestations from participated
|
||||
// validators in a simulated fashion.
|
||||
return &pb.BeaconState{
|
||||
Slot: (2 * params.BeaconConfig().SlotsPerEpoch) - 1,
|
||||
Validators: validators,
|
||||
Balances: balances,
|
||||
BlockRoots: make([][]byte, 128),
|
||||
Slashings: []uint64{0, 1e9, 1e9},
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
CurrentEpochAttestations: atts,
|
||||
FinalizedCheckpoint: ðpb.Checkpoint{},
|
||||
JustificationBits: bitfield.Bitvector4{0x00},
|
||||
CurrentJustifiedCheckpoint: ðpb.Checkpoint{},
|
||||
}
|
||||
}
|
||||
|
||||
func setupService(t *testing.T) (*Service, db.Database) {
|
||||
beaconDB := dbutil.SetupDB(t)
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
validatorCount := uint64(100)
|
||||
totalBalance := validatorCount * params.BeaconConfig().MaxEffectiveBalance
|
||||
mockChainService := &mock.ChainService{}
|
||||
return &Service{
|
||||
beaconDB: beaconDB,
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
stateNotifier: mockChainService.StateNotifier(),
|
||||
participationFetcher: &mock.ChainService{
|
||||
Balance: &precompute.Balance{PrevEpoch: totalBalance, PrevEpochTargetAttesters: 1}},
|
||||
}, beaconDB
|
||||
}
|
||||
|
||||
// triggerStateEvent runs the service's event loop in a goroutine, delivers the
// given event over the state feed, then stops the service and verifies the
// shutdown canceled its context.
func triggerStateEvent(t *testing.T, svc *Service, event *feed.Event) {
	// exitRoutine keeps the goroutine alive until after Stop has returned, so
	// run observes the canceled context before we release it.
	exitRoutine := make(chan bool)
	go func() {
		svc.run(svc.ctx)
		<-exitRoutine
	}()

	// Send in a loop to ensure it is delivered (busy wait for the service to subscribe to the state feed).
	for sent := 0; sent == 0; {
		sent = svc.stateNotifier.StateFeed().Send(event)
	}
	if err := svc.Stop(); err != nil {
		t.Fatal(err)
	}
	exitRoutine <- true

	// The context should have been canceled.
	if svc.ctx.Err() != context.Canceled {
		t.Error("context was not canceled")
	}
}
|
||||
@@ -1,46 +0,0 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"service.go",
|
||||
"vote_metrics.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/attestation",
|
||||
visibility = ["//beacon-chain:__subpackages__"],
|
||||
deps = [
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/state:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//proto/beacon/p2p/v1:go_default_library",
|
||||
"//shared/bitutil:go_default_library",
|
||||
"//shared/bytesutil:go_default_library",
|
||||
"//shared/event:go_default_library",
|
||||
"//shared/hashutil:go_default_library",
|
||||
"//shared/messagehandler:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
"@com_github_gogo_protobuf//proto:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
size = "small",
|
||||
srcs = ["service_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/internal:go_default_library",
|
||||
"//proto/beacon/p2p/v1:go_default_library",
|
||||
"//shared/bytesutil:go_default_library",
|
||||
"//shared/hashutil:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
"//shared/testutil:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
|
||||
],
|
||||
)
|
||||
@@ -1,360 +0,0 @@
|
||||
// Package attestation defines the life-cycle and status of single and aggregated attestation.
|
||||
package attestation
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sort"
|
||||
"sync"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bitutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/event"
|
||||
"github.com/prysmaticlabs/prysm/shared/hashutil"
|
||||
handler "github.com/prysmaticlabs/prysm/shared/messagehandler"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var log = logrus.WithField("prefix", "attestation")
|
||||
var committeeCache = cache.NewCommitteesCache()
|
||||
|
||||
// TargetHandler provides an interface for fetching latest attestation targets
|
||||
// and updating attestations in batches.
|
||||
type TargetHandler interface {
|
||||
LatestAttestationTarget(state *pb.BeaconState, validatorIndex uint64) (*pb.AttestationTarget, error)
|
||||
BatchUpdateLatestAttestation(ctx context.Context, atts []*pb.Attestation) error
|
||||
}
|
||||
|
||||
type attestationStore struct {
|
||||
sync.RWMutex
|
||||
m map[[48]byte]*pb.Attestation
|
||||
}
|
||||
|
||||
// Service represents a service that handles the internal
|
||||
// logic of managing single and aggregated attestation.
|
||||
type Service struct {
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
beaconDB *db.BeaconDB
|
||||
incomingFeed *event.Feed
|
||||
incomingChan chan *pb.Attestation
|
||||
// store is the mapping of individual
|
||||
// validator's public key to it's latest attestation.
|
||||
store attestationStore
|
||||
pooledAttestations []*pb.Attestation
|
||||
poolLimit int
|
||||
}
|
||||
|
||||
// Config options for the service.
|
||||
type Config struct {
|
||||
BeaconDB *db.BeaconDB
|
||||
}
|
||||
|
||||
// NewAttestationService instantiates a new service instance that will
|
||||
// be registered into a running beacon node.
|
||||
func NewAttestationService(ctx context.Context, cfg *Config) *Service {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
return &Service{
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
beaconDB: cfg.BeaconDB,
|
||||
incomingFeed: new(event.Feed),
|
||||
incomingChan: make(chan *pb.Attestation, params.BeaconConfig().DefaultBufferSize),
|
||||
store: attestationStore{m: make(map[[48]byte]*pb.Attestation)},
|
||||
pooledAttestations: make([]*pb.Attestation, 0, 1),
|
||||
poolLimit: 1,
|
||||
}
|
||||
}
|
||||
|
||||
// Start an attestation service's main event loop.
|
||||
func (a *Service) Start() {
|
||||
log.Info("Starting service")
|
||||
go a.attestationPool()
|
||||
}
|
||||
|
||||
// Stop the Attestation service's main event loop and associated goroutines.
|
||||
func (a *Service) Stop() error {
|
||||
defer a.cancel()
|
||||
log.Info("Stopping service")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Status always returns nil.
|
||||
// TODO(1201): Add service health checks.
|
||||
func (a *Service) Status() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// IncomingAttestationFeed returns a feed that any service can send incoming p2p attestations into.
|
||||
// The attestation service will subscribe to this feed in order to relay incoming attestations.
|
||||
func (a *Service) IncomingAttestationFeed() *event.Feed {
|
||||
return a.incomingFeed
|
||||
}
|
||||
|
||||
// LatestAttestationTarget returns the target block that the validator index attested to,
|
||||
// the highest slotNumber attestation in attestation pool gets returned.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
// Let `get_latest_attestation_target(store: Store, validator_index: ValidatorIndex) ->
|
||||
// BeaconBlock` be the target block in the attestation
|
||||
// `get_latest_attestation(store, validator_index)`.
|
||||
func (a *Service) LatestAttestationTarget(beaconState *pb.BeaconState, index uint64) (*pb.AttestationTarget, error) {
|
||||
if index >= uint64(len(beaconState.ValidatorRegistry)) {
|
||||
return nil, fmt.Errorf("invalid validator index %d", index)
|
||||
}
|
||||
validator := beaconState.ValidatorRegistry[index]
|
||||
|
||||
pubKey := bytesutil.ToBytes48(validator.Pubkey)
|
||||
a.store.RLock()
|
||||
defer a.store.RUnlock()
|
||||
if _, exists := a.store.m[pubKey]; !exists {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
attestation := a.store.m[pubKey]
|
||||
if attestation == nil {
|
||||
return nil, nil
|
||||
}
|
||||
targetRoot := bytesutil.ToBytes32(attestation.Data.BeaconBlockRootHash32)
|
||||
if !a.beaconDB.HasBlock(targetRoot) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return a.beaconDB.AttestationTarget(targetRoot)
|
||||
}
|
||||
|
||||
// attestationPool takes an newly received attestation from sync service
|
||||
// and updates attestation pool.
|
||||
func (a *Service) attestationPool() {
|
||||
incomingSub := a.incomingFeed.Subscribe(a.incomingChan)
|
||||
defer incomingSub.Unsubscribe()
|
||||
for {
|
||||
select {
|
||||
case <-a.ctx.Done():
|
||||
log.Debug("Attestation pool closed, exiting goroutine")
|
||||
return
|
||||
// Listen for a newly received incoming attestation from the sync service.
|
||||
case attestations := <-a.incomingChan:
|
||||
handler.SafelyHandleMessage(a.ctx, a.handleAttestation, attestations)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (a *Service) handleAttestation(ctx context.Context, msg proto.Message) error {
|
||||
attestation := msg.(*pb.Attestation)
|
||||
a.pooledAttestations = append(a.pooledAttestations, attestation)
|
||||
if len(a.pooledAttestations) > a.poolLimit {
|
||||
if err := a.BatchUpdateLatestAttestation(ctx, a.pooledAttestations); err != nil {
|
||||
return err
|
||||
}
|
||||
state, err := a.beaconDB.HeadState(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// This sets the pool limit, once the old pool is cleared out. It does by using the number of active
|
||||
// validators per slot as an estimate. The active indices here are not used in the actual processing
|
||||
// of attestations.
|
||||
activeIndices := helpers.ActiveValidatorIndices(state.ValidatorRegistry, helpers.CurrentEpoch(state))
|
||||
attPerSlot := len(activeIndices) / int(params.BeaconConfig().SlotsPerEpoch)
|
||||
// we only set the limit at 70% of the calculated amount to be safe so that relevant attestations
|
||||
// arent carried over to the next batch.
|
||||
a.poolLimit = attPerSlot * 7 / 10
|
||||
if a.poolLimit == 0 {
|
||||
a.poolLimit++
|
||||
}
|
||||
attestationPoolLimit.Set(float64(a.poolLimit))
|
||||
a.pooledAttestations = make([]*pb.Attestation, 0, a.poolLimit)
|
||||
}
|
||||
attestationPoolSize.Set(float64(len(a.pooledAttestations)))
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateLatestAttestation inputs an new attestation and checks whether
|
||||
// the attesters who submitted this attestation with the higher slot number
|
||||
// have been noted in the attestation pool. If not, it updates the
|
||||
// attestation pool with attester's public key to attestation.
|
||||
func (a *Service) UpdateLatestAttestation(ctx context.Context, attestation *pb.Attestation) error {
|
||||
totalAttestationSeen.Inc()
|
||||
|
||||
// Potential improvement, instead of getting the state,
|
||||
// we could get a mapping of validator index to public key.
|
||||
beaconState, err := a.beaconDB.HeadState(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
head, err := a.beaconDB.ChainHead()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
headRoot, err := hashutil.HashBeaconBlock(head)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return a.updateAttestation(ctx, headRoot, beaconState, attestation)
|
||||
}
|
||||
|
||||
// BatchUpdateLatestAttestation updates multiple attestations and adds them into the attestation store
|
||||
// if they are valid.
|
||||
func (a *Service) BatchUpdateLatestAttestation(ctx context.Context, attestations []*pb.Attestation) error {
|
||||
|
||||
if attestations == nil {
|
||||
return nil
|
||||
}
|
||||
// Potential improvement, instead of getting the state,
|
||||
// we could get a mapping of validator index to public key.
|
||||
beaconState, err := a.beaconDB.HeadState(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
head, err := a.beaconDB.ChainHead()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
headRoot, err := hashutil.HashBeaconBlock(head)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
attestations = a.sortAttestations(attestations)
|
||||
|
||||
for _, attestation := range attestations {
|
||||
if err := a.updateAttestation(ctx, headRoot, beaconState, attestation); err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// InsertAttestationIntoStore locks the store, inserts the attestation, then
|
||||
// unlocks the store again. This method may be used by external services
|
||||
// in testing to populate the attestation store.
|
||||
func (a *Service) InsertAttestationIntoStore(pubkey [48]byte, att *pb.Attestation) {
|
||||
a.store.Lock()
|
||||
defer a.store.Unlock()
|
||||
a.store.m[pubkey] = att
|
||||
}
|
||||
|
||||
func (a *Service) updateAttestation(ctx context.Context, headRoot [32]byte, beaconState *pb.BeaconState,
|
||||
attestation *pb.Attestation) error {
|
||||
totalAttestationSeen.Inc()
|
||||
|
||||
slot := attestation.Data.Slot
|
||||
var committee []uint64
|
||||
var cachedCommittees *cache.CommitteesInSlot
|
||||
var err error
|
||||
|
||||
for beaconState.Slot < slot {
|
||||
beaconState, err = state.ExecuteStateTransition(
|
||||
ctx, beaconState, nil /* block */, headRoot, state.DefaultConfig(),
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not execute head transition: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
cachedCommittees, err = committeeCache.CommitteesInfoBySlot(slot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if cachedCommittees == nil {
|
||||
crosslinkCommittees, err := helpers.CrosslinkCommitteesAtSlot(beaconState, slot, false /* registryChange */)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cachedCommittees = helpers.ToCommitteeCache(slot, crosslinkCommittees)
|
||||
if err := committeeCache.AddCommittees(cachedCommittees); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Find committee for shard.
|
||||
for _, v := range cachedCommittees.Committees {
|
||||
if v.Shard == attestation.Data.Shard {
|
||||
committee = v.Committee
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"attestationSlot": attestation.Data.Slot - params.BeaconConfig().GenesisSlot,
|
||||
"attestationShard": attestation.Data.Shard,
|
||||
"committeesShard": cachedCommittees.Committees[0].Shard,
|
||||
"committeesList": cachedCommittees.Committees[0].Committee,
|
||||
"lengthOfCommittees": len(cachedCommittees.Committees),
|
||||
}).Debug("Updating latest attestation")
|
||||
|
||||
// The participation bitfield from attestation is represented in bytes,
|
||||
// here we multiply by 8 to get an accurate validator count in bits.
|
||||
bitfield := attestation.AggregationBitfield
|
||||
totalBits := len(bitfield) * 8
|
||||
|
||||
// Check each bit of participation bitfield to find out which
|
||||
// attester has submitted new attestation.
|
||||
// This is has O(n) run time and could be optimized down the line.
|
||||
for i := 0; i < totalBits; i++ {
|
||||
bitSet, err := bitutil.CheckBit(bitfield, i)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !bitSet {
|
||||
continue
|
||||
}
|
||||
|
||||
if i >= len(committee) {
|
||||
log.Debugf("bitfield points to an invalid index in the committee: bitfield %08b", bitfield)
|
||||
return nil
|
||||
}
|
||||
|
||||
if int(committee[i]) >= len(beaconState.ValidatorRegistry) {
|
||||
log.Debugf("index doesn't exist in validator registry: index %d", committee[i])
|
||||
return nil
|
||||
}
|
||||
|
||||
// If the attestation came from this attester. We use the slot committee to find the
|
||||
// validator's actual index.
|
||||
pubkey := bytesutil.ToBytes48(beaconState.ValidatorRegistry[committee[i]].Pubkey)
|
||||
newAttestationSlot := attestation.Data.Slot
|
||||
currentAttestationSlot := uint64(0)
|
||||
a.store.Lock()
|
||||
defer a.store.Unlock()
|
||||
if _, exists := a.store.m[pubkey]; exists {
|
||||
currentAttestationSlot = a.store.m[pubkey].Data.Slot
|
||||
}
|
||||
// If the attestation is newer than this attester's one in pool.
|
||||
if newAttestationSlot > currentAttestationSlot {
|
||||
a.store.m[pubkey] = attestation
|
||||
|
||||
log.WithFields(
|
||||
logrus.Fields{
|
||||
"attestationSlot": attestation.Data.Slot - params.BeaconConfig().GenesisSlot,
|
||||
"justifiedEpoch": attestation.Data.JustifiedEpoch - params.BeaconConfig().GenesisEpoch,
|
||||
},
|
||||
).Debug("Attestation store updated")
|
||||
|
||||
blockRoot := bytesutil.ToBytes32(attestation.Data.BeaconBlockRootHash32)
|
||||
votedBlock, err := a.beaconDB.Block(blockRoot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
reportVoteMetrics(committee[i], votedBlock)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// sortAttestations sorts attestations by their slot number in ascending order.
|
||||
func (a *Service) sortAttestations(attestations []*pb.Attestation) []*pb.Attestation {
|
||||
sort.SliceStable(attestations, func(i, j int) bool {
|
||||
return attestations[i].Data.Slot < attestations[j].Data.Slot
|
||||
})
|
||||
|
||||
return attestations
|
||||
}
|
||||
@@ -1,506 +0,0 @@
|
||||
package attestation
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/internal"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/hashutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
"github.com/sirupsen/logrus"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
|
||||
func init() {
|
||||
logrus.SetLevel(logrus.DebugLevel)
|
||||
}
|
||||
|
||||
var _ = TargetHandler(&Service{})
|
||||
|
||||
func TestUpdateLatestAttestation_UpdatesLatest(t *testing.T) {
|
||||
beaconDB := internal.SetupDB(t)
|
||||
defer internal.TeardownDB(t, beaconDB)
|
||||
ctx := context.Background()
|
||||
|
||||
var validators []*pb.Validator
|
||||
for i := 0; i < 64; i++ {
|
||||
validators = append(validators, &pb.Validator{
|
||||
Pubkey: []byte{byte(i)},
|
||||
ActivationEpoch: params.BeaconConfig().GenesisEpoch,
|
||||
ExitEpoch: params.BeaconConfig().GenesisEpoch + 10,
|
||||
})
|
||||
}
|
||||
|
||||
beaconState := &pb.BeaconState{
|
||||
Slot: params.BeaconConfig().GenesisSlot + 1,
|
||||
ValidatorRegistry: validators,
|
||||
}
|
||||
block := &pb.BeaconBlock{
|
||||
Slot: params.BeaconConfig().GenesisSlot + 1,
|
||||
}
|
||||
if err := beaconDB.SaveBlock(block); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
beaconState.LatestBlock = block
|
||||
if err := beaconDB.UpdateChainHead(ctx, block, beaconState); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
service := NewAttestationService(context.Background(), &Config{BeaconDB: beaconDB})
|
||||
|
||||
attestation := &pb.Attestation{
|
||||
AggregationBitfield: []byte{0x80},
|
||||
Data: &pb.AttestationData{
|
||||
Slot: params.BeaconConfig().GenesisSlot + 1,
|
||||
Shard: 1,
|
||||
},
|
||||
}
|
||||
|
||||
if err := service.UpdateLatestAttestation(ctx, attestation); err != nil {
|
||||
t.Fatalf("could not update latest attestation: %v", err)
|
||||
}
|
||||
pubkey := bytesutil.ToBytes48([]byte{byte(3)})
|
||||
if service.store.m[pubkey].Data.Slot !=
|
||||
attestation.Data.Slot {
|
||||
t.Errorf("Incorrect slot stored, wanted: %d, got: %d",
|
||||
attestation.Data.Slot, service.store.m[pubkey].Data.Slot)
|
||||
}
|
||||
|
||||
beaconState = &pb.BeaconState{
|
||||
Slot: params.BeaconConfig().GenesisSlot + 36,
|
||||
ValidatorRegistry: validators,
|
||||
}
|
||||
beaconState.LatestBlock = block
|
||||
if err := beaconDB.UpdateChainHead(ctx, block, beaconState); err != nil {
|
||||
t.Fatalf("could not save state: %v", err)
|
||||
}
|
||||
|
||||
attestation.Data.Slot = params.BeaconConfig().GenesisSlot + 36
|
||||
attestation.Data.Shard = 36
|
||||
if err := service.UpdateLatestAttestation(ctx, attestation); err != nil {
|
||||
t.Fatalf("could not update latest attestation: %v", err)
|
||||
}
|
||||
if service.store.m[pubkey].Data.Slot !=
|
||||
attestation.Data.Slot {
|
||||
t.Errorf("Incorrect slot stored, wanted: %d, got: %d",
|
||||
attestation.Data.Slot, service.store.m[pubkey].Data.Slot)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAttestationPool_UpdatesAttestationPool(t *testing.T) {
|
||||
beaconDB := internal.SetupDB(t)
|
||||
defer internal.TeardownDB(t, beaconDB)
|
||||
ctx := context.Background()
|
||||
|
||||
var validators []*pb.Validator
|
||||
for i := 0; i < 64; i++ {
|
||||
validators = append(validators, &pb.Validator{
|
||||
Pubkey: []byte{byte(i)},
|
||||
ActivationEpoch: params.BeaconConfig().GenesisEpoch,
|
||||
ExitEpoch: params.BeaconConfig().GenesisEpoch + 10,
|
||||
})
|
||||
}
|
||||
beaconState := &pb.BeaconState{
|
||||
Slot: params.BeaconConfig().GenesisSlot + 1,
|
||||
ValidatorRegistry: validators,
|
||||
}
|
||||
block := &pb.BeaconBlock{
|
||||
Slot: params.BeaconConfig().GenesisSlot + 1,
|
||||
}
|
||||
if err := beaconDB.SaveBlock(block); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
beaconState.LatestBlock = block
|
||||
if err := beaconDB.UpdateChainHead(ctx, block, beaconState); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
service := NewAttestationService(context.Background(), &Config{BeaconDB: beaconDB})
|
||||
attestation := &pb.Attestation{
|
||||
AggregationBitfield: []byte{0x80},
|
||||
Data: &pb.AttestationData{
|
||||
Slot: params.BeaconConfig().GenesisSlot + 1,
|
||||
Shard: 1,
|
||||
},
|
||||
}
|
||||
|
||||
if err := service.handleAttestation(context.Background(), attestation); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLatestAttestationTarget_CantGetAttestation(t *testing.T) {
|
||||
beaconDB := internal.SetupDB(t)
|
||||
defer internal.TeardownDB(t, beaconDB)
|
||||
ctx := context.Background()
|
||||
|
||||
if err := beaconDB.SaveState(ctx, &pb.BeaconState{
|
||||
ValidatorRegistry: []*pb.Validator{{}},
|
||||
LatestBlock: &pb.BeaconBlock{Slot: params.BeaconConfig().GenesisSlot},
|
||||
}); err != nil {
|
||||
t.Fatalf("could not save state: %v", err)
|
||||
}
|
||||
service := NewAttestationService(context.Background(), &Config{BeaconDB: beaconDB})
|
||||
headState, err := beaconDB.HeadState(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
index := uint64(100)
|
||||
want := fmt.Sprintf("invalid validator index %d", index)
|
||||
if _, err := service.LatestAttestationTarget(headState, index); !strings.Contains(err.Error(), want) {
|
||||
t.Errorf("Wanted error to contain %s, received %v", want, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLatestAttestationTarget_ReturnsLatestAttestedBlock(t *testing.T) {
|
||||
beaconDB := internal.SetupDB(t)
|
||||
defer internal.TeardownDB(t, beaconDB)
|
||||
ctx := context.Background()
|
||||
|
||||
pubKey := []byte{'A'}
|
||||
if err := beaconDB.SaveState(ctx, &pb.BeaconState{
|
||||
ValidatorRegistry: []*pb.Validator{{Pubkey: pubKey}},
|
||||
LatestBlock: &pb.BeaconBlock{Slot: params.BeaconConfig().GenesisSlot},
|
||||
}); err != nil {
|
||||
t.Fatalf("could not save state: %v", err)
|
||||
}
|
||||
|
||||
block := &pb.BeaconBlock{Slot: 999}
|
||||
if err := beaconDB.SaveBlock(block); err != nil {
|
||||
t.Fatalf("could not save block: %v", err)
|
||||
}
|
||||
blockRoot, err := hashutil.HashBeaconBlock(block)
|
||||
if err != nil {
|
||||
log.Fatalf("could not hash block: %v", err)
|
||||
}
|
||||
if err := beaconDB.SaveAttestationTarget(ctx, &pb.AttestationTarget{
|
||||
Slot: block.Slot,
|
||||
BlockRoot: blockRoot[:],
|
||||
ParentRoot: []byte{},
|
||||
}); err != nil {
|
||||
log.Fatalf("could not save att target: %v", err)
|
||||
}
|
||||
|
||||
service := NewAttestationService(context.Background(), &Config{BeaconDB: beaconDB})
|
||||
|
||||
attestation := &pb.Attestation{
|
||||
Data: &pb.AttestationData{
|
||||
BeaconBlockRootHash32: blockRoot[:],
|
||||
}}
|
||||
pubKey48 := bytesutil.ToBytes48(pubKey)
|
||||
service.store.m[pubKey48] = attestation
|
||||
|
||||
headState, err := beaconDB.HeadState(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
latestAttestedTarget, err := service.LatestAttestationTarget(headState, 0)
|
||||
if err != nil {
|
||||
t.Fatalf("Could not get latest attestation: %v", err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(blockRoot[:], latestAttestedTarget.BlockRoot) {
|
||||
t.Errorf("Wanted: %v, got: %v", blockRoot[:], latestAttestedTarget.BlockRoot)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateLatestAttestation_CacheEnabledAndMiss(t *testing.T) {
|
||||
|
||||
beaconDB := internal.SetupDB(t)
|
||||
defer internal.TeardownDB(t, beaconDB)
|
||||
ctx := context.Background()
|
||||
|
||||
var validators []*pb.Validator
|
||||
for i := 0; i < 64; i++ {
|
||||
validators = append(validators, &pb.Validator{
|
||||
Pubkey: []byte{byte(i)},
|
||||
ActivationEpoch: params.BeaconConfig().GenesisEpoch,
|
||||
ExitEpoch: params.BeaconConfig().GenesisEpoch + 10,
|
||||
})
|
||||
}
|
||||
|
||||
beaconState := &pb.BeaconState{
|
||||
Slot: params.BeaconConfig().GenesisSlot + 1,
|
||||
ValidatorRegistry: validators,
|
||||
}
|
||||
block := &pb.BeaconBlock{
|
||||
Slot: params.BeaconConfig().GenesisSlot + 1,
|
||||
}
|
||||
if err := beaconDB.SaveBlock(block); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
beaconState.LatestBlock = block
|
||||
if err := beaconDB.UpdateChainHead(ctx, block, beaconState); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
service := NewAttestationService(context.Background(), &Config{BeaconDB: beaconDB})
|
||||
|
||||
attestation := &pb.Attestation{
|
||||
AggregationBitfield: []byte{0x80},
|
||||
Data: &pb.AttestationData{
|
||||
Slot: params.BeaconConfig().GenesisSlot + 1,
|
||||
Shard: 1,
|
||||
},
|
||||
}
|
||||
|
||||
if err := service.UpdateLatestAttestation(ctx, attestation); err != nil {
|
||||
t.Fatalf("could not update latest attestation: %v", err)
|
||||
}
|
||||
pubkey := bytesutil.ToBytes48([]byte{byte(3)})
|
||||
if service.store.m[pubkey].Data.Slot !=
|
||||
attestation.Data.Slot {
|
||||
t.Errorf("Incorrect slot stored, wanted: %d, got: %d",
|
||||
attestation.Data.Slot, service.store.m[pubkey].Data.Slot)
|
||||
}
|
||||
|
||||
attestation.Data.Slot = params.BeaconConfig().GenesisSlot + 36
|
||||
attestation.Data.Shard = 36
|
||||
|
||||
beaconState = &pb.BeaconState{
|
||||
Slot: params.BeaconConfig().GenesisSlot + 36,
|
||||
ValidatorRegistry: validators,
|
||||
}
|
||||
beaconState.LatestBlock = block
|
||||
if err := beaconDB.UpdateChainHead(ctx, block, beaconState); err != nil {
|
||||
t.Fatalf("could not save state: %v", err)
|
||||
}
|
||||
|
||||
if err := service.UpdateLatestAttestation(ctx, attestation); err != nil {
|
||||
t.Fatalf("could not update latest attestation: %v", err)
|
||||
}
|
||||
if service.store.m[pubkey].Data.Slot !=
|
||||
attestation.Data.Slot {
|
||||
t.Errorf("Incorrect slot stored, wanted: %d, got: %d",
|
||||
attestation.Data.Slot, service.store.m[pubkey].Data.Slot)
|
||||
}
|
||||
|
||||
// Verify the committee for attestation's data slot was cached.
|
||||
fetchedCommittees, err := committeeCache.CommitteesInfoBySlot(attestation.Data.Slot)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
wantedCommittee := []uint64{38}
|
||||
if !reflect.DeepEqual(wantedCommittee, fetchedCommittees.Committees[0].Committee) {
|
||||
t.Errorf(
|
||||
"Result indices was an unexpected value. Wanted %d, got %d",
|
||||
wantedCommittee,
|
||||
fetchedCommittees.Committees[0].Committee,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateLatestAttestation_CacheEnabledAndHit(t *testing.T) {
|
||||
|
||||
var validators []*pb.Validator
|
||||
for i := 0; i < 64; i++ {
|
||||
validators = append(validators, &pb.Validator{
|
||||
Pubkey: []byte{byte(i)},
|
||||
ActivationEpoch: params.BeaconConfig().GenesisEpoch,
|
||||
ExitEpoch: params.BeaconConfig().GenesisEpoch + 10,
|
||||
})
|
||||
}
|
||||
|
||||
beaconDB := internal.SetupDB(t)
|
||||
defer internal.TeardownDB(t, beaconDB)
|
||||
ctx := context.Background()
|
||||
|
||||
beaconState := &pb.BeaconState{
|
||||
Slot: params.BeaconConfig().GenesisSlot + 2,
|
||||
ValidatorRegistry: validators,
|
||||
}
|
||||
block := &pb.BeaconBlock{
|
||||
Slot: params.BeaconConfig().GenesisSlot + 2,
|
||||
}
|
||||
if err := beaconDB.SaveBlock(block); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
beaconState.LatestBlock = block
|
||||
if err := beaconDB.UpdateChainHead(ctx, block, beaconState); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
service := NewAttestationService(context.Background(), &Config{BeaconDB: beaconDB})
|
||||
|
||||
slot := params.BeaconConfig().GenesisSlot + 2
|
||||
shard := uint64(3)
|
||||
index := uint64(4)
|
||||
attestation := &pb.Attestation{
|
||||
AggregationBitfield: []byte{0x80},
|
||||
Data: &pb.AttestationData{
|
||||
Slot: slot,
|
||||
Shard: shard,
|
||||
},
|
||||
}
|
||||
|
||||
csInSlot := &cache.CommitteesInSlot{
|
||||
Slot: slot,
|
||||
Committees: []*cache.CommitteeInfo{
|
||||
{Shard: shard, Committee: []uint64{index, 999}},
|
||||
}}
|
||||
|
||||
if err := committeeCache.AddCommittees(csInSlot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := service.UpdateLatestAttestation(ctx, attestation); err != nil {
|
||||
t.Fatalf("could not update latest attestation: %v", err)
|
||||
}
|
||||
pubkey := bytesutil.ToBytes48([]byte{byte(index)})
|
||||
if err := service.UpdateLatestAttestation(ctx, attestation); err != nil {
|
||||
t.Fatalf("could not update latest attestation: %v", err)
|
||||
}
|
||||
|
||||
if service.store.m[pubkey].Data.Slot !=
|
||||
attestation.Data.Slot {
|
||||
t.Errorf("Incorrect slot stored, wanted: %d, got: %d",
|
||||
attestation.Data.Slot, service.store.m[pubkey].Data.Slot)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateLatestAttestation_InvalidIndex(t *testing.T) {
|
||||
beaconDB := internal.SetupDB(t)
|
||||
hook := logTest.NewGlobal()
|
||||
defer internal.TeardownDB(t, beaconDB)
|
||||
ctx := context.Background()
|
||||
|
||||
var validators []*pb.Validator
|
||||
for i := 0; i < 64; i++ {
|
||||
validators = append(validators, &pb.Validator{
|
||||
Pubkey: []byte{byte(i)},
|
||||
ActivationEpoch: params.BeaconConfig().GenesisEpoch,
|
||||
ExitEpoch: params.BeaconConfig().GenesisEpoch + 10,
|
||||
})
|
||||
}
|
||||
|
||||
beaconState := &pb.BeaconState{
|
||||
Slot: params.BeaconConfig().GenesisSlot + 1,
|
||||
ValidatorRegistry: validators,
|
||||
}
|
||||
block := &pb.BeaconBlock{
|
||||
Slot: params.BeaconConfig().GenesisSlot + 1,
|
||||
}
|
||||
if err := beaconDB.SaveBlock(block); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
beaconState.LatestBlock = block
|
||||
if err := beaconDB.UpdateChainHead(ctx, block, beaconState); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
service := NewAttestationService(context.Background(), &Config{BeaconDB: beaconDB})
|
||||
attestation := &pb.Attestation{
|
||||
AggregationBitfield: []byte{0xC0},
|
||||
Data: &pb.AttestationData{
|
||||
Slot: params.BeaconConfig().GenesisSlot + 1,
|
||||
Shard: 1,
|
||||
},
|
||||
}
|
||||
|
||||
wanted := "bitfield points to an invalid index in the committee"
|
||||
|
||||
if err := service.UpdateLatestAttestation(ctx, attestation); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
testutil.AssertLogsContain(t, hook, wanted)
|
||||
}
|
||||
|
||||
func TestBatchUpdate_FromSync(t *testing.T) {
|
||||
beaconDB := internal.SetupDB(t)
|
||||
defer internal.TeardownDB(t, beaconDB)
|
||||
ctx := context.Background()
|
||||
|
||||
var validators []*pb.Validator
|
||||
for i := 0; i < 64; i++ {
|
||||
validators = append(validators, &pb.Validator{
|
||||
Pubkey: []byte{byte(i)},
|
||||
ActivationEpoch: params.BeaconConfig().GenesisEpoch,
|
||||
ExitEpoch: params.BeaconConfig().GenesisEpoch + 10,
|
||||
})
|
||||
}
|
||||
|
||||
beaconState := &pb.BeaconState{
|
||||
Slot: params.BeaconConfig().GenesisSlot + 1,
|
||||
ValidatorRegistry: validators,
|
||||
}
|
||||
block := &pb.BeaconBlock{
|
||||
Slot: params.BeaconConfig().GenesisSlot + 1,
|
||||
}
|
||||
if err := beaconDB.SaveBlock(block); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
beaconState.LatestBlock = block
|
||||
if err := beaconDB.UpdateChainHead(ctx, block, beaconState); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
service := NewAttestationService(context.Background(), &Config{BeaconDB: beaconDB})
|
||||
service.poolLimit = 9
|
||||
for i := 0; i < 10; i++ {
|
||||
attestation := &pb.Attestation{
|
||||
AggregationBitfield: []byte{0x80},
|
||||
Data: &pb.AttestationData{
|
||||
Slot: params.BeaconConfig().GenesisSlot + 1,
|
||||
Shard: 1,
|
||||
},
|
||||
}
|
||||
if err := service.handleAttestation(ctx, attestation); err != nil {
|
||||
t.Fatalf("could not update latest attestation: %v", err)
|
||||
}
|
||||
}
|
||||
if len(service.pooledAttestations) != 0 {
|
||||
t.Errorf("pooled attestations were not cleared out, still %d attestations in pool", len(service.pooledAttestations))
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateLatestAttestation_BatchUpdate(t *testing.T) {
|
||||
beaconDB := internal.SetupDB(t)
|
||||
defer internal.TeardownDB(t, beaconDB)
|
||||
ctx := context.Background()
|
||||
|
||||
var validators []*pb.Validator
|
||||
for i := 0; i < 64; i++ {
|
||||
validators = append(validators, &pb.Validator{
|
||||
Pubkey: []byte{byte(i)},
|
||||
ActivationEpoch: params.BeaconConfig().GenesisEpoch,
|
||||
ExitEpoch: params.BeaconConfig().GenesisEpoch + 10,
|
||||
})
|
||||
}
|
||||
|
||||
beaconState := &pb.BeaconState{
|
||||
Slot: params.BeaconConfig().GenesisSlot + 1,
|
||||
ValidatorRegistry: validators,
|
||||
}
|
||||
block := &pb.BeaconBlock{
|
||||
Slot: params.BeaconConfig().GenesisSlot + 1,
|
||||
}
|
||||
if err := beaconDB.SaveBlock(block); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
beaconState.LatestBlock = block
|
||||
if err := beaconDB.UpdateChainHead(ctx, block, beaconState); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
service := NewAttestationService(context.Background(), &Config{BeaconDB: beaconDB})
|
||||
attestations := make([]*pb.Attestation, 0)
|
||||
for i := 0; i < 10; i++ {
|
||||
attestations = append(attestations, &pb.Attestation{
|
||||
AggregationBitfield: []byte{0x80},
|
||||
Data: &pb.AttestationData{
|
||||
Slot: params.BeaconConfig().GenesisSlot + 1,
|
||||
Shard: 1,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
if err := service.BatchUpdateLatestAttestation(ctx, attestations); err != nil {
|
||||
t.Fatalf("could not update latest attestation: %v", err)
|
||||
}
|
||||
}
|
||||
@@ -1,43 +0,0 @@
|
||||
package attestation
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
)
|
||||
|
||||
var (
|
||||
validatorLastVoteGauge = promauto.NewGaugeVec(prometheus.GaugeOpts{
|
||||
Name: "validators_last_vote",
|
||||
Help: "Votes of validators, updated when there's a new attestation",
|
||||
}, []string{
|
||||
"validatorIndex",
|
||||
})
|
||||
totalAttestationSeen = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "total_seen_attestations",
|
||||
Help: "Total number of attestations seen by the validators",
|
||||
})
|
||||
|
||||
attestationPoolLimit = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "attestation_pool_limit",
|
||||
Help: "The limit of the attestation pool",
|
||||
})
|
||||
attestationPoolSize = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "attestation_pool_size",
|
||||
Help: "The current size of the attestation pool",
|
||||
})
|
||||
)
|
||||
|
||||
func reportVoteMetrics(index uint64, block *pb.BeaconBlock) {
|
||||
// Don't update vote metrics if the incoming block is nil.
|
||||
if block == nil {
|
||||
return
|
||||
}
|
||||
|
||||
s := params.BeaconConfig().GenesisSlot
|
||||
validatorLastVoteGauge.WithLabelValues(
|
||||
"v" + strconv.Itoa(int(index))).Set(float64(block.Slot - s))
|
||||
}
|
||||
@@ -3,73 +3,122 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"block_processing.go",
|
||||
"fork_choice.go",
|
||||
"chain_info.go",
|
||||
"info.go",
|
||||
"log.go",
|
||||
"metrics.go",
|
||||
"receive_attestation.go",
|
||||
"receive_block.go",
|
||||
"service.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/blockchain",
|
||||
visibility = ["//beacon-chain:__subpackages__"],
|
||||
deps = [
|
||||
"//beacon-chain/attestation:go_default_library",
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/blockchain/forkchoice:go_default_library",
|
||||
"//beacon-chain/cache/depositcache:go_default_library",
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
"//beacon-chain/core/epoch/precompute:go_default_library",
|
||||
"//beacon-chain/core/feed:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/state:go_default_library",
|
||||
"//beacon-chain/core/validators:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/operations:go_default_library",
|
||||
"//beacon-chain/operations/attestations:go_default_library",
|
||||
"//beacon-chain/p2p:go_default_library",
|
||||
"//beacon-chain/powchain:go_default_library",
|
||||
"//proto/beacon/p2p/v1:go_default_library",
|
||||
"//shared/bytesutil:go_default_library",
|
||||
"//shared/event:go_default_library",
|
||||
"//shared/featureconfig:go_default_library",
|
||||
"//shared/hashutil:go_default_library",
|
||||
"//shared/p2p:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
"//shared/slotutil:go_default_library",
|
||||
"//shared/traceutil:go_default_library",
|
||||
"@com_github_gogo_protobuf//proto:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_ssz//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@io_opencensus_go//trace:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
test_suite(
|
||||
name = "go_default_test",
|
||||
size = "small",
|
||||
tests = [
|
||||
":go_raceoff_test",
|
||||
":go_raceon_test",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_raceoff_test",
|
||||
size = "medium",
|
||||
srcs = [
|
||||
"block_processing_test.go",
|
||||
"fork_choice_reorg_test.go",
|
||||
"fork_choice_test.go",
|
||||
"chain_info_test.go",
|
||||
"receive_attestation_test.go",
|
||||
"receive_block_test.go",
|
||||
"service_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/attestation:go_default_library",
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/cache/depositcache:go_default_library",
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/state:go_default_library",
|
||||
"//beacon-chain/core/validators:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/internal:go_default_library",
|
||||
"//beacon-chain/db/testing:go_default_library",
|
||||
"//beacon-chain/p2p:go_default_library",
|
||||
"//beacon-chain/powchain:go_default_library",
|
||||
"//proto/beacon/p2p/v1:go_default_library",
|
||||
"//shared/bls:go_default_library",
|
||||
"//shared/bytesutil:go_default_library",
|
||||
"//shared/event:go_default_library",
|
||||
"//shared/featureconfig:go_default_library",
|
||||
"//shared/forkutil:go_default_library",
|
||||
"//shared/hashutil:go_default_library",
|
||||
"//shared/p2p:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
"//shared/stateutil:go_default_library",
|
||||
"//shared/testutil:go_default_library",
|
||||
"//shared/trieutil:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
|
||||
"@com_github_gogo_protobuf//proto:go_default_library",
|
||||
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_ssz//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
|
||||
"@org_golang_x_net//context:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_raceon_test",
|
||||
srcs = [
|
||||
"chain_info_norace_test.go",
|
||||
"service_norace_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
race = "on",
|
||||
tags = ["race_on"],
|
||||
deps = [
|
||||
"//beacon-chain/cache/depositcache:go_default_library",
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/state:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/db/testing:go_default_library",
|
||||
"//beacon-chain/p2p:go_default_library",
|
||||
"//beacon-chain/powchain:go_default_library",
|
||||
"//proto/beacon/p2p/v1:go_default_library",
|
||||
"//shared/bytesutil:go_default_library",
|
||||
"//shared/event:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
"//shared/testutil:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
|
||||
"@com_github_gogo_protobuf//proto:go_default_library",
|
||||
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_ssz//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
|
||||
"@org_golang_x_net//context:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -1,361 +0,0 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
b "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/validators"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/event"
|
||||
"github.com/prysmaticlabs/prysm/shared/featureconfig"
|
||||
"github.com/prysmaticlabs/prysm/shared/hashutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// BlockReceiver interface defines the methods in the blockchain service which
|
||||
// directly receives a new block from other services and applies the full processing pipeline.
|
||||
type BlockReceiver interface {
|
||||
CanonicalBlockFeed() *event.Feed
|
||||
ReceiveBlock(ctx context.Context, block *pb.BeaconBlock) (*pb.BeaconState, error)
|
||||
IsCanonical(slot uint64, hash []byte) bool
|
||||
UpdateCanonicalRoots(block *pb.BeaconBlock, root [32]byte)
|
||||
}
|
||||
|
||||
// BlockProcessor defines a common interface for methods useful for directly applying state transitions
|
||||
// to beacon blocks and generating a new beacon state from the Ethereum 2.0 core primitives.
|
||||
type BlockProcessor interface {
|
||||
VerifyBlockValidity(ctx context.Context, block *pb.BeaconBlock, beaconState *pb.BeaconState) error
|
||||
ApplyBlockStateTransition(ctx context.Context, block *pb.BeaconBlock, beaconState *pb.BeaconState) (*pb.BeaconState, error)
|
||||
CleanupBlockOperations(ctx context.Context, block *pb.BeaconBlock) error
|
||||
}
|
||||
|
||||
// BlockFailedProcessingErr represents a block failing a state transition function.
|
||||
type BlockFailedProcessingErr struct {
|
||||
err error
|
||||
}
|
||||
|
||||
func (b *BlockFailedProcessingErr) Error() string {
|
||||
return fmt.Sprintf("block failed processing: %v", b.err)
|
||||
}
|
||||
|
||||
// ReceiveBlock is a function that defines the operations that are preformed on
|
||||
// any block that is received from p2p layer or rpc. It performs the following actions: It checks the block to see
|
||||
// 1. Verify a block passes pre-processing conditions
|
||||
// 2. Save and broadcast the block via p2p to other peers
|
||||
// 3. Apply the block state transition function and account for skip slots.
|
||||
// 4. Process and cleanup any block operations, such as attestations and deposits, which would need to be
|
||||
// either included or flushed from the beacon node's runtime.
|
||||
func (c *ChainService) ReceiveBlock(ctx context.Context, block *pb.BeaconBlock) (*pb.BeaconState, error) {
|
||||
c.receiveBlockLock.Lock()
|
||||
defer c.receiveBlockLock.Unlock()
|
||||
ctx, span := trace.StartSpan(ctx, "beacon-chain.blockchain.ReceiveBlock")
|
||||
defer span.End()
|
||||
parentRoot := bytesutil.ToBytes32(block.ParentRootHash32)
|
||||
parent, err := c.beaconDB.Block(parentRoot)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get parent block: %v", err)
|
||||
}
|
||||
if parent == nil {
|
||||
return nil, errors.New("parent does not exist in DB")
|
||||
}
|
||||
beaconState, err := c.beaconDB.HistoricalStateFromSlot(ctx, parent.Slot, parentRoot)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not retrieve beacon state: %v", err)
|
||||
}
|
||||
saveLatestBlock := beaconState.LatestBlock
|
||||
|
||||
blockRoot, err := hashutil.HashBeaconBlock(block)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not hash beacon block")
|
||||
}
|
||||
// We first verify the block's basic validity conditions.
|
||||
if err := c.VerifyBlockValidity(ctx, block, beaconState); err != nil {
|
||||
return beaconState, fmt.Errorf("block with slot %d is not ready for processing: %v", block.Slot, err)
|
||||
}
|
||||
|
||||
// We save the block to the DB and broadcast it to our peers.
|
||||
if err := c.SaveAndBroadcastBlock(ctx, block); err != nil {
|
||||
return beaconState, fmt.Errorf(
|
||||
"could not save and broadcast beacon block with slot %d: %v",
|
||||
block.Slot-params.BeaconConfig().GenesisSlot, err,
|
||||
)
|
||||
}
|
||||
|
||||
log.WithField("slotNumber", block.Slot-params.BeaconConfig().GenesisSlot).Info(
|
||||
"Executing state transition")
|
||||
|
||||
// We then apply the block state transition accordingly to obtain the resulting beacon state.
|
||||
beaconState, err = c.ApplyBlockStateTransition(ctx, block, beaconState)
|
||||
if err != nil {
|
||||
switch err.(type) {
|
||||
case *BlockFailedProcessingErr:
|
||||
// If the block fails processing, we mark it as blacklisted and delete it from our DB.
|
||||
c.beaconDB.MarkEvilBlockHash(blockRoot)
|
||||
if err := c.beaconDB.DeleteBlock(block); err != nil {
|
||||
return nil, fmt.Errorf("could not delete bad block from db: %v", err)
|
||||
}
|
||||
return beaconState, err
|
||||
default:
|
||||
return beaconState, fmt.Errorf("could not apply block state transition: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"slotNumber": block.Slot - params.BeaconConfig().GenesisSlot,
|
||||
"currentEpoch": helpers.SlotToEpoch(block.Slot) - params.BeaconConfig().GenesisEpoch,
|
||||
}).Info("State transition complete")
|
||||
|
||||
// Check state root
|
||||
if featureconfig.FeatureConfig().EnableCheckBlockStateRoot {
|
||||
// Calc state hash with previous block
|
||||
beaconState.LatestBlock = saveLatestBlock
|
||||
stateRoot, err := hashutil.HashProto(beaconState)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not hash beacon state: %v", err)
|
||||
}
|
||||
beaconState.LatestBlock = block
|
||||
if !bytes.Equal(block.StateRootHash32, stateRoot[:]) {
|
||||
return nil, fmt.Errorf("beacon state root is not equal to block state root: %#x != %#x", stateRoot, block.StateRootHash32)
|
||||
}
|
||||
}
|
||||
|
||||
// We process the block's contained deposits, attestations, and other operations
|
||||
// and that may need to be stored or deleted from the beacon node's persistent storage.
|
||||
if err := c.CleanupBlockOperations(ctx, block); err != nil {
|
||||
return beaconState, fmt.Errorf("could not process block deposits, attestations, and other operations: %v", err)
|
||||
}
|
||||
|
||||
log.WithField("slot", block.Slot-params.BeaconConfig().GenesisSlot).Info("Finished processing beacon block")
|
||||
return beaconState, nil
|
||||
}
|
||||
|
||||
// ApplyBlockStateTransition runs the Ethereum 2.0 state transition function
|
||||
// to produce a new beacon state and also accounts for skip slots occurring.
|
||||
//
|
||||
// def apply_block_state_transition(block):
|
||||
// # process skipped slots
|
||||
// while (state.slot < block.slot - 1):
|
||||
// state = slot_state_transition(state, block=None)
|
||||
//
|
||||
// # process slot with block
|
||||
// state = slot_state_transition(state, block)
|
||||
//
|
||||
// # check state root
|
||||
// if block.state_root == hash(state):
|
||||
// return state, error
|
||||
// else:
|
||||
// return nil, error # or throw or whatever
|
||||
//
|
||||
func (c *ChainService) ApplyBlockStateTransition(
|
||||
ctx context.Context, block *pb.BeaconBlock, beaconState *pb.BeaconState,
|
||||
) (*pb.BeaconState, error) {
|
||||
// Retrieve the last processed beacon block's hash root.
|
||||
headRoot, err := c.ChainHeadRoot()
|
||||
if err != nil {
|
||||
return beaconState, fmt.Errorf("could not retrieve chain head root: %v", err)
|
||||
}
|
||||
|
||||
// Check for skipped slots.
|
||||
numSkippedSlots := 0
|
||||
for beaconState.Slot < block.Slot-1 {
|
||||
beaconState, err = c.runStateTransition(ctx, headRoot, nil, beaconState)
|
||||
if err != nil {
|
||||
return beaconState, err
|
||||
}
|
||||
numSkippedSlots++
|
||||
}
|
||||
if numSkippedSlots > 0 {
|
||||
log.Warnf("Processed %d skipped slots", numSkippedSlots)
|
||||
}
|
||||
|
||||
beaconState, err = c.runStateTransition(ctx, headRoot, block, beaconState)
|
||||
if err != nil {
|
||||
return beaconState, err
|
||||
}
|
||||
return beaconState, nil
|
||||
}
|
||||
|
||||
// VerifyBlockValidity cross-checks the block against the pre-processing conditions from
|
||||
// Ethereum 2.0, namely:
|
||||
// The parent block with root block.parent_root has been processed and accepted.
|
||||
// The node has processed its state up to slot, block.slot - 1.
|
||||
// The Ethereum 1.0 block pointed to by the state.processed_pow_receipt_root has been processed and accepted.
|
||||
// The node's local clock time is greater than or equal to state.genesis_time + block.slot * SECONDS_PER_SLOT.
|
||||
func (c *ChainService) VerifyBlockValidity(
|
||||
ctx context.Context,
|
||||
block *pb.BeaconBlock,
|
||||
beaconState *pb.BeaconState,
|
||||
) error {
|
||||
if block.Slot == params.BeaconConfig().GenesisSlot {
|
||||
return fmt.Errorf("cannot process a genesis block: received block with slot %d",
|
||||
block.Slot-params.BeaconConfig().GenesisSlot)
|
||||
}
|
||||
powBlockFetcher := c.web3Service.Client().BlockByHash
|
||||
if err := b.IsValidBlock(ctx, beaconState, block,
|
||||
c.beaconDB.HasBlock, powBlockFetcher, c.genesisTime); err != nil {
|
||||
return fmt.Errorf("block does not fulfill pre-processing conditions %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SaveAndBroadcastBlock stores the block in persistent storage and then broadcasts it to
|
||||
// peers via p2p. Blocks which have already been saved are not processed again via p2p, which is why
|
||||
// the order of operations is important in this function to prevent infinite p2p loops.
|
||||
func (c *ChainService) SaveAndBroadcastBlock(ctx context.Context, block *pb.BeaconBlock) error {
|
||||
blockRoot, err := hashutil.HashBeaconBlock(block)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not tree hash incoming block: %v", err)
|
||||
}
|
||||
if err := c.beaconDB.SaveBlock(block); err != nil {
|
||||
return fmt.Errorf("failed to save block: %v", err)
|
||||
}
|
||||
if err := c.beaconDB.SaveAttestationTarget(ctx, &pb.AttestationTarget{
|
||||
Slot: block.Slot,
|
||||
BlockRoot: blockRoot[:],
|
||||
ParentRoot: block.ParentRootHash32,
|
||||
}); err != nil {
|
||||
return fmt.Errorf("failed to save attestation target: %v", err)
|
||||
}
|
||||
// Announce the new block to the network.
|
||||
c.p2p.Broadcast(ctx, &pb.BeaconBlockAnnounce{
|
||||
Hash: blockRoot[:],
|
||||
SlotNumber: block.Slot,
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
// CleanupBlockOperations processes and cleans up any block operations relevant to the beacon node
|
||||
// such as attestations, exits, and deposits. We update the latest seen attestation by validator
|
||||
// in the local node's runtime, cleanup and remove pending deposits which have been included in the block
|
||||
// from our node's local cache, and process validator exits and more.
|
||||
func (c *ChainService) CleanupBlockOperations(ctx context.Context, block *pb.BeaconBlock) error {
|
||||
// Forward processed block to operation pool to remove individual operation from DB.
|
||||
if c.opsPoolService.IncomingProcessedBlockFeed().Send(block) == 0 {
|
||||
log.Error("Sent processed block to no subscribers")
|
||||
}
|
||||
|
||||
if err := c.attsService.BatchUpdateLatestAttestation(ctx, block.Body.Attestations); err != nil {
|
||||
return fmt.Errorf("failed to update latest attestation for store: %v", err)
|
||||
}
|
||||
|
||||
// Remove pending deposits from the deposit queue.
|
||||
for _, dep := range block.Body.Deposits {
|
||||
c.beaconDB.RemovePendingDeposit(ctx, dep)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// runStateTransition executes the Ethereum 2.0 core state transition for the beacon chain and
|
||||
// updates important checkpoints and local persistent data during epoch transitions. It serves as a wrapper
|
||||
// around the more low-level, core state transition function primitive.
|
||||
func (c *ChainService) runStateTransition(
|
||||
ctx context.Context,
|
||||
headRoot [32]byte,
|
||||
block *pb.BeaconBlock,
|
||||
beaconState *pb.BeaconState,
|
||||
) (*pb.BeaconState, error) {
|
||||
finalizedEpoch := beaconState.FinalizedEpoch
|
||||
newState, err := state.ExecuteStateTransition(
|
||||
ctx,
|
||||
beaconState,
|
||||
block,
|
||||
headRoot,
|
||||
&state.TransitionConfig{
|
||||
VerifySignatures: false, // We disable signature verification for now.
|
||||
Logging: true, // We enable logging in this state transition call.
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return beaconState, &BlockFailedProcessingErr{err}
|
||||
}
|
||||
// Prune the block cache on every new finalized epoch.
|
||||
if newState.FinalizedEpoch > finalizedEpoch {
|
||||
c.beaconDB.ClearBlockCache()
|
||||
}
|
||||
log.WithField(
|
||||
"slotsSinceGenesis", newState.Slot-params.BeaconConfig().GenesisSlot,
|
||||
).Info("Slot transition successfully processed")
|
||||
|
||||
if block != nil {
|
||||
log.WithField(
|
||||
"slotsSinceGenesis", newState.Slot-params.BeaconConfig().GenesisSlot,
|
||||
).Info("Block transition successfully processed")
|
||||
|
||||
blockRoot, err := hashutil.HashBeaconBlock(block)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Save Historical States.
|
||||
if err := c.beaconDB.SaveHistoricalState(ctx, beaconState, blockRoot); err != nil {
|
||||
return nil, fmt.Errorf("could not save historical state: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
if helpers.IsEpochEnd(newState.Slot) {
|
||||
// Save activated validators of this epoch to public key -> index DB.
|
||||
if err := c.saveValidatorIdx(newState); err != nil {
|
||||
return newState, fmt.Errorf("could not save validator index: %v", err)
|
||||
}
|
||||
// Delete exited validators of this epoch to public key -> index DB.
|
||||
if err := c.deleteValidatorIdx(newState); err != nil {
|
||||
return newState, fmt.Errorf("could not delete validator index: %v", err)
|
||||
}
|
||||
// Update FFG checkpoints in DB.
|
||||
if err := c.updateFFGCheckPts(ctx, newState); err != nil {
|
||||
return newState, fmt.Errorf("could not update FFG checkpts: %v", err)
|
||||
}
|
||||
log.WithField(
|
||||
"SlotsSinceGenesis", newState.Slot-params.BeaconConfig().GenesisSlot,
|
||||
).Info("Epoch transition successfully processed")
|
||||
}
|
||||
return newState, nil
|
||||
}
|
||||
|
||||
// saveValidatorIdx saves the validators public key to index mapping in DB, these
|
||||
// validators were activated from current epoch. After it saves, current epoch key
|
||||
// is deleted from ActivatedValidators mapping.
|
||||
func (c *ChainService) saveValidatorIdx(state *pb.BeaconState) error {
|
||||
nextEpoch := helpers.CurrentEpoch(state) + 1
|
||||
activatedValidators := validators.ActivatedValFromEpoch(nextEpoch)
|
||||
var idxNotInState []uint64
|
||||
for _, idx := range activatedValidators {
|
||||
// If for some reason the activated validator indices is not in state,
|
||||
// we skip them and save them to process for next epoch.
|
||||
if int(idx) >= len(state.ValidatorRegistry) {
|
||||
idxNotInState = append(idxNotInState, idx)
|
||||
continue
|
||||
}
|
||||
pubKey := state.ValidatorRegistry[idx].Pubkey
|
||||
if err := c.beaconDB.SaveValidatorIndex(pubKey, int(idx)); err != nil {
|
||||
return fmt.Errorf("could not save validator index: %v", err)
|
||||
}
|
||||
}
|
||||
// Since we are processing next epoch, save the can't processed validator indices
|
||||
// to the epoch after that.
|
||||
validators.InsertActivatedIndices(nextEpoch+1, idxNotInState)
|
||||
validators.DeleteActivatedVal(helpers.CurrentEpoch(state))
|
||||
return nil
|
||||
}
|
||||
|
||||
// deleteValidatorIdx deletes the validators public key to index mapping in DB, the
|
||||
// validators were exited from current epoch. After it deletes, current epoch key
|
||||
// is deleted from ExitedValidators mapping.
|
||||
func (c *ChainService) deleteValidatorIdx(state *pb.BeaconState) error {
|
||||
exitedValidators := validators.ExitedValFromEpoch(helpers.CurrentEpoch(state) + 1)
|
||||
for _, idx := range exitedValidators {
|
||||
pubKey := state.ValidatorRegistry[idx].Pubkey
|
||||
if err := c.beaconDB.DeleteValidatorIndex(pubKey); err != nil {
|
||||
return fmt.Errorf("could not delete validator index: %v", err)
|
||||
}
|
||||
}
|
||||
validators.DeleteExitedVal(helpers.CurrentEpoch(state))
|
||||
return nil
|
||||
}
|
||||
@@ -1,893 +0,0 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"math/big"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/attestation"
|
||||
b "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
|
||||
v "github.com/prysmaticlabs/prysm/beacon-chain/core/validators"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/internal"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/featureconfig"
|
||||
"github.com/prysmaticlabs/prysm/shared/hashutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/trieutil"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
|
||||
// Ensure ChainService implements interfaces.
|
||||
var _ = BlockProcessor(&ChainService{})
|
||||
|
||||
func initBlockStateRoot(t *testing.T, block *pb.BeaconBlock, chainService *ChainService) {
|
||||
parentRoot := bytesutil.ToBytes32(block.ParentRootHash32)
|
||||
parent, err := chainService.beaconDB.Block(parentRoot)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
beaconState, err := chainService.beaconDB.HistoricalStateFromSlot(context.Background(), parent.Slot, parentRoot)
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to retrieve state %v", err)
|
||||
}
|
||||
saveLatestBlock := beaconState.LatestBlock
|
||||
|
||||
computedState, err := chainService.ApplyBlockStateTransition(context.Background(), block, beaconState)
|
||||
if err != nil {
|
||||
t.Fatalf("could not apply block state transition: %v", err)
|
||||
}
|
||||
|
||||
computedState.LatestBlock = saveLatestBlock
|
||||
stateRoot, err := hashutil.HashProto(computedState)
|
||||
if err != nil {
|
||||
t.Fatalf("could not tree hash state: %v", err)
|
||||
}
|
||||
block.StateRootHash32 = stateRoot[:]
|
||||
t.Logf("state root after block: %#x", stateRoot)
|
||||
}
|
||||
|
||||
func TestReceiveBlock_FaultyPOWChain(t *testing.T) {
|
||||
db := internal.SetupDB(t)
|
||||
defer internal.TeardownDB(t, db)
|
||||
chainService := setupBeaconChain(t, db, nil)
|
||||
unixTime := uint64(time.Now().Unix())
|
||||
deposits, _ := setupInitialDeposits(t, 100)
|
||||
if err := db.InitializeState(context.Background(), unixTime, deposits, &pb.Eth1Data{}); err != nil {
|
||||
t.Fatalf("Could not initialize beacon state to disk: %v", err)
|
||||
}
|
||||
|
||||
if err := SetSlotInState(chainService, 1); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
parentBlock := &pb.BeaconBlock{
|
||||
Slot: 1,
|
||||
}
|
||||
|
||||
parentRoot, err := hashutil.HashBeaconBlock(parentBlock)
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to tree hash block %v", err)
|
||||
}
|
||||
|
||||
if err := chainService.beaconDB.SaveBlock(parentBlock); err != nil {
|
||||
t.Fatalf("Unable to save block %v", err)
|
||||
}
|
||||
|
||||
block := &pb.BeaconBlock{
|
||||
Slot: 2,
|
||||
ParentRootHash32: parentRoot[:],
|
||||
Eth1Data: &pb.Eth1Data{
|
||||
DepositRootHash32: []byte("a"),
|
||||
BlockHash32: []byte("b"),
|
||||
},
|
||||
}
|
||||
|
||||
if err := chainService.beaconDB.SaveBlock(block); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := chainService.ReceiveBlock(context.Background(), block); err == nil {
|
||||
t.Errorf("Expected receive block to fail, received nil: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestReceiveBlock_ProcessCorrectly(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
db := internal.SetupDB(t)
|
||||
defer internal.TeardownDB(t, db)
|
||||
ctx := context.Background()
|
||||
|
||||
chainService := setupBeaconChain(t, db, nil)
|
||||
deposits, privKeys := setupInitialDeposits(t, 100)
|
||||
eth1Data := &pb.Eth1Data{
|
||||
DepositRootHash32: []byte{},
|
||||
BlockHash32: []byte{},
|
||||
}
|
||||
beaconState, err := state.GenesisBeaconState(deposits, 0, eth1Data)
|
||||
if err != nil {
|
||||
t.Fatalf("Can't generate genesis state: %v", err)
|
||||
}
|
||||
stateRoot, err := hashutil.HashProto(beaconState)
|
||||
if err != nil {
|
||||
t.Fatalf("Could not tree hash state: %v", err)
|
||||
}
|
||||
genesis := b.NewGenesisBlock([]byte{})
|
||||
if err := chainService.beaconDB.SaveBlock(genesis); err != nil {
|
||||
t.Fatalf("Could not save block to db: %v", err)
|
||||
}
|
||||
parentHash, err := hashutil.HashBeaconBlock(genesis)
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to get tree hash root of canonical head: %v", err)
|
||||
}
|
||||
|
||||
if err := db.SaveHistoricalState(ctx, beaconState, parentHash); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := chainService.beaconDB.UpdateChainHead(ctx, genesis, beaconState); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
beaconState.Slot++
|
||||
randaoReveal := createRandaoReveal(t, beaconState, privKeys)
|
||||
|
||||
block := &pb.BeaconBlock{
|
||||
Slot: beaconState.Slot,
|
||||
StateRootHash32: stateRoot[:],
|
||||
ParentRootHash32: parentHash[:],
|
||||
RandaoReveal: randaoReveal,
|
||||
Eth1Data: &pb.Eth1Data{
|
||||
DepositRootHash32: []byte("a"),
|
||||
BlockHash32: []byte("b"),
|
||||
},
|
||||
Body: &pb.BeaconBlockBody{
|
||||
Attestations: nil,
|
||||
},
|
||||
}
|
||||
|
||||
initBlockStateRoot(t, block, chainService)
|
||||
|
||||
if err := chainService.beaconDB.SaveJustifiedBlock(block); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := chainService.beaconDB.SaveFinalizedBlock(block); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := chainService.beaconDB.SaveBlock(block); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := chainService.ReceiveBlock(context.Background(), block); err != nil {
|
||||
t.Errorf("Block failed processing: %v", err)
|
||||
}
|
||||
|
||||
testutil.AssertLogsContain(t, hook, "Finished processing beacon block")
|
||||
}
|
||||
|
||||
func TestReceiveBlock_UsesParentBlockState(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
db := internal.SetupDB(t)
|
||||
defer internal.TeardownDB(t, db)
|
||||
ctx := context.Background()
|
||||
|
||||
chainService := setupBeaconChain(t, db, nil)
|
||||
deposits, _ := setupInitialDeposits(t, 100)
|
||||
eth1Data := &pb.Eth1Data{
|
||||
DepositRootHash32: []byte{},
|
||||
BlockHash32: []byte{},
|
||||
}
|
||||
beaconState, err := state.GenesisBeaconState(deposits, 0, eth1Data)
|
||||
if err != nil {
|
||||
t.Fatalf("Can't generate genesis state: %v", err)
|
||||
}
|
||||
|
||||
stateRoot, err := hashutil.HashProto(beaconState)
|
||||
if err != nil {
|
||||
t.Fatalf("Could not tree hash state: %v", err)
|
||||
}
|
||||
|
||||
parentHash, genesisBlock := setupGenesisBlock(t, chainService)
|
||||
if err := chainService.beaconDB.UpdateChainHead(ctx, genesisBlock, beaconState); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := chainService.beaconDB.SaveHistoricalState(ctx, beaconState, parentHash); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// We ensure the block uses the right state parent if its ancestor is not block.Slot-1.
|
||||
block := &pb.BeaconBlock{
|
||||
Slot: beaconState.Slot + 4,
|
||||
StateRootHash32: stateRoot[:],
|
||||
ParentRootHash32: parentHash[:],
|
||||
RandaoReveal: []byte{},
|
||||
Eth1Data: &pb.Eth1Data{
|
||||
DepositRootHash32: []byte("a"),
|
||||
BlockHash32: []byte("b"),
|
||||
},
|
||||
Body: &pb.BeaconBlockBody{
|
||||
Attestations: nil,
|
||||
},
|
||||
}
|
||||
initBlockStateRoot(t, block, chainService)
|
||||
if err := chainService.beaconDB.SaveBlock(block); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := chainService.ReceiveBlock(context.Background(), block); err != nil {
|
||||
t.Errorf("Block failed processing: %v", err)
|
||||
}
|
||||
testutil.AssertLogsContain(t, hook, "Finished processing beacon block")
|
||||
}
|
||||
|
||||
func TestReceiveBlock_DeletesBadBlock(t *testing.T) {
|
||||
featureconfig.InitFeatureConfig(&featureconfig.FeatureFlagConfig{
|
||||
EnableCheckBlockStateRoot: false,
|
||||
})
|
||||
db := internal.SetupDB(t)
|
||||
defer internal.TeardownDB(t, db)
|
||||
ctx := context.Background()
|
||||
|
||||
chainService := setupBeaconChain(t, db, nil)
|
||||
deposits, _ := setupInitialDeposits(t, 100)
|
||||
eth1Data := &pb.Eth1Data{
|
||||
DepositRootHash32: []byte{},
|
||||
BlockHash32: []byte{},
|
||||
}
|
||||
beaconState, err := state.GenesisBeaconState(deposits, 0, eth1Data)
|
||||
if err != nil {
|
||||
t.Fatalf("Can't generate genesis state: %v", err)
|
||||
}
|
||||
stateRoot, err := hashutil.HashProto(beaconState)
|
||||
if err != nil {
|
||||
t.Fatalf("Could not tree hash state: %v", err)
|
||||
}
|
||||
|
||||
parentHash, genesisBlock := setupGenesisBlock(t, chainService)
|
||||
if err := chainService.beaconDB.UpdateChainHead(ctx, genesisBlock, beaconState); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := chainService.beaconDB.SaveHistoricalState(ctx, beaconState, parentHash); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
beaconState.Slot++
|
||||
|
||||
block := &pb.BeaconBlock{
|
||||
Slot: beaconState.Slot,
|
||||
StateRootHash32: stateRoot[:],
|
||||
ParentRootHash32: parentHash[:],
|
||||
RandaoReveal: []byte{},
|
||||
Eth1Data: &pb.Eth1Data{
|
||||
DepositRootHash32: []byte("a"),
|
||||
BlockHash32: []byte("b"),
|
||||
},
|
||||
Body: &pb.BeaconBlockBody{
|
||||
Attestations: []*pb.Attestation{
|
||||
{
|
||||
Data: &pb.AttestationData{
|
||||
JustifiedEpoch: params.BeaconConfig().GenesisSlot * 100,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
blockRoot, err := hashutil.HashBeaconBlock(block)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
_, err = chainService.ReceiveBlock(context.Background(), block)
|
||||
switch err.(type) {
|
||||
case *BlockFailedProcessingErr:
|
||||
t.Log("Block failed processing as expected")
|
||||
default:
|
||||
t.Errorf("Unexpected block processing error: %v", err)
|
||||
}
|
||||
|
||||
savedBlock, err := db.Block(blockRoot)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if savedBlock != nil {
|
||||
t.Errorf("Expected bad block to have been deleted, received: %v", savedBlock)
|
||||
}
|
||||
// We also verify the block has been blacklisted.
|
||||
if !db.IsEvilBlockHash(blockRoot) {
|
||||
t.Error("Expected block root to have been blacklisted")
|
||||
}
|
||||
featureconfig.InitFeatureConfig(&featureconfig.FeatureFlagConfig{
|
||||
EnableCheckBlockStateRoot: true,
|
||||
})
|
||||
}
|
||||
|
||||
func TestReceiveBlock_CheckBlockStateRoot_GoodState(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
db := internal.SetupDB(t)
|
||||
defer internal.TeardownDB(t, db)
|
||||
ctx := context.Background()
|
||||
|
||||
attsService := attestation.NewAttestationService(
|
||||
context.Background(),
|
||||
&attestation.Config{BeaconDB: db})
|
||||
chainService := setupBeaconChain(t, db, attsService)
|
||||
deposits, privKeys := setupInitialDeposits(t, 100)
|
||||
eth1Data := &pb.Eth1Data{
|
||||
DepositRootHash32: []byte{},
|
||||
BlockHash32: []byte{},
|
||||
}
|
||||
beaconState, err := state.GenesisBeaconState(deposits, 0, eth1Data)
|
||||
if err != nil {
|
||||
t.Fatalf("Can't generate genesis state: %v", err)
|
||||
}
|
||||
|
||||
parentHash, genesisBlock := setupGenesisBlock(t, chainService)
|
||||
if err := chainService.beaconDB.SaveHistoricalState(ctx, beaconState, parentHash); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
beaconState.Slot++
|
||||
if err := chainService.beaconDB.UpdateChainHead(ctx, genesisBlock, beaconState); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
beaconState.Slot++
|
||||
goodStateBlock := &pb.BeaconBlock{
|
||||
Slot: beaconState.Slot,
|
||||
ParentRootHash32: parentHash[:],
|
||||
RandaoReveal: createRandaoReveal(t, beaconState, privKeys),
|
||||
Body: &pb.BeaconBlockBody{},
|
||||
}
|
||||
beaconState.Slot--
|
||||
initBlockStateRoot(t, goodStateBlock, chainService)
|
||||
|
||||
if err := chainService.beaconDB.SaveBlock(goodStateBlock); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
_, err = chainService.ReceiveBlock(context.Background(), goodStateBlock)
|
||||
if err != nil {
|
||||
t.Fatalf("error exists for good block %v", err)
|
||||
}
|
||||
testutil.AssertLogsContain(t, hook, "Executing state transition")
|
||||
}
|
||||
|
||||
func TestReceiveBlock_CheckBlockStateRoot_BadState(t *testing.T) {
|
||||
db := internal.SetupDB(t)
|
||||
defer internal.TeardownDB(t, db)
|
||||
chainService := setupBeaconChain(t, db, nil)
|
||||
deposits, privKeys := setupInitialDeposits(t, 100)
|
||||
ctx := context.Background()
|
||||
eth1Data := &pb.Eth1Data{
|
||||
DepositRootHash32: []byte{},
|
||||
BlockHash32: []byte{},
|
||||
}
|
||||
beaconState, err := state.GenesisBeaconState(deposits, 0, eth1Data)
|
||||
if err != nil {
|
||||
t.Fatalf("Can't generate genesis state: %v", err)
|
||||
}
|
||||
parentHash, genesisBlock := setupGenesisBlock(t, chainService)
|
||||
if err := chainService.beaconDB.SaveHistoricalState(ctx, beaconState, parentHash); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
beaconState.Slot++
|
||||
if err := chainService.beaconDB.UpdateChainHead(ctx, genesisBlock, beaconState); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
beaconState.Slot++
|
||||
invalidStateBlock := &pb.BeaconBlock{
|
||||
Slot: beaconState.Slot,
|
||||
StateRootHash32: []byte{'b', 'a', 'd', ' ', 'h', 'a', 's', 'h'},
|
||||
ParentRootHash32: parentHash[:],
|
||||
RandaoReveal: createRandaoReveal(t, beaconState, privKeys),
|
||||
Body: &pb.BeaconBlockBody{},
|
||||
}
|
||||
beaconState.Slot--
|
||||
|
||||
_, err = chainService.ReceiveBlock(context.Background(), invalidStateBlock)
|
||||
if err == nil {
|
||||
t.Fatal("no error for wrong block state root")
|
||||
}
|
||||
if !strings.Contains(err.Error(), "beacon state root is not equal to block state root: ") {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestReceiveBlock_RemovesPendingDeposits(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
db := internal.SetupDB(t)
|
||||
defer internal.TeardownDB(t, db)
|
||||
ctx := context.Background()
|
||||
|
||||
attsService := attestation.NewAttestationService(
|
||||
context.Background(),
|
||||
&attestation.Config{BeaconDB: db})
|
||||
chainService := setupBeaconChain(t, db, attsService)
|
||||
deposits, privKeys := setupInitialDeposits(t, 100)
|
||||
eth1Data := &pb.Eth1Data{
|
||||
DepositRootHash32: []byte{},
|
||||
BlockHash32: []byte{},
|
||||
}
|
||||
beaconState, err := state.GenesisBeaconState(deposits, 0, eth1Data)
|
||||
if err != nil {
|
||||
t.Fatalf("Can't generate genesis state: %v", err)
|
||||
}
|
||||
if err := chainService.beaconDB.SaveJustifiedState(beaconState); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := db.SaveFinalizedState(beaconState); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
stateRoot, err := hashutil.HashProto(beaconState)
|
||||
if err != nil {
|
||||
t.Fatalf("Could not tree hash state: %v", err)
|
||||
}
|
||||
parentHash, genesisBlock := setupGenesisBlock(t, chainService)
|
||||
beaconState.Slot++
|
||||
if err := chainService.beaconDB.UpdateChainHead(ctx, genesisBlock, beaconState); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
currentSlot := params.BeaconConfig().GenesisSlot
|
||||
randaoReveal := createRandaoReveal(t, beaconState, privKeys)
|
||||
|
||||
pendingDeposits := []*pb.Deposit{
|
||||
createPreChainStartDeposit(t, []byte{'F'}, beaconState.DepositIndex),
|
||||
}
|
||||
pendingDepositsData := make([][]byte, len(pendingDeposits))
|
||||
for i, pd := range pendingDeposits {
|
||||
pendingDepositsData[i] = pd.DepositData
|
||||
}
|
||||
depositTrie, err := trieutil.GenerateTrieFromItems(pendingDepositsData, int(params.BeaconConfig().DepositContractTreeDepth))
|
||||
if err != nil {
|
||||
t.Fatalf("Could not generate deposit trie: %v", err)
|
||||
}
|
||||
for i := range pendingDeposits {
|
||||
pendingDeposits[i].MerkleTreeIndex = 0
|
||||
proof, err := depositTrie.MerkleProof(int(pendingDeposits[i].MerkleTreeIndex))
|
||||
if err != nil {
|
||||
t.Fatalf("Could not generate proof: %v", err)
|
||||
}
|
||||
pendingDeposits[i].MerkleProofHash32S = proof
|
||||
}
|
||||
depositRoot := depositTrie.Root()
|
||||
beaconState.LatestEth1Data.DepositRootHash32 = depositRoot[:]
|
||||
if err := db.SaveHistoricalState(context.Background(), beaconState, parentHash); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
block := &pb.BeaconBlock{
|
||||
Slot: currentSlot + 1,
|
||||
StateRootHash32: stateRoot[:],
|
||||
ParentRootHash32: parentHash[:],
|
||||
RandaoReveal: randaoReveal,
|
||||
Eth1Data: &pb.Eth1Data{
|
||||
DepositRootHash32: []byte("a"),
|
||||
BlockHash32: []byte("b"),
|
||||
},
|
||||
Body: &pb.BeaconBlockBody{
|
||||
Deposits: pendingDeposits,
|
||||
},
|
||||
}
|
||||
|
||||
beaconState.LatestBlock = block
|
||||
beaconState.Slot--
|
||||
beaconState.DepositIndex = 0
|
||||
if err := chainService.beaconDB.SaveState(ctx, beaconState); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
initBlockStateRoot(t, block, chainService)
|
||||
|
||||
blockRoot, err := hashutil.HashBeaconBlock(block)
|
||||
if err != nil {
|
||||
log.Fatalf("could not hash block: %v", err)
|
||||
}
|
||||
|
||||
if err := chainService.beaconDB.SaveJustifiedBlock(block); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := chainService.beaconDB.SaveFinalizedBlock(block); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for _, dep := range pendingDeposits {
|
||||
db.InsertPendingDeposit(chainService.ctx, dep, big.NewInt(0))
|
||||
}
|
||||
|
||||
if len(db.PendingDeposits(chainService.ctx, nil)) != len(pendingDeposits) || len(pendingDeposits) == 0 {
|
||||
t.Fatalf("Expected %d pending deposits", len(pendingDeposits))
|
||||
}
|
||||
|
||||
beaconState.Slot--
|
||||
if err := chainService.beaconDB.SaveState(ctx, beaconState); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := db.SaveHistoricalState(context.Background(), beaconState, blockRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
computedState, err := chainService.ReceiveBlock(context.Background(), block)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
for i := 0; i < len(beaconState.ValidatorRegistry); i++ {
|
||||
pubKey := bytesutil.ToBytes48(beaconState.ValidatorRegistry[i].Pubkey)
|
||||
attsService.InsertAttestationIntoStore(pubKey, &pb.Attestation{
|
||||
Data: &pb.AttestationData{
|
||||
BeaconBlockRootHash32: blockRoot[:],
|
||||
}},
|
||||
)
|
||||
}
|
||||
if err := chainService.ApplyForkChoiceRule(context.Background(), block, computedState); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if len(db.PendingDeposits(chainService.ctx, nil)) != 0 {
|
||||
t.Fatalf("Expected 0 pending deposits, but there are %+v", db.PendingDeposits(chainService.ctx, nil))
|
||||
}
|
||||
testutil.AssertLogsContain(t, hook, "Executing state transition")
|
||||
}
|
||||
|
||||
// Scenario graph: http://bit.ly/2K1k2KZ
|
||||
//
|
||||
//digraph G {
|
||||
// rankdir=LR;
|
||||
// node [shape="none"];
|
||||
//
|
||||
// subgraph blocks {
|
||||
// rankdir=LR;
|
||||
// node [shape="box"];
|
||||
// a->b;
|
||||
// b->c;
|
||||
// c->e;
|
||||
// c->f;
|
||||
// f->g;
|
||||
// e->h;
|
||||
// }
|
||||
//
|
||||
// { rank=same; 1; a;}
|
||||
// { rank=same; 2; b;}
|
||||
// { rank=same; 3; c;}
|
||||
// { rank=same; 5; e;}
|
||||
// { rank=same; 6; f;}
|
||||
// { rank=same; 7; g;}
|
||||
// { rank=same; 8; h;}
|
||||
//
|
||||
// 1->2->3->4->5->6->7->8->9[arrowhead=none];
|
||||
//}
|
||||
func TestReceiveBlock_OnChainSplit(t *testing.T) {
|
||||
// The scenario to test is that we think that the canonical head is block H
|
||||
// and then we receive block G. We don't have block F, so we request it. Then
|
||||
// we process F, the G. The expected behavior is that we load the historical
|
||||
// state from slot 3 where the common ancestor block C is present.
|
||||
|
||||
db := internal.SetupDB(t)
|
||||
defer internal.TeardownDB(t, db)
|
||||
ctx := context.Background()
|
||||
|
||||
chainService := setupBeaconChain(t, db, nil)
|
||||
deposits, privKeys := setupInitialDeposits(t, 100)
|
||||
eth1Data := &pb.Eth1Data{
|
||||
DepositRootHash32: []byte{},
|
||||
BlockHash32: []byte{},
|
||||
}
|
||||
beaconState, err := state.GenesisBeaconState(deposits, 0, eth1Data)
|
||||
if err != nil {
|
||||
t.Fatalf("Can't generate genesis state: %v", err)
|
||||
}
|
||||
stateRoot, err := hashutil.HashProto(beaconState)
|
||||
if err != nil {
|
||||
t.Fatalf("Could not tree hash state: %v", err)
|
||||
}
|
||||
parentHash, genesisBlock := setupGenesisBlock(t, chainService)
|
||||
beaconState.LatestBlock = genesisBlock
|
||||
if err := db.UpdateChainHead(ctx, genesisBlock, beaconState); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := db.SaveFinalizedState(beaconState); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
genesisSlot := params.BeaconConfig().GenesisSlot
|
||||
|
||||
// Top chain slots (see graph)
|
||||
blockSlots := []uint64{1, 2, 3, 5, 8}
|
||||
for _, slot := range blockSlots {
|
||||
block := &pb.BeaconBlock{
|
||||
Slot: genesisSlot + slot,
|
||||
StateRootHash32: stateRoot[:],
|
||||
ParentRootHash32: parentHash[:],
|
||||
RandaoReveal: createRandaoReveal(t, beaconState, privKeys),
|
||||
Body: &pb.BeaconBlockBody{},
|
||||
}
|
||||
initBlockStateRoot(t, block, chainService)
|
||||
computedState, err := chainService.ReceiveBlock(ctx, block)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
stateRoot, err = hashutil.HashProto(computedState)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err = db.SaveBlock(block); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
computedState.LatestBlock = block
|
||||
if err = db.UpdateChainHead(ctx, block, computedState); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
parentHash, err = hashutil.HashBeaconBlock(block)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Common ancestor is block at slot 3
|
||||
commonAncestor, err := db.CanonicalBlockBySlot(ctx, genesisSlot+3)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
parentHash, err = hashutil.HashBeaconBlock(commonAncestor)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
beaconState, err = db.HistoricalStateFromSlot(ctx, commonAncestor.Slot, parentHash)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
stateRoot, err = hashutil.HashProto(beaconState)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Then we receive the block `f` from slot 6
|
||||
blockF := &pb.BeaconBlock{
|
||||
Slot: genesisSlot + 6,
|
||||
ParentRootHash32: parentHash[:],
|
||||
StateRootHash32: stateRoot[:],
|
||||
RandaoReveal: createRandaoReveal(t, beaconState, privKeys),
|
||||
Body: &pb.BeaconBlockBody{},
|
||||
}
|
||||
rootF, _ := hashutil.HashBeaconBlock(blockF)
|
||||
if err := db.SaveHistoricalState(ctx, beaconState, rootF); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
initBlockStateRoot(t, blockF, chainService)
|
||||
|
||||
computedState, err := chainService.ReceiveBlock(ctx, blockF)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
stateRoot, err = hashutil.HashProto(computedState)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := db.SaveBlock(blockF); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
parentHash, err = hashutil.HashBeaconBlock(blockF)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Then we apply block `g` from slot 7
|
||||
blockG := &pb.BeaconBlock{
|
||||
Slot: genesisSlot + 7,
|
||||
ParentRootHash32: parentHash[:],
|
||||
StateRootHash32: stateRoot[:],
|
||||
RandaoReveal: createRandaoReveal(t, computedState, privKeys),
|
||||
Body: &pb.BeaconBlockBody{},
|
||||
}
|
||||
initBlockStateRoot(t, blockG, chainService)
|
||||
|
||||
computedState, err = chainService.ReceiveBlock(ctx, blockG)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if computedState.Slot != blockG.Slot {
|
||||
t.Errorf("Unexpect state slot %d, wanted %d", computedState.Slot, blockG.Slot)
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsBlockReadyForProcessing_ValidBlock(t *testing.T) {
|
||||
db := internal.SetupDB(t)
|
||||
defer internal.TeardownDB(t, db)
|
||||
ctx := context.Background()
|
||||
|
||||
chainService := setupBeaconChain(t, db, nil)
|
||||
unixTime := uint64(time.Now().Unix())
|
||||
deposits, privKeys := setupInitialDeposits(t, 100)
|
||||
if err := db.InitializeState(context.Background(), unixTime, deposits, &pb.Eth1Data{}); err != nil {
|
||||
t.Fatalf("Could not initialize beacon state to disk: %v", err)
|
||||
}
|
||||
beaconState, err := db.HeadState(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("Can't get genesis state: %v", err)
|
||||
}
|
||||
block := &pb.BeaconBlock{
|
||||
ParentRootHash32: []byte{'a'},
|
||||
}
|
||||
|
||||
if err := chainService.VerifyBlockValidity(ctx, block, beaconState); err == nil {
|
||||
t.Fatal("block processing succeeded despite block having no parent saved")
|
||||
}
|
||||
|
||||
beaconState.Slot = params.BeaconConfig().GenesisSlot + 10
|
||||
|
||||
stateRoot, err := hashutil.HashProto(beaconState)
|
||||
if err != nil {
|
||||
t.Fatalf("Could not tree hash state: %v", err)
|
||||
}
|
||||
genesis := b.NewGenesisBlock([]byte{})
|
||||
if err := chainService.beaconDB.SaveBlock(genesis); err != nil {
|
||||
t.Fatalf("cannot save block: %v", err)
|
||||
}
|
||||
parentRoot, err := hashutil.HashBeaconBlock(genesis)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to get root of canonical head: %v", err)
|
||||
}
|
||||
|
||||
beaconState.LatestEth1Data = &pb.Eth1Data{
|
||||
DepositRootHash32: []byte{2},
|
||||
BlockHash32: []byte{3},
|
||||
}
|
||||
beaconState.Slot = params.BeaconConfig().GenesisSlot
|
||||
|
||||
currentSlot := params.BeaconConfig().GenesisSlot + 1
|
||||
attestationSlot := params.BeaconConfig().GenesisSlot
|
||||
|
||||
randaoReveal := createRandaoReveal(t, beaconState, privKeys)
|
||||
block2 := &pb.BeaconBlock{
|
||||
Slot: currentSlot,
|
||||
StateRootHash32: stateRoot[:],
|
||||
ParentRootHash32: parentRoot[:],
|
||||
RandaoReveal: randaoReveal,
|
||||
Eth1Data: &pb.Eth1Data{
|
||||
DepositRootHash32: []byte("a"),
|
||||
BlockHash32: []byte("b"),
|
||||
},
|
||||
Body: &pb.BeaconBlockBody{
|
||||
Attestations: []*pb.Attestation{{
|
||||
AggregationBitfield: []byte{128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
|
||||
Data: &pb.AttestationData{
|
||||
Slot: attestationSlot,
|
||||
JustifiedBlockRootHash32: parentRoot[:],
|
||||
},
|
||||
}},
|
||||
},
|
||||
}
|
||||
|
||||
if err := chainService.VerifyBlockValidity(ctx, block2, beaconState); err != nil {
|
||||
t.Fatalf("block processing failed despite being a valid block: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDeleteValidatorIdx_DeleteWorks(t *testing.T) {
|
||||
db := internal.SetupDB(t)
|
||||
defer internal.TeardownDB(t, db)
|
||||
epoch := uint64(2)
|
||||
v.InsertActivatedIndices(epoch+1, []uint64{0, 1, 2})
|
||||
v.InsertExitedVal(epoch+1, []uint64{0, 2})
|
||||
var validators []*pb.Validator
|
||||
for i := 0; i < 3; i++ {
|
||||
pubKeyBuf := make([]byte, params.BeaconConfig().BLSPubkeyLength)
|
||||
binary.PutUvarint(pubKeyBuf, uint64(i))
|
||||
validators = append(validators, &pb.Validator{
|
||||
Pubkey: pubKeyBuf,
|
||||
})
|
||||
}
|
||||
state := &pb.BeaconState{
|
||||
ValidatorRegistry: validators,
|
||||
Slot: epoch * params.BeaconConfig().SlotsPerEpoch,
|
||||
}
|
||||
chainService := setupBeaconChain(t, db, nil)
|
||||
if err := chainService.saveValidatorIdx(state); err != nil {
|
||||
t.Fatalf("Could not save validator idx: %v", err)
|
||||
}
|
||||
if err := chainService.deleteValidatorIdx(state); err != nil {
|
||||
t.Fatalf("Could not delete validator idx: %v", err)
|
||||
}
|
||||
wantedIdx := uint64(1)
|
||||
idx, err := chainService.beaconDB.ValidatorIndex(validators[wantedIdx].Pubkey)
|
||||
if err != nil {
|
||||
t.Fatalf("Could not get validator index: %v", err)
|
||||
}
|
||||
if wantedIdx != idx {
|
||||
t.Errorf("Wanted: %d, got: %d", wantedIdx, idx)
|
||||
}
|
||||
|
||||
wantedIdx = uint64(2)
|
||||
if chainService.beaconDB.HasValidator(validators[wantedIdx].Pubkey) {
|
||||
t.Errorf("Validator index %d should have been deleted", wantedIdx)
|
||||
}
|
||||
if v.ExitedValFromEpoch(epoch) != nil {
|
||||
t.Errorf("Activated validators mapping for epoch %d still there", epoch)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSaveValidatorIdx_SaveRetrieveWorks(t *testing.T) {
|
||||
db := internal.SetupDB(t)
|
||||
defer internal.TeardownDB(t, db)
|
||||
epoch := uint64(1)
|
||||
v.InsertActivatedIndices(epoch+1, []uint64{0, 1, 2})
|
||||
var validators []*pb.Validator
|
||||
for i := 0; i < 3; i++ {
|
||||
pubKeyBuf := make([]byte, params.BeaconConfig().BLSPubkeyLength)
|
||||
binary.PutUvarint(pubKeyBuf, uint64(i))
|
||||
validators = append(validators, &pb.Validator{
|
||||
Pubkey: pubKeyBuf,
|
||||
})
|
||||
}
|
||||
state := &pb.BeaconState{
|
||||
ValidatorRegistry: validators,
|
||||
Slot: epoch * params.BeaconConfig().SlotsPerEpoch,
|
||||
}
|
||||
chainService := setupBeaconChain(t, db, nil)
|
||||
if err := chainService.saveValidatorIdx(state); err != nil {
|
||||
t.Fatalf("Could not save validator idx: %v", err)
|
||||
}
|
||||
|
||||
wantedIdx := uint64(2)
|
||||
idx, err := chainService.beaconDB.ValidatorIndex(validators[wantedIdx].Pubkey)
|
||||
if err != nil {
|
||||
t.Fatalf("Could not get validator index: %v", err)
|
||||
}
|
||||
if wantedIdx != idx {
|
||||
t.Errorf("Wanted: %d, got: %d", wantedIdx, idx)
|
||||
}
|
||||
|
||||
if v.ActivatedValFromEpoch(epoch) != nil {
|
||||
t.Errorf("Activated validators mapping for epoch %d still there", epoch)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSaveValidatorIdx_IdxNotInState(t *testing.T) {
|
||||
db := internal.SetupDB(t)
|
||||
defer internal.TeardownDB(t, db)
|
||||
epoch := uint64(100)
|
||||
|
||||
// Tried to insert 5 active indices to DB with only 3 validators in state.
|
||||
v.InsertActivatedIndices(epoch+1, []uint64{0, 1, 2, 3, 4})
|
||||
var validators []*pb.Validator
|
||||
for i := 0; i < 3; i++ {
|
||||
pubKeyBuf := make([]byte, params.BeaconConfig().BLSPubkeyLength)
|
||||
binary.PutUvarint(pubKeyBuf, uint64(i))
|
||||
validators = append(validators, &pb.Validator{
|
||||
Pubkey: pubKeyBuf,
|
||||
})
|
||||
}
|
||||
state := &pb.BeaconState{
|
||||
ValidatorRegistry: validators,
|
||||
Slot: epoch * params.BeaconConfig().SlotsPerEpoch,
|
||||
}
|
||||
chainService := setupBeaconChain(t, db, nil)
|
||||
if err := chainService.saveValidatorIdx(state); err != nil {
|
||||
t.Fatalf("Could not save validator idx: %v", err)
|
||||
}
|
||||
|
||||
wantedIdx := uint64(2)
|
||||
idx, err := chainService.beaconDB.ValidatorIndex(validators[wantedIdx].Pubkey)
|
||||
if err != nil {
|
||||
t.Fatalf("Could not get validator index: %v", err)
|
||||
}
|
||||
if wantedIdx != idx {
|
||||
t.Errorf("Wanted: %d, got: %d", wantedIdx, idx)
|
||||
}
|
||||
|
||||
if v.ActivatedValFromEpoch(epoch) != nil {
|
||||
t.Errorf("Activated validators mapping for epoch %d still there", epoch)
|
||||
}
|
||||
|
||||
// Verify the skipped validators are included in the next epoch.
|
||||
if !reflect.DeepEqual(v.ActivatedValFromEpoch(epoch+2), []uint64{3, 4}) {
|
||||
t.Error("Did not get wanted validator from activation queue")
|
||||
}
|
||||
}
|
||||
200
beacon-chain/blockchain/chain_info.go
Normal file
200
beacon-chain/blockchain/chain_info.go
Normal file
@@ -0,0 +1,200 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
)
|
||||
|
||||
// ChainInfoFetcher defines a common interface for methods in blockchain service which
|
||||
// directly retrieves chain info related data.
|
||||
type ChainInfoFetcher interface {
|
||||
HeadFetcher
|
||||
CanonicalRootFetcher
|
||||
FinalizationFetcher
|
||||
}
|
||||
|
||||
// GenesisTimeFetcher retrieves the Eth2 genesis timestamp.
|
||||
type GenesisTimeFetcher interface {
|
||||
GenesisTime() time.Time
|
||||
}
|
||||
|
||||
// HeadFetcher defines a common interface for methods in blockchain service which
|
||||
// directly retrieves head related data.
|
||||
type HeadFetcher interface {
|
||||
HeadSlot() uint64
|
||||
HeadRoot() []byte
|
||||
HeadBlock() *ethpb.SignedBeaconBlock
|
||||
HeadState(ctx context.Context) (*pb.BeaconState, error)
|
||||
HeadValidatorsIndices(epoch uint64) ([]uint64, error)
|
||||
HeadSeed(epoch uint64) ([32]byte, error)
|
||||
}
|
||||
|
||||
// CanonicalRootFetcher defines a common interface for methods in blockchain service which
|
||||
// directly retrieves canonical roots related data.
|
||||
type CanonicalRootFetcher interface {
|
||||
CanonicalRoot(slot uint64) []byte
|
||||
}
|
||||
|
||||
// ForkFetcher retrieves the current fork information of the Ethereum beacon chain.
|
||||
type ForkFetcher interface {
|
||||
CurrentFork() *pb.Fork
|
||||
}
|
||||
|
||||
// FinalizationFetcher defines a common interface for methods in blockchain service which
|
||||
// directly retrieves finalization and justification related data.
|
||||
type FinalizationFetcher interface {
|
||||
FinalizedCheckpt() *ethpb.Checkpoint
|
||||
CurrentJustifiedCheckpt() *ethpb.Checkpoint
|
||||
PreviousJustifiedCheckpt() *ethpb.Checkpoint
|
||||
}
|
||||
|
||||
// ParticipationFetcher defines a common interface for methods in blockchain service which
|
||||
// directly retrieves validator participation related data.
|
||||
type ParticipationFetcher interface {
|
||||
Participation(epoch uint64) *precompute.Balance
|
||||
}
|
||||
|
||||
// FinalizedCheckpt returns the latest finalized checkpoint from head state.
|
||||
func (s *Service) FinalizedCheckpt() *ethpb.Checkpoint {
|
||||
if s.headState == nil || s.headState.FinalizedCheckpoint == nil {
|
||||
return ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
}
|
||||
|
||||
// If head state exists but there hasn't been a finalized check point,
|
||||
// the check point's root should refer to genesis block root.
|
||||
if bytes.Equal(s.headState.FinalizedCheckpoint.Root, params.BeaconConfig().ZeroHash[:]) {
|
||||
return ðpb.Checkpoint{Root: s.genesisRoot[:]}
|
||||
}
|
||||
|
||||
return s.headState.FinalizedCheckpoint
|
||||
}
|
||||
|
||||
// CurrentJustifiedCheckpt returns the current justified checkpoint from head state.
|
||||
func (s *Service) CurrentJustifiedCheckpt() *ethpb.Checkpoint {
|
||||
if s.headState == nil || s.headState.CurrentJustifiedCheckpoint == nil {
|
||||
return ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
}
|
||||
|
||||
// If head state exists but there hasn't been a justified check point,
|
||||
// the check point root should refer to genesis block root.
|
||||
if bytes.Equal(s.headState.CurrentJustifiedCheckpoint.Root, params.BeaconConfig().ZeroHash[:]) {
|
||||
return ðpb.Checkpoint{Root: s.genesisRoot[:]}
|
||||
}
|
||||
|
||||
return s.headState.CurrentJustifiedCheckpoint
|
||||
}
|
||||
|
||||
// PreviousJustifiedCheckpt returns the previous justified checkpoint from head state.
|
||||
func (s *Service) PreviousJustifiedCheckpt() *ethpb.Checkpoint {
|
||||
if s.headState == nil || s.headState.PreviousJustifiedCheckpoint == nil {
|
||||
return ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
}
|
||||
|
||||
// If head state exists but there hasn't been a justified check point,
|
||||
// the check point root should refer to genesis block root.
|
||||
if bytes.Equal(s.headState.PreviousJustifiedCheckpoint.Root, params.BeaconConfig().ZeroHash[:]) {
|
||||
return ðpb.Checkpoint{Root: s.genesisRoot[:]}
|
||||
}
|
||||
|
||||
return s.headState.PreviousJustifiedCheckpoint
|
||||
}
|
||||
|
||||
// HeadSlot returns the slot of the head of the chain.
|
||||
func (s *Service) HeadSlot() uint64 {
|
||||
s.headLock.RLock()
|
||||
defer s.headLock.RUnlock()
|
||||
|
||||
return s.headSlot
|
||||
}
|
||||
|
||||
// HeadRoot returns the root of the head of the chain.
|
||||
func (s *Service) HeadRoot() []byte {
|
||||
s.headLock.RLock()
|
||||
defer s.headLock.RUnlock()
|
||||
|
||||
root := s.canonicalRoots[s.headSlot]
|
||||
if len(root) != 0 {
|
||||
return root
|
||||
}
|
||||
|
||||
return params.BeaconConfig().ZeroHash[:]
|
||||
}
|
||||
|
||||
// HeadBlock returns the head block of the chain.
|
||||
func (s *Service) HeadBlock() *ethpb.SignedBeaconBlock {
|
||||
s.headLock.RLock()
|
||||
defer s.headLock.RUnlock()
|
||||
|
||||
return proto.Clone(s.headBlock).(*ethpb.SignedBeaconBlock)
|
||||
}
|
||||
|
||||
// HeadState returns the head state of the chain.
|
||||
// If the head state is nil from service struct,
|
||||
// it will attempt to get from DB and error if nil again.
|
||||
func (s *Service) HeadState(ctx context.Context) (*pb.BeaconState, error) {
|
||||
s.headLock.RLock()
|
||||
defer s.headLock.RUnlock()
|
||||
|
||||
if s.headState == nil {
|
||||
return s.beaconDB.HeadState(ctx)
|
||||
}
|
||||
|
||||
return proto.Clone(s.headState).(*pb.BeaconState), nil
|
||||
}
|
||||
|
||||
// HeadValidatorsIndices returns a list of active validator indices from the head view of a given epoch.
|
||||
func (s *Service) HeadValidatorsIndices(epoch uint64) ([]uint64, error) {
|
||||
if s.headState == nil {
|
||||
return []uint64{}, nil
|
||||
}
|
||||
return helpers.ActiveValidatorIndices(s.headState, epoch)
|
||||
}
|
||||
|
||||
// HeadSeed returns the seed from the head view of a given epoch.
|
||||
func (s *Service) HeadSeed(epoch uint64) ([32]byte, error) {
|
||||
if s.headState == nil {
|
||||
return [32]byte{}, nil
|
||||
}
|
||||
|
||||
return helpers.Seed(s.headState, epoch, params.BeaconConfig().DomainBeaconAttester)
|
||||
}
|
||||
|
||||
// CanonicalRoot returns the canonical root of a given slot.
|
||||
func (s *Service) CanonicalRoot(slot uint64) []byte {
|
||||
s.headLock.RLock()
|
||||
defer s.headLock.RUnlock()
|
||||
|
||||
return s.canonicalRoots[slot]
|
||||
}
|
||||
|
||||
// GenesisTime returns the genesis time of beacon chain.
|
||||
func (s *Service) GenesisTime() time.Time {
|
||||
return s.genesisTime
|
||||
}
|
||||
|
||||
// CurrentFork retrieves the latest fork information of the beacon chain.
|
||||
func (s *Service) CurrentFork() *pb.Fork {
|
||||
if s.headState == nil {
|
||||
return &pb.Fork{
|
||||
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
}
|
||||
}
|
||||
return proto.Clone(s.headState.Fork).(*pb.Fork)
|
||||
}
|
||||
|
||||
// Participation returns the participation stats of a given epoch.
|
||||
func (s *Service) Participation(epoch uint64) *precompute.Balance {
|
||||
s.epochParticipationLock.RLock()
|
||||
defer s.epochParticipationLock.RUnlock()
|
||||
|
||||
return s.epochParticipation[epoch]
|
||||
}
|
||||
77
beacon-chain/blockchain/chain_info_norace_test.go
Normal file
77
beacon-chain/blockchain/chain_info_norace_test.go
Normal file
@@ -0,0 +1,77 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
)
|
||||
|
||||
func TestHeadSlot_DataRace(t *testing.T) {
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
s := &Service{
|
||||
beaconDB: db,
|
||||
canonicalRoots: make(map[uint64][]byte),
|
||||
}
|
||||
go func() {
|
||||
s.saveHead(
|
||||
context.Background(),
|
||||
ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: 777}},
|
||||
[32]byte{},
|
||||
)
|
||||
}()
|
||||
s.HeadSlot()
|
||||
}
|
||||
|
||||
func TestHeadRoot_DataRace(t *testing.T) {
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
s := &Service{
|
||||
beaconDB: db,
|
||||
canonicalRoots: make(map[uint64][]byte),
|
||||
}
|
||||
go func() {
|
||||
s.saveHead(
|
||||
context.Background(),
|
||||
ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: 777}},
|
||||
[32]byte{},
|
||||
)
|
||||
}()
|
||||
s.HeadRoot()
|
||||
}
|
||||
|
||||
func TestHeadBlock_DataRace(t *testing.T) {
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
s := &Service{
|
||||
beaconDB: db,
|
||||
canonicalRoots: make(map[uint64][]byte),
|
||||
}
|
||||
go func() {
|
||||
s.saveHead(
|
||||
context.Background(),
|
||||
ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: 777}},
|
||||
[32]byte{},
|
||||
)
|
||||
}()
|
||||
s.HeadBlock()
|
||||
}
|
||||
|
||||
func TestHeadState_DataRace(t *testing.T) {
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
s := &Service{
|
||||
beaconDB: db,
|
||||
canonicalRoots: make(map[uint64][]byte),
|
||||
}
|
||||
go func() {
|
||||
s.saveHead(
|
||||
context.Background(),
|
||||
ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: 777}},
|
||||
[32]byte{},
|
||||
)
|
||||
}()
|
||||
s.HeadState(context.Background())
|
||||
}
|
||||
184
beacon-chain/blockchain/chain_info_test.go
Normal file
184
beacon-chain/blockchain/chain_info_test.go
Normal file
@@ -0,0 +1,184 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
)
|
||||
|
||||
// Ensure Service implements chain info interface.
|
||||
var _ = ChainInfoFetcher(&Service{})
|
||||
var _ = GenesisTimeFetcher(&Service{})
|
||||
var _ = ForkFetcher(&Service{})
|
||||
|
||||
func TestFinalizedCheckpt_Nil(t *testing.T) {
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
c := setupBeaconChain(t, db)
|
||||
c.headState, _ = testutil.DeterministicGenesisState(t, 1)
|
||||
if !bytes.Equal(c.FinalizedCheckpt().Root, params.BeaconConfig().ZeroHash[:]) {
|
||||
t.Error("Incorrect pre chain start value")
|
||||
}
|
||||
}
|
||||
|
||||
func TestHeadRoot_Nil(t *testing.T) {
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
c := setupBeaconChain(t, db)
|
||||
if !bytes.Equal(c.HeadRoot(), params.BeaconConfig().ZeroHash[:]) {
|
||||
t.Error("Incorrect pre chain start value")
|
||||
}
|
||||
}
|
||||
|
||||
func TestFinalizedCheckpt_CanRetrieve(t *testing.T) {
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
cp := ðpb.Checkpoint{Epoch: 5}
|
||||
c := setupBeaconChain(t, db)
|
||||
c.headState = &pb.BeaconState{FinalizedCheckpoint: cp}
|
||||
|
||||
if c.FinalizedCheckpt().Epoch != cp.Epoch {
|
||||
t.Errorf("Finalized epoch at genesis should be %d, got: %d", cp.Epoch, c.FinalizedCheckpt().Epoch)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFinalizedCheckpt_GenesisRootOk(t *testing.T) {
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
cp := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
c := setupBeaconChain(t, db)
|
||||
c.headState = &pb.BeaconState{FinalizedCheckpoint: cp}
|
||||
c.genesisRoot = [32]byte{'A'}
|
||||
|
||||
if !bytes.Equal(c.FinalizedCheckpt().Root, c.genesisRoot[:]) {
|
||||
t.Errorf("Got: %v, wanted: %v", c.FinalizedCheckpt().Root, c.genesisRoot[:])
|
||||
}
|
||||
}
|
||||
|
||||
func TestCurrentJustifiedCheckpt_CanRetrieve(t *testing.T) {
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
cp := ðpb.Checkpoint{Epoch: 6}
|
||||
c := setupBeaconChain(t, db)
|
||||
c.headState = &pb.BeaconState{CurrentJustifiedCheckpoint: cp}
|
||||
|
||||
if c.CurrentJustifiedCheckpt().Epoch != cp.Epoch {
|
||||
t.Errorf("Current Justifiied epoch at genesis should be %d, got: %d", cp.Epoch, c.CurrentJustifiedCheckpt().Epoch)
|
||||
}
|
||||
}
|
||||
|
||||
func TestJustifiedCheckpt_GenesisRootOk(t *testing.T) {
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
cp := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
c := setupBeaconChain(t, db)
|
||||
c.headState = &pb.BeaconState{CurrentJustifiedCheckpoint: cp}
|
||||
c.genesisRoot = [32]byte{'B'}
|
||||
|
||||
if !bytes.Equal(c.CurrentJustifiedCheckpt().Root, c.genesisRoot[:]) {
|
||||
t.Errorf("Got: %v, wanted: %v", c.CurrentJustifiedCheckpt().Root, c.genesisRoot[:])
|
||||
}
|
||||
}
|
||||
|
||||
func TestPreviousJustifiedCheckpt_CanRetrieve(t *testing.T) {
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
cp := ðpb.Checkpoint{Epoch: 7}
|
||||
c := setupBeaconChain(t, db)
|
||||
c.headState = &pb.BeaconState{PreviousJustifiedCheckpoint: cp}
|
||||
|
||||
if c.PreviousJustifiedCheckpt().Epoch != cp.Epoch {
|
||||
t.Errorf("Previous Justifiied epoch at genesis should be %d, got: %d", cp.Epoch, c.PreviousJustifiedCheckpt().Epoch)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPrevJustifiedCheckpt_GenesisRootOk(t *testing.T) {
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
cp := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
c := setupBeaconChain(t, db)
|
||||
c.headState = &pb.BeaconState{PreviousJustifiedCheckpoint: cp}
|
||||
c.genesisRoot = [32]byte{'C'}
|
||||
|
||||
if !bytes.Equal(c.PreviousJustifiedCheckpt().Root, c.genesisRoot[:]) {
|
||||
t.Errorf("Got: %v, wanted: %v", c.PreviousJustifiedCheckpt().Root, c.genesisRoot[:])
|
||||
}
|
||||
}
|
||||
|
||||
func TestHeadSlot_CanRetrieve(t *testing.T) {
|
||||
c := &Service{}
|
||||
c.headSlot = 100
|
||||
if c.HeadSlot() != 100 {
|
||||
t.Errorf("Wanted head slot: %d, got: %d", 100, c.HeadSlot())
|
||||
}
|
||||
}
|
||||
|
||||
func TestHeadRoot_CanRetrieve(t *testing.T) {
|
||||
c := &Service{canonicalRoots: make(map[uint64][]byte)}
|
||||
c.headSlot = 100
|
||||
c.canonicalRoots[c.headSlot] = []byte{'A'}
|
||||
if !bytes.Equal([]byte{'A'}, c.HeadRoot()) {
|
||||
t.Errorf("Wanted head root: %v, got: %d", []byte{'A'}, c.HeadRoot())
|
||||
}
|
||||
}
|
||||
|
||||
func TestHeadBlock_CanRetrieve(t *testing.T) {
|
||||
b := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: 1}}
|
||||
c := &Service{headBlock: b}
|
||||
if !reflect.DeepEqual(b, c.HeadBlock()) {
|
||||
t.Error("incorrect head block received")
|
||||
}
|
||||
}
|
||||
|
||||
func TestHeadState_CanRetrieve(t *testing.T) {
|
||||
s := &pb.BeaconState{Slot: 2}
|
||||
c := &Service{headState: s}
|
||||
headState, err := c.HeadState(context.Background())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(s, headState) {
|
||||
t.Error("incorrect head state received")
|
||||
}
|
||||
}
|
||||
|
||||
func TestGenesisTime_CanRetrieve(t *testing.T) {
|
||||
c := &Service{genesisTime: time.Unix(999, 0)}
|
||||
wanted := time.Unix(999, 0)
|
||||
if c.GenesisTime() != wanted {
|
||||
t.Error("Did not get wanted genesis time")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCurrentFork_CanRetrieve(t *testing.T) {
|
||||
f := &pb.Fork{Epoch: 999}
|
||||
s := &pb.BeaconState{Fork: f}
|
||||
c := &Service{headState: s}
|
||||
if !reflect.DeepEqual(c.CurrentFork(), f) {
|
||||
t.Error("Recieved incorrect fork version")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCanonicalRoot_CanRetrieve(t *testing.T) {
|
||||
c := &Service{canonicalRoots: make(map[uint64][]byte)}
|
||||
slot := uint64(123)
|
||||
r := []byte{'B'}
|
||||
c.canonicalRoots[slot] = r
|
||||
if !bytes.Equal(r, c.CanonicalRoot(slot)) {
|
||||
t.Errorf("Wanted head root: %v, got: %d", []byte{'A'}, c.CanonicalRoot(slot))
|
||||
}
|
||||
}
|
||||
@@ -1,498 +0,0 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/hashutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
var (
|
||||
reorgCount = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "reorg_counter",
|
||||
Help: "The number of chain reorganization events that have happened in the fork choice rule",
|
||||
})
|
||||
)
|
||||
var blkAncestorCache = cache.NewBlockAncestorCache()
|
||||
|
||||
// ForkChoice interface defines the methods for applying fork choice rule
|
||||
// operations to the blockchain.
|
||||
type ForkChoice interface {
|
||||
ApplyForkChoiceRule(ctx context.Context, block *pb.BeaconBlock, computedState *pb.BeaconState) error
|
||||
}
|
||||
|
||||
// TargetsFetcher defines a struct which can retrieve latest attestation targets
|
||||
// from a given justified state.
|
||||
type TargetsFetcher interface {
|
||||
AttestationTargets(justifiedState *pb.BeaconState) (map[uint64]*pb.AttestationTarget, error)
|
||||
}
|
||||
|
||||
// updateFFGCheckPts checks whether the existing FFG check points saved in DB
|
||||
// are not older than the ones just processed in state. If it's older, we update
|
||||
// the db with the latest FFG check points, both justification and finalization.
|
||||
func (c *ChainService) updateFFGCheckPts(ctx context.Context, state *pb.BeaconState) error {
|
||||
lastJustifiedSlot := helpers.StartSlot(state.JustifiedEpoch)
|
||||
savedJustifiedBlock, err := c.beaconDB.JustifiedBlock()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// If the last processed justification slot in state is greater than
|
||||
// the slot of justified block saved in DB.
|
||||
if lastJustifiedSlot > savedJustifiedBlock.Slot {
|
||||
// Retrieve the new justified block from DB using the new justified slot and save it.
|
||||
newJustifiedBlock, err := c.beaconDB.CanonicalBlockBySlot(ctx, lastJustifiedSlot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// If the new justified slot is a skip slot in db then we keep getting it's ancestors
|
||||
// until we can get a block.
|
||||
lastAvailBlkSlot := lastJustifiedSlot
|
||||
for newJustifiedBlock == nil {
|
||||
log.WithField("slot", lastAvailBlkSlot-params.BeaconConfig().GenesisSlot).Debug("Missing block in DB, looking one slot back")
|
||||
lastAvailBlkSlot--
|
||||
newJustifiedBlock, err = c.beaconDB.CanonicalBlockBySlot(ctx, lastAvailBlkSlot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
newJustifiedRoot, err := hashutil.HashBeaconBlock(newJustifiedBlock)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Fetch justified state from historical states db.
|
||||
newJustifiedState, err := c.beaconDB.HistoricalStateFromSlot(ctx, newJustifiedBlock.Slot, newJustifiedRoot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := c.beaconDB.SaveJustifiedBlock(newJustifiedBlock); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := c.beaconDB.SaveJustifiedState(newJustifiedState); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
lastFinalizedSlot := helpers.StartSlot(state.FinalizedEpoch)
|
||||
savedFinalizedBlock, err := c.beaconDB.FinalizedBlock()
|
||||
// If the last processed finalized slot in state is greater than
|
||||
// the slot of finalized block saved in DB.
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if lastFinalizedSlot > savedFinalizedBlock.Slot {
|
||||
// Retrieve the new finalized block from DB using the new finalized slot and save it.
|
||||
newFinalizedBlock, err := c.beaconDB.CanonicalBlockBySlot(ctx, lastFinalizedSlot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// If the new finalized slot is a skip slot in db then we keep getting it's ancestors
|
||||
// until we can get a block.
|
||||
lastAvailBlkSlot := lastFinalizedSlot
|
||||
for newFinalizedBlock == nil {
|
||||
log.WithField("slot", lastAvailBlkSlot-params.BeaconConfig().GenesisSlot).Debug("Missing block in DB, looking one slot back")
|
||||
lastAvailBlkSlot--
|
||||
newFinalizedBlock, err = c.beaconDB.CanonicalBlockBySlot(ctx, lastAvailBlkSlot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
newFinalizedRoot, err := hashutil.HashBeaconBlock(newFinalizedBlock)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Generate the new finalized state with using new finalized block and
|
||||
// save it.
|
||||
newFinalizedState, err := c.beaconDB.HistoricalStateFromSlot(ctx, lastFinalizedSlot, newFinalizedRoot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := c.beaconDB.SaveFinalizedBlock(newFinalizedBlock); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := c.beaconDB.SaveFinalizedState(newFinalizedState); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ApplyForkChoiceRule determines the current beacon chain head using LMD
|
||||
// GHOST as a block-vote weighted function to select a canonical head in
|
||||
// Ethereum Serenity. The inputs are the the recently processed block and its
|
||||
// associated state.
|
||||
func (c *ChainService) ApplyForkChoiceRule(
|
||||
ctx context.Context,
|
||||
block *pb.BeaconBlock,
|
||||
postState *pb.BeaconState,
|
||||
) error {
|
||||
ctx, span := trace.StartSpan(ctx, "beacon-chain.blockchain.ApplyForkChoiceRule")
|
||||
defer span.End()
|
||||
log.Info("Applying LMD-GHOST Fork Choice Rule")
|
||||
|
||||
justifiedState, err := c.beaconDB.JustifiedState()
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not retrieve justified state: %v", err)
|
||||
}
|
||||
attestationTargets, err := c.AttestationTargets(justifiedState)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not retrieve attestation target: %v", err)
|
||||
}
|
||||
justifiedHead, err := c.beaconDB.JustifiedBlock()
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not retrieve justified head: %v", err)
|
||||
}
|
||||
|
||||
newHead, err := c.lmdGhost(ctx, justifiedHead, justifiedState, attestationTargets)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not run fork choice: %v", err)
|
||||
}
|
||||
newHeadRoot, err := hashutil.HashBeaconBlock(newHead)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not hash new head block: %v", err)
|
||||
}
|
||||
c.canonicalBlocksLock.Lock()
|
||||
defer c.canonicalBlocksLock.Unlock()
|
||||
c.canonicalBlocks[newHead.Slot] = newHeadRoot[:]
|
||||
|
||||
currentHead, err := c.beaconDB.ChainHead()
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not retrieve chain head: %v", err)
|
||||
}
|
||||
currentHeadRoot, err := hashutil.HashBeaconBlock(currentHead)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not hash current head block: %v", err)
|
||||
}
|
||||
|
||||
isDescendant, err := c.isDescendant(currentHead, newHead)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not check if block is descendant: %v", err)
|
||||
}
|
||||
|
||||
newState := postState
|
||||
if !isDescendant && !proto.Equal(currentHead, newHead) {
|
||||
log.WithFields(logrus.Fields{
|
||||
"currentSlot": currentHead.Slot - params.BeaconConfig().GenesisSlot,
|
||||
"currentRoot": fmt.Sprintf("%#x", bytesutil.Trunc(currentHeadRoot[:])),
|
||||
"newSlot": newHead.Slot - params.BeaconConfig().GenesisSlot,
|
||||
"newRoot": fmt.Sprintf("%#x", bytesutil.Trunc(newHeadRoot[:])),
|
||||
}).Warn("Reorg happened")
|
||||
// Only regenerate head state if there was a reorg.
|
||||
newState, err = c.beaconDB.HistoricalStateFromSlot(ctx, newHead.Slot, newHeadRoot)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not gen state: %v", err)
|
||||
}
|
||||
|
||||
for revertedSlot := currentHead.Slot; revertedSlot > newHead.Slot; revertedSlot-- {
|
||||
delete(c.canonicalBlocks, revertedSlot)
|
||||
}
|
||||
reorgCount.Inc()
|
||||
}
|
||||
|
||||
if proto.Equal(currentHead, newHead) {
|
||||
log.WithFields(logrus.Fields{
|
||||
"currentSlot": currentHead.Slot - params.BeaconConfig().GenesisSlot,
|
||||
"currentRoot": fmt.Sprintf("%#x", bytesutil.Trunc(currentHeadRoot[:])),
|
||||
}).Warn("Head did not change after fork choice, current head has the most votes")
|
||||
}
|
||||
|
||||
// If we receive forked blocks.
|
||||
if newHead.Slot != newState.Slot {
|
||||
newState, err = c.beaconDB.HistoricalStateFromSlot(ctx, newHead.Slot, newHeadRoot)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not gen state: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := c.beaconDB.UpdateChainHead(ctx, newHead, newState); err != nil {
|
||||
return fmt.Errorf("failed to update chain: %v", err)
|
||||
}
|
||||
h, err := hashutil.HashBeaconBlock(newHead)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not hash head: %v", err)
|
||||
}
|
||||
log.WithFields(logrus.Fields{
|
||||
"headRoot": fmt.Sprintf("%#x", bytesutil.Trunc(h[:])),
|
||||
"headSlot": newHead.Slot - params.BeaconConfig().GenesisSlot,
|
||||
"stateSlot": newState.Slot - params.BeaconConfig().GenesisSlot,
|
||||
}).Info("Chain head block and state updated")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// lmdGhost applies the Latest Message Driven, Greediest Heaviest Observed Sub-Tree
|
||||
// fork-choice rule defined in the Ethereum Serenity specification for the beacon chain.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
// def lmd_ghost(store: Store, start_state: BeaconState, start_block: BeaconBlock) -> BeaconBlock:
|
||||
// """
|
||||
// Execute the LMD-GHOST algorithm to find the head ``BeaconBlock``.
|
||||
// """
|
||||
// validators = start_state.validator_registry
|
||||
// active_validator_indices = get_active_validator_indices(validators, slot_to_epoch(start_state.slot))
|
||||
// attestation_targets = [
|
||||
// (validator_index, get_latest_attestation_target(store, validator_index))
|
||||
// for validator_index in active_validator_indices
|
||||
// ]
|
||||
//
|
||||
// def get_vote_count(block: BeaconBlock) -> int:
|
||||
// return sum(
|
||||
// get_effective_balance(start_state.validator_balances[validator_index]) // FORK_CHOICE_BALANCE_INCREMENT
|
||||
// for validator_index, target in attestation_targets
|
||||
// if get_ancestor(store, target, block.slot) == block
|
||||
// )
|
||||
//
|
||||
// head = start_block
|
||||
// while 1:
|
||||
// children = get_children(store, head)
|
||||
// if len(children) == 0:
|
||||
// return head
|
||||
// head = max(children, key=get_vote_count)
|
||||
func (c *ChainService) lmdGhost(
|
||||
ctx context.Context,
|
||||
startBlock *pb.BeaconBlock,
|
||||
startState *pb.BeaconState,
|
||||
voteTargets map[uint64]*pb.AttestationTarget,
|
||||
) (*pb.BeaconBlock, error) {
|
||||
highestSlot := c.beaconDB.HighestBlockSlot()
|
||||
head := startBlock
|
||||
for {
|
||||
children, err := c.BlockChildren(ctx, head, highestSlot)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not fetch block children: %v", err)
|
||||
}
|
||||
if len(children) == 0 {
|
||||
return head, nil
|
||||
}
|
||||
maxChild := children[0]
|
||||
|
||||
maxChildVotes, err := VoteCount(maxChild, startState, voteTargets, c.beaconDB)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to determine vote count for block: %v", err)
|
||||
}
|
||||
for i := 1; i < len(children); i++ {
|
||||
candidateChildVotes, err := VoteCount(children[i], startState, voteTargets, c.beaconDB)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to determine vote count for block: %v", err)
|
||||
}
|
||||
maxChildRoot, err := hashutil.HashBeaconBlock(maxChild)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
candidateChildRoot, err := hashutil.HashBeaconBlock(children[i])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if candidateChildVotes > maxChildVotes ||
|
||||
(candidateChildVotes == maxChildVotes && bytesutil.LowerThan(maxChildRoot[:], candidateChildRoot[:])) {
|
||||
maxChild = children[i]
|
||||
}
|
||||
}
|
||||
head = maxChild
|
||||
}
|
||||
}
|
||||
|
||||
// BlockChildren returns the child blocks of the given block up to a given
|
||||
// highest slot.
|
||||
//
|
||||
// ex:
|
||||
// /- C - E
|
||||
// A - B - D - F
|
||||
// \- G
|
||||
// Input: B. Output: [C, D, G]
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
// get_children(store: Store, block: BeaconBlock) -> List[BeaconBlock]
|
||||
// returns the child blocks of the given block.
|
||||
func (c *ChainService) BlockChildren(ctx context.Context, block *pb.BeaconBlock, highestSlot uint64) ([]*pb.BeaconBlock, error) {
|
||||
blockRoot, err := hashutil.HashBeaconBlock(block)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var children []*pb.BeaconBlock
|
||||
startSlot := block.Slot + 1
|
||||
for i := startSlot; i <= highestSlot; i++ {
|
||||
kids, err := c.beaconDB.BlocksBySlot(ctx, i)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get block by slot: %v", err)
|
||||
}
|
||||
children = append(children, kids...)
|
||||
}
|
||||
|
||||
filteredChildren := []*pb.BeaconBlock{}
|
||||
for _, kid := range children {
|
||||
parentRoot := bytesutil.ToBytes32(kid.ParentRootHash32)
|
||||
if blockRoot == parentRoot {
|
||||
filteredChildren = append(filteredChildren, kid)
|
||||
}
|
||||
}
|
||||
return filteredChildren, nil
|
||||
}
|
||||
|
||||
// isDescendant checks if the new head block is a descendant block of the current head.
|
||||
func (c *ChainService) isDescendant(currentHead *pb.BeaconBlock, newHead *pb.BeaconBlock) (bool, error) {
|
||||
currentHeadRoot, err := hashutil.HashBeaconBlock(currentHead)
|
||||
if err != nil {
|
||||
return false, nil
|
||||
}
|
||||
for newHead.Slot > currentHead.Slot {
|
||||
if bytesutil.ToBytes32(newHead.ParentRootHash32) == currentHeadRoot {
|
||||
return true, nil
|
||||
}
|
||||
newHead, err = c.beaconDB.Block(bytesutil.ToBytes32(newHead.ParentRootHash32))
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if newHead == nil {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// AttestationTargets retrieves the list of attestation targets since last finalized epoch,
|
||||
// each attestation target consists of validator index and its attestation target (i.e. the block
|
||||
// which the validator attested to)
|
||||
func (c *ChainService) AttestationTargets(state *pb.BeaconState) (map[uint64]*pb.AttestationTarget, error) {
|
||||
indices := helpers.ActiveValidatorIndices(state.ValidatorRegistry, helpers.CurrentEpoch(state))
|
||||
attestationTargets := make(map[uint64]*pb.AttestationTarget)
|
||||
for i, index := range indices {
|
||||
target, err := c.attsService.LatestAttestationTarget(state, index)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not retrieve attestation target: %v", err)
|
||||
}
|
||||
if target == nil {
|
||||
continue
|
||||
}
|
||||
attestationTargets[uint64(i)] = target
|
||||
}
|
||||
return attestationTargets, nil
|
||||
}
|
||||
|
||||
// VoteCount determines the number of votes on a beacon block by counting the number
|
||||
// of target blocks that have such beacon block as a common ancestor.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
// def get_vote_count(block: BeaconBlock) -> int:
|
||||
// return sum(
|
||||
// get_effective_balance(start_state.validator_balances[validator_index]) // FORK_CHOICE_BALANCE_INCREMENT
|
||||
// for validator_index, target in attestation_targets
|
||||
// if get_ancestor(store, target, block.slot) == block
|
||||
// )
|
||||
func VoteCount(block *pb.BeaconBlock, state *pb.BeaconState, targets map[uint64]*pb.AttestationTarget, beaconDB *db.BeaconDB) (int, error) {
|
||||
balances := 0
|
||||
var ancestorRoot []byte
|
||||
var err error
|
||||
|
||||
blockRoot, err := hashutil.HashBeaconBlock(block)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
for validatorIndex, target := range targets {
|
||||
ancestorRoot, err = cachedAncestor(target, block.Slot, beaconDB)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
// This covers the following case, we start at B5, and want to process B6 and B7
|
||||
// B6 can be processed, B7 can not be processed because it's pointed to the
|
||||
// block older than current block 5.
|
||||
// B4 - B5 - B6
|
||||
// \ - - - - - B7
|
||||
if ancestorRoot == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if bytes.Equal(blockRoot[:], ancestorRoot) {
|
||||
balances += int(helpers.EffectiveBalance(state, validatorIndex))
|
||||
}
|
||||
}
|
||||
return balances, nil
|
||||
}
|
||||
|
||||
// BlockAncestor obtains the ancestor at of a block at a certain slot.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
// def get_ancestor(store: Store, block: BeaconBlock, slot: Slot) -> BeaconBlock:
|
||||
// """
|
||||
// Get the ancestor of ``block`` with slot number ``slot``; return ``None`` if not found.
|
||||
// """
|
||||
// if block.slot == slot:
|
||||
// return block
|
||||
// elif block.slot < slot:
|
||||
// return None
|
||||
// else:
|
||||
// return get_ancestor(store, store.get_parent(block), slot)
|
||||
func BlockAncestor(targetBlock *pb.AttestationTarget, slot uint64, beaconDB *db.BeaconDB) ([]byte, error) {
|
||||
if targetBlock.Slot == slot {
|
||||
return targetBlock.BlockRoot[:], nil
|
||||
}
|
||||
if targetBlock.Slot < slot {
|
||||
return nil, nil
|
||||
}
|
||||
parentRoot := bytesutil.ToBytes32(targetBlock.ParentRoot)
|
||||
parent, err := beaconDB.Block(parentRoot)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get parent block: %v", err)
|
||||
}
|
||||
if parent == nil {
|
||||
return nil, fmt.Errorf("parent block does not exist: %v", err)
|
||||
}
|
||||
newTarget := &pb.AttestationTarget{
|
||||
Slot: parent.Slot,
|
||||
BlockRoot: parentRoot[:],
|
||||
ParentRoot: parent.ParentRootHash32,
|
||||
}
|
||||
return BlockAncestor(newTarget, slot, beaconDB)
|
||||
}
|
||||
|
||||
// cachedAncestor retrieves the cached ancestor target from block ancestor cache,
|
||||
// if it's not there it looks up the block tree get it and cache it.
|
||||
func cachedAncestor(target *pb.AttestationTarget, height uint64, beaconDB *db.BeaconDB) ([]byte, error) {
|
||||
// check if the ancestor block of from a given block height was cached.
|
||||
cachedAncestorInfo, err := blkAncestorCache.AncestorBySlot(target.BlockRoot, height)
|
||||
if err != nil {
|
||||
return nil, nil
|
||||
}
|
||||
if cachedAncestorInfo != nil {
|
||||
return cachedAncestorInfo.Target.BlockRoot, nil
|
||||
}
|
||||
|
||||
ancestorRoot, err := BlockAncestor(target, height, beaconDB)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ancestor, err := beaconDB.Block(bytesutil.ToBytes32(ancestorRoot))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if ancestor == nil {
|
||||
return nil, nil
|
||||
}
|
||||
ancestorTarget := &pb.AttestationTarget{
|
||||
Slot: ancestor.Slot,
|
||||
BlockRoot: ancestorRoot,
|
||||
ParentRoot: ancestor.ParentRootHash32,
|
||||
}
|
||||
if err := blkAncestorCache.AddBlockAncestor(&cache.AncestorInfo{
|
||||
Height: height,
|
||||
Hash: target.BlockRoot,
|
||||
Target: ancestorTarget,
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ancestorRoot, nil
|
||||
}
|
||||
@@ -1,221 +0,0 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/internal"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/hashutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
|
||||
type mockAttestationHandler struct {
|
||||
targets map[uint64]*pb.AttestationTarget
|
||||
}
|
||||
|
||||
func (m *mockAttestationHandler) LatestAttestationTarget(beaconState *pb.BeaconState, idx uint64) (*pb.AttestationTarget, error) {
|
||||
return m.targets[idx], nil
|
||||
}
|
||||
|
||||
func (m *mockAttestationHandler) BatchUpdateLatestAttestation(ctx context.Context, atts []*pb.Attestation) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestApplyForkChoice_ChainSplitReorg(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
beaconDB := internal.SetupDB(t)
|
||||
defer internal.TeardownDB(t, beaconDB)
|
||||
|
||||
ctx := context.Background()
|
||||
deposits, _ := setupInitialDeposits(t, 100)
|
||||
eth1Data := &pb.Eth1Data{
|
||||
DepositRootHash32: []byte{},
|
||||
BlockHash32: []byte{},
|
||||
}
|
||||
justifiedState, err := state.GenesisBeaconState(deposits, 0, eth1Data)
|
||||
if err != nil {
|
||||
t.Fatalf("Can't generate genesis state: %v", err)
|
||||
}
|
||||
|
||||
chainService := setupBeaconChain(t, beaconDB, nil)
|
||||
|
||||
// Construct a forked chain that looks as follows:
|
||||
// /------B1 ----B3 ----- B5 (current head)
|
||||
// B0 --B2 -------------B4
|
||||
blocks, roots := constructForkedChain(t, justifiedState)
|
||||
|
||||
// We then setup a canonical chain of the following blocks:
|
||||
// B0->B1->B3->B5.
|
||||
if err := chainService.beaconDB.SaveBlock(blocks[0]); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
justifiedState.LatestBlock = blocks[0]
|
||||
if err := chainService.beaconDB.SaveJustifiedState(justifiedState); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := chainService.beaconDB.SaveJustifiedBlock(blocks[0]); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := chainService.beaconDB.UpdateChainHead(ctx, blocks[0], justifiedState); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
canonicalBlockIndices := []int{1, 3, 5}
|
||||
postState := proto.Clone(justifiedState).(*pb.BeaconState)
|
||||
for _, canonicalIndex := range canonicalBlockIndices {
|
||||
postState, err = chainService.ApplyBlockStateTransition(ctx, blocks[canonicalIndex], postState)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := chainService.beaconDB.SaveBlock(blocks[canonicalIndex]); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := chainService.beaconDB.UpdateChainHead(ctx, blocks[canonicalIndex], postState); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
chainHead, err := chainService.beaconDB.ChainHead()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if chainHead.Slot != justifiedState.Slot+5 {
|
||||
t.Errorf(
|
||||
"Expected chain head with slot %d, received %d",
|
||||
justifiedState.Slot+5-params.BeaconConfig().GenesisSlot,
|
||||
chainHead.Slot-params.BeaconConfig().GenesisSlot,
|
||||
)
|
||||
}
|
||||
|
||||
// We then save forked blocks and their historical states (but do not update chain head).
|
||||
// The fork is from B0->B2->B4.
|
||||
forkedBlockIndices := []int{2, 4}
|
||||
forkState := proto.Clone(justifiedState).(*pb.BeaconState)
|
||||
for _, forkIndex := range forkedBlockIndices {
|
||||
forkState, err = chainService.ApplyBlockStateTransition(ctx, blocks[forkIndex], forkState)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := chainService.beaconDB.SaveBlock(blocks[forkIndex]); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := chainService.beaconDB.SaveHistoricalState(ctx, forkState, roots[forkIndex]); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Give the block from the forked chain, B4, the most votes.
|
||||
voteTargets := make(map[uint64]*pb.AttestationTarget)
|
||||
voteTargets[0] = &pb.AttestationTarget{
|
||||
Slot: blocks[5].Slot,
|
||||
BlockRoot: roots[5][:],
|
||||
ParentRoot: blocks[5].ParentRootHash32,
|
||||
}
|
||||
for i := 1; i < len(deposits); i++ {
|
||||
voteTargets[uint64(i)] = &pb.AttestationTarget{
|
||||
Slot: blocks[4].Slot,
|
||||
BlockRoot: roots[4][:],
|
||||
ParentRoot: blocks[4].ParentRootHash32,
|
||||
}
|
||||
}
|
||||
attHandler := &mockAttestationHandler{
|
||||
targets: voteTargets,
|
||||
}
|
||||
chainService.attsService = attHandler
|
||||
|
||||
block4State, err := chainService.beaconDB.HistoricalStateFromSlot(ctx, blocks[4].Slot, roots[4])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Applying the fork choice rule should reorg to B4 successfully.
|
||||
if err := chainService.ApplyForkChoiceRule(ctx, blocks[4], block4State); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
newHead, err := chainService.beaconDB.ChainHead()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !proto.Equal(newHead, blocks[4]) {
|
||||
t.Errorf(
|
||||
"Expected chain head %v, received %v",
|
||||
blocks[4],
|
||||
newHead,
|
||||
)
|
||||
}
|
||||
want := "Reorg happened"
|
||||
testutil.AssertLogsContain(t, hook, want)
|
||||
}
|
||||
|
||||
func constructForkedChain(t *testing.T, beaconState *pb.BeaconState) ([]*pb.BeaconBlock, [][32]byte) {
|
||||
// Construct the following chain:
|
||||
// /------B1 ----B3 ----- B5 (current head)
|
||||
// B0 --B2 -------------B4
|
||||
blocks := make([]*pb.BeaconBlock, 6)
|
||||
roots := make([][32]byte, 6)
|
||||
var err error
|
||||
blocks[0] = &pb.BeaconBlock{
|
||||
Slot: beaconState.Slot,
|
||||
ParentRootHash32: []byte{'A'},
|
||||
Body: &pb.BeaconBlockBody{},
|
||||
}
|
||||
roots[0], err = hashutil.HashBeaconBlock(blocks[0])
|
||||
if err != nil {
|
||||
t.Fatalf("Could not hash block: %v", err)
|
||||
}
|
||||
|
||||
blocks[1] = &pb.BeaconBlock{
|
||||
Slot: beaconState.Slot + 2,
|
||||
ParentRootHash32: roots[0][:],
|
||||
Body: &pb.BeaconBlockBody{},
|
||||
}
|
||||
roots[1], err = hashutil.HashBeaconBlock(blocks[1])
|
||||
if err != nil {
|
||||
t.Fatalf("Could not hash block: %v", err)
|
||||
}
|
||||
|
||||
blocks[2] = &pb.BeaconBlock{
|
||||
Slot: beaconState.Slot + 1,
|
||||
ParentRootHash32: roots[0][:],
|
||||
Body: &pb.BeaconBlockBody{},
|
||||
}
|
||||
roots[2], err = hashutil.HashBeaconBlock(blocks[2])
|
||||
if err != nil {
|
||||
t.Fatalf("Could not hash block: %v", err)
|
||||
}
|
||||
|
||||
blocks[3] = &pb.BeaconBlock{
|
||||
Slot: beaconState.Slot + 3,
|
||||
ParentRootHash32: roots[1][:],
|
||||
Body: &pb.BeaconBlockBody{},
|
||||
}
|
||||
roots[3], err = hashutil.HashBeaconBlock(blocks[3])
|
||||
if err != nil {
|
||||
t.Fatalf("Could not hash block: %v", err)
|
||||
}
|
||||
|
||||
blocks[4] = &pb.BeaconBlock{
|
||||
Slot: beaconState.Slot + 4,
|
||||
ParentRootHash32: roots[2][:],
|
||||
Body: &pb.BeaconBlockBody{},
|
||||
}
|
||||
roots[4], err = hashutil.HashBeaconBlock(blocks[4])
|
||||
if err != nil {
|
||||
t.Fatalf("Could not hash block: %v", err)
|
||||
}
|
||||
|
||||
blocks[5] = &pb.BeaconBlock{
|
||||
Slot: beaconState.Slot + 5,
|
||||
ParentRootHash32: roots[3][:],
|
||||
Body: &pb.BeaconBlockBody{},
|
||||
}
|
||||
roots[5], err = hashutil.HashBeaconBlock(blocks[5])
|
||||
if err != nil {
|
||||
t.Fatalf("Could not hash block: %v", err)
|
||||
}
|
||||
return blocks, roots
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
72
beacon-chain/blockchain/forkchoice/BUILD.bazel
Normal file
72
beacon-chain/blockchain/forkchoice/BUILD.bazel
Normal file
@@ -0,0 +1,72 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"doc.go",
|
||||
"log.go",
|
||||
"metrics.go",
|
||||
"process_attestation.go",
|
||||
"process_block.go",
|
||||
"service.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/forkchoice",
|
||||
visibility = ["//beacon-chain:__subpackages__"],
|
||||
deps = [
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
"//beacon-chain/core/epoch/precompute:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/state:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/db/filters:go_default_library",
|
||||
"//beacon-chain/flags:go_default_library",
|
||||
"//proto/beacon/p2p/v1:go_default_library",
|
||||
"//shared/bytesutil:go_default_library",
|
||||
"//shared/featureconfig:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
"//shared/stateutil:go_default_library",
|
||||
"//shared/traceutil:go_default_library",
|
||||
"@com_github_gogo_protobuf//proto:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_ssz//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@io_opencensus_go//trace:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"benchmark_test.go",
|
||||
"lmd_ghost_yaml_test.go",
|
||||
"process_attestation_test.go",
|
||||
"process_block_test.go",
|
||||
"service_test.go",
|
||||
"tree_test.go",
|
||||
],
|
||||
data = ["lmd_ghost_test.yaml"],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/state:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/db/filters:go_default_library",
|
||||
"//beacon-chain/db/testing:go_default_library",
|
||||
"//proto/beacon/p2p/v1:go_default_library",
|
||||
"//shared/bytesutil:go_default_library",
|
||||
"//shared/featureconfig:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
"//shared/stateutil:go_default_library",
|
||||
"//shared/testutil:go_default_library",
|
||||
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_ssz//:go_default_library",
|
||||
"@in_gopkg_yaml_v2//:go_default_library",
|
||||
],
|
||||
)
|
||||
168
beacon-chain/blockchain/forkchoice/benchmark_test.go
Normal file
168
beacon-chain/blockchain/forkchoice/benchmark_test.go
Normal file
@@ -0,0 +1,168 @@
|
||||
package forkchoice
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
)
|
||||
|
||||
func BenchmarkForkChoiceTree1(b *testing.B) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(b)
|
||||
defer testDB.TeardownDB(b, db)
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
|
||||
roots, err := blockTree1(db, []byte{'g'})
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
// Benchmark fork choice with 1024 validators
|
||||
validators := make([]*ethpb.Validator, 1024)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
validators[i] = ðpb.Validator{ExitEpoch: 2, EffectiveBalance: 1e9}
|
||||
}
|
||||
s := &pb.BeaconState{Validators: validators}
|
||||
|
||||
if err := store.GenesisStore(ctx, ðpb.Checkpoint{}, ðpb.Checkpoint{}); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
store.justifiedCheckpt.Root = roots[0]
|
||||
if err := store.db.SaveState(ctx, s, bytesutil.ToBytes32(roots[0])); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
if err := store.checkpointState.AddCheckpointState(&cache.CheckpointState{
|
||||
Checkpoint: store.justifiedCheckpt,
|
||||
State: s,
|
||||
}); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
// Spread out the votes evenly for all 3 leaf nodes
|
||||
for i := 0; i < len(validators); i++ {
|
||||
switch {
|
||||
case i < 256:
|
||||
store.latestVoteMap[uint64(i)] = &pb.ValidatorLatestVote{Root: roots[1]}
|
||||
case i > 768:
|
||||
store.latestVoteMap[uint64(i)] = &pb.ValidatorLatestVote{Root: roots[7]}
|
||||
default:
|
||||
store.latestVoteMap[uint64(i)] = &pb.ValidatorLatestVote{Root: roots[8]}
|
||||
}
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, err := store.Head(ctx)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkForkChoiceTree2(b *testing.B) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(b)
|
||||
defer testDB.TeardownDB(b, db)
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
|
||||
roots, err := blockTree2(db)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
// Benchmark fork choice with 1024 validators
|
||||
validators := make([]*ethpb.Validator, 1024)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
validators[i] = ðpb.Validator{ExitEpoch: 2, EffectiveBalance: 1e9}
|
||||
}
|
||||
s := &pb.BeaconState{Validators: validators}
|
||||
if err := store.GenesisStore(ctx, ðpb.Checkpoint{}, ðpb.Checkpoint{}); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
store.justifiedCheckpt.Root = roots[0]
|
||||
if err := store.db.SaveState(ctx, s, bytesutil.ToBytes32(roots[0])); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
if err := store.checkpointState.AddCheckpointState(&cache.CheckpointState{
|
||||
Checkpoint: store.justifiedCheckpt,
|
||||
State: s,
|
||||
}); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
// Spread out the votes evenly for all the leaf nodes. 8 to 15
|
||||
nodeIndex := 8
|
||||
for i := 0; i < len(validators); i++ {
|
||||
store.latestVoteMap[uint64(i)] = &pb.ValidatorLatestVote{Root: roots[nodeIndex]}
|
||||
if i%155 == 0 {
|
||||
nodeIndex++
|
||||
}
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, err := store.Head(ctx)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkForkChoiceTree3(b *testing.B) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(b)
|
||||
defer testDB.TeardownDB(b, db)
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
|
||||
roots, err := blockTree3(db)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
// Benchmark fork choice with 1024 validators
|
||||
validators := make([]*ethpb.Validator, 1024)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
validators[i] = ðpb.Validator{ExitEpoch: 2, EffectiveBalance: 1e9}
|
||||
}
|
||||
s := &pb.BeaconState{Validators: validators}
|
||||
if err := store.GenesisStore(ctx, ðpb.Checkpoint{}, ðpb.Checkpoint{}); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
store.justifiedCheckpt.Root = roots[0]
|
||||
if err := store.db.SaveState(ctx, s, bytesutil.ToBytes32(roots[0])); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
if err := store.checkpointState.AddCheckpointState(&cache.CheckpointState{
|
||||
Checkpoint: store.justifiedCheckpt,
|
||||
State: s,
|
||||
}); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
// All validators vote on the same head
|
||||
for i := 0; i < len(validators); i++ {
|
||||
store.latestVoteMap[uint64(i)] = &pb.ValidatorLatestVote{Root: roots[len(roots)-1]}
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, err := store.Head(ctx)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
9
beacon-chain/blockchain/forkchoice/doc.go
Normal file
9
beacon-chain/blockchain/forkchoice/doc.go
Normal file
@@ -0,0 +1,9 @@
|
||||
/*
|
||||
Package forkchoice implements the Latest Message Driven GHOST (Greediest Heaviest Observed
|
||||
Sub-Tree) algorithm as the Ethereum Serenity beacon chain fork choice rule. This algorithm is designed to
|
||||
properly detect the canonical chain based on validator votes even in the presence of high network
|
||||
latency, network partitions, and many conflicting blocks. To read more about fork choice, read the
|
||||
official accompanying document:
|
||||
https://github.com/ethereum/eth2.0-specs/blob/v0.9.0/specs/core/0_fork-choice.md
|
||||
*/
|
||||
package forkchoice
|
||||
59
beacon-chain/blockchain/forkchoice/lmd_ghost_test.yaml
Normal file
59
beacon-chain/blockchain/forkchoice/lmd_ghost_test.yaml
Normal file
@@ -0,0 +1,59 @@
|
||||
test_cases:
|
||||
# GHOST chooses b3 with the heaviest weight
|
||||
- blocks:
|
||||
- id: 'b0'
|
||||
parent: 'b0'
|
||||
- id: 'b1'
|
||||
parent: 'b0'
|
||||
- id: 'b2'
|
||||
parent: 'b1'
|
||||
- id: 'b3'
|
||||
parent: 'b1'
|
||||
weights:
|
||||
b0: 0
|
||||
b1: 0
|
||||
b2: 5
|
||||
b3: 10
|
||||
head: 'b3'
|
||||
# GHOST chooses b1 with the heaviest weight
|
||||
- blocks:
|
||||
- id: 'b0'
|
||||
parent: 'b0'
|
||||
- id: 'b1'
|
||||
parent: 'b0'
|
||||
- id: 'b2'
|
||||
parent: 'b0'
|
||||
- id: 'b3'
|
||||
parent: 'b0'
|
||||
weights:
|
||||
b1: 5
|
||||
b2: 4
|
||||
b3: 3
|
||||
head: 'b1'
|
||||
# Equal weights children, GHOST chooses b2 because it is higher lexicographically than b3
|
||||
- blocks:
|
||||
- id: 'b0'
|
||||
parent: 'b0'
|
||||
- id: 'b1'
|
||||
parent: 'b0'
|
||||
- id: 'b2'
|
||||
parent: 'b0'
|
||||
- id: 'b3'
|
||||
parent: 'b0'
|
||||
weights:
|
||||
b1: 5
|
||||
b2: 6
|
||||
b3: 6
|
||||
head: 'b3'
|
||||
# Equal weights children, GHOST chooses b2 because it is higher lexicographically than b1
|
||||
- blocks:
|
||||
- id: 'b0'
|
||||
parent: 'b0'
|
||||
- id: 'b1'
|
||||
parent: 'b0'
|
||||
- id: 'b2'
|
||||
parent: 'b0'
|
||||
weights:
|
||||
b1: 0
|
||||
b2: 0
|
||||
head: 'b2'
|
||||
140
beacon-chain/blockchain/forkchoice/lmd_ghost_yaml_test.go
Normal file
140
beacon-chain/blockchain/forkchoice/lmd_ghost_yaml_test.go
Normal file
@@ -0,0 +1,140 @@
|
||||
package forkchoice
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"io/ioutil"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/go-ssz"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
type Config struct {
|
||||
TestCases []struct {
|
||||
Blocks []struct {
|
||||
ID string `yaml:"id"`
|
||||
Parent string `yaml:"parent"`
|
||||
} `yaml:"blocks"`
|
||||
Weights map[string]int `yaml:"weights"`
|
||||
Head string `yaml:"head"`
|
||||
} `yaml:"test_cases"`
|
||||
}
|
||||
|
||||
func TestGetHeadFromYaml(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
filename, _ := filepath.Abs("./lmd_ghost_test.yaml")
|
||||
yamlFile, err := ioutil.ReadFile(filename)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
var c *Config
|
||||
err = yaml.Unmarshal(yamlFile, &c)
|
||||
|
||||
params.UseMainnetConfig()
|
||||
|
||||
for _, test := range c.TestCases {
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
blksRoot := make(map[int][]byte)
|
||||
// Construct block tree from yaml.
|
||||
for _, blk := range test.Blocks {
|
||||
// genesis block condition
|
||||
if blk.ID == blk.Parent {
|
||||
b := ðpb.BeaconBlock{Slot: 0, ParentRoot: []byte{'g'}}
|
||||
if err := db.SaveBlock(ctx, ðpb.SignedBeaconBlock{Block: b}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
root, err := ssz.HashTreeRoot(b)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
blksRoot[0] = root[:]
|
||||
} else {
|
||||
slot, err := strconv.Atoi(blk.ID[1:])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
parentSlot, err := strconv.Atoi(blk.Parent[1:])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
b := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: uint64(slot), ParentRoot: blksRoot[parentSlot]}}
|
||||
if err := db.SaveBlock(ctx, b); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
root, err := ssz.HashTreeRoot(b.Block)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
blksRoot[slot] = root[:]
|
||||
if err := db.SaveState(ctx, &pb.BeaconState{}, root); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
|
||||
// Assign validator votes to the blocks as weights.
|
||||
count := 0
|
||||
for blk, votes := range test.Weights {
|
||||
slot, err := strconv.Atoi(blk[1:])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
max := count + votes
|
||||
for i := count; i < max; i++ {
|
||||
store.latestVoteMap[uint64(i)] = &pb.ValidatorLatestVote{Root: blksRoot[slot]}
|
||||
count++
|
||||
}
|
||||
}
|
||||
|
||||
validators := make([]*ethpb.Validator, count)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
validators[i] = ðpb.Validator{ExitEpoch: 2, EffectiveBalance: 1e9}
|
||||
}
|
||||
|
||||
s := &pb.BeaconState{Validators: validators}
|
||||
|
||||
if err := store.db.SaveState(ctx, s, bytesutil.ToBytes32(blksRoot[0])); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := store.GenesisStore(ctx, ðpb.Checkpoint{Root: blksRoot[0]}, ðpb.Checkpoint{Root: blksRoot[0]}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := store.checkpointState.AddCheckpointState(&cache.CheckpointState{
|
||||
Checkpoint: store.justifiedCheckpt,
|
||||
State: s,
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
head, err := store.Head(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
headSlot, err := strconv.Atoi(test.Head[1:])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
wantedHead := blksRoot[headSlot]
|
||||
|
||||
if !bytes.Equal(head, wantedHead) {
|
||||
t.Errorf("wanted root %#x, got root %#x", wantedHead, head)
|
||||
}
|
||||
|
||||
testDB.TeardownDB(t, db)
|
||||
}
|
||||
}
|
||||
40
beacon-chain/blockchain/forkchoice/log.go
Normal file
40
beacon-chain/blockchain/forkchoice/log.go
Normal file
@@ -0,0 +1,40 @@
|
||||
package forkchoice
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var log = logrus.WithField("prefix", "forkchoice")
|
||||
|
||||
// logs epoch related data during epoch boundary.
|
||||
func logEpochData(beaconState *pb.BeaconState) {
|
||||
log.WithFields(logrus.Fields{
|
||||
"epoch": helpers.CurrentEpoch(beaconState),
|
||||
"finalizedEpoch": beaconState.FinalizedCheckpoint.Epoch,
|
||||
"justifiedEpoch": beaconState.CurrentJustifiedCheckpoint.Epoch,
|
||||
"previousJustifiedEpoch": beaconState.PreviousJustifiedCheckpoint.Epoch,
|
||||
}).Info("Starting next epoch")
|
||||
activeVals, err := helpers.ActiveValidatorIndices(beaconState, helpers.CurrentEpoch(beaconState))
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get active validator indices")
|
||||
return
|
||||
}
|
||||
log.WithFields(logrus.Fields{
|
||||
"totalValidators": len(beaconState.Validators),
|
||||
"activeValidators": len(activeVals),
|
||||
"averageBalance": fmt.Sprintf("%.5f ETH", averageBalance(beaconState.Balances)),
|
||||
}).Info("Validator registry information")
|
||||
}
|
||||
|
||||
func averageBalance(balances []uint64) float64 {
|
||||
total := uint64(0)
|
||||
for i := 0; i < len(balances); i++ {
|
||||
total += balances[i]
|
||||
}
|
||||
return float64(total) / float64(len(balances)) / float64(params.BeaconConfig().GweiPerEth)
|
||||
}
|
||||
161
beacon-chain/blockchain/forkchoice/metrics.go
Normal file
161
beacon-chain/blockchain/forkchoice/metrics.go
Normal file
@@ -0,0 +1,161 @@
|
||||
package forkchoice
|
||||
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
)
|
||||
|
||||
var (
|
||||
beaconFinalizedEpoch = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "beacon_finalized_epoch",
|
||||
Help: "Last finalized epoch of the processed state",
|
||||
})
|
||||
beaconFinalizedRoot = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "beacon_finalized_root",
|
||||
Help: "Last finalized root of the processed state",
|
||||
})
|
||||
cacheFinalizedEpoch = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "cache_finalized_epoch",
|
||||
Help: "Last cached finalized epoch",
|
||||
})
|
||||
cacheFinalizedRoot = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "cache_finalized_root",
|
||||
Help: "Last cached finalized root",
|
||||
})
|
||||
beaconCurrentJustifiedEpoch = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "beacon_current_justified_epoch",
|
||||
Help: "Current justified epoch of the processed state",
|
||||
})
|
||||
beaconCurrentJustifiedRoot = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "beacon_current_justified_root",
|
||||
Help: "Current justified root of the processed state",
|
||||
})
|
||||
beaconPrevJustifiedEpoch = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "beacon_previous_justified_epoch",
|
||||
Help: "Previous justified epoch of the processed state",
|
||||
})
|
||||
beaconPrevJustifiedRoot = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "beacon_previous_justified_root",
|
||||
Help: "Previous justified root of the processed state",
|
||||
})
|
||||
sigFailsToVerify = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "att_signature_failed_to_verify_with_cache",
|
||||
Help: "Number of attestation signatures that failed to verify with cache on, but succeeded without cache",
|
||||
})
|
||||
validatorsCount = promauto.NewGaugeVec(prometheus.GaugeOpts{
|
||||
Name: "validator_count",
|
||||
Help: "The total number of validators, in GWei",
|
||||
}, []string{"state"})
|
||||
validatorsBalance = promauto.NewGaugeVec(prometheus.GaugeOpts{
|
||||
Name: "validators_total_balance",
|
||||
Help: "The total balance of validators, in GWei",
|
||||
}, []string{"state"})
|
||||
validatorsEffectiveBalance = promauto.NewGaugeVec(prometheus.GaugeOpts{
|
||||
Name: "validators_total_effective_balance",
|
||||
Help: "The total effective balance of validators, in GWei",
|
||||
}, []string{"state"})
|
||||
currentEth1DataDepositCount = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "current_eth1_data_deposit_count",
|
||||
Help: "The current eth1 deposit count in the last processed state eth1data field.",
|
||||
})
|
||||
totalEligibleBalances = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "total_eligible_balances",
|
||||
Help: "The total amount of ether, in gwei, that has been used in voting attestation target of previous epoch",
|
||||
})
|
||||
totalVotedTargetBalances = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "total_voted_target_balances",
|
||||
Help: "The total amount of ether, in gwei, that is eligible for voting of previous epoch",
|
||||
})
|
||||
)
|
||||
|
||||
func reportEpochMetrics(state *pb.BeaconState) {
|
||||
currentEpoch := state.Slot / params.BeaconConfig().SlotsPerEpoch
|
||||
|
||||
// Validator instances
|
||||
pendingInstances := 0
|
||||
activeInstances := 0
|
||||
slashingInstances := 0
|
||||
slashedInstances := 0
|
||||
exitingInstances := 0
|
||||
exitedInstances := 0
|
||||
// Validator balances
|
||||
pendingBalance := uint64(0)
|
||||
activeBalance := uint64(0)
|
||||
activeEffectiveBalance := uint64(0)
|
||||
exitingBalance := uint64(0)
|
||||
exitingEffectiveBalance := uint64(0)
|
||||
slashingBalance := uint64(0)
|
||||
slashingEffectiveBalance := uint64(0)
|
||||
|
||||
for i, validator := range state.Validators {
|
||||
if validator.Slashed {
|
||||
if currentEpoch < validator.ExitEpoch {
|
||||
slashingInstances++
|
||||
slashingBalance += state.Balances[i]
|
||||
slashingEffectiveBalance += validator.EffectiveBalance
|
||||
} else {
|
||||
slashedInstances++
|
||||
}
|
||||
continue
|
||||
}
|
||||
if validator.ExitEpoch != params.BeaconConfig().FarFutureEpoch {
|
||||
if currentEpoch < validator.ExitEpoch {
|
||||
exitingInstances++
|
||||
exitingBalance += state.Balances[i]
|
||||
exitingEffectiveBalance += validator.EffectiveBalance
|
||||
} else {
|
||||
exitedInstances++
|
||||
}
|
||||
continue
|
||||
}
|
||||
if currentEpoch < validator.ActivationEpoch {
|
||||
pendingInstances++
|
||||
pendingBalance += state.Balances[i]
|
||||
continue
|
||||
}
|
||||
activeInstances++
|
||||
activeBalance += state.Balances[i]
|
||||
activeEffectiveBalance += validator.EffectiveBalance
|
||||
}
|
||||
validatorsCount.WithLabelValues("Pending").Set(float64(pendingInstances))
|
||||
validatorsCount.WithLabelValues("Active").Set(float64(activeInstances))
|
||||
validatorsCount.WithLabelValues("Exiting").Set(float64(exitingInstances))
|
||||
validatorsCount.WithLabelValues("Exited").Set(float64(exitedInstances))
|
||||
validatorsCount.WithLabelValues("Slashing").Set(float64(slashingInstances))
|
||||
validatorsCount.WithLabelValues("Slashed").Set(float64(slashedInstances))
|
||||
validatorsBalance.WithLabelValues("Pending").Set(float64(pendingBalance))
|
||||
validatorsBalance.WithLabelValues("Active").Set(float64(activeBalance))
|
||||
validatorsBalance.WithLabelValues("Exiting").Set(float64(exitingBalance))
|
||||
validatorsBalance.WithLabelValues("Slashing").Set(float64(slashingBalance))
|
||||
validatorsEffectiveBalance.WithLabelValues("Active").Set(float64(activeEffectiveBalance))
|
||||
validatorsEffectiveBalance.WithLabelValues("Exiting").Set(float64(exitingEffectiveBalance))
|
||||
validatorsEffectiveBalance.WithLabelValues("Slashing").Set(float64(slashingEffectiveBalance))
|
||||
|
||||
// Last justified slot
|
||||
if state.CurrentJustifiedCheckpoint != nil {
|
||||
beaconCurrentJustifiedEpoch.Set(float64(state.CurrentJustifiedCheckpoint.Epoch))
|
||||
beaconCurrentJustifiedRoot.Set(float64(bytesutil.ToLowInt64(state.CurrentJustifiedCheckpoint.Root)))
|
||||
}
|
||||
// Last previous justified slot
|
||||
if state.PreviousJustifiedCheckpoint != nil {
|
||||
beaconPrevJustifiedEpoch.Set(float64(state.PreviousJustifiedCheckpoint.Epoch))
|
||||
beaconPrevJustifiedRoot.Set(float64(bytesutil.ToLowInt64(state.PreviousJustifiedCheckpoint.Root)))
|
||||
}
|
||||
// Last finalized slot
|
||||
if state.FinalizedCheckpoint != nil {
|
||||
beaconFinalizedEpoch.Set(float64(state.FinalizedCheckpoint.Epoch))
|
||||
beaconFinalizedRoot.Set(float64(bytesutil.ToLowInt64(state.FinalizedCheckpoint.Root)))
|
||||
}
|
||||
if state.Eth1Data != nil {
|
||||
currentEth1DataDepositCount.Set(float64(state.Eth1Data.DepositCount))
|
||||
}
|
||||
|
||||
if precompute.Balances != nil {
|
||||
totalEligibleBalances.Set(float64(precompute.Balances.PrevEpoch))
|
||||
totalVotedTargetBalances.Set(float64(precompute.Balances.PrevEpochTargetAttesters))
|
||||
}
|
||||
}
|
||||
309
beacon-chain/blockchain/forkchoice/process_attestation.go
Normal file
309
beacon-chain/blockchain/forkchoice/process_attestation.go
Normal file
@@ -0,0 +1,309 @@
|
||||
package forkchoice
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/pkg/errors"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/go-ssz"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// ErrTargetRootNotInDB returns when the target block root of an attestation cannot be found in the
|
||||
// beacon database.
|
||||
var ErrTargetRootNotInDB = errors.New("target root does not exist in db")
|
||||
|
||||
// OnAttestation is called whenever an attestation is received, it updates validators latest vote,
|
||||
// as well as the fork choice store struct.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
// def on_attestation(store: Store, attestation: Attestation) -> None:
|
||||
// """
|
||||
// Run ``on_attestation`` upon receiving a new ``attestation`` from either within a block or directly on the wire.
|
||||
//
|
||||
// An ``attestation`` that is asserted as invalid may be valid at a later time,
|
||||
// consider scheduling it for later processing in such case.
|
||||
// """
|
||||
// target = attestation.data.target
|
||||
//
|
||||
// # Attestations must be from the current or previous epoch
|
||||
// current_epoch = compute_epoch_at_slot(get_current_slot(store))
|
||||
// # Use GENESIS_EPOCH for previous when genesis to avoid underflow
|
||||
// previous_epoch = current_epoch - 1 if current_epoch > GENESIS_EPOCH else GENESIS_EPOCH
|
||||
// assert target.epoch in [current_epoch, previous_epoch]
|
||||
// assert target.epoch == compute_epoch_at_slot(attestation.data.slot)
|
||||
//
|
||||
// # Attestations target be for a known block. If target block is unknown, delay consideration until the block is found
|
||||
// assert target.root in store.blocks
|
||||
// # Attestations cannot be from future epochs. If they are, delay consideration until the epoch arrives
|
||||
// base_state = store.block_states[target.root].copy()
|
||||
// assert store.time >= base_state.genesis_time + compute_start_slot_at_epoch(target.epoch) * SECONDS_PER_SLOT
|
||||
//
|
||||
// # Attestations must be for a known block. If block is unknown, delay consideration until the block is found
|
||||
// assert attestation.data.beacon_block_root in store.blocks
|
||||
// # Attestations must not be for blocks in the future. If not, the attestation should not be considered
|
||||
// assert store.blocks[attestation.data.beacon_block_root].slot <= attestation.data.slot
|
||||
//
|
||||
// # Store target checkpoint state if not yet seen
|
||||
// if target not in store.checkpoint_states:
|
||||
// process_slots(base_state, compute_start_slot_at_epoch(target.epoch))
|
||||
// store.checkpoint_states[target] = base_state
|
||||
// target_state = store.checkpoint_states[target]
|
||||
//
|
||||
// # Attestations can only affect the fork choice of subsequent slots.
|
||||
// # Delay consideration in the fork choice until their slot is in the past.
|
||||
// assert store.time >= (attestation.data.slot + 1) * SECONDS_PER_SLOT
|
||||
//
|
||||
// # Get state at the `target` to validate attestation and calculate the committees
|
||||
// indexed_attestation = get_indexed_attestation(target_state, attestation)
|
||||
// assert is_valid_indexed_attestation(target_state, indexed_attestation)
|
||||
//
|
||||
// # Update latest messages
|
||||
// for i in indexed_attestation.attesting_indices:
|
||||
// if i not in store.latest_messages or target.epoch > store.latest_messages[i].epoch:
|
||||
// store.latest_messages[i] = LatestMessage(epoch=target.epoch, root=attestation.data.beacon_block_root)
|
||||
func (s *Store) OnAttestation(ctx context.Context, a *ethpb.Attestation) error {
|
||||
ctx, span := trace.StartSpan(ctx, "forkchoice.onAttestation")
|
||||
defer span.End()
|
||||
|
||||
tgt := proto.Clone(a.Data.Target).(*ethpb.Checkpoint)
|
||||
tgtSlot := helpers.StartSlot(tgt.Epoch)
|
||||
|
||||
if helpers.SlotToEpoch(a.Data.Slot) != a.Data.Target.Epoch {
|
||||
return fmt.Errorf("data slot is not in the same epoch as target %d != %d", helpers.SlotToEpoch(a.Data.Slot), a.Data.Target.Epoch)
|
||||
}
|
||||
|
||||
// Verify beacon node has seen the target block before.
|
||||
if !s.db.HasBlock(ctx, bytesutil.ToBytes32(tgt.Root)) {
|
||||
return ErrTargetRootNotInDB
|
||||
}
|
||||
|
||||
// Verify attestation target has had a valid pre state produced by the target block.
|
||||
baseState, err := s.verifyAttPreState(ctx, tgt)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Verify attestation target is from current epoch or previous epoch.
|
||||
if err := s.verifyAttTargetEpoch(ctx, baseState.GenesisTime, uint64(time.Now().Unix()), tgt); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Verify Attestations cannot be from future epochs.
|
||||
if err := helpers.VerifySlotTime(baseState.GenesisTime, tgtSlot); err != nil {
|
||||
return errors.Wrap(err, "could not verify attestation target slot")
|
||||
}
|
||||
|
||||
// Verify attestation beacon block is known and not from the future.
|
||||
if err := s.verifyBeaconBlock(ctx, a.Data); err != nil {
|
||||
return errors.Wrap(err, "could not verify attestation beacon block")
|
||||
}
|
||||
|
||||
// Store target checkpoint state if not yet seen.
|
||||
baseState, err = s.saveCheckpointState(ctx, baseState, tgt)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Verify attestations can only affect the fork choice of subsequent slots.
|
||||
if err := helpers.VerifySlotTime(baseState.GenesisTime, a.Data.Slot+1); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Use the target state to to validate attestation and calculate the committees.
|
||||
indexedAtt, err := s.verifyAttestation(ctx, baseState, a)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Update every validator's latest vote.
|
||||
if err := s.updateAttVotes(ctx, indexedAtt, tgt.Root, tgt.Epoch); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := s.db.SaveAttestation(ctx, a); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log := log.WithFields(logrus.Fields{
|
||||
"Slot": a.Data.Slot,
|
||||
"Index": a.Data.CommitteeIndex,
|
||||
"AggregatedBitfield": fmt.Sprintf("%08b", a.AggregationBits),
|
||||
"BeaconBlockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(a.Data.BeaconBlockRoot)),
|
||||
})
|
||||
log.Debug("Updated latest votes")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// verifyAttPreState validates input attested check point has a valid pre-state.
|
||||
func (s *Store) verifyAttPreState(ctx context.Context, c *ethpb.Checkpoint) (*pb.BeaconState, error) {
|
||||
baseState, err := s.db.State(ctx, bytesutil.ToBytes32(c.Root))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get pre state for slot %d", helpers.StartSlot(c.Epoch))
|
||||
}
|
||||
if baseState == nil {
|
||||
return nil, fmt.Errorf("pre state of target block %d does not exist", helpers.StartSlot(c.Epoch))
|
||||
}
|
||||
return baseState, nil
|
||||
}
|
||||
|
||||
// verifyAttTargetEpoch validates attestation is from the current or previous epoch.
|
||||
func (s *Store) verifyAttTargetEpoch(ctx context.Context, genesisTime uint64, nowTime uint64, c *ethpb.Checkpoint) error {
|
||||
currentSlot := (nowTime - genesisTime) / params.BeaconConfig().SecondsPerSlot
|
||||
currentEpoch := helpers.SlotToEpoch(currentSlot)
|
||||
var prevEpoch uint64
|
||||
// Prevents previous epoch under flow
|
||||
if currentEpoch > 1 {
|
||||
prevEpoch = currentEpoch - 1
|
||||
}
|
||||
if c.Epoch != prevEpoch && c.Epoch != currentEpoch {
|
||||
return fmt.Errorf("target epoch %d does not match current epoch %d or prev epoch %d", c.Epoch, currentEpoch, prevEpoch)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// verifyBeaconBlock verifies beacon head block is known and not from the future.
|
||||
func (s *Store) verifyBeaconBlock(ctx context.Context, data *ethpb.AttestationData) error {
|
||||
b, err := s.db.Block(ctx, bytesutil.ToBytes32(data.BeaconBlockRoot))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if b == nil || b.Block == nil {
|
||||
return fmt.Errorf("beacon block %#x does not exist", bytesutil.Trunc(data.BeaconBlockRoot))
|
||||
}
|
||||
if b.Block.Slot > data.Slot {
|
||||
return fmt.Errorf("could not process attestation for future block, %d > %d", b.Block.Slot, data.Slot)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// saveCheckpointState saves and returns the processed state with the associated check point.
|
||||
func (s *Store) saveCheckpointState(ctx context.Context, baseState *pb.BeaconState, c *ethpb.Checkpoint) (*pb.BeaconState, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "forkchoice.saveCheckpointState")
|
||||
defer span.End()
|
||||
|
||||
s.checkpointStateLock.Lock()
|
||||
defer s.checkpointStateLock.Unlock()
|
||||
cachedState, err := s.checkpointState.StateByCheckpoint(c)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get cached checkpoint state")
|
||||
}
|
||||
if cachedState != nil {
|
||||
return cachedState, nil
|
||||
}
|
||||
|
||||
// Advance slots only when it's higher than current state slot.
|
||||
if helpers.StartSlot(c.Epoch) > baseState.Slot {
|
||||
stateCopy := proto.Clone(baseState).(*pb.BeaconState)
|
||||
stateCopy, err = state.ProcessSlots(ctx, stateCopy, helpers.StartSlot(c.Epoch))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not process slots up to %d", helpers.StartSlot(c.Epoch))
|
||||
}
|
||||
|
||||
if err := s.checkpointState.AddCheckpointState(&cache.CheckpointState{
|
||||
Checkpoint: c,
|
||||
State: stateCopy,
|
||||
}); err != nil {
|
||||
return nil, errors.Wrap(err, "could not saved checkpoint state to cache")
|
||||
}
|
||||
|
||||
return stateCopy, nil
|
||||
}
|
||||
|
||||
return baseState, nil
|
||||
}
|
||||
|
||||
// verifyAttestation validates input attestation is valid.
|
||||
func (s *Store) verifyAttestation(ctx context.Context, baseState *pb.BeaconState, a *ethpb.Attestation) (*ethpb.IndexedAttestation, error) {
|
||||
committee, err := helpers.BeaconCommitteeFromState(baseState, a.Data.Slot, a.Data.CommitteeIndex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
indexedAtt, err := blocks.ConvertToIndexed(ctx, a, committee)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not convert attestation to indexed attestation")
|
||||
}
|
||||
|
||||
if err := blocks.VerifyIndexedAttestation(ctx, baseState, indexedAtt); err != nil {
|
||||
|
||||
// TODO(3603): Delete the following signature verify fallback when issue 3603 closes.
|
||||
// When signature fails to verify with committee cache enabled at run time,
|
||||
// the following re-runs the same signature verify routine without cache in play.
|
||||
// This provides extra assurance that committee cache can't break run time.
|
||||
if err == blocks.ErrSigFailedToVerify {
|
||||
committee, err = helpers.BeaconCommitteeWithoutCache(baseState, a.Data.Slot, a.Data.CommitteeIndex)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not convert attestation to indexed attestation without cache")
|
||||
}
|
||||
indexedAtt, err = blocks.ConvertToIndexed(ctx, a, committee)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not convert attestation to indexed attestation")
|
||||
}
|
||||
if err := blocks.VerifyIndexedAttestation(ctx, baseState, indexedAtt); err != nil {
|
||||
return nil, errors.Wrap(err, "could not verify indexed attestation without cache")
|
||||
}
|
||||
sigFailsToVerify.Inc()
|
||||
return indexedAtt, nil
|
||||
}
|
||||
|
||||
return nil, errors.Wrap(err, "could not verify indexed attestation")
|
||||
}
|
||||
return indexedAtt, nil
|
||||
}
|
||||
|
||||
// updateAttVotes updates validator's latest votes based on the incoming attestation.
|
||||
func (s *Store) updateAttVotes(
|
||||
ctx context.Context,
|
||||
indexedAtt *ethpb.IndexedAttestation,
|
||||
tgtRoot []byte,
|
||||
tgtEpoch uint64) error {
|
||||
|
||||
indices := indexedAtt.AttestingIndices
|
||||
s.voteLock.Lock()
|
||||
defer s.voteLock.Unlock()
|
||||
for _, i := range indices {
|
||||
vote, ok := s.latestVoteMap[i]
|
||||
if !ok || tgtEpoch > vote.Epoch {
|
||||
s.latestVoteMap[i] = &pb.ValidatorLatestVote{
|
||||
Epoch: tgtEpoch,
|
||||
Root: tgtRoot,
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// aggregatedAttestation returns the aggregated attestation after checking saved one in db.
|
||||
func (s *Store) aggregatedAttestations(ctx context.Context, att *ethpb.Attestation) ([]*ethpb.Attestation, error) {
|
||||
r, err := ssz.HashTreeRoot(att.Data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
saved, err := s.db.AttestationsByDataRoot(ctx, r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if saved == nil {
|
||||
return []*ethpb.Attestation{att}, nil
|
||||
}
|
||||
|
||||
aggregated, err := helpers.AggregateAttestations(append(saved, att))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return aggregated, nil
|
||||
}
|
||||
369
beacon-chain/blockchain/forkchoice/process_attestation_test.go
Normal file
369
beacon-chain/blockchain/forkchoice/process_attestation_test.go
Normal file
@@ -0,0 +1,369 @@
|
||||
package forkchoice
|
||||
|
||||
import (
|
||||
"context"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/go-ssz"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
)
|
||||
|
||||
func TestStore_OnAttestation(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
|
||||
_, err := blockTree1(db, []byte{'g'})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
BlkWithOutState := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: 0}}
|
||||
if err := db.SaveBlock(ctx, BlkWithOutState); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
BlkWithOutStateRoot, _ := ssz.HashTreeRoot(BlkWithOutState.Block)
|
||||
|
||||
BlkWithStateBadAtt := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: 1}}
|
||||
if err := db.SaveBlock(ctx, BlkWithStateBadAtt); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
BlkWithStateBadAttRoot, _ := ssz.HashTreeRoot(BlkWithStateBadAtt.Block)
|
||||
if err := store.db.SaveState(ctx, &pb.BeaconState{}, BlkWithStateBadAttRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
BlkWithValidState := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: 2}}
|
||||
if err := db.SaveBlock(ctx, BlkWithValidState); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
BlkWithValidStateRoot, _ := ssz.HashTreeRoot(BlkWithValidState.Block)
|
||||
if err := store.db.SaveState(ctx, &pb.BeaconState{
|
||||
Fork: &pb.Fork{
|
||||
Epoch: 0,
|
||||
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
},
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
}, BlkWithValidStateRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
a *ethpb.Attestation
|
||||
s *pb.BeaconState
|
||||
wantErr bool
|
||||
wantErrString string
|
||||
}{
|
||||
{
|
||||
name: "attestation's data slot not aligned with target vote",
|
||||
a: ðpb.Attestation{Data: ðpb.AttestationData{Slot: params.BeaconConfig().SlotsPerEpoch, Target: ðpb.Checkpoint{}}},
|
||||
s: &pb.BeaconState{},
|
||||
wantErr: true,
|
||||
wantErrString: "data slot is not in the same epoch as target 1 != 0",
|
||||
},
|
||||
{
|
||||
name: "attestation's target root not in db",
|
||||
a: ðpb.Attestation{Data: ðpb.AttestationData{Target: ðpb.Checkpoint{Root: []byte{'A'}}}},
|
||||
s: &pb.BeaconState{},
|
||||
wantErr: true,
|
||||
wantErrString: "target root does not exist in db",
|
||||
},
|
||||
{
|
||||
name: "no pre state for attestations's target block",
|
||||
a: ðpb.Attestation{Data: ðpb.AttestationData{Target: ðpb.Checkpoint{Root: BlkWithOutStateRoot[:]}}},
|
||||
s: &pb.BeaconState{},
|
||||
wantErr: true,
|
||||
wantErrString: "pre state of target block 0 does not exist",
|
||||
},
|
||||
{
|
||||
name: "process attestation doesn't match current epoch",
|
||||
a: ðpb.Attestation{Data: ðpb.AttestationData{Slot: 100 * params.BeaconConfig().SlotsPerEpoch, Target: ðpb.Checkpoint{Epoch: 100,
|
||||
Root: BlkWithStateBadAttRoot[:]}}},
|
||||
s: &pb.BeaconState{},
|
||||
wantErr: true,
|
||||
wantErrString: "does not match current epoch",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if err := store.GenesisStore(
|
||||
ctx,
|
||||
ðpb.Checkpoint{Root: BlkWithValidStateRoot[:]},
|
||||
ðpb.Checkpoint{Root: BlkWithValidStateRoot[:]}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
err := store.OnAttestation(ctx, tt.a)
|
||||
if tt.wantErr {
|
||||
if !strings.Contains(err.Error(), tt.wantErrString) {
|
||||
t.Errorf("Store.OnAttestation() error = %v, wantErr = %v", err, tt.wantErrString)
|
||||
}
|
||||
} else {
|
||||
t.Error(err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestStore_SaveCheckpointState(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
params.UseDemoBeaconConfig()
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
|
||||
s := &pb.BeaconState{
|
||||
Fork: &pb.Fork{
|
||||
Epoch: 0,
|
||||
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
},
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
StateRoots: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
BlockRoots: make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot),
|
||||
LatestBlockHeader: ðpb.BeaconBlockHeader{},
|
||||
JustificationBits: []byte{0},
|
||||
Slashings: make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector),
|
||||
FinalizedCheckpoint: ðpb.Checkpoint{},
|
||||
}
|
||||
r := [32]byte{'g'}
|
||||
if err := store.db.SaveState(ctx, s, r); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := store.GenesisStore(ctx, ðpb.Checkpoint{Root: r[:]}, ðpb.Checkpoint{Root: r[:]}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
cp1 := ðpb.Checkpoint{Epoch: 1, Root: []byte{'A'}}
|
||||
s1, err := store.saveCheckpointState(ctx, s, cp1)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if s1.Slot != 1*params.BeaconConfig().SlotsPerEpoch {
|
||||
t.Errorf("Wanted state slot: %d, got: %d", 1*params.BeaconConfig().SlotsPerEpoch, s1.Slot)
|
||||
}
|
||||
|
||||
cp2 := ðpb.Checkpoint{Epoch: 2, Root: []byte{'B'}}
|
||||
s2, err := store.saveCheckpointState(ctx, s, cp2)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if s2.Slot != 2*params.BeaconConfig().SlotsPerEpoch {
|
||||
t.Errorf("Wanted state slot: %d, got: %d", 2*params.BeaconConfig().SlotsPerEpoch, s2.Slot)
|
||||
}
|
||||
|
||||
s1, err = store.saveCheckpointState(ctx, nil, cp1)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if s1.Slot != 1*params.BeaconConfig().SlotsPerEpoch {
|
||||
t.Errorf("Wanted state slot: %d, got: %d", 1*params.BeaconConfig().SlotsPerEpoch, s1.Slot)
|
||||
}
|
||||
|
||||
s1, err = store.checkpointState.StateByCheckpoint(cp1)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if s1.Slot != 1*params.BeaconConfig().SlotsPerEpoch {
|
||||
t.Errorf("Wanted state slot: %d, got: %d", 1*params.BeaconConfig().SlotsPerEpoch, s1.Slot)
|
||||
}
|
||||
|
||||
s2, err = store.checkpointState.StateByCheckpoint(cp2)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if s2.Slot != 2*params.BeaconConfig().SlotsPerEpoch {
|
||||
t.Errorf("Wanted state slot: %d, got: %d", 2*params.BeaconConfig().SlotsPerEpoch, s2.Slot)
|
||||
}
|
||||
|
||||
s.Slot = params.BeaconConfig().SlotsPerEpoch + 1
|
||||
if err := store.GenesisStore(ctx, ðpb.Checkpoint{Root: r[:]}, ðpb.Checkpoint{Root: r[:]}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
cp3 := ðpb.Checkpoint{Epoch: 1, Root: []byte{'C'}}
|
||||
s3, err := store.saveCheckpointState(ctx, s, cp3)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if s3.Slot != s.Slot {
|
||||
t.Errorf("Wanted state slot: %d, got: %d", s.Slot, s3.Slot)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStore_ReturnAggregatedAttestation(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
a1 := ðpb.Attestation{Data: ðpb.AttestationData{}, AggregationBits: bitfield.Bitlist{0x02}}
|
||||
err := store.db.SaveAttestation(ctx, a1)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
a2 := ðpb.Attestation{Data: ðpb.AttestationData{}, AggregationBits: bitfield.Bitlist{0x03}}
|
||||
saved, err := store.aggregatedAttestations(ctx, a2)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual([]*ethpb.Attestation{a2}, saved) {
|
||||
t.Error("did not retrieve saved attestation")
|
||||
}
|
||||
}
|
||||
|
||||
func TestStore_UpdateCheckpointState(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
|
||||
epoch := uint64(1)
|
||||
baseState, _ := testutil.DeterministicGenesisState(t, 1)
|
||||
baseState.Slot = epoch * params.BeaconConfig().SlotsPerEpoch
|
||||
checkpoint := ðpb.Checkpoint{Epoch: epoch}
|
||||
returned, err := store.saveCheckpointState(ctx, baseState, checkpoint)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(baseState, returned) {
|
||||
t.Error("Incorrectly returned base state")
|
||||
}
|
||||
|
||||
cached, err := store.checkpointState.StateByCheckpoint(checkpoint)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if cached != nil {
|
||||
t.Error("State shouldn't have been cached")
|
||||
}
|
||||
|
||||
epoch = uint64(2)
|
||||
newCheckpoint := ðpb.Checkpoint{Epoch: epoch}
|
||||
returned, err = store.saveCheckpointState(ctx, baseState, newCheckpoint)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
baseState, err = state.ProcessSlots(ctx, baseState, helpers.StartSlot(newCheckpoint.Epoch))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(baseState, returned) {
|
||||
t.Error("Incorrectly returned base state")
|
||||
}
|
||||
|
||||
cached, err = store.checkpointState.StateByCheckpoint(newCheckpoint)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(returned, cached) {
|
||||
t.Error("Incorrectly cached base state")
|
||||
}
|
||||
}
|
||||
|
||||
func TestAttEpoch_MatchPrevEpoch(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
if err := store.verifyAttTargetEpoch(
|
||||
ctx,
|
||||
0,
|
||||
params.BeaconConfig().SlotsPerEpoch*params.BeaconConfig().SecondsPerSlot,
|
||||
ðpb.Checkpoint{}); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAttEpoch_MatchCurrentEpoch(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
if err := store.verifyAttTargetEpoch(
|
||||
ctx,
|
||||
0,
|
||||
params.BeaconConfig().SlotsPerEpoch*params.BeaconConfig().SecondsPerSlot,
|
||||
ðpb.Checkpoint{Epoch: 1}); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAttEpoch_NotMatch(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
if err := store.verifyAttTargetEpoch(
|
||||
ctx,
|
||||
0,
|
||||
2*params.BeaconConfig().SlotsPerEpoch*params.BeaconConfig().SecondsPerSlot,
|
||||
ðpb.Checkpoint{}); !strings.Contains(err.Error(),
|
||||
"target epoch 0 does not match current epoch 2 or prev epoch 1") {
|
||||
t.Error("Did not receive wanted error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestVerifyBeaconBlock_NoBlock(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
s := NewForkChoiceService(ctx, db)
|
||||
d := ðpb.AttestationData{}
|
||||
if err := s.verifyBeaconBlock(ctx, d); !strings.Contains(err.Error(), "beacon block does not exist") {
|
||||
t.Error("Did not receive the wanted error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestVerifyBeaconBlock_futureBlock(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
s := NewForkChoiceService(ctx, db)
|
||||
b := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: 2}}
|
||||
s.db.SaveBlock(ctx, b)
|
||||
r, _ := ssz.HashTreeRoot(b.Block)
|
||||
d := ðpb.AttestationData{Slot: 1, BeaconBlockRoot: r[:]}
|
||||
|
||||
if err := s.verifyBeaconBlock(ctx, d); !strings.Contains(err.Error(), "could not process attestation for future block") {
|
||||
t.Error("Did not receive the wanted error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestVerifyBeaconBlock_OK(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
s := NewForkChoiceService(ctx, db)
|
||||
b := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: 2}}
|
||||
s.db.SaveBlock(ctx, b)
|
||||
r, _ := ssz.HashTreeRoot(b.Block)
|
||||
d := ðpb.AttestationData{Slot: 2, BeaconBlockRoot: r[:]}
|
||||
|
||||
if err := s.verifyBeaconBlock(ctx, d); err != nil {
|
||||
t.Error("Did not receive the wanted error")
|
||||
}
|
||||
}
|
||||
572
beacon-chain/blockchain/forkchoice/process_block.go
Normal file
572
beacon-chain/blockchain/forkchoice/process_block.go
Normal file
@@ -0,0 +1,572 @@
|
||||
package forkchoice
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/pkg/errors"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/go-ssz"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db/filters"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/flags"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/featureconfig"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/traceutil"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// OnBlock is called when a gossip block is received. It runs regular state transition on the block and
|
||||
// update fork choice store.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
// def on_block(store: Store, block: BeaconBlock) -> None:
|
||||
// # Make a copy of the state to avoid mutability issues
|
||||
// assert block.parent_root in store.block_states
|
||||
// pre_state = store.block_states[block.parent_root].copy()
|
||||
// # Blocks cannot be in the future. If they are, their consideration must be delayed until the are in the past.
|
||||
// assert store.time >= pre_state.genesis_time + block.slot * SECONDS_PER_SLOT
|
||||
// # Add new block to the store
|
||||
// store.blocks[signing_root(block)] = block
|
||||
// # Check block is a descendant of the finalized block
|
||||
// assert (
|
||||
// get_ancestor(store, signing_root(block), store.blocks[store.finalized_checkpoint.root].slot) ==
|
||||
// store.finalized_checkpoint.root
|
||||
// )
|
||||
// # Check that block is later than the finalized epoch slot
|
||||
// assert block.slot > compute_start_slot_of_epoch(store.finalized_checkpoint.epoch)
|
||||
// # Check the block is valid and compute the post-state
|
||||
// state = state_transition(pre_state, block)
|
||||
// # Add new state for this block to the store
|
||||
// store.block_states[signing_root(block)] = state
|
||||
//
|
||||
// # Update justified checkpoint
|
||||
// if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch:
|
||||
// if state.current_justified_checkpoint.epoch > store.best_justified_checkpoint.epoch:
|
||||
// store.best_justified_checkpoint = state.current_justified_checkpoint
|
||||
//
|
||||
// # Update finalized checkpoint
|
||||
// if state.finalized_checkpoint.epoch > store.finalized_checkpoint.epoch:
|
||||
// store.finalized_checkpoint = state.finalized_checkpoint
|
||||
func (s *Store) OnBlock(ctx context.Context, signed *ethpb.SignedBeaconBlock) error {
|
||||
ctx, span := trace.StartSpan(ctx, "forkchoice.onBlock")
|
||||
defer span.End()
|
||||
|
||||
if signed == nil || signed.Block == nil {
|
||||
return errors.New("nil block")
|
||||
}
|
||||
|
||||
b := signed.Block
|
||||
|
||||
// Retrieve incoming block's pre state.
|
||||
preState, err := s.getBlockPreState(ctx, b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
preStateValidatorCount := len(preState.Validators)
|
||||
|
||||
root, err := ssz.HashTreeRoot(b)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "could not get signing root of block %d", b.Slot)
|
||||
}
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": b.Slot,
|
||||
"root": fmt.Sprintf("0x%s...", hex.EncodeToString(root[:])[:8]),
|
||||
}).Info("Executing state transition on block")
|
||||
postState, err := state.ExecuteStateTransition(ctx, preState, signed)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not execute state transition")
|
||||
}
|
||||
|
||||
if err := s.db.SaveBlock(ctx, signed); err != nil {
|
||||
return errors.Wrapf(err, "could not save block from slot %d", b.Slot)
|
||||
}
|
||||
if err := s.db.SaveState(ctx, postState, root); err != nil {
|
||||
return errors.Wrap(err, "could not save state")
|
||||
}
|
||||
|
||||
// Update justified check point.
|
||||
if postState.CurrentJustifiedCheckpoint.Epoch > s.justifiedCheckpt.Epoch {
|
||||
if err := s.updateJustified(ctx, postState); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Update finalized check point.
|
||||
// Prune the block cache and helper caches on every new finalized epoch.
|
||||
if postState.FinalizedCheckpoint.Epoch > s.finalizedCheckpt.Epoch {
|
||||
if err := s.db.SaveFinalizedCheckpoint(ctx, postState.FinalizedCheckpoint); err != nil {
|
||||
return errors.Wrap(err, "could not save finalized checkpoint")
|
||||
}
|
||||
|
||||
startSlot := helpers.StartSlot(s.prevFinalizedCheckpt.Epoch)
|
||||
endSlot := helpers.StartSlot(s.finalizedCheckpt.Epoch)
|
||||
if endSlot > startSlot {
|
||||
if err := s.rmStatesOlderThanLastFinalized(ctx, startSlot, endSlot); err != nil {
|
||||
return errors.Wrapf(err, "could not delete states prior to finalized check point, range: %d, %d",
|
||||
startSlot, endSlot)
|
||||
}
|
||||
}
|
||||
|
||||
s.prevFinalizedCheckpt = s.finalizedCheckpt
|
||||
s.finalizedCheckpt = postState.FinalizedCheckpoint
|
||||
}
|
||||
|
||||
// Update validator indices in database as needed.
|
||||
if err := s.saveNewValidators(ctx, preStateValidatorCount, postState); err != nil {
|
||||
return errors.Wrap(err, "could not save finalized checkpoint")
|
||||
}
|
||||
// Save the unseen attestations from block to db.
|
||||
if err := s.saveNewBlockAttestations(ctx, b.Body.Attestations); err != nil {
|
||||
return errors.Wrap(err, "could not save attestations")
|
||||
}
|
||||
|
||||
// Epoch boundary bookkeeping such as logging epoch summaries.
|
||||
if postState.Slot >= s.nextEpochBoundarySlot {
|
||||
logEpochData(postState)
|
||||
reportEpochMetrics(postState)
|
||||
|
||||
// Update committees cache at epoch boundary slot.
|
||||
if featureconfig.Get().EnableNewCache {
|
||||
if err := helpers.UpdateCommitteeCache(postState, helpers.CurrentEpoch(postState)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
s.nextEpochBoundarySlot = helpers.StartSlot(helpers.NextEpoch(postState))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// OnBlockInitialSyncStateTransition is called when an initial sync block is received.
|
||||
// It runs state transition on the block and without any BLS verification. The BLS verification
|
||||
// includes proposer signature, randao and attestation's aggregated signature. It also does not save
|
||||
// attestations.
|
||||
func (s *Store) OnBlockInitialSyncStateTransition(ctx context.Context, signed *ethpb.SignedBeaconBlock) error {
|
||||
ctx, span := trace.StartSpan(ctx, "forkchoice.onBlock")
|
||||
defer span.End()
|
||||
|
||||
if signed == nil || signed.Block == nil {
|
||||
return errors.New("nil block")
|
||||
}
|
||||
|
||||
b := signed.Block
|
||||
|
||||
s.initSyncStateLock.Lock()
|
||||
defer s.initSyncStateLock.Unlock()
|
||||
|
||||
// Retrieve incoming block's pre state.
|
||||
preState, err := s.cachedPreState(ctx, b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
preStateValidatorCount := len(preState.Validators)
|
||||
|
||||
log.WithField("slot", b.Slot).Debug("Executing state transition on block")
|
||||
|
||||
postState, err := state.ExecuteStateTransitionNoVerify(ctx, preState, signed)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not execute state transition")
|
||||
}
|
||||
|
||||
if err := s.db.SaveBlock(ctx, signed); err != nil {
|
||||
return errors.Wrapf(err, "could not save block from slot %d", b.Slot)
|
||||
}
|
||||
root, err := ssz.HashTreeRoot(b)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "could not get signing root of block %d", b.Slot)
|
||||
}
|
||||
|
||||
if featureconfig.Get().InitSyncCacheState {
|
||||
s.initSyncState[root] = postState
|
||||
} else {
|
||||
if err := s.db.SaveState(ctx, postState, root); err != nil {
|
||||
return errors.Wrap(err, "could not save state")
|
||||
}
|
||||
}
|
||||
|
||||
// Update justified check point.
|
||||
if postState.CurrentJustifiedCheckpoint.Epoch > s.justifiedCheckpt.Epoch {
|
||||
if err := s.updateJustified(ctx, postState); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Update finalized check point.
|
||||
// Prune the block cache and helper caches on every new finalized epoch.
|
||||
if postState.FinalizedCheckpoint.Epoch > s.finalizedCheckpt.Epoch {
|
||||
startSlot := helpers.StartSlot(s.prevFinalizedCheckpt.Epoch)
|
||||
endSlot := helpers.StartSlot(s.finalizedCheckpt.Epoch)
|
||||
if endSlot > startSlot {
|
||||
if err := s.rmStatesOlderThanLastFinalized(ctx, startSlot, endSlot); err != nil {
|
||||
return errors.Wrapf(err, "could not delete states prior to finalized check point, range: %d, %d",
|
||||
startSlot, endSlot)
|
||||
}
|
||||
}
|
||||
|
||||
if err := s.saveInitState(ctx, postState); err != nil {
|
||||
return errors.Wrap(err, "could not save init sync finalized state")
|
||||
}
|
||||
|
||||
if err := s.db.SaveFinalizedCheckpoint(ctx, postState.FinalizedCheckpoint); err != nil {
|
||||
return errors.Wrap(err, "could not save finalized checkpoint")
|
||||
}
|
||||
|
||||
s.prevFinalizedCheckpt = s.finalizedCheckpt
|
||||
s.finalizedCheckpt = postState.FinalizedCheckpoint
|
||||
}
|
||||
|
||||
// Update validator indices in database as needed.
|
||||
if err := s.saveNewValidators(ctx, preStateValidatorCount, postState); err != nil {
|
||||
return errors.Wrap(err, "could not save finalized checkpoint")
|
||||
}
|
||||
|
||||
if flags.Get().EnableArchive {
|
||||
// Save the unseen attestations from block to db.
|
||||
if err := s.saveNewBlockAttestations(ctx, b.Body.Attestations); err != nil {
|
||||
return errors.Wrap(err, "could not save attestations")
|
||||
}
|
||||
}
|
||||
|
||||
// Epoch boundary bookkeeping such as logging epoch summaries.
|
||||
if postState.Slot >= s.nextEpochBoundarySlot {
|
||||
reportEpochMetrics(postState)
|
||||
|
||||
s.nextEpochBoundarySlot = helpers.StartSlot(helpers.NextEpoch(postState))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// getBlockPreState returns the pre state of an incoming block. It uses the parent root of the block
|
||||
// to retrieve the state in DB. It verifies the pre state's validity and the incoming block
|
||||
// is in the correct time window.
|
||||
func (s *Store) getBlockPreState(ctx context.Context, b *ethpb.BeaconBlock) (*pb.BeaconState, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "forkchoice.getBlockPreState")
|
||||
defer span.End()
|
||||
|
||||
// Verify incoming block has a valid pre state.
|
||||
preState, err := s.verifyBlkPreState(ctx, b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Verify block slot time is not from the feature.
|
||||
if err := helpers.VerifySlotTime(preState.GenesisTime, b.Slot); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Verify block is a descendent of a finalized block.
|
||||
if err := s.verifyBlkDescendant(ctx, bytesutil.ToBytes32(b.ParentRoot), b.Slot); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Verify block is later than the finalized epoch slot.
|
||||
if err := s.verifyBlkFinalizedSlot(b); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return preState, nil
|
||||
}
|
||||
|
||||
// verifyBlkPreState validates input block has a valid pre-state.
|
||||
func (s *Store) verifyBlkPreState(ctx context.Context, b *ethpb.BeaconBlock) (*pb.BeaconState, error) {
|
||||
preState, err := s.db.State(ctx, bytesutil.ToBytes32(b.ParentRoot))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get pre state for slot %d", b.Slot)
|
||||
}
|
||||
if preState == nil {
|
||||
return nil, fmt.Errorf("pre state of slot %d does not exist", b.Slot)
|
||||
}
|
||||
return preState, nil
|
||||
}
|
||||
|
||||
// verifyBlkDescendant validates input block root is a descendant of the
|
||||
// current finalized block root.
|
||||
func (s *Store) verifyBlkDescendant(ctx context.Context, root [32]byte, slot uint64) error {
|
||||
ctx, span := trace.StartSpan(ctx, "forkchoice.verifyBlkDescendant")
|
||||
defer span.End()
|
||||
|
||||
finalizedBlkSigned, err := s.db.Block(ctx, bytesutil.ToBytes32(s.finalizedCheckpt.Root))
|
||||
if err != nil || finalizedBlkSigned == nil || finalizedBlkSigned.Block == nil {
|
||||
return errors.Wrap(err, "could not get finalized block")
|
||||
}
|
||||
finalizedBlk := finalizedBlkSigned.Block
|
||||
|
||||
bFinalizedRoot, err := s.ancestor(ctx, root[:], finalizedBlk.Slot)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get finalized block root")
|
||||
}
|
||||
if !bytes.Equal(bFinalizedRoot, s.finalizedCheckpt.Root) {
|
||||
err := fmt.Errorf("block from slot %d is not a descendent of the current finalized block slot %d, %#x != %#x",
|
||||
slot, finalizedBlk.Slot, bytesutil.Trunc(bFinalizedRoot), bytesutil.Trunc(s.finalizedCheckpt.Root))
|
||||
traceutil.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// verifyBlkFinalizedSlot validates input block is not less than or equal
|
||||
// to current finalized slot.
|
||||
func (s *Store) verifyBlkFinalizedSlot(b *ethpb.BeaconBlock) error {
|
||||
finalizedSlot := helpers.StartSlot(s.finalizedCheckpt.Epoch)
|
||||
if finalizedSlot >= b.Slot {
|
||||
return fmt.Errorf("block is equal or earlier than finalized block, slot %d < slot %d", b.Slot, finalizedSlot)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// saveNewValidators saves newly added validator index from state to db. Does nothing if validator count has not
|
||||
// changed.
|
||||
func (s *Store) saveNewValidators(ctx context.Context, preStateValidatorCount int, postState *pb.BeaconState) error {
|
||||
postStateValidatorCount := len(postState.Validators)
|
||||
if preStateValidatorCount != postStateValidatorCount {
|
||||
for i := preStateValidatorCount; i < postStateValidatorCount; i++ {
|
||||
pubKey := postState.Validators[i].PublicKey
|
||||
if err := s.db.SaveValidatorIndex(ctx, bytesutil.ToBytes48(pubKey), uint64(i)); err != nil {
|
||||
return errors.Wrapf(err, "could not save activated validator: %d", i)
|
||||
}
|
||||
log.WithFields(logrus.Fields{
|
||||
"index": i,
|
||||
"pubKey": hex.EncodeToString(bytesutil.Trunc(pubKey)),
|
||||
"totalValidatorCount": i + 1,
|
||||
}).Info("New validator index saved in DB")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// saveNewBlockAttestations saves the new attestations in block to DB.
|
||||
func (s *Store) saveNewBlockAttestations(ctx context.Context, atts []*ethpb.Attestation) error {
|
||||
attestations := make([]*ethpb.Attestation, 0, len(atts))
|
||||
for _, att := range atts {
|
||||
aggregated, err := s.aggregatedAttestations(ctx, att)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
attestations = append(attestations, aggregated...)
|
||||
}
|
||||
if err := s.db.SaveAttestations(ctx, atts); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// rmStatesOlderThanLastFinalized deletes the states in db since last finalized check point.
|
||||
func (s *Store) rmStatesOlderThanLastFinalized(ctx context.Context, startSlot uint64, endSlot uint64) error {
|
||||
ctx, span := trace.StartSpan(ctx, "forkchoice.rmStatesBySlots")
|
||||
defer span.End()
|
||||
|
||||
// Make sure start slot is not a skipped slot
|
||||
for i := startSlot; i > 0; i-- {
|
||||
filter := filters.NewFilter().SetStartSlot(i).SetEndSlot(i)
|
||||
b, err := s.db.Blocks(ctx, filter)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(b) > 0 {
|
||||
startSlot = i
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Make sure finalized slot is not a skipped slot.
|
||||
for i := endSlot; i > 0; i-- {
|
||||
filter := filters.NewFilter().SetStartSlot(i).SetEndSlot(i)
|
||||
b, err := s.db.Blocks(ctx, filter)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(b) > 0 {
|
||||
endSlot = i - 1
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Do not remove genesis state
|
||||
if startSlot == 0 {
|
||||
startSlot++
|
||||
}
|
||||
// If end slot comes less than start slot
|
||||
if endSlot < startSlot {
|
||||
endSlot = startSlot
|
||||
}
|
||||
|
||||
filter := filters.NewFilter().SetStartSlot(startSlot).SetEndSlot(endSlot)
|
||||
roots, err := s.db.BlockRoots(ctx, filter)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
roots, err = s.filterBlockRoots(ctx, roots)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := s.db.DeleteStates(ctx, roots); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// shouldUpdateCurrentJustified prevents bouncing attack, by only update conflicting justified
|
||||
// checkpoints in the fork choice if in the early slots of the epoch.
|
||||
// Otherwise, delay incorporation of new justified checkpoint until next epoch boundary.
|
||||
// See https://ethresear.ch/t/prevention-of-bouncing-attack-on-ffg/6114 for more detailed analysis and discussion.
|
||||
func (s *Store) shouldUpdateCurrentJustified(ctx context.Context, newJustifiedCheckpt *ethpb.Checkpoint) (bool, error) {
|
||||
if helpers.SlotsSinceEpochStarts(s.currentSlot()) < params.BeaconConfig().SafeSlotsToUpdateJustified {
|
||||
return true, nil
|
||||
}
|
||||
newJustifiedBlockSigned, err := s.db.Block(ctx, bytesutil.ToBytes32(newJustifiedCheckpt.Root))
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if newJustifiedBlockSigned == nil || newJustifiedBlockSigned.Block == nil {
|
||||
return false, errors.New("nil new justified block")
|
||||
}
|
||||
newJustifiedBlock := newJustifiedBlockSigned.Block
|
||||
if newJustifiedBlock.Slot <= helpers.StartSlot(s.justifiedCheckpt.Epoch) {
|
||||
return false, nil
|
||||
}
|
||||
justifiedBlockSigned, err := s.db.Block(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root))
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if justifiedBlockSigned == nil || justifiedBlockSigned.Block == nil {
|
||||
return false, errors.New("nil justified block")
|
||||
}
|
||||
justifiedBlock := justifiedBlockSigned.Block
|
||||
b, err := s.ancestor(ctx, newJustifiedCheckpt.Root, justifiedBlock.Slot)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if !bytes.Equal(b, s.justifiedCheckpt.Root) {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (s *Store) updateJustified(ctx context.Context, state *pb.BeaconState) error {
|
||||
if state.CurrentJustifiedCheckpoint.Epoch > s.bestJustifiedCheckpt.Epoch {
|
||||
s.bestJustifiedCheckpt = state.CurrentJustifiedCheckpoint
|
||||
}
|
||||
canUpdate, err := s.shouldUpdateCurrentJustified(ctx, state.CurrentJustifiedCheckpoint)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if canUpdate {
|
||||
s.justifiedCheckpt = state.CurrentJustifiedCheckpoint
|
||||
}
|
||||
|
||||
if featureconfig.Get().InitSyncCacheState {
|
||||
justifiedRoot := bytesutil.ToBytes32(state.CurrentJustifiedCheckpoint.Root)
|
||||
justifiedState := s.initSyncState[justifiedRoot]
|
||||
if err := s.db.SaveState(ctx, justifiedState, justifiedRoot); err != nil {
|
||||
return errors.Wrap(err, "could not save justified state")
|
||||
}
|
||||
}
|
||||
|
||||
return s.db.SaveJustifiedCheckpoint(ctx, state.CurrentJustifiedCheckpoint)
|
||||
}
|
||||
|
||||
// currentSlot returns the current slot based on time.
|
||||
func (s *Store) currentSlot() uint64 {
|
||||
return (uint64(time.Now().Unix()) - s.genesisTime) / params.BeaconConfig().SecondsPerSlot
|
||||
}
|
||||
|
||||
// updates justified check point in store if a better check point is known
|
||||
func (s *Store) updateJustifiedCheckpoint() {
|
||||
// Update at epoch boundary slot only
|
||||
if !helpers.IsEpochStart(s.currentSlot()) {
|
||||
return
|
||||
}
|
||||
if s.bestJustifiedCheckpt.Epoch > s.justifiedCheckpt.Epoch {
|
||||
s.justifiedCheckpt = s.bestJustifiedCheckpt
|
||||
}
|
||||
}
|
||||
|
||||
// This receives cached state in memory for initial sync only during initial sync.
|
||||
func (s *Store) cachedPreState(ctx context.Context, b *ethpb.BeaconBlock) (*pb.BeaconState, error) {
|
||||
if featureconfig.Get().InitSyncCacheState {
|
||||
preState := s.initSyncState[bytesutil.ToBytes32(b.ParentRoot)]
|
||||
var err error
|
||||
if preState == nil {
|
||||
preState, err = s.db.State(ctx, bytesutil.ToBytes32(b.ParentRoot))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get pre state for slot %d", b.Slot)
|
||||
}
|
||||
if preState == nil {
|
||||
return nil, fmt.Errorf("pre state of slot %d does not exist", b.Slot)
|
||||
}
|
||||
}
|
||||
return proto.Clone(preState).(*pb.BeaconState), nil
|
||||
}
|
||||
|
||||
preState, err := s.db.State(ctx, bytesutil.ToBytes32(b.ParentRoot))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get pre state for slot %d", b.Slot)
|
||||
}
|
||||
if preState == nil {
|
||||
return nil, fmt.Errorf("pre state of slot %d does not exist", b.Slot)
|
||||
}
|
||||
|
||||
return preState, nil
|
||||
}
|
||||
|
||||
// This saves every finalized state in DB during initial sync, needed as part of optimization to
|
||||
// use cache state during initial sync in case of restart.
|
||||
func (s *Store) saveInitState(ctx context.Context, state *pb.BeaconState) error {
|
||||
if !featureconfig.Get().InitSyncCacheState {
|
||||
return nil
|
||||
}
|
||||
finalizedRoot := bytesutil.ToBytes32(state.FinalizedCheckpoint.Root)
|
||||
fs := s.initSyncState[finalizedRoot]
|
||||
|
||||
if err := s.db.SaveState(ctx, fs, finalizedRoot); err != nil {
|
||||
return errors.Wrap(err, "could not save state")
|
||||
}
|
||||
for r, oldState := range s.initSyncState {
|
||||
if oldState.Slot < state.FinalizedCheckpoint.Epoch*params.BeaconConfig().SlotsPerEpoch {
|
||||
delete(s.initSyncState, r)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// This filters block roots that are not known as head root and finalized root in DB.
|
||||
// It serves as the last line of defence before we prune states.
|
||||
func (s *Store) filterBlockRoots(ctx context.Context, roots [][32]byte) ([][32]byte, error) {
|
||||
f, err := s.db.FinalizedCheckpoint(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fRoot := f.Root
|
||||
h, err := s.db.HeadBlock(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hRoot, err := ssz.SigningRoot(h)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
filtered := make([][32]byte, 0, len(roots))
|
||||
for _, root := range roots {
|
||||
if bytes.Equal(root[:], fRoot[:]) || bytes.Equal(root[:], hRoot[:]) {
|
||||
continue
|
||||
}
|
||||
filtered = append(filtered, root)
|
||||
}
|
||||
|
||||
return filtered, nil
|
||||
}
|
||||
610
beacon-chain/blockchain/forkchoice/process_block_test.go
Normal file
610
beacon-chain/blockchain/forkchoice/process_block_test.go
Normal file
@@ -0,0 +1,610 @@
|
||||
package forkchoice
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/go-ssz"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/featureconfig"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/stateutil"
|
||||
)
|
||||
|
||||
func TestStore_OnBlock(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
|
||||
genesisStateRoot, err := stateutil.HashTreeRootState(&pb.BeaconState{})
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
|
||||
if err := db.SaveBlock(ctx, genesis); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
validGenesisRoot, err := ssz.HashTreeRoot(genesis.Block)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if err := store.db.SaveState(ctx, &pb.BeaconState{}, validGenesisRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
roots, err := blockTree1(db, validGenesisRoot[:])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
random := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: 1, ParentRoot: validGenesisRoot[:]}}
|
||||
if err := db.SaveBlock(ctx, random); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
randomParentRoot, err := ssz.HashTreeRoot(random.Block)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if err := store.db.SaveState(ctx, &pb.BeaconState{}, randomParentRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
randomParentRoot2 := roots[1]
|
||||
if err := store.db.SaveState(ctx, &pb.BeaconState{}, bytesutil.ToBytes32(randomParentRoot2)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
blk *ethpb.BeaconBlock
|
||||
s *pb.BeaconState
|
||||
time uint64
|
||||
wantErrString string
|
||||
}{
|
||||
{
|
||||
name: "parent block root does not have a state",
|
||||
blk: ðpb.BeaconBlock{},
|
||||
s: &pb.BeaconState{},
|
||||
wantErrString: "pre state of slot 0 does not exist",
|
||||
},
|
||||
{
|
||||
name: "block is from the feature",
|
||||
blk: ðpb.BeaconBlock{ParentRoot: randomParentRoot[:], Slot: params.BeaconConfig().FarFutureEpoch},
|
||||
s: &pb.BeaconState{},
|
||||
wantErrString: "could not process slot from the future",
|
||||
},
|
||||
{
|
||||
name: "could not get finalized block",
|
||||
blk: ðpb.BeaconBlock{ParentRoot: randomParentRoot[:]},
|
||||
s: &pb.BeaconState{},
|
||||
wantErrString: "block from slot 0 is not a descendent of the current finalized block",
|
||||
},
|
||||
{
|
||||
name: "same slot as finalized block",
|
||||
blk: ðpb.BeaconBlock{Slot: 0, ParentRoot: randomParentRoot2},
|
||||
s: &pb.BeaconState{},
|
||||
wantErrString: "block is equal or earlier than finalized block, slot 0 < slot 0",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if err := store.GenesisStore(ctx, ðpb.Checkpoint{Root: validGenesisRoot[:]}, ðpb.Checkpoint{Root: validGenesisRoot[:]}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
store.finalizedCheckpt.Root = roots[0]
|
||||
|
||||
err := store.OnBlock(ctx, ðpb.SignedBeaconBlock{Block: tt.blk})
|
||||
if !strings.Contains(err.Error(), tt.wantErrString) {
|
||||
t.Errorf("Store.OnBlock() error = %v, wantErr = %v", err, tt.wantErrString)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestStore_SaveNewValidators(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
preCount := 2 // validators 0 and validators 1
|
||||
s := &pb.BeaconState{Validators: []*ethpb.Validator{
|
||||
{PublicKey: []byte{0}}, {PublicKey: []byte{1}},
|
||||
{PublicKey: []byte{2}}, {PublicKey: []byte{3}},
|
||||
}}
|
||||
if err := store.saveNewValidators(ctx, preCount, s); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !db.HasValidatorIndex(ctx, bytesutil.ToBytes48([]byte{2})) {
|
||||
t.Error("Wanted validator saved in db")
|
||||
}
|
||||
if !db.HasValidatorIndex(ctx, bytesutil.ToBytes48([]byte{3})) {
|
||||
t.Error("Wanted validator saved in db")
|
||||
}
|
||||
if db.HasValidatorIndex(ctx, bytesutil.ToBytes48([]byte{1})) {
|
||||
t.Error("validator not suppose to be saved in db")
|
||||
}
|
||||
}
|
||||
|
||||
func TestStore_SavesNewBlockAttestations(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
a1 := ðpb.Attestation{Data: ðpb.AttestationData{}, AggregationBits: bitfield.Bitlist{0b101}}
|
||||
a2 := ðpb.Attestation{Data: ðpb.AttestationData{BeaconBlockRoot: []byte{'A'}}, AggregationBits: bitfield.Bitlist{0b110}}
|
||||
r1, _ := ssz.HashTreeRoot(a1.Data)
|
||||
r2, _ := ssz.HashTreeRoot(a2.Data)
|
||||
|
||||
if err := store.saveNewBlockAttestations(ctx, []*ethpb.Attestation{a1, a2}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
saved, err := store.db.AttestationsByDataRoot(ctx, r1)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual([]*ethpb.Attestation{a1}, saved) {
|
||||
t.Error("did not retrieve saved attestation")
|
||||
}
|
||||
|
||||
saved, err = store.db.AttestationsByDataRoot(ctx, r2)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual([]*ethpb.Attestation{a2}, saved) {
|
||||
t.Error("did not retrieve saved attestation")
|
||||
}
|
||||
|
||||
a1 = ðpb.Attestation{Data: ðpb.AttestationData{}, AggregationBits: bitfield.Bitlist{0b111}}
|
||||
a2 = ðpb.Attestation{Data: ðpb.AttestationData{BeaconBlockRoot: []byte{'A'}}, AggregationBits: bitfield.Bitlist{0b111}}
|
||||
|
||||
if err := store.saveNewBlockAttestations(ctx, []*ethpb.Attestation{a1, a2}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
saved, err = store.db.AttestationsByDataRoot(ctx, r1)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual([]*ethpb.Attestation{a1}, saved) {
|
||||
t.Error("did not retrieve saved attestation")
|
||||
}
|
||||
|
||||
saved, err = store.db.AttestationsByDataRoot(ctx, r2)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual([]*ethpb.Attestation{a2}, saved) {
|
||||
t.Error("did not retrieve saved attestation")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRemoveStateSinceLastFinalized(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
params.UseMinimalConfig()
|
||||
defer params.UseMainnetConfig()
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
|
||||
// Save 100 blocks in DB, each has a state.
|
||||
numBlocks := 100
|
||||
totalBlocks := make([]*ethpb.SignedBeaconBlock, numBlocks)
|
||||
blockRoots := make([][32]byte, 0)
|
||||
for i := 0; i < len(totalBlocks); i++ {
|
||||
totalBlocks[i] = ðpb.SignedBeaconBlock{
|
||||
Block: ðpb.BeaconBlock{
|
||||
Slot: uint64(i),
|
||||
},
|
||||
}
|
||||
r, err := ssz.HashTreeRoot(totalBlocks[i].Block)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := store.db.SaveState(ctx, &pb.BeaconState{Slot: uint64(i)}, r); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := store.db.SaveBlock(ctx, totalBlocks[i]); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
blockRoots = append(blockRoots, r)
|
||||
if err := store.db.SaveHeadBlockRoot(ctx, r); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// New finalized epoch: 1
|
||||
finalizedEpoch := uint64(1)
|
||||
finalizedSlot := finalizedEpoch * params.BeaconConfig().SlotsPerEpoch
|
||||
endSlot := helpers.StartSlot(finalizedEpoch+1) - 1 // Inclusive
|
||||
if err := store.rmStatesOlderThanLastFinalized(ctx, 0, endSlot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
for _, r := range blockRoots {
|
||||
s, err := store.db.State(ctx, r)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Also verifies genesis state didnt get deleted
|
||||
if s != nil && s.Slot != finalizedSlot && s.Slot != 0 && s.Slot < endSlot {
|
||||
t.Errorf("State with slot %d should not be in DB", s.Slot)
|
||||
}
|
||||
}
|
||||
|
||||
// New finalized epoch: 5
|
||||
newFinalizedEpoch := uint64(5)
|
||||
newFinalizedSlot := newFinalizedEpoch * params.BeaconConfig().SlotsPerEpoch
|
||||
endSlot = helpers.StartSlot(newFinalizedEpoch+1) - 1 // Inclusive
|
||||
if err := store.rmStatesOlderThanLastFinalized(ctx, helpers.StartSlot(finalizedEpoch+1)-1, endSlot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
for _, r := range blockRoots {
|
||||
s, err := store.db.State(ctx, r)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Also verifies genesis state didnt get deleted
|
||||
if s != nil && s.Slot != newFinalizedSlot && s.Slot != finalizedSlot && s.Slot != 0 && s.Slot < endSlot {
|
||||
t.Errorf("State with slot %d should not be in DB", s.Slot)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRemoveStateSinceLastFinalized_EmptyStartSlot(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
params.UseMinimalConfig()
|
||||
defer params.UseMainnetConfig()
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
store.genesisTime = uint64(time.Now().Unix())
|
||||
|
||||
update, err := store.shouldUpdateCurrentJustified(ctx, ðpb.Checkpoint{})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !update {
|
||||
t.Error("Should be able to update justified, received false")
|
||||
}
|
||||
|
||||
lastJustifiedBlk := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{ParentRoot: []byte{'G'}}}
|
||||
lastJustifiedRoot, _ := ssz.HashTreeRoot(lastJustifiedBlk.Block)
|
||||
newJustifiedBlk := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: 1, ParentRoot: lastJustifiedRoot[:]}}
|
||||
newJustifiedRoot, _ := ssz.HashTreeRoot(newJustifiedBlk.Block)
|
||||
if err := store.db.SaveBlock(ctx, newJustifiedBlk); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := store.db.SaveBlock(ctx, lastJustifiedBlk); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
diff := (params.BeaconConfig().SlotsPerEpoch - 1) * params.BeaconConfig().SecondsPerSlot
|
||||
store.genesisTime = uint64(time.Now().Unix()) - diff
|
||||
store.justifiedCheckpt = ðpb.Checkpoint{Root: lastJustifiedRoot[:]}
|
||||
update, err = store.shouldUpdateCurrentJustified(ctx, ðpb.Checkpoint{Root: newJustifiedRoot[:]})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !update {
|
||||
t.Error("Should be able to update justified, received false")
|
||||
}
|
||||
}
|
||||
|
||||
func TestShouldUpdateJustified_ReturnFalse(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
params.UseMinimalConfig()
|
||||
defer params.UseMainnetConfig()
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
|
||||
lastJustifiedBlk := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{ParentRoot: []byte{'G'}}}
|
||||
lastJustifiedRoot, _ := ssz.HashTreeRoot(lastJustifiedBlk.Block)
|
||||
newJustifiedBlk := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{ParentRoot: lastJustifiedRoot[:]}}
|
||||
newJustifiedRoot, _ := ssz.HashTreeRoot(newJustifiedBlk.Block)
|
||||
if err := store.db.SaveBlock(ctx, newJustifiedBlk); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := store.db.SaveBlock(ctx, lastJustifiedBlk); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
diff := (params.BeaconConfig().SlotsPerEpoch - 1) * params.BeaconConfig().SecondsPerSlot
|
||||
store.genesisTime = uint64(time.Now().Unix()) - diff
|
||||
store.justifiedCheckpt = ðpb.Checkpoint{Root: lastJustifiedRoot[:]}
|
||||
|
||||
update, err := store.shouldUpdateCurrentJustified(ctx, ðpb.Checkpoint{Root: newJustifiedRoot[:]})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if update {
|
||||
t.Error("Should not be able to update justified, received true")
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateJustifiedCheckpoint_Update(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
params.UseMinimalConfig()
|
||||
defer params.UseMainnetConfig()
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
store.genesisTime = uint64(time.Now().Unix())
|
||||
|
||||
store.justifiedCheckpt = ðpb.Checkpoint{Root: []byte{'A'}}
|
||||
store.bestJustifiedCheckpt = ðpb.Checkpoint{Epoch: 1, Root: []byte{'B'}}
|
||||
store.updateJustifiedCheckpoint()
|
||||
|
||||
if !bytes.Equal(store.justifiedCheckpt.Root, []byte{'B'}) {
|
||||
t.Error("Justified check point root did not update")
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateJustifiedCheckpoint_NoUpdate(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
params.UseMinimalConfig()
|
||||
defer params.UseMainnetConfig()
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
store.genesisTime = uint64(time.Now().Unix()) - params.BeaconConfig().SecondsPerSlot
|
||||
|
||||
store.justifiedCheckpt = ðpb.Checkpoint{Root: []byte{'A'}}
|
||||
store.bestJustifiedCheckpt = ðpb.Checkpoint{Epoch: 1, Root: []byte{'B'}}
|
||||
store.updateJustifiedCheckpoint()
|
||||
|
||||
if bytes.Equal(store.justifiedCheckpt.Root, []byte{'B'}) {
|
||||
t.Error("Justified check point root was not suppose to update")
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
|
||||
// Save 5 blocks in DB, each has a state.
|
||||
numBlocks := 5
|
||||
totalBlocks := make([]*ethpb.SignedBeaconBlock, numBlocks)
|
||||
blockRoots := make([][32]byte, 0)
|
||||
for i := 0; i < len(totalBlocks); i++ {
|
||||
totalBlocks[i] = ðpb.SignedBeaconBlock{
|
||||
Block: ðpb.BeaconBlock{
|
||||
Slot: uint64(i),
|
||||
},
|
||||
}
|
||||
r, err := ssz.HashTreeRoot(totalBlocks[i].Block)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := store.db.SaveState(ctx, &pb.BeaconState{Slot: uint64(i)}, r); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := store.db.SaveBlock(ctx, totalBlocks[i]); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
blockRoots = append(blockRoots, r)
|
||||
}
|
||||
if err := store.db.SaveHeadBlockRoot(ctx, blockRoots[0]); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := store.rmStatesOlderThanLastFinalized(ctx, 10, 11); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Since 5-10 are skip slots, block with slot 4 should be deleted
|
||||
s, err := store.db.State(ctx, blockRoots[4])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if s != nil {
|
||||
t.Error("Did not delete state for start slot")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCachedPreState_CanGetFromCache(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
s := &pb.BeaconState{Slot: 1}
|
||||
r := [32]byte{'A'}
|
||||
b := ðpb.BeaconBlock{Slot: 1, ParentRoot: r[:]}
|
||||
store.initSyncState[r] = s
|
||||
|
||||
wanted := "pre state of slot 1 does not exist"
|
||||
if _, err := store.cachedPreState(ctx, b); !strings.Contains(err.Error(), wanted) {
|
||||
t.Fatal("Not expected error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCachedPreState_CanGetFromCacheWithFeature(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
config := &featureconfig.Flags{
|
||||
InitSyncCacheState: true,
|
||||
}
|
||||
featureconfig.Init(config)
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
s := &pb.BeaconState{Slot: 1}
|
||||
r := [32]byte{'A'}
|
||||
b := ðpb.BeaconBlock{Slot: 1, ParentRoot: r[:]}
|
||||
store.initSyncState[r] = s
|
||||
|
||||
received, err := store.cachedPreState(ctx, b)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(s, received) {
|
||||
t.Error("cached state not the same")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCachedPreState_CanGetFromDB(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
r := [32]byte{'A'}
|
||||
b := ðpb.BeaconBlock{Slot: 1, ParentRoot: r[:]}
|
||||
|
||||
_, err := store.cachedPreState(ctx, b)
|
||||
wanted := "pre state of slot 1 does not exist"
|
||||
if err.Error() != wanted {
|
||||
t.Error("Did not get wanted error")
|
||||
}
|
||||
|
||||
s := &pb.BeaconState{Slot: 1}
|
||||
store.db.SaveState(ctx, s, r)
|
||||
|
||||
received, err := store.cachedPreState(ctx, b)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(s, received) {
|
||||
t.Error("cached state not the same")
|
||||
}
|
||||
}
|
||||
|
||||
func TestSaveInitState_CanSaveDelete(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
|
||||
config := &featureconfig.Flags{
|
||||
InitSyncCacheState: true,
|
||||
}
|
||||
featureconfig.Init(config)
|
||||
|
||||
for i := uint64(0); i < 64; i++ {
|
||||
b := ðpb.BeaconBlock{Slot: i}
|
||||
s := &pb.BeaconState{Slot: i}
|
||||
r, _ := ssz.HashTreeRoot(b)
|
||||
store.initSyncState[r] = s
|
||||
}
|
||||
|
||||
// Set finalized root as slot 32
|
||||
finalizedRoot, _ := ssz.HashTreeRoot(ðpb.BeaconBlock{Slot: 32})
|
||||
|
||||
if err := store.saveInitState(ctx, &pb.BeaconState{FinalizedCheckpoint: ðpb.Checkpoint{
|
||||
Epoch: 1, Root: finalizedRoot[:]}}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Verify finalized state is saved in DB
|
||||
finalizedState, err := store.db.State(ctx, finalizedRoot)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if finalizedState == nil {
|
||||
t.Error("finalized state can't be nil")
|
||||
}
|
||||
|
||||
// Verify cached state is properly pruned
|
||||
if len(store.initSyncState) != int(params.BeaconConfig().SlotsPerEpoch) {
|
||||
t.Errorf("wanted: %d, got: %d", len(store.initSyncState), params.BeaconConfig().SlotsPerEpoch)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateJustified_CouldUpdateBest(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
signedBlock := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{}}
|
||||
if err := db.SaveBlock(ctx, signedBlock); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
r, err := ssz.HashTreeRoot(signedBlock.Block)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
store.justifiedCheckpt = ðpb.Checkpoint{Root: []byte{'A'}}
|
||||
store.bestJustifiedCheckpt = ðpb.Checkpoint{Root: []byte{'A'}}
|
||||
store.initSyncState[r] = &pb.BeaconState{}
|
||||
if err := db.SaveState(ctx, &pb.BeaconState{}, r); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Could update
|
||||
s := &pb.BeaconState{CurrentJustifiedCheckpoint: ðpb.Checkpoint{Epoch: 1, Root: r[:]}}
|
||||
if err := store.updateJustified(context.Background(), s); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if store.bestJustifiedCheckpt.Epoch != s.CurrentJustifiedCheckpoint.Epoch {
|
||||
t.Error("Incorrect justified epoch in store")
|
||||
}
|
||||
|
||||
// Could not update
|
||||
store.bestJustifiedCheckpt.Epoch = 2
|
||||
if err := store.updateJustified(context.Background(), s); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if store.bestJustifiedCheckpt.Epoch != 2 {
|
||||
t.Error("Incorrect justified epoch in store")
|
||||
}
|
||||
}
|
||||
|
||||
func TestFilterBlockRoots_CanFilter(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
fBlock := ðpb.BeaconBlock{}
|
||||
fRoot, _ := ssz.HashTreeRoot(fBlock)
|
||||
hBlock := ðpb.BeaconBlock{Slot: 1}
|
||||
headRoot, _ := ssz.HashTreeRoot(hBlock)
|
||||
if err := store.db.SaveBlock(ctx, ðpb.SignedBeaconBlock{Block: fBlock}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := store.db.SaveState(ctx, &pb.BeaconState{}, fRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := store.db.SaveFinalizedCheckpoint(ctx, ðpb.Checkpoint{Root: fRoot[:]}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := store.db.SaveBlock(ctx, ðpb.SignedBeaconBlock{Block: hBlock}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := store.db.SaveState(ctx, &pb.BeaconState{}, headRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := store.db.SaveHeadBlockRoot(ctx, headRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
roots := [][32]byte{{'C'}, {'D'}, headRoot, {'E'}, fRoot, {'F'}}
|
||||
wanted := [][32]byte{{'C'}, {'D'}, {'E'}, {'F'}}
|
||||
|
||||
received, err := store.filterBlockRoots(ctx, roots)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(wanted, received) {
|
||||
t.Error("Did not filter correctly")
|
||||
}
|
||||
}
|
||||
433
beacon-chain/blockchain/forkchoice/service.go
Normal file
433
beacon-chain/blockchain/forkchoice/service.go
Normal file
@@ -0,0 +1,433 @@
|
||||
package forkchoice
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/pkg/errors"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/go-ssz"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db/filters"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/featureconfig"
|
||||
"github.com/prysmaticlabs/prysm/shared/stateutil"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// ForkChoicer defines a common interface for methods useful for directly applying fork choice
|
||||
// to beacon blocks to compute head.
|
||||
type ForkChoicer interface {
|
||||
Head(ctx context.Context) ([]byte, error)
|
||||
OnBlock(ctx context.Context, b *ethpb.SignedBeaconBlock) error
|
||||
OnBlockInitialSyncStateTransition(ctx context.Context, b *ethpb.SignedBeaconBlock) error
|
||||
OnAttestation(ctx context.Context, a *ethpb.Attestation) error
|
||||
GenesisStore(ctx context.Context, justifiedCheckpoint *ethpb.Checkpoint, finalizedCheckpoint *ethpb.Checkpoint) error
|
||||
FinalizedCheckpt() *ethpb.Checkpoint
|
||||
}
|
||||
|
||||
// Store represents a service struct that handles the forkchoice
|
||||
// logic of managing the full PoS beacon chain.
|
||||
type Store struct {
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
db db.Database
|
||||
justifiedCheckpt *ethpb.Checkpoint
|
||||
finalizedCheckpt *ethpb.Checkpoint
|
||||
prevFinalizedCheckpt *ethpb.Checkpoint
|
||||
checkpointState *cache.CheckpointStateCache
|
||||
checkpointStateLock sync.Mutex
|
||||
genesisTime uint64
|
||||
bestJustifiedCheckpt *ethpb.Checkpoint
|
||||
latestVoteMap map[uint64]*pb.ValidatorLatestVote
|
||||
voteLock sync.RWMutex
|
||||
initSyncState map[[32]byte]*pb.BeaconState
|
||||
initSyncStateLock sync.RWMutex
|
||||
nextEpochBoundarySlot uint64
|
||||
}
|
||||
|
||||
// NewForkChoiceService instantiates a new service instance that will
|
||||
// be registered into a running beacon node.
|
||||
func NewForkChoiceService(ctx context.Context, db db.Database) *Store {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
return &Store{
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
db: db,
|
||||
checkpointState: cache.NewCheckpointStateCache(),
|
||||
latestVoteMap: make(map[uint64]*pb.ValidatorLatestVote),
|
||||
initSyncState: make(map[[32]byte]*pb.BeaconState),
|
||||
}
|
||||
}
|
||||
|
||||
// GenesisStore initializes the store struct before beacon chain
|
||||
// starts to advance.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
// def get_genesis_store(genesis_state: BeaconState) -> Store:
|
||||
// genesis_block = BeaconBlock(state_root=hash_tree_root(genesis_state))
|
||||
// root = signing_root(genesis_block)
|
||||
// justified_checkpoint = Checkpoint(epoch=GENESIS_EPOCH, root=root)
|
||||
// finalized_checkpoint = Checkpoint(epoch=GENESIS_EPOCH, root=root)
|
||||
// return Store(
|
||||
// time=genesis_state.genesis_time,
|
||||
// justified_checkpoint=justified_checkpoint,
|
||||
// finalized_checkpoint=finalized_checkpoint,
|
||||
// blocks={root: genesis_block},
|
||||
// block_states={root: genesis_state.copy()},
|
||||
// checkpoint_states={justified_checkpoint: genesis_state.copy()},
|
||||
// )
|
||||
func (s *Store) GenesisStore(
|
||||
ctx context.Context,
|
||||
justifiedCheckpoint *ethpb.Checkpoint,
|
||||
finalizedCheckpoint *ethpb.Checkpoint) error {
|
||||
|
||||
s.justifiedCheckpt = proto.Clone(justifiedCheckpoint).(*ethpb.Checkpoint)
|
||||
s.bestJustifiedCheckpt = proto.Clone(justifiedCheckpoint).(*ethpb.Checkpoint)
|
||||
s.finalizedCheckpt = proto.Clone(finalizedCheckpoint).(*ethpb.Checkpoint)
|
||||
s.prevFinalizedCheckpt = proto.Clone(finalizedCheckpoint).(*ethpb.Checkpoint)
|
||||
|
||||
justifiedState, err := s.db.State(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root))
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not retrieve last justified state")
|
||||
}
|
||||
|
||||
if err := s.checkpointState.AddCheckpointState(&cache.CheckpointState{
|
||||
Checkpoint: s.justifiedCheckpt,
|
||||
State: justifiedState,
|
||||
}); err != nil {
|
||||
return errors.Wrap(err, "could not save genesis state in check point cache")
|
||||
}
|
||||
|
||||
s.genesisTime = justifiedState.GenesisTime
|
||||
if err := s.cacheGenesisState(ctx); err != nil {
|
||||
return errors.Wrap(err, "could not cache initial sync state")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// This sets up gensis for initial sync state cache.
|
||||
func (s *Store) cacheGenesisState(ctx context.Context) error {
|
||||
if !featureconfig.Get().InitSyncCacheState {
|
||||
return nil
|
||||
}
|
||||
|
||||
genesisState, err := s.db.GenesisState(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
stateRoot, err := stateutil.HashTreeRootState(genesisState)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not tree hash genesis state")
|
||||
}
|
||||
genesisBlk := blocks.NewGenesisBlock(stateRoot[:])
|
||||
genesisBlkRoot, err := ssz.HashTreeRoot(genesisBlk.Block)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get genesis block root")
|
||||
}
|
||||
s.initSyncState[genesisBlkRoot] = genesisState
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ancestor returns the block root of an ancestry block from the input block root.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
// def get_ancestor(store: Store, root: Hash, slot: Slot) -> Hash:
|
||||
// block = store.blocks[root]
|
||||
// if block.slot > slot:
|
||||
// return get_ancestor(store, block.parent_root, slot)
|
||||
// elif block.slot == slot:
|
||||
// return root
|
||||
// else:
|
||||
// return Bytes32() # root is older than queried slot: no results.
|
||||
func (s *Store) ancestor(ctx context.Context, root []byte, slot uint64) ([]byte, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "forkchoice.ancestor")
|
||||
defer span.End()
|
||||
|
||||
// Stop recursive ancestry lookup if context is cancelled.
|
||||
if ctx.Err() != nil {
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
|
||||
signed, err := s.db.Block(ctx, bytesutil.ToBytes32(root))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get ancestor block")
|
||||
}
|
||||
if signed == nil || signed.Block == nil {
|
||||
return nil, errors.New("nil block")
|
||||
}
|
||||
b := signed.Block
|
||||
|
||||
// If we dont have the ancestor in the DB, simply return nil so rest of fork choice
|
||||
// operation can proceed. This is not an error condition.
|
||||
if b == nil || b.Slot < slot {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if b.Slot == slot {
|
||||
return root, nil
|
||||
}
|
||||
|
||||
return s.ancestor(ctx, b.ParentRoot, slot)
|
||||
}
|
||||
|
||||
// latestAttestingBalance returns the staked balance of a block from the input block root.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
// def get_latest_attesting_balance(store: Store, root: Hash) -> Gwei:
|
||||
// state = store.checkpoint_states[store.justified_checkpoint]
|
||||
// active_indices = get_active_validator_indices(state, get_current_epoch(state))
|
||||
// return Gwei(sum(
|
||||
// state.validators[i].effective_balance for i in active_indices
|
||||
// if (i in store.latest_messages
|
||||
// and get_ancestor(store, store.latest_messages[i].root, store.blocks[root].slot) == root)
|
||||
// ))
|
||||
func (s *Store) latestAttestingBalance(ctx context.Context, root []byte) (uint64, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "forkchoice.latestAttestingBalance")
|
||||
defer span.End()
|
||||
|
||||
lastJustifiedState, err := s.checkpointState.StateByCheckpoint(s.JustifiedCheckpt())
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "could not retrieve cached state via last justified check point")
|
||||
}
|
||||
if lastJustifiedState == nil {
|
||||
return 0, errors.Wrapf(err, "could not get justified state at epoch %d", s.JustifiedCheckpt().Epoch)
|
||||
}
|
||||
|
||||
lastJustifiedEpoch := helpers.CurrentEpoch(lastJustifiedState)
|
||||
activeIndices, err := helpers.ActiveValidatorIndices(lastJustifiedState, lastJustifiedEpoch)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "could not get active indices for last justified checkpoint")
|
||||
}
|
||||
|
||||
wantedBlkSigned, err := s.db.Block(ctx, bytesutil.ToBytes32(root))
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "could not get target block")
|
||||
}
|
||||
if wantedBlkSigned == nil || wantedBlkSigned.Block == nil {
|
||||
return 0, errors.New("nil wanted block")
|
||||
}
|
||||
wantedBlk := wantedBlkSigned.Block
|
||||
|
||||
balances := uint64(0)
|
||||
s.voteLock.RLock()
|
||||
defer s.voteLock.RUnlock()
|
||||
for _, i := range activeIndices {
|
||||
vote, ok := s.latestVoteMap[i]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
wantedRoot, err := s.ancestor(ctx, vote.Root, wantedBlk.Slot)
|
||||
if err != nil {
|
||||
return 0, errors.Wrapf(err, "could not get ancestor root for slot %d", wantedBlk.Slot)
|
||||
}
|
||||
if bytes.Equal(wantedRoot, root) {
|
||||
balances += lastJustifiedState.Validators[i].EffectiveBalance
|
||||
}
|
||||
}
|
||||
return balances, nil
|
||||
}
|
||||
|
||||
// Head returns the head of the beacon chain.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
// def get_head(store: Store) -> Root:
|
||||
// # Get filtered block tree that only includes viable branches
|
||||
// blocks = get_filtered_block_tree(store)
|
||||
// # Execute the LMD-GHOST fork choice
|
||||
// head = store.justified_checkpoint.root
|
||||
// justified_slot = compute_start_slot_at_epoch(store.justified_checkpoint.epoch)
|
||||
// while True:
|
||||
// children = [
|
||||
// root for root in blocks.keys()
|
||||
// if blocks[root].parent_root == head and blocks[root].slot > justified_slot
|
||||
// ]
|
||||
// if len(children) == 0:
|
||||
// return head
|
||||
// # Sort by latest attesting balance with ties broken lexicographically
|
||||
// head = max(children, key=lambda root: (get_latest_attesting_balance(store, root), root))
|
||||
func (s *Store) Head(ctx context.Context) ([]byte, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "forkchoice.head")
|
||||
defer span.End()
|
||||
|
||||
head := s.JustifiedCheckpt().Root
|
||||
filteredBlocks, err := s.getFilterBlockTree(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
justifiedSlot := helpers.StartSlot(s.justifiedCheckpt.Epoch)
|
||||
for {
|
||||
children := make([][32]byte, 0, len(filteredBlocks))
|
||||
for root, block := range filteredBlocks {
|
||||
if bytes.Equal(block.ParentRoot, head) && block.Slot > justifiedSlot {
|
||||
children = append(children, root)
|
||||
}
|
||||
}
|
||||
|
||||
if len(children) == 0 {
|
||||
return head, nil
|
||||
}
|
||||
|
||||
// if a block has one child, then we don't have to lookup anything to
|
||||
// know that this child will be the best child.
|
||||
head = children[0][:]
|
||||
if len(children) > 1 {
|
||||
highest, err := s.latestAttestingBalance(ctx, head)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get latest balance")
|
||||
}
|
||||
for _, child := range children[1:] {
|
||||
balance, err := s.latestAttestingBalance(ctx, child[:])
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get latest balance")
|
||||
}
|
||||
// When there's a tie, it's broken lexicographically to favor the higher one.
|
||||
if balance > highest ||
|
||||
balance == highest && bytes.Compare(child[:], head) > 0 {
|
||||
highest = balance
|
||||
head = child[:]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// getFilterBlockTree retrieves a filtered block tree from store, it only returns branches
|
||||
// whose leaf state's justified and finalized info agrees with what's in the store.
|
||||
// Rationale: https://notes.ethereum.org/Fj-gVkOSTpOyUx-zkWjuwg?view
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
// def get_filtered_block_tree(store: Store) -> Dict[Root, BeaconBlock]:
|
||||
// """
|
||||
// Retrieve a filtered block true from ``store``, only returning branches
|
||||
// whose leaf state's justified/finalized info agrees with that in ``store``.
|
||||
// """
|
||||
// base = store.justified_checkpoint.root
|
||||
// blocks: Dict[Root, BeaconBlock] = {}
|
||||
// filter_block_tree(store, base, blocks)
|
||||
// return blocks
|
||||
func (s *Store) getFilterBlockTree(ctx context.Context) (map[[32]byte]*ethpb.BeaconBlock, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "forkchoice.getFilterBlockTree")
|
||||
defer span.End()
|
||||
|
||||
baseRoot := bytesutil.ToBytes32(s.justifiedCheckpt.Root)
|
||||
filteredBlocks := make(map[[32]byte]*ethpb.BeaconBlock)
|
||||
if _, err := s.filterBlockTree(ctx, baseRoot, filteredBlocks); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return filteredBlocks, nil
|
||||
}
|
||||
|
||||
// filterBlockTree filters for branches that see latest finalized and justified info as correct on-chain
|
||||
// before running Head.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
// def filter_block_tree(store: Store, block_root: Root, blocks: Dict[Root, BeaconBlock]) -> bool:
|
||||
// block = store.blocks[block_root]
|
||||
// children = [
|
||||
// root for root in store.blocks.keys()
|
||||
// if store.blocks[root].parent_root == block_root
|
||||
// ]
|
||||
// # If any children branches contain expected finalized/justified checkpoints,
|
||||
// # add to filtered block-tree and signal viability to parent.
|
||||
// if any(children):
|
||||
// filter_block_tree_result = [filter_block_tree(store, child, blocks) for child in children]
|
||||
// if any(filter_block_tree_result):
|
||||
// blocks[block_root] = block
|
||||
// return True
|
||||
// return False
|
||||
// # If leaf block, check finalized/justified checkpoints as matching latest.
|
||||
// head_state = store.block_states[block_root]
|
||||
// correct_justified = (
|
||||
// store.justified_checkpoint.epoch == GENESIS_EPOCH
|
||||
// or head_state.current_justified_checkpoint == store.justified_checkpoint
|
||||
// )
|
||||
// correct_finalized = (
|
||||
// store.finalized_checkpoint.epoch == GENESIS_EPOCH
|
||||
// or head_state.finalized_checkpoint == store.finalized_checkpoint
|
||||
// )
|
||||
// # If expected finalized/justified, add to viable block-tree and signal viability to parent.
|
||||
// if correct_justified and correct_finalized:
|
||||
// blocks[block_root] = block
|
||||
// return True
|
||||
// # Otherwise, branch not viable
|
||||
// return False
|
||||
func (s *Store) filterBlockTree(ctx context.Context, blockRoot [32]byte, filteredBlocks map[[32]byte]*ethpb.BeaconBlock) (bool, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "forkchoice.filterBlockTree")
|
||||
defer span.End()
|
||||
signed, err := s.db.Block(ctx, blockRoot)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if signed == nil || signed.Block == nil {
|
||||
return false, errors.New("nil block")
|
||||
}
|
||||
block := signed.Block
|
||||
|
||||
filter := filters.NewFilter().SetParentRoot(blockRoot[:])
|
||||
childrenRoots, err := s.db.BlockRoots(ctx, filter)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if len(childrenRoots) != 0 {
|
||||
var filtered bool
|
||||
for _, childRoot := range childrenRoots {
|
||||
didFilter, err := s.filterBlockTree(ctx, childRoot, filteredBlocks)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if didFilter {
|
||||
filtered = true
|
||||
}
|
||||
}
|
||||
if filtered {
|
||||
filteredBlocks[blockRoot] = block
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
headState, err := s.db.State(ctx, blockRoot)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if headState == nil {
|
||||
return false, fmt.Errorf("no state matching block root %v", hex.EncodeToString(blockRoot[:]))
|
||||
}
|
||||
|
||||
correctJustified := s.justifiedCheckpt.Epoch == 0 ||
|
||||
proto.Equal(s.justifiedCheckpt, headState.CurrentJustifiedCheckpoint)
|
||||
correctFinalized := s.finalizedCheckpt.Epoch == 0 ||
|
||||
proto.Equal(s.finalizedCheckpt, headState.FinalizedCheckpoint)
|
||||
if correctJustified && correctFinalized {
|
||||
filteredBlocks[blockRoot] = block
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// JustifiedCheckpt returns the latest justified check point from fork choice store.
|
||||
func (s *Store) JustifiedCheckpt() *ethpb.Checkpoint {
|
||||
return proto.Clone(s.justifiedCheckpt).(*ethpb.Checkpoint)
|
||||
}
|
||||
|
||||
// FinalizedCheckpt returns the latest finalized check point from fork choice store.
|
||||
func (s *Store) FinalizedCheckpt() *ethpb.Checkpoint {
|
||||
return proto.Clone(s.finalizedCheckpt).(*ethpb.Checkpoint)
|
||||
}
|
||||
517
beacon-chain/blockchain/forkchoice/service_test.go
Normal file
517
beacon-chain/blockchain/forkchoice/service_test.go
Normal file
@@ -0,0 +1,517 @@
|
||||
package forkchoice
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/go-ssz"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db/filters"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/featureconfig"
|
||||
"github.com/prysmaticlabs/prysm/shared/stateutil"
|
||||
)
|
||||
|
||||
// TestStore_GenesisStoreOk verifies that GenesisStore seeds the fork choice
// store from the genesis checkpoint: both the justified and finalized
// checkpoints are set to it, and the genesis state is cached by checkpoint.
func TestStore_GenesisStoreOk(t *testing.T) {
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)

	store := NewForkChoiceService(ctx, db)

	// Build a genesis block whose state root commits to this genesis state.
	genesisTime := time.Unix(9999, 0)
	genesisState := &pb.BeaconState{GenesisTime: uint64(genesisTime.Unix())}
	genesisStateRoot, err := stateutil.HashTreeRootState(genesisState)
	if err != nil {
		t.Fatal(err)
	}
	genesisBlk := blocks.NewGenesisBlock(genesisStateRoot[:])
	genesisBlkRoot, err := ssz.HashTreeRoot(genesisBlk.Block)
	if err != nil {
		t.Fatal(err)
	}
	// The state and genesis root must be persisted before GenesisStore runs.
	if err := db.SaveState(ctx, genesisState, genesisBlkRoot); err != nil {
		t.Fatal(err)
	}
	if err := db.SaveGenesisBlockRoot(ctx, genesisBlkRoot); err != nil {
		t.Fatal(err)
	}

	checkPoint := &ethpb.Checkpoint{Root: genesisBlkRoot[:]}
	if err := store.GenesisStore(ctx, checkPoint, checkPoint); err != nil {
		t.Fatal(err)
	}

	// GenesisStore should set both store checkpoints to the genesis checkpoint.
	if !reflect.DeepEqual(store.justifiedCheckpt, checkPoint) {
		t.Error("Justified check point from genesis store did not match")
	}
	if !reflect.DeepEqual(store.finalizedCheckpt, checkPoint) {
		t.Error("Finalized check point from genesis store did not match")
	}

	// The genesis state should be retrievable from the checkpoint-state cache.
	cachedState, err := store.checkpointState.StateByCheckpoint(checkPoint)
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(cachedState, genesisState) {
		t.Error("Incorrect genesis state cached")
	}
}
|
||||
|
||||
// TestStore_AncestorOk checks that ancestor walks the parent chain from a
// given block root back to the block at the requested slot.
func TestStore_AncestorOk(t *testing.T) {
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)

	store := NewForkChoiceService(ctx, db)

	roots, err := blockTree1(db, []byte{'g'})
	if err != nil {
		t.Fatal(err)
	}
	type args struct {
		root []byte
		slot uint64
	}

	//    /- B1
	// B0           /- B5 - B7
	//    \- B3 - B4 - B6 - B8
	tests := []struct {
		args *args
		want []byte
	}{
		{args: &args{roots[1], 0}, want: roots[0]},
		{args: &args{roots[8], 0}, want: roots[0]},
		{args: &args{roots[8], 4}, want: roots[4]},
		{args: &args{roots[7], 4}, want: roots[4]},
		{args: &args{roots[7], 0}, want: roots[0]},
	}
	for _, tt := range tests {
		got, err := store.ancestor(ctx, tt.args.root, tt.args.slot)
		if err != nil {
			t.Fatal(err)
		}
		if !reflect.DeepEqual(got, tt.want) {
			t.Errorf("Store.ancestor(ctx, ) = %v, want %v", got, tt.want)
		}
	}
}
|
||||
|
||||
// TestStore_AncestorNotPartOfTheChain verifies that walking ancestors to a
// slot whose block lies on a different branch (or does not exist) yields nil
// rather than an error.
func TestStore_AncestorNotPartOfTheChain(t *testing.T) {
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)

	store := NewForkChoiceService(ctx, db)

	roots, err := blockTree1(db, []byte{'g'})
	if err != nil {
		t.Fatal(err)
	}

	//    /- B1
	// B0           /- B5 - B7
	//    \- B3 - B4 - B6 - B8
	// B1 (slot 1) is on a sibling branch of B8; slot 2 has no block at all.
	root, err := store.ancestor(ctx, roots[8], 1)
	if err != nil {
		t.Fatal(err)
	}
	if root != nil {
		t.Error("block at slot 1 is not part of the chain")
	}
	root, err = store.ancestor(ctx, roots[8], 2)
	if err != nil {
		t.Fatal(err)
	}
	if root != nil {
		t.Error("block at slot 2 is not part of the chain")
	}
}
|
||||
|
||||
// TestStore_LatestAttestingBalance seeds 100 validators' latest votes across
// the branches of blockTree1 and checks that each block's attesting balance
// equals the summed effective balance of voters whose vote descends from it.
func TestStore_LatestAttestingBalance(t *testing.T) {
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)

	store := NewForkChoiceService(ctx, db)

	roots, err := blockTree1(db, []byte{'g'})
	if err != nil {
		t.Fatal(err)
	}

	// 100 validators with 1e9 Gwei effective balance each.
	validators := make([]*ethpb.Validator, 100)
	for i := 0; i < len(validators); i++ {
		validators[i] = &ethpb.Validator{ExitEpoch: 2, EffectiveBalance: 1e9}
	}

	s := &pb.BeaconState{Validators: validators}
	stateRoot, err := stateutil.HashTreeRootState(s)
	if err != nil {
		t.Fatal(err)
	}
	b := blocks.NewGenesisBlock(stateRoot[:])
	blkRoot, err := ssz.HashTreeRoot(b.Block)
	if err != nil {
		t.Fatal(err)
	}
	if err := db.SaveState(ctx, s, blkRoot); err != nil {
		t.Fatal(err)
	}
	if err := db.SaveGenesisBlockRoot(ctx, blkRoot); err != nil {
		t.Fatal(err)
	}

	checkPoint := &ethpb.Checkpoint{Root: blkRoot[:]}
	if err := store.GenesisStore(ctx, checkPoint, checkPoint); err != nil {
		t.Fatal(err)
	}

	// Split the votes three ways across the branches:
	//    /- B1 (33 votes)
	// B0           /- B5 - B7 (33 votes)
	//    \- B3 - B4 - B6 - B8 (34 votes)
	for i := 0; i < len(validators); i++ {
		switch {
		case i < 33:
			store.latestVoteMap[uint64(i)] = &pb.ValidatorLatestVote{Root: roots[1]}
		case i > 66:
			store.latestVoteMap[uint64(i)] = &pb.ValidatorLatestVote{Root: roots[7]}
		default:
			store.latestVoteMap[uint64(i)] = &pb.ValidatorLatestVote{Root: roots[8]}
		}
	}

	// Interior blocks accumulate the balances of all descendant votes.
	tests := []struct {
		root []byte
		want uint64
	}{
		{root: roots[0], want: 100 * 1e9},
		{root: roots[1], want: 33 * 1e9},
		{root: roots[3], want: 67 * 1e9},
		{root: roots[4], want: 67 * 1e9},
		{root: roots[7], want: 33 * 1e9},
		{root: roots[8], want: 34 * 1e9},
	}
	for _, tt := range tests {
		got, err := store.latestAttestingBalance(ctx, tt.root)
		if err != nil {
			t.Fatal(err)
		}
		if got != tt.want {
			t.Errorf("Store.latestAttestingBalance(ctx, ) = %v, want %v", got, tt.want)
		}
	}
}
|
||||
|
||||
// TestStore_ChildrenBlocksFromParentRoot checks that the DB block-roots
// filter returns the children of a given parent root, optionally bounded by
// a start slot.
func TestStore_ChildrenBlocksFromParentRoot(t *testing.T) {
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)

	store := NewForkChoiceService(ctx, db)

	roots, err := blockTree1(db, []byte{'g'})
	if err != nil {
		t.Fatal(err)
	}

	// B0 has two children: B1 (slot 1) and B3 (slot 3).
	filter := filters.NewFilter().SetParentRoot(roots[0]).SetStartSlot(0)
	children, err := store.db.BlockRoots(ctx, filter)
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(children, [][32]byte{bytesutil.ToBytes32(roots[1]), bytesutil.ToBytes32(roots[3])}) {
		t.Error("Did not receive correct children roots")
	}

	// Raising the start slot to 2 should exclude B1, leaving only B3.
	filter = filters.NewFilter().SetParentRoot(roots[0]).SetStartSlot(2)
	children, err = store.db.BlockRoots(ctx, filter)
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(children, [][32]byte{bytesutil.ToBytes32(roots[3])}) {
		t.Error("Did not receive correct children roots")
	}
}
|
||||
|
||||
// TestStore_GetHead runs LMD-GHOST head selection over blockTree1 and checks
// that the chosen head tracks the majority of latest validator votes as the
// votes shift between branches.
func TestStore_GetHead(t *testing.T) {
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)

	store := NewForkChoiceService(ctx, db)

	roots, err := blockTree1(db, []byte{'g'})
	if err != nil {
		t.Fatal(err)
	}

	// 100 validators with 1e9 Gwei effective balance each.
	validators := make([]*ethpb.Validator, 100)
	for i := 0; i < len(validators); i++ {
		validators[i] = &ethpb.Validator{ExitEpoch: 2, EffectiveBalance: 1e9}
	}

	s := &pb.BeaconState{Validators: validators}
	stateRoot, err := stateutil.HashTreeRootState(s)
	if err != nil {
		t.Fatal(err)
	}
	b := blocks.NewGenesisBlock(stateRoot[:])
	blkRoot, err := ssz.HashTreeRoot(b.Block)
	if err != nil {
		t.Fatal(err)
	}
	if err := store.db.SaveState(ctx, s, blkRoot); err != nil {
		t.Fatal(err)
	}
	if err := store.db.SaveGenesisBlockRoot(ctx, blkRoot); err != nil {
		t.Fatal(err)
	}

	checkPoint := &ethpb.Checkpoint{Root: blkRoot[:]}

	if err := store.GenesisStore(ctx, checkPoint, checkPoint); err != nil {
		t.Fatal(err)
	}
	if err := store.db.SaveState(ctx, s, bytesutil.ToBytes32(roots[0])); err != nil {
		t.Fatal(err)
	}
	// Point the justified checkpoint at B0 and cache its state so fork
	// choice starts from the tree root.
	store.justifiedCheckpt.Root = roots[0]
	if err := store.checkpointState.AddCheckpointState(&cache.CheckpointState{
		Checkpoint: store.justifiedCheckpt,
		State:      s,
	}); err != nil {
		t.Fatal(err)
	}

	// Split the votes three ways across the branches:
	//    /- B1 (33 votes)
	// B0           /- B5 - B7 (33 votes)
	//    \- B3 - B4 - B6 - B8 (34 votes)
	for i := 0; i < len(validators); i++ {
		switch {
		case i < 33:
			store.latestVoteMap[uint64(i)] = &pb.ValidatorLatestVote{Root: roots[1]}
		case i > 66:
			store.latestVoteMap[uint64(i)] = &pb.ValidatorLatestVote{Root: roots[7]}
		default:
			store.latestVoteMap[uint64(i)] = &pb.ValidatorLatestVote{Root: roots[8]}
		}
	}

	// Default head is B8: its 34 votes beat the two 33-vote branches.
	head, err := store.Head(ctx)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(head, roots[8]) {
		t.Error("Incorrect head")
	}

	// 1 validator switches vote to B7 to gain 34%, enough to switch head
	store.latestVoteMap[uint64(50)] = &pb.ValidatorLatestVote{Root: roots[7]}

	head, err = store.Head(ctx)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(head, roots[7]) {
		t.Error("Incorrect head")
	}

	// 18 validators switches vote to B1 to gain 51%, enough to switch head
	for i := 0; i < 18; i++ {
		idx := 50 + uint64(i)
		store.latestVoteMap[uint64(idx)] = &pb.ValidatorLatestVote{Root: roots[1]}
	}
	head, err = store.Head(ctx)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(head, roots[1]) {
		t.Log(head)
		t.Error("Incorrect head")
	}
}
|
||||
|
||||
func TestCacheGenesisState_Correct(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
config := &featureconfig.Flags{
|
||||
InitSyncCacheState: true,
|
||||
}
|
||||
featureconfig.Init(config)
|
||||
|
||||
b := ðpb.BeaconBlock{Slot: 1}
|
||||
r, _ := ssz.HashTreeRoot(b)
|
||||
s := &pb.BeaconState{GenesisTime: 99}
|
||||
|
||||
store.db.SaveState(ctx, s, r)
|
||||
store.db.SaveGenesisBlockRoot(ctx, r)
|
||||
|
||||
if err := store.cacheGenesisState(ctx); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for _, state := range store.initSyncState {
|
||||
if !reflect.DeepEqual(s, state) {
|
||||
t.Error("Did not get wanted state")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestStore_GetFilterBlockTree_CorrectLeaf verifies that when every leaf
// state agrees with the store's justified/finalized checkpoints, the filtered
// block tree contains every block of blockTree1 stored in the DB.
func TestStore_GetFilterBlockTree_CorrectLeaf(t *testing.T) {
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)

	store := NewForkChoiceService(ctx, db)

	roots, err := blockTree1(db, []byte{'g'})
	if err != nil {
		t.Fatal(err)
	}

	s := &pb.BeaconState{}
	stateRoot, err := stateutil.HashTreeRootState(s)
	if err != nil {
		t.Fatal(err)
	}
	b := blocks.NewGenesisBlock(stateRoot[:])
	blkRoot, err := ssz.HashTreeRoot(b.Block)
	if err != nil {
		t.Fatal(err)
	}
	if err := store.db.SaveState(ctx, s, blkRoot); err != nil {
		t.Fatal(err)
	}
	if err := store.db.SaveGenesisBlockRoot(ctx, blkRoot); err != nil {
		t.Fatal(err)
	}

	checkPoint := &ethpb.Checkpoint{Root: blkRoot[:]}

	if err := store.GenesisStore(ctx, checkPoint, checkPoint); err != nil {
		t.Fatal(err)
	}
	if err := store.db.SaveState(ctx, s, bytesutil.ToBytes32(roots[0])); err != nil {
		t.Fatal(err)
	}
	// Point the justified checkpoint at B0 and cache its state so the filter
	// walk starts from the tree root.
	store.justifiedCheckpt.Root = roots[0]
	if err := store.checkpointState.AddCheckpointState(&cache.CheckpointState{
		Checkpoint: store.justifiedCheckpt,
		State:      s,
	}); err != nil {
		t.Fatal(err)
	}

	tree, err := store.getFilterBlockTree(ctx)
	if err != nil {
		t.Fatal(err)
	}

	// Expect every block from blockTree1 that exists in the DB to survive
	// the filter (roots[2] is nil — there is no B2).
	wanted := make(map[[32]byte]*ethpb.BeaconBlock)
	for _, root := range roots {
		root32 := bytesutil.ToBytes32(root)
		b, _ := store.db.Block(ctx, root32)
		if b != nil {
			wanted[root32] = b.Block
		}
	}
	if !reflect.DeepEqual(tree, wanted) {
		t.Error("Did not filter tree correctly")
	}
}
|
||||
|
||||
func TestStore_GetFilterBlockTree_IncorrectLeaf(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
|
||||
roots, err := blockTree1(db, []byte{'g'})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
s := &pb.BeaconState{}
|
||||
stateRoot, err := stateutil.HashTreeRootState(s)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
b := blocks.NewGenesisBlock(stateRoot[:])
|
||||
blkRoot, err := ssz.HashTreeRoot(b.Block)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := store.db.SaveState(ctx, s, blkRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := store.db.SaveGenesisBlockRoot(ctx, blkRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
checkPoint := ðpb.Checkpoint{Root: blkRoot[:]}
|
||||
|
||||
if err := store.GenesisStore(ctx, checkPoint, checkPoint); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := store.db.SaveState(ctx, s, bytesutil.ToBytes32(roots[0])); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
store.justifiedCheckpt.Root = roots[0]
|
||||
if err := store.checkpointState.AddCheckpointState(&cache.CheckpointState{
|
||||
Checkpoint: store.justifiedCheckpt,
|
||||
State: s,
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Filter for incorrect leaves for 1, 7 and 8
|
||||
store.db.SaveState(ctx, &pb.BeaconState{CurrentJustifiedCheckpoint: ðpb.Checkpoint{}}, bytesutil.ToBytes32(roots[1]))
|
||||
store.db.SaveState(ctx, &pb.BeaconState{CurrentJustifiedCheckpoint: ðpb.Checkpoint{}}, bytesutil.ToBytes32(roots[7]))
|
||||
store.db.SaveState(ctx, &pb.BeaconState{CurrentJustifiedCheckpoint: ðpb.Checkpoint{}}, bytesutil.ToBytes32(roots[8]))
|
||||
store.justifiedCheckpt.Epoch = 1
|
||||
tree, err := store.getFilterBlockTree(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(tree) != 0 {
|
||||
t.Error("filtered tree should be 0 length")
|
||||
}
|
||||
|
||||
// Set leave 1 as correct
|
||||
store.db.SaveState(ctx, &pb.BeaconState{CurrentJustifiedCheckpoint: ðpb.Checkpoint{Epoch: 1, Root: store.justifiedCheckpt.Root}}, bytesutil.ToBytes32(roots[1]))
|
||||
tree, err = store.getFilterBlockTree(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
wanted := make(map[[32]byte]*ethpb.BeaconBlock)
|
||||
root32 := bytesutil.ToBytes32(roots[0])
|
||||
b, err = store.db.Block(ctx, root32)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
wanted[root32] = b.Block
|
||||
root32 = bytesutil.ToBytes32(roots[1])
|
||||
b, err = store.db.Block(ctx, root32)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
wanted[root32] = b.Block
|
||||
|
||||
if !reflect.DeepEqual(tree, wanted) {
|
||||
t.Error("Did not filter tree correctly")
|
||||
}
|
||||
}
|
||||
153
beacon-chain/blockchain/forkchoice/tree_test.go
Normal file
153
beacon-chain/blockchain/forkchoice/tree_test.go
Normal file
@@ -0,0 +1,153 @@
|
||||
package forkchoice
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/go-ssz"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
)
|
||||
|
||||
// blockTree1 constructs the following tree:
//    /- B1
// B0           /- B5 - B7
//    \- B3 - B4 - B6 - B8
// (B1, and B3 are all from the same slots)
// Every block (and an empty state keyed by its parent root) is saved to the
// DB. The returned roots are indexed by block number; index 2 is nil since
// there is no B2.
func blockTree1(db db.Database, genesisRoot []byte) ([][]byte, error) {
	b0 := &ethpb.BeaconBlock{Slot: 0, ParentRoot: genesisRoot}
	r0, _ := ssz.HashTreeRoot(b0)
	b1 := &ethpb.BeaconBlock{Slot: 1, ParentRoot: r0[:]}
	r1, _ := ssz.HashTreeRoot(b1)
	b3 := &ethpb.BeaconBlock{Slot: 3, ParentRoot: r0[:]}
	r3, _ := ssz.HashTreeRoot(b3)
	b4 := &ethpb.BeaconBlock{Slot: 4, ParentRoot: r3[:]}
	r4, _ := ssz.HashTreeRoot(b4)
	b5 := &ethpb.BeaconBlock{Slot: 5, ParentRoot: r4[:]}
	r5, _ := ssz.HashTreeRoot(b5)
	b6 := &ethpb.BeaconBlock{Slot: 6, ParentRoot: r4[:]}
	r6, _ := ssz.HashTreeRoot(b6)
	b7 := &ethpb.BeaconBlock{Slot: 7, ParentRoot: r5[:]}
	r7, _ := ssz.HashTreeRoot(b7)
	b8 := &ethpb.BeaconBlock{Slot: 8, ParentRoot: r6[:]}
	r8, _ := ssz.HashTreeRoot(b8)
	for _, b := range []*ethpb.BeaconBlock{b0, b1, b3, b4, b5, b6, b7, b8} {
		if err := db.SaveBlock(context.Background(), &ethpb.SignedBeaconBlock{Block: b}); err != nil {
			return nil, err
		}
		if err := db.SaveState(context.Background(), &pb.BeaconState{}, bytesutil.ToBytes32(b.ParentRoot)); err != nil {
			return nil, err
		}
	}
	// Also save states keyed by the leaf roots (B1, B7, B8) — filterBlockTree
	// reads the state at each leaf root when checking branch viability.
	if err := db.SaveState(context.Background(), &pb.BeaconState{}, r1); err != nil {
		return nil, err
	}
	if err := db.SaveState(context.Background(), &pb.BeaconState{}, r7); err != nil {
		return nil, err
	}
	if err := db.SaveState(context.Background(), &pb.BeaconState{}, r8); err != nil {
		return nil, err
	}
	return [][]byte{r0[:], r1[:], nil, r3[:], r4[:], r5[:], r6[:], r7[:], r8[:]}, nil
}
|
||||
|
||||
// blockTree2 constructs the following tree:
|
||||
// Scenario graph: shorturl.at/loyP6
|
||||
//
|
||||
//digraph G {
|
||||
// rankdir=LR;
|
||||
// node [shape="none"];
|
||||
//
|
||||
// subgraph blocks {
|
||||
// rankdir=LR;
|
||||
// node [shape="box"];
|
||||
// a->b;
|
||||
// a->c;
|
||||
// b->d;
|
||||
// b->e;
|
||||
// c->f;
|
||||
// c->g;
|
||||
// d->h
|
||||
// d->i
|
||||
// d->j
|
||||
// d->k
|
||||
// h->l
|
||||
// h->m
|
||||
// g->n
|
||||
// g->o
|
||||
// e->p
|
||||
// }
|
||||
//}
|
||||
func blockTree2(db db.Database) ([][]byte, error) {
|
||||
b0 := ðpb.BeaconBlock{Slot: 0, ParentRoot: []byte{'g'}}
|
||||
r0, _ := ssz.HashTreeRoot(b0)
|
||||
b1 := ðpb.BeaconBlock{Slot: 1, ParentRoot: r0[:]}
|
||||
r1, _ := ssz.HashTreeRoot(b1)
|
||||
b2 := ðpb.BeaconBlock{Slot: 2, ParentRoot: r0[:]}
|
||||
r2, _ := ssz.HashTreeRoot(b2)
|
||||
b3 := ðpb.BeaconBlock{Slot: 3, ParentRoot: r1[:]}
|
||||
r3, _ := ssz.HashTreeRoot(b3)
|
||||
b4 := ðpb.BeaconBlock{Slot: 4, ParentRoot: r1[:]}
|
||||
r4, _ := ssz.HashTreeRoot(b4)
|
||||
b5 := ðpb.BeaconBlock{Slot: 5, ParentRoot: r2[:]}
|
||||
r5, _ := ssz.HashTreeRoot(b5)
|
||||
b6 := ðpb.BeaconBlock{Slot: 6, ParentRoot: r2[:]}
|
||||
r6, _ := ssz.HashTreeRoot(b6)
|
||||
b7 := ðpb.BeaconBlock{Slot: 7, ParentRoot: r3[:]}
|
||||
r7, _ := ssz.HashTreeRoot(b7)
|
||||
b8 := ðpb.BeaconBlock{Slot: 8, ParentRoot: r3[:]}
|
||||
r8, _ := ssz.HashTreeRoot(b8)
|
||||
b9 := ðpb.BeaconBlock{Slot: 9, ParentRoot: r3[:]}
|
||||
r9, _ := ssz.HashTreeRoot(b9)
|
||||
b10 := ðpb.BeaconBlock{Slot: 10, ParentRoot: r3[:]}
|
||||
r10, _ := ssz.HashTreeRoot(b10)
|
||||
b11 := ðpb.BeaconBlock{Slot: 11, ParentRoot: r4[:]}
|
||||
r11, _ := ssz.HashTreeRoot(b11)
|
||||
b12 := ðpb.BeaconBlock{Slot: 12, ParentRoot: r6[:]}
|
||||
r12, _ := ssz.HashTreeRoot(b12)
|
||||
b13 := ðpb.BeaconBlock{Slot: 13, ParentRoot: r6[:]}
|
||||
r13, _ := ssz.HashTreeRoot(b13)
|
||||
b14 := ðpb.BeaconBlock{Slot: 14, ParentRoot: r7[:]}
|
||||
r14, _ := ssz.HashTreeRoot(b14)
|
||||
b15 := ðpb.BeaconBlock{Slot: 15, ParentRoot: r7[:]}
|
||||
r15, _ := ssz.HashTreeRoot(b15)
|
||||
for _, b := range []*ethpb.BeaconBlock{b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15} {
|
||||
if err := db.SaveBlock(context.Background(), ðpb.SignedBeaconBlock{Block: b}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := db.SaveState(context.Background(), &pb.BeaconState{}, bytesutil.ToBytes32(b.ParentRoot)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return [][]byte{r0[:], r1[:], r2[:], r3[:], r4[:], r5[:], r6[:], r7[:], r8[:], r9[:], r10[:], r11[:], r12[:], r13[:], r14[:], r15[:]}, nil
|
||||
}
|
||||
|
||||
// blockTree3 constructs a tree that is 512 blocks in a row.
|
||||
// B0 - B1 - B2 - B3 - .... - B512
|
||||
func blockTree3(db db.Database) ([][]byte, error) {
|
||||
blkCount := 512
|
||||
roots := make([][]byte, 0, blkCount)
|
||||
blks := make([]*ethpb.BeaconBlock, 0, blkCount)
|
||||
b0 := ðpb.BeaconBlock{Slot: 0, ParentRoot: []byte{'g'}}
|
||||
r0, _ := ssz.HashTreeRoot(b0)
|
||||
roots = append(roots, r0[:])
|
||||
blks = append(blks, b0)
|
||||
|
||||
for i := 1; i < blkCount; i++ {
|
||||
b := ðpb.BeaconBlock{Slot: uint64(i), ParentRoot: roots[len(roots)-1]}
|
||||
r, _ := ssz.HashTreeRoot(b)
|
||||
roots = append(roots, r[:])
|
||||
blks = append(blks, b)
|
||||
}
|
||||
|
||||
for _, b := range blks {
|
||||
if err := db.SaveBlock(context.Background(), ðpb.SignedBeaconBlock{Block: b}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := db.SaveState(context.Background(), &pb.BeaconState{}, bytesutil.ToBytes32(b.ParentRoot)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return roots, nil
|
||||
}
|
||||
57
beacon-chain/blockchain/info.go
Normal file
57
beacon-chain/blockchain/info.go
Normal file
@@ -0,0 +1,57 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"sort"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const latestSlotCount = 10
|
||||
|
||||
// HeadsHandler is a handler to serve /heads page in metrics.
|
||||
func (s *Service) HeadsHandler(w http.ResponseWriter, _ *http.Request) {
|
||||
buf := new(bytes.Buffer)
|
||||
|
||||
if _, err := fmt.Fprintf(w, "\n %s\t%s\t", "Head slot", "Head root"); err != nil {
|
||||
logrus.WithError(err).Error("Failed to render chain heads page")
|
||||
return
|
||||
}
|
||||
|
||||
if _, err := fmt.Fprintf(w, "\n %s\t%s\t", "---------", "---------"); err != nil {
|
||||
logrus.WithError(err).Error("Failed to render chain heads page")
|
||||
return
|
||||
}
|
||||
|
||||
slots := s.latestHeadSlots()
|
||||
for _, slot := range slots {
|
||||
r := hex.EncodeToString(bytesutil.Trunc(s.canonicalRoots[uint64(slot)]))
|
||||
if _, err := fmt.Fprintf(w, "\n %d\t\t%s\t", slot, r); err != nil {
|
||||
logrus.WithError(err).Error("Failed to render chain heads page")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
w.WriteHeader(http.StatusOK)
|
||||
if _, err := w.Write(buf.Bytes()); err != nil {
|
||||
log.WithError(err).Error("Failed to render chain heads page")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// This returns the latest head slots in a slice and up to latestSlotCount
|
||||
func (s *Service) latestHeadSlots() []int {
|
||||
slots := make([]int, 0, len(s.canonicalRoots))
|
||||
for k := range s.canonicalRoots {
|
||||
slots = append(slots, int(k))
|
||||
}
|
||||
sort.Ints(slots)
|
||||
if (len(slots)) > latestSlotCount {
|
||||
return slots[len(slots)-latestSlotCount:]
|
||||
}
|
||||
return slots
|
||||
}
|
||||
17
beacon-chain/blockchain/log.go
Normal file
17
beacon-chain/blockchain/log.go
Normal file
@@ -0,0 +1,17 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var log = logrus.WithField("prefix", "blockchain")
|
||||
|
||||
// logs state transition related data every slot.
// NOTE(review): the block-root parameter r is currently unused — confirm
// whether it should be added to the log fields or dropped from the signature.
func logStateTransitionData(b *ethpb.BeaconBlock, r []byte) {
	log.WithFields(logrus.Fields{
		"slot":         b.Slot,
		"attestations": len(b.Body.Attestations),
		"deposits":     len(b.Body.Deposits),
	}).Info("Finished applying state transition")
}
|
||||
68
beacon-chain/blockchain/metrics.go
Normal file
68
beacon-chain/blockchain/metrics.go
Normal file
@@ -0,0 +1,68 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
)
|
||||
|
||||
var (
|
||||
beaconSlot = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "beacon_slot",
|
||||
Help: "Latest slot of the beacon chain state",
|
||||
})
|
||||
beaconHeadSlot = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "beacon_head_slot",
|
||||
Help: "Slot of the head block of the beacon chain",
|
||||
})
|
||||
beaconHeadRoot = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "beacon_head_root",
|
||||
Help: "Root of the head block of the beacon chain, it returns the lowest 8 bytes interpreted as little endian",
|
||||
})
|
||||
competingAtts = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "competing_attestations",
|
||||
Help: "The # of attestations received and processed from a competing chain",
|
||||
})
|
||||
competingBlks = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "competing_blocks",
|
||||
Help: "The # of blocks received and processed from a competing chain",
|
||||
})
|
||||
processedBlkNoPubsub = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "processed_no_pubsub_block_counter",
|
||||
Help: "The # of processed block without pubsub, this usually means the blocks from sync",
|
||||
})
|
||||
processedBlkNoPubsubForkchoice = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "processed_no_pubsub_forkchoice_block_counter",
|
||||
Help: "The # of processed block without pubsub and forkchoice, this means indicate blocks from initial sync",
|
||||
})
|
||||
processedBlk = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "processed_block_counter",
|
||||
Help: "The # of total processed in block chain service, with fork choice and pubsub",
|
||||
})
|
||||
processedAttNoPubsub = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "processed_no_pubsub_attestation_counter",
|
||||
Help: "The # of processed attestation without pubsub, this usually means the attestations from sync",
|
||||
})
|
||||
processedAtt = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "processed_attestation_counter",
|
||||
Help: "The # of processed attestation with pubsub and fork choice, this ususally means attestations from rpc",
|
||||
})
|
||||
headFinalizedEpoch = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "head_finalized_epoch",
|
||||
Help: "Last finalized epoch of the head state",
|
||||
})
|
||||
headFinalizedRoot = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "head_finalized_root",
|
||||
Help: "Last finalized root of the head state",
|
||||
})
|
||||
)
|
||||
|
||||
func (s *Service) reportSlotMetrics(currentSlot uint64) {
|
||||
beaconSlot.Set(float64(currentSlot))
|
||||
beaconHeadSlot.Set(float64(s.HeadSlot()))
|
||||
beaconHeadRoot.Set(float64(bytesutil.ToLowInt64(s.HeadRoot())))
|
||||
if s.headState != nil {
|
||||
headFinalizedEpoch.Set(float64(s.headState.FinalizedCheckpoint.Epoch))
|
||||
headFinalizedRoot.Set(float64(bytesutil.ToLowInt64(s.headState.FinalizedCheckpoint.Root)))
|
||||
}
|
||||
}
|
||||
89
beacon-chain/blockchain/receive_attestation.go
Normal file
89
beacon-chain/blockchain/receive_attestation.go
Normal file
@@ -0,0 +1,89 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/slotutil"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// AttestationReceiver interface defines the methods of chain service receive and processing new attestations.
type AttestationReceiver interface {
	// ReceiveAttestationNoPubsub processes an attestation received from
	// regular sync without rebroadcasting it over pubsub.
	ReceiveAttestationNoPubsub(ctx context.Context, att *ethpb.Attestation) error
}
|
||||
|
||||
// ReceiveAttestationNoPubsub is a function that defines the operations that are preformed on
// attestation that is received from regular sync. The operations consist of:
//  1. Validate attestation, update validator's latest vote
//  2. Apply fork choice to the processed attestation
//  3. Save latest head info
func (s *Service) ReceiveAttestationNoPubsub(ctx context.Context, att *ethpb.Attestation) error {
	ctx, span := trace.StartSpan(ctx, "beacon-chain.blockchain.ReceiveAttestationNoPubsub")
	defer span.End()

	// Update forkchoice store for the new attestation
	if err := s.forkChoiceStore.OnAttestation(ctx, att); err != nil {
		return errors.Wrap(err, "could not process attestation from fork choice service")
	}

	// Run fork choice for head block after updating fork choice store.
	headRoot, err := s.forkChoiceStore.Head(ctx)
	if err != nil {
		return errors.Wrap(err, "could not get head from fork choice service")
	}
	// Only save head if it's different than the current head.
	if !bytes.Equal(headRoot, s.HeadRoot()) {
		// The new head block must exist in the database before it can be adopted.
		signed, err := s.beaconDB.Block(ctx, bytesutil.ToBytes32(headRoot))
		if err != nil {
			return errors.Wrap(err, "could not compute state from block head")
		}
		if signed == nil || signed.Block == nil {
			return errors.New("nil head block")
		}
		if err := s.saveHead(ctx, signed, bytesutil.ToBytes32(headRoot)); err != nil {
			return errors.Wrap(err, "could not save head")
		}
	}

	// Count every successfully processed non-pubsub attestation.
	processedAttNoPubsub.Inc()
	return nil
}
|
||||
|
||||
// This processes attestations from the attestation pool to account for validator votes and fork choice.
// Runs as a long-lived loop: blocks until the state feed signals once, then
// drains the fork-choice attestation pool on every slot tick until s.ctx is done.
func (s *Service) processAttestation() {
	// Wait for state to be initialized.
	stateChannel := make(chan *feed.Event, 1)
	stateSub := s.stateNotifier.StateFeed().Subscribe(stateChannel)
	<-stateChannel
	stateSub.Unsubscribe()

	st := slotutil.GetSlotTicker(s.genesisTime, params.BeaconConfig().SecondsPerSlot)
	for {
		select {
		case <-s.ctx.Done():
			return
		case <-st.C():
			// NOTE(review): context.Background() is not tied to s.ctx, so
			// in-flight processing does not stop on shutdown — confirm intended.
			ctx := context.Background()
			atts := s.attPool.ForkchoiceAttestations()
			for _, a := range atts {
				// NOTE(review): the attestation is deleted before processing,
				// so one that fails below is dropped, not retried — confirm intended.
				if err := s.attPool.DeleteForkchoiceAttestation(a); err != nil {
					log.WithError(err).Error("Could not delete fork choice attestation in pool")
				}

				if err := s.ReceiveAttestationNoPubsub(ctx, a); err != nil {
					log.WithFields(logrus.Fields{
						"targetRoot": fmt.Sprintf("%#x", a.Data.Target.Root),
					}).WithError(err).Error("Could not receive attestation in chain service")
				}
			}
		}
	}
}
|
||||
46
beacon-chain/blockchain/receive_attestation_test.go
Normal file
46
beacon-chain/blockchain/receive_attestation_test.go
Normal file
@@ -0,0 +1,46 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/go-ssz"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// TestReceiveAttestationNoPubsub_ProcessCorrectly feeds an attestation through
// ReceiveAttestationNoPubsub with a stubbed fork-choice store and asserts the
// head is saved without any pubsub broadcast (checked via log output).
func TestReceiveAttestationNoPubsub_ProcessCorrectly(t *testing.T) {
	hook := logTest.NewGlobal()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)
	ctx := context.Background()

	chainService := setupBeaconChain(t, db)
	// Stub fork choice so Head() returns a root different from the saved block's
	// root, forcing the save-head path to run.
	r, _ := ssz.HashTreeRoot(&ethpb.BeaconBlock{})
	chainService.forkChoiceStore = &store{headRoot: r[:]}

	b := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{}}
	if err := chainService.beaconDB.SaveBlock(ctx, b); err != nil {
		t.Fatal(err)
	}
	root, err := ssz.HashTreeRoot(b.Block)
	if err != nil {
		t.Fatal(err)
	}
	if err := chainService.beaconDB.SaveState(ctx, &pb.BeaconState{}, root); err != nil {
		t.Fatal(err)
	}

	a := &ethpb.Attestation{Data: &ethpb.AttestationData{
		Target: &ethpb.Checkpoint{Root: root[:]},
	}}
	if err := chainService.ReceiveAttestationNoPubsub(ctx, a); err != nil {
		t.Fatal(err)
	}

	testutil.AssertLogsContain(t, hook, "Saved new head info")
	testutil.AssertLogsDoNotContain(t, hook, "Broadcasting attestation")
}
|
||||
257
beacon-chain/blockchain/receive_block.go
Normal file
257
beacon-chain/blockchain/receive_block.go
Normal file
@@ -0,0 +1,257 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/hex"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/pkg/errors"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/go-ssz"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/featureconfig"
|
||||
"github.com/prysmaticlabs/prysm/shared/traceutil"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// BlockReceiver interface defines the methods of chain service receive and processing new blocks.
type BlockReceiver interface {
	// ReceiveBlock processes a block from rpc: broadcast + state transition + fork choice + head update.
	ReceiveBlock(ctx context.Context, block *ethpb.SignedBeaconBlock) error
	// ReceiveBlockNoPubsub is ReceiveBlock without the pubsub broadcast (regular sync path).
	ReceiveBlockNoPubsub(ctx context.Context, block *ethpb.SignedBeaconBlock) error
	// ReceiveBlockNoPubsubForkchoice additionally skips fork choice (initial sync path).
	ReceiveBlockNoPubsubForkchoice(ctx context.Context, block *ethpb.SignedBeaconBlock) error
	// ReceiveBlockNoVerify applies the state transition without verifying BLS contents.
	ReceiveBlockNoVerify(ctx context.Context, block *ethpb.SignedBeaconBlock) error
}
|
||||
|
||||
// ReceiveBlock is a function that defines the operations that are preformed on
// blocks that is received from rpc service. The operations consists of:
//  1. Gossip block to other peers
//  2. Validate block, apply state transition and update check points
//  3. Apply fork choice to the processed block
//  4. Save latest head info
func (s *Service) ReceiveBlock(ctx context.Context, block *ethpb.SignedBeaconBlock) error {
	ctx, span := trace.StartSpan(ctx, "beacon-chain.blockchain.ReceiveBlock")
	defer span.End()

	// Hash the block up front so the broadcast log can reference its root.
	root, err := ssz.HashTreeRoot(block.Block)
	if err != nil {
		return errors.Wrap(err, "could not get signing root on received block")
	}

	// Broadcast the new block to the network.
	if err := s.p2p.Broadcast(ctx, block); err != nil {
		return errors.Wrap(err, "could not broadcast block")
	}
	log.WithFields(logrus.Fields{
		"blockRoot": hex.EncodeToString(root[:]),
	}).Debug("Broadcasting block")

	// Validation, state transition, fork choice and head update are all
	// delegated to the no-pubsub path.
	if err := s.ReceiveBlockNoPubsub(ctx, block); err != nil {
		return err
	}

	processedBlk.Inc()
	return nil
}
|
||||
|
||||
// ReceiveBlockNoPubsub is a function that defines the the operations (minus pubsub)
// that are preformed on blocks that is received from regular sync service. The operations consists of:
//  1. Validate block, apply state transition and update check points
//  2. Apply fork choice to the processed block
//  3. Save latest head info
func (s *Service) ReceiveBlockNoPubsub(ctx context.Context, block *ethpb.SignedBeaconBlock) error {
	ctx, span := trace.StartSpan(ctx, "beacon-chain.blockchain.ReceiveBlockNoPubsub")
	defer span.End()
	// Work on a deep copy so the caller's block is never mutated.
	blockCopy := proto.Clone(block).(*ethpb.SignedBeaconBlock)

	// Apply state transition on the new block.
	if err := s.forkChoiceStore.OnBlock(ctx, blockCopy); err != nil {
		err := errors.Wrap(err, "could not process block from fork choice service")
		traceutil.AnnotateError(span, err)
		return err
	}
	root, err := ssz.HashTreeRoot(blockCopy.Block)
	if err != nil {
		return errors.Wrap(err, "could not get signing root on received block")
	}

	// Run fork choice after applying state transition on the new block.
	headRoot, err := s.forkChoiceStore.Head(ctx)
	if err != nil {
		return errors.Wrap(err, "could not get head from fork choice service")
	}
	signedHeadBlock, err := s.beaconDB.Block(ctx, bytesutil.ToBytes32(headRoot))
	if err != nil {
		return errors.Wrap(err, "could not compute state from block head")
	}
	if signedHeadBlock == nil || signedHeadBlock.Block == nil {
		return errors.New("nil head block")
	}

	// Only save head if it's different than the current head.
	if !bytes.Equal(headRoot, s.HeadRoot()) {
		if err := s.saveHead(ctx, signedHeadBlock, bytesutil.ToBytes32(headRoot)); err != nil {
			return errors.Wrap(err, "could not save head")
		}
	}

	// Send notification of the processed block to the state feed.
	s.stateNotifier.StateFeed().Send(&feed.Event{
		Type: statefeed.BlockProcessed,
		Data: &statefeed.BlockProcessedData{
			BlockRoot: root,
			Verified:  true,
		},
	})

	// Add attestations from the block to the pool for fork choice.
	// NOTE(review): this failure is logged and then swallowed (returns nil),
	// also skipping the metrics/logging below — confirm intended.
	if err := s.attPool.SaveBlockAttestations(blockCopy.Block.Body.Attestations); err != nil {
		log.Errorf("Could not save attestation for fork choice: %v", err)
		return nil
	}

	// Reports on block and fork choice metrics.
	s.reportSlotMetrics(blockCopy.Block.Slot)

	// Log if block is a competing block.
	isCompetingBlock(root[:], blockCopy.Block.Slot, headRoot, signedHeadBlock.Block.Slot)

	// Log state transition data.
	logStateTransitionData(blockCopy.Block, root[:])

	// Record the block's epoch for participation tracking; guarded by its own lock.
	s.epochParticipationLock.Lock()
	defer s.epochParticipationLock.Unlock()
	s.epochParticipation[helpers.SlotToEpoch(blockCopy.Block.Slot)] = precompute.Balances

	processedBlkNoPubsub.Inc()

	return nil
}
|
||||
|
||||
// ReceiveBlockNoPubsubForkchoice is a function that defines the all operations (minus pubsub and forkchoice)
// that are preformed blocks that is received from initial sync service. The operations consists of:
//  1. Validate block, apply state transition and update check points
//  2. Save latest head info
func (s *Service) ReceiveBlockNoPubsubForkchoice(ctx context.Context, block *ethpb.SignedBeaconBlock) error {
	ctx, span := trace.StartSpan(ctx, "beacon-chain.blockchain.ReceiveBlockNoForkchoice")
	defer span.End()
	// Work on a deep copy so the caller's block is never mutated.
	blockCopy := proto.Clone(block).(*ethpb.SignedBeaconBlock)

	// Apply state transition on the incoming newly received block.
	if err := s.forkChoiceStore.OnBlock(ctx, blockCopy); err != nil {
		err := errors.Wrap(err, "could not process block from fork choice service")
		traceutil.AnnotateError(span, err)
		return err
	}
	root, err := ssz.HashTreeRoot(blockCopy.Block)
	if err != nil {
		return errors.Wrap(err, "could not get signing root on received block")
	}

	// No fork choice here: the received block itself becomes the head if it
	// differs from the current one.
	if !bytes.Equal(root[:], s.HeadRoot()) {
		if err := s.saveHead(ctx, blockCopy, root); err != nil {
			return errors.Wrap(err, "could not save head")
		}
	}

	// Send notification of the processed block to the state feed.
	s.stateNotifier.StateFeed().Send(&feed.Event{
		Type: statefeed.BlockProcessed,
		Data: &statefeed.BlockProcessedData{
			BlockRoot: root,
			Verified:  true,
		},
	})

	// Reports on block and fork choice metrics.
	s.reportSlotMetrics(blockCopy.Block.Slot)

	// Log state transition data.
	logStateTransitionData(blockCopy.Block, root[:])

	// Record the block's epoch for participation tracking; guarded by its own lock.
	s.epochParticipationLock.Lock()
	defer s.epochParticipationLock.Unlock()
	s.epochParticipation[helpers.SlotToEpoch(blockCopy.Block.Slot)] = precompute.Balances

	processedBlkNoPubsubForkchoice.Inc()
	return nil
}
|
||||
|
||||
// ReceiveBlockNoVerify runs state transition on a input block without verifying the block's BLS contents.
|
||||
// Depends on the security model, this is the "minimal" work a node can do to sync the chain.
|
||||
// It simulates light client behavior and assumes 100% trust with the syncing peer.
|
||||
func (s *Service) ReceiveBlockNoVerify(ctx context.Context, block *ethpb.SignedBeaconBlock) error {
|
||||
ctx, span := trace.StartSpan(ctx, "beacon-chain.blockchain.ReceiveBlockNoVerify")
|
||||
defer span.End()
|
||||
blockCopy := proto.Clone(block).(*ethpb.SignedBeaconBlock)
|
||||
|
||||
// Apply state transition on the incoming newly received blockCopy without verifying its BLS contents.
|
||||
if err := s.forkChoiceStore.OnBlockInitialSyncStateTransition(ctx, blockCopy); err != nil {
|
||||
return errors.Wrap(err, "could not process blockCopy from fork choice service")
|
||||
}
|
||||
root, err := ssz.HashTreeRoot(blockCopy.Block)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get signing root on received blockCopy")
|
||||
}
|
||||
|
||||
if featureconfig.Get().InitSyncCacheState {
|
||||
if !bytes.Equal(root[:], s.HeadRoot()) {
|
||||
if err := s.saveHeadNoDB(ctx, blockCopy, root); err != nil {
|
||||
err := errors.Wrap(err, "could not save head")
|
||||
traceutil.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if !bytes.Equal(root[:], s.HeadRoot()) {
|
||||
if err := s.saveHead(ctx, blockCopy, root); err != nil {
|
||||
err := errors.Wrap(err, "could not save head")
|
||||
traceutil.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Send notification of the processed block to the state feed.
|
||||
s.stateNotifier.StateFeed().Send(&feed.Event{
|
||||
Type: statefeed.BlockProcessed,
|
||||
Data: &statefeed.BlockProcessedData{
|
||||
BlockRoot: root,
|
||||
Verified: false,
|
||||
},
|
||||
})
|
||||
|
||||
// Reports on blockCopy and fork choice metrics.
|
||||
s.reportSlotMetrics(blockCopy.Block.Slot)
|
||||
|
||||
// Log state transition data.
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": blockCopy.Block.Slot,
|
||||
"attestations": len(blockCopy.Block.Body.Attestations),
|
||||
"deposits": len(blockCopy.Block.Body.Deposits),
|
||||
}).Debug("Finished applying state transition")
|
||||
|
||||
s.epochParticipationLock.Lock()
|
||||
defer s.epochParticipationLock.Unlock()
|
||||
s.epochParticipation[helpers.SlotToEpoch(blockCopy.Block.Slot)] = precompute.Balances
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// This checks if the block is from a competing chain, emits warning and updates metrics.
|
||||
func isCompetingBlock(root []byte, slot uint64, headRoot []byte, headSlot uint64) {
|
||||
if !bytes.Equal(root[:], headRoot) {
|
||||
log.WithFields(logrus.Fields{
|
||||
"blkSlot": slot,
|
||||
"blkRoot": hex.EncodeToString(root[:]),
|
||||
"headSlot": headSlot,
|
||||
"headRoot": hex.EncodeToString(headRoot),
|
||||
}).Warn("Calculated head diffs from new block")
|
||||
competingBlks.Inc()
|
||||
}
|
||||
}
|
||||
195
beacon-chain/blockchain/receive_block_test.go
Normal file
195
beacon-chain/blockchain/receive_block_test.go
Normal file
@@ -0,0 +1,195 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/go-ssz"
|
||||
b "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/stateutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
|
||||
// TestReceiveBlock_ProcessCorrectly runs a full block through ReceiveBlock on
// top of a deterministic genesis state and asserts the state transition
// completed (via log output).
func TestReceiveBlock_ProcessCorrectly(t *testing.T) {
	hook := logTest.NewGlobal()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)
	ctx := context.Background()

	chainService := setupBeaconChain(t, db)

	beaconState, privKeys := testutil.DeterministicGenesisState(t, 100)
	genesis, _ := testutil.GenerateFullBlock(beaconState, privKeys, nil, beaconState.Slot+1)
	beaconState, err := state.ExecuteStateTransition(ctx, beaconState, genesis)
	if err != nil {
		t.Fatal(err)
	}
	genesisBlkRoot, err := ssz.HashTreeRoot(genesis.Block)
	if err != nil {
		t.Fatal(err)
	}
	if err := db.SaveState(ctx, beaconState, genesisBlkRoot); err != nil {
		t.Fatal(err)
	}
	cp := &ethpb.Checkpoint{Root: genesisBlkRoot[:]}
	if err := chainService.forkChoiceStore.GenesisStore(ctx, cp, cp); err != nil {
		t.Fatal(err)
	}

	if err := chainService.beaconDB.SaveBlock(ctx, genesis); err != nil {
		t.Fatalf("Could not save block to db: %v", err)
	}

	// NOTE(review): this SaveState call repeats the identical one above —
	// looks redundant; confirm before removing.
	if err := db.SaveState(ctx, beaconState, genesisBlkRoot); err != nil {
		t.Fatal(err)
	}

	slot := beaconState.Slot + 1
	block, err := testutil.GenerateFullBlock(beaconState, privKeys, nil, slot)
	if err != nil {
		t.Fatal(err)
	}
	if err := chainService.beaconDB.SaveBlock(ctx, block); err != nil {
		t.Fatal(err)
	}
	if err := chainService.ReceiveBlock(context.Background(), block); err != nil {
		t.Errorf("Block failed processing: %v", err)
	}
	testutil.AssertLogsContain(t, hook, "Finished applying state transition")
}
|
||||
|
||||
// TestReceiveReceiveBlockNoPubsub_CanSaveHeadInfo stubs fork choice to point
// at a different head block and asserts ReceiveBlockNoPubsub adopts it as the
// new head (root, block, and "Saved new head info" log).
func TestReceiveReceiveBlockNoPubsub_CanSaveHeadInfo(t *testing.T) {
	hook := logTest.NewGlobal()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)
	ctx := context.Background()

	chainService := setupBeaconChain(t, db)

	headBlk := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Slot: 100}}
	if err := db.SaveBlock(ctx, headBlk); err != nil {
		t.Fatal(err)
	}
	r, err := ssz.HashTreeRoot(headBlk.Block)
	if err != nil {
		t.Fatal(err)
	}
	head := &pb.BeaconState{Slot: 100, FinalizedCheckpoint: &ethpb.Checkpoint{Root: r[:]}}
	if err := db.SaveState(ctx, head, r); err != nil {
		t.Fatal(err)
	}
	// Stub fork choice to always return headBlk's root as the head.
	chainService.forkChoiceStore = &store{headRoot: r[:]}

	if err := chainService.ReceiveBlockNoPubsub(ctx, &ethpb.SignedBeaconBlock{
		Block: &ethpb.BeaconBlock{
			Slot: 1,
			Body: &ethpb.BeaconBlockBody{},
		},
	}); err != nil {
		t.Fatal(err)
	}

	if !bytes.Equal(r[:], chainService.HeadRoot()) {
		t.Error("Incorrect head root saved")
	}

	if !reflect.DeepEqual(headBlk, chainService.HeadBlock()) {
		t.Error("Incorrect head block saved")
	}

	testutil.AssertLogsContain(t, hook, "Saved new head info")
}
|
||||
|
||||
// TestReceiveReceiveBlockNoPubsub_SameHead verifies that when fork choice
// returns the head the service already has, ReceiveBlockNoPubsub does not
// re-save head info.
func TestReceiveReceiveBlockNoPubsub_SameHead(t *testing.T) {
	hook := logTest.NewGlobal()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)
	ctx := context.Background()

	chainService := setupBeaconChain(t, db)

	headBlk := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{}}
	if err := db.SaveBlock(ctx, headBlk); err != nil {
		t.Fatal(err)
	}
	newBlk := &ethpb.SignedBeaconBlock{
		Block: &ethpb.BeaconBlock{
			Slot: 1,
			Body: &ethpb.BeaconBlockBody{},
		},
	}
	newRoot, _ := ssz.HashTreeRoot(newBlk.Block)
	if err := db.SaveBlock(ctx, newBlk); err != nil {
		t.Fatal(err)
	}

	// Make the stubbed fork-choice head equal to the service's current
	// canonical root so the save-head branch is skipped.
	chainService.forkChoiceStore = &store{headRoot: newRoot[:]}
	chainService.canonicalRoots[0] = newRoot[:]

	if err := chainService.ReceiveBlockNoPubsub(ctx, newBlk); err != nil {
		t.Fatal(err)
	}

	testutil.AssertLogsDoNotContain(t, hook, "Saved new head info")
}
|
||||
|
||||
// TestReceiveBlockNoPubsubForkchoice_ProcessCorrectly runs a block through the
// no-pubsub, no-forkchoice path and asserts the state transition completed
// while fork choice was skipped (checked via log output).
func TestReceiveBlockNoPubsubForkchoice_ProcessCorrectly(t *testing.T) {
	hook := logTest.NewGlobal()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)
	ctx := context.Background()

	chainService := setupBeaconChain(t, db)
	beaconState, privKeys := testutil.DeterministicGenesisState(t, 100)

	block, err := testutil.GenerateFullBlock(beaconState, privKeys, nil, beaconState.Slot)
	if err != nil {
		t.Fatal(err)
	}

	stateRoot, err := stateutil.HashTreeRootState(beaconState)
	if err != nil {
		t.Fatal(err)
	}

	genesis := b.NewGenesisBlock(stateRoot[:])
	parentRoot, err := ssz.HashTreeRoot(genesis.Block)
	if err != nil {
		t.Fatal(err)
	}
	if err := db.SaveState(ctx, beaconState, parentRoot); err != nil {
		t.Fatal(err)
	}
	if err := chainService.forkChoiceStore.GenesisStore(ctx, &ethpb.Checkpoint{Root: parentRoot[:]}, &ethpb.Checkpoint{Root: parentRoot[:]}); err != nil {
		t.Fatal(err)
	}

	if err := chainService.beaconDB.SaveBlock(ctx, block); err != nil {
		t.Fatalf("Could not save block to db: %v", err)
	}

	// Regenerate the block and save the parent state so the second receive
	// path can look it up by the block's parent root.
	block, err = testutil.GenerateFullBlock(beaconState, privKeys, nil, beaconState.Slot)
	if err != nil {
		t.Fatal(err)
	}
	if err := db.SaveState(ctx, beaconState, bytesutil.ToBytes32(block.Block.ParentRoot)); err != nil {
		t.Fatal(err)
	}

	if err := chainService.beaconDB.SaveBlock(ctx, block); err != nil {
		t.Fatal(err)
	}
	if err := chainService.ReceiveBlockNoPubsubForkchoice(context.Background(), block); err != nil {
		t.Errorf("Block failed processing: %v", err)
	}
	testutil.AssertLogsContain(t, hook, "Finished applying state transition")
	testutil.AssertLogsDoNotContain(t, hook, "Finished fork choice")
}
|
||||
@@ -4,248 +4,386 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"runtime"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/attestation"
|
||||
b "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
"github.com/pkg/errors"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/go-ssz"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain/forkchoice"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/event"
|
||||
"github.com/prysmaticlabs/prysm/shared/hashutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/p2p"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/featureconfig"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
var log = logrus.WithField("prefix", "blockchain")
|
||||
|
||||
// ChainFeeds interface defines the methods of the ChainService which provide
|
||||
// information feeds.
|
||||
type ChainFeeds interface {
|
||||
StateInitializedFeed() *event.Feed
|
||||
}
|
||||
|
||||
// ChainService represents a service that handles the internal
|
||||
// Service represents a service that handles the internal
|
||||
// logic of managing the full PoS beacon chain.
|
||||
type ChainService struct {
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
beaconDB *db.BeaconDB
|
||||
web3Service *powchain.Web3Service
|
||||
attsService attestation.TargetHandler
|
||||
opsPoolService operations.OperationFeeds
|
||||
chainStartChan chan time.Time
|
||||
canonicalBlockFeed *event.Feed
|
||||
genesisTime time.Time
|
||||
finalizedEpoch uint64
|
||||
stateInitializedFeed *event.Feed
|
||||
p2p p2p.Broadcaster
|
||||
canonicalBlocks map[uint64][]byte
|
||||
canonicalBlocksLock sync.RWMutex
|
||||
receiveBlockLock sync.Mutex
|
||||
maxRoutines int64
|
||||
type Service struct {
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
beaconDB db.Database
|
||||
depositCache *depositcache.DepositCache
|
||||
chainStartFetcher powchain.ChainStartFetcher
|
||||
attPool attestations.Pool
|
||||
forkChoiceStore forkchoice.ForkChoicer
|
||||
genesisTime time.Time
|
||||
p2p p2p.Broadcaster
|
||||
maxRoutines int64
|
||||
headSlot uint64
|
||||
headBlock *ethpb.SignedBeaconBlock
|
||||
headState *pb.BeaconState
|
||||
canonicalRoots map[uint64][]byte
|
||||
headLock sync.RWMutex
|
||||
stateNotifier statefeed.Notifier
|
||||
genesisRoot [32]byte
|
||||
epochParticipation map[uint64]*precompute.Balance
|
||||
epochParticipationLock sync.RWMutex
|
||||
}
|
||||
|
||||
// Config options for the service.
|
||||
type Config struct {
|
||||
BeaconBlockBuf int
|
||||
Web3Service *powchain.Web3Service
|
||||
AttsService attestation.TargetHandler
|
||||
BeaconDB *db.BeaconDB
|
||||
OpsPoolService operations.OperationFeeds
|
||||
DevMode bool
|
||||
P2p p2p.Broadcaster
|
||||
MaxRoutines int64
|
||||
BeaconBlockBuf int
|
||||
ChainStartFetcher powchain.ChainStartFetcher
|
||||
BeaconDB db.Database
|
||||
DepositCache *depositcache.DepositCache
|
||||
AttPool attestations.Pool
|
||||
P2p p2p.Broadcaster
|
||||
MaxRoutines int64
|
||||
StateNotifier statefeed.Notifier
|
||||
}
|
||||
|
||||
// NewChainService instantiates a new service instance that will
|
||||
// NewService instantiates a new block service instance that will
|
||||
// be registered into a running beacon node.
|
||||
func NewChainService(ctx context.Context, cfg *Config) (*ChainService, error) {
|
||||
func NewService(ctx context.Context, cfg *Config) (*Service, error) {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
return &ChainService{
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
beaconDB: cfg.BeaconDB,
|
||||
web3Service: cfg.Web3Service,
|
||||
opsPoolService: cfg.OpsPoolService,
|
||||
attsService: cfg.AttsService,
|
||||
canonicalBlockFeed: new(event.Feed),
|
||||
chainStartChan: make(chan time.Time),
|
||||
stateInitializedFeed: new(event.Feed),
|
||||
p2p: cfg.P2p,
|
||||
canonicalBlocks: make(map[uint64][]byte),
|
||||
maxRoutines: cfg.MaxRoutines,
|
||||
store := forkchoice.NewForkChoiceService(ctx, cfg.BeaconDB)
|
||||
return &Service{
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
beaconDB: cfg.BeaconDB,
|
||||
depositCache: cfg.DepositCache,
|
||||
chainStartFetcher: cfg.ChainStartFetcher,
|
||||
attPool: cfg.AttPool,
|
||||
forkChoiceStore: store,
|
||||
p2p: cfg.P2p,
|
||||
canonicalRoots: make(map[uint64][]byte),
|
||||
maxRoutines: cfg.MaxRoutines,
|
||||
stateNotifier: cfg.StateNotifier,
|
||||
epochParticipation: make(map[uint64]*precompute.Balance),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Start a blockchain service's main event loop.
|
||||
func (c *ChainService) Start() {
|
||||
beaconState, err := c.beaconDB.HeadState(c.ctx)
|
||||
func (s *Service) Start() {
|
||||
ctx := context.TODO()
|
||||
beaconState, err := s.beaconDB.HeadState(ctx)
|
||||
if err != nil {
|
||||
log.Fatalf("Could not fetch beacon state: %v", err)
|
||||
}
|
||||
|
||||
// For running initial sync with state cache, in an event of restart, we use
|
||||
// last finalized check point as start point to sync instead of head
|
||||
// state. This is because we no longer save state every slot during sync.
|
||||
if featureconfig.Get().InitSyncCacheState {
|
||||
cp, err := s.beaconDB.FinalizedCheckpoint(ctx)
|
||||
if err != nil {
|
||||
log.Fatalf("Could not fetch finalized cp: %v", err)
|
||||
}
|
||||
if beaconState == nil {
|
||||
beaconState, err = s.beaconDB.State(ctx, bytesutil.ToBytes32(cp.Root))
|
||||
if err != nil {
|
||||
log.Fatalf("Could not fetch beacon state: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If the chain has already been initialized, simply start the block processing routine.
|
||||
if beaconState != nil {
|
||||
log.Info("Beacon chain data already exists, starting service")
|
||||
c.genesisTime = time.Unix(int64(beaconState.GenesisTime), 0)
|
||||
c.finalizedEpoch = beaconState.FinalizedEpoch
|
||||
log.Info("Blockchain data already exists in DB, initializing...")
|
||||
s.genesisTime = time.Unix(int64(beaconState.GenesisTime), 0)
|
||||
if err := s.initializeChainInfo(ctx); err != nil {
|
||||
log.Fatalf("Could not set up chain info: %v", err)
|
||||
}
|
||||
justifiedCheckpoint, err := s.beaconDB.JustifiedCheckpoint(ctx)
|
||||
if err != nil {
|
||||
log.Fatalf("Could not get justified checkpoint: %v", err)
|
||||
}
|
||||
finalizedCheckpoint, err := s.beaconDB.FinalizedCheckpoint(ctx)
|
||||
if err != nil {
|
||||
log.Fatalf("Could not get finalized checkpoint: %v", err)
|
||||
}
|
||||
if err := s.forkChoiceStore.GenesisStore(ctx, justifiedCheckpoint, finalizedCheckpoint); err != nil {
|
||||
log.Fatalf("Could not start fork choice service: %v", err)
|
||||
}
|
||||
s.stateNotifier.StateFeed().Send(&feed.Event{
|
||||
Type: statefeed.Initialized,
|
||||
Data: &statefeed.InitializedData{
|
||||
StartTime: s.genesisTime,
|
||||
},
|
||||
})
|
||||
} else {
|
||||
log.Info("Waiting for ChainStart log from the Validator Deposit Contract to start the beacon chain...")
|
||||
if c.web3Service == nil {
|
||||
log.Info("Waiting to reach the validator deposit threshold to start the beacon chain...")
|
||||
if s.chainStartFetcher == nil {
|
||||
log.Fatal("Not configured web3Service for POW chain")
|
||||
return // return need for TestStartUninitializedChainWithoutConfigPOWChain.
|
||||
}
|
||||
subChainStart := c.web3Service.ChainStartFeed().Subscribe(c.chainStartChan)
|
||||
go func() {
|
||||
genesisTime := <-c.chainStartChan
|
||||
c.processChainStartTime(genesisTime, subChainStart)
|
||||
return
|
||||
stateChannel := make(chan *feed.Event, 1)
|
||||
stateSub := s.stateNotifier.StateFeed().Subscribe(stateChannel)
|
||||
defer stateSub.Unsubscribe()
|
||||
for {
|
||||
select {
|
||||
case event := <-stateChannel:
|
||||
if event.Type == statefeed.ChainStarted {
|
||||
data := event.Data.(*statefeed.ChainStartedData)
|
||||
log.WithField("starttime", data.StartTime).Debug("Received chain start event")
|
||||
s.processChainStartTime(ctx, data.StartTime)
|
||||
return
|
||||
}
|
||||
case <-s.ctx.Done():
|
||||
log.Debug("Context closed, exiting goroutine")
|
||||
return
|
||||
case err := <-stateSub.Err():
|
||||
log.WithError(err).Error("Subscription to state notifier failed")
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
go s.processAttestation()
|
||||
}
|
||||
|
||||
// processChainStartTime initializes a series of deposits from the ChainStart deposits in the eth1
|
||||
// deposit contract, initializes the beacon chain's state, and kicks off the beacon chain.
|
||||
func (c *ChainService) processChainStartTime(genesisTime time.Time, chainStartSub event.Subscription) {
|
||||
initialDepositsData := c.web3Service.ChainStartDeposits()
|
||||
initialDeposits := make([]*pb.Deposit, len(initialDepositsData))
|
||||
for i := range initialDepositsData {
|
||||
initialDeposits[i] = &pb.Deposit{DepositData: initialDepositsData[i]}
|
||||
}
|
||||
|
||||
beaconState, err := c.initializeBeaconChain(genesisTime, initialDeposits, c.web3Service.ChainStartETH1Data())
|
||||
if err != nil {
|
||||
func (s *Service) processChainStartTime(ctx context.Context, genesisTime time.Time) {
|
||||
preGenesisState := s.chainStartFetcher.PreGenesisState()
|
||||
if err := s.initializeBeaconChain(ctx, genesisTime, preGenesisState, s.chainStartFetcher.ChainStartEth1Data()); err != nil {
|
||||
log.Fatalf("Could not initialize beacon chain: %v", err)
|
||||
}
|
||||
c.finalizedEpoch = beaconState.FinalizedEpoch
|
||||
c.stateInitializedFeed.Send(genesisTime)
|
||||
chainStartSub.Unsubscribe()
|
||||
s.stateNotifier.StateFeed().Send(&feed.Event{
|
||||
Type: statefeed.Initialized,
|
||||
Data: &statefeed.InitializedData{
|
||||
StartTime: genesisTime,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// initializes the state and genesis block of the beacon chain to persistent storage
|
||||
// based on a genesis timestamp value obtained from the ChainStart event emitted
|
||||
// by the ETH1.0 Deposit Contract and the POWChain service of the node.
|
||||
func (c *ChainService) initializeBeaconChain(genesisTime time.Time, deposits []*pb.Deposit,
|
||||
eth1data *pb.Eth1Data) (*pb.BeaconState, error) {
|
||||
ctx, span := trace.StartSpan(context.Background(), "beacon-chain.ChainService.initializeBeaconChain")
|
||||
func (s *Service) initializeBeaconChain(
|
||||
ctx context.Context,
|
||||
genesisTime time.Time,
|
||||
preGenesisState *pb.BeaconState,
|
||||
eth1data *ethpb.Eth1Data) error {
|
||||
_, span := trace.StartSpan(context.Background(), "beacon-chain.Service.initializeBeaconChain")
|
||||
defer span.End()
|
||||
log.Info("ChainStart time reached, starting the beacon chain!")
|
||||
c.genesisTime = genesisTime
|
||||
log.Info("Genesis time reached, starting the beacon chain")
|
||||
s.genesisTime = genesisTime
|
||||
unixTime := uint64(genesisTime.Unix())
|
||||
if err := c.beaconDB.InitializeState(c.ctx, unixTime, deposits, eth1data); err != nil {
|
||||
return nil, fmt.Errorf("could not initialize beacon state to disk: %v", err)
|
||||
}
|
||||
beaconState, err := c.beaconDB.HeadState(c.ctx)
|
||||
|
||||
genesisState, err := state.OptimizedGenesisBeaconState(unixTime, preGenesisState, eth1data)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not attempt fetch beacon state: %v", err)
|
||||
return errors.Wrap(err, "could not initialize genesis state")
|
||||
}
|
||||
|
||||
stateRoot, err := hashutil.HashProto(beaconState)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not hash beacon state: %v", err)
|
||||
}
|
||||
genBlock := b.NewGenesisBlock(stateRoot[:])
|
||||
genBlockRoot, err := hashutil.HashBeaconBlock(genBlock)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not hash beacon block: %v", err)
|
||||
if err := s.saveGenesisData(ctx, genesisState); err != nil {
|
||||
return errors.Wrap(err, "could not save genesis data")
|
||||
}
|
||||
|
||||
// TODO(#2011): Remove this in state caching.
|
||||
beaconState.LatestBlock = genBlock
|
||||
// Update committee shuffled indices for genesis epoch.
|
||||
if featureconfig.Get().EnableNewCache {
|
||||
if err := helpers.UpdateCommitteeCache(genesisState, 0 /* genesis epoch */); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err := c.beaconDB.SaveBlock(genBlock); err != nil {
|
||||
return nil, fmt.Errorf("could not save genesis block to disk: %v", err)
|
||||
}
|
||||
if err := c.beaconDB.SaveAttestationTarget(ctx, &pb.AttestationTarget{
|
||||
Slot: genBlock.Slot,
|
||||
BlockRoot: genBlockRoot[:],
|
||||
ParentRoot: genBlock.ParentRootHash32,
|
||||
}); err != nil {
|
||||
return nil, fmt.Errorf("failed to save attestation target: %v", err)
|
||||
}
|
||||
if err := c.beaconDB.UpdateChainHead(ctx, genBlock, beaconState); err != nil {
|
||||
return nil, fmt.Errorf("could not set chain head, %v", err)
|
||||
}
|
||||
if err := c.beaconDB.SaveJustifiedBlock(genBlock); err != nil {
|
||||
return nil, fmt.Errorf("could not save gensis block as justified block: %v", err)
|
||||
}
|
||||
if err := c.beaconDB.SaveFinalizedBlock(genBlock); err != nil {
|
||||
return nil, fmt.Errorf("could not save gensis block as finalized block: %v", err)
|
||||
}
|
||||
if err := c.beaconDB.SaveJustifiedState(beaconState); err != nil {
|
||||
return nil, fmt.Errorf("could not save gensis state as justified state: %v", err)
|
||||
}
|
||||
if err := c.beaconDB.SaveFinalizedState(beaconState); err != nil {
|
||||
return nil, fmt.Errorf("could not save gensis state as finalized state: %v", err)
|
||||
}
|
||||
return beaconState, nil
|
||||
}
|
||||
|
||||
// Stop the blockchain service's main event loop and associated goroutines.
|
||||
func (c *ChainService) Stop() error {
|
||||
defer c.cancel()
|
||||
|
||||
log.Info("Stopping service")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Status always returns nil.
|
||||
// TODO(1202): Add service health checks.
|
||||
func (c *ChainService) Status() error {
|
||||
if runtime.NumGoroutine() > int(c.maxRoutines) {
|
||||
// Stop the blockchain service's main event loop and associated goroutines.
|
||||
func (s *Service) Stop() error {
|
||||
defer s.cancel()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Status always returns nil unless there is an error condition that causes
|
||||
// this service to be unhealthy.
|
||||
func (s *Service) Status() error {
|
||||
if runtime.NumGoroutine() > int(s.maxRoutines) {
|
||||
return fmt.Errorf("too many goroutines %d", runtime.NumGoroutine())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CanonicalBlockFeed returns a channel that is written to
|
||||
// whenever a new block is determined to be canonical in the chain.
|
||||
func (c *ChainService) CanonicalBlockFeed() *event.Feed {
|
||||
return c.canonicalBlockFeed
|
||||
}
|
||||
// This gets called to update canonical root mapping.
|
||||
func (s *Service) saveHead(ctx context.Context, signed *ethpb.SignedBeaconBlock, r [32]byte) error {
|
||||
s.headLock.Lock()
|
||||
defer s.headLock.Unlock()
|
||||
|
||||
// StateInitializedFeed returns a feed that is written to
|
||||
// when the beacon state is first initialized.
|
||||
func (c *ChainService) StateInitializedFeed() *event.Feed {
|
||||
return c.stateInitializedFeed
|
||||
}
|
||||
if signed == nil || signed.Block == nil {
|
||||
return errors.New("cannot save nil head block")
|
||||
}
|
||||
|
||||
// ChainHeadRoot returns the hash root of the last beacon block processed by the
|
||||
// block chain service.
|
||||
func (c *ChainService) ChainHeadRoot() ([32]byte, error) {
|
||||
head, err := c.beaconDB.ChainHead()
|
||||
s.headSlot = signed.Block.Slot
|
||||
|
||||
s.canonicalRoots[signed.Block.Slot] = r[:]
|
||||
|
||||
if err := s.beaconDB.SaveHeadBlockRoot(ctx, r); err != nil {
|
||||
return errors.Wrap(err, "could not save head root in DB")
|
||||
}
|
||||
s.headBlock = signed
|
||||
|
||||
headState, err := s.beaconDB.State(ctx, r)
|
||||
if err != nil {
|
||||
return [32]byte{}, fmt.Errorf("could not retrieve chain head: %v", err)
|
||||
return errors.Wrap(err, "could not retrieve head state in DB")
|
||||
}
|
||||
s.headState = headState
|
||||
|
||||
root, err := hashutil.HashBeaconBlock(head)
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": signed.Block.Slot,
|
||||
"headRoot": fmt.Sprintf("%#x", r),
|
||||
}).Debug("Saved new head info")
|
||||
return nil
|
||||
}
|
||||
|
||||
// This gets called to update canonical root mapping. It does not save head block
|
||||
// root in DB. With the inception of inital-sync-cache-state flag, it uses finalized
|
||||
// check point as anchors to resume sync therefore head is no longer needed to be saved on per slot basis.
|
||||
func (s *Service) saveHeadNoDB(ctx context.Context, b *ethpb.SignedBeaconBlock, r [32]byte) error {
|
||||
s.headLock.Lock()
|
||||
defer s.headLock.Unlock()
|
||||
|
||||
s.headSlot = b.Block.Slot
|
||||
|
||||
s.canonicalRoots[b.Block.Slot] = r[:]
|
||||
|
||||
s.headBlock = b
|
||||
|
||||
headState, err := s.beaconDB.State(ctx, r)
|
||||
if err != nil {
|
||||
return [32]byte{}, fmt.Errorf("could not tree hash parent block: %v", err)
|
||||
return errors.Wrap(err, "could not retrieve head state in DB")
|
||||
}
|
||||
return root, nil
|
||||
s.headState = headState
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": b.Block.Slot,
|
||||
"headRoot": fmt.Sprintf("%#x", r),
|
||||
}).Debug("Saved new head info")
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateCanonicalRoots sets a new head into the canonical block roots map.
|
||||
func (c *ChainService) UpdateCanonicalRoots(newHead *pb.BeaconBlock, newHeadRoot [32]byte) {
|
||||
c.canonicalBlocksLock.Lock()
|
||||
defer c.canonicalBlocksLock.Unlock()
|
||||
c.canonicalBlocks[newHead.Slot] = newHeadRoot[:]
|
||||
// This gets called when beacon chain is first initialized to save validator indices and pubkeys in db
|
||||
func (s *Service) saveGenesisValidators(ctx context.Context, state *pb.BeaconState) error {
|
||||
for i, v := range state.Validators {
|
||||
if err := s.beaconDB.SaveValidatorIndex(ctx, bytesutil.ToBytes48(v.PublicKey), uint64(i)); err != nil {
|
||||
return errors.Wrapf(err, "could not save validator index: %d", i)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsCanonical returns true if the input block hash of the corresponding slot
|
||||
// is part of the canonical chain. False otherwise.
|
||||
func (c *ChainService) IsCanonical(slot uint64, hash []byte) bool {
|
||||
c.canonicalBlocksLock.RLock()
|
||||
defer c.canonicalBlocksLock.RUnlock()
|
||||
if canonicalHash, ok := c.canonicalBlocks[slot]; ok {
|
||||
return bytes.Equal(canonicalHash, hash)
|
||||
// This gets called when beacon chain is first initialized to save genesis data (state, block, and more) in db
|
||||
func (s *Service) saveGenesisData(ctx context.Context, genesisState *pb.BeaconState) error {
|
||||
s.headLock.Lock()
|
||||
defer s.headLock.Unlock()
|
||||
|
||||
stateRoot, err := ssz.HashTreeRoot(genesisState)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not tree hash genesis state")
|
||||
}
|
||||
return false
|
||||
genesisBlk := blocks.NewGenesisBlock(stateRoot[:])
|
||||
genesisBlkRoot, err := ssz.HashTreeRoot(genesisBlk.Block)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get genesis block root")
|
||||
}
|
||||
|
||||
if err := s.beaconDB.SaveBlock(ctx, genesisBlk); err != nil {
|
||||
return errors.Wrap(err, "could not save genesis block")
|
||||
}
|
||||
if err := s.beaconDB.SaveState(ctx, genesisState, genesisBlkRoot); err != nil {
|
||||
return errors.Wrap(err, "could not save genesis state")
|
||||
}
|
||||
if err := s.beaconDB.SaveHeadBlockRoot(ctx, genesisBlkRoot); err != nil {
|
||||
return errors.Wrap(err, "could not save head block root")
|
||||
}
|
||||
if err := s.beaconDB.SaveGenesisBlockRoot(ctx, genesisBlkRoot); err != nil {
|
||||
return errors.Wrap(err, "could save genesis block root")
|
||||
}
|
||||
if err := s.saveGenesisValidators(ctx, genesisState); err != nil {
|
||||
return errors.Wrap(err, "could not save genesis validators")
|
||||
}
|
||||
|
||||
genesisCheckpoint := ðpb.Checkpoint{Root: genesisBlkRoot[:]}
|
||||
if err := s.forkChoiceStore.GenesisStore(ctx, genesisCheckpoint, genesisCheckpoint); err != nil {
|
||||
return errors.Wrap(err, "Could not start fork choice service: %v")
|
||||
}
|
||||
|
||||
s.genesisRoot = genesisBlkRoot
|
||||
s.headBlock = genesisBlk
|
||||
s.headState = genesisState
|
||||
s.canonicalRoots[genesisState.Slot] = genesisBlkRoot[:]
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// This gets called to initialize chain info variables using the finalized checkpoint stored in DB
|
||||
func (s *Service) initializeChainInfo(ctx context.Context) error {
|
||||
s.headLock.Lock()
|
||||
defer s.headLock.Unlock()
|
||||
|
||||
genesisBlock, err := s.beaconDB.GenesisBlock(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get genesis block from db")
|
||||
}
|
||||
if genesisBlock == nil {
|
||||
return errors.New("no genesis block in db")
|
||||
}
|
||||
genesisBlkRoot, err := ssz.HashTreeRoot(genesisBlock.Block)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get signing root of genesis block")
|
||||
}
|
||||
s.genesisRoot = genesisBlkRoot
|
||||
|
||||
finalized, err := s.beaconDB.FinalizedCheckpoint(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get finalized checkpoint from db")
|
||||
}
|
||||
if finalized == nil {
|
||||
// This should never happen. At chain start, the finalized checkpoint
|
||||
// would be the genesis state and block.
|
||||
return errors.New("no finalized epoch in the database")
|
||||
}
|
||||
s.headState, err = s.beaconDB.State(ctx, bytesutil.ToBytes32(finalized.Root))
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get finalized state from db")
|
||||
}
|
||||
s.headBlock, err = s.beaconDB.Block(ctx, bytesutil.ToBytes32(finalized.Root))
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get finalized block from db")
|
||||
}
|
||||
|
||||
if s.headBlock != nil && s.headBlock.Block != nil {
|
||||
s.headSlot = s.headBlock.Block.Slot
|
||||
}
|
||||
s.canonicalRoots[s.headSlot] = finalized.Root
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
37
beacon-chain/blockchain/service_norace_test.go
Normal file
37
beacon-chain/blockchain/service_norace_test.go
Normal file
@@ -0,0 +1,37 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io/ioutil"
|
||||
"testing"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func init() {
|
||||
logrus.SetLevel(logrus.DebugLevel)
|
||||
logrus.SetOutput(ioutil.Discard)
|
||||
}
|
||||
|
||||
func TestChainService_SaveHead_DataRace(t *testing.T) {
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
s := &Service{
|
||||
beaconDB: db,
|
||||
canonicalRoots: make(map[uint64][]byte),
|
||||
}
|
||||
go func() {
|
||||
s.saveHead(
|
||||
context.Background(),
|
||||
ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: 777}},
|
||||
[32]byte{},
|
||||
)
|
||||
}()
|
||||
s.saveHead(
|
||||
context.Background(),
|
||||
ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: 888}},
|
||||
[32]byte{},
|
||||
)
|
||||
}
|
||||
@@ -1,295 +1,171 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"encoding/hex"
|
||||
"io/ioutil"
|
||||
"math/big"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
gethTypes "github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/attestation"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
ssz "github.com/prysmaticlabs/go-ssz"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
|
||||
b "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/internal"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bls"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/event"
|
||||
"github.com/prysmaticlabs/prysm/shared/featureconfig"
|
||||
"github.com/prysmaticlabs/prysm/shared/forkutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/hashutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/p2p"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
"github.com/sirupsen/logrus"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
|
||||
// Ensure ChainService implements interfaces.
|
||||
var _ = ChainFeeds(&ChainService{})
|
||||
|
||||
func init() {
|
||||
logrus.SetLevel(logrus.DebugLevel)
|
||||
logrus.SetOutput(ioutil.Discard)
|
||||
featureconfig.InitFeatureConfig(&featureconfig.FeatureFlagConfig{
|
||||
EnableCrosslinks: true,
|
||||
EnableCheckBlockStateRoot: true,
|
||||
})
|
||||
}
|
||||
|
||||
type mockOperationService struct{}
|
||||
|
||||
func (ms *mockOperationService) IncomingProcessedBlockFeed() *event.Feed {
|
||||
return new(event.Feed)
|
||||
type store struct {
|
||||
headRoot []byte
|
||||
}
|
||||
|
||||
func (ms *mockOperationService) IncomingAttFeed() *event.Feed {
|
||||
func (s *store) OnBlock(ctx context.Context, b *ethpb.SignedBeaconBlock) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ms *mockOperationService) IncomingExitFeed() *event.Feed {
|
||||
func (s *store) OnBlockInitialSyncStateTransition(ctx context.Context, b *ethpb.SignedBeaconBlock) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
type mockClient struct{}
|
||||
|
||||
func (m *mockClient) SubscribeNewHead(ctx context.Context, ch chan<- *gethTypes.Header) (ethereum.Subscription, error) {
|
||||
return new(event.Feed).Subscribe(ch), nil
|
||||
func (s *store) OnAttestation(ctx context.Context, a *ethpb.Attestation) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockClient) BlockByHash(ctx context.Context, hash common.Hash) (*gethTypes.Block, error) {
|
||||
head := &gethTypes.Header{Number: big.NewInt(0), Difficulty: big.NewInt(100)}
|
||||
return gethTypes.NewBlockWithHeader(head), nil
|
||||
func (s *store) GenesisStore(ctx context.Context, justifiedCheckpoint *ethpb.Checkpoint, finalizedCheckpoint *ethpb.Checkpoint) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockClient) BlockByNumber(ctx context.Context, number *big.Int) (*gethTypes.Block, error) {
|
||||
head := &gethTypes.Header{Number: big.NewInt(0), Difficulty: big.NewInt(100)}
|
||||
return gethTypes.NewBlockWithHeader(head), nil
|
||||
func (s *store) FinalizedCheckpt() *ethpb.Checkpoint {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockClient) HeaderByNumber(ctx context.Context, number *big.Int) (*gethTypes.Header, error) {
|
||||
return &gethTypes.Header{Number: big.NewInt(0), Difficulty: big.NewInt(100)}, nil
|
||||
func (s *store) Head(ctx context.Context) ([]byte, error) {
|
||||
return s.headRoot, nil
|
||||
}
|
||||
|
||||
func (m *mockClient) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- gethTypes.Log) (ethereum.Subscription, error) {
|
||||
return new(event.Feed).Subscribe(ch), nil
|
||||
type mockBeaconNode struct {
|
||||
stateFeed *event.Feed
|
||||
}
|
||||
|
||||
func (m *mockClient) CallContract(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) {
|
||||
return []byte{'t', 'e', 's', 't'}, nil
|
||||
}
|
||||
|
||||
func (m *mockClient) CodeAt(ctx context.Context, account common.Address, blockNumber *big.Int) ([]byte, error) {
|
||||
return []byte{'t', 'e', 's', 't'}, nil
|
||||
}
|
||||
|
||||
func (m *mockClient) FilterLogs(ctx context.Context, q ethereum.FilterQuery) ([]gethTypes.Log, error) {
|
||||
logs := make([]gethTypes.Log, 3)
|
||||
for i := 0; i < len(logs); i++ {
|
||||
logs[i].Address = common.Address{}
|
||||
logs[i].Topics = make([]common.Hash, 5)
|
||||
logs[i].Topics[0] = common.Hash{'a'}
|
||||
logs[i].Topics[1] = common.Hash{'b'}
|
||||
logs[i].Topics[2] = common.Hash{'c'}
|
||||
|
||||
// StateFeed mocks the same method in the beacon node.
|
||||
func (mbn *mockBeaconNode) StateFeed() *event.Feed {
|
||||
if mbn.stateFeed == nil {
|
||||
mbn.stateFeed = new(event.Feed)
|
||||
}
|
||||
return logs, nil
|
||||
}
|
||||
|
||||
func (m *mockClient) LatestBlockHash() common.Hash {
|
||||
return common.BytesToHash([]byte{'A'})
|
||||
}
|
||||
|
||||
type faultyClient struct{}
|
||||
|
||||
func (f *faultyClient) SubscribeNewHead(ctx context.Context, ch chan<- *gethTypes.Header) (ethereum.Subscription, error) {
|
||||
return new(event.Feed).Subscribe(ch), nil
|
||||
}
|
||||
|
||||
func (f *faultyClient) BlockByHash(ctx context.Context, hash common.Hash) (*gethTypes.Block, error) {
|
||||
return nil, errors.New("failed")
|
||||
}
|
||||
|
||||
func (f *faultyClient) BlockByNumber(ctx context.Context, number *big.Int) (*gethTypes.Block, error) {
|
||||
return nil, errors.New("failed")
|
||||
}
|
||||
|
||||
func (f *faultyClient) HeaderByNumber(ctx context.Context, number *big.Int) (*gethTypes.Header, error) {
|
||||
return nil, errors.New("failed")
|
||||
}
|
||||
|
||||
func (f *faultyClient) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- gethTypes.Log) (ethereum.Subscription, error) {
|
||||
return new(event.Feed).Subscribe(ch), nil
|
||||
}
|
||||
|
||||
func (f *faultyClient) FilterLogs(ctx context.Context, q ethereum.FilterQuery) ([]gethTypes.Log, error) {
|
||||
return nil, errors.New("unable to retrieve logs")
|
||||
}
|
||||
|
||||
func (f *faultyClient) CallContract(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) {
|
||||
return []byte{}, errors.New("unable to retrieve contract code")
|
||||
}
|
||||
|
||||
func (f *faultyClient) CodeAt(ctx context.Context, account common.Address, blockNumber *big.Int) ([]byte, error) {
|
||||
return []byte{}, errors.New("unable to retrieve contract code")
|
||||
}
|
||||
|
||||
func (f *faultyClient) LatestBlockHash() common.Hash {
|
||||
return common.BytesToHash([]byte{'A'})
|
||||
return mbn.stateFeed
|
||||
}
|
||||
|
||||
type mockBroadcaster struct {
|
||||
broadcastCalled bool
|
||||
}
|
||||
|
||||
func (mb *mockBroadcaster) Broadcast(_ context.Context, _ proto.Message) {
|
||||
func (mb *mockBroadcaster) Broadcast(_ context.Context, _ proto.Message) error {
|
||||
mb.broadcastCalled = true
|
||||
return nil
|
||||
}
|
||||
|
||||
var _ = p2p.Broadcaster(&mockBroadcaster{})
|
||||
|
||||
func setupInitialDeposits(t *testing.T, numDeposits int) ([]*pb.Deposit, []*bls.SecretKey) {
|
||||
privKeys := make([]*bls.SecretKey, numDeposits)
|
||||
deposits := make([]*pb.Deposit, numDeposits)
|
||||
for i := 0; i < len(deposits); i++ {
|
||||
priv, err := bls.RandKey(rand.Reader)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
depositInput := &pb.DepositInput{
|
||||
Pubkey: priv.PublicKey().Marshal(),
|
||||
}
|
||||
balance := params.BeaconConfig().MaxDepositAmount
|
||||
depositData, err := helpers.EncodeDepositData(depositInput, balance, time.Now().Unix())
|
||||
if err != nil {
|
||||
t.Fatalf("Cannot encode data: %v", err)
|
||||
}
|
||||
deposits[i] = &pb.Deposit{
|
||||
DepositData: depositData,
|
||||
MerkleTreeIndex: uint64(i),
|
||||
}
|
||||
privKeys[i] = priv
|
||||
}
|
||||
return deposits, privKeys
|
||||
}
|
||||
|
||||
func createPreChainStartDeposit(t *testing.T, pk []byte, index uint64) *pb.Deposit {
|
||||
depositInput := &pb.DepositInput{Pubkey: pk}
|
||||
balance := params.BeaconConfig().MaxDepositAmount
|
||||
depositData, err := helpers.EncodeDepositData(depositInput, balance, time.Now().Unix())
|
||||
if err != nil {
|
||||
t.Fatalf("Cannot encode data: %v", err)
|
||||
}
|
||||
return &pb.Deposit{DepositData: depositData, MerkleTreeIndex: index}
|
||||
}
|
||||
|
||||
func createRandaoReveal(t *testing.T, beaconState *pb.BeaconState, privKeys []*bls.SecretKey) []byte {
|
||||
// We fetch the proposer's index as that is whom the RANDAO will be verified against.
|
||||
proposerIdx, err := helpers.BeaconProposerIndex(beaconState, beaconState.Slot)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
epoch := helpers.SlotToEpoch(beaconState.Slot)
|
||||
buf := make([]byte, 32)
|
||||
binary.LittleEndian.PutUint64(buf, epoch)
|
||||
domain := forkutil.DomainVersion(beaconState.Fork, epoch, params.BeaconConfig().DomainRandao)
|
||||
// We make the previous validator's index sign the message instead of the proposer.
|
||||
epochSignature := privKeys[proposerIdx].Sign(buf, domain)
|
||||
return epochSignature.Marshal()
|
||||
}
|
||||
|
||||
func setupGenesisBlock(t *testing.T, cs *ChainService) ([32]byte, *pb.BeaconBlock) {
|
||||
genesis := b.NewGenesisBlock([]byte{})
|
||||
if err := cs.beaconDB.SaveBlock(genesis); err != nil {
|
||||
t.Fatalf("could not save block to db: %v", err)
|
||||
}
|
||||
parentHash, err := hashutil.HashBeaconBlock(genesis)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to get tree hash root of canonical head: %v", err)
|
||||
}
|
||||
return parentHash, genesis
|
||||
}
|
||||
|
||||
func setupBeaconChain(t *testing.T, beaconDB *db.BeaconDB, attsService *attestation.Service) *ChainService {
|
||||
func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
|
||||
endpoint := "ws://127.0.0.1"
|
||||
ctx := context.Background()
|
||||
var web3Service *powchain.Web3Service
|
||||
var web3Service *powchain.Service
|
||||
var err error
|
||||
client := &mockClient{}
|
||||
web3Service, err = powchain.NewWeb3Service(ctx, &powchain.Web3ServiceConfig{
|
||||
Endpoint: endpoint,
|
||||
web3Service, err = powchain.NewService(ctx, &powchain.Web3ServiceConfig{
|
||||
BeaconDB: beaconDB,
|
||||
ETH1Endpoint: endpoint,
|
||||
DepositContract: common.Address{},
|
||||
Reader: client,
|
||||
Client: client,
|
||||
Logger: client,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("unable to set up web3 service: %v", err)
|
||||
}
|
||||
|
||||
cfg := &Config{
|
||||
BeaconBlockBuf: 0,
|
||||
BeaconDB: beaconDB,
|
||||
Web3Service: web3Service,
|
||||
OpsPoolService: &mockOperationService{},
|
||||
AttsService: attsService,
|
||||
P2p: &mockBroadcaster{},
|
||||
BeaconBlockBuf: 0,
|
||||
BeaconDB: beaconDB,
|
||||
DepositCache: depositcache.NewDepositCache(),
|
||||
ChainStartFetcher: web3Service,
|
||||
P2p: &mockBroadcaster{},
|
||||
StateNotifier: &mockBeaconNode{},
|
||||
AttPool: attestations.NewPool(),
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatalf("could not register blockchain service: %v", err)
|
||||
}
|
||||
chainService, err := NewChainService(ctx, cfg)
|
||||
chainService, err := NewService(ctx, cfg)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to setup chain service: %v", err)
|
||||
}
|
||||
chainService.genesisTime = time.Unix(1, 0) // non-zero time
|
||||
|
||||
return chainService
|
||||
}
|
||||
|
||||
func SetSlotInState(service *ChainService, slot uint64) error {
|
||||
bState, err := service.beaconDB.HeadState(context.Background())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
bState.Slot = slot
|
||||
return service.beaconDB.SaveState(context.Background(), bState)
|
||||
}
|
||||
|
||||
func TestChainStartStop_Uninitialized(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
db := internal.SetupDB(t)
|
||||
defer internal.TeardownDB(t, db)
|
||||
chainService := setupBeaconChain(t, db, nil)
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
chainService := setupBeaconChain(t, db)
|
||||
|
||||
// Test the start function.
|
||||
genesisChan := make(chan time.Time, 0)
|
||||
sub := chainService.stateInitializedFeed.Subscribe(genesisChan)
|
||||
defer sub.Unsubscribe()
|
||||
// Listen for state events.
|
||||
stateSubChannel := make(chan *feed.Event, 1)
|
||||
stateSub := chainService.stateNotifier.StateFeed().Subscribe(stateSubChannel)
|
||||
|
||||
// Test the chain start state notifier.
|
||||
genesisTime := time.Unix(1, 0)
|
||||
chainService.Start()
|
||||
chainService.chainStartChan <- time.Unix(0, 0)
|
||||
genesisTime := <-genesisChan
|
||||
if genesisTime != time.Unix(0, 0) {
|
||||
t.Errorf(
|
||||
"Expected genesis time to equal chainstart time (%v), received %v",
|
||||
time.Unix(0, 0),
|
||||
genesisTime,
|
||||
)
|
||||
event := &feed.Event{
|
||||
Type: statefeed.ChainStarted,
|
||||
Data: &statefeed.ChainStartedData{
|
||||
StartTime: genesisTime,
|
||||
},
|
||||
}
|
||||
// Send in a loop to ensure it is delivered (busy wait for the service to subscribe to the state feed).
|
||||
for sent := 1; sent == 1; {
|
||||
sent = chainService.stateNotifier.StateFeed().Send(event)
|
||||
if sent == 1 {
|
||||
// Flush our local subscriber.
|
||||
<-stateSubChannel
|
||||
}
|
||||
}
|
||||
|
||||
// Now wait for notification the state is ready.
|
||||
for stateInitialized := false; stateInitialized == false; {
|
||||
recv := <-stateSubChannel
|
||||
if recv.Type == statefeed.Initialized {
|
||||
stateInitialized = true
|
||||
}
|
||||
}
|
||||
stateSub.Unsubscribe()
|
||||
|
||||
beaconState, err := db.HeadState(context.Background())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if beaconState == nil || beaconState.Slot != params.BeaconConfig().GenesisSlot {
|
||||
if beaconState == nil || beaconState.Slot != 0 {
|
||||
t.Error("Expected canonical state feed to send a state with genesis block")
|
||||
}
|
||||
if err := chainService.Stop(); err != nil {
|
||||
@@ -299,23 +175,39 @@ func TestChainStartStop_Uninitialized(t *testing.T) {
|
||||
if chainService.ctx.Err() != context.Canceled {
|
||||
t.Error("Context was not canceled")
|
||||
}
|
||||
testutil.AssertLogsContain(t, hook, "Waiting for ChainStart log from the Validator Deposit Contract to start the beacon chain...")
|
||||
testutil.AssertLogsContain(t, hook, "ChainStart time reached, starting the beacon chain!")
|
||||
testutil.AssertLogsContain(t, hook, "Waiting")
|
||||
testutil.AssertLogsContain(t, hook, "Genesis time reached")
|
||||
}
|
||||
|
||||
func TestChainStartStop_Initialized(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
db := internal.SetupDB(t)
|
||||
defer internal.TeardownDB(t, db)
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
chainService := setupBeaconChain(t, db, nil)
|
||||
chainService := setupBeaconChain(t, db)
|
||||
|
||||
unixTime := uint64(time.Now().Unix())
|
||||
deposits, _ := setupInitialDeposits(t, 100)
|
||||
if err := db.InitializeState(context.Background(), unixTime, deposits, &pb.Eth1Data{}); err != nil {
|
||||
t.Fatalf("Could not initialize beacon state to disk: %v", err)
|
||||
genesisBlk := b.NewGenesisBlock([]byte{})
|
||||
blkRoot, err := ssz.HashTreeRoot(genesisBlk.Block)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
setupGenesisBlock(t, chainService)
|
||||
if err := db.SaveBlock(ctx, genesisBlk); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := db.SaveState(ctx, &pb.BeaconState{Slot: 1}, blkRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := db.SaveHeadBlockRoot(ctx, blkRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := db.SaveGenesisBlockRoot(ctx, blkRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := db.SaveJustifiedCheckpoint(ctx, ðpb.Checkpoint{Root: blkRoot[:]}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Test the start function.
|
||||
chainService.Start()
|
||||
|
||||
@@ -327,5 +219,142 @@ func TestChainStartStop_Initialized(t *testing.T) {
|
||||
if chainService.ctx.Err() != context.Canceled {
|
||||
t.Error("context was not canceled")
|
||||
}
|
||||
testutil.AssertLogsContain(t, hook, "Beacon chain data already exists, starting service")
|
||||
testutil.AssertLogsContain(t, hook, "data already exists")
|
||||
}
|
||||
|
||||
func TestChainService_InitializeBeaconChain(t *testing.T) {
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
ctx := context.Background()
|
||||
|
||||
bc := setupBeaconChain(t, db)
|
||||
var err error
|
||||
|
||||
// Set up 10 deposits pre chain start for validators to register
|
||||
count := uint64(10)
|
||||
deposits, _, _ := testutil.DeterministicDepositsAndKeys(count)
|
||||
trie, _, err := testutil.DepositTrieFromDeposits(deposits)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
hashTreeRoot := trie.HashTreeRoot()
|
||||
genState := state.EmptyGenesisState()
|
||||
genState.Eth1Data = ðpb.Eth1Data{
|
||||
DepositRoot: hashTreeRoot[:],
|
||||
DepositCount: uint64(len(deposits)),
|
||||
}
|
||||
genState, err = b.ProcessDeposits(ctx, genState, ðpb.BeaconBlockBody{Deposits: deposits})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := bc.initializeBeaconChain(ctx, time.Unix(0, 0), genState, ðpb.Eth1Data{
|
||||
DepositRoot: hashTreeRoot[:],
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
s, err := bc.beaconDB.State(ctx, bytesutil.ToBytes32(bc.canonicalRoots[0]))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for _, v := range s.Validators {
|
||||
if !db.HasValidatorIndex(ctx, bytesutil.ToBytes48(v.PublicKey)) {
|
||||
t.Errorf("Validator %s missing from db", hex.EncodeToString(v.PublicKey))
|
||||
}
|
||||
}
|
||||
|
||||
if _, err := bc.HeadState(ctx); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if bc.HeadBlock() == nil {
|
||||
t.Error("Head state can't be nil after initialize beacon chain")
|
||||
}
|
||||
if bc.CanonicalRoot(0) == nil {
|
||||
t.Error("Canonical root for slot 0 can't be nil after initialize beacon chain")
|
||||
}
|
||||
}
|
||||
|
||||
func TestChainService_InitializeChainInfo(t *testing.T) {
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
ctx := context.Background()
|
||||
|
||||
genesis := b.NewGenesisBlock([]byte{})
|
||||
genesisRoot, err := ssz.HashTreeRoot(genesis.Block)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := db.SaveGenesisBlockRoot(ctx, genesisRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := db.SaveBlock(ctx, genesis); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
finalizedSlot := params.BeaconConfig().SlotsPerEpoch*2 + 1
|
||||
headBlock := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: finalizedSlot, ParentRoot: genesisRoot[:]}}
|
||||
headState := &pb.BeaconState{Slot: finalizedSlot}
|
||||
headRoot, _ := ssz.HashTreeRoot(headBlock.Block)
|
||||
if err := db.SaveState(ctx, headState, headRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := db.SaveBlock(ctx, headBlock); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := db.SaveFinalizedCheckpoint(ctx, ðpb.Checkpoint{
|
||||
Epoch: helpers.SlotToEpoch(finalizedSlot),
|
||||
Root: headRoot[:],
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := db.SaveBlock(ctx, headBlock); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
c := &Service{beaconDB: db, canonicalRoots: make(map[uint64][]byte)}
|
||||
if err := c.initializeChainInfo(ctx); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(c.HeadBlock(), headBlock) {
|
||||
t.Error("head block incorrect")
|
||||
}
|
||||
s, err := c.HeadState(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(s, headState) {
|
||||
t.Error("head state incorrect")
|
||||
}
|
||||
if headBlock.Block.Slot != c.HeadSlot() {
|
||||
t.Error("head slot incorrect")
|
||||
}
|
||||
if !bytes.Equal(headRoot[:], c.HeadRoot()) {
|
||||
t.Error("head slot incorrect")
|
||||
}
|
||||
if c.genesisRoot != genesisRoot {
|
||||
t.Error("genesis block root incorrect")
|
||||
}
|
||||
}
|
||||
|
||||
func TestChainService_SaveHeadNoDB(t *testing.T) {
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
ctx := context.Background()
|
||||
s := &Service{
|
||||
beaconDB: db,
|
||||
canonicalRoots: make(map[uint64][]byte),
|
||||
}
|
||||
b := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: 1}}
|
||||
r, _ := ssz.HashTreeRoot(b)
|
||||
if err := s.saveHeadNoDB(ctx, b, r); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
newB, err := s.beaconDB.HeadBlock(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if reflect.DeepEqual(newB, b) {
|
||||
t.Error("head block should not be equal")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,32 +0,0 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["state_generator.go"],
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/stategenerator",
|
||||
visibility = ["//beacon-chain:__subpackages__"],
|
||||
deps = [
|
||||
"//beacon-chain/core/state:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//proto/beacon/p2p/v1:go_default_library",
|
||||
"//shared/bytesutil:go_default_library",
|
||||
"//shared/hashutil:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@io_opencensus_go//trace:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
size = "small",
|
||||
srcs = ["state_generator_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/chaintest/backend:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//shared/featureconfig:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
"@com_github_gogo_protobuf//proto:go_default_library",
|
||||
],
|
||||
)
|
||||
@@ -1,179 +0,0 @@
|
||||
package stategenerator
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/hashutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
var log = logrus.WithField("prefix", "stategenerator")
|
||||
|
||||
// GenerateStateFromBlock generates state from the last finalized state to the input slot.
|
||||
// Ex:
|
||||
// 1A - 2B(finalized) - 3C - 4 - 5D - 6 - 7F (letters mean there's a block).
|
||||
// Input: slot 6.
|
||||
// Output: resulting state of state transition function after applying block C and D.
|
||||
// along with skipped slot 4 and 6.
|
||||
func GenerateStateFromBlock(ctx context.Context, db *db.BeaconDB, slot uint64) (*pb.BeaconState, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "beacon-chain.blockchain.stategenerator.GenerateStateFromBlock")
|
||||
defer span.End()
|
||||
fState, err := db.HistoricalStateFromSlot(ctx, slot, [32]byte{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// return finalized state if it's the same as input slot.
|
||||
if fState.Slot == slot {
|
||||
return fState, nil
|
||||
}
|
||||
|
||||
// input slot can't be smaller than last finalized state's slot.
|
||||
if fState.Slot > slot {
|
||||
return nil, fmt.Errorf(
|
||||
"requested slot %d < current slot %d in the finalized beacon state",
|
||||
slot-params.BeaconConfig().GenesisSlot,
|
||||
fState.Slot-params.BeaconConfig().GenesisSlot,
|
||||
)
|
||||
}
|
||||
|
||||
if fState.LatestBlock == nil {
|
||||
return nil, fmt.Errorf("latest head in state is nil %v", err)
|
||||
}
|
||||
|
||||
fRoot, err := hashutil.HashBeaconBlock(fState.LatestBlock)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to get block root %v", err)
|
||||
}
|
||||
|
||||
// from input slot, retrieve its corresponding block and call that the most recent block.
|
||||
mostRecentBlocks, err := db.BlocksBySlot(ctx, slot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mostRecentBlock := mostRecentBlocks[0]
|
||||
|
||||
// if the most recent block is a skip block, we get its parent block.
|
||||
// ex:
|
||||
// 1A - 2B - 3C - 4 - 5 (letters mean there's a block).
|
||||
// input slot is 5, but slots 4 and 5 are skipped, we get block C from slot 3.
|
||||
lastSlot := slot
|
||||
for mostRecentBlock == nil {
|
||||
lastSlot--
|
||||
blocks, err := db.BlocksBySlot(ctx, lastSlot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mostRecentBlock = blocks[0]
|
||||
}
|
||||
|
||||
// retrieve the block list to recompute state of the input slot.
|
||||
blocks, err := blocksSinceFinalized(ctx, db, mostRecentBlock, fRoot)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to look up block ancestors %v", err)
|
||||
}
|
||||
|
||||
log.Infof("Recompute state starting last finalized slot %d and ending slot %d",
|
||||
fState.Slot-params.BeaconConfig().GenesisSlot, slot-params.BeaconConfig().GenesisSlot)
|
||||
postState := fState
|
||||
root := fRoot
|
||||
// this recomputes state up to the last available block.
|
||||
// ex: 1A - 2B (finalized) - 3C - 4 - 5 - 6C - 7 - 8 (C is the last block).
|
||||
// input slot 8, this recomputes state to slot 6.
|
||||
for i := len(blocks); i > 0; i-- {
|
||||
block := blocks[i-1]
|
||||
if block.Slot <= postState.Slot {
|
||||
continue
|
||||
}
|
||||
// running state transitions for skipped slots.
|
||||
for block.Slot != fState.Slot+1 {
|
||||
postState, err = state.ExecuteStateTransition(
|
||||
ctx,
|
||||
postState,
|
||||
nil,
|
||||
root,
|
||||
&state.TransitionConfig{
|
||||
VerifySignatures: false,
|
||||
Logging: false,
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not execute state transition %v", err)
|
||||
}
|
||||
}
|
||||
postState, err = state.ExecuteStateTransition(
|
||||
ctx,
|
||||
postState,
|
||||
block,
|
||||
root,
|
||||
&state.TransitionConfig{
|
||||
VerifySignatures: false,
|
||||
Logging: false,
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not execute state transition %v", err)
|
||||
}
|
||||
|
||||
root, err = hashutil.HashBeaconBlock(block)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to get block root %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// this recomputes state from last block to last slot if there's skipp slots after.
|
||||
// ex: 1A - 2B (finalized) - 3C - 4 - 5 - 6C - 7 - 8 (7 and 8 are skipped slots).
|
||||
// input slot 8, this recomputes state from 6C to 8.
|
||||
for i := postState.Slot; i < slot; i++ {
|
||||
postState, err = state.ExecuteStateTransition(
|
||||
ctx,
|
||||
postState,
|
||||
nil,
|
||||
root,
|
||||
&state.TransitionConfig{
|
||||
VerifySignatures: false,
|
||||
Logging: false,
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not execute state transition %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
log.Infof("Finished recompute state with slot %d and finalized epoch %d",
|
||||
postState.Slot-params.BeaconConfig().GenesisSlot, postState.FinalizedEpoch-params.BeaconConfig().GenesisEpoch)
|
||||
|
||||
return postState, nil
|
||||
}
|
||||
|
||||
// blocksSinceFinalized will return a list of linked blocks that's
|
||||
// between the input block and the last finalized block in the db.
|
||||
// The input block is also returned in the list.
|
||||
// Ex:
|
||||
// A -> B(finalized) -> C -> D -> E -> D.
|
||||
// Input: E, output: [E, D, C, B].
|
||||
func blocksSinceFinalized(ctx context.Context, db *db.BeaconDB, block *pb.BeaconBlock,
|
||||
finalizedBlockRoot [32]byte) ([]*pb.BeaconBlock, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "beacon-chain.blockchain.stategenerator.blocksSinceFinalized")
|
||||
defer span.End()
|
||||
blockAncestors := make([]*pb.BeaconBlock, 0)
|
||||
blockAncestors = append(blockAncestors, block)
|
||||
parentRoot := bytesutil.ToBytes32(block.ParentRootHash32)
|
||||
// looking up ancestors, until the finalized block.
|
||||
for parentRoot != finalizedBlockRoot {
|
||||
retblock, err := db.Block(parentRoot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
blockAncestors = append(blockAncestors, retblock)
|
||||
parentRoot = bytesutil.ToBytes32(retblock.ParentRootHash32)
|
||||
}
|
||||
return blockAncestors, nil
|
||||
}
|
||||
@@ -1,162 +0,0 @@
|
||||
package stategenerator_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain/stategenerator"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/chaintest/backend"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db"
|
||||
"github.com/prysmaticlabs/prysm/shared/featureconfig"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
)
|
||||
|
||||
func init() {
|
||||
featureconfig.InitFeatureConfig(&featureconfig.FeatureFlagConfig{
|
||||
CacheTreeHash: false,
|
||||
})
|
||||
}
|
||||
func TestGenerateState_OK(t *testing.T) {
|
||||
b, err := backend.NewSimulatedBackend()
|
||||
if err != nil {
|
||||
t.Fatalf("Could not create a new simulated backend %v", err)
|
||||
}
|
||||
privKeys, err := b.SetupBackend(100)
|
||||
if err != nil {
|
||||
t.Fatalf("Could not set up backend %v", err)
|
||||
}
|
||||
beaconDb := b.DB()
|
||||
defer b.Shutdown()
|
||||
defer db.TeardownDB(beaconDb)
|
||||
ctx := context.Background()
|
||||
|
||||
slotLimit := uint64(30)
|
||||
|
||||
// Run the simulated chain for 30 slots, to get a state that we can save as finalized.
|
||||
for i := uint64(0); i < slotLimit; i++ {
|
||||
if err := b.GenerateBlockAndAdvanceChain(&backend.SimulatedObjects{}, privKeys); err != nil {
|
||||
t.Fatalf("Could not generate block and transition state successfully %v for slot %d", err, b.State().Slot+1)
|
||||
}
|
||||
inMemBlocks := b.InMemoryBlocks()
|
||||
if err := beaconDb.SaveBlock(inMemBlocks[len(inMemBlocks)-1]); err != nil {
|
||||
t.Fatalf("Unable to save block %v", err)
|
||||
}
|
||||
if err := beaconDb.UpdateChainHead(ctx, inMemBlocks[len(inMemBlocks)-1], b.State()); err != nil {
|
||||
t.Fatalf("Unable to save block %v", err)
|
||||
}
|
||||
if err := beaconDb.SaveFinalizedBlock(inMemBlocks[len(inMemBlocks)-1]); err != nil {
|
||||
t.Fatalf("Unable to save finalized state: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := beaconDb.SaveFinalizedState(b.State()); err != nil {
|
||||
t.Fatalf("Unable to save finalized state: %v", err)
|
||||
}
|
||||
|
||||
// Run the chain for another 30 slots so that we can have this at the current head.
|
||||
for i := uint64(0); i < slotLimit; i++ {
|
||||
if err := b.GenerateBlockAndAdvanceChain(&backend.SimulatedObjects{}, privKeys); err != nil {
|
||||
t.Fatalf("Could not generate block and transition state successfully %v for slot %d", err, b.State().Slot+1)
|
||||
}
|
||||
inMemBlocks := b.InMemoryBlocks()
|
||||
if err := beaconDb.SaveBlock(inMemBlocks[len(inMemBlocks)-1]); err != nil {
|
||||
t.Fatalf("Unable to save block %v", err)
|
||||
}
|
||||
if err := beaconDb.UpdateChainHead(ctx, inMemBlocks[len(inMemBlocks)-1], b.State()); err != nil {
|
||||
t.Fatalf("Unable to save block %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Ran 30 slots to save finalized slot then ran another 30 slots.
|
||||
slotToGenerateTill := params.BeaconConfig().GenesisSlot + slotLimit*2
|
||||
newState, err := stategenerator.GenerateStateFromBlock(context.Background(), beaconDb, slotToGenerateTill)
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to generate new state from previous finalized state %v", err)
|
||||
}
|
||||
|
||||
if newState.Slot != b.State().Slot {
|
||||
t.Fatalf("The generated state and the current state do not have the same slot, expected: %d but got %d",
|
||||
b.State().Slot, newState.Slot)
|
||||
}
|
||||
|
||||
if !proto.Equal(newState, b.State()) {
|
||||
t.Error("Generated and saved states are unequal")
|
||||
}
|
||||
}
|
||||
|
||||
func TestGenerateState_WithNilBlocksOK(t *testing.T) {
|
||||
b, err := backend.NewSimulatedBackend()
|
||||
if err != nil {
|
||||
t.Fatalf("Could not create a new simulated backend %v", err)
|
||||
}
|
||||
privKeys, err := b.SetupBackend(100)
|
||||
if err != nil {
|
||||
t.Fatalf("Could not set up backend %v", err)
|
||||
}
|
||||
beaconDb := b.DB()
|
||||
defer b.Shutdown()
|
||||
defer db.TeardownDB(beaconDb)
|
||||
ctx := context.Background()
|
||||
|
||||
slotLimit := uint64(30)
|
||||
|
||||
// Run the simulated chain for 30 slots, to get a state that we can save as finalized.
|
||||
for i := uint64(0); i < slotLimit; i++ {
|
||||
if err := b.GenerateBlockAndAdvanceChain(&backend.SimulatedObjects{}, privKeys); err != nil {
|
||||
t.Fatalf("Could not generate block and transition state successfully %v for slot %d", err, b.State().Slot+1)
|
||||
}
|
||||
inMemBlocks := b.InMemoryBlocks()
|
||||
if err := beaconDb.SaveBlock(inMemBlocks[len(inMemBlocks)-1]); err != nil {
|
||||
t.Fatalf("Unable to save block %v", err)
|
||||
}
|
||||
if err := beaconDb.UpdateChainHead(ctx, inMemBlocks[len(inMemBlocks)-1], b.State()); err != nil {
|
||||
t.Fatalf("Unable to save block %v", err)
|
||||
}
|
||||
if err := beaconDb.SaveFinalizedBlock(inMemBlocks[len(inMemBlocks)-1]); err != nil {
|
||||
t.Fatalf("Unable to save finalized state: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := beaconDb.SaveFinalizedState(b.State()); err != nil {
|
||||
t.Fatalf("Unable to save finalized state")
|
||||
}
|
||||
|
||||
slotsWithNil := uint64(10)
|
||||
|
||||
// Run the chain for 10 slots with nil blocks.
|
||||
for i := uint64(0); i < slotsWithNil; i++ {
|
||||
if err := b.GenerateNilBlockAndAdvanceChain(); err != nil {
|
||||
t.Fatalf("Could not generate block and transition state successfully %v for slot %d", err, b.State().Slot+1)
|
||||
}
|
||||
}
|
||||
|
||||
for i := uint64(0); i < slotLimit-slotsWithNil; i++ {
|
||||
if err := b.GenerateBlockAndAdvanceChain(&backend.SimulatedObjects{}, privKeys); err != nil {
|
||||
t.Fatalf("Could not generate block and transition state successfully %v for slot %d", err, b.State().Slot+1)
|
||||
}
|
||||
inMemBlocks := b.InMemoryBlocks()
|
||||
if err := beaconDb.SaveBlock(inMemBlocks[len(inMemBlocks)-1]); err != nil {
|
||||
t.Fatalf("Unable to save block %v", err)
|
||||
}
|
||||
if err := beaconDb.UpdateChainHead(ctx, inMemBlocks[len(inMemBlocks)-1], b.State()); err != nil {
|
||||
t.Fatalf("Unable to save block %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Ran 30 slots to save finalized slot then ran another 10 slots w/o blocks and 20 slots w/ blocks.
|
||||
slotToGenerateTill := params.BeaconConfig().GenesisSlot + slotLimit*2
|
||||
newState, err := stategenerator.GenerateStateFromBlock(context.Background(), beaconDb, slotToGenerateTill)
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to generate new state from previous finalized state %v", err)
|
||||
}
|
||||
|
||||
if newState.Slot != b.State().Slot {
|
||||
t.Fatalf("The generated state and the current state do not have the same slot, expected: %d but got %d",
|
||||
b.State().Slot, newState.Slot)
|
||||
}
|
||||
|
||||
if !proto.Equal(newState, b.State()) {
|
||||
t.Error("generated and saved states are unequal")
|
||||
}
|
||||
}
|
||||
23
beacon-chain/blockchain/testing/BUILD.bazel
Normal file
23
beacon-chain/blockchain/testing/BUILD.bazel
Normal file
@@ -0,0 +1,23 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
testonly = True,
|
||||
srcs = ["mock.go"],
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing",
|
||||
visibility = ["//beacon-chain:__subpackages__"],
|
||||
deps = [
|
||||
"//beacon-chain/core/epoch/precompute:go_default_library",
|
||||
"//beacon-chain/core/feed/operation:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//proto/beacon/p2p/v1:go_default_library",
|
||||
"//shared/event:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_ssz//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
)
|
||||
197
beacon-chain/blockchain/testing/mock.go
Normal file
197
beacon-chain/blockchain/testing/mock.go
Normal file
@@ -0,0 +1,197 @@
|
||||
package testing
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/go-ssz"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
|
||||
opfeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/operation"
|
||||
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/event"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// ChainService defines the mock interface for testing
|
||||
type ChainService struct {
|
||||
State *pb.BeaconState
|
||||
Root []byte
|
||||
Block *ethpb.SignedBeaconBlock
|
||||
FinalizedCheckPoint *ethpb.Checkpoint
|
||||
CurrentJustifiedCheckPoint *ethpb.Checkpoint
|
||||
PreviousJustifiedCheckPoint *ethpb.Checkpoint
|
||||
BlocksReceived []*ethpb.SignedBeaconBlock
|
||||
Balance *precompute.Balance
|
||||
Genesis time.Time
|
||||
Fork *pb.Fork
|
||||
DB db.Database
|
||||
stateNotifier statefeed.Notifier
|
||||
opNotifier opfeed.Notifier
|
||||
}
|
||||
|
||||
// StateNotifier mocks the same method in the chain service.
|
||||
func (ms *ChainService) StateNotifier() statefeed.Notifier {
|
||||
if ms.stateNotifier == nil {
|
||||
ms.stateNotifier = &MockStateNotifier{}
|
||||
}
|
||||
return ms.stateNotifier
|
||||
}
|
||||
|
||||
// MockStateNotifier mocks the state notifier.
|
||||
type MockStateNotifier struct {
|
||||
feed *event.Feed
|
||||
}
|
||||
|
||||
// StateFeed returns a state feed.
|
||||
func (msn *MockStateNotifier) StateFeed() *event.Feed {
|
||||
if msn.feed == nil {
|
||||
msn.feed = new(event.Feed)
|
||||
}
|
||||
return msn.feed
|
||||
}
|
||||
|
||||
// OperationNotifier mocks the same method in the chain service.
|
||||
func (ms *ChainService) OperationNotifier() opfeed.Notifier {
|
||||
if ms.opNotifier == nil {
|
||||
ms.opNotifier = &MockOperationNotifier{}
|
||||
}
|
||||
return ms.opNotifier
|
||||
}
|
||||
|
||||
// MockOperationNotifier mocks the operation notifier.
|
||||
type MockOperationNotifier struct {
|
||||
feed *event.Feed
|
||||
}
|
||||
|
||||
// OperationFeed returns an operation feed.
|
||||
func (mon *MockOperationNotifier) OperationFeed() *event.Feed {
|
||||
if mon.feed == nil {
|
||||
mon.feed = new(event.Feed)
|
||||
}
|
||||
return mon.feed
|
||||
}
|
||||
|
||||
// ReceiveBlock mocks ReceiveBlock method in chain service.
|
||||
func (ms *ChainService) ReceiveBlock(ctx context.Context, block *ethpb.SignedBeaconBlock) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReceiveBlockNoVerify mocks ReceiveBlockNoVerify method in chain service.
|
||||
func (ms *ChainService) ReceiveBlockNoVerify(ctx context.Context, block *ethpb.SignedBeaconBlock) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReceiveBlockNoPubsub mocks ReceiveBlockNoPubsub method in chain service.
|
||||
func (ms *ChainService) ReceiveBlockNoPubsub(ctx context.Context, block *ethpb.SignedBeaconBlock) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReceiveBlockNoPubsubForkchoice mocks ReceiveBlockNoPubsubForkchoice method in chain service.
|
||||
func (ms *ChainService) ReceiveBlockNoPubsubForkchoice(ctx context.Context, block *ethpb.SignedBeaconBlock) error {
|
||||
if ms.State == nil {
|
||||
ms.State = &pb.BeaconState{}
|
||||
}
|
||||
if !bytes.Equal(ms.Root, block.Block.ParentRoot) {
|
||||
return errors.Errorf("wanted %#x but got %#x", ms.Root, block.Block.ParentRoot)
|
||||
}
|
||||
ms.State.Slot = block.Block.Slot
|
||||
ms.BlocksReceived = append(ms.BlocksReceived, block)
|
||||
signingRoot, err := ssz.HashTreeRoot(block.Block)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if ms.DB != nil {
|
||||
if err := ms.DB.SaveBlock(ctx, block); err != nil {
|
||||
return err
|
||||
}
|
||||
logrus.Infof("Saved block with root: %#x at slot %d", signingRoot, block.Block.Slot)
|
||||
}
|
||||
ms.Root = signingRoot[:]
|
||||
ms.Block = block
|
||||
return nil
|
||||
}
|
||||
|
||||
// HeadSlot mocks HeadSlot method in chain service.
|
||||
func (ms *ChainService) HeadSlot() uint64 {
|
||||
if ms.State == nil {
|
||||
return 0
|
||||
}
|
||||
return ms.State.Slot
|
||||
|
||||
}
|
||||
|
||||
// HeadRoot mocks HeadRoot method in chain service.
|
||||
func (ms *ChainService) HeadRoot() []byte {
|
||||
return ms.Root
|
||||
|
||||
}
|
||||
|
||||
// HeadBlock mocks HeadBlock method in chain service.
|
||||
func (ms *ChainService) HeadBlock() *ethpb.SignedBeaconBlock {
|
||||
return ms.Block
|
||||
}
|
||||
|
||||
// HeadState mocks HeadState method in chain service.
|
||||
func (ms *ChainService) HeadState(context.Context) (*pb.BeaconState, error) {
|
||||
return ms.State, nil
|
||||
}
|
||||
|
||||
// CurrentFork mocks HeadState method in chain service.
|
||||
func (ms *ChainService) CurrentFork() *pb.Fork {
|
||||
return ms.Fork
|
||||
}
|
||||
|
||||
// FinalizedCheckpt mocks FinalizedCheckpt method in chain service.
|
||||
func (ms *ChainService) FinalizedCheckpt() *ethpb.Checkpoint {
|
||||
return ms.FinalizedCheckPoint
|
||||
}
|
||||
|
||||
// CurrentJustifiedCheckpt mocks CurrentJustifiedCheckpt method in chain service.
|
||||
func (ms *ChainService) CurrentJustifiedCheckpt() *ethpb.Checkpoint {
|
||||
return ms.CurrentJustifiedCheckPoint
|
||||
}
|
||||
|
||||
// PreviousJustifiedCheckpt mocks PreviousJustifiedCheckpt method in chain service.
|
||||
func (ms *ChainService) PreviousJustifiedCheckpt() *ethpb.Checkpoint {
|
||||
return ms.PreviousJustifiedCheckPoint
|
||||
}
|
||||
|
||||
// ReceiveAttestation mocks ReceiveAttestation method in chain service.
|
||||
func (ms *ChainService) ReceiveAttestation(context.Context, *ethpb.Attestation) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReceiveAttestationNoPubsub mocks ReceiveAttestationNoPubsub method in chain service.
|
||||
func (ms *ChainService) ReceiveAttestationNoPubsub(context.Context, *ethpb.Attestation) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// HeadValidatorsIndices mocks the same method in the chain service.
|
||||
func (ms *ChainService) HeadValidatorsIndices(epoch uint64) ([]uint64, error) {
|
||||
if ms.State == nil {
|
||||
return []uint64{}, nil
|
||||
}
|
||||
return helpers.ActiveValidatorIndices(ms.State, epoch)
|
||||
}
|
||||
|
||||
// HeadSeed mocks the same method in the chain service.
|
||||
func (ms *ChainService) HeadSeed(epoch uint64) ([32]byte, error) {
|
||||
return helpers.Seed(ms.State, epoch, params.BeaconConfig().DomainBeaconAttester)
|
||||
}
|
||||
|
||||
// GenesisTime mocks the same method in the chain service.
|
||||
func (ms *ChainService) GenesisTime() time.Time {
|
||||
return ms.Genesis
|
||||
}
|
||||
|
||||
// Participation mocks the same method in the chain service.
|
||||
func (ms *ChainService) Participation(epoch uint64) *precompute.Balance {
|
||||
return ms.Balance
|
||||
}
|
||||
20
beacon-chain/cache/BUILD.bazel
vendored
20
beacon-chain/cache/BUILD.bazel
vendored
@@ -4,17 +4,23 @@ go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"attestation_data.go",
|
||||
"block.go",
|
||||
"checkpoint_state.go",
|
||||
"committee.go",
|
||||
"common.go",
|
||||
"eth1_data.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/cache",
|
||||
visibility = ["//beacon-chain:__subpackages__"],
|
||||
deps = [
|
||||
"//proto/beacon/p2p/v1:go_default_library",
|
||||
"//proto/beacon/rpc/v1:go_default_library",
|
||||
"//shared/featureconfig:go_default_library",
|
||||
"//shared/hashutil:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
"//shared/sliceutil:go_default_library",
|
||||
"@com_github_gogo_protobuf//proto:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
|
||||
"@io_k8s_client_go//tools/cache:go_default_library",
|
||||
],
|
||||
)
|
||||
@@ -24,14 +30,20 @@ go_test(
|
||||
size = "small",
|
||||
srcs = [
|
||||
"attestation_data_test.go",
|
||||
"block_test.go",
|
||||
"checkpoint_state_test.go",
|
||||
"committee_test.go",
|
||||
"eth1_data_test.go",
|
||||
"feature_flag_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
race = "on",
|
||||
deps = [
|
||||
"//proto/beacon/p2p/v1:go_default_library",
|
||||
"//proto/beacon/rpc/v1:go_default_library",
|
||||
"//shared/bytesutil:go_default_library",
|
||||
"//shared/featureconfig:go_default_library",
|
||||
"//shared/hashutil:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
"@com_github_gogo_protobuf//proto:go_default_library",
|
||||
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
41
beacon-chain/cache/attestation_data.go
vendored
41
beacon-chain/cache/attestation_data.go
vendored
@@ -10,7 +10,8 @@ import (
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/rpc/v1"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/featureconfig"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
@@ -57,7 +58,13 @@ func NewAttestationCache() *AttestationCache {
|
||||
|
||||
// Get waits for any in progress calculation to complete before returning a
|
||||
// cached response, if any.
|
||||
func (c *AttestationCache) Get(ctx context.Context, req *pb.AttestationDataRequest) (*pb.AttestationDataResponse, error) {
|
||||
func (c *AttestationCache) Get(ctx context.Context, req *ethpb.AttestationDataRequest) (*ethpb.AttestationData, error) {
|
||||
if !featureconfig.Get().EnableAttestationCache {
|
||||
// Return a miss result if cache is not enabled.
|
||||
attestationCacheMiss.Inc()
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if req == nil {
|
||||
return nil, errors.New("nil attestation data request")
|
||||
}
|
||||
@@ -105,7 +112,11 @@ func (c *AttestationCache) Get(ctx context.Context, req *pb.AttestationDataReque
|
||||
|
||||
// MarkInProgress a request so that any other similar requests will block on
|
||||
// Get until MarkNotInProgress is called.
|
||||
func (c *AttestationCache) MarkInProgress(req *pb.AttestationDataRequest) error {
|
||||
func (c *AttestationCache) MarkInProgress(req *ethpb.AttestationDataRequest) error {
|
||||
if !featureconfig.Get().EnableAttestationCache {
|
||||
return nil
|
||||
}
|
||||
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
s, e := reqToKey(req)
|
||||
@@ -115,13 +126,19 @@ func (c *AttestationCache) MarkInProgress(req *pb.AttestationDataRequest) error
|
||||
if c.inProgress[s] {
|
||||
return ErrAlreadyInProgress
|
||||
}
|
||||
c.inProgress[s] = true
|
||||
if featureconfig.Get().EnableAttestationCache {
|
||||
c.inProgress[s] = true
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarkNotInProgress will release the lock on a given request. This should be
|
||||
// called after put.
|
||||
func (c *AttestationCache) MarkNotInProgress(req *pb.AttestationDataRequest) error {
|
||||
func (c *AttestationCache) MarkNotInProgress(req *ethpb.AttestationDataRequest) error {
|
||||
if !featureconfig.Get().EnableAttestationCache {
|
||||
return nil
|
||||
}
|
||||
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
s, e := reqToKey(req)
|
||||
@@ -133,7 +150,11 @@ func (c *AttestationCache) MarkNotInProgress(req *pb.AttestationDataRequest) err
|
||||
}
|
||||
|
||||
// Put the response in the cache.
|
||||
func (c *AttestationCache) Put(ctx context.Context, req *pb.AttestationDataRequest, res *pb.AttestationDataResponse) error {
|
||||
func (c *AttestationCache) Put(ctx context.Context, req *ethpb.AttestationDataRequest, res *ethpb.AttestationData) error {
|
||||
if !featureconfig.Get().EnableAttestationCache {
|
||||
return nil
|
||||
}
|
||||
|
||||
data := &attestationReqResWrapper{
|
||||
req,
|
||||
res,
|
||||
@@ -158,11 +179,11 @@ func wrapperToKey(i interface{}) (string, error) {
|
||||
return reqToKey(w.req)
|
||||
}
|
||||
|
||||
func reqToKey(req *pb.AttestationDataRequest) (string, error) {
|
||||
return fmt.Sprintf("%d-%d", req.Shard, req.Slot), nil
|
||||
func reqToKey(req *ethpb.AttestationDataRequest) (string, error) {
|
||||
return fmt.Sprintf("%d-%d", req.CommitteeIndex, req.Slot), nil
|
||||
}
|
||||
|
||||
type attestationReqResWrapper struct {
|
||||
req *pb.AttestationDataRequest
|
||||
res *pb.AttestationDataResponse
|
||||
req *ethpb.AttestationDataRequest
|
||||
res *ethpb.AttestationData
|
||||
}
|
||||
|
||||
12
beacon-chain/cache/attestation_data_test.go
vendored
12
beacon-chain/cache/attestation_data_test.go
vendored
@@ -5,17 +5,17 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/rpc/v1"
|
||||
)
|
||||
|
||||
func TestAttestationCache_RoundTrip(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
c := cache.NewAttestationCache()
|
||||
|
||||
req := &pb.AttestationDataRequest{
|
||||
Shard: 0,
|
||||
Slot: 1,
|
||||
req := ðpb.AttestationDataRequest{
|
||||
CommitteeIndex: 0,
|
||||
Slot: 1,
|
||||
}
|
||||
|
||||
response, err := c.Get(ctx, req)
|
||||
@@ -31,8 +31,8 @@ func TestAttestationCache_RoundTrip(t *testing.T) {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
res := &pb.AttestationDataResponse{
|
||||
HeadSlot: 5,
|
||||
res := ðpb.AttestationData{
|
||||
Target: ðpb.Checkpoint{Epoch: 5},
|
||||
}
|
||||
|
||||
if err = c.Put(ctx, req, res); err != nil {
|
||||
|
||||
104
beacon-chain/cache/block.go
vendored
104
beacon-chain/cache/block.go
vendored
@@ -1,104 +0,0 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"strconv"
|
||||
"sync"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrNotAncestorCacheObj will be returned when a cache object is not a pointer to
|
||||
// block ancestor cache obj.
|
||||
ErrNotAncestorCacheObj = errors.New("object is not an ancestor object for cache")
|
||||
// Metrics
|
||||
ancestorBlockCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "ancestor_block_cache_miss",
|
||||
Help: "The number of ancestor block requests that aren't present in the cache.",
|
||||
})
|
||||
ancestorBlockCacheHit = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "ancestor_block_cache_hit",
|
||||
Help: "The number of ancestor block requests that are present in the cache.",
|
||||
})
|
||||
ancestorBlockCacheSize = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "ancestor_block_cache_size",
|
||||
Help: "The number of ancestor blocks in the ancestorBlock cache",
|
||||
})
|
||||
)
|
||||
|
||||
// AncestorInfo defines the cached ancestor block object for height.
|
||||
type AncestorInfo struct {
|
||||
Height uint64
|
||||
Hash []byte
|
||||
Target *pb.AttestationTarget
|
||||
}
|
||||
|
||||
// AncestorBlockCache structs with 1 queue for looking up block ancestor by height.
|
||||
type AncestorBlockCache struct {
|
||||
ancestorBlockCache *cache.FIFO
|
||||
lock sync.RWMutex
|
||||
}
|
||||
|
||||
// heightKeyFn takes the string representation of the block hash + height as the key
|
||||
// for the ancestor of a given block (AncestorInfo).
|
||||
func heightKeyFn(obj interface{}) (string, error) {
|
||||
aInfo, ok := obj.(*AncestorInfo)
|
||||
if !ok {
|
||||
return "", ErrNotAncestorCacheObj
|
||||
}
|
||||
|
||||
return string(aInfo.Hash) + strconv.Itoa(int(aInfo.Height)), nil
|
||||
}
|
||||
|
||||
// NewBlockAncestorCache creates a new block ancestor cache for storing/accessing block ancestor
|
||||
// from memory.
|
||||
func NewBlockAncestorCache() *AncestorBlockCache {
|
||||
return &AncestorBlockCache{
|
||||
ancestorBlockCache: cache.NewFIFO(heightKeyFn),
|
||||
}
|
||||
}
|
||||
|
||||
// AncestorBySlot fetches block's ancestor by height. Returns true with a
|
||||
// reference to the ancestor block, if exists. Otherwise returns false, nil.
|
||||
func (a *AncestorBlockCache) AncestorBySlot(blockHash []byte, height uint64) (*AncestorInfo, error) {
|
||||
a.lock.RLock()
|
||||
defer a.lock.RUnlock()
|
||||
|
||||
obj, exists, err := a.ancestorBlockCache.GetByKey(string(blockHash) + strconv.Itoa(int(height)))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if exists {
|
||||
ancestorBlockCacheHit.Inc()
|
||||
} else {
|
||||
ancestorBlockCacheMiss.Inc()
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
aInfo, ok := obj.(*AncestorInfo)
|
||||
if !ok {
|
||||
return nil, ErrNotACommitteeInfo
|
||||
}
|
||||
|
||||
return aInfo, nil
|
||||
}
|
||||
|
||||
// AddBlockAncestor adds block ancestor object to the cache. This method also trims the least
|
||||
// recently added ancestor if the cache size has ready the max cache size limit.
|
||||
func (a *AncestorBlockCache) AddBlockAncestor(ancestorInfo *AncestorInfo) error {
|
||||
a.lock.Lock()
|
||||
defer a.lock.Unlock()
|
||||
|
||||
if err := a.ancestorBlockCache.AddIfNotPresent(ancestorInfo); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
trim(a.ancestorBlockCache, maxCacheSize)
|
||||
ancestorBlockCacheSize.Set(float64(len(a.ancestorBlockCache.ListKeys())))
|
||||
return nil
|
||||
}
|
||||
111
beacon-chain/cache/block_test.go
vendored
111
beacon-chain/cache/block_test.go
vendored
@@ -1,111 +0,0 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
)
|
||||
|
||||
func TestHeightHeightFn_OK(t *testing.T) {
|
||||
height := uint64(999)
|
||||
hash := []byte{'A'}
|
||||
aInfo := &AncestorInfo{
|
||||
Height: height,
|
||||
Hash: hash,
|
||||
Target: &pb.AttestationTarget{
|
||||
Slot: height,
|
||||
BlockRoot: hash,
|
||||
},
|
||||
}
|
||||
|
||||
key, err := heightKeyFn(aInfo)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
strHeightKey := string(aInfo.Target.BlockRoot) + strconv.Itoa(int(aInfo.Target.Slot))
|
||||
if key != strHeightKey {
|
||||
t.Errorf("Incorrect hash key: %s, expected %s", key, strHeightKey)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHeightKeyFn_InvalidObj(t *testing.T) {
|
||||
_, err := heightKeyFn("bad")
|
||||
if err != ErrNotAncestorCacheObj {
|
||||
t.Errorf("Expected error %v, got %v", ErrNotAncestorCacheObj, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAncestorCache_AncestorInfoByHeight(t *testing.T) {
|
||||
cache := NewBlockAncestorCache()
|
||||
|
||||
height := uint64(123)
|
||||
hash := []byte{'B'}
|
||||
aInfo := &AncestorInfo{
|
||||
Height: height,
|
||||
Hash: hash,
|
||||
Target: &pb.AttestationTarget{
|
||||
Slot: height,
|
||||
BlockRoot: hash,
|
||||
},
|
||||
}
|
||||
|
||||
fetchedInfo, err := cache.AncestorBySlot(hash, height)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if fetchedInfo != nil {
|
||||
t.Error("Expected ancestor info not to exist in empty cache")
|
||||
}
|
||||
|
||||
if err := cache.AddBlockAncestor(aInfo); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
fetchedInfo, err = cache.AncestorBySlot(hash, height)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if fetchedInfo == nil {
|
||||
t.Error("Expected ancestor info to exist")
|
||||
}
|
||||
if fetchedInfo.Height != height {
|
||||
t.Errorf(
|
||||
"Expected fetched slot number to be %d, got %d",
|
||||
aInfo.Target.Slot,
|
||||
fetchedInfo.Target.Slot,
|
||||
)
|
||||
}
|
||||
if !reflect.DeepEqual(fetchedInfo.Target, aInfo.Target) {
|
||||
t.Errorf(
|
||||
"Expected fetched info committee to be %v, got %v",
|
||||
aInfo.Target,
|
||||
fetchedInfo.Target,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlockAncestor_maxSize(t *testing.T) {
|
||||
cache := NewBlockAncestorCache()
|
||||
|
||||
for i := 0; i < maxCacheSize+10; i++ {
|
||||
aInfo := &AncestorInfo{
|
||||
Height: uint64(i),
|
||||
Target: &pb.AttestationTarget{
|
||||
Slot: uint64(i),
|
||||
},
|
||||
}
|
||||
if err := cache.AddBlockAncestor(aInfo); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
if len(cache.ancestorBlockCache.ListKeys()) != maxCacheSize {
|
||||
t.Errorf(
|
||||
"Expected hash cache key size to be %d, got %d",
|
||||
maxCacheSize,
|
||||
len(cache.ancestorBlockCache.ListKeys()),
|
||||
)
|
||||
}
|
||||
}
|
||||
114
beacon-chain/cache/checkpoint_state.go
vendored
Normal file
114
beacon-chain/cache/checkpoint_state.go
vendored
Normal file
@@ -0,0 +1,114 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"sync"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/hashutil"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrNotCheckpointState will be returned when a cache object is not a pointer to
|
||||
// a CheckpointState struct.
|
||||
ErrNotCheckpointState = errors.New("object is not a state by check point struct")
|
||||
|
||||
// maxCheckpointStateSize defines the max number of entries check point to state cache can contain.
|
||||
maxCheckpointStateSize = 4
|
||||
|
||||
// Metrics.
|
||||
checkpointStateMiss = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "check_point_statecache_miss",
|
||||
Help: "The number of check point state requests that aren't present in the cache.",
|
||||
})
|
||||
checkpointStateHit = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "check_point_state_cache_hit",
|
||||
Help: "The number of check point state requests that are present in the cache.",
|
||||
})
|
||||
)
|
||||
|
||||
// CheckpointState defines the active validator indices per epoch.
|
||||
type CheckpointState struct {
|
||||
Checkpoint *ethpb.Checkpoint
|
||||
State *pb.BeaconState
|
||||
}
|
||||
|
||||
// CheckpointStateCache is a struct with 1 queue for looking up state by checkpoint.
|
||||
type CheckpointStateCache struct {
|
||||
cache *cache.FIFO
|
||||
lock sync.RWMutex
|
||||
}
|
||||
|
||||
// checkpointState takes the checkpoint as the key of the resulting state.
|
||||
func checkpointState(obj interface{}) (string, error) {
|
||||
info, ok := obj.(*CheckpointState)
|
||||
if !ok {
|
||||
return "", ErrNotCheckpointState
|
||||
}
|
||||
|
||||
h, err := hashutil.HashProto(info.Checkpoint)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(h[:]), nil
|
||||
}
|
||||
|
||||
// NewCheckpointStateCache creates a new checkpoint state cache for storing/accessing processed state.
|
||||
func NewCheckpointStateCache() *CheckpointStateCache {
|
||||
return &CheckpointStateCache{
|
||||
cache: cache.NewFIFO(checkpointState),
|
||||
}
|
||||
}
|
||||
|
||||
// StateByCheckpoint fetches state by checkpoint. Returns true with a
|
||||
// reference to the CheckpointState info, if exists. Otherwise returns false, nil.
|
||||
func (c *CheckpointStateCache) StateByCheckpoint(cp *ethpb.Checkpoint) (*pb.BeaconState, error) {
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
h, err := hashutil.HashProto(cp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
obj, exists, err := c.cache.GetByKey(string(h[:]))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if exists {
|
||||
checkpointStateHit.Inc()
|
||||
} else {
|
||||
checkpointStateMiss.Inc()
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
info, ok := obj.(*CheckpointState)
|
||||
if !ok {
|
||||
return nil, ErrNotCheckpointState
|
||||
}
|
||||
|
||||
return proto.Clone(info.State).(*pb.BeaconState), nil
|
||||
}
|
||||
|
||||
// AddCheckpointState adds CheckpointState object to the cache. This method also trims the least
|
||||
// recently added CheckpointState object if the cache size has ready the max cache size limit.
|
||||
func (c *CheckpointStateCache) AddCheckpointState(cp *CheckpointState) error {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
if err := c.cache.AddIfNotPresent(cp); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
trim(c.cache, maxCheckpointStateSize)
|
||||
return nil
|
||||
}
|
||||
|
||||
// CheckpointStateKeys returns the keys of the state in cache.
|
||||
func (c *CheckpointStateCache) CheckpointStateKeys() []string {
|
||||
return c.cache.ListKeys()
|
||||
}
|
||||
110
beacon-chain/cache/checkpoint_state_test.go
vendored
Normal file
110
beacon-chain/cache/checkpoint_state_test.go
vendored
Normal file
@@ -0,0 +1,110 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/hashutil"
|
||||
)
|
||||
|
||||
func TestCheckpointStateCacheKeyFn_OK(t *testing.T) {
|
||||
cp := ðpb.Checkpoint{Epoch: 1, Root: []byte{'A'}}
|
||||
info := &CheckpointState{
|
||||
Checkpoint: cp,
|
||||
State: &pb.BeaconState{Slot: 64},
|
||||
}
|
||||
key, err := checkpointState(info)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
wantedKey, err := hashutil.HashProto(cp)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if key != string(wantedKey[:]) {
|
||||
t.Errorf("Incorrect hash key: %s, expected %s", key, string(wantedKey[:]))
|
||||
}
|
||||
}
|
||||
|
||||
func TestCheckpointStateCacheKeyFn_InvalidObj(t *testing.T) {
|
||||
_, err := checkpointState("bad")
|
||||
if err != ErrNotCheckpointState {
|
||||
t.Errorf("Expected error %v, got %v", ErrNotCheckpointState, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCheckpointStateCache_StateByCheckpoint(t *testing.T) {
|
||||
cache := NewCheckpointStateCache()
|
||||
|
||||
cp1 := ðpb.Checkpoint{Epoch: 1, Root: []byte{'A'}}
|
||||
info1 := &CheckpointState{
|
||||
Checkpoint: cp1,
|
||||
State: &pb.BeaconState{Slot: 64},
|
||||
}
|
||||
state, err := cache.StateByCheckpoint(cp1)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if state != nil {
|
||||
t.Error("Expected state not to exist in empty cache")
|
||||
}
|
||||
|
||||
if err := cache.AddCheckpointState(info1); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
state, err = cache.StateByCheckpoint(cp1)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(state, info1.State) {
|
||||
t.Error("incorrectly cached state")
|
||||
}
|
||||
|
||||
cp2 := ðpb.Checkpoint{Epoch: 2, Root: []byte{'B'}}
|
||||
info2 := &CheckpointState{
|
||||
Checkpoint: cp2,
|
||||
State: &pb.BeaconState{Slot: 128},
|
||||
}
|
||||
if err := cache.AddCheckpointState(info2); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
state, err = cache.StateByCheckpoint(cp2)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(state, info2.State) {
|
||||
t.Error("incorrectly cached state")
|
||||
}
|
||||
|
||||
state, err = cache.StateByCheckpoint(cp1)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(state, info1.State) {
|
||||
t.Error("incorrectly cached state")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCheckpointStateCache__MaxSize(t *testing.T) {
|
||||
c := NewCheckpointStateCache()
|
||||
|
||||
for i := 0; i < maxCheckpointStateSize+100; i++ {
|
||||
info := &CheckpointState{
|
||||
Checkpoint: ðpb.Checkpoint{Epoch: uint64(i)},
|
||||
State: &pb.BeaconState{Slot: uint64(i)},
|
||||
}
|
||||
if err := c.AddCheckpointState(info); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
if len(c.cache.ListKeys()) != maxCheckpointStateSize {
|
||||
t.Errorf(
|
||||
"Expected hash cache key size to be %d, got %d",
|
||||
maxCheckpointStateSize,
|
||||
len(c.cache.ListKeys()),
|
||||
)
|
||||
}
|
||||
}
|
||||
160
beacon-chain/cache/committee.go
vendored
160
beacon-chain/cache/committee.go
vendored
@@ -2,126 +2,162 @@ package cache
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"strconv"
|
||||
"sync"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"github.com/prysmaticlabs/prysm/shared/featureconfig"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/sliceutil"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrNotACommitteeInfo will be returned when a cache object is not a pointer to
|
||||
// a committeeInfo struct.
|
||||
ErrNotACommitteeInfo = errors.New("object is not an committee info")
|
||||
// ErrNotCommittee will be returned when a cache object is not a pointer to
|
||||
// a Committee struct.
|
||||
ErrNotCommittee = errors.New("object is not a committee struct")
|
||||
|
||||
// maxCacheSize is 4x of the epoch length for additional cache padding.
|
||||
// Requests should be only accessing committees within defined epoch length.
|
||||
maxCacheSize = int(4 * params.BeaconConfig().SlotsPerEpoch)
|
||||
// maxCommitteesCacheSize defines the max number of shuffled committees on per randao basis can cache.
|
||||
// Due to reorgs, it's good to keep the old cache around for quickly switch over. 10 is a generous
|
||||
// cache size as it considers 3 concurrent branches over 3 epochs.
|
||||
maxCommitteesCacheSize = 10
|
||||
|
||||
// Metrics
|
||||
committeeCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
|
||||
// CommitteeCacheMiss tracks the number of committee requests that aren't present in the cache.
|
||||
CommitteeCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "committee_cache_miss",
|
||||
Help: "The number of committee requests that aren't present in the cache.",
|
||||
})
|
||||
committeeCacheHit = promauto.NewCounter(prometheus.CounterOpts{
|
||||
// CommitteeCacheHit tracks the number of committee requests that are in the cache.
|
||||
CommitteeCacheHit = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "committee_cache_hit",
|
||||
Help: "The number of committee requests that are present in the cache.",
|
||||
})
|
||||
committeeCacheSize = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "committee_cache_size",
|
||||
Help: "The number of committees in the committee cache",
|
||||
})
|
||||
)
|
||||
|
||||
// CommitteeInfo defines the validator committee of slot and shard combinations.
|
||||
type CommitteeInfo struct {
|
||||
Committee []uint64
|
||||
Shard uint64
|
||||
// Committees defines the shuffled committees seed.
|
||||
type Committees struct {
|
||||
CommitteeCount uint64
|
||||
Seed [32]byte
|
||||
ShuffledIndices []uint64
|
||||
SortedIndices []uint64
|
||||
}
|
||||
|
||||
// CommitteesInSlot specifies how many CommitteeInfos are in a given slot.
|
||||
type CommitteesInSlot struct {
|
||||
Slot uint64
|
||||
Committees []*CommitteeInfo
|
||||
// CommitteeCache is a struct with 1 queue for looking up shuffled indices list by seed.
|
||||
type CommitteeCache struct {
|
||||
CommitteeCache *cache.FIFO
|
||||
lock sync.RWMutex
|
||||
}
|
||||
|
||||
// CommitteesCache structs with 1 queue for looking up committees by slot.
|
||||
type CommitteesCache struct {
|
||||
committeesCache *cache.FIFO
|
||||
lock sync.RWMutex
|
||||
}
|
||||
|
||||
// slotKeyFn takes the string representation of the slot number as the key
|
||||
// for the committees of a given slot (CommitteesInSlot).
|
||||
func slotKeyFn(obj interface{}) (string, error) {
|
||||
cInfo, ok := obj.(*CommitteesInSlot)
|
||||
// committeeKeyFn takes the seed as the key to retrieve shuffled indices of a committee in a given epoch.
|
||||
func committeeKeyFn(obj interface{}) (string, error) {
|
||||
info, ok := obj.(*Committees)
|
||||
if !ok {
|
||||
return "", ErrNotACommitteeInfo
|
||||
return "", ErrNotCommittee
|
||||
}
|
||||
|
||||
return strconv.Itoa(int(cInfo.Slot)), nil
|
||||
return key(info.Seed), nil
|
||||
}
|
||||
|
||||
// NewCommitteesCache creates a new committee cache for storing/accessing blockInfo from
|
||||
// memory.
|
||||
func NewCommitteesCache() *CommitteesCache {
|
||||
return &CommitteesCache{
|
||||
committeesCache: cache.NewFIFO(slotKeyFn),
|
||||
// NewCommitteesCache creates a new committee cache for storing/accessing shuffled indices of a committee.
|
||||
func NewCommitteesCache() *CommitteeCache {
|
||||
return &CommitteeCache{
|
||||
CommitteeCache: cache.NewFIFO(committeeKeyFn),
|
||||
}
|
||||
}
|
||||
|
||||
// CommitteesInfoBySlot fetches CommitteesInSlot by slot. Returns true with a
|
||||
// reference to the committees info, if exists. Otherwise returns false, nil.
|
||||
func (c *CommitteesCache) CommitteesInfoBySlot(slot uint64) (*CommitteesInSlot, error) {
|
||||
// Committee fetches the shuffled indices by slot and committee index. Every list of indices
|
||||
// represent one committee. Returns true if the list exists with slot and committee index. Otherwise returns false, nil.
|
||||
func (c *CommitteeCache) Committee(slot uint64, seed [32]byte, index uint64) ([]uint64, error) {
|
||||
if !featureconfig.Get().EnableShuffledIndexCache && !featureconfig.Get().EnableNewCache {
|
||||
return nil, nil
|
||||
}
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
|
||||
obj, exists, err := c.committeesCache.GetByKey(strconv.Itoa(int(slot)))
|
||||
obj, exists, err := c.CommitteeCache.GetByKey(key(seed))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if exists {
|
||||
committeeCacheHit.Inc()
|
||||
CommitteeCacheHit.Inc()
|
||||
} else {
|
||||
committeeCacheMiss.Inc()
|
||||
CommitteeCacheMiss.Inc()
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
cInfo, ok := obj.(*CommitteesInSlot)
|
||||
item, ok := obj.(*Committees)
|
||||
if !ok {
|
||||
return nil, ErrNotACommitteeInfo
|
||||
return nil, ErrNotCommittee
|
||||
}
|
||||
|
||||
return cInfo, nil
|
||||
committeeCountPerSlot := uint64(1)
|
||||
if item.CommitteeCount/params.BeaconConfig().SlotsPerEpoch > 1 {
|
||||
committeeCountPerSlot = item.CommitteeCount / params.BeaconConfig().SlotsPerEpoch
|
||||
}
|
||||
|
||||
indexOffSet := index + (slot%params.BeaconConfig().SlotsPerEpoch)*committeeCountPerSlot
|
||||
start, end := startEndIndices(item, indexOffSet)
|
||||
|
||||
return item.ShuffledIndices[start:end], nil
|
||||
}
|
||||
|
||||
// AddCommittees adds CommitteesInSlot object to the cache. This method also trims the least
|
||||
// recently added committeeInfo object if the cache size has ready the max cache size limit.
|
||||
func (c *CommitteesCache) AddCommittees(committees *CommitteesInSlot) error {
|
||||
// AddCommitteeShuffledList adds Committee shuffled list object to the cache. T
|
||||
// his method also trims the least recently list if the cache size has ready the max cache size limit.
|
||||
func (c *CommitteeCache) AddCommitteeShuffledList(committees *Committees) error {
|
||||
if !featureconfig.Get().EnableShuffledIndexCache && !featureconfig.Get().EnableNewCache {
|
||||
return nil
|
||||
}
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
if err := c.committeesCache.AddIfNotPresent(committees); err != nil {
|
||||
if err := c.CommitteeCache.AddIfNotPresent(committees); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
trim(c.committeesCache, maxCacheSize)
|
||||
committeeCacheSize.Set(float64(len(c.committeesCache.ListKeys())))
|
||||
trim(c.CommitteeCache, maxCommitteesCacheSize)
|
||||
return nil
|
||||
}
|
||||
|
||||
// trim the FIFO queue to the maxSize.
|
||||
func trim(queue *cache.FIFO, maxSize int) {
|
||||
for s := len(queue.ListKeys()); s > maxSize; s-- {
|
||||
// #nosec G104 popProcessNoopFunc never returns an error
|
||||
_, _ = queue.Pop(popProcessNoopFunc)
|
||||
// ActiveIndices returns the active indices of a given seed stored in cache.
|
||||
func (c *CommitteeCache) ActiveIndices(seed [32]byte) ([]uint64, error) {
|
||||
if !featureconfig.Get().EnableShuffledIndexCache && !featureconfig.Get().EnableNewCache {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
obj, exists, err := c.CommitteeCache.GetByKey(key(seed))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if exists {
|
||||
CommitteeCacheHit.Inc()
|
||||
} else {
|
||||
CommitteeCacheMiss.Inc()
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
item, ok := obj.(*Committees)
|
||||
if !ok {
|
||||
return nil, ErrNotCommittee
|
||||
}
|
||||
|
||||
return item.SortedIndices, nil
|
||||
}
|
||||
|
||||
// popProcessNoopFunc is a no-op function that never returns an error.
|
||||
func popProcessNoopFunc(obj interface{}) error {
|
||||
return nil
|
||||
func startEndIndices(c *Committees, index uint64) (uint64, uint64) {
|
||||
validatorCount := uint64(len(c.ShuffledIndices))
|
||||
start := sliceutil.SplitOffset(validatorCount, c.CommitteeCount, index)
|
||||
end := sliceutil.SplitOffset(validatorCount, c.CommitteeCount, index+1)
|
||||
return start, end
|
||||
}
|
||||
|
||||
// Using seed as source for key to handle reorgs in the same epoch.
|
||||
// The seed is derived from state's array of randao mixes and epoch value
|
||||
// hashed together. This avoids collisions on different validator set. Spec definition:
|
||||
// https://github.com/ethereum/eth2.0-specs/blob/v0.9.2/specs/core/0_beacon-chain.md#get_seed
|
||||
func key(seed [32]byte) string {
|
||||
return string(seed[:])
|
||||
}
|
||||
|
||||
131
beacon-chain/cache/committee_test.go
vendored
131
beacon-chain/cache/committee_test.go
vendored
@@ -2,95 +2,126 @@ package cache
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
)
|
||||
|
||||
func TestSlotKeyFn_OK(t *testing.T) {
|
||||
cInfo := &CommitteesInSlot{
|
||||
Slot: 999,
|
||||
Committees: []*CommitteeInfo{
|
||||
{Shard: 1, Committee: []uint64{1, 2, 3}},
|
||||
{Shard: 1, Committee: []uint64{4, 5, 6}},
|
||||
},
|
||||
func TestCommitteeKeyFn_OK(t *testing.T) {
|
||||
item := &Committees{
|
||||
CommitteeCount: 1,
|
||||
Seed: [32]byte{'A'},
|
||||
ShuffledIndices: []uint64{1, 2, 3, 4, 5},
|
||||
}
|
||||
|
||||
key, err := slotKeyFn(cInfo)
|
||||
k, err := committeeKeyFn(item)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
strSlot := strconv.Itoa(int(cInfo.Slot))
|
||||
if key != strSlot {
|
||||
t.Errorf("Incorrect hash key: %s, expected %s", key, strSlot)
|
||||
if k != key(item.Seed) {
|
||||
t.Errorf("Incorrect hash k: %s, expected %s", k, key(item.Seed))
|
||||
}
|
||||
}
|
||||
|
||||
func TestSlotKeyFn_InvalidObj(t *testing.T) {
|
||||
_, err := slotKeyFn("bad")
|
||||
if err != ErrNotACommitteeInfo {
|
||||
t.Errorf("Expected error %v, got %v", ErrNotACommitteeInfo, err)
|
||||
func TestCommitteeKeyFn_InvalidObj(t *testing.T) {
|
||||
_, err := committeeKeyFn("bad")
|
||||
if err != ErrNotCommittee {
|
||||
t.Errorf("Expected error %v, got %v", ErrNotCommittee, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCommitteesCache_CommitteesInfoBySlot(t *testing.T) {
|
||||
func TestCommitteeCache_CommitteesByEpoch(t *testing.T) {
|
||||
cache := NewCommitteesCache()
|
||||
|
||||
cInfo := &CommitteesInSlot{
|
||||
Slot: 123,
|
||||
Committees: []*CommitteeInfo{{Shard: 456}},
|
||||
item := &Committees{
|
||||
ShuffledIndices: []uint64{1, 2, 3, 4, 5, 6},
|
||||
Seed: [32]byte{'A'},
|
||||
CommitteeCount: 3,
|
||||
}
|
||||
|
||||
fetchedInfo, err := cache.CommitteesInfoBySlot(cInfo.Slot)
|
||||
slot := params.BeaconConfig().SlotsPerEpoch
|
||||
committeeIndex := uint64(1)
|
||||
indices, err := cache.Committee(slot, item.Seed, committeeIndex)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if fetchedInfo != nil {
|
||||
t.Error("Expected committees info not to exist in empty cache")
|
||||
if indices != nil {
|
||||
t.Error("Expected committee not to exist in empty cache")
|
||||
}
|
||||
|
||||
if err := cache.AddCommittees(cInfo); err != nil {
|
||||
if err := cache.AddCommitteeShuffledList(item); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
fetchedInfo, err = cache.CommitteesInfoBySlot(cInfo.Slot)
|
||||
wantedIndex := uint64(0)
|
||||
indices, err = cache.Committee(slot, item.Seed, wantedIndex)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if fetchedInfo == nil {
|
||||
t.Error("Expected committee info to exist")
|
||||
}
|
||||
if fetchedInfo.Slot != cInfo.Slot {
|
||||
|
||||
start, end := startEndIndices(item, wantedIndex)
|
||||
if !reflect.DeepEqual(indices, item.ShuffledIndices[start:end]) {
|
||||
t.Errorf(
|
||||
"Expected fetched slot number to be %d, got %d",
|
||||
cInfo.Slot,
|
||||
fetchedInfo.Slot,
|
||||
)
|
||||
}
|
||||
if !reflect.DeepEqual(fetchedInfo.Committees, cInfo.Committees) {
|
||||
t.Errorf(
|
||||
"Expected fetched info committee to be %v, got %v",
|
||||
cInfo.Committees,
|
||||
fetchedInfo.Committees,
|
||||
"Expected fetched active indices to be %v, got %v",
|
||||
indices,
|
||||
item.ShuffledIndices[start:end],
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlockCache_maxSize(t *testing.T) {
|
||||
func TestCommitteeCache_ActiveIndices(t *testing.T) {
|
||||
cache := NewCommitteesCache()
|
||||
|
||||
for i := 0; i < maxCacheSize+10; i++ {
|
||||
cInfo := &CommitteesInSlot{
|
||||
Slot: uint64(i),
|
||||
}
|
||||
if err := cache.AddCommittees(cInfo); err != nil {
|
||||
item := &Committees{Seed: [32]byte{'A'}, SortedIndices: []uint64{1, 2, 3, 4, 5, 6}}
|
||||
indices, err := cache.ActiveIndices(item.Seed)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if indices != nil {
|
||||
t.Error("Expected committee count not to exist in empty cache")
|
||||
}
|
||||
|
||||
if err := cache.AddCommitteeShuffledList(item); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
indices, err = cache.ActiveIndices(item.Seed)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(indices, item.SortedIndices) {
|
||||
t.Error("Did not receive correct active indices from cache")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCommitteeCache_CanRotate(t *testing.T) {
|
||||
cache := NewCommitteesCache()
|
||||
|
||||
// Should rotate out all the epochs except 190 through 199.
|
||||
for i := 100; i < 200; i++ {
|
||||
s := []byte(strconv.Itoa(i))
|
||||
item := &Committees{Seed: bytesutil.ToBytes32(s)}
|
||||
if err := cache.AddCommitteeShuffledList(item); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
if len(cache.committeesCache.ListKeys()) != maxCacheSize {
|
||||
t.Errorf(
|
||||
"Expected hash cache key size to be %d, got %d",
|
||||
maxCacheSize,
|
||||
len(cache.committeesCache.ListKeys()),
|
||||
)
|
||||
k := cache.CommitteeCache.ListKeys()
|
||||
if len(k) != maxCommitteesCacheSize {
|
||||
t.Errorf("wanted: %d, got: %d", maxCommitteesCacheSize, len(k))
|
||||
}
|
||||
|
||||
sort.Slice(k, func(i, j int) bool {
|
||||
return k[i] < k[j]
|
||||
})
|
||||
s := bytesutil.ToBytes32([]byte(strconv.Itoa(190)))
|
||||
if k[0] != key(s) {
|
||||
t.Error("incorrect key received for slot 190")
|
||||
}
|
||||
s = bytesutil.ToBytes32([]byte(strconv.Itoa(199)))
|
||||
if k[len(k)-1] != key(s) {
|
||||
t.Error("incorrect key received for slot 199")
|
||||
}
|
||||
}
|
||||
|
||||
25
beacon-chain/cache/common.go
vendored
Normal file
25
beacon-chain/cache/common.go
vendored
Normal file
@@ -0,0 +1,25 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
var (
|
||||
// maxCacheSize is 4x of the epoch length for additional cache padding.
|
||||
// Requests should be only accessing committees within defined epoch length.
|
||||
maxCacheSize = int(4 * params.BeaconConfig().SlotsPerEpoch)
|
||||
)
|
||||
|
||||
// trim the FIFO queue to the maxSize.
|
||||
func trim(queue *cache.FIFO, maxSize int) {
|
||||
for s := len(queue.ListKeys()); s > maxSize; s-- {
|
||||
// #nosec G104 popProcessNoopFunc never returns an error
|
||||
_, _ = queue.Pop(popProcessNoopFunc)
|
||||
}
|
||||
}
|
||||
|
||||
// popProcessNoopFunc is a no-op function that never returns an error.
|
||||
func popProcessNoopFunc(obj interface{}) error {
|
||||
return nil
|
||||
}
|
||||
37
beacon-chain/cache/depositcache/BUILD.bazel
vendored
Normal file
37
beacon-chain/cache/depositcache/BUILD.bazel
vendored
Normal file
@@ -0,0 +1,37 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"deposits_cache.go",
|
||||
"pending_deposits.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache",
|
||||
visibility = ["//beacon-chain:__subpackages__"],
|
||||
deps = [
|
||||
"//proto/beacon/db:go_default_library",
|
||||
"//shared/bytesutil:go_default_library",
|
||||
"//shared/hashutil:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@io_opencensus_go//trace:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"deposits_test.go",
|
||||
"pending_deposits_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//proto/beacon/db:go_default_library",
|
||||
"//shared/bytesutil:go_default_library",
|
||||
"@com_github_gogo_protobuf//proto:go_default_library",
|
||||
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
|
||||
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
|
||||
],
|
||||
)
|
||||
174
beacon-chain/cache/depositcache/deposits_cache.go
vendored
Normal file
174
beacon-chain/cache/depositcache/deposits_cache.go
vendored
Normal file
@@ -0,0 +1,174 @@
|
||||
package depositcache
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"math/big"
|
||||
"sort"
|
||||
"sync"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
dbpb "github.com/prysmaticlabs/prysm/proto/beacon/db"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
var (
|
||||
historicalDepositsCount = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "beacondb_all_deposits",
|
||||
Help: "The number of total deposits in the beaconDB in-memory database",
|
||||
})
|
||||
)
|
||||
|
||||
// DepositFetcher defines a struct which can retrieve deposit information from a store.
|
||||
type DepositFetcher interface {
|
||||
AllDeposits(ctx context.Context, beforeBlk *big.Int) []*ethpb.Deposit
|
||||
DepositByPubkey(ctx context.Context, pubKey []byte) (*ethpb.Deposit, *big.Int)
|
||||
DepositsNumberAndRootAtHeight(ctx context.Context, blockHeight *big.Int) (uint64, [32]byte)
|
||||
}
|
||||
|
||||
// DepositCache stores all in-memory deposit objects. This
|
||||
// stores all the deposit related data that is required by the beacon-node.
|
||||
type DepositCache struct {
|
||||
// Beacon chain deposits in memory.
|
||||
pendingDeposits []*dbpb.DepositContainer
|
||||
deposits []*dbpb.DepositContainer
|
||||
depositsLock sync.RWMutex
|
||||
chainStartDeposits []*ethpb.Deposit
|
||||
chainstartPubkeys map[string]bool
|
||||
chainstartPubkeysLock sync.RWMutex
|
||||
}
|
||||
|
||||
// NewDepositCache instantiates a new deposit cache
|
||||
func NewDepositCache() *DepositCache {
|
||||
return &DepositCache{
|
||||
pendingDeposits: []*dbpb.DepositContainer{},
|
||||
deposits: []*dbpb.DepositContainer{},
|
||||
chainstartPubkeys: make(map[string]bool),
|
||||
chainStartDeposits: make([]*ethpb.Deposit, 0),
|
||||
}
|
||||
}
|
||||
|
||||
// InsertDeposit into the database. If deposit or block number are nil
|
||||
// then this method does nothing.
|
||||
func (dc *DepositCache) InsertDeposit(ctx context.Context, d *ethpb.Deposit, blockNum uint64, index int64, depositRoot [32]byte) {
|
||||
ctx, span := trace.StartSpan(ctx, "DepositsCache.InsertDeposit")
|
||||
defer span.End()
|
||||
if d == nil {
|
||||
log.WithFields(log.Fields{
|
||||
"block": blockNum,
|
||||
"deposit": d,
|
||||
"index": index,
|
||||
"deposit root": hex.EncodeToString(depositRoot[:]),
|
||||
}).Warn("Ignoring nil deposit insertion")
|
||||
return
|
||||
}
|
||||
dc.depositsLock.Lock()
|
||||
defer dc.depositsLock.Unlock()
|
||||
// keep the slice sorted on insertion in order to avoid costly sorting on retrival.
|
||||
heightIdx := sort.Search(len(dc.deposits), func(i int) bool { return dc.deposits[i].Index >= index })
|
||||
newDeposits := append([]*dbpb.DepositContainer{{Deposit: d, Eth1BlockHeight: blockNum, DepositRoot: depositRoot[:], Index: index}}, dc.deposits[heightIdx:]...)
|
||||
dc.deposits = append(dc.deposits[:heightIdx], newDeposits...)
|
||||
historicalDepositsCount.Inc()
|
||||
}
|
||||
|
||||
// InsertDepositContainers inserts a set of deposit containers into our deposit cache.
|
||||
func (dc *DepositCache) InsertDepositContainers(ctx context.Context, ctrs []*dbpb.DepositContainer) {
|
||||
ctx, span := trace.StartSpan(ctx, "DepositsCache.InsertDepositContainers")
|
||||
defer span.End()
|
||||
dc.depositsLock.Lock()
|
||||
defer dc.depositsLock.Unlock()
|
||||
|
||||
sort.SliceStable(ctrs, func(i int, j int) bool { return ctrs[i].Index < ctrs[j].Index })
|
||||
dc.deposits = ctrs
|
||||
historicalDepositsCount.Add(float64(len(ctrs)))
|
||||
}
|
||||
|
||||
// AllDepositContainers returns a list of deposits all historical deposit containers until the given block number.
|
||||
func (dc *DepositCache) AllDepositContainers(ctx context.Context) []*dbpb.DepositContainer {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.AllDepositContainers")
|
||||
defer span.End()
|
||||
dc.depositsLock.RLock()
|
||||
defer dc.depositsLock.RUnlock()
|
||||
|
||||
return dc.deposits
|
||||
}
|
||||
|
||||
// MarkPubkeyForChainstart sets the pubkey deposit status to true.
|
||||
func (dc *DepositCache) MarkPubkeyForChainstart(ctx context.Context, pubkey string) {
|
||||
ctx, span := trace.StartSpan(ctx, "DepositsCache.MarkPubkeyForChainstart")
|
||||
defer span.End()
|
||||
dc.chainstartPubkeysLock.Lock()
|
||||
defer dc.chainstartPubkeysLock.Unlock()
|
||||
dc.chainstartPubkeys[pubkey] = true
|
||||
}
|
||||
|
||||
// PubkeyInChainstart returns bool for whether the pubkey passed in has deposited.
|
||||
func (dc *DepositCache) PubkeyInChainstart(ctx context.Context, pubkey string) bool {
|
||||
ctx, span := trace.StartSpan(ctx, "DepositsCache.PubkeyInChainstart")
|
||||
defer span.End()
|
||||
dc.chainstartPubkeysLock.Lock()
|
||||
defer dc.chainstartPubkeysLock.Unlock()
|
||||
if dc.chainstartPubkeys != nil {
|
||||
return dc.chainstartPubkeys[pubkey]
|
||||
}
|
||||
dc.chainstartPubkeys = make(map[string]bool)
|
||||
return false
|
||||
}
|
||||
|
||||
// AllDeposits returns a list of deposits all historical deposits until the given block number
|
||||
// (inclusive). If no block is specified then this method returns all historical deposits.
|
||||
func (dc *DepositCache) AllDeposits(ctx context.Context, beforeBlk *big.Int) []*ethpb.Deposit {
|
||||
ctx, span := trace.StartSpan(ctx, "DepositsCache.AllDeposits")
|
||||
defer span.End()
|
||||
dc.depositsLock.RLock()
|
||||
defer dc.depositsLock.RUnlock()
|
||||
|
||||
var deposits []*ethpb.Deposit
|
||||
for _, ctnr := range dc.deposits {
|
||||
if beforeBlk == nil || beforeBlk.Uint64() >= ctnr.Eth1BlockHeight {
|
||||
deposits = append(deposits, ctnr.Deposit)
|
||||
}
|
||||
}
|
||||
return deposits
|
||||
}
|
||||
|
||||
// DepositsNumberAndRootAtHeight returns number of deposits made prior to blockheight and the
|
||||
// root that corresponds to the latest deposit at that blockheight.
|
||||
func (dc *DepositCache) DepositsNumberAndRootAtHeight(ctx context.Context, blockHeight *big.Int) (uint64, [32]byte) {
|
||||
ctx, span := trace.StartSpan(ctx, "DepositsCache.DepositsNumberAndRootAtHeight")
|
||||
defer span.End()
|
||||
dc.depositsLock.RLock()
|
||||
defer dc.depositsLock.RUnlock()
|
||||
heightIdx := sort.Search(len(dc.deposits), func(i int) bool { return dc.deposits[i].Eth1BlockHeight > blockHeight.Uint64() })
|
||||
// send the deposit root of the empty trie, if eth1follow distance is greater than the time of the earliest
|
||||
// deposit.
|
||||
if heightIdx == 0 {
|
||||
return 0, [32]byte{}
|
||||
}
|
||||
return uint64(heightIdx), bytesutil.ToBytes32(dc.deposits[heightIdx-1].DepositRoot)
|
||||
}
|
||||
|
||||
// DepositByPubkey looks through historical deposits and finds one which contains
|
||||
// a certain public key within its deposit data.
|
||||
func (dc *DepositCache) DepositByPubkey(ctx context.Context, pubKey []byte) (*ethpb.Deposit, *big.Int) {
|
||||
ctx, span := trace.StartSpan(ctx, "DepositsCache.DepositByPubkey")
|
||||
defer span.End()
|
||||
dc.depositsLock.RLock()
|
||||
defer dc.depositsLock.RUnlock()
|
||||
|
||||
var deposit *ethpb.Deposit
|
||||
var blockNum *big.Int
|
||||
for _, ctnr := range dc.deposits {
|
||||
if bytes.Equal(ctnr.Deposit.Data.PublicKey, pubKey) {
|
||||
deposit = ctnr.Deposit
|
||||
blockNum = big.NewInt(int64(ctnr.Eth1BlockHeight))
|
||||
break
|
||||
}
|
||||
}
|
||||
return deposit, blockNum
|
||||
}
|
||||
275
beacon-chain/cache/depositcache/deposits_test.go
vendored
Normal file
275
beacon-chain/cache/depositcache/deposits_test.go
vendored
Normal file
@@ -0,0 +1,275 @@
|
||||
package depositcache
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
dbpb "github.com/prysmaticlabs/prysm/proto/beacon/db"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
|
||||
const nilDepositErr = "Ignoring nil deposit insertion"
|
||||
|
||||
var _ = DepositFetcher(&DepositCache{})
|
||||
|
||||
func TestBeaconDB_InsertDeposit_LogsOnNilDepositInsertion(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
dc := DepositCache{}
|
||||
|
||||
dc.InsertDeposit(context.Background(), nil, 1, 0, [32]byte{})
|
||||
|
||||
if len(dc.deposits) != 0 {
|
||||
t.Fatal("Number of deposits changed")
|
||||
}
|
||||
if hook.LastEntry().Message != nilDepositErr {
|
||||
t.Errorf("Did not log correct message, wanted \"Ignoring nil deposit insertion\", got \"%s\"", hook.LastEntry().Message)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBeaconDB_InsertDeposit_MaintainsSortedOrderByIndex(t *testing.T) {
|
||||
dc := DepositCache{}
|
||||
|
||||
insertions := []struct {
|
||||
blkNum uint64
|
||||
deposit *ethpb.Deposit
|
||||
index int64
|
||||
}{
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{},
|
||||
index: 0,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{},
|
||||
index: 3,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{},
|
||||
index: 1,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{},
|
||||
index: 4,
|
||||
},
|
||||
}
|
||||
|
||||
for _, ins := range insertions {
|
||||
dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{})
|
||||
}
|
||||
|
||||
expectedIndices := []int64{0, 1, 3, 4}
|
||||
for i, ei := range expectedIndices {
|
||||
if dc.deposits[i].Index != ei {
|
||||
t.Errorf("dc.deposits[%d].Index = %d, wanted %d", i, dc.deposits[i].Index, ei)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestBeaconDB_AllDeposits_ReturnsAllDeposits(t *testing.T) {
|
||||
dc := DepositCache{}
|
||||
|
||||
deposits := []*dbpb.DepositContainer{
|
||||
{
|
||||
Eth1BlockHeight: 10,
|
||||
Deposit: ðpb.Deposit{},
|
||||
},
|
||||
{
|
||||
Eth1BlockHeight: 10,
|
||||
Deposit: ðpb.Deposit{},
|
||||
},
|
||||
{
|
||||
Eth1BlockHeight: 10,
|
||||
Deposit: ðpb.Deposit{},
|
||||
},
|
||||
{
|
||||
Eth1BlockHeight: 11,
|
||||
Deposit: ðpb.Deposit{},
|
||||
},
|
||||
{
|
||||
Eth1BlockHeight: 11,
|
||||
Deposit: ðpb.Deposit{},
|
||||
},
|
||||
{
|
||||
Eth1BlockHeight: 12,
|
||||
Deposit: ðpb.Deposit{},
|
||||
},
|
||||
{
|
||||
Eth1BlockHeight: 12,
|
||||
Deposit: ðpb.Deposit{},
|
||||
},
|
||||
}
|
||||
dc.deposits = deposits
|
||||
|
||||
d := dc.AllDeposits(context.Background(), nil)
|
||||
if len(d) != len(deposits) {
|
||||
t.Errorf("Return the wrong number of deposits (%d) wanted %d", len(d), len(deposits))
|
||||
}
|
||||
}
|
||||
|
||||
func TestBeaconDB_AllDeposits_FiltersDepositUpToAndIncludingBlockNumber(t *testing.T) {
|
||||
dc := DepositCache{}
|
||||
|
||||
deposits := []*dbpb.DepositContainer{
|
||||
{
|
||||
Eth1BlockHeight: 10,
|
||||
Deposit: ðpb.Deposit{},
|
||||
},
|
||||
{
|
||||
Eth1BlockHeight: 10,
|
||||
Deposit: ðpb.Deposit{},
|
||||
},
|
||||
{
|
||||
Eth1BlockHeight: 10,
|
||||
Deposit: ðpb.Deposit{},
|
||||
},
|
||||
{
|
||||
Eth1BlockHeight: 11,
|
||||
Deposit: ðpb.Deposit{},
|
||||
},
|
||||
{
|
||||
Eth1BlockHeight: 11,
|
||||
Deposit: ðpb.Deposit{},
|
||||
},
|
||||
{
|
||||
Eth1BlockHeight: 12,
|
||||
Deposit: ðpb.Deposit{},
|
||||
},
|
||||
{
|
||||
Eth1BlockHeight: 12,
|
||||
Deposit: ðpb.Deposit{},
|
||||
},
|
||||
}
|
||||
dc.deposits = deposits
|
||||
|
||||
d := dc.AllDeposits(context.Background(), big.NewInt(11))
|
||||
expected := 5
|
||||
if len(d) != expected {
|
||||
t.Errorf("Return the wrong number of deposits (%d) wanted %d", len(d), expected)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBeaconDB_DepositsNumberAndRootAtHeight_ReturnsAppropriateCountAndRoot(t *testing.T) {
|
||||
dc := DepositCache{}
|
||||
|
||||
dc.deposits = []*dbpb.DepositContainer{
|
||||
{
|
||||
Eth1BlockHeight: 10,
|
||||
Deposit: ðpb.Deposit{},
|
||||
},
|
||||
{
|
||||
Eth1BlockHeight: 10,
|
||||
Deposit: ðpb.Deposit{},
|
||||
},
|
||||
{
|
||||
Eth1BlockHeight: 10,
|
||||
Deposit: ðpb.Deposit{},
|
||||
},
|
||||
{
|
||||
Eth1BlockHeight: 10,
|
||||
Deposit: ðpb.Deposit{},
|
||||
},
|
||||
{
|
||||
Eth1BlockHeight: 11,
|
||||
Deposit: ðpb.Deposit{},
|
||||
DepositRoot: []byte("root"),
|
||||
},
|
||||
{
|
||||
Eth1BlockHeight: 12,
|
||||
Deposit: ðpb.Deposit{},
|
||||
},
|
||||
{
|
||||
Eth1BlockHeight: 12,
|
||||
Deposit: ðpb.Deposit{},
|
||||
},
|
||||
}
|
||||
|
||||
n, root := dc.DepositsNumberAndRootAtHeight(context.Background(), big.NewInt(11))
|
||||
if int(n) != 5 {
|
||||
t.Errorf("Returned unexpected deposits number %d wanted %d", n, 5)
|
||||
}
|
||||
|
||||
if root != bytesutil.ToBytes32([]byte("root")) {
|
||||
t.Errorf("Returned unexpected root: %v", root)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBeaconDB_DepositsNumberAndRootAtHeight_ReturnsEmptyTrieIfBlockHeightLessThanOldestDeposit(t *testing.T) {
|
||||
dc := DepositCache{}
|
||||
|
||||
dc.deposits = []*dbpb.DepositContainer{
|
||||
{
|
||||
Eth1BlockHeight: 10,
|
||||
Deposit: ðpb.Deposit{},
|
||||
DepositRoot: []byte("root"),
|
||||
},
|
||||
{
|
||||
Eth1BlockHeight: 11,
|
||||
Deposit: ðpb.Deposit{},
|
||||
DepositRoot: []byte("root"),
|
||||
},
|
||||
}
|
||||
|
||||
n, root := dc.DepositsNumberAndRootAtHeight(context.Background(), big.NewInt(2))
|
||||
if int(n) != 0 {
|
||||
t.Errorf("Returned unexpected deposits number %d wanted %d", n, 0)
|
||||
}
|
||||
|
||||
if root != [32]byte{} {
|
||||
t.Errorf("Returned unexpected root: %v", root)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBeaconDB_DepositByPubkey_ReturnsFirstMatchingDeposit(t *testing.T) {
|
||||
dc := DepositCache{}
|
||||
|
||||
dc.deposits = []*dbpb.DepositContainer{
|
||||
{
|
||||
Eth1BlockHeight: 9,
|
||||
Deposit: ðpb.Deposit{
|
||||
Data: ðpb.Deposit_Data{
|
||||
PublicKey: []byte("pk0"),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Eth1BlockHeight: 10,
|
||||
Deposit: ðpb.Deposit{
|
||||
Data: ðpb.Deposit_Data{
|
||||
PublicKey: []byte("pk1"),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Eth1BlockHeight: 11,
|
||||
Deposit: ðpb.Deposit{
|
||||
Data: ðpb.Deposit_Data{
|
||||
PublicKey: []byte("pk1"),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Eth1BlockHeight: 12,
|
||||
Deposit: ðpb.Deposit{
|
||||
Data: ðpb.Deposit_Data{
|
||||
PublicKey: []byte("pk2"),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
dep, blkNum := dc.DepositByPubkey(context.Background(), []byte("pk1"))
|
||||
|
||||
if !bytes.Equal(dep.Data.PublicKey, []byte("pk1")) {
|
||||
t.Error("Returned wrong deposit")
|
||||
}
|
||||
if blkNum.Cmp(big.NewInt(10)) != 0 {
|
||||
t.Errorf("Returned wrong block number %v", blkNum)
|
||||
}
|
||||
}
|
||||
165
beacon-chain/cache/depositcache/pending_deposits.go
vendored
Normal file
165
beacon-chain/cache/depositcache/pending_deposits.go
vendored
Normal file
@@ -0,0 +1,165 @@
|
||||
package depositcache
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math/big"
|
||||
"sort"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
dbpb "github.com/prysmaticlabs/prysm/proto/beacon/db"
|
||||
"github.com/prysmaticlabs/prysm/shared/hashutil"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
var (
|
||||
pendingDepositsCount = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "beacondb_pending_deposits",
|
||||
Help: "The number of pending deposits in the beaconDB in-memory database",
|
||||
})
|
||||
)
|
||||
|
||||
// PendingDepositsFetcher specifically outlines a struct that can retrieve deposits
|
||||
// which have not yet been included in the chain.
|
||||
type PendingDepositsFetcher interface {
|
||||
PendingContainers(ctx context.Context, beforeBlk *big.Int) []*dbpb.DepositContainer
|
||||
}
|
||||
|
||||
// InsertPendingDeposit into the database. If deposit or block number are nil
|
||||
// then this method does nothing.
|
||||
func (dc *DepositCache) InsertPendingDeposit(ctx context.Context, d *ethpb.Deposit, blockNum uint64, index int64, depositRoot [32]byte) {
|
||||
ctx, span := trace.StartSpan(ctx, "DepositsCache.InsertPendingDeposit")
|
||||
defer span.End()
|
||||
if d == nil {
|
||||
log.WithFields(log.Fields{
|
||||
"block": blockNum,
|
||||
"deposit": d,
|
||||
}).Debug("Ignoring nil deposit insertion")
|
||||
return
|
||||
}
|
||||
dc.depositsLock.Lock()
|
||||
defer dc.depositsLock.Unlock()
|
||||
dc.pendingDeposits = append(dc.pendingDeposits,
|
||||
&dbpb.DepositContainer{Deposit: d, Eth1BlockHeight: blockNum, Index: index, DepositRoot: depositRoot[:]})
|
||||
pendingDepositsCount.Inc()
|
||||
span.AddAttributes(trace.Int64Attribute("count", int64(len(dc.pendingDeposits))))
|
||||
}
|
||||
|
||||
// PendingDeposits returns a list of deposits until the given block number
|
||||
// (inclusive). If no block is specified then this method returns all pending
|
||||
// deposits.
|
||||
func (dc *DepositCache) PendingDeposits(ctx context.Context, beforeBlk *big.Int) []*ethpb.Deposit {
|
||||
ctx, span := trace.StartSpan(ctx, "DepositsCache.PendingDeposits")
|
||||
defer span.End()
|
||||
dc.depositsLock.RLock()
|
||||
defer dc.depositsLock.RUnlock()
|
||||
|
||||
var depositCntrs []*dbpb.DepositContainer
|
||||
for _, ctnr := range dc.pendingDeposits {
|
||||
if beforeBlk == nil || beforeBlk.Uint64() >= ctnr.Eth1BlockHeight {
|
||||
depositCntrs = append(depositCntrs, ctnr)
|
||||
}
|
||||
}
|
||||
// Sort the deposits by Merkle index.
|
||||
sort.SliceStable(depositCntrs, func(i, j int) bool {
|
||||
return depositCntrs[i].Index < depositCntrs[j].Index
|
||||
})
|
||||
|
||||
var deposits []*ethpb.Deposit
|
||||
for _, dep := range depositCntrs {
|
||||
deposits = append(deposits, dep.Deposit)
|
||||
}
|
||||
|
||||
span.AddAttributes(trace.Int64Attribute("count", int64(len(deposits))))
|
||||
|
||||
return deposits
|
||||
}
|
||||
|
||||
// PendingContainers returns a list of deposit containers until the given block number
|
||||
// (inclusive).
|
||||
func (dc *DepositCache) PendingContainers(ctx context.Context, beforeBlk *big.Int) []*dbpb.DepositContainer {
|
||||
ctx, span := trace.StartSpan(ctx, "DepositsCache.PendingDeposits")
|
||||
defer span.End()
|
||||
dc.depositsLock.RLock()
|
||||
defer dc.depositsLock.RUnlock()
|
||||
|
||||
var depositCntrs []*dbpb.DepositContainer
|
||||
for _, ctnr := range dc.pendingDeposits {
|
||||
if beforeBlk == nil || beforeBlk.Uint64() >= ctnr.Eth1BlockHeight {
|
||||
depositCntrs = append(depositCntrs, ctnr)
|
||||
}
|
||||
}
|
||||
// Sort the deposits by Merkle index.
|
||||
sort.SliceStable(depositCntrs, func(i, j int) bool {
|
||||
return depositCntrs[i].Index < depositCntrs[j].Index
|
||||
})
|
||||
|
||||
span.AddAttributes(trace.Int64Attribute("count", int64(len(depositCntrs))))
|
||||
|
||||
return depositCntrs
|
||||
}
|
||||
|
||||
// RemovePendingDeposit from the database. The deposit is indexed by the
|
||||
// Index. This method does nothing if deposit ptr is nil.
|
||||
func (dc *DepositCache) RemovePendingDeposit(ctx context.Context, d *ethpb.Deposit) {
|
||||
ctx, span := trace.StartSpan(ctx, "DepositsCache.RemovePendingDeposit")
|
||||
defer span.End()
|
||||
|
||||
if d == nil {
|
||||
log.Debug("Ignoring nil deposit removal")
|
||||
return
|
||||
}
|
||||
|
||||
depRoot, err := hashutil.HashProto(d)
|
||||
if err != nil {
|
||||
log.Errorf("Could not remove deposit %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
dc.depositsLock.Lock()
|
||||
defer dc.depositsLock.Unlock()
|
||||
|
||||
idx := -1
|
||||
for i, ctnr := range dc.pendingDeposits {
|
||||
hash, err := hashutil.HashProto(ctnr.Deposit)
|
||||
if err != nil {
|
||||
log.Errorf("Could not hash deposit %v", err)
|
||||
continue
|
||||
}
|
||||
if hash == depRoot {
|
||||
idx = i
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if idx >= 0 {
|
||||
dc.pendingDeposits = append(dc.pendingDeposits[:idx], dc.pendingDeposits[idx+1:]...)
|
||||
pendingDepositsCount.Dec()
|
||||
}
|
||||
}
|
||||
|
||||
// PrunePendingDeposits removes any deposit which is older than the given deposit merkle tree index.
|
||||
func (dc *DepositCache) PrunePendingDeposits(ctx context.Context, merkleTreeIndex int) {
|
||||
ctx, span := trace.StartSpan(ctx, "DepositsCache.PrunePendingDeposits")
|
||||
defer span.End()
|
||||
|
||||
if merkleTreeIndex == 0 {
|
||||
log.Debug("Ignoring 0 deposit removal")
|
||||
return
|
||||
}
|
||||
|
||||
dc.depositsLock.Lock()
|
||||
defer dc.depositsLock.Unlock()
|
||||
|
||||
var cleanDeposits []*dbpb.DepositContainer
|
||||
for _, dp := range dc.pendingDeposits {
|
||||
if dp.Index >= int64(merkleTreeIndex) {
|
||||
cleanDeposits = append(cleanDeposits, dp)
|
||||
}
|
||||
}
|
||||
|
||||
dc.pendingDeposits = cleanDeposits
|
||||
pendingDepositsCount.Set(float64(len(dc.pendingDeposits)))
|
||||
}
|
||||
162
beacon-chain/cache/depositcache/pending_deposits_test.go
vendored
Normal file
162
beacon-chain/cache/depositcache/pending_deposits_test.go
vendored
Normal file
@@ -0,0 +1,162 @@
|
||||
package depositcache
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math/big"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
dbpb "github.com/prysmaticlabs/prysm/proto/beacon/db"
|
||||
)
|
||||
|
||||
var _ = PendingDepositsFetcher(&DepositCache{})
|
||||
|
||||
func TestInsertPendingDeposit_OK(t *testing.T) {
|
||||
dc := DepositCache{}
|
||||
dc.InsertPendingDeposit(context.Background(), ðpb.Deposit{}, 111, 100, [32]byte{})
|
||||
|
||||
if len(dc.pendingDeposits) != 1 {
|
||||
t.Error("Deposit not inserted")
|
||||
}
|
||||
}
|
||||
|
||||
func TestInsertPendingDeposit_ignoresNilDeposit(t *testing.T) {
|
||||
dc := DepositCache{}
|
||||
dc.InsertPendingDeposit(context.Background(), nil /*deposit*/, 0 /*blockNum*/, 0, [32]byte{})
|
||||
|
||||
if len(dc.pendingDeposits) > 0 {
|
||||
t.Error("Unexpected deposit insertion")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRemovePendingDeposit_OK(t *testing.T) {
|
||||
db := DepositCache{}
|
||||
depToRemove := ðpb.Deposit{Proof: [][]byte{[]byte("A")}}
|
||||
otherDep := ðpb.Deposit{Proof: [][]byte{[]byte("B")}}
|
||||
db.pendingDeposits = []*dbpb.DepositContainer{
|
||||
{Deposit: depToRemove, Index: 1},
|
||||
{Deposit: otherDep, Index: 5},
|
||||
}
|
||||
db.RemovePendingDeposit(context.Background(), depToRemove)
|
||||
|
||||
if len(db.pendingDeposits) != 1 || !proto.Equal(db.pendingDeposits[0].Deposit, otherDep) {
|
||||
t.Error("Failed to remove deposit")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRemovePendingDeposit_IgnoresNilDeposit(t *testing.T) {
|
||||
dc := DepositCache{}
|
||||
dc.pendingDeposits = []*dbpb.DepositContainer{{Deposit: ðpb.Deposit{}}}
|
||||
dc.RemovePendingDeposit(context.Background(), nil /*deposit*/)
|
||||
if len(dc.pendingDeposits) != 1 {
|
||||
t.Errorf("Deposit unexpectedly removed")
|
||||
}
|
||||
}
|
||||
|
||||
func TestPendingDeposit_RoundTrip(t *testing.T) {
|
||||
dc := DepositCache{}
|
||||
dep := ðpb.Deposit{Proof: [][]byte{[]byte("A")}}
|
||||
dc.InsertPendingDeposit(context.Background(), dep, 111, 100, [32]byte{})
|
||||
dc.RemovePendingDeposit(context.Background(), dep)
|
||||
if len(dc.pendingDeposits) != 0 {
|
||||
t.Error("Failed to insert & delete a pending deposit")
|
||||
}
|
||||
}
|
||||
|
||||
func TestPendingDeposits_OK(t *testing.T) {
|
||||
dc := DepositCache{}
|
||||
|
||||
dc.pendingDeposits = []*dbpb.DepositContainer{
|
||||
{Eth1BlockHeight: 2, Deposit: ðpb.Deposit{Proof: [][]byte{[]byte("A")}}},
|
||||
{Eth1BlockHeight: 4, Deposit: ðpb.Deposit{Proof: [][]byte{[]byte("B")}}},
|
||||
{Eth1BlockHeight: 6, Deposit: ðpb.Deposit{Proof: [][]byte{[]byte("c")}}},
|
||||
}
|
||||
|
||||
deposits := dc.PendingDeposits(context.Background(), big.NewInt(4))
|
||||
expected := []*ethpb.Deposit{
|
||||
{Proof: [][]byte{[]byte("A")}},
|
||||
{Proof: [][]byte{[]byte("B")}},
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(deposits, expected) {
|
||||
t.Errorf("Unexpected deposits. got=%+v want=%+v", deposits, expected)
|
||||
}
|
||||
|
||||
all := dc.PendingDeposits(context.Background(), nil)
|
||||
if len(all) != len(dc.pendingDeposits) {
|
||||
t.Error("PendingDeposits(ctx, nil) did not return all deposits")
|
||||
}
|
||||
}
|
||||
|
||||
func TestPrunePendingDeposits_ZeroMerkleIndex(t *testing.T) {
|
||||
dc := DepositCache{}
|
||||
|
||||
dc.pendingDeposits = []*dbpb.DepositContainer{
|
||||
{Eth1BlockHeight: 2, Index: 2},
|
||||
{Eth1BlockHeight: 4, Index: 4},
|
||||
{Eth1BlockHeight: 6, Index: 6},
|
||||
{Eth1BlockHeight: 8, Index: 8},
|
||||
{Eth1BlockHeight: 10, Index: 10},
|
||||
{Eth1BlockHeight: 12, Index: 12},
|
||||
}
|
||||
|
||||
dc.PrunePendingDeposits(context.Background(), 0)
|
||||
expected := []*dbpb.DepositContainer{
|
||||
{Eth1BlockHeight: 2, Index: 2},
|
||||
{Eth1BlockHeight: 4, Index: 4},
|
||||
{Eth1BlockHeight: 6, Index: 6},
|
||||
{Eth1BlockHeight: 8, Index: 8},
|
||||
{Eth1BlockHeight: 10, Index: 10},
|
||||
{Eth1BlockHeight: 12, Index: 12},
|
||||
}
|
||||
if !reflect.DeepEqual(dc.pendingDeposits, expected) {
|
||||
t.Errorf("Unexpected deposits. got=%+v want=%+v", dc.pendingDeposits, expected)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPrunePendingDeposits_OK(t *testing.T) {
|
||||
dc := DepositCache{}
|
||||
|
||||
dc.pendingDeposits = []*dbpb.DepositContainer{
|
||||
{Eth1BlockHeight: 2, Index: 2},
|
||||
{Eth1BlockHeight: 4, Index: 4},
|
||||
{Eth1BlockHeight: 6, Index: 6},
|
||||
{Eth1BlockHeight: 8, Index: 8},
|
||||
{Eth1BlockHeight: 10, Index: 10},
|
||||
{Eth1BlockHeight: 12, Index: 12},
|
||||
}
|
||||
|
||||
dc.PrunePendingDeposits(context.Background(), 6)
|
||||
expected := []*dbpb.DepositContainer{
|
||||
{Eth1BlockHeight: 6, Index: 6},
|
||||
{Eth1BlockHeight: 8, Index: 8},
|
||||
{Eth1BlockHeight: 10, Index: 10},
|
||||
{Eth1BlockHeight: 12, Index: 12},
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(dc.pendingDeposits, expected) {
|
||||
t.Errorf("Unexpected deposits. got=%+v want=%+v", dc.pendingDeposits, expected)
|
||||
}
|
||||
|
||||
dc.pendingDeposits = []*dbpb.DepositContainer{
|
||||
{Eth1BlockHeight: 2, Index: 2},
|
||||
{Eth1BlockHeight: 4, Index: 4},
|
||||
{Eth1BlockHeight: 6, Index: 6},
|
||||
{Eth1BlockHeight: 8, Index: 8},
|
||||
{Eth1BlockHeight: 10, Index: 10},
|
||||
{Eth1BlockHeight: 12, Index: 12},
|
||||
}
|
||||
|
||||
dc.PrunePendingDeposits(context.Background(), 10)
|
||||
expected = []*dbpb.DepositContainer{
|
||||
{Eth1BlockHeight: 10, Index: 10},
|
||||
{Eth1BlockHeight: 12, Index: 12},
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(dc.pendingDeposits, expected) {
|
||||
t.Errorf("Unexpected deposits. got=%+v want=%+v", dc.pendingDeposits, expected)
|
||||
}
|
||||
|
||||
}
|
||||
136
beacon-chain/cache/eth1_data.go
vendored
Normal file
136
beacon-chain/cache/eth1_data.go
vendored
Normal file
@@ -0,0 +1,136 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"sync"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"github.com/prysmaticlabs/prysm/shared/featureconfig"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrNotEth1DataVote will be returned when a cache object is not a pointer to
|
||||
// a Eth1DataVote struct.
|
||||
ErrNotEth1DataVote = errors.New("object is not a eth1 data vote obj")
|
||||
|
||||
// maxEth1DataVoteSize defines the max number of eth1 data votes can cache.
|
||||
maxEth1DataVoteSize = 1000
|
||||
|
||||
// Metrics.
|
||||
eth1DataVoteCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "eth1_data_vote_cache_miss",
|
||||
Help: "The number of eth1 data vote count requests that aren't present in the cache.",
|
||||
})
|
||||
eth1DataVoteCacheHit = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "eth1_data_vote_cache_hit",
|
||||
Help: "The number of eth1 data vote count requests that are present in the cache.",
|
||||
})
|
||||
)
|
||||
|
||||
// Eth1DataVote defines the struct which keeps track of the vote count of individual deposit root.
|
||||
type Eth1DataVote struct {
|
||||
Eth1DataHash [32]byte
|
||||
VoteCount uint64
|
||||
}
|
||||
|
||||
// Eth1DataVoteCache is a struct with 1 queue for looking up eth1 data vote count by deposit root.
|
||||
type Eth1DataVoteCache struct {
|
||||
eth1DataVoteCache *cache.FIFO
|
||||
lock sync.RWMutex
|
||||
}
|
||||
|
||||
// eth1DataVoteKeyFn takes the eth1data hash as the key for the eth1 data vote count of a given eth1data object.
|
||||
func eth1DataVoteKeyFn(obj interface{}) (string, error) {
|
||||
eInfo, ok := obj.(*Eth1DataVote)
|
||||
if !ok {
|
||||
return "", ErrNotEth1DataVote
|
||||
}
|
||||
|
||||
return string(eInfo.Eth1DataHash[:]), nil
|
||||
}
|
||||
|
||||
// NewEth1DataVoteCache creates a new eth1 data vote count cache for storing/accessing Eth1DataVote.
|
||||
func NewEth1DataVoteCache() *Eth1DataVoteCache {
|
||||
return &Eth1DataVoteCache{
|
||||
eth1DataVoteCache: cache.NewFIFO(eth1DataVoteKeyFn),
|
||||
}
|
||||
}
|
||||
|
||||
// Eth1DataVote fetches eth1 data vote count by the eth1data hash. Returns vote count,
|
||||
// if exists. Otherwise returns false, nil.
|
||||
func (c *Eth1DataVoteCache) Eth1DataVote(eth1DataHash [32]byte) (uint64, error) {
|
||||
if !featureconfig.Get().EnableEth1DataVoteCache {
|
||||
// Return a miss result if cache is not enabled.
|
||||
eth1DataVoteCacheMiss.Inc()
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
obj, exists, err := c.eth1DataVoteCache.GetByKey(string(eth1DataHash[:]))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
if exists {
|
||||
eth1DataVoteCacheHit.Inc()
|
||||
} else {
|
||||
eth1DataVoteCacheMiss.Inc()
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
eInfo, ok := obj.(*Eth1DataVote)
|
||||
if !ok {
|
||||
return 0, ErrNotEth1DataVote
|
||||
}
|
||||
|
||||
return eInfo.VoteCount, nil
|
||||
}
|
||||
|
||||
// AddEth1DataVote adds eth1 data vote object to the cache. This method also trims the least
|
||||
// recently added Eth1DataVoteByEpoch object if the cache size has ready the max cache size limit.
|
||||
func (c *Eth1DataVoteCache) AddEth1DataVote(eth1DataVote *Eth1DataVote) error {
|
||||
if !featureconfig.Get().EnableEth1DataVoteCache {
|
||||
return nil
|
||||
}
|
||||
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
if err := c.eth1DataVoteCache.Add(eth1DataVote); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
trim(c.eth1DataVoteCache, maxEth1DataVoteSize)
|
||||
return nil
|
||||
}
|
||||
|
||||
// IncrementEth1DataVote increments the existing eth1 data object's vote count by 1,
|
||||
// and returns the vote count.
|
||||
func (c *Eth1DataVoteCache) IncrementEth1DataVote(eth1DataHash [32]byte) (uint64, error) {
|
||||
if !featureconfig.Get().EnableEth1DataVoteCache {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
obj, exists, err := c.eth1DataVoteCache.GetByKey(string(eth1DataHash[:]))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if !exists {
|
||||
return 0, errors.New("eth1 data vote object does not exist")
|
||||
}
|
||||
|
||||
eth1DataVoteCacheHit.Inc()
|
||||
|
||||
eInfo, _ := obj.(*Eth1DataVote)
|
||||
eInfo.VoteCount++
|
||||
|
||||
if err := c.eth1DataVoteCache.Add(eInfo); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return eInfo.VoteCount, nil
|
||||
}
|
||||
110
beacon-chain/cache/eth1_data_test.go
vendored
Normal file
110
beacon-chain/cache/eth1_data_test.go
vendored
Normal file
@@ -0,0 +1,110 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestEth1DataVoteKeyFn_OK(t *testing.T) {
|
||||
eInfo := &Eth1DataVote{
|
||||
VoteCount: 44,
|
||||
Eth1DataHash: [32]byte{'A'},
|
||||
}
|
||||
|
||||
key, err := eth1DataVoteKeyFn(eInfo)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if key != string(eInfo.Eth1DataHash[:]) {
|
||||
t.Errorf("Incorrect hash key: %s, expected %s", key, string(eInfo.Eth1DataHash[:]))
|
||||
}
|
||||
}
|
||||
|
||||
func TestEth1DataVoteKeyFn_InvalidObj(t *testing.T) {
|
||||
_, err := eth1DataVoteKeyFn("bad")
|
||||
if err != ErrNotEth1DataVote {
|
||||
t.Errorf("Expected error %v, got %v", ErrNotEth1DataVote, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestEth1DataVoteCache_CanAdd(t *testing.T) {
|
||||
cache := NewEth1DataVoteCache()
|
||||
|
||||
eInfo := &Eth1DataVote{
|
||||
VoteCount: 55,
|
||||
Eth1DataHash: [32]byte{'B'},
|
||||
}
|
||||
count, err := cache.Eth1DataVote(eInfo.Eth1DataHash)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if count != 0 {
|
||||
t.Error("Expected seed not to exist in empty cache")
|
||||
}
|
||||
|
||||
if err := cache.AddEth1DataVote(eInfo); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
count, err = cache.Eth1DataVote(eInfo.Eth1DataHash)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if count != eInfo.VoteCount {
|
||||
t.Errorf(
|
||||
"Expected vote count to be %d, got %d",
|
||||
eInfo.VoteCount,
|
||||
count,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
func TestEth1DataVoteCache_CanIncrement(t *testing.T) {
|
||||
cache := NewEth1DataVoteCache()
|
||||
|
||||
eInfo := &Eth1DataVote{
|
||||
VoteCount: 55,
|
||||
Eth1DataHash: [32]byte{'B'},
|
||||
}
|
||||
|
||||
if err := cache.AddEth1DataVote(eInfo); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
_, err := cache.IncrementEth1DataVote(eInfo.Eth1DataHash)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
_, _ = cache.IncrementEth1DataVote(eInfo.Eth1DataHash)
|
||||
count, _ := cache.IncrementEth1DataVote(eInfo.Eth1DataHash)
|
||||
|
||||
if count != 58 {
|
||||
t.Errorf(
|
||||
"Expected vote count to be %d, got %d",
|
||||
58,
|
||||
count,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
func TestEth1Data_MaxSize(t *testing.T) {
|
||||
cache := NewEth1DataVoteCache()
|
||||
|
||||
for i := 0; i < maxEth1DataVoteSize+1; i++ {
|
||||
var hash [32]byte
|
||||
copy(hash[:], []byte(strconv.Itoa(i)))
|
||||
eInfo := &Eth1DataVote{
|
||||
Eth1DataHash: hash,
|
||||
}
|
||||
if err := cache.AddEth1DataVote(eInfo); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
if len(cache.eth1DataVoteCache.ListKeys()) != maxEth1DataVoteSize {
|
||||
t.Errorf(
|
||||
"Expected hash cache key size to be %d, got %d",
|
||||
maxEth1DataVoteSize,
|
||||
len(cache.eth1DataVoteCache.ListKeys()),
|
||||
)
|
||||
}
|
||||
}
|
||||
11
beacon-chain/cache/feature_flag_test.go
vendored
Normal file
11
beacon-chain/cache/feature_flag_test.go
vendored
Normal file
@@ -0,0 +1,11 @@
|
||||
package cache
|
||||
|
||||
import "github.com/prysmaticlabs/prysm/shared/featureconfig"
|
||||
|
||||
func init() {
|
||||
featureconfig.Init(&featureconfig.Flags{
|
||||
EnableAttestationCache: true,
|
||||
EnableEth1DataVoteCache: true,
|
||||
EnableShuffledIndexCache: true,
|
||||
})
|
||||
}
|
||||
@@ -1,33 +0,0 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["main.go"],
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/chaintest",
|
||||
visibility = ["//visibility:private"],
|
||||
deps = [
|
||||
"//beacon-chain/chaintest/backend:go_default_library",
|
||||
"//shared/featureconfig:go_default_library",
|
||||
"@com_github_go_yaml_yaml//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_x_cray_logrus_prefixed_formatter//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_binary(
|
||||
name = "chaintest",
|
||||
embed = [":go_default_library"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
size = "small",
|
||||
srcs = ["yaml_test.go"],
|
||||
data = glob(["tests/**"]),
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/chaintest/backend:go_default_library",
|
||||
"//shared/featureconfig:go_default_library",
|
||||
],
|
||||
)
|
||||
@@ -1,238 +0,0 @@
|
||||
# Ethereum 2.0 E2E Test Suite
|
||||
|
||||
This is a test-suite for conformity end-2-end tests for Prysm's implementation of the Ethereum 2.0 specification. Implementation teams have decided to utilize YAML as a general conformity test format for the current beacon chain's runtime functionality.
|
||||
|
||||
The test suite opts for YAML due to wide language support and support for inline comments.
|
||||
|
||||
# Testing Format
|
||||
|
||||
The testing format follows the official ETH2.0 Specification created [here](https://github.com/ethereum/eth2.0-specs/blob/master/specs/test-format.md)
|
||||
|
||||
## Stateful Tests
|
||||
|
||||
Chain tests check for conformity of a certain client to the beacon chain specification for items such as the fork choice rule and Casper FFG validator rewards & penalties. Stateful tests need to specify a certain configuration of a beacon chain, with items such as the number validators, in the YAML file. Sample tests will all required fields are shown below.
|
||||
|
||||
### State Transition
|
||||
|
||||
The most important use case for this test format is to verify the ins and outs of the Ethereum Phase 0 Beacon Chain state advancement. The specification details very strict guidelines for blocks to successfully trigger a state transition, including items such as Casper Proof of Stake slashing conditions of validators, pseudorandomness in the form of RANDAO, and attestation on shard blocks being processed all inside each incoming beacon block. The YAML configuration for this test type allows for configuring a state transition run over N slots, triggering slashing conditions, processing deposits of new validators, and more.
|
||||
|
||||
An example state transition test for testing slot and block processing will look as follows:
|
||||
|
||||
```yaml
|
||||
title: Sample Ethereum Serenity State Transition Tests
|
||||
summary: Testing full state transition block processing
|
||||
test_suite: prysm
|
||||
fork: sapphire
|
||||
version: 1.0
|
||||
test_cases:
|
||||
- config:
|
||||
epoch_length: 64
|
||||
deposits_for_chain_start: 1000
|
||||
num_slots: 32 # Testing advancing state to slot < SlotsPerEpoch
|
||||
results:
|
||||
slot: 32
|
||||
num_validators: 1000
|
||||
- config:
|
||||
epoch_length: 64
|
||||
deposits_for_chain_start: 16384
|
||||
num_slots: 64
|
||||
deposits:
|
||||
- slot: 1
|
||||
amount: 32
|
||||
merkle_index: 0
|
||||
pubkey: !!binary |
|
||||
SlAAbShSkUg7PLiPHZI/rTS1uAvKiieOrifPN6Moso0=
|
||||
- slot: 15
|
||||
amount: 32
|
||||
merkle_index: 1
|
||||
pubkey: !!binary |
|
||||
Oklajsjdkaklsdlkajsdjlajslkdjlkasjlkdjlajdsd
|
||||
- slot: 55
|
||||
amount: 32
|
||||
merkle_index: 2
|
||||
pubkey: !!binary |
|
||||
LkmqmqoodLKAslkjdkajsdljasdkajlksjdasldjasdd
|
||||
proposer_slashings:
|
||||
- slot: 16 # At slot 16, we trigger a proposal slashing occurring
|
||||
proposer_index: 16385 # We penalize the proposer that was just added from slot 15
|
||||
proposal_1_shard: 0
|
||||
proposal_1_slot: 15
|
||||
proposal_1_root: !!binary |
|
||||
LkmqmqoodLKAslkjdkajsdljasdkajlksjdasldjasdd
|
||||
proposal_2_shard: 0
|
||||
proposal_2_slot: 15
|
||||
proposal_2_root: !!binary |
|
||||
LkmqmqoodLKAslkjdkajsdljasdkajlksjdasldjasdd
|
||||
attester_slashings:
|
||||
- slot: 59 # At slot 59, we trigger a attester slashing
|
||||
slashable_vote_data_1_slot: 55
|
||||
slashable_vote_data_2_slot: 55
|
||||
slashable_vote_data_1_justified_slot: 0
|
||||
slashable_vote_data_2_justified_slot: 1
|
||||
slashable_vote_data_1_custody_0_indices: [16386]
|
||||
slashable_vote_data_1_custody_1_indices: []
|
||||
slashable_vote_data_2_custody_0_indices: []
|
||||
slashable_vote_data_2_custody_1_indices: [16386]
|
||||
results:
|
||||
slot: 64
|
||||
num_validators: 16387
|
||||
penalized_validators: [16385, 16386] # We test that the validators at indices 16385, 16386 were indeed penalized
|
||||
- config:
|
||||
skip_slots: [10, 20]
|
||||
epoch_length: 64
|
||||
deposits_for_chain_start: 1000
|
||||
num_slots: 128 # Testing advancing state's slot == 2*SlotsPerEpoch
|
||||
deposits:
|
||||
- slot: 10
|
||||
amount: 32
|
||||
merkle_index: 0
|
||||
pubkey: !!binary |
|
||||
SlAAbShSkUg7PLiPHZI/rTS1uAvKiieOrifPN6Moso0=
|
||||
- slot: 20
|
||||
amount: 32
|
||||
merkle_index: 1
|
||||
pubkey: !!binary |
|
||||
Oklajsjdkaklsdlkajsdjlajslkdjlkasjlkdjlajdsd
|
||||
results:
|
||||
slot: 128
|
||||
num_validators: 1000 # Validator registry should not have grown if slots 10 and 20 were skipped
|
||||
```
|
||||
|
||||
#### Test Configuration Options
|
||||
|
||||
The following configuration options are available for state transition tests:
|
||||
|
||||
**Config**
|
||||
|
||||
- **skip_slots**: `[int]` determines which slot numbers to simulate a proposer not submitting a block in the state transition TODO
|
||||
- **epoch_length**: `int` the number of slots in an epoch
|
||||
- **deposits_for_chain_start**: `int` the number of eth deposits needed for the beacon chain to initialize (this simulates an initial validator registry based on this number in the test)
|
||||
- **num_slots**: `int` the number of times we run a state transition in the test
|
||||
- **deposits**: `[Deposit Config]` trigger a new validator deposit into the beacon state based on configuration options
|
||||
- **proposer_slashings**: `[Proposer Slashing Config]` trigger a proposer slashing at a certain slot for a certain proposer index
|
||||
- **attester_slashings**: `[Casper Slashing Config]` trigger a attester slashing at a certain slot
|
||||
- **validator_exits**: `[Validator Exit Config]` trigger a voluntary validator exit at a certain slot for a validator index
|
||||
|
||||
**Deposit Config**
|
||||
|
||||
- **slot**: `int` a slot in which to trigger a deposit during a state transition test
|
||||
- **amount**: `int` the ETH deposit amount to trigger
|
||||
- **merkle_index**: `int` the index of the deposit in the validator deposit contract's Merkle trie
|
||||
- **pubkey**: `!!binary` the public key of the validator in the triggered deposit object
|
||||
|
||||
**Proposer Slashing Config**
|
||||
|
||||
- **slot**: `int` a slot in which to trigger a proposer slashing during a state transition test
|
||||
- **proposer_index**: `int` the proposer to penalize
|
||||
- **proposal_1_shard**: `int` the first proposal data's shard id
|
||||
- **proposal_1_slot**: `int` the first proposal data's slot
|
||||
- **proposal_1_root**: `!!binary` the second proposal data's block root
|
||||
- **proposal_2_shard**: `int` the second proposal data's shard id
|
||||
- **proposal_2_slot**: `int` the second proposal data's slot
|
||||
- **proposal_2_root**: `!!binary` the second proposal data's block root
|
||||
|
||||
**Casper Slashing Config**
|
||||
|
||||
- **slot**: `int` a slot in which to trigger a attester slashing during a state transition test
|
||||
- **slashable_vote_data_1_slot**: `int` the slot of the attestation data of slashableVoteData1
|
||||
- **slashable_vote_data_2_slot**: `int` the slot of the attestation data of slashableVoteData2
|
||||
- **slashable_vote_data_1_justified_slot**: `int` the justified slot of the attestation data of slashableVoteData1
|
||||
- **slashable_vote_data_2_justified_slot**: `int` the justified slot of the attestation data of slashableVoteData2
|
||||
- **slashable_vote_data_1_custody_0_indices**: `[int]` the custody indices 0 for slashableVoteData1
|
||||
- **slashable_vote_data_1_custody_1_indices**: `[int]` the custody indices 1 for slashableVoteData1
|
||||
- **slashable_vote_data_2_custody_0_indices**: `[int]` the custody indices 0 for slashableVoteData2
|
||||
- **slashable_vote_data_2_custody_1_indices**: `[int]` the custody indices 1 for slashableVoteData2
|
||||
|
||||
**Validator Exit Config**
|
||||
|
||||
- **slot**: `int` the slot at which a validator wants to voluntarily exit the validator registry
|
||||
- **validator_index**: `int` the index of the validator in the registry that is exiting
|
||||
|
||||
#### Test Results
|
||||
|
||||
The following are **mandatory** fields as they correspond to checks done at the end of the test run.
|
||||
|
||||
- **slot**: `int` check the slot of the state resulting from applying N state transitions in the test
|
||||
- **num_validators** `[int]` check the number of validators in the validator registry after applying N state transitions
|
||||
- **penalized_validators** `[int]` the list of validator indices we verify were penalized during the test
|
||||
- **exited_validators**: `[int]` the list of validator indices we verify voluntarily exited the registry during the test
|
||||
|
||||
## Stateless Tests
|
||||
|
||||
Stateless tests represent simple unit test definitions for important invariants in the ETH2.0 runtime. In particular, these test conformity across clients with respect to items such as Simple Serialize (SSZ), Signature Aggregation (BLS), and Validator Shuffling
|
||||
|
||||
**Simple Serialize**
|
||||
|
||||
TODO
|
||||
|
||||
**Signature Aggregation**
|
||||
|
||||
TODO
|
||||
|
||||
**Validator Shuffling**
|
||||
|
||||
```yaml
|
||||
title: Shuffling Algorithm Tests
|
||||
summary: Test vectors for shuffling a list based upon a seed using `shuffle`
|
||||
test_suite: shuffle
|
||||
fork: tchaikovsky
|
||||
version: 1.0
|
||||
|
||||
test_cases:
|
||||
- input: []
|
||||
output: []
|
||||
seed: !!binary ""
|
||||
- name: boring_list
|
||||
description: List with a single element, 0
|
||||
input: [0]
|
||||
output: [0]
|
||||
seed: !!binary ""
|
||||
- input: [255]
|
||||
output: [255]
|
||||
seed: !!binary ""
|
||||
- input: [4, 6, 2, 6, 1, 4, 6, 2, 1, 5]
|
||||
output: [1, 6, 4, 1, 6, 6, 2, 2, 4, 5]
|
||||
seed: !!binary ""
|
||||
- input: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
|
||||
output: [4, 7, 10, 13, 3, 1, 2, 9, 12, 6, 11, 8, 5]
|
||||
seed: !!binary ""
|
||||
- input: [65, 6, 2, 6, 1, 4, 6, 2, 1, 5]
|
||||
output: [6, 65, 2, 5, 4, 2, 6, 6, 1, 1]
|
||||
seed: !!binary |
|
||||
JlAYJ5H2j8g7PLiPHZI/rTS1uAvKiieOrifPN6Moso0=
|
||||
```
|
||||
|
||||
# Using the Runner
|
||||
|
||||
First, create a directory containing the YAML files you wish to test (or use the default `./sampletests` directory included with Prysm).
|
||||
Then, make sure you have the following folder structure for the directory:
|
||||
|
||||
```
|
||||
yourtestdir/
|
||||
fork-choice-tests/
|
||||
*.yaml
|
||||
...
|
||||
shuffle-tests/
|
||||
*.yaml
|
||||
...
|
||||
state-tests/
|
||||
*.yaml
|
||||
...
|
||||
```
|
||||
|
||||
Then, navigate to the test runner's directory and use the go tool as follows:
|
||||
|
||||
```bash
|
||||
go run main.go -tests-dir /path/to/your/testsdir
|
||||
```
|
||||
|
||||
The runner will then start up a simulated backend and run all your specified YAML tests.
|
||||
|
||||
```bash
|
||||
[2018-11-06 15:01:44] INFO ----Running Chain Tests----
|
||||
[2018-11-06 15:01:44] INFO Running 4 YAML Tests
|
||||
[2018-11-06 15:01:44] INFO Title: Sample Ethereum 2.0 Beacon Chain Test
|
||||
[2018-11-06 15:01:44] INFO Summary: Basic, functioning fork choice rule for Ethereum 2.0
|
||||
[2018-11-06 15:01:44] INFO Test Suite: prysm
|
||||
[2018-11-06 15:01:44] INFO Test Runs Finished In: 0.000643545 Seconds
|
||||
```
|
||||
@@ -1,43 +0,0 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"fork_choice_test_format.go",
|
||||
"helpers.go",
|
||||
"shuffle_test_format.go",
|
||||
"simulated_backend.go",
|
||||
"state_test_format.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/chaintest/backend",
|
||||
visibility = ["//beacon-chain:__subpackages__"],
|
||||
deps = [
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/state:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/utils:go_default_library",
|
||||
"//proto/beacon/p2p/v1:go_default_library",
|
||||
"//shared/bls:go_default_library",
|
||||
"//shared/forkutil:go_default_library",
|
||||
"//shared/hashutil:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
"//shared/sliceutil:go_default_library",
|
||||
"//shared/trieutil:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
size = "small",
|
||||
srcs = ["simulated_backend_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//shared/featureconfig:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
],
|
||||
)
|
||||
@@ -1,51 +0,0 @@
|
||||
package backend
|
||||
|
||||
// ForkChoiceTest --
|
||||
type ForkChoiceTest struct {
|
||||
Title string
|
||||
Summary string
|
||||
TestSuite string `yaml:"test_suite"`
|
||||
TestCases []*ForkChoiceTestCase `yaml:"test_cases"`
|
||||
}
|
||||
|
||||
// ForkChoiceTestCase --
|
||||
type ForkChoiceTestCase struct {
|
||||
Config *ForkChoiceTestConfig `yaml:"config"`
|
||||
Slots []*ForkChoiceTestSlot `yaml:"slots,flow"`
|
||||
Results *ForkChoiceTestResult `yaml:"results"`
|
||||
}
|
||||
|
||||
// ForkChoiceTestConfig --
|
||||
type ForkChoiceTestConfig struct {
|
||||
ValidatorCount uint64 `yaml:"validator_count"`
|
||||
CycleLength uint64 `yaml:"cycle_length"`
|
||||
ShardCount uint64 `yaml:"shard_count"`
|
||||
MinCommitteeSize uint64 `yaml:"min_committee_size"`
|
||||
}
|
||||
|
||||
// ForkChoiceTestSlot --
|
||||
type ForkChoiceTestSlot struct {
|
||||
SlotNumber uint64 `yaml:"slot_number"`
|
||||
NewBlock *TestBlock `yaml:"new_block"`
|
||||
Attestations []*TestAttestation `yaml:",flow"`
|
||||
}
|
||||
|
||||
// ForkChoiceTestResult --
|
||||
type ForkChoiceTestResult struct {
|
||||
Head string
|
||||
LastJustifiedBlock string `yaml:"last_justified_block"`
|
||||
LastFinalizedBlock string `yaml:"last_finalized_block"`
|
||||
}
|
||||
|
||||
// TestBlock --
|
||||
type TestBlock struct {
|
||||
ID string `yaml:"ID"`
|
||||
Parent string `yaml:"parent"`
|
||||
}
|
||||
|
||||
// TestAttestation --
|
||||
type TestAttestation struct {
|
||||
Block string `yaml:"block"`
|
||||
ValidatorRegistry string `yaml:"validators"`
|
||||
CommitteeSlot uint64 `yaml:"committee_slot"`
|
||||
}
|
||||
@@ -1,170 +0,0 @@
|
||||
package backend
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bls"
|
||||
"github.com/prysmaticlabs/prysm/shared/forkutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/hashutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/trieutil"
|
||||
)
|
||||
|
||||
// Generates a simulated beacon block to use
|
||||
// in the next state transition given the current state,
|
||||
// the previous beacon block, and previous beacon block root.
|
||||
func generateSimulatedBlock(
|
||||
beaconState *pb.BeaconState,
|
||||
prevBlockRoot [32]byte,
|
||||
historicalDeposits []*pb.Deposit,
|
||||
simObjects *SimulatedObjects,
|
||||
privKeys []*bls.SecretKey,
|
||||
) (*pb.BeaconBlock, [32]byte, error) {
|
||||
stateRoot, err := hashutil.HashProto(beaconState)
|
||||
if err != nil {
|
||||
return nil, [32]byte{}, fmt.Errorf("could not tree hash state: %v", err)
|
||||
}
|
||||
proposerIdx, err := helpers.BeaconProposerIndex(beaconState, beaconState.Slot+1)
|
||||
if err != nil {
|
||||
return nil, [32]byte{}, err
|
||||
}
|
||||
epoch := helpers.SlotToEpoch(beaconState.Slot + 1)
|
||||
buf := make([]byte, 32)
|
||||
binary.LittleEndian.PutUint64(buf, epoch)
|
||||
domain := forkutil.DomainVersion(beaconState.Fork, epoch, params.BeaconConfig().DomainRandao)
|
||||
// We make the previous validator's index sign the message instead of the proposer.
|
||||
epochSignature := privKeys[proposerIdx].Sign(buf, domain)
|
||||
block := &pb.BeaconBlock{
|
||||
Slot: beaconState.Slot + 1,
|
||||
RandaoReveal: epochSignature.Marshal(),
|
||||
ParentRootHash32: prevBlockRoot[:],
|
||||
StateRootHash32: stateRoot[:],
|
||||
Eth1Data: &pb.Eth1Data{
|
||||
DepositRootHash32: []byte{1},
|
||||
BlockHash32: []byte{2},
|
||||
},
|
||||
Body: &pb.BeaconBlockBody{
|
||||
ProposerSlashings: []*pb.ProposerSlashing{},
|
||||
AttesterSlashings: []*pb.AttesterSlashing{},
|
||||
Attestations: []*pb.Attestation{},
|
||||
Deposits: []*pb.Deposit{},
|
||||
VoluntaryExits: []*pb.VoluntaryExit{},
|
||||
},
|
||||
}
|
||||
if simObjects.simDeposit != nil {
|
||||
depositInput := &pb.DepositInput{
|
||||
Pubkey: []byte(simObjects.simDeposit.Pubkey),
|
||||
WithdrawalCredentialsHash32: make([]byte, 32),
|
||||
ProofOfPossession: make([]byte, 96),
|
||||
}
|
||||
|
||||
data, err := helpers.EncodeDepositData(depositInput, simObjects.simDeposit.Amount, time.Now().Unix())
|
||||
if err != nil {
|
||||
return nil, [32]byte{}, fmt.Errorf("could not encode deposit data: %v", err)
|
||||
}
|
||||
|
||||
// We then update the deposits Merkle trie with the deposit data and return
|
||||
// its Merkle branch leading up to the root of the trie.
|
||||
historicalDepositData := make([][]byte, len(historicalDeposits))
|
||||
for i := range historicalDeposits {
|
||||
historicalDepositData[i] = historicalDeposits[i].DepositData
|
||||
}
|
||||
newTrie, err := trieutil.GenerateTrieFromItems(append(historicalDepositData, data), int(params.BeaconConfig().DepositContractTreeDepth))
|
||||
if err != nil {
|
||||
return nil, [32]byte{}, fmt.Errorf("could not regenerate trie: %v", err)
|
||||
}
|
||||
proof, err := newTrie.MerkleProof(int(simObjects.simDeposit.MerkleIndex))
|
||||
if err != nil {
|
||||
return nil, [32]byte{}, fmt.Errorf("could not generate proof: %v", err)
|
||||
}
|
||||
|
||||
root := newTrie.Root()
|
||||
block.Eth1Data.DepositRootHash32 = root[:]
|
||||
block.Body.Deposits = append(block.Body.Deposits, &pb.Deposit{
|
||||
DepositData: data,
|
||||
MerkleProofHash32S: proof,
|
||||
MerkleTreeIndex: simObjects.simDeposit.MerkleIndex,
|
||||
})
|
||||
}
|
||||
if simObjects.simProposerSlashing != nil {
|
||||
block.Body.ProposerSlashings = append(block.Body.ProposerSlashings, &pb.ProposerSlashing{
|
||||
ProposerIndex: simObjects.simProposerSlashing.ProposerIndex,
|
||||
ProposalData_1: &pb.ProposalSignedData{
|
||||
Slot: simObjects.simProposerSlashing.Proposal1Slot,
|
||||
Shard: simObjects.simProposerSlashing.Proposal1Shard,
|
||||
BlockRootHash32: []byte(simObjects.simProposerSlashing.Proposal1Root),
|
||||
},
|
||||
ProposalData_2: &pb.ProposalSignedData{
|
||||
Slot: simObjects.simProposerSlashing.Proposal2Slot,
|
||||
Shard: simObjects.simProposerSlashing.Proposal2Shard,
|
||||
BlockRootHash32: []byte(simObjects.simProposerSlashing.Proposal2Root),
|
||||
},
|
||||
})
|
||||
}
|
||||
if simObjects.simAttesterSlashing != nil {
|
||||
block.Body.AttesterSlashings = append(block.Body.AttesterSlashings, &pb.AttesterSlashing{
|
||||
SlashableAttestation_1: &pb.SlashableAttestation{
|
||||
Data: &pb.AttestationData{
|
||||
Slot: simObjects.simAttesterSlashing.SlashableAttestation1Slot,
|
||||
JustifiedEpoch: simObjects.simAttesterSlashing.SlashableAttestation1JustifiedEpoch,
|
||||
},
|
||||
CustodyBitfield: []byte(simObjects.simAttesterSlashing.SlashableAttestation1CustodyBitField),
|
||||
ValidatorIndices: simObjects.simAttesterSlashing.SlashableAttestation1ValidatorIndices,
|
||||
},
|
||||
SlashableAttestation_2: &pb.SlashableAttestation{
|
||||
Data: &pb.AttestationData{
|
||||
Slot: simObjects.simAttesterSlashing.SlashableAttestation2Slot,
|
||||
JustifiedEpoch: simObjects.simAttesterSlashing.SlashableAttestation2JustifiedEpoch,
|
||||
},
|
||||
CustodyBitfield: []byte(simObjects.simAttesterSlashing.SlashableAttestation2CustodyBitField),
|
||||
ValidatorIndices: simObjects.simAttesterSlashing.SlashableAttestation2ValidatorIndices,
|
||||
},
|
||||
})
|
||||
}
|
||||
if simObjects.simValidatorExit != nil {
|
||||
block.Body.VoluntaryExits = append(block.Body.VoluntaryExits, &pb.VoluntaryExit{
|
||||
Epoch: simObjects.simValidatorExit.Epoch,
|
||||
ValidatorIndex: simObjects.simValidatorExit.ValidatorIndex,
|
||||
})
|
||||
}
|
||||
blockRoot, err := hashutil.HashBeaconBlock(block)
|
||||
if err != nil {
|
||||
return nil, [32]byte{}, fmt.Errorf("could not tree hash new block: %v", err)
|
||||
}
|
||||
return block, blockRoot, nil
|
||||
}
|
||||
|
||||
// generateInitialSimulatedDeposits generates initial deposits for creating a beacon state in the simulated
|
||||
// backend based on the yaml configuration.
|
||||
func generateInitialSimulatedDeposits(numDeposits uint64) ([]*pb.Deposit, []*bls.SecretKey, error) {
|
||||
genesisTime := time.Date(2018, 9, 0, 0, 0, 0, 0, time.UTC).Unix()
|
||||
deposits := make([]*pb.Deposit, numDeposits)
|
||||
privKeys := make([]*bls.SecretKey, numDeposits)
|
||||
for i := 0; i < len(deposits); i++ {
|
||||
priv, err := bls.RandKey(rand.Reader)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("could not initialize key: %v", err)
|
||||
}
|
||||
depositInput := &pb.DepositInput{
|
||||
Pubkey: priv.PublicKey().Marshal(),
|
||||
WithdrawalCredentialsHash32: make([]byte, 32),
|
||||
ProofOfPossession: make([]byte, 96),
|
||||
}
|
||||
depositData, err := helpers.EncodeDepositData(
|
||||
depositInput,
|
||||
params.BeaconConfig().MaxDepositAmount,
|
||||
genesisTime,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("could not encode genesis block deposits: %v", err)
|
||||
}
|
||||
deposits[i] = &pb.Deposit{DepositData: depositData, MerkleTreeIndex: uint64(i)}
|
||||
privKeys[i] = priv
|
||||
}
|
||||
return deposits, privKeys, nil
|
||||
}
|
||||
@@ -1,18 +0,0 @@
|
||||
package backend
|
||||
|
||||
// ShuffleTest is the root document of a shuffle-test YAML file, carrying
// suite metadata plus the list of individual shuffle test cases to run.
type ShuffleTest struct {
	Title     string             `yaml:"title"`
	Summary   string             `yaml:"summary"`
	TestSuite string             `yaml:"test_suite"`
	Fork      string             `yaml:"fork"`
	Version   string             `yaml:"version"`
	TestCases []*ShuffleTestCase `yaml:"test_cases"`
}
|
||||
|
||||
// ShuffleTestCase holds a single shuffle test vector: the input validator
// indices, the expected shuffled output, and the seed driving the shuffle.
type ShuffleTestCase struct {
	Input  []uint64 `yaml:"input,flow"`
	Output []uint64 `yaml:"output,flow"`
	// Seed previously had no yaml tag. go-yaml lowercases untagged field
	// names, so the explicit tag is behavior-identical; it is added for
	// consistency with the sibling fields.
	Seed string `yaml:"seed"`
}
|
||||
@@ -1,393 +0,0 @@
|
||||
// Package backend contains utilities for simulating an entire
|
||||
// ETH 2.0 beacon chain for e2e tests and benchmarking
|
||||
// purposes.
|
||||
package backend
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
|
||||
b "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/utils"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bls"
|
||||
"github.com/prysmaticlabs/prysm/shared/hashutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/sliceutil"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// SimulatedBackend allowing for a programmatic advancement
// of an in-memory beacon chain for client test runs
// and other e2e use cases.
type SimulatedBackend struct {
	chainService       *blockchain.ChainService // chain service driving block processing
	beaconDB           *db.BeaconDB             // backing database; closed by Shutdown
	state              *pb.BeaconState          // post-state after the most recent transition
	prevBlockRoots     [][32]byte               // roots of processed blocks, genesis root first
	inMemoryBlocks     []*pb.BeaconBlock        // processed blocks, genesis block first
	historicalDeposits []*pb.Deposit            // all deposits included in the chain so far
}
|
||||
|
||||
// SimulatedObjects is a container to hold the
// required primitives for generation of a beacon
// block. Each field is optional; a nil field means the
// corresponding operation is not included in the block.
type SimulatedObjects struct {
	simDeposit          *StateTestDeposit          // deposit to include at the current slot, if any
	simProposerSlashing *StateTestProposerSlashing // proposer slashing to include, if any
	simAttesterSlashing *StateTestAttesterSlashing // attester slashing to include, if any
	simValidatorExit    *StateTestValidatorExit    // voluntary exit to include, if any
}
|
||||
|
||||
// NewSimulatedBackend creates an instance by initializing a chain service
|
||||
// utilizing a mockDB which will act according to test run parameters specified
|
||||
// in the common ETH 2.0 client test YAML format.
|
||||
func NewSimulatedBackend() (*SimulatedBackend, error) {
|
||||
db, err := db.SetupDB()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not setup simulated backend db: %v", err)
|
||||
}
|
||||
cs, err := blockchain.NewChainService(context.Background(), &blockchain.Config{
|
||||
BeaconDB: db,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &SimulatedBackend{
|
||||
chainService: cs,
|
||||
beaconDB: db,
|
||||
inMemoryBlocks: make([]*pb.BeaconBlock, 0),
|
||||
historicalDeposits: make([]*pb.Deposit, 0),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// SetupBackend sets up the simulated backend with simulated deposits, and initializes the
|
||||
// state and genesis block.
|
||||
func (sb *SimulatedBackend) SetupBackend(numOfDeposits uint64) ([]*bls.SecretKey, error) {
|
||||
initialDeposits, privKeys, err := generateInitialSimulatedDeposits(numOfDeposits)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not simulate initial validator deposits: %v", err)
|
||||
}
|
||||
if err := sb.setupBeaconStateAndGenesisBlock(initialDeposits); err != nil {
|
||||
return nil, fmt.Errorf("could not set up beacon state and initialize genesis block %v", err)
|
||||
}
|
||||
return privKeys, nil
|
||||
}
|
||||
|
||||
// DB returns the underlying db instance in the simulated
// backend, so tests can inspect or tear down the database directly.
func (sb *SimulatedBackend) DB() *db.BeaconDB {
	return sb.beaconDB
}
|
||||
|
||||
// GenerateBlockAndAdvanceChain generates a simulated block and runs that block though
// state transition. On success the backend's state, block-root history,
// in-memory block list, and historical deposit set are all updated.
func (sb *SimulatedBackend) GenerateBlockAndAdvanceChain(objects *SimulatedObjects, privKeys []*bls.SecretKey) error {
	// The parent of the new block is the most recently recorded block root.
	prevBlockRoot := sb.prevBlockRoots[len(sb.prevBlockRoots)-1]
	// We generate a new block to pass into the state transition.
	newBlock, newBlockRoot, err := generateSimulatedBlock(
		sb.state,
		prevBlockRoot,
		sb.historicalDeposits,
		objects,
		privKeys,
	)
	if err != nil {
		return fmt.Errorf("could not generate simulated beacon block %v", err)
	}
	// NOTE(review): newState aliases sb.state (pointer copy), so the Eth1
	// data assignment below mutates the backend's current state in place,
	// before the transition executes.
	newState := sb.state
	newState.LatestEth1Data = newBlock.Eth1Data
	newState, err = state.ExecuteStateTransition(
		context.Background(),
		sb.state,
		newBlock,
		prevBlockRoot,
		state.DefaultConfig(),
	)
	if err != nil {
		return fmt.Errorf("could not execute state transition: %v", err)
	}

	// Record the post-state, the new block root, and the block itself so
	// subsequent slots build on them.
	sb.state = newState
	sb.prevBlockRoots = append(sb.prevBlockRoots, newBlockRoot)
	sb.inMemoryBlocks = append(sb.inMemoryBlocks, newBlock)
	// Deposits included in the block join the historical deposit set used
	// when generating future blocks.
	if len(newBlock.Body.Deposits) > 0 {
		sb.historicalDeposits = append(sb.historicalDeposits, newBlock.Body.Deposits...)
	}

	return nil
}
|
||||
|
||||
// GenerateNilBlockAndAdvanceChain would trigger a state transition with a nil block.
|
||||
func (sb *SimulatedBackend) GenerateNilBlockAndAdvanceChain() error {
|
||||
prevBlockRoot := sb.prevBlockRoots[len(sb.prevBlockRoots)-1]
|
||||
newState, err := state.ExecuteStateTransition(
|
||||
context.Background(),
|
||||
sb.state,
|
||||
nil,
|
||||
prevBlockRoot,
|
||||
state.DefaultConfig(),
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not execute state transition: %v", err)
|
||||
}
|
||||
sb.state = newState
|
||||
return nil
|
||||
}
|
||||
|
||||
// Shutdown closes the db associated with the simulated backend.
// Tests typically also call db.TeardownDB to remove the on-disk files.
func (sb *SimulatedBackend) Shutdown() error {
	return sb.beaconDB.Close()
}
|
||||
|
||||
// State is a getter to return the current beacon state
// of the backend (the post-state of the latest transition).
func (sb *SimulatedBackend) State() *pb.BeaconState {
	return sb.state
}
|
||||
|
||||
// InMemoryBlocks returns the blocks that have been processed by the simulated
// backend, starting with the genesis block.
func (sb *SimulatedBackend) InMemoryBlocks() []*pb.BeaconBlock {
	return sb.inMemoryBlocks
}
|
||||
|
||||
// RunForkChoiceTest uses a parsed set of chaintests from a YAML file
// according to the ETH 2.0 client chain test specification and runs them
// against the simulated backend.
func (sb *SimulatedBackend) RunForkChoiceTest(testCase *ForkChoiceTestCase) error {
	defer db.TeardownDB(sb.beaconDB)
	// Utilize the config parameters in the test case to setup
	// the DB and set global config parameters accordingly.
	// Config parameters include: ValidatorCount, ShardCount,
	// CycleLength, MinCommitteeSize, and more based on the YAML
	// test language specification.
	c := params.BeaconConfig()
	c.ShardCount = testCase.Config.ShardCount
	c.SlotsPerEpoch = testCase.Config.CycleLength
	c.TargetCommitteeSize = testCase.Config.MinCommitteeSize
	params.OverrideBeaconConfig(c)

	// Then, we create the validators based on the custom test config.
	// NOTE(review): the validators slice is built but not yet persisted or
	// used — see the TODO below.
	validators := make([]*pb.Validator, testCase.Config.ValidatorCount)
	for i := uint64(0); i < testCase.Config.ValidatorCount; i++ {
		validators[i] = &pb.Validator{
			ExitEpoch: params.BeaconConfig().ActivationExitDelay,
			Pubkey:    []byte{},
		}
	}
	// TODO(#718): Next step is to update and save the blocks specified
	// in the test case into the DB.
	//
	// Then, we call the updateHead routine and confirm the
	// chain's head is the expected result from the test case.
	return nil
}
|
||||
|
||||
// RunShuffleTest uses validator set specified from a YAML file, runs the validator shuffle
|
||||
// algorithm, then compare the output with the expected output from the YAML file.
|
||||
func (sb *SimulatedBackend) RunShuffleTest(testCase *ShuffleTestCase) error {
|
||||
defer db.TeardownDB(sb.beaconDB)
|
||||
seed := common.BytesToHash([]byte(testCase.Seed))
|
||||
output, err := utils.ShuffleIndices(seed, testCase.Input)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !reflect.DeepEqual(output, testCase.Output) {
|
||||
return fmt.Errorf("shuffle result error: expected %v, actual %v", testCase.Output, output)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// RunStateTransitionTest advances a beacon chain state transition an N amount of
// slots from a genesis state, with a block being processed at every iteration
// of the state transition function. At the end, the resulting state is
// compared against the expected results declared in the YAML test case.
func (sb *SimulatedBackend) RunStateTransitionTest(testCase *StateTestCase) error {
	defer db.TeardownDB(sb.beaconDB)
	// Override the global beacon config with the test case parameters
	// before any state objects are created.
	setTestConfig(testCase)

	privKeys, err := sb.initializeStateTest(testCase)
	if err != nil {
		return fmt.Errorf("could not initialize state test %v", err)
	}
	// Wall-clock duration of each block-bearing transition, averaged below.
	averageTimesPerTransition := []time.Duration{}
	startSlot := params.BeaconConfig().GenesisSlot
	for i := startSlot; i < startSlot+testCase.Config.NumSlots; i++ {

		// If the slot is marked as skipped in the configuration options,
		// we simply run the state transition with a nil block argument.
		if sliceutil.IsInUint64(i, testCase.Config.SkipSlots) {
			if err := sb.GenerateNilBlockAndAdvanceChain(); err != nil {
				return fmt.Errorf("could not advance the chain with a nil block %v", err)
			}
			continue
		}

		// Gather the deposits/slashings/exits configured for this slot and
		// include them in the next simulated block.
		simulatedObjects := sb.generateSimulatedObjects(testCase, i)
		startTime := time.Now()

		if err := sb.GenerateBlockAndAdvanceChain(simulatedObjects, privKeys); err != nil {
			return fmt.Errorf("could not generate the block and advance the chain %v", err)
		}

		endTime := time.Now()
		averageTimesPerTransition = append(averageTimesPerTransition, endTime.Sub(startTime))
	}

	log.Infof(
		"with %d initial deposits, each state transition took average time = %v",
		testCase.Config.DepositsForChainStart,
		averageDuration(averageTimesPerTransition),
	)

	// Verify the resulting state against the expected results.
	if err := sb.compareTestCase(testCase); err != nil {
		return err
	}

	return nil
}
|
||||
|
||||
// initializeStateTest sets up the environment by generating all the required objects in order
|
||||
// to proceed with the state test.
|
||||
func (sb *SimulatedBackend) initializeStateTest(testCase *StateTestCase) ([]*bls.SecretKey, error) {
|
||||
initialDeposits, privKeys, err := generateInitialSimulatedDeposits(testCase.Config.DepositsForChainStart)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not simulate initial validator deposits: %v", err)
|
||||
}
|
||||
if err := sb.setupBeaconStateAndGenesisBlock(initialDeposits); err != nil {
|
||||
return nil, fmt.Errorf("could not set up beacon state and initialize genesis block %v", err)
|
||||
}
|
||||
return privKeys, nil
|
||||
}
|
||||
|
||||
// setupBeaconStateAndGenesisBlock creates the initial beacon state and genesis block in order to
// proceed with the test. The deposits become both the genesis validator set
// and the initial historical deposit list.
func (sb *SimulatedBackend) setupBeaconStateAndGenesisBlock(initialDeposits []*pb.Deposit) error {
	var err error
	// Day 0 normalizes to the last day of the previous month; this matches
	// the timestamp used in generateInitialSimulatedDeposits.
	genesisTime := time.Date(2018, 9, 0, 0, 0, 0, 0, time.UTC).Unix()
	sb.state, err = state.GenesisBeaconState(initialDeposits, uint64(genesisTime), nil)
	if err != nil {
		return fmt.Errorf("could not initialize simulated beacon state: %v", err)
	}
	sb.historicalDeposits = initialDeposits

	// Hash the genesis state to produce the state root embedded in the
	// genesis block. (The errors below are in fact checked; an older
	// comment claiming they were ignored was stale.)
	stateRoot, err := hashutil.HashProto(sb.state)
	if err != nil {
		return fmt.Errorf("could not tree hash state: %v", err)
	}
	genesisBlock := b.NewGenesisBlock(stateRoot[:])
	genesisBlockRoot, err := hashutil.HashBeaconBlock(genesisBlock)
	if err != nil {
		return fmt.Errorf("could not tree hash genesis block: %v", err)
	}

	// We now keep track of generated blocks for each state transition in
	// a slice.
	sb.prevBlockRoots = [][32]byte{genesisBlockRoot}
	sb.inMemoryBlocks = append(sb.inMemoryBlocks, genesisBlock)
	return nil
}
|
||||
|
||||
// generateSimulatedObjects generates the simulated objects depending on the testcase and current slot.
|
||||
func (sb *SimulatedBackend) generateSimulatedObjects(testCase *StateTestCase, slotNumber uint64) *SimulatedObjects {
|
||||
// If the slot is not skipped, we check if we are simulating a deposit at the current slot.
|
||||
var simulatedDeposit *StateTestDeposit
|
||||
for _, deposit := range testCase.Config.Deposits {
|
||||
if deposit.Slot == slotNumber {
|
||||
simulatedDeposit = deposit
|
||||
break
|
||||
}
|
||||
}
|
||||
var simulatedProposerSlashing *StateTestProposerSlashing
|
||||
for _, pSlashing := range testCase.Config.ProposerSlashings {
|
||||
if pSlashing.Slot == slotNumber {
|
||||
simulatedProposerSlashing = pSlashing
|
||||
break
|
||||
}
|
||||
}
|
||||
var simulatedAttesterSlashing *StateTestAttesterSlashing
|
||||
for _, cSlashing := range testCase.Config.AttesterSlashings {
|
||||
if cSlashing.Slot == slotNumber {
|
||||
simulatedAttesterSlashing = cSlashing
|
||||
break
|
||||
}
|
||||
}
|
||||
var simulatedValidatorExit *StateTestValidatorExit
|
||||
for _, exit := range testCase.Config.ValidatorExits {
|
||||
if exit.Epoch == slotNumber/params.BeaconConfig().SlotsPerEpoch {
|
||||
simulatedValidatorExit = exit
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return &SimulatedObjects{
|
||||
simDeposit: simulatedDeposit,
|
||||
simProposerSlashing: simulatedProposerSlashing,
|
||||
simAttesterSlashing: simulatedAttesterSlashing,
|
||||
simValidatorExit: simulatedValidatorExit,
|
||||
}
|
||||
}
|
||||
|
||||
// compareTestCase compares the state in the simulated backend against the values in inputted test case. If
|
||||
// there are any discrepancies it returns an error.
|
||||
func (sb *SimulatedBackend) compareTestCase(testCase *StateTestCase) error {
|
||||
if sb.state.Slot != testCase.Results.Slot {
|
||||
return fmt.Errorf(
|
||||
"incorrect state slot after %d state transitions without blocks, wanted %d, received %d",
|
||||
testCase.Config.NumSlots,
|
||||
sb.state.Slot,
|
||||
testCase.Results.Slot,
|
||||
)
|
||||
}
|
||||
if len(sb.state.ValidatorRegistry) != testCase.Results.NumValidators {
|
||||
return fmt.Errorf(
|
||||
"incorrect num validators after %d state transitions without blocks, wanted %d, received %d",
|
||||
testCase.Config.NumSlots,
|
||||
testCase.Results.NumValidators,
|
||||
len(sb.state.ValidatorRegistry),
|
||||
)
|
||||
}
|
||||
for _, slashed := range testCase.Results.SlashedValidators {
|
||||
if sb.state.ValidatorRegistry[slashed].SlashedEpoch == params.BeaconConfig().FarFutureEpoch {
|
||||
return fmt.Errorf(
|
||||
"expected validator at index %d to have been slashed",
|
||||
slashed,
|
||||
)
|
||||
}
|
||||
}
|
||||
for _, exited := range testCase.Results.ExitedValidators {
|
||||
if sb.state.ValidatorRegistry[exited].StatusFlags != pb.Validator_INITIATED_EXIT {
|
||||
return fmt.Errorf(
|
||||
"expected validator at index %d to have exited",
|
||||
exited,
|
||||
)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// setTestConfig overrides the global beacon config with the epoch length and
// chain-start deposit count declared in the YAML test case.
func setTestConfig(testCase *StateTestCase) {
	// We setup the initial configuration for running state
	// transition tests below.
	c := params.BeaconConfig()
	c.SlotsPerEpoch = testCase.Config.SlotsPerEpoch
	c.DepositsForChainStart = testCase.Config.DepositsForChainStart
	params.OverrideBeaconConfig(c)
}
|
||||
|
||||
func averageDuration(times []time.Duration) time.Duration {
|
||||
sum := int64(0)
|
||||
for _, t := range times {
|
||||
sum += t.Nanoseconds()
|
||||
}
|
||||
return time.Duration(sum / int64(len(times)))
|
||||
}
|
||||
@@ -1,85 +0,0 @@
|
||||
package backend
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db"
|
||||
"github.com/prysmaticlabs/prysm/shared/featureconfig"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
)
|
||||
|
||||
// init enables the crosslinks feature flag for every test in this package.
func init() {
	featureconfig.InitFeatureConfig(&featureconfig.FeatureFlagConfig{
		EnableCrosslinks: true,
	})
}
|
||||
|
||||
func TestSimulatedBackendStop_ShutsDown(t *testing.T) {
|
||||
|
||||
backend, err := NewSimulatedBackend()
|
||||
if err != nil {
|
||||
t.Fatalf("Could not create a new simulated backedn %v", err)
|
||||
}
|
||||
if err := backend.Shutdown(); err != nil {
|
||||
t.Errorf("Could not successfully shutdown simulated backend %v", err)
|
||||
}
|
||||
|
||||
db.TeardownDB(backend.beaconDB)
|
||||
}
|
||||
|
||||
// TestGenerateBlockAndAdvanceChain_IncreasesSlot checks that each generated
// block advances the state by one slot and is recorded in inMemoryBlocks.
func TestGenerateBlockAndAdvanceChain_IncreasesSlot(t *testing.T) {
	backend, err := NewSimulatedBackend()
	if err != nil {
		t.Fatalf("Could not create a new simulated backend %v", err)
	}

	privKeys, err := backend.SetupBackend(100)
	if err != nil {
		t.Fatalf("Could not set up backend %v", err)
	}
	defer backend.Shutdown()
	defer db.TeardownDB(backend.beaconDB)

	// Advance one slot beyond a full epoch to cross the epoch boundary.
	slotLimit := params.BeaconConfig().SlotsPerEpoch + uint64(1)

	for i := uint64(0); i < slotLimit; i++ {
		if err := backend.GenerateBlockAndAdvanceChain(&SimulatedObjects{}, privKeys); err != nil {
			t.Fatalf("Could not generate block and transition state successfully %v for slot %d", err, backend.state.Slot+1)
		}
		// The newest in-memory block should sit at the state's current slot.
		if backend.inMemoryBlocks[len(backend.inMemoryBlocks)-1].Slot != backend.state.Slot {
			t.Errorf("In memory Blocks do not have the same last slot as the state, expected %d but got %v",
				backend.state.Slot, backend.inMemoryBlocks[len(backend.inMemoryBlocks)-1])
		}
	}

	if backend.state.Slot != params.BeaconConfig().GenesisSlot+uint64(slotLimit) {
		t.Errorf("Unequal state slot and expected slot %d %d", backend.state.Slot, slotLimit)
	}

}
|
||||
|
||||
func TestGenerateNilBlockAndAdvanceChain_IncreasesSlot(t *testing.T) {
|
||||
backend, err := NewSimulatedBackend()
|
||||
if err != nil {
|
||||
t.Fatalf("Could not create a new simulated backedn %v", err)
|
||||
}
|
||||
|
||||
if _, err := backend.SetupBackend(100); err != nil {
|
||||
t.Fatalf("Could not set up backend %v", err)
|
||||
}
|
||||
defer backend.Shutdown()
|
||||
defer db.TeardownDB(backend.beaconDB)
|
||||
|
||||
slotLimit := params.BeaconConfig().SlotsPerEpoch + uint64(1)
|
||||
|
||||
for i := uint64(0); i < slotLimit; i++ {
|
||||
if err := backend.GenerateNilBlockAndAdvanceChain(); err != nil {
|
||||
t.Fatalf("Could not generate block and transition state successfully %v for slot %d", err, backend.state.Slot+1)
|
||||
}
|
||||
}
|
||||
|
||||
if backend.state.Slot != params.BeaconConfig().GenesisSlot+uint64(slotLimit) {
|
||||
t.Errorf("Unequal state slot and expected slot %d %d", backend.state.Slot, slotLimit)
|
||||
}
|
||||
|
||||
}
|
||||
@@ -1,78 +0,0 @@
|
||||
package backend
|
||||
|
||||
// StateTest is the root document of a state-transition-test YAML file.
// Title and Summary carry no yaml tags; go-yaml matches them by the
// lowercased field name ("title", "summary").
type StateTest struct {
	Title     string
	Summary   string
	Fork      string           `yaml:"fork"`
	Version   string           `yaml:"version"`
	TestSuite string           `yaml:"test_suite"`
	TestCases []*StateTestCase `yaml:"test_cases"`
}
|
||||
|
||||
// StateTestCase pairs the configuration driving one state-transition run
// with the expected end-state results.
type StateTestCase struct {
	Config  *StateTestConfig  `yaml:"config"`
	Results *StateTestResults `yaml:"results"`
}
|
||||
|
||||
// StateTestConfig describes how to drive one state-transition test run:
// chain parameters, the number of slots to advance, and per-slot operations
// (deposits, slashings, exits) to include in generated blocks.
type StateTestConfig struct {
	SkipSlots             []uint64                     `yaml:"skip_slots"`     // slots advanced with a nil block
	DepositSlots          []uint64                     `yaml:"deposit_slots"`
	Deposits              []*StateTestDeposit          `yaml:"deposits"`
	ProposerSlashings     []*StateTestProposerSlashing `yaml:"proposer_slashings"`
	AttesterSlashings     []*StateTestAttesterSlashing `yaml:"attester_slashings"`
	ValidatorExits        []*StateTestValidatorExit    `yaml:"validator_exits"`
	SlotsPerEpoch         uint64                       `yaml:"slots_per_epoch"`
	ShardCount            uint64                       `yaml:"shard_count"`
	DepositsForChainStart uint64                       `yaml:"deposits_for_chain_start"` // size of the genesis validator set
	NumSlots              uint64                       `yaml:"num_slots"`                // total slots to advance from genesis
}
|
||||
|
||||
// StateTestDeposit describes a deposit to include in the block generated at
// the given slot during a state-transition test.
type StateTestDeposit struct {
	Slot        uint64 `yaml:"slot"`
	Amount      uint64 `yaml:"amount"`
	MerkleIndex uint64 `yaml:"merkle_index"`
	Pubkey      string `yaml:"pubkey"`
}
|
||||
|
||||
// StateTestProposerSlashing describes a proposer slashing to include at the
// given slot; the two proposals form the conflicting evidence pair.
type StateTestProposerSlashing struct {
	Slot           uint64 `yaml:"slot"` // slot at which the slashing is included in a block
	ProposerIndex  uint64 `yaml:"proposer_index"`
	Proposal1Shard uint64 `yaml:"proposal_1_shard"`
	Proposal2Shard uint64 `yaml:"proposal_2_shard"`
	Proposal1Slot  uint64 `yaml:"proposal_1_slot"`
	Proposal2Slot  uint64 `yaml:"proposal_2_slot"`
	Proposal1Root  string `yaml:"proposal_1_root"`
	Proposal2Root  string `yaml:"proposal_2_root"`
}
|
||||
|
||||
// StateTestAttesterSlashing describes an attester slashing to include at the
// given slot; the two slashable attestations form the evidence pair.
type StateTestAttesterSlashing struct {
	Slot                                  uint64   `yaml:"slot"` // slot at which the slashing is included in a block
	SlashableAttestation1Slot             uint64   `yaml:"slashable_attestation_1_slot"`
	SlashableAttestation1JustifiedEpoch   uint64   `yaml:"slashable_attestation_1_justified_epoch"`
	SlashableAttestation1ValidatorIndices []uint64 `yaml:"slashable_attestation_1_validator_indices"`
	SlashableAttestation1CustodyBitField  string   `yaml:"slashable_attestation_1_custody_bitfield"`
	SlashableAttestation2Slot             uint64   `yaml:"slashable_attestation_2_slot"`
	SlashableAttestation2JustifiedEpoch   uint64   `yaml:"slashable_attestation_2_justified_epoch"`
	SlashableAttestation2ValidatorIndices []uint64 `yaml:"slashable_attestation_2_validator_indices"`
	SlashableAttestation2CustodyBitField  string   `yaml:"slashable_attestation_2_custody_bitfield"`
}
|
||||
|
||||
// StateTestValidatorExit describes a voluntary exit to include during the
// given epoch of a state-transition test.
type StateTestValidatorExit struct {
	Epoch          uint64 `yaml:"epoch"`
	ValidatorIndex uint64 `yaml:"validator_index"`
}
|
||||
|
||||
// StateTestResults lists the expected end-of-run state values that
// compareTestCase verifies. Slot carries no yaml tag; go-yaml matches it
// by the lowercased field name ("slot").
type StateTestResults struct {
	Slot              uint64
	NumValidators     int      `yaml:"num_validators"`
	SlashedValidators []uint64 `yaml:"slashed_validators"`
	ExitedValidators  []uint64 `yaml:"exited_validators"`
}
|
||||
@@ -1,145 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"path"
|
||||
"time"
|
||||
|
||||
"github.com/go-yaml/yaml"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/chaintest/backend"
|
||||
"github.com/prysmaticlabs/prysm/shared/featureconfig"
|
||||
log "github.com/sirupsen/logrus"
|
||||
prefixed "github.com/x-cray/logrus-prefixed-formatter"
|
||||
)
|
||||
|
||||
// init disables the crosslinks feature flag for the standalone chaintest
// runner binary.
func init() {
	featureconfig.InitFeatureConfig(&featureconfig.FeatureFlagConfig{
		EnableCrosslinks: false,
	})
}
|
||||
|
||||
func readTestsFromYaml(yamlDir string) ([]interface{}, error) {
|
||||
const forkChoiceTestsFolderName = "fork-choice-tests"
|
||||
const shuffleTestsFolderName = "shuffle-tests"
|
||||
const stateTestsFolderName = "state-tests"
|
||||
|
||||
var tests []interface{}
|
||||
|
||||
dirs, err := ioutil.ReadDir(yamlDir)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not read YAML tests directory: %v", err)
|
||||
}
|
||||
for _, dir := range dirs {
|
||||
files, err := ioutil.ReadDir(path.Join(yamlDir, dir.Name()))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not read YAML tests directory: %v", err)
|
||||
}
|
||||
for _, file := range files {
|
||||
filePath := path.Join(yamlDir, dir.Name(), file.Name())
|
||||
// #nosec G304
|
||||
data, err := ioutil.ReadFile(filePath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not read YAML file: %v", err)
|
||||
}
|
||||
switch dir.Name() {
|
||||
case forkChoiceTestsFolderName:
|
||||
decoded := &backend.ForkChoiceTest{}
|
||||
if err := yaml.Unmarshal(data, decoded); err != nil {
|
||||
return nil, fmt.Errorf("could not unmarshal YAML file into test struct: %v", err)
|
||||
}
|
||||
tests = append(tests, decoded)
|
||||
case shuffleTestsFolderName:
|
||||
decoded := &backend.ShuffleTest{}
|
||||
if err := yaml.Unmarshal(data, decoded); err != nil {
|
||||
return nil, fmt.Errorf("could not unmarshal YAML file into test struct: %v", err)
|
||||
}
|
||||
tests = append(tests, decoded)
|
||||
case stateTestsFolderName:
|
||||
decoded := &backend.StateTest{}
|
||||
if err := yaml.Unmarshal(data, decoded); err != nil {
|
||||
return nil, fmt.Errorf("could not unmarshal YAML file into test struct: %v", err)
|
||||
}
|
||||
tests = append(tests, decoded)
|
||||
}
|
||||
}
|
||||
}
|
||||
return tests, nil
|
||||
}
|
||||
|
||||
// runTests dispatches each decoded test document to the matching runner on
// the simulated backend, logging suite metadata and stopping at the first
// failure. Unknown document types are an error.
func runTests(tests []interface{}, sb *backend.SimulatedBackend) error {
	for _, tt := range tests {
		switch typedTest := tt.(type) {
		case *backend.ForkChoiceTest:
			log.Infof("Title: %v", typedTest.Title)
			log.Infof("Summary: %v", typedTest.Summary)
			log.Infof("Test Suite: %v", typedTest.TestSuite)
			for _, testCase := range typedTest.TestCases {
				if err := sb.RunForkChoiceTest(testCase); err != nil {
					return fmt.Errorf("chain test failed: %v", err)
				}
			}
			log.Info("Test PASSED")
		case *backend.ShuffleTest:
			log.Infof("Title: %v", typedTest.Title)
			log.Infof("Summary: %v", typedTest.Summary)
			log.Infof("Test Suite: %v", typedTest.TestSuite)
			log.Infof("Fork: %v", typedTest.Fork)
			log.Infof("Version: %v", typedTest.Version)
			for _, testCase := range typedTest.TestCases {
				if err := sb.RunShuffleTest(testCase); err != nil {
					return fmt.Errorf("chain test failed: %v", err)
				}
			}
			log.Info("Test PASSED")
		case *backend.StateTest:
			log.Infof("Title: %v", typedTest.Title)
			log.Infof("Summary: %v", typedTest.Summary)
			log.Infof("Test Suite: %v", typedTest.TestSuite)
			log.Infof("Fork: %v", typedTest.Fork)
			log.Infof("Version: %v", typedTest.Version)
			for _, testCase := range typedTest.TestCases {
				if err := sb.RunStateTransitionTest(testCase); err != nil {
					return fmt.Errorf("chain test failed: %v", err)
				}
			}
			log.Info("Test PASSED")
		default:
			return fmt.Errorf("receive unknown test type: %T", typedTest)
		}
		log.Info("-----------------------------")
	}
	return nil
}
|
||||
|
||||
// main parses the -tests-dir flag, loads every YAML test under it, runs the
// whole suite against a fresh simulated backend, and logs the total runtime.
// Any load or test failure terminates the process via log.Fatalf.
func main() {
	var yamlDir = flag.String("tests-dir", "", "path to directory of yaml tests")
	flag.Parse()

	// Timestamped, prefixed log output for readable test reports.
	customFormatter := new(prefixed.TextFormatter)
	customFormatter.TimestampFormat = "2006-01-02 15:04:05"
	customFormatter.FullTimestamp = true
	log.SetFormatter(customFormatter)

	tests, err := readTestsFromYaml(*yamlDir)
	if err != nil {
		log.Fatalf("Fail to load tests from yaml: %v", err)
	}

	sb, err := backend.NewSimulatedBackend()
	if err != nil {
		log.Fatalf("Could not create backend: %v", err)
	}

	log.Info("----Running Tests----")
	startTime := time.Now()

	err = runTests(tests, sb)
	if err != nil {
		log.Fatalf("Test failed %v", err)
	}

	endTime := time.Now()
	log.Infof("Test Runs Finished In: %v", endTime.Sub(startTime))
}
|
||||
@@ -1,63 +0,0 @@
|
||||
# Credits to Danny Ryan (Ethereum Foundation)
|
||||
---
|
||||
|
||||
title: Sample Ethereum 2.0 Beacon Chain Test
|
||||
summary: Basic, functioning fork choice rule for Ethereum 2.0
|
||||
test_suite: prysm
|
||||
test_cases:
|
||||
- config:
|
||||
validator_count: 100
|
||||
cycle_length: 8
|
||||
shard_count: 64
|
||||
min_committee_size: 8
|
||||
slots:
|
||||
# "slot_number" has a minimum of 1
|
||||
- slot_number: 1
|
||||
new_block:
|
||||
id: A
|
||||
# "*" is used for the genesis block
|
||||
parent: "*"
|
||||
attestations:
|
||||
- block: A
|
||||
# the following is a shorthand string for [0, 1, 2, 3, 4, 5]
|
||||
validators: "0-5"
|
||||
- slot_number: 2
|
||||
new_block:
|
||||
id: B
|
||||
parent: A
|
||||
attestations:
|
||||
- block: B
|
||||
validators: "0-5"
|
||||
- slot_number: 3
|
||||
new_block:
|
||||
id: C
|
||||
parent: A
|
||||
attestations:
|
||||
# attestation "committee_slot" defaults to the slot during which the attestation occurs
|
||||
- block: C
|
||||
validators: "2-7"
|
||||
# default "committee_slot" can be directly overridden
|
||||
- block: C
|
||||
committee_slot: 2
|
||||
validators: "6, 7"
|
||||
- slot_number: 4
|
||||
new_block:
|
||||
id: D
|
||||
parent: C
|
||||
attestations:
|
||||
- block: D
|
||||
validators: "1-4"
|
||||
# slots can be skipped entirely (5 in this case)
|
||||
- slot_number: 6
|
||||
new_block:
|
||||
id: E
|
||||
parent: D
|
||||
attestations:
|
||||
- block: E
|
||||
validators: "0-4"
|
||||
- block: B
|
||||
validators: "5, 6, 7"
|
||||
results:
|
||||
head: E
|
||||
last_justified_block: "*"
|
||||
last_finalized_block: "*"
|
||||
@@ -1,44 +0,0 @@
|
||||
# Credits to Danny Ryan (Ethereum Foundation)
|
||||
---
|
||||
|
||||
title: Shuffling Algorithm Tests
|
||||
summary: Test vectors for shuffling a list based upon a seed using `shuffle`
|
||||
test_suite: shuffle
|
||||
fork: tchaikovsky
|
||||
version: 1.0
|
||||
|
||||
test_cases:
|
||||
- config:
|
||||
validator_count: 100
|
||||
cycle_length: 8
|
||||
shard_count: 32
|
||||
min_committee_size: 8
|
||||
- input: []
|
||||
output: []
|
||||
seed: !!binary ""
|
||||
- name: boring_list
|
||||
description: List with a single element, 0
|
||||
input: [0]
|
||||
output: [0]
|
||||
seed: !!binary ""
|
||||
- input: [255]
|
||||
output: [255]
|
||||
seed: !!binary ""
|
||||
- input: [4, 6, 2, 6, 1, 4, 6, 2, 1, 5]
|
||||
output: [2, 1, 6, 1, 4, 5, 6, 4, 6, 2]
|
||||
seed: !!binary ""
|
||||
- input: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
|
||||
output: [4, 9, 1, 13, 8, 3, 5, 10, 7, 6, 11, 2, 12]
|
||||
seed: !!binary ""
|
||||
- input: [65, 6, 2, 6, 1, 4, 6, 2, 1, 5]
|
||||
output: [6, 1, 2, 2, 6, 6, 1, 5, 65, 4]
|
||||
seed: !!binary |
|
||||
JlAYJ5H2j8g7PLiPHZI/rTS1uAvKiieOrifPN6Moso0=
|
||||
- input: [35, 6, 2, 6, 1, 4, 6, 2, 1, 5, 7, 98, 3, 2, 11]
|
||||
output: [35, 1, 6, 4, 6, 6, 5, 11, 2, 3, 7, 1, 2, 2, 98]
|
||||
seed: !!binary |
|
||||
VGhlIHF1aWNrIGJyb3duIGZveCBqdW1wcyBvdmVyIDEzIGxhenkgZG9ncy4=
|
||||
- input: [35, 6, 2, 6, 1, 4, 6, 2, 1, 5, 7, 98, 3, 2, 11]
|
||||
output: [98, 6, 6, 11, 5, 35, 2, 7, 2, 6, 4, 2, 1, 3, 1]
|
||||
seed: !!binary |
|
||||
rDTbe23J4UA0yLIurjbJqk49VcavAC0Nysas+l5MlwvLc0B/JqQ=
|
||||
@@ -1,82 +0,0 @@
|
||||
title: Sample Ethereum Serenity State Transition Tests
|
||||
summary: Testing full state transition block processing
|
||||
test_suite: prysm
|
||||
fork: sapphire
|
||||
version: 1.0
|
||||
test_cases:
|
||||
- config:
|
||||
slots_per_epoch: 64
|
||||
deposits_for_chain_start: 64
|
||||
num_slots: 32 # Testing advancing state to slot < SlotsPerEpoch
|
||||
results:
|
||||
slot: 9223372036854775840
|
||||
num_validators: 64
|
||||
- config:
|
||||
slots_per_epoch: 64
|
||||
deposits_for_chain_start: 64
|
||||
num_slots: 64 # Testing advancing state to exactly slot == SlotsPerEpoch
|
||||
deposits:
|
||||
- slot: 9223372036854775809
|
||||
amount: 32
|
||||
merkle_index: 64
|
||||
pubkey: !!binary |
|
||||
SlAAbShSkUg7PLiPHZI/rTS1uAvKiieOrifPN6Moso0=
|
||||
- slot: 9223372036854775823
|
||||
amount: 32
|
||||
merkle_index: 65
|
||||
pubkey: !!binary |
|
||||
Oklajsjdkaklsdlkajsdjlajslkdjlkasjlkdjlajdsd
|
||||
- slot: 9223372036854775863
|
||||
amount: 32
|
||||
merkle_index: 66
|
||||
pubkey: !!binary |
|
||||
LkmqmqoodLKAslkjdkajsdljasdkajlksjdasldjasdd
|
||||
proposer_slashings:
|
||||
- slot: 9223372036854775824 # At slot 9223372036854775824, we trigger a proposal slashing occurring
|
||||
proposer_index: 50 # We penalize the proposer that was just added from slot 15
|
||||
proposal_1_shard: 0
|
||||
proposal_1_slot: 15
|
||||
proposal_1_root: !!binary |
|
||||
LkmqmqoodLKAslkjdkajsdljasdkajlksjdasldjasdd
|
||||
proposal_2_shard: 0
|
||||
proposal_2_slot: 15
|
||||
proposal_2_root: !!binary |
|
||||
LkmqmqoodLKAslkjdkajsdljasdkajlksjdasldjasdd
|
||||
attester_slashings:
|
||||
- slot: 9223372036854775868 # At slot 59, we trigger a attester slashing
|
||||
slashable_attestation_1_slot: 9223372036854775864
|
||||
slashable_attestation_2_slot: 9223372036854775864
|
||||
slashable_attestation_1_justified_epoch: 0
|
||||
slashable_attestation_2_justified_epoch: 1
|
||||
slashable_attestation_1_custody_bitfield: !binary "F"
|
||||
slashable_attestation_1_validator_indices: [1, 2, 3, 4, 5, 6, 7, 51]
|
||||
slashable_attestation_2_custody_bitfield: !binary "F"
|
||||
slashable_attestation_2_validator_indices: [1, 2, 3, 4, 5, 6, 7, 51]
|
||||
validator_exits:
|
||||
- epoch: 144115188075855872
|
||||
validator_index: 45 # At slot 9223372036854775868, validator at index 45 triggers a voluntary exit
|
||||
results:
|
||||
slot: 9223372036854775872
|
||||
num_validators: 67
|
||||
penalized_validators: [50, 51] # We test that the validators at indices were indeed penalized
|
||||
exited_validators: [45] # We confirm the indices of validators that willingly exited the registry
|
||||
# TODO(1387): Waiting for spec to stable to proceed with this test case
|
||||
# - config:
|
||||
# skip_slots: [10, 20]
|
||||
# slots_per_epoch: 64
|
||||
# deposits_for_chain_start: 1000
|
||||
# num_slots: 128 # Testing advancing state's slot == 2*SlotsPerEpoch
|
||||
# deposits:
|
||||
# - slot: 10
|
||||
# amount: 32
|
||||
# merkle_index: 0
|
||||
# pubkey: !!binary |
|
||||
# SlAAbShSkUg7PLiPHZI/rTS1uAvKiieOrifPN6Moso0=
|
||||
# - slot: 20
|
||||
# amount: 32
|
||||
# merkle_index: 1
|
||||
# pubkey: !!binary |
|
||||
# Oklajsjdkaklsdlkajsdjlajslkdjlkasjlkdjlajdsd
|
||||
# results:
|
||||
# slot: 128
|
||||
# num_validators: 1000 # Validator registry should not have grown if slots 10 and 20 were skipped
|
||||
@@ -1,49 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/chaintest/backend"
|
||||
"github.com/prysmaticlabs/prysm/shared/featureconfig"
|
||||
)
|
||||
|
||||
func init() {
|
||||
featureconfig.InitFeatureConfig(&featureconfig.FeatureFlagConfig{
|
||||
EnableCrosslinks: true,
|
||||
})
|
||||
}
|
||||
|
||||
func TestFromYaml_Pass(t *testing.T) {
|
||||
tests, err := readTestsFromYaml("./tests")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read yaml files: %v", err)
|
||||
}
|
||||
|
||||
sb, err := backend.NewSimulatedBackend()
|
||||
if err != nil {
|
||||
t.Fatalf("Could not create backend: %v", err)
|
||||
}
|
||||
|
||||
if err := runTests(tests, sb); err != nil {
|
||||
t.Errorf("Failed to run yaml tests %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkStateTestFromYaml(b *testing.B) {
|
||||
tests, err := readTestsFromYaml("./tests")
|
||||
if err != nil {
|
||||
b.Fatalf("Failed to read yaml files: %v", err)
|
||||
}
|
||||
|
||||
sb, err := backend.NewSimulatedBackend()
|
||||
if err != nil {
|
||||
b.Fatalf("Could not create backend: %v", err)
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
if err := runTests(tests, sb); err != nil {
|
||||
b.Errorf("Failed to run yaml tests %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,27 +0,0 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["rewards_penalties.go"],
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/core/balances",
|
||||
visibility = ["//beacon-chain:__subpackages__"],
|
||||
deps = [
|
||||
"//beacon-chain/core/epoch:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//proto/beacon/p2p/v1:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
"//shared/sliceutil:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
size = "small",
|
||||
srcs = ["rewards_penalties_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
"//proto/beacon/p2p/v1:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
],
|
||||
)
|
||||
@@ -1,379 +0,0 @@
|
||||
// Package balances contains libraries to calculate reward and
|
||||
// penalty quotients. It computes new validator balances
|
||||
// for justifications, crosslinks and attestation inclusions. It
|
||||
// also computes penalties for the inactive validators.
|
||||
package balances
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/sliceutil"
|
||||
)
|
||||
|
||||
// ExpectedFFGSource applies rewards or penalties
|
||||
// for an expected FFG source. It uses total justified
|
||||
// attesting balances, total validator balances and base
|
||||
// reward quotient to calculate the reward amount.
|
||||
// Validators who voted for previous justified hash
|
||||
// will get a reward, everyone else will get a penalty.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
// Any validator index in previous_epoch_justified_attester_indices
|
||||
// gains base_reward(state, index) * previous_epoch_justified_attesting_balance // total_balance.
|
||||
// Any active validator v not in previous_epoch_justified_attester_indices
|
||||
// loses base_reward(state, index).
|
||||
func ExpectedFFGSource(
|
||||
state *pb.BeaconState,
|
||||
justifiedAttesterIndices []uint64,
|
||||
justifiedAttestingBalance uint64,
|
||||
totalBalance uint64) *pb.BeaconState {
|
||||
baseRewardQuotient := helpers.BaseRewardQuotient(totalBalance)
|
||||
|
||||
for _, index := range justifiedAttesterIndices {
|
||||
state.ValidatorBalances[index] +=
|
||||
helpers.BaseReward(state, index, baseRewardQuotient) *
|
||||
justifiedAttestingBalance /
|
||||
totalBalance
|
||||
}
|
||||
activeValidatorIndices := helpers.ActiveValidatorIndices(state.ValidatorRegistry, helpers.CurrentEpoch(state))
|
||||
didNotAttestIndices := sliceutil.NotUint64(justifiedAttesterIndices, activeValidatorIndices)
|
||||
|
||||
for _, index := range didNotAttestIndices {
|
||||
state.ValidatorBalances[index] -=
|
||||
helpers.BaseReward(state, index, baseRewardQuotient)
|
||||
}
|
||||
return state
|
||||
}
|
||||
|
||||
// ExpectedFFGTarget applies rewards or penalties
|
||||
// for an expected FFG target. It uses total boundary
|
||||
// attesting balances, total validator balances and base
|
||||
// reward quotient to calculate the reward amount.
|
||||
// Validators who voted for epoch boundary block
|
||||
// will get a reward, everyone else will get a penalty.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
// Any validator index in previous_epoch_boundary_attester_indices gains
|
||||
// base_reward(state, index) * previous_epoch_boundary_attesting_balance // total_balance.
|
||||
// Any active validator index not in previous_epoch_boundary_attester_indices loses
|
||||
// base_reward(state, index).
|
||||
func ExpectedFFGTarget(
|
||||
state *pb.BeaconState,
|
||||
boundaryAttesterIndices []uint64,
|
||||
boundaryAttestingBalance uint64,
|
||||
totalBalance uint64) *pb.BeaconState {
|
||||
|
||||
baseRewardQuotient := helpers.BaseRewardQuotient(totalBalance)
|
||||
|
||||
for _, index := range boundaryAttesterIndices {
|
||||
state.ValidatorBalances[index] +=
|
||||
helpers.BaseReward(state, index, baseRewardQuotient) *
|
||||
boundaryAttestingBalance /
|
||||
totalBalance
|
||||
}
|
||||
activeValidatorIndices := helpers.ActiveValidatorIndices(state.ValidatorRegistry, helpers.CurrentEpoch(state))
|
||||
didNotAttestIndices := sliceutil.NotUint64(boundaryAttesterIndices, activeValidatorIndices)
|
||||
|
||||
for _, index := range didNotAttestIndices {
|
||||
state.ValidatorBalances[index] -=
|
||||
helpers.BaseReward(state, index, baseRewardQuotient)
|
||||
}
|
||||
return state
|
||||
}
|
||||
|
||||
// ExpectedBeaconChainHead applies rewards or penalties
|
||||
// for an expected beacon chain head. It uses total head
|
||||
// attesting balances, total validator balances and base
|
||||
// reward quotient to calculate the reward amount.
|
||||
// Validators who voted for the canonical head block
|
||||
// will get a reward, everyone else will get a penalty.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
// Any validator index in previous_epoch_head_attester_indices gains
|
||||
// base_reward(state, index) * previous_epoch_head_attesting_balance // total_balance).
|
||||
// Any active validator index not in previous_epoch_head_attester_indices loses
|
||||
// base_reward(state, index).
|
||||
func ExpectedBeaconChainHead(
|
||||
state *pb.BeaconState,
|
||||
headAttesterIndices []uint64,
|
||||
headAttestingBalance uint64,
|
||||
totalBalance uint64) *pb.BeaconState {
|
||||
|
||||
baseRewardQuotient := helpers.BaseRewardQuotient(totalBalance)
|
||||
|
||||
for _, index := range headAttesterIndices {
|
||||
state.ValidatorBalances[index] +=
|
||||
helpers.BaseReward(state, index, baseRewardQuotient) *
|
||||
headAttestingBalance /
|
||||
totalBalance
|
||||
}
|
||||
activeValidatorIndices := helpers.ActiveValidatorIndices(state.ValidatorRegistry, helpers.CurrentEpoch(state))
|
||||
didNotAttestIndices := sliceutil.NotUint64(headAttesterIndices, activeValidatorIndices)
|
||||
|
||||
for _, index := range didNotAttestIndices {
|
||||
state.ValidatorBalances[index] -=
|
||||
helpers.BaseReward(state, index, baseRewardQuotient)
|
||||
}
|
||||
return state
|
||||
}
|
||||
|
||||
// InclusionDistance applies rewards based on
|
||||
// inclusion distance. It uses calculated inclusion distance
|
||||
// and base reward quotient to calculate the reward amount.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
// Any validator index in previous_epoch_attester_indices gains
|
||||
// base_reward(state, index) * MIN_ATTESTATION_INCLUSION_DELAY //
|
||||
// inclusion_distance(state, index)
|
||||
func InclusionDistance(
|
||||
state *pb.BeaconState,
|
||||
attesterIndices []uint64,
|
||||
totalBalance uint64,
|
||||
inclusionDistanceByAttester map[uint64]uint64) (*pb.BeaconState, error) {
|
||||
|
||||
baseRewardQuotient := helpers.BaseRewardQuotient(totalBalance)
|
||||
|
||||
for _, index := range attesterIndices {
|
||||
inclusionDistance, ok := inclusionDistanceByAttester[index]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("could not get inclusion distance for attester: %d", index)
|
||||
}
|
||||
if inclusionDistance == 0 {
|
||||
return nil, errors.New("could not process inclusion distance: 0")
|
||||
}
|
||||
state.ValidatorBalances[index] +=
|
||||
helpers.BaseReward(state, index, baseRewardQuotient) *
|
||||
params.BeaconConfig().MinAttestationInclusionDelay /
|
||||
inclusionDistance
|
||||
}
|
||||
return state, nil
|
||||
}
|
||||
|
||||
// InactivityFFGSource applies penalties to inactive
|
||||
// validators that missed to vote FFG source over an
|
||||
// extended of time. (epochs_since_finality > 4)
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
// Any active validator index not in previous_epoch_justified_attester_indices,
|
||||
// loses inactivity_penalty(state, index, epochs_since_finality)
|
||||
func InactivityFFGSource(
|
||||
state *pb.BeaconState,
|
||||
justifiedAttesterIndices []uint64,
|
||||
totalBalance uint64,
|
||||
epochsSinceFinality uint64) *pb.BeaconState {
|
||||
|
||||
baseRewardQuotient := helpers.BaseRewardQuotient(totalBalance)
|
||||
activeValidatorIndices := helpers.ActiveValidatorIndices(state.ValidatorRegistry, helpers.CurrentEpoch(state))
|
||||
didNotAttestIndices := sliceutil.NotUint64(justifiedAttesterIndices, activeValidatorIndices)
|
||||
|
||||
for _, index := range didNotAttestIndices {
|
||||
state.ValidatorBalances[index] -=
|
||||
helpers.InactivityPenalty(state, index, baseRewardQuotient, epochsSinceFinality)
|
||||
}
|
||||
return state
|
||||
}
|
||||
|
||||
// InactivityFFGTarget applies penalties to inactive
|
||||
// validators that missed to vote FFG target over an
|
||||
// extended of time. (epochs_since_finality > 4)
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
// Any active validator index not in previous_epoch_boundary_attester_indices,
|
||||
// loses inactivity_penalty(state, index, epochs_since_finality)
|
||||
func InactivityFFGTarget(
|
||||
state *pb.BeaconState,
|
||||
boundaryAttesterIndices []uint64,
|
||||
totalBalance uint64,
|
||||
epochsSinceFinality uint64) *pb.BeaconState {
|
||||
|
||||
baseRewardQuotient := helpers.BaseRewardQuotient(totalBalance)
|
||||
activeValidatorIndices := helpers.ActiveValidatorIndices(state.ValidatorRegistry, helpers.CurrentEpoch(state))
|
||||
didNotAttestIndices := sliceutil.NotUint64(boundaryAttesterIndices, activeValidatorIndices)
|
||||
|
||||
for _, index := range didNotAttestIndices {
|
||||
state.ValidatorBalances[index] -=
|
||||
helpers.InactivityPenalty(state, index, baseRewardQuotient, epochsSinceFinality)
|
||||
}
|
||||
return state
|
||||
}
|
||||
|
||||
// InactivityChainHead applies penalties to inactive validators
|
||||
// that missed to vote on canonical head over an extended of time.
|
||||
// (epochs_since_finality > 4)
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
// Any active validator index not in previous_epoch_head_attester_indices,
|
||||
// loses base_reward(state, index)
|
||||
func InactivityChainHead(
|
||||
state *pb.BeaconState,
|
||||
headAttesterIndices []uint64,
|
||||
totalBalance uint64) *pb.BeaconState {
|
||||
|
||||
baseRewardQuotient := helpers.BaseRewardQuotient(totalBalance)
|
||||
activeValidatorIndices := helpers.ActiveValidatorIndices(state.ValidatorRegistry, helpers.CurrentEpoch(state))
|
||||
didNotAttestIndices := sliceutil.NotUint64(headAttesterIndices, activeValidatorIndices)
|
||||
|
||||
for _, index := range didNotAttestIndices {
|
||||
state.ValidatorBalances[index] -=
|
||||
helpers.BaseReward(state, index, baseRewardQuotient)
|
||||
}
|
||||
return state
|
||||
}
|
||||
|
||||
// InactivityExitedPenalties applies additional (2x) penalties
|
||||
// to inactive validators with status EXITED_WITH_PENALTY.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
// Any active_validator index with validator.slashed_epoch <= current_epoch,
|
||||
// loses 2 * inactivity_penalty(state, index, epochs_since_finality) +
|
||||
// base_reward(state, index).
|
||||
func InactivityExitedPenalties(
|
||||
state *pb.BeaconState,
|
||||
totalBalance uint64,
|
||||
epochsSinceFinality uint64) *pb.BeaconState {
|
||||
|
||||
baseRewardQuotient := helpers.BaseRewardQuotient(totalBalance)
|
||||
currentEpoch := helpers.CurrentEpoch(state)
|
||||
activeValidatorIndices := helpers.ActiveValidatorIndices(state.ValidatorRegistry, currentEpoch)
|
||||
|
||||
for _, index := range activeValidatorIndices {
|
||||
if state.ValidatorRegistry[index].SlashedEpoch <= currentEpoch {
|
||||
state.ValidatorBalances[index] -=
|
||||
2*helpers.InactivityPenalty(state, index, baseRewardQuotient, epochsSinceFinality) +
|
||||
helpers.BaseReward(state, index, baseRewardQuotient)
|
||||
}
|
||||
}
|
||||
return state
|
||||
}
|
||||
|
||||
// InactivityInclusionDistance applies penalties in relation with
|
||||
// inclusion delay to inactive validators.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
// Any validator index in previous_epoch_attester_indices loses
|
||||
// base_reward(state, index) - base_reward(state, index) *
|
||||
// MIN_ATTESTATION_INCLUSION_DELAY // inclusion_distance(state, index)
|
||||
func InactivityInclusionDistance(
|
||||
state *pb.BeaconState,
|
||||
attesterIndices []uint64,
|
||||
totalBalance uint64,
|
||||
inclusionDistanceByAttester map[uint64]uint64) (*pb.BeaconState, error) {
|
||||
baseRewardQuotient := helpers.BaseRewardQuotient(totalBalance)
|
||||
|
||||
for _, index := range attesterIndices {
|
||||
inclusionDistance, ok := inclusionDistanceByAttester[index]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("could not get inclusion distance for attester: %d", index)
|
||||
}
|
||||
baseReward := helpers.BaseReward(state, index, baseRewardQuotient)
|
||||
state.ValidatorBalances[index] -= baseReward -
|
||||
baseReward*params.BeaconConfig().MinAttestationInclusionDelay/
|
||||
inclusionDistance
|
||||
}
|
||||
return state, nil
|
||||
}
|
||||
|
||||
// AttestationInclusion awards the the beacon
|
||||
// proposers who included previous epoch attestations.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
// For each index in previous_epoch_attester_indices,
|
||||
// we determine the proposer proposer_index =
|
||||
// get_beacon_proposer_index(state, inclusion_slot(state, index))
|
||||
// and set state.validator_balances[proposer_index] +=
|
||||
// base_reward(state, index) // ATTESTATION_INCLUSION_REWARD_QUOTIENT
|
||||
func AttestationInclusion(
|
||||
state *pb.BeaconState,
|
||||
totalBalance uint64,
|
||||
prevEpochAttesterIndices []uint64,
|
||||
inclusionSlotByAttester map[uint64]uint64) (*pb.BeaconState, error) {
|
||||
|
||||
baseRewardQuotient := helpers.BaseRewardQuotient(totalBalance)
|
||||
for _, index := range prevEpochAttesterIndices {
|
||||
// Get the attestation's inclusion slot using the attestor's index.
|
||||
slot, ok := inclusionSlotByAttester[index]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("could not get inclusion slot for attester: %d", index)
|
||||
}
|
||||
proposerIndex, err := helpers.BeaconProposerIndex(state, slot)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get proposer index: %v", err)
|
||||
}
|
||||
state.ValidatorBalances[proposerIndex] +=
|
||||
helpers.BaseReward(state, proposerIndex, baseRewardQuotient) /
|
||||
params.BeaconConfig().AttestationInclusionRewardQuotient
|
||||
}
|
||||
return state, nil
|
||||
}
|
||||
|
||||
// Crosslinks awards or slashs attesters
|
||||
// for attesting shard cross links.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
// For slot in range(get_epoch_start_slot(previous_epoch), get_epoch_start_slot(current_epoch)),
|
||||
// let crosslink_committees_at_slot = get_crosslink_committees_at_slot(slot).
|
||||
// For every (crosslink_committee, shard) in crosslink_committee_at_slot,
|
||||
// and every index in crosslink_committee:
|
||||
// If index in attesting_validators(crosslink_committee),
|
||||
// state.validator_balances[index] += base_reward(state, index) *
|
||||
// total_attesting_balance(crosslink_committee) //
|
||||
// get_total_balance(state, crosslink_committee)).
|
||||
// If index not in attesting_validators(crosslink_committee),
|
||||
// state.validator_balances[index] -= base_reward(state, index).
|
||||
func Crosslinks(
|
||||
state *pb.BeaconState,
|
||||
thisEpochAttestations []*pb.PendingAttestation,
|
||||
prevEpochAttestations []*pb.PendingAttestation) (*pb.BeaconState, error) {
|
||||
|
||||
prevEpoch := helpers.PrevEpoch(state)
|
||||
currentEpoch := helpers.CurrentEpoch(state)
|
||||
startSlot := helpers.StartSlot(prevEpoch)
|
||||
endSlot := helpers.StartSlot(currentEpoch)
|
||||
|
||||
for i := startSlot; i < endSlot; i++ {
|
||||
// RegistryChange is a no-op when requesting slot in current and previous epoch.
|
||||
// Process crosslinks rewards will never request crosslink committees of next epoch.
|
||||
crosslinkCommittees, err := helpers.CrosslinkCommitteesAtSlot(state, i, false /* registryChange */)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get shard committees for slot %d: %v",
|
||||
i-params.BeaconConfig().GenesisSlot, err)
|
||||
}
|
||||
for _, crosslinkCommittee := range crosslinkCommittees {
|
||||
shard := crosslinkCommittee.Shard
|
||||
committee := crosslinkCommittee.Committee
|
||||
totalAttestingBalance, err :=
|
||||
epoch.TotalAttestingBalance(state, shard, thisEpochAttestations, prevEpochAttestations)
|
||||
if err != nil {
|
||||
return nil,
|
||||
fmt.Errorf("could not get attesting balance for shard committee %d: %v", shard, err)
|
||||
}
|
||||
totalBalance := epoch.TotalBalance(state, committee)
|
||||
baseRewardQuotient := helpers.BaseRewardQuotient(totalBalance)
|
||||
|
||||
attestingIndices, err := epoch.AttestingValidators(
|
||||
state,
|
||||
shard,
|
||||
thisEpochAttestations,
|
||||
prevEpochAttestations)
|
||||
if err != nil {
|
||||
return nil,
|
||||
fmt.Errorf("could not get attesting indices for shard committee %d: %v", shard, err)
|
||||
}
|
||||
for _, index := range committee {
|
||||
baseReward := helpers.BaseReward(state, index, baseRewardQuotient)
|
||||
if sliceutil.IsInUint64(index, attestingIndices) {
|
||||
state.ValidatorBalances[index] +=
|
||||
baseReward * totalAttestingBalance / totalBalance
|
||||
} else {
|
||||
state.ValidatorBalances[index] -= baseReward
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return state, nil
|
||||
}
|
||||
@@ -1,677 +0,0 @@
|
||||
package balances
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
)
|
||||
|
||||
func TestFFGSrcRewardsPenalties_AccurateBalances(t *testing.T) {
|
||||
tests := []struct {
|
||||
voted []uint64
|
||||
balanceAfterSrcRewardPenalties []uint64
|
||||
}{
|
||||
// voted represents the validator indices that voted for FFG source,
|
||||
// balanceAfterSrcRewardPenalties represents their final balances,
|
||||
// validators who voted should get an increase, who didn't should get a decrease.
|
||||
{[]uint64{}, []uint64{31999427550, 31999427550, 31999427550, 31999427550}},
|
||||
{[]uint64{0, 1}, []uint64{32000286225, 32000286225, 31999427550, 31999427550}},
|
||||
{[]uint64{0, 1, 2, 3}, []uint64{32000572450, 32000572450, 32000572450, 32000572450}},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
validatorBalances := make([]uint64, 4)
|
||||
for i := 0; i < len(validatorBalances); i++ {
|
||||
validatorBalances[i] = params.BeaconConfig().MaxDepositAmount
|
||||
}
|
||||
state := &pb.BeaconState{
|
||||
ValidatorRegistry: []*pb.Validator{
|
||||
{ExitEpoch: params.BeaconConfig().FarFutureEpoch},
|
||||
{ExitEpoch: params.BeaconConfig().FarFutureEpoch},
|
||||
{ExitEpoch: params.BeaconConfig().FarFutureEpoch},
|
||||
{ExitEpoch: params.BeaconConfig().FarFutureEpoch},
|
||||
},
|
||||
ValidatorBalances: validatorBalances,
|
||||
}
|
||||
state = ExpectedFFGSource(
|
||||
state,
|
||||
tt.voted,
|
||||
uint64(len(tt.voted))*params.BeaconConfig().MaxDepositAmount,
|
||||
uint64(len(validatorBalances))*params.BeaconConfig().MaxDepositAmount)
|
||||
|
||||
if !reflect.DeepEqual(state.ValidatorBalances, tt.balanceAfterSrcRewardPenalties) {
|
||||
t.Errorf("FFGSrcRewardsPenalties(%v) = %v, wanted: %v",
|
||||
tt.voted, state.ValidatorBalances, tt.balanceAfterSrcRewardPenalties)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFFGTargetRewardsPenalties_AccurateBalances(t *testing.T) {
|
||||
tests := []struct {
|
||||
voted []uint64
|
||||
balanceAfterTgtRewardPenalties []uint64
|
||||
}{
|
||||
// voted represents the validator indices that voted for FFG target,
|
||||
// balanceAfterTgtRewardPenalties represents their final balances,
|
||||
// validators who voted should get an increase, who didn't should get a decrease.
|
||||
{[]uint64{}, []uint64{31999427550, 31999427550, 31999427550, 31999427550}},
|
||||
{[]uint64{0, 1}, []uint64{32000286225, 32000286225, 31999427550, 31999427550}},
|
||||
{[]uint64{0, 1, 2, 3}, []uint64{32000572450, 32000572450, 32000572450, 32000572450}},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
validatorBalances := make([]uint64, 4)
|
||||
for i := 0; i < len(validatorBalances); i++ {
|
||||
validatorBalances[i] = params.BeaconConfig().MaxDepositAmount
|
||||
}
|
||||
state := &pb.BeaconState{
|
||||
ValidatorRegistry: []*pb.Validator{
|
||||
{ExitEpoch: params.BeaconConfig().FarFutureEpoch},
|
||||
{ExitEpoch: params.BeaconConfig().FarFutureEpoch},
|
||||
{ExitEpoch: params.BeaconConfig().FarFutureEpoch},
|
||||
{ExitEpoch: params.BeaconConfig().FarFutureEpoch},
|
||||
},
|
||||
ValidatorBalances: validatorBalances,
|
||||
}
|
||||
state = ExpectedFFGTarget(
|
||||
state,
|
||||
tt.voted,
|
||||
uint64(len(tt.voted))*params.BeaconConfig().MaxDepositAmount,
|
||||
uint64(len(validatorBalances))*params.BeaconConfig().MaxDepositAmount)
|
||||
|
||||
if !reflect.DeepEqual(state.ValidatorBalances, tt.balanceAfterTgtRewardPenalties) {
|
||||
t.Errorf("FFGTargetRewardsPenalties(%v) = %v, wanted: %v",
|
||||
tt.voted, state.ValidatorBalances, tt.balanceAfterTgtRewardPenalties)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestChainHeadRewardsPenalties_AccuratePenalties(t *testing.T) {
|
||||
tests := []struct {
|
||||
voted []uint64
|
||||
balanceAfterHeadRewardPenalties []uint64
|
||||
}{
|
||||
// voted represents the validator indices that voted for canonical chain,
|
||||
// balanceAfterHeadRewardPenalties represents their final balances,
|
||||
// validators who voted should get an increase, who didn't should get a decrease.
|
||||
{[]uint64{}, []uint64{31999427550, 31999427550, 31999427550, 31999427550}},
|
||||
{[]uint64{0, 1}, []uint64{32000286225, 32000286225, 31999427550, 31999427550}},
|
||||
{[]uint64{0, 1, 2, 3}, []uint64{32000572450, 32000572450, 32000572450, 32000572450}},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
validatorBalances := make([]uint64, 4)
|
||||
for i := 0; i < len(validatorBalances); i++ {
|
||||
validatorBalances[i] = params.BeaconConfig().MaxDepositAmount
|
||||
}
|
||||
state := &pb.BeaconState{
|
||||
ValidatorRegistry: []*pb.Validator{
|
||||
{ExitEpoch: params.BeaconConfig().FarFutureEpoch},
|
||||
{ExitEpoch: params.BeaconConfig().FarFutureEpoch},
|
||||
{ExitEpoch: params.BeaconConfig().FarFutureEpoch},
|
||||
{ExitEpoch: params.BeaconConfig().FarFutureEpoch},
|
||||
},
|
||||
ValidatorBalances: validatorBalances,
|
||||
}
|
||||
state = ExpectedBeaconChainHead(
|
||||
state,
|
||||
tt.voted,
|
||||
uint64(len(tt.voted))*params.BeaconConfig().MaxDepositAmount,
|
||||
uint64(len(validatorBalances))*params.BeaconConfig().MaxDepositAmount)
|
||||
|
||||
if !reflect.DeepEqual(state.ValidatorBalances, tt.balanceAfterHeadRewardPenalties) {
|
||||
t.Errorf("ChainHeadRewardsPenalties(%v) = %v, wanted: %v",
|
||||
tt.voted, state.ValidatorBalances, tt.balanceAfterHeadRewardPenalties)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestInclusionDistRewards_AccurateRewards(t *testing.T) {
|
||||
validators := make([]*pb.Validator, params.BeaconConfig().DepositsForChainStart)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
validators[i] = &pb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
}
|
||||
var participationBitfield []byte
|
||||
// participation byte length = number of validators / target committee size / bits in a byte.
|
||||
byteLength := int(params.BeaconConfig().DepositsForChainStart / params.BeaconConfig().TargetCommitteeSize / 8)
|
||||
for i := 0; i < byteLength; i++ {
|
||||
participationBitfield = append(participationBitfield, byte(0xff))
|
||||
}
|
||||
|
||||
attestations := []*pb.PendingAttestation{
|
||||
{Data: &pb.AttestationData{
|
||||
Slot: params.BeaconConfig().GenesisSlot,
|
||||
JustifiedBlockRootHash32: []byte{},
|
||||
Shard: 0,
|
||||
CrosslinkDataRootHash32: params.BeaconConfig().ZeroHash[:],
|
||||
},
|
||||
AggregationBitfield: participationBitfield,
|
||||
InclusionSlot: params.BeaconConfig().GenesisSlot + 5,
|
||||
},
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
voted []uint64
|
||||
}{
|
||||
{[]uint64{}},
|
||||
{[]uint64{251, 192}},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
validatorBalances := make([]uint64, len(validators))
|
||||
for i := 0; i < len(validatorBalances); i++ {
|
||||
validatorBalances[i] = params.BeaconConfig().MaxDepositAmount
|
||||
}
|
||||
state := &pb.BeaconState{
|
||||
Slot: params.BeaconConfig().GenesisSlot + 5,
|
||||
ValidatorRegistry: validators,
|
||||
ValidatorBalances: validatorBalances,
|
||||
LatestAttestations: attestations,
|
||||
PreviousJustifiedRoot: []byte{},
|
||||
LatestCrosslinks: []*pb.Crosslink{
|
||||
{
|
||||
CrosslinkDataRootHash32: params.BeaconConfig().ZeroHash[:],
|
||||
Epoch: params.BeaconConfig().GenesisEpoch,
|
||||
},
|
||||
},
|
||||
}
|
||||
block := &pb.BeaconBlock{
|
||||
Body: &pb.BeaconBlockBody{
|
||||
Attestations: []*pb.Attestation{
|
||||
{
|
||||
Data: attestations[0].Data,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
if _, err := blocks.ProcessBlockAttestations(state, block, false /* verify sig */); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
inclusionMap := make(map[uint64]uint64)
|
||||
for _, voted := range tt.voted {
|
||||
inclusionMap[voted] = state.Slot
|
||||
}
|
||||
state, err := InclusionDistance(
|
||||
state,
|
||||
tt.voted,
|
||||
uint64(len(validatorBalances))*params.BeaconConfig().MaxDepositAmount,
|
||||
inclusionMap)
|
||||
if err != nil {
|
||||
t.Fatalf("could not execute InclusionDistRewards:%v", err)
|
||||
}
|
||||
|
||||
for _, i := range tt.voted {
|
||||
validatorBalances[i] = 32000055555
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(state.ValidatorBalances, validatorBalances) {
|
||||
t.Errorf("InclusionDistRewards(%v) = %v, wanted: %v",
|
||||
tt.voted, state.ValidatorBalances, validatorBalances)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestInclusionDistRewards_OutOfBounds(t *testing.T) {
|
||||
validators := make([]*pb.Validator, params.BeaconConfig().SlotsPerEpoch*2)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
validators[i] = &pb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
}
|
||||
|
||||
attestation := []*pb.PendingAttestation{
|
||||
{Data: &pb.AttestationData{Shard: 1, Slot: 0},
|
||||
AggregationBitfield: []byte{0xff}},
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
voted []uint64
|
||||
balanceAfterInclusionRewards []uint64
|
||||
}{
|
||||
{[]uint64{0, 1, 2, 3}, []uint64{}},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
state := &pb.BeaconState{
|
||||
ValidatorRegistry: validators,
|
||||
LatestAttestations: attestation,
|
||||
}
|
||||
inclusionMap := make(map[uint64]uint64)
|
||||
_, err := InclusionDistance(state, tt.voted, 0, inclusionMap)
|
||||
if err == nil {
|
||||
t.Fatal("InclusionDistRewards should have failed")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestInactivityFFGSrcPenalty_AccuratePenalties(t *testing.T) {
|
||||
tests := []struct {
|
||||
voted []uint64
|
||||
balanceAfterFFGSrcPenalty []uint64
|
||||
epochsSinceFinality uint64
|
||||
}{
|
||||
// The higher the epochs since finality, the more penalties applied.
|
||||
{[]uint64{0, 1}, []uint64{32000000000, 32000000000, 31999422782, 31999422782}, 5},
|
||||
{[]uint64{}, []uint64{31999422782, 31999422782, 31999422782, 31999422782}, 5},
|
||||
{[]uint64{}, []uint64{31999418014, 31999418014, 31999418014, 31999418014}, 10},
|
||||
{[]uint64{}, []uint64{31999408477, 31999408477, 31999408477, 31999408477}, 20},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
validatorBalances := make([]uint64, 4)
|
||||
for i := 0; i < len(validatorBalances); i++ {
|
||||
validatorBalances[i] = params.BeaconConfig().MaxDepositAmount
|
||||
}
|
||||
state := &pb.BeaconState{
|
||||
ValidatorRegistry: []*pb.Validator{
|
||||
{ExitEpoch: params.BeaconConfig().FarFutureEpoch},
|
||||
{ExitEpoch: params.BeaconConfig().FarFutureEpoch},
|
||||
{ExitEpoch: params.BeaconConfig().FarFutureEpoch},
|
||||
{ExitEpoch: params.BeaconConfig().FarFutureEpoch},
|
||||
},
|
||||
ValidatorBalances: validatorBalances,
|
||||
}
|
||||
state = InactivityFFGSource(
|
||||
state,
|
||||
tt.voted,
|
||||
uint64(len(validatorBalances))*params.BeaconConfig().MaxDepositAmount,
|
||||
tt.epochsSinceFinality)
|
||||
|
||||
if !reflect.DeepEqual(state.ValidatorBalances, tt.balanceAfterFFGSrcPenalty) {
|
||||
t.Errorf("InactivityFFGSrcPenalty(%v) = %v, wanted: %v",
|
||||
tt.voted, state.ValidatorBalances, tt.balanceAfterFFGSrcPenalty)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestInactivityFFGTargetPenalty_AccuratePenalties(t *testing.T) {
|
||||
tests := []struct {
|
||||
voted []uint64
|
||||
balanceAfterFFGTargetPenalty []uint64
|
||||
epochsSinceFinality uint64
|
||||
}{
|
||||
// The higher the epochs since finality, the more penalties applied.
|
||||
{[]uint64{0, 1}, []uint64{32000000000, 32000000000, 31999422782, 31999422782}, 5},
|
||||
{[]uint64{}, []uint64{31999422782, 31999422782, 31999422782, 31999422782}, 5},
|
||||
{[]uint64{}, []uint64{31999418014, 31999418014, 31999418014, 31999418014}, 10},
|
||||
{[]uint64{}, []uint64{31999408477, 31999408477, 31999408477, 31999408477}, 20},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
validatorBalances := make([]uint64, 4)
|
||||
for i := 0; i < len(validatorBalances); i++ {
|
||||
validatorBalances[i] = params.BeaconConfig().MaxDepositAmount
|
||||
}
|
||||
state := &pb.BeaconState{
|
||||
ValidatorRegistry: []*pb.Validator{
|
||||
{ExitEpoch: params.BeaconConfig().FarFutureEpoch},
|
||||
{ExitEpoch: params.BeaconConfig().FarFutureEpoch},
|
||||
{ExitEpoch: params.BeaconConfig().FarFutureEpoch},
|
||||
{ExitEpoch: params.BeaconConfig().FarFutureEpoch},
|
||||
},
|
||||
ValidatorBalances: validatorBalances,
|
||||
}
|
||||
state = InactivityFFGTarget(
|
||||
state,
|
||||
tt.voted,
|
||||
uint64(len(validatorBalances))*params.BeaconConfig().MaxDepositAmount,
|
||||
tt.epochsSinceFinality)
|
||||
|
||||
if !reflect.DeepEqual(state.ValidatorBalances, tt.balanceAfterFFGTargetPenalty) {
|
||||
t.Errorf("InactivityFFGTargetPenalty(%v) = %v, wanted: %v",
|
||||
tt.voted, state.ValidatorBalances, tt.balanceAfterFFGTargetPenalty)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestInactivityHeadPenalty_AccuratePenalties(t *testing.T) {
|
||||
tests := []struct {
|
||||
voted []uint64
|
||||
balanceAfterInactivityHeadPenalty []uint64
|
||||
}{
|
||||
{[]uint64{}, []uint64{31999427550, 31999427550, 31999427550, 31999427550}},
|
||||
{[]uint64{0, 1}, []uint64{32000000000, 32000000000, 31999427550, 31999427550}},
|
||||
{[]uint64{0, 1, 2, 3}, []uint64{32000000000, 32000000000, 32000000000, 32000000000}},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
validatorBalances := make([]uint64, 4)
|
||||
for i := 0; i < len(validatorBalances); i++ {
|
||||
validatorBalances[i] = params.BeaconConfig().MaxDepositAmount
|
||||
}
|
||||
state := &pb.BeaconState{
|
||||
ValidatorRegistry: []*pb.Validator{
|
||||
{ExitEpoch: params.BeaconConfig().FarFutureEpoch},
|
||||
{ExitEpoch: params.BeaconConfig().FarFutureEpoch},
|
||||
{ExitEpoch: params.BeaconConfig().FarFutureEpoch},
|
||||
{ExitEpoch: params.BeaconConfig().FarFutureEpoch},
|
||||
},
|
||||
ValidatorBalances: validatorBalances,
|
||||
}
|
||||
state = InactivityChainHead(
|
||||
state,
|
||||
tt.voted,
|
||||
uint64(len(validatorBalances))*params.BeaconConfig().MaxDepositAmount)
|
||||
|
||||
if !reflect.DeepEqual(state.ValidatorBalances, tt.balanceAfterInactivityHeadPenalty) {
|
||||
t.Errorf("InactivityHeadPenalty(%v) = %v, wanted: %v",
|
||||
tt.voted, state.ValidatorBalances, tt.balanceAfterInactivityHeadPenalty)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestInactivityExitedPenality_AccuratePenalties(t *testing.T) {
|
||||
tests := []struct {
|
||||
balanceAfterExitedPenalty []uint64
|
||||
epochsSinceFinality uint64
|
||||
}{
|
||||
{[]uint64{31998273114, 31998273114, 31998273114, 31998273114}, 5},
|
||||
{[]uint64{31998263578, 31998263578, 31998263578, 31998263578}, 10},
|
||||
{[]uint64{31997328976, 31997328976, 31997328976, 31997328976}, 500},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
validatorBalances := make([]uint64, 4)
|
||||
for i := 0; i < len(validatorBalances); i++ {
|
||||
validatorBalances[i] = params.BeaconConfig().MaxDepositAmount
|
||||
}
|
||||
state := &pb.BeaconState{
|
||||
ValidatorRegistry: []*pb.Validator{
|
||||
{ExitEpoch: params.BeaconConfig().FarFutureEpoch},
|
||||
{ExitEpoch: params.BeaconConfig().FarFutureEpoch},
|
||||
{ExitEpoch: params.BeaconConfig().FarFutureEpoch},
|
||||
{ExitEpoch: params.BeaconConfig().FarFutureEpoch}},
|
||||
ValidatorBalances: validatorBalances,
|
||||
}
|
||||
state = InactivityExitedPenalties(
|
||||
state,
|
||||
uint64(len(validatorBalances))*params.BeaconConfig().MaxDepositAmount,
|
||||
tt.epochsSinceFinality,
|
||||
)
|
||||
|
||||
if !reflect.DeepEqual(state.ValidatorBalances, tt.balanceAfterExitedPenalty) {
|
||||
t.Errorf("InactivityExitedPenalty(epochSinceFinality=%v) = %v, wanted: %v",
|
||||
tt.epochsSinceFinality, state.ValidatorBalances, tt.balanceAfterExitedPenalty)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestInactivityInclusionPenalty_AccuratePenalties(t *testing.T) {
|
||||
validators := make([]*pb.Validator, params.BeaconConfig().DepositsForChainStart)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
validators[i] = &pb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
}
|
||||
var participationBitfield []byte
|
||||
// participation byte length = number of validators / target committee size / bits in a byte.
|
||||
byteLength := int(params.BeaconConfig().DepositsForChainStart / params.BeaconConfig().TargetCommitteeSize / 8)
|
||||
for i := 0; i < byteLength; i++ {
|
||||
participationBitfield = append(participationBitfield, byte(0xff))
|
||||
}
|
||||
attestation := []*pb.PendingAttestation{
|
||||
{Data: &pb.AttestationData{Slot: params.BeaconConfig().GenesisSlot},
|
||||
AggregationBitfield: participationBitfield,
|
||||
InclusionSlot: 5},
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
voted []uint64
|
||||
}{
|
||||
{[]uint64{}},
|
||||
{[]uint64{251, 192}},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
validatorBalances := make([]uint64, params.BeaconConfig().SlotsPerEpoch*4)
|
||||
for i := 0; i < len(validatorBalances); i++ {
|
||||
validatorBalances[i] = params.BeaconConfig().MaxDepositAmount
|
||||
}
|
||||
state := &pb.BeaconState{
|
||||
Slot: params.BeaconConfig().GenesisSlot,
|
||||
ValidatorRegistry: validators,
|
||||
ValidatorBalances: validatorBalances,
|
||||
LatestAttestations: attestation,
|
||||
}
|
||||
inclusionMap := make(map[uint64]uint64)
|
||||
for _, voted := range tt.voted {
|
||||
inclusionMap[voted] = state.Slot + 1
|
||||
}
|
||||
state, err := InactivityInclusionDistance(
|
||||
state,
|
||||
tt.voted,
|
||||
uint64(len(validatorBalances))*params.BeaconConfig().MaxDepositAmount,
|
||||
inclusionMap)
|
||||
|
||||
for _, i := range tt.voted {
|
||||
validatorBalances[i] = 32000055555
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("could not execute InactivityInclusionPenalty:%v", err)
|
||||
}
|
||||
if !reflect.DeepEqual(state.ValidatorBalances, validatorBalances) {
|
||||
t.Errorf("InactivityInclusionPenalty(%v) = %v, wanted: %v",
|
||||
tt.voted, state.ValidatorBalances, validatorBalances)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestInactivityInclusionPenalty_OutOfBounds(t *testing.T) {
|
||||
validators := make([]*pb.Validator, params.BeaconConfig().SlotsPerEpoch*2)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
validators[i] = &pb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
}
|
||||
attestation := []*pb.PendingAttestation{
|
||||
{Data: &pb.AttestationData{Shard: 1, Slot: 0},
|
||||
AggregationBitfield: []byte{0xff}},
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
voted []uint64
|
||||
balanceAfterInclusionRewards []uint64
|
||||
}{
|
||||
{[]uint64{0, 1, 2, 3}, []uint64{}},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
state := &pb.BeaconState{
|
||||
ValidatorRegistry: validators,
|
||||
LatestAttestations: attestation,
|
||||
}
|
||||
inclusionMap := make(map[uint64]uint64)
|
||||
_, err := InactivityInclusionDistance(state, tt.voted, 0, inclusionMap)
|
||||
if err == nil {
|
||||
t.Fatal("InclusionDistRewards should have failed")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestAttestationInclusionRewards_AccurateRewards(t *testing.T) {
|
||||
validators := make([]*pb.Validator, params.BeaconConfig().DepositsForChainStart)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
validators[i] = &pb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
}
|
||||
var participationBitfield []byte
|
||||
// participation byte length = number of validators / target committee size / bits in a byte.
|
||||
byteLength := int(params.BeaconConfig().DepositsForChainStart / params.BeaconConfig().TargetCommitteeSize / 8)
|
||||
for i := 0; i < byteLength; i++ {
|
||||
participationBitfield = append(participationBitfield, byte(0xff))
|
||||
}
|
||||
atts := []*pb.Attestation{
|
||||
{Data: &pb.AttestationData{
|
||||
Slot: params.BeaconConfig().GenesisSlot,
|
||||
LatestCrosslink: &pb.Crosslink{},
|
||||
CrosslinkDataRootHash32: params.BeaconConfig().ZeroHash[:]}}}
|
||||
pendingAtts := []*pb.PendingAttestation{
|
||||
{Data: &pb.AttestationData{Slot: params.BeaconConfig().GenesisSlot},
|
||||
AggregationBitfield: participationBitfield,
|
||||
InclusionSlot: params.BeaconConfig().GenesisSlot},
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
voted []uint64
|
||||
}{
|
||||
{[]uint64{}},
|
||||
{[]uint64{251}},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
validatorBalances := make([]uint64, params.BeaconConfig().DepositsForChainStart)
|
||||
for i := 0; i < len(validatorBalances); i++ {
|
||||
validatorBalances[i] = params.BeaconConfig().MaxDepositAmount
|
||||
}
|
||||
state := &pb.BeaconState{
|
||||
Slot: params.BeaconConfig().GenesisSlot + 10,
|
||||
ValidatorRegistry: validators,
|
||||
ValidatorBalances: validatorBalances,
|
||||
LatestAttestations: pendingAtts,
|
||||
LatestCrosslinks: []*pb.Crosslink{{}},
|
||||
}
|
||||
|
||||
_, err := blocks.ProcessBlockAttestations(state, &pb.BeaconBlock{
|
||||
Body: &pb.BeaconBlockBody{
|
||||
Attestations: atts,
|
||||
},
|
||||
}, false /* sig verification */)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
inclusionMap := make(map[uint64]uint64)
|
||||
for _, voted := range tt.voted {
|
||||
inclusionMap[voted] = state.Slot
|
||||
}
|
||||
|
||||
state, err = AttestationInclusion(
|
||||
state,
|
||||
uint64(len(validatorBalances))*params.BeaconConfig().MaxDepositAmount,
|
||||
tt.voted,
|
||||
inclusionMap)
|
||||
|
||||
for _, i := range tt.voted {
|
||||
validatorBalances[i] = 32000008680
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("could not execute InactivityInclusionPenalty:%v", err)
|
||||
}
|
||||
if !reflect.DeepEqual(state.ValidatorBalances, validatorBalances) {
|
||||
t.Errorf("AttestationInclusionRewards(%v) = %v, wanted: %v",
|
||||
tt.voted, state.ValidatorBalances, validatorBalances)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestAttestationInclusionRewards_NoInclusionSlot(t *testing.T) {
|
||||
validators := make([]*pb.Validator, params.BeaconConfig().SlotsPerEpoch*2)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
validators[i] = &pb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
voted []uint64
|
||||
balanceAfterAttestationInclusion []uint64
|
||||
}{
|
||||
{[]uint64{0, 1, 2, 3}, []uint64{32000000000, 32000000000, 32000000000, 32000000000}},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
validatorBalances := make([]uint64, 128)
|
||||
for i := 0; i < len(validatorBalances); i++ {
|
||||
validatorBalances[i] = params.BeaconConfig().MaxDepositAmount
|
||||
}
|
||||
state := &pb.BeaconState{
|
||||
ValidatorRegistry: validators,
|
||||
ValidatorBalances: validatorBalances,
|
||||
}
|
||||
inclusionMap := make(map[uint64]uint64)
|
||||
if _, err := AttestationInclusion(state, 0, tt.voted, inclusionMap); err == nil {
|
||||
t.Fatal("AttestationInclusionRewards should have failed with no inclusion slot")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestAttestationInclusionRewards_NoProposerIndex(t *testing.T) {
|
||||
validators := make([]*pb.Validator, params.BeaconConfig().SlotsPerEpoch*2)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
validators[i] = &pb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
}
|
||||
attestation := []*pb.PendingAttestation{
|
||||
{Data: &pb.AttestationData{Shard: 1, Slot: 0},
|
||||
AggregationBitfield: []byte{0xff},
|
||||
InclusionSlot: 0},
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
voted []uint64
|
||||
balanceAfterAttestationInclusion []uint64
|
||||
}{
|
||||
{[]uint64{0}, []uint64{32000071022, 32000000000, 32000000000, 32000000000}},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
validatorBalances := make([]uint64, 4)
|
||||
for i := 0; i < len(validatorBalances); i++ {
|
||||
validatorBalances[i] = params.BeaconConfig().MaxDepositAmount
|
||||
}
|
||||
state := &pb.BeaconState{
|
||||
Slot: 1000,
|
||||
ValidatorRegistry: validators,
|
||||
ValidatorBalances: validatorBalances,
|
||||
LatestAttestations: attestation,
|
||||
}
|
||||
inclusionMap := make(map[uint64]uint64)
|
||||
if _, err := AttestationInclusion(state, 0, tt.voted, inclusionMap); err == nil {
|
||||
t.Fatal("AttestationInclusionRewards should have failed with no proposer index")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCrosslinksRewardsPenalties_AccurateBalances(t *testing.T) {
|
||||
validators := make([]*pb.Validator, params.BeaconConfig().SlotsPerEpoch*4)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
validators[i] = &pb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
voted []byte
|
||||
balanceAfterCrosslinkRewards []uint64
|
||||
}{
|
||||
{[]byte{0x0}, []uint64{
|
||||
32 * 1e9, 32 * 1e9, 32 * 1e9, 32 * 1e9, 32 * 1e9, 32 * 1e9, 32 * 1e9, 32 * 1e9}},
|
||||
{[]byte{0xF}, []uint64{
|
||||
31585730498, 31585730498, 31585730498, 31585730498,
|
||||
32416931985, 32416931985, 32416931985, 32416931985}},
|
||||
{[]byte{0xFF}, []uint64{
|
||||
32829149760, 32829149760, 32829149760, 32829149760,
|
||||
32829149760, 32829149760, 32829149760, 32829149760}},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
validatorBalances := make([]uint64, params.BeaconConfig().SlotsPerEpoch*4)
|
||||
for i := 0; i < len(validatorBalances); i++ {
|
||||
validatorBalances[i] = params.BeaconConfig().MaxDepositAmount
|
||||
}
|
||||
attestation := []*pb.PendingAttestation{
|
||||
{Data: &pb.AttestationData{Shard: 1, Slot: 0},
|
||||
AggregationBitfield: tt.voted,
|
||||
InclusionSlot: 0},
|
||||
}
|
||||
state := &pb.BeaconState{
|
||||
ValidatorRegistry: validators,
|
||||
ValidatorBalances: validatorBalances,
|
||||
LatestAttestations: attestation,
|
||||
}
|
||||
state, err := Crosslinks(
|
||||
state,
|
||||
attestation,
|
||||
nil)
|
||||
if err != nil {
|
||||
t.Fatalf("Could not apply Crosslinks rewards: %v", err)
|
||||
}
|
||||
if !reflect.DeepEqual(state.ValidatorBalances, validatorBalances) {
|
||||
t.Errorf("CrosslinksRewardsPenalties(%v) = %v, wanted: %v",
|
||||
tt.voted, state.ValidatorBalances, validatorBalances)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -5,54 +5,59 @@ go_library(
|
||||
srcs = [
|
||||
"block.go",
|
||||
"block_operations.go",
|
||||
"validity_conditions.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks",
|
||||
visibility = ["//beacon-chain:__subpackages__"],
|
||||
visibility = [
|
||||
"//beacon-chain:__subpackages__",
|
||||
"//shared/testutil:__pkg__",
|
||||
],
|
||||
deps = [
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/state/stateutils:go_default_library",
|
||||
"//beacon-chain/core/validators:go_default_library",
|
||||
"//beacon-chain/utils:go_default_library",
|
||||
"//proto/beacon/p2p/v1:go_default_library",
|
||||
"//shared/bls:go_default_library",
|
||||
"//shared/bytesutil:go_default_library",
|
||||
"//shared/forkutil:go_default_library",
|
||||
"//shared/featureconfig:go_default_library",
|
||||
"//shared/hashutil:go_default_library",
|
||||
"//shared/mathutil:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
"//shared/sliceutil:go_default_library",
|
||||
"//shared/trieutil:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
|
||||
"@com_github_gogo_protobuf//proto:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_ssz//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@io_opencensus_go//trace:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
size = "small",
|
||||
size = "medium",
|
||||
srcs = [
|
||||
"block_operations_fuzz_test.go",
|
||||
"block_operations_test.go",
|
||||
"block_test.go",
|
||||
"validity_conditions_test.go",
|
||||
"eth1_data_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/state:go_default_library",
|
||||
"//beacon-chain/core/state/stateutils:go_default_library",
|
||||
"//proto/beacon/p2p/v1:go_default_library",
|
||||
"//shared/bls:go_default_library",
|
||||
"//shared/featureconfig:go_default_library",
|
||||
"//shared/forkutil:go_default_library",
|
||||
"//shared/hashutil:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
"//shared/testutil:go_default_library",
|
||||
"//shared/trieutil:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
|
||||
"@com_github_gogo_protobuf//proto:go_default_library",
|
||||
"@com_github_google_gofuzz//:go_default_library",
|
||||
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_ssz//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
|
||||
"@in_gopkg_d4l3k_messagediff_v1//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -4,77 +4,20 @@
|
||||
package blocks
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/utils"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/hashutil"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
)
|
||||
|
||||
var clock utils.Clock = &utils.RealClock{}
|
||||
|
||||
// NewGenesisBlock returns the canonical, genesis block for the beacon chain protocol.
|
||||
func NewGenesisBlock(stateRoot []byte) *pb.BeaconBlock {
|
||||
block := &pb.BeaconBlock{
|
||||
Slot: params.BeaconConfig().GenesisSlot,
|
||||
ParentRootHash32: params.BeaconConfig().ZeroHash[:],
|
||||
StateRootHash32: stateRoot,
|
||||
RandaoReveal: params.BeaconConfig().ZeroHash[:],
|
||||
Signature: params.BeaconConfig().EmptySignature[:],
|
||||
Eth1Data: &pb.Eth1Data{
|
||||
DepositRootHash32: params.BeaconConfig().ZeroHash[:],
|
||||
BlockHash32: params.BeaconConfig().ZeroHash[:],
|
||||
},
|
||||
Body: &pb.BeaconBlockBody{
|
||||
ProposerSlashings: []*pb.ProposerSlashing{},
|
||||
AttesterSlashings: []*pb.AttesterSlashing{},
|
||||
Attestations: []*pb.Attestation{},
|
||||
Deposits: []*pb.Deposit{},
|
||||
VoluntaryExits: []*pb.VoluntaryExit{},
|
||||
},
|
||||
func NewGenesisBlock(stateRoot []byte) *ethpb.SignedBeaconBlock {
|
||||
zeroHash := params.BeaconConfig().ZeroHash[:]
|
||||
genBlock := ðpb.BeaconBlock{
|
||||
ParentRoot: zeroHash,
|
||||
StateRoot: stateRoot,
|
||||
Body: ðpb.BeaconBlockBody{},
|
||||
}
|
||||
return block
|
||||
}
|
||||
|
||||
// BlockRoot returns the block root stored in the BeaconState for a given slot.
|
||||
// It returns an error if the requested block root is not within the BeaconState.
|
||||
// Spec pseudocode definition:
|
||||
// def get_block_root(state: BeaconState, slot: int) -> Hash32:
|
||||
// """
|
||||
// returns the block root at a recent ``slot``.
|
||||
// """
|
||||
// assert state.slot <= slot + LATEST_BLOCK_ROOTS_LENGTH
|
||||
// assert slot < state.slot
|
||||
// return state.latest_block_roots[slot % LATEST_BLOCK_ROOTS_LENGTH]
|
||||
func BlockRoot(state *pb.BeaconState, slot uint64) ([]byte, error) {
|
||||
earliestSlot := state.Slot - params.BeaconConfig().LatestBlockRootsLength
|
||||
|
||||
if slot < earliestSlot || slot >= state.Slot {
|
||||
if earliestSlot < params.BeaconConfig().GenesisSlot {
|
||||
earliestSlot = params.BeaconConfig().GenesisSlot
|
||||
}
|
||||
return []byte{}, fmt.Errorf("slot %d is not within expected range of %d to %d",
|
||||
slot-params.BeaconConfig().GenesisSlot,
|
||||
earliestSlot-params.BeaconConfig().GenesisSlot,
|
||||
state.Slot-params.BeaconConfig().GenesisSlot,
|
||||
)
|
||||
return ðpb.SignedBeaconBlock{
|
||||
Block: genBlock,
|
||||
Signature: params.BeaconConfig().EmptySignature[:],
|
||||
}
|
||||
|
||||
return state.LatestBlockRootHash32S[slot%params.BeaconConfig().LatestBlockRootsLength], nil
|
||||
}
|
||||
|
||||
// ProcessBlockRoots processes the previous block root into the state, by appending it
|
||||
// to the most recent block roots.
|
||||
// Spec:
|
||||
// Let previous_block_root be the tree_hash_root of the previous beacon block processed in the chain.
|
||||
// Set state.latest_block_roots[(state.slot - 1) % LATEST_BLOCK_ROOTS_LENGTH] = previous_block_root.
|
||||
// If state.slot % LATEST_BLOCK_ROOTS_LENGTH == 0 append merkle_root(state.latest_block_roots) to state.batched_block_roots.
|
||||
func ProcessBlockRoots(state *pb.BeaconState, parentRoot [32]byte) *pb.BeaconState {
|
||||
state.LatestBlockRootHash32S[(state.Slot-1)%params.BeaconConfig().LatestBlockRootsLength] = parentRoot[:]
|
||||
if state.Slot%params.BeaconConfig().LatestBlockRootsLength == 0 {
|
||||
merkleRoot := hashutil.MerkleRoot(state.LatestBlockRootHash32S)
|
||||
state.BatchedBlockRootHash32S = append(state.BatchedBlockRootHash32S, merkleRoot)
|
||||
}
|
||||
return state
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
36
beacon-chain/core/blocks/block_operations_fuzz_test.go
Normal file
36
beacon-chain/core/blocks/block_operations_fuzz_test.go
Normal file
@@ -0,0 +1,36 @@
|
||||
package blocks_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
fuzz "github.com/google/gofuzz"
|
||||
eth "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
ethereum_beacon_p2p_v1 "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
)
|
||||
|
||||
func TestFuzzProcessAttestation_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
ctx := context.Background()
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
att := ð.Attestation{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(att)
|
||||
_, _ = blocks.ProcessAttestationNoVerify(ctx, state, att)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFuzzProcessBlockHeader_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
block := ð.SignedBeaconBlock{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(block)
|
||||
_, _ = blocks.ProcessBlockHeader(state, block)
|
||||
}
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,171 +1,21 @@
|
||||
package blocks
|
||||
package blocks_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/hashutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
)
|
||||
|
||||
func TestGenesisBlock_InitializedCorrectly(t *testing.T) {
|
||||
stateHash := []byte{0}
|
||||
b1 := NewGenesisBlock(stateHash)
|
||||
b1 := blocks.NewGenesisBlock(stateHash)
|
||||
|
||||
if b1.ParentRootHash32 == nil {
|
||||
if b1.Block.ParentRoot == nil {
|
||||
t.Error("genesis block missing ParentHash field")
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(b1.Body.Attestations, []*pb.Attestation{}) {
|
||||
t.Errorf("genesis block should have 0 attestations")
|
||||
}
|
||||
|
||||
if !bytes.Equal(b1.RandaoReveal, params.BeaconConfig().ZeroHash[:]) {
|
||||
t.Error("genesis block missing RandaoReveal field")
|
||||
}
|
||||
|
||||
if !bytes.Equal(b1.StateRootHash32, stateHash) {
|
||||
if !bytes.Equal(b1.Block.StateRoot, stateHash) {
|
||||
t.Error("genesis block StateRootHash32 isn't initialized correctly")
|
||||
}
|
||||
expectedEth1 := &pb.Eth1Data{
|
||||
DepositRootHash32: params.BeaconConfig().ZeroHash[:],
|
||||
BlockHash32: params.BeaconConfig().ZeroHash[:],
|
||||
}
|
||||
if !proto.Equal(b1.Eth1Data, expectedEth1) {
|
||||
t.Error("genesis block Eth1Data isn't initialized correctly")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlockRootAtSlot_AccurateBlockRoot(t *testing.T) {
|
||||
if params.BeaconConfig().SlotsPerEpoch != 64 {
|
||||
t.Errorf("slotsPerEpoch should be 64 for these tests to pass")
|
||||
}
|
||||
var blockRoots [][]byte
|
||||
|
||||
for i := uint64(0); i < params.BeaconConfig().LatestBlockRootsLength; i++ {
|
||||
blockRoots = append(blockRoots, []byte{byte(i)})
|
||||
}
|
||||
state := &pb.BeaconState{
|
||||
LatestBlockRootHash32S: blockRoots,
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
slot uint64
|
||||
stateSlot uint64
|
||||
expectedRoot []byte
|
||||
}{
|
||||
{
|
||||
slot: 0,
|
||||
stateSlot: 1,
|
||||
expectedRoot: []byte{0},
|
||||
},
|
||||
{
|
||||
slot: 2,
|
||||
stateSlot: 5,
|
||||
expectedRoot: []byte{2},
|
||||
},
|
||||
{
|
||||
slot: 64,
|
||||
stateSlot: 128,
|
||||
expectedRoot: []byte{64},
|
||||
}, {
|
||||
slot: 2999,
|
||||
stateSlot: 3000,
|
||||
expectedRoot: []byte{183},
|
||||
}, {
|
||||
slot: 2873,
|
||||
stateSlot: 3000,
|
||||
expectedRoot: []byte{57},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
state.Slot = tt.stateSlot + params.BeaconConfig().GenesisSlot
|
||||
wantedSlot := tt.slot + params.BeaconConfig().GenesisSlot
|
||||
result, err := BlockRoot(state, wantedSlot)
|
||||
if err != nil {
|
||||
t.Errorf("failed to get block root at slot %d: %v", wantedSlot, err)
|
||||
}
|
||||
if !bytes.Equal(result, tt.expectedRoot) {
|
||||
t.Errorf(
|
||||
"result block root was an unexpected value. Wanted %d, got %d",
|
||||
tt.expectedRoot,
|
||||
result,
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlockRootAtSlot_OutOfBounds(t *testing.T) {
|
||||
if params.BeaconConfig().SlotsPerEpoch != 64 {
|
||||
t.Errorf("slotsPerEpoch should be 64 for these tests to pass")
|
||||
}
|
||||
|
||||
var blockRoots [][]byte
|
||||
|
||||
for i := uint64(0); i < params.BeaconConfig().LatestBlockRootsLength; i++ {
|
||||
blockRoots = append(blockRoots, []byte{byte(i)})
|
||||
}
|
||||
state := &pb.BeaconState{
|
||||
LatestBlockRootHash32S: blockRoots,
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
slot uint64
|
||||
stateSlot uint64
|
||||
expectedErr string
|
||||
}{
|
||||
{
|
||||
slot: params.BeaconConfig().GenesisSlot + 1000,
|
||||
stateSlot: params.BeaconConfig().GenesisSlot + 500,
|
||||
expectedErr: fmt.Sprintf("slot %d is not within expected range of %d to %d",
|
||||
1000,
|
||||
0,
|
||||
500),
|
||||
},
|
||||
{
|
||||
slot: params.BeaconConfig().GenesisSlot + 129,
|
||||
stateSlot: params.BeaconConfig().GenesisSlot + 400,
|
||||
expectedErr: "slot 129 is not within expected range of 272 to 399",
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
state.Slot = tt.stateSlot
|
||||
_, err := BlockRoot(state, tt.slot)
|
||||
if err != nil && err.Error() != tt.expectedErr {
|
||||
t.Errorf("Expected error \"%s\" got \"%v\"", tt.expectedErr, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestProcessBlockRoots_AccurateMerkleTree(t *testing.T) {
|
||||
state := &pb.BeaconState{}
|
||||
|
||||
state.LatestBlockRootHash32S = make([][]byte, params.BeaconConfig().LatestBlockRootsLength)
|
||||
state.Slot = params.BeaconConfig().LatestBlockRootsLength + 1
|
||||
|
||||
testRoot := [32]byte{'a'}
|
||||
|
||||
newState := ProcessBlockRoots(state, testRoot)
|
||||
if !bytes.Equal(newState.LatestBlockRootHash32S[0], testRoot[:]) {
|
||||
t.Fatalf("Latest Block root hash not saved."+
|
||||
" Supposed to get %#x , but got %#x", testRoot, newState.LatestBlockRootHash32S[0])
|
||||
}
|
||||
|
||||
newState.Slot = newState.Slot - 1
|
||||
|
||||
newState = ProcessBlockRoots(newState, testRoot)
|
||||
expectedHashes := make([][]byte, params.BeaconConfig().LatestBlockRootsLength)
|
||||
expectedHashes[0] = testRoot[:]
|
||||
expectedHashes[params.BeaconConfig().LatestBlockRootsLength-1] = testRoot[:]
|
||||
|
||||
expectedRoot := hashutil.MerkleRoot(expectedHashes)
|
||||
|
||||
if !bytes.Equal(newState.BatchedBlockRootHash32S[0], expectedRoot[:]) {
|
||||
t.Errorf("saved merkle root is not equal to expected merkle root"+
|
||||
"\n expected %#x but got %#x", expectedRoot, newState.BatchedBlockRootHash32S[0])
|
||||
}
|
||||
}
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user