mirror of
https://github.com/OffchainLabs/prysm.git
synced 2026-01-09 21:38:05 -05:00
Compare commits
700 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
4f8d9c59dd | ||
|
|
021d777b5e | ||
|
|
dc3fb018fe | ||
|
|
2ab4b86f9b | ||
|
|
b30a089548 | ||
|
|
271938202e | ||
|
|
6fe814c5aa | ||
|
|
a9f4d1d02d | ||
|
|
7c110e54f0 | ||
|
|
3043d4722f | ||
|
|
c96c8b4aa3 | ||
|
|
9f46000dba | ||
|
|
5450b3155e | ||
|
|
1bb12c3568 | ||
|
|
1be8b3aa5e | ||
|
|
431762164e | ||
|
|
3ec2a0f9e0 | ||
|
|
e96b45b29c | ||
|
|
e529f5b1d6 | ||
|
|
f18bada8c9 | ||
|
|
5657535c52 | ||
|
|
9da9fbdfba | ||
|
|
de2ec8e575 | ||
|
|
3660732f44 | ||
|
|
8e6c16d416 | ||
|
|
8143cc36bc | ||
|
|
eeffa4fb30 | ||
|
|
1137403e4b | ||
|
|
f17818b1c0 | ||
|
|
691f0bba70 | ||
|
|
b024191887 | ||
|
|
1f87cb11fc | ||
|
|
a0b142a26c | ||
|
|
035eaffd9d | ||
|
|
c41244ad34 | ||
|
|
c20d9ccbb3 | ||
|
|
3380d14475 | ||
|
|
4f031d1988 | ||
|
|
02afb53ea4 | ||
|
|
0974c02a00 | ||
|
|
c6acf0a28c | ||
|
|
ed7ad4525e | ||
|
|
7fcc07fb45 | ||
|
|
f937713fe9 | ||
|
|
359e0abe1d | ||
|
|
0704ba685a | ||
|
|
0f95b797af | ||
|
|
43722e45f4 | ||
|
|
ff4ed413a3 | ||
|
|
f1a42eb589 | ||
|
|
a90ffaba49 | ||
|
|
663d919b6f | ||
|
|
7b30845c01 | ||
|
|
46eb228379 | ||
|
|
8d3fc1ad3e | ||
|
|
93195b762b | ||
|
|
f0abf0d7d5 | ||
|
|
9d27449212 | ||
|
|
edb6590764 | ||
|
|
e77cf724b8 | ||
|
|
b633dfe880 | ||
|
|
8334aac111 | ||
|
|
4c1e2ba196 | ||
|
|
25c13663d2 | ||
|
|
0c3af32274 | ||
|
|
01cb01a8f2 | ||
|
|
0c9e99e04a | ||
|
|
d4cd51f23e | ||
|
|
962fe8552d | ||
|
|
eddaea869b | ||
|
|
300d072456 | ||
|
|
ac1c92e241 | ||
|
|
2452c7403b | ||
|
|
b97e22107c | ||
|
|
98faf95943 | ||
|
|
af28862e94 | ||
|
|
b133eb6c4a | ||
|
|
345ec1bf8c | ||
|
|
d1fea430d6 | ||
|
|
054b15bc45 | ||
|
|
6a2955d43c | ||
|
|
0ecd83afbb | ||
|
|
069f2c5fb6 | ||
|
|
acb15a1f04 | ||
|
|
e2af70f692 | ||
|
|
15b5ec89b2 | ||
|
|
b4aaa610a1 | ||
|
|
6158a648cd | ||
|
|
e2a6f5a6ea | ||
|
|
aebc883a0d | ||
|
|
f3dc113dba | ||
|
|
5961aaf588 | ||
|
|
e635e5b205 | ||
|
|
66991f0efe | ||
|
|
e339b07ac7 | ||
|
|
139f51efaa | ||
|
|
a43a40c1c9 | ||
|
|
0bdd0dba67 | ||
|
|
239efe7410 | ||
|
|
e5da756c47 | ||
|
|
a612557fe7 | ||
|
|
26582cbf2e | ||
|
|
d68636bc7f | ||
|
|
699e7efc61 | ||
|
|
ba6b8c9321 | ||
|
|
cc5fc0af1a | ||
|
|
0093218e41 | ||
|
|
c09ae21ab0 | ||
|
|
4c43616647 | ||
|
|
59575bcac9 | ||
|
|
703ce63c12 | ||
|
|
69845cad77 | ||
|
|
a07e604eea | ||
|
|
044d72064f | ||
|
|
5cb51263b0 | ||
|
|
d9d4a9954e | ||
|
|
3989b65667 | ||
|
|
9fe2cdd5ca | ||
|
|
cb163d8910 | ||
|
|
cd6e06f01e | ||
|
|
af5cc31565 | ||
|
|
5a5cdc1b02 | ||
|
|
31b1e6a7a8 | ||
|
|
05a5bad476 | ||
|
|
2fef9d3e5e | ||
|
|
14b3181e67 | ||
|
|
e7b94123ce | ||
|
|
76aad0f444 | ||
|
|
2c1c41d1d6 | ||
|
|
921a44d9fd | ||
|
|
22bbed0059 | ||
|
|
b1231f3ddf | ||
|
|
c2b30cf801 | ||
|
|
b647ca5dd2 | ||
|
|
c0f1a1d674 | ||
|
|
855f5d2986 | ||
|
|
5f0ed8388e | ||
|
|
a951c4f6ab | ||
|
|
0470d37072 | ||
|
|
15b649d760 | ||
|
|
2e56a59473 | ||
|
|
6fe86a3b30 | ||
|
|
83945ca54b | ||
|
|
47bb927029 | ||
|
|
597b21c40a | ||
|
|
39aa791dcc | ||
|
|
90ed37a655 | ||
|
|
d143187b7e | ||
|
|
3735e6b8af | ||
|
|
deb76f1c15 | ||
|
|
6baffd4ccb | ||
|
|
731cc0bd44 | ||
|
|
641ad51dd4 | ||
|
|
8e55c81bd5 | ||
|
|
f737267e54 | ||
|
|
44856f9500 | ||
|
|
4389e9d3c9 | ||
|
|
655f57e3f2 | ||
|
|
c0d4cabdb7 | ||
|
|
0e37b4926a | ||
|
|
25308ef9fa | ||
|
|
40afef8b9e | ||
|
|
c7d0ced5d1 | ||
|
|
3d12322103 | ||
|
|
b4881e3cd5 | ||
|
|
d1eaa8e09e | ||
|
|
5db8c5ad0c | ||
|
|
d7db8b1f5d | ||
|
|
6b8ec26c56 | ||
|
|
9b2aa66667 | ||
|
|
b9c140c17d | ||
|
|
8885d715f2 | ||
|
|
0a2763b380 | ||
|
|
3fcb4e8a12 | ||
|
|
db68c8a57b | ||
|
|
7899dc115e | ||
|
|
456ac5f9a3 | ||
|
|
92a91476ef | ||
|
|
868c8f5dd4 | ||
|
|
38fed735b2 | ||
|
|
4a446329b2 | ||
|
|
6b40fa01ec | ||
|
|
214121b0ab | ||
|
|
b99779fe94 | ||
|
|
b263efefeb | ||
|
|
ecfd7bdfa1 | ||
|
|
549b0f66fa | ||
|
|
27ec40f269 | ||
|
|
bb60b2f523 | ||
|
|
4072eb711f | ||
|
|
2473680759 | ||
|
|
c44a30672e | ||
|
|
db21f98053 | ||
|
|
b7adf55336 | ||
|
|
f06dfd6108 | ||
|
|
bb4c8ba83e | ||
|
|
16fef1c658 | ||
|
|
090d9627fe | ||
|
|
c7698cda1c | ||
|
|
a11f1804a2 | ||
|
|
0882908d2c | ||
|
|
a7325315a8 | ||
|
|
c3785e03ba | ||
|
|
297247d915 | ||
|
|
601f93a0a1 | ||
|
|
8c90e38770 | ||
|
|
661e48f549 | ||
|
|
b3f2a330dc | ||
|
|
5c14cd64c5 | ||
|
|
56fcca69d7 | ||
|
|
02b6d7706f | ||
|
|
0ed8246e28 | ||
|
|
dfe52e1752 | ||
|
|
52524d5acc | ||
|
|
4598344918 | ||
|
|
1a5c5153be | ||
|
|
6c00f5fff7 | ||
|
|
4f654d30ac | ||
|
|
18fbdd53b9 | ||
|
|
5be4fee810 | ||
|
|
8a02003d4b | ||
|
|
bdcd06a708 | ||
|
|
7e0d0502aa | ||
|
|
f14ff34797 | ||
|
|
16a0c9f463 | ||
|
|
70cb06d50f | ||
|
|
0725e2dba7 | ||
|
|
031b51e294 | ||
|
|
015c8c4cd2 | ||
|
|
efd27c7b2b | ||
|
|
f16a71f178 | ||
|
|
669e1ea787 | ||
|
|
7ba2c897ad | ||
|
|
34178aff2a | ||
|
|
4df74a3b9d | ||
|
|
f6dfaef537 | ||
|
|
a9d144ad1f | ||
|
|
9cf30002d4 | ||
|
|
e2faa391c3 | ||
|
|
5b83dffbe4 | ||
|
|
b8383da468 | ||
|
|
c7fb28d42e | ||
|
|
3a9c8eb8b1 | ||
|
|
a9f1de354b | ||
|
|
69c3d9dec2 | ||
|
|
b99ae2cbe4 | ||
|
|
bfa103317e | ||
|
|
dc1432f8d8 | ||
|
|
85b379c08c | ||
|
|
91b8760632 | ||
|
|
27e7be6279 | ||
|
|
ebd4541dcd | ||
|
|
32b5b8fa69 | ||
|
|
c6e3d67241 | ||
|
|
cb33deab36 | ||
|
|
113ac460c0 | ||
|
|
c496170c33 | ||
|
|
945edb6c8f | ||
|
|
24a5a9c112 | ||
|
|
0180051b5e | ||
|
|
ce79d8e295 | ||
|
|
9579a5520b | ||
|
|
9958afe79d | ||
|
|
8ad174ffd8 | ||
|
|
68b6a7c172 | ||
|
|
8c5c7352b1 | ||
|
|
b705ab0239 | ||
|
|
923d5fc903 | ||
|
|
9c1a294bf7 | ||
|
|
061960c9e2 | ||
|
|
80248cd296 | ||
|
|
95b6cca399 | ||
|
|
1478882b41 | ||
|
|
7c4950832c | ||
|
|
ce0b55d13e | ||
|
|
e63119b254 | ||
|
|
8492273fa7 | ||
|
|
5b4025efcd | ||
|
|
c69385e71d | ||
|
|
cdfa969ced | ||
|
|
a1dc4ddc40 | ||
|
|
648584b356 | ||
|
|
fb7a75d2c3 | ||
|
|
00a6361c66 | ||
|
|
6213c94a14 | ||
|
|
397b7d807a | ||
|
|
05876d6250 | ||
|
|
bd334c4192 | ||
|
|
4f38333e54 | ||
|
|
d6bd389d5c | ||
|
|
962be9b4f8 | ||
|
|
f77049ae74 | ||
|
|
f432f7851e | ||
|
|
b7e6012628 | ||
|
|
79434fc2d1 | ||
|
|
069ec1726b | ||
|
|
2a79c572a5 | ||
|
|
c2fbb40909 | ||
|
|
d32493d43b | ||
|
|
0b2b77c5b0 | ||
|
|
d8c26590ca | ||
|
|
cc741ed8af | ||
|
|
f97ac5f0d7 | ||
|
|
85a38e6053 | ||
|
|
7f07ad831e | ||
|
|
ad7d9ab1da | ||
|
|
e452b950d0 | ||
|
|
2e2cec3a61 | ||
|
|
c80ffc640f | ||
|
|
f6b4637a91 | ||
|
|
3e9bf58d81 | ||
|
|
a22c97739e | ||
|
|
ade61717a4 | ||
|
|
9149c2e4f4 | ||
|
|
07ba594023 | ||
|
|
ad01bfbcde | ||
|
|
439a84fcb9 | ||
|
|
e2be2a21d0 | ||
|
|
eaf7ae3774 | ||
|
|
1fa301c79c | ||
|
|
1c759f6404 | ||
|
|
4960acb285 | ||
|
|
127f05d531 | ||
|
|
2f02a2baa3 | ||
|
|
d4bea51482 | ||
|
|
4ea5661f8f | ||
|
|
5eece9a507 | ||
|
|
417480ffa8 | ||
|
|
dd5a3fe80d | ||
|
|
fa2acb3632 | ||
|
|
1562d3252b | ||
|
|
10341cbf7f | ||
|
|
0f730b5887 | ||
|
|
b313b46f79 | ||
|
|
a78defcd26 | ||
|
|
86f6a44da6 | ||
|
|
d978c19a41 | ||
|
|
588773cd0c | ||
|
|
62a5931843 | ||
|
|
6d6e8be10a | ||
|
|
144dcc3a69 | ||
|
|
0f27343364 | ||
|
|
3388ab74cf | ||
|
|
663557e44e | ||
|
|
5df77848bb | ||
|
|
ed3ab828a1 | ||
|
|
ee9b9e69dc | ||
|
|
460250251d | ||
|
|
4aa7ebc2b7 | ||
|
|
a1e3c2d47c | ||
|
|
cc58b5aca6 | ||
|
|
9a395530b7 | ||
|
|
5cc6de9e67 | ||
|
|
c041403a50 | ||
|
|
8d889f169e | ||
|
|
b030771174 | ||
|
|
abe679e90e | ||
|
|
bfda29f2ad | ||
|
|
e96c2f4949 | ||
|
|
a52f9d4549 | ||
|
|
29a7a587cf | ||
|
|
27254ad362 | ||
|
|
eb5e814eb4 | ||
|
|
0a8dbaaabc | ||
|
|
e65d98925b | ||
|
|
781b7d6870 | ||
|
|
dc4c1ca2b7 | ||
|
|
d72e18ba60 | ||
|
|
a4db560e55 | ||
|
|
e7ecd9329a | ||
|
|
3e7e447160 | ||
|
|
1b62e92159 | ||
|
|
aae27749d4 | ||
|
|
2ba81193b0 | ||
|
|
68c1ca755d | ||
|
|
ccfc650375 | ||
|
|
3d3dccbdb4 | ||
|
|
d04399ea96 | ||
|
|
0605118686 | ||
|
|
dab87ba252 | ||
|
|
eb429ab719 | ||
|
|
ed529965af | ||
|
|
c6343cac3a | ||
|
|
06bc80d314 | ||
|
|
60cab2dc73 | ||
|
|
63d692a833 | ||
|
|
d744aaa2cd | ||
|
|
91d5ffae5b | ||
|
|
cb49544fe3 | ||
|
|
11731c4afe | ||
|
|
5349b00e19 | ||
|
|
2e5429c94e | ||
|
|
0a632064d4 | ||
|
|
129bc763ee | ||
|
|
452cadc286 | ||
|
|
2a4c89827d | ||
|
|
5ab1efb537 | ||
|
|
d0793f00c5 | ||
|
|
d8d9f4482f | ||
|
|
4835ba7bdf | ||
|
|
6ef1a712c2 | ||
|
|
0bee1de486 | ||
|
|
d2d4e7e35d | ||
|
|
6e0248429f | ||
|
|
884d2a159d | ||
|
|
415af93ad8 | ||
|
|
0b35743d2c | ||
|
|
62811e8f7c | ||
|
|
d4ae063ad7 | ||
|
|
3d24a85121 | ||
|
|
888e8925ee | ||
|
|
18333293d0 | ||
|
|
e286069b20 | ||
|
|
de2f1fbf5c | ||
|
|
44fa2c6371 | ||
|
|
a8edfa42cc | ||
|
|
7edca61e44 | ||
|
|
1cb0edac00 | ||
|
|
88bce4af34 | ||
|
|
5287ddc114 | ||
|
|
22e01aa9f2 | ||
|
|
a79dab7919 | ||
|
|
9a4bf6c1a2 | ||
|
|
7992375a0e | ||
|
|
6c4bf22723 | ||
|
|
ea12ffabba | ||
|
|
5077c009c3 | ||
|
|
7f1900e96c | ||
|
|
3c5d5bfc7b | ||
|
|
4e6c8c5b1a | ||
|
|
7919074a6a | ||
|
|
f6eea8e1fa | ||
|
|
45e6eccfb4 | ||
|
|
b6c6b9b776 | ||
|
|
1a9b0da9ae | ||
|
|
025be93492 | ||
|
|
22a3bf53ad | ||
|
|
37459ee765 | ||
|
|
e9d63e8dd3 | ||
|
|
01b8a84e21 | ||
|
|
9d8364bdfa | ||
|
|
2b6a5aaaf9 | ||
|
|
6aa92956f6 | ||
|
|
eae2268dd1 | ||
|
|
6de485c27e | ||
|
|
3839f577ec | ||
|
|
fc38a0413e | ||
|
|
0dd0e23155 | ||
|
|
699e1c8a23 | ||
|
|
39b2570af5 | ||
|
|
200bb5e42a | ||
|
|
d249f78d79 | ||
|
|
e110f038bc | ||
|
|
5ee79dc4a8 | ||
|
|
4ab0a91e51 | ||
|
|
a69cb5c6e4 | ||
|
|
624c42421d | ||
|
|
f3ae67a94b | ||
|
|
b30a7d1e19 | ||
|
|
0d400faea2 | ||
|
|
67c380b197 | ||
|
|
89eedd2123 | ||
|
|
2182e1cdc9 | ||
|
|
6d2a2ebadf | ||
|
|
9aed0034ec | ||
|
|
f764522cbe | ||
|
|
c7ae03e1b2 | ||
|
|
4efc0f5286 | ||
|
|
9052620453 | ||
|
|
0174397f6e | ||
|
|
9f5caf8fea | ||
|
|
3b3f2c78e2 | ||
|
|
242e4bccbf | ||
|
|
59ab89c98a | ||
|
|
ac768207ac | ||
|
|
77d41024dc | ||
|
|
f03083f6c8 | ||
|
|
5ff9ae2108 | ||
|
|
5fa03edb29 | ||
|
|
ebe4c9c971 | ||
|
|
6efe5ef496 | ||
|
|
fbbf5514d1 | ||
|
|
220af25bce | ||
|
|
c9252c06c4 | ||
|
|
2c565f5d59 | ||
|
|
1cb58e859e | ||
|
|
d26839c1f2 | ||
|
|
2cb8430ad4 | ||
|
|
03356fc7b5 | ||
|
|
bdc4045e23 | ||
|
|
dc1bd1ef62 | ||
|
|
35380dd9bf | ||
|
|
9674575892 | ||
|
|
b7d0d7cbb6 | ||
|
|
28eadac172 | ||
|
|
d5181496c4 | ||
|
|
b337a5720c | ||
|
|
53b8eb57ee | ||
|
|
30b4b045f5 | ||
|
|
ec1e7ae005 | ||
|
|
a949673e33 | ||
|
|
996f4c7f5a | ||
|
|
3915a6e15a | ||
|
|
961dd21554 | ||
|
|
2e4908e7c4 | ||
|
|
da637668a8 | ||
|
|
20168ad729 | ||
|
|
0b07a9f227 | ||
|
|
5dca662d01 | ||
|
|
8c28d1080c | ||
|
|
6a54a430e1 | ||
|
|
908d220eb2 | ||
|
|
ff1fd77425 | ||
|
|
e27bc8312f | ||
|
|
78968c1e29 | ||
|
|
fb431c11c1 | ||
|
|
30ed59e9c8 | ||
|
|
2e2d5199e8 | ||
|
|
4fe31cf1b3 | ||
|
|
e82e582cdf | ||
|
|
0b2d9d8576 | ||
|
|
65e3f3e007 | ||
|
|
2c28e4e7a3 | ||
|
|
642254daa6 | ||
|
|
c41140e15a | ||
|
|
23a6c20dd4 | ||
|
|
514f5f904f | ||
|
|
5844436716 | ||
|
|
5879b26b4b | ||
|
|
566efaef89 | ||
|
|
d9062a7e30 | ||
|
|
3f344aee55 | ||
|
|
fd93751bf7 | ||
|
|
325a2503f7 | ||
|
|
2179ac683e | ||
|
|
0f4dabfad8 | ||
|
|
8724dcd41b | ||
|
|
89e1200b73 | ||
|
|
0f677a09b6 | ||
|
|
c5dcf49ded | ||
|
|
a5881f924f | ||
|
|
d93ec64b21 | ||
|
|
a9a5973b98 | ||
|
|
570efe3d04 | ||
|
|
2e9c3895f4 | ||
|
|
9033f6801b | ||
|
|
c0b3767757 | ||
|
|
e72ff1bb4f | ||
|
|
0cb59bb018 | ||
|
|
6e549c90ba | ||
|
|
813233373e | ||
|
|
5757ce8894 | ||
|
|
7c11367cd8 | ||
|
|
6d2c37caf1 | ||
|
|
812311f6f7 | ||
|
|
22d81ef0ed | ||
|
|
414fcda9a2 | ||
|
|
bb2fc4cd5e | ||
|
|
5fd6a92052 | ||
|
|
7ccbe48f54 | ||
|
|
7a46cc0681 | ||
|
|
92d21c72b8 | ||
|
|
0cb681476e | ||
|
|
fa7b8ab60d | ||
|
|
bdb80271a3 | ||
|
|
1b8eb16fc7 | ||
|
|
1222ebb6db | ||
|
|
3e15e2fc1e | ||
|
|
667466020e | ||
|
|
f63ab1e136 | ||
|
|
6841d96f36 | ||
|
|
cae24068d4 | ||
|
|
dc0b8fad4f | ||
|
|
d3375d98a8 | ||
|
|
9d4c7cb4f7 | ||
|
|
ae2b2e74ca | ||
|
|
83179376d4 | ||
|
|
c36a852329 | ||
|
|
650a278fee | ||
|
|
6816337589 | ||
|
|
2950e4aeb4 | ||
|
|
746cc142d0 | ||
|
|
261428118e | ||
|
|
544e5309ad | ||
|
|
23dd951e59 | ||
|
|
498417a8fc | ||
|
|
617325b726 | ||
|
|
9e5cc81340 | ||
|
|
f75a5a5df8 | ||
|
|
ae8df9c32b | ||
|
|
90cbe49496 | ||
|
|
c31f46d973 | ||
|
|
83781d0b74 | ||
|
|
6488b0527c | ||
|
|
eeb8779cfc | ||
|
|
f40bbb92d1 | ||
|
|
4f0bef929f | ||
|
|
81a83cf100 | ||
|
|
8bbc589edd | ||
|
|
32245a9062 | ||
|
|
28c4f28d32 | ||
|
|
a2d4701f6e | ||
|
|
8e4022f8aa | ||
|
|
42e766e909 | ||
|
|
a686be8bd0 | ||
|
|
e3c3dea5d2 | ||
|
|
7754cfb6c6 | ||
|
|
f55a380ade | ||
|
|
9a317ffc0f | ||
|
|
3be4894b8a | ||
|
|
646411b881 | ||
|
|
0e99e4af4f | ||
|
|
e87337a97a | ||
|
|
53523b3eef | ||
|
|
5ec02b28a5 | ||
|
|
1620290305 | ||
|
|
fc171434c5 | ||
|
|
b08f3f760d | ||
|
|
7495961d6b | ||
|
|
4dbf68b50c | ||
|
|
e24b060eb6 | ||
|
|
e90358cd8e | ||
|
|
80865ff3f2 | ||
|
|
60469ec7ee | ||
|
|
67be8bd4f0 | ||
|
|
3682bf1cda | ||
|
|
e203f66fe0 | ||
|
|
04df922ac9 | ||
|
|
0326be86b5 | ||
|
|
a7ccd52a95 | ||
|
|
1ced4754db | ||
|
|
b872f74fd3 | ||
|
|
c1c48a8af5 | ||
|
|
b88e6dc918 | ||
|
|
3868837471 | ||
|
|
60b1596c4d | ||
|
|
4f0dcd5e6e | ||
|
|
ac405c714f | ||
|
|
7d0e5a9dc4 | ||
|
|
feb1267fee | ||
|
|
7a9c297206 | ||
|
|
21deed0fb7 | ||
|
|
627791c54e | ||
|
|
3358bde42d | ||
|
|
9e45cffabc | ||
|
|
2c8ff7b36f | ||
|
|
a7ec0679b5 | ||
|
|
f717c5d852 | ||
|
|
0cec0ee6c3 | ||
|
|
2f392544a6 | ||
|
|
75ce8359eb | ||
|
|
f5cb04012e | ||
|
|
f461d1e024 | ||
|
|
bdbd0aaeb8 | ||
|
|
715d06a215 | ||
|
|
976a3af637 | ||
|
|
8f8d2d36c0 | ||
|
|
a264a097cc | ||
|
|
4330839bc1 | ||
|
|
835418d1e3 | ||
|
|
ae07dc7962 | ||
|
|
d071a0a90a | ||
|
|
2d7802c637 | ||
|
|
fcb663acde | ||
|
|
858dbbf038 | ||
|
|
49c2dd2cfc | ||
|
|
7a22e98c0f | ||
|
|
26da7c4114 | ||
|
|
7acb45d186 | ||
|
|
24a5000e47 | ||
|
|
65d920e13a | ||
|
|
d27d18b192 | ||
|
|
0e88085661 | ||
|
|
3f6435ac80 | ||
|
|
64b69d9216 | ||
|
|
13207a9de5 | ||
|
|
ab756ec094 | ||
|
|
499f05f34b | ||
|
|
0077654fb5 | ||
|
|
f8cac0fb41 | ||
|
|
607f086de9 | ||
|
|
3b18aee181 | ||
|
|
f43a7c67f2 | ||
|
|
199ddc6cdb | ||
|
|
023dfebc73 | ||
|
|
53c4a26184 | ||
|
|
5acc362f7e | ||
|
|
68edad13bc | ||
|
|
bb2f329562 | ||
|
|
5169209360 | ||
|
|
c4ca8a47b3 | ||
|
|
904898e405 | ||
|
|
7f96fcc51b | ||
|
|
24583864b4 | ||
|
|
db9153e8e4 | ||
|
|
cd6e3e8a09 | ||
|
|
fc7c530696 | ||
|
|
8f05f14b36 | ||
|
|
3b8701296b | ||
|
|
48f69c0762 |
162
.bazelrc
162
.bazelrc
@@ -16,9 +16,169 @@ run --host_force_python=PY2
|
||||
--experimental_sandbox_default_allow_network=false
|
||||
|
||||
# Use minimal protobufs at runtime
|
||||
run --define ssz=minimal
|
||||
run --define ssz=mainnet
|
||||
test --define ssz=mainnet
|
||||
build --define ssz=mainnet
|
||||
|
||||
# Prevent PATH changes from rebuilding when switching from IDE to command line.
|
||||
build --incompatible_strict_action_env
|
||||
test --incompatible_strict_action_env
|
||||
run --incompatible_strict_action_env
|
||||
|
||||
# Disable kafka by default, it takes a long time to build...
|
||||
build --define kafka_enabled=false
|
||||
test --define kafka_enabled=false
|
||||
run --define kafka_enabled=false
|
||||
|
||||
# Release flags
|
||||
build:release --workspace_status_command=./scripts/workspace_status.sh
|
||||
build:release --stamp
|
||||
build:release --compilation_mode=opt
|
||||
|
||||
# LLVM compiler for building C/C++ dependencies.
|
||||
build:llvm --crosstool_top=@llvm_toolchain//:toolchain
|
||||
build:llvm --define compiler=llvm
|
||||
|
||||
# multi-arch cross-compiling toolchain configs:
|
||||
-----------------------------------------------
|
||||
build:cross --crosstool_top=@prysm_toolchains//:multiarch_toolchain
|
||||
build:cross --host_platform=@io_bazel_rules_go//go/toolchain:linux_amd64
|
||||
build:cross --host_crosstool_top=@prysm_toolchains//:hostonly_toolchain
|
||||
|
||||
# linux_amd64 config for cross compiler toolchain, not strictly necessary since host/exec env is amd64
|
||||
build:linux_amd64 --platforms=@io_bazel_rules_go//go/toolchain:linux_amd64_cgo
|
||||
|
||||
# osx_amd64 config for cross compiler toolchain
|
||||
build:osx_amd64 --config=cross
|
||||
build:osx_amd64 --platforms=@io_bazel_rules_go//go/toolchain:darwin_amd64_cgo
|
||||
build:osx_amd64 --compiler=osxcross
|
||||
|
||||
# windows
|
||||
build:windows_amd64 --config=cross
|
||||
build:windows_amd64 --platforms=@io_bazel_rules_go//go/toolchain:windows_amd64_cgo
|
||||
build:windows_amd64 --compiler=mingw-w64
|
||||
|
||||
# linux_arm64 conifg for cross compiler toolchain
|
||||
build:linux_arm64 --config=cross
|
||||
build:linux_arm64 --platforms=@io_bazel_rules_go//go/toolchain:linux_arm64_cgo
|
||||
build:linux_arm64 --copt=-funsafe-math-optimizations
|
||||
build:linux_arm64 --copt=-ftree-vectorize
|
||||
build:linux_arm64 --copt=-fomit-frame-pointer
|
||||
build:linux_arm64 --cpu=aarch64
|
||||
build:linux_arm64 --compiler=clang
|
||||
build:linux_arm64 --copt=-march=armv8-a
|
||||
|
||||
|
||||
# toolchain build debug configs
|
||||
#------------------------------
|
||||
build:debug --sandbox_debug
|
||||
build:debug --toolchain_resolution_debug
|
||||
build:debug --verbose_failures
|
||||
build:debug -s
|
||||
|
||||
# windows debug
|
||||
build:windows_amd64_debug --config=windows_amd64
|
||||
build:windows_amd64_debug --config=debug
|
||||
|
||||
# osx_amd64 debug config
|
||||
build:osx_amd64_debug --config=debug
|
||||
build:osx_amd64_debug --config=osx_amd64
|
||||
|
||||
# linux_arm64_debug
|
||||
build:linux_arm64_debug --config=linux_arm64
|
||||
build:linux_arm64_debug --config=debug
|
||||
|
||||
# linux_amd64_debug
|
||||
build:linux_amd64_debug --config=linux_amd64
|
||||
build:linux_amd64_debug --config=debug
|
||||
|
||||
|
||||
# Docker Sandbox Configs
|
||||
#-----------------------
|
||||
# Note all docker sandbox configs must run from a linux x86_64 host
|
||||
# build:docker-sandbox --experimental_docker_image=gcr.io/prysmaticlabs/rbe-worker:latest
|
||||
build:docker-sandbox --spawn_strategy=docker --strategy=Javac=docker --genrule_strategy=docker
|
||||
build:docker-sandbox --define=EXECUTOR=remote
|
||||
build:docker-sandbox --experimental_docker_verbose
|
||||
build:docker-sandbox --experimental_enable_docker_sandbox
|
||||
build:docker-sandbox --crosstool_top=@rbe_ubuntu_clang//cc:toolchain
|
||||
build:docker-sandbox --host_javabase=@rbe_ubuntu_clang//java:jdk
|
||||
build:docker-sandbox --javabase=@rbe_ubuntu_clang//java:jdk
|
||||
build:docker-sandbox --host_java_toolchain=@bazel_tools//tools/jdk:toolchain_hostjdk8
|
||||
build:docker-sandbox --java_toolchain=@bazel_tools//tools/jdk:toolchain_hostjdk8
|
||||
build:docker-sandbox --extra_execution_platforms=@rbe_ubuntu_clang//config:platform
|
||||
build:docker-sandbox --host_platform=@rbe_ubuntu_clang//config:platform
|
||||
build:docker-sandbox --platforms=@rbe_ubuntu_clang//config:platform
|
||||
build:docker-sandbox --extra_toolchains=@prysm_toolchains//:cc-toolchain-multiarch
|
||||
|
||||
# windows_amd64 docker sandbox build config
|
||||
build:windows_amd64_docker --config=docker-sandbox --config=windows_amd64
|
||||
build:windows_amd64_docker_debug --config=windows_amd64_docker --config=debug
|
||||
|
||||
# osx_amd64 docker sandbox build config
|
||||
build:osx_amd64_docker --config=docker-sandbox --config=osx_amd64
|
||||
build:osx_amd64_docker_debug --config=osx_amd64_docker --config=debug
|
||||
|
||||
# linux_arm64 docker sandbox build config
|
||||
build:linux_arm64_docker --config=docker-sandbox --config=linux_arm64
|
||||
build:linux_arm64_docker_debug --config=linux_arm64_docker --config=debug
|
||||
|
||||
# linux_amd64 docker sandbox build config
|
||||
build:linux_amd64_docker --config=docker-sandbox --config=linux_amd64
|
||||
build:linux_amd64_docker_debug --config=linux_amd64_docker --config=debug
|
||||
|
||||
|
||||
# Remote Build Execution
|
||||
#-----------------------
|
||||
# Originally from https://github.com/bazelbuild/bazel-toolchains/blob/master/bazelrc/bazel-2.0.0.bazelrc
|
||||
#
|
||||
# Depending on how many machines are in the remote execution instance, setting
|
||||
# this higher can make builds faster by allowing more jobs to run in parallel.
|
||||
# Setting it too high can result in jobs that timeout, however, while waiting
|
||||
# for a remote machine to execute them.
|
||||
build:remote --jobs=50
|
||||
|
||||
# Set several flags related to specifying the platform, toolchain and java
|
||||
# properties.
|
||||
# These flags should only be used as is for the rbe-ubuntu16-04 container
|
||||
# and need to be adapted to work with other toolchain containers.
|
||||
build:remote --host_javabase=@rbe_ubuntu_clang//java:jdk
|
||||
build:remote --javabase=@rbe_ubuntu_clang//java:jdk
|
||||
build:remote --host_java_toolchain=@bazel_tools//tools/jdk:toolchain_hostjdk8
|
||||
build:remote --java_toolchain=@bazel_tools//tools/jdk:toolchain_hostjdk8
|
||||
build:remote --crosstool_top=@rbe_ubuntu_clang//cc:toolchain
|
||||
build:remote --action_env=BAZEL_DO_NOT_DETECT_CPP_TOOLCHAIN=1
|
||||
# Platform flags:
|
||||
# The toolchain container used for execution is defined in the target indicated
|
||||
# by "extra_execution_platforms", "host_platform" and "platforms".
|
||||
# More about platforms: https://docs.bazel.build/versions/master/platforms.html
|
||||
build:remote --extra_toolchains=@rbe_ubuntu_clang//config:cc-toolchain
|
||||
build:remote --extra_execution_platforms=@rbe_ubuntu_clang//config:platform
|
||||
build:remote --host_platform=@rbe_ubuntu_clang//config:platform
|
||||
build:remote --platforms=@rbe_ubuntu_clang//config:platform
|
||||
|
||||
# Starting with Bazel 0.27.0 strategies do not need to be explicitly
|
||||
# defined. See https://github.com/bazelbuild/bazel/issues/7480
|
||||
build:remote --define=EXECUTOR=remote
|
||||
|
||||
# Enable remote execution so actions are performed on the remote systems.
|
||||
# build:remote --remote_executor=grpcs://remotebuildexecution.googleapis.com
|
||||
|
||||
# Enforce stricter environment rules, which eliminates some non-hermetic
|
||||
# behavior and therefore improves both the remote cache hit rate and the
|
||||
# correctness and repeatability of the build.
|
||||
build:remote --incompatible_strict_action_env=true
|
||||
|
||||
# Set a higher timeout value, just in case.
|
||||
build:remote --remote_timeout=3600
|
||||
|
||||
# Enable authentication. This will pick up application default credentials by
|
||||
# default. You can use --google_credentials=some_file.json to use a service
|
||||
# account credential instead.
|
||||
# build:remote --google_default_credentials=true
|
||||
|
||||
# Enable build without the bytes
|
||||
# See: https://github.com/bazelbuild/bazel/issues/6862
|
||||
build:remote --experimental_remote_download_outputs=toplevel --experimental_inmemory_jdeps_files --experimental_inmemory_dotd_files
|
||||
|
||||
build:remote --remote_local_fallback
|
||||
|
||||
1
.bazelversion
Normal file
1
.bazelversion
Normal file
@@ -0,0 +1 @@
|
||||
2.1.1
|
||||
@@ -11,11 +11,10 @@ build:remote-cache --strategy=Closure=standalone
|
||||
build:remote-cache --strategy=Genrule=standalone
|
||||
|
||||
# Build results backend.
|
||||
build:remote-cache --bes_results_url="https://source.cloud.google.com/results/invocations/"
|
||||
build:remote-cache --bes_backend=buildeventservice.googleapis.com
|
||||
build:remote-cache --bes_timeout=60s
|
||||
build:remote-cache --project_id=prysmaticlabs
|
||||
build:remote-cache --bes_upload_mode=fully_async
|
||||
#build:remote-cache --bes_results_url="https://source.cloud.google.com/results/invocations/"
|
||||
#build:remote-cache --bes_backend=buildeventservice.googleapis.com
|
||||
#build:remote-cache --bes_timeout=60s
|
||||
#build:remote-cache --project_id=prysmaticlabs
|
||||
|
||||
# Prysm specific remote-cache properties.
|
||||
build:remote-cache --disk_cache=
|
||||
@@ -46,3 +45,6 @@ build --stamp
|
||||
test --local_test_jobs=2
|
||||
# Disabled race detection due to unstable test results under constrained environment build kite
|
||||
# build --features=race
|
||||
|
||||
# Enable kafka for CI tests only.
|
||||
test --define kafka_enabled=true
|
||||
|
||||
6
.gitignore
vendored
6
.gitignore
vendored
@@ -4,6 +4,9 @@ bazel-*
|
||||
.DS_Store
|
||||
.swp
|
||||
|
||||
# Ignore VI/Vim swapfiles
|
||||
.*.sw?
|
||||
|
||||
# IntelliJ
|
||||
.idea
|
||||
.ijwb
|
||||
@@ -26,3 +29,6 @@ password.txt
|
||||
# go dependancy
|
||||
/go.mod
|
||||
/go.sum
|
||||
|
||||
# Dist files
|
||||
dist
|
||||
|
||||
18
BUILD.bazel
18
BUILD.bazel
@@ -31,27 +31,21 @@ alias(
|
||||
alias(
|
||||
name = "grpc_proto_compiler",
|
||||
actual = "@io_bazel_rules_go//proto:gogofast_grpc",
|
||||
visibility = [
|
||||
"//proto:__subpackages__",
|
||||
],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
|
||||
# Protobuf gRPC compiler without gogoproto. Required for gRPC gateway.
|
||||
alias(
|
||||
name = "grpc_nogogo_proto_compiler",
|
||||
actual = "@io_bazel_rules_go//proto:go_grpc",
|
||||
visibility = [
|
||||
"//proto:__subpackages__",
|
||||
],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
|
||||
# Protobuf gRPC gateway compiler
|
||||
alias(
|
||||
name = "grpc_gateway_proto_compiler",
|
||||
actual = "@grpc_ecosystem_grpc_gateway//protoc-gen-grpc-gateway:go_gen_grpc_gateway",
|
||||
visibility = [
|
||||
"//proto:__subpackages__",
|
||||
],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
|
||||
gometalinter(
|
||||
@@ -143,3 +137,9 @@ common_files = {
|
||||
),
|
||||
tags = ["manual"],
|
||||
) for pair in binary_targets]
|
||||
|
||||
toolchain(
|
||||
name = "built_cmake_toolchain",
|
||||
toolchain = "@rules_foreign_cc//tools/build_defs/native_tools:built_cmake",
|
||||
toolchain_type = "@rules_foreign_cc//tools/build_defs:cmake_toolchain",
|
||||
)
|
||||
|
||||
21
INTEROP.md
21
INTEROP.md
@@ -45,10 +45,9 @@ Open up two terminal windows, run:
|
||||
|
||||
```
|
||||
bazel run //beacon-chain -- \
|
||||
--no-genesis-delay \
|
||||
--bootstrap-node= \
|
||||
--deposit-contract 0xD775140349E6A5D12524C6ccc3d6A1d4519D4029 \
|
||||
--clear-db \
|
||||
--deposit-contract $(curl -s https://prylabs.net/contract) \
|
||||
--force-clear-db \
|
||||
--interop-num-validators 64 \
|
||||
--interop-eth1data-votes
|
||||
```
|
||||
@@ -58,11 +57,10 @@ the system with 64 validators and the genesis time set to the current unix times
|
||||
Wait a bit until your beacon chain starts, and in the other window:
|
||||
|
||||
```
|
||||
bazel run //validator -- --interop-num-validators 64
|
||||
bazel run //validator -- --keymanager=interop --keymanageropts='{"keys":64}'
|
||||
```
|
||||
|
||||
This will launch and kickstart the system with your 64 validators performing their duties accordingly.
|
||||
specify which keys
|
||||
|
||||
### Launching from `genesis.ssz`
|
||||
|
||||
@@ -70,10 +68,9 @@ Assuming you generated a `genesis.ssz` file with 64 validators, open up two term
|
||||
|
||||
```
|
||||
bazel run //beacon-chain -- \
|
||||
--no-genesis-delay \
|
||||
--bootstrap-node= \
|
||||
--deposit-contract 0xD775140349E6A5D12524C6ccc3d6A1d4519D4029 \
|
||||
--clear-db \
|
||||
--deposit-contract $(curl -s https://prylabs.net/contract) \
|
||||
--force-clear-db \
|
||||
--interop-genesis-state /path/to/genesis.ssz \
|
||||
--interop-eth1data-votes
|
||||
```
|
||||
@@ -81,13 +78,7 @@ Assuming you generated a `genesis.ssz` file with 64 validators, open up two term
|
||||
Wait a bit until your beacon chain starts, and in the other window:
|
||||
|
||||
```
|
||||
bazel run //validator -- --interop-num-validators 64
|
||||
bazel run //validator -- --keymanager=interop --keymanageropts='{"keys":64}'
|
||||
```
|
||||
|
||||
This will launch and kickstart the system with your 64 validators performing their duties accordingly.
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
233
README.md
233
README.md
@@ -1,206 +1,235 @@
|
||||
# Prysm: Ethereum 'Serenity' 2.0 Go Implementation
|
||||
# Prysm: An Ethereum 2.0 Client Written in Go
|
||||
|
||||
[](https://buildkite.com/prysmatic-labs/prysm)
|
||||
[](https://github.com/ethereum/eth2.0-specs/commit/452ecf8e27c7852c7854597f2b1bb4a62b80c7ec)
|
||||
[](https://github.com/ethereum/eth2.0-specs/tree/v0.9.3)
|
||||
[](https://discord.gg/KSA7rPr)
|
||||
[](https://gitter.im/prysmaticlabs/geth-sharding?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)
|
||||
|
||||
This is the Core repository for Prysm, [Prysmatic Labs](https://prysmaticlabs.com)' [Go](https://golang.org/) implementation of the Ethereum protocol 2.0 (Serenity).
|
||||
This is the core repository for Prysm, a [Golang](https://golang.org/) implementation of the Ethereum 2.0 client specifications developed by [Prysmatic Labs](https://prysmaticlabs.com).
|
||||
|
||||
### Need assistance?
|
||||
A more detailed set of installation and usage instructions as well as explanations of each component are available on our [official documentation portal](https://prysmaticlabs.gitbook.io/prysm/). If you still have questions, feel free to stop by either our [Discord](https://discord.gg/KSA7rPr) or [Gitter](https://gitter.im/prysmaticlabs/geth-sharding?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) and a member of the team or our community will be happy to assist you.
|
||||
|
||||
**Interested in what's next?** Be sure to read our [Roadmap Reference Implementation](https://github.com/prysmaticlabs/prysm/blob/master/docs/ROADMAP.md) document. This page outlines the basics of sharding as well as the various short-term milestones that we hope to achieve over the coming year.
|
||||
A more detailed set of installation and usage instructions as well as breakdowns of each individual component are available in the [official documentation portal](https://docs.prylabs.network). If you still have questions, feel free to stop by either our [Discord](https://discord.gg/KSA7rPr) or [Gitter](https://gitter.im/prysmaticlabs/geth-sharding?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) and a member of the team or our community will be happy to assist you.
|
||||
|
||||
### Come join the testnet!
|
||||
Participation is now open to the public in our testnet release for Ethereum 2.0 phase 0. Visit [prylabs.net](https://prylabs.net) for more information on the project itself or to sign up as a validator on the network.
|
||||
Participation is now open to the public for our Ethereum 2.0 phase 0 testnet release. Visit [prylabs.net](https://prylabs.net) for more information on the project or to sign up as a validator on the network.
|
||||
|
||||
# Table of Contents
|
||||
|
||||
- [Dependencies](#dependencies)
|
||||
- [Installation](#installation)
|
||||
- [Build Via Docker](#build-via-docker)
|
||||
- [Build Via Bazel](#build-via-bazel)
|
||||
- [Running an Ethereum 2.0 Beacon Node](#running-an-ethereum-20-beacon-node)
|
||||
- [Staking ETH: Running a Validator Client](#staking-eth-running-a-validator-client)
|
||||
- [Installation](#installing-prysm)
|
||||
- [Build via Docker](#build-via-docker)
|
||||
- [Build via Bazel](#build-via-bazel)
|
||||
- [Connecting to the public testnet: running a beacon node](#connecting-to-the-testnet-running-a-beacon-node)
|
||||
- [Running via Docker](#running-via-docker)
|
||||
- [Running via Bazel](#running-via-bazel)
|
||||
- [Staking ETH: running a validator client](#staking-eth-running-a-validator-client)
|
||||
- [Activating your validator: depositing 3.2 Goerli ETH](#activating-your-validator-depositing-32-gerli-eth)
|
||||
- [Starting the validator with Bazel](#starting-the-validator-with-bazel)
|
||||
- [Setting up a local ETH2 development chain](#setting-up-a-local-eth2-development-chain)
|
||||
- [Installation and dependencies](#installation-and-dependencies)
|
||||
- [Running a local beacon node and validator client](#running-a-local-beacon-node-and-validator-client)
|
||||
- [Testing Prysm](#testing-prysm)
|
||||
- [Contributing](#contributing)
|
||||
- [License](#license)
|
||||
|
||||
## Dependencies
|
||||
Prysm can be installed either with Docker **(recommended method)** or using our build tool, Bazel. The below instructions include sections for performing both.
|
||||
|
||||
**For Docker installations:**
|
||||
- The latest release of [Docker](https://docs.docker.com/install/)
|
||||
Prysm can be installed either with Docker **\(recommended\)** or using our build tool, Bazel. The below instructions include sections for performing both.
|
||||
|
||||
**For Bazel installations:**
|
||||
- The latest release of [Bazel](https://docs.bazel.build/versions/master/install.html)
|
||||
- A modern UNIX operating system (MacOS included)
|
||||
#### **For Docker installations:**
|
||||
|
||||
## Installation
|
||||
* The latest release of [Docker](https://docs.docker.com/install/)
|
||||
|
||||
#### **For Bazel installations:**
|
||||
|
||||
* The latest release of [Bazel](https://docs.bazel.build/versions/master/install.html)
|
||||
* The latest release of `cmake`
|
||||
* The latest release of `git`
|
||||
* A modern UNIX operating system \(macOS included\)
|
||||
|
||||
## Installing Prysm
|
||||
|
||||
### Build via Docker
|
||||
|
||||
1. Ensure you are running the most recent version of Docker by issuing the command:
|
||||
```
|
||||
|
||||
```text
|
||||
docker -v
|
||||
```
|
||||
2. To pull the Prysm images from the server, issue the following commands:
|
||||
```
|
||||
|
||||
2. To pull the Prysm images, issue the following commands:
|
||||
|
||||
```text
|
||||
docker pull gcr.io/prysmaticlabs/prysm/validator:latest
|
||||
docker pull gcr.io/prysmaticlabs/prysm/beacon-chain:latest
|
||||
```
|
||||
|
||||
This process will also install any related dependencies.
|
||||
|
||||
### Build via Bazel
|
||||
|
||||
1. Open a terminal window. Ensure you are running the most recent version of Bazel by issuing the command:
|
||||
```
|
||||
|
||||
```text
|
||||
bazel version
|
||||
```
|
||||
2. Clone this repository and enter the directory:
|
||||
```
|
||||
|
||||
2. Clone Prysm's [main repository](https://github.com/prysmaticlabs/prysm) and enter the directory:
|
||||
|
||||
```text
|
||||
git clone https://github.com/prysmaticlabs/prysm
|
||||
cd prysm
|
||||
```
|
||||
3. Build both the beacon chain node implementation and the validator client:
|
||||
```
|
||||
|
||||
3. Build both the beacon chain node and the validator client:
|
||||
|
||||
```text
|
||||
bazel build //beacon-chain:beacon-chain
|
||||
bazel build //validator:validator
|
||||
```
|
||||
|
||||
Bazel will automatically pull and install any dependencies as well, including Go and necessary compilers.
|
||||
|
||||
4. Build the configuration for the Prysm testnet by issuing the commands:
|
||||
## Connecting to the testnet: running a beacon node
|
||||
|
||||
```
|
||||
bazel build --define ssz=minimal //beacon-chain:beacon-chain
|
||||
bazel build --define ssz=minimal //validator:validator
|
||||
```
|
||||
Below are instructions for initialising a beacon node and connecting to the public testnet. To further understand the role that the beacon node plays in Prysm, see [this section of the documentation.](https://docs.prylabs.network/docs/how-prysm-works/architecture-overview/)
|
||||
|
||||
The binaries will be built in an architecture-dependent subdirectory of `bazel-bin`, and are supplied as part of Bazel's build process. To fetch the location, issue the command:
|
||||
|
||||
```
|
||||
$ bazel build --define ssz=minimal //beacon-chain:beacon-chain
|
||||
...
|
||||
Target //beacon-chain:beacon-chain up-to-date:
|
||||
bazel-bin/beacon-chain/linux_amd64_stripped/beacon-chain
|
||||
...
|
||||
```
|
||||
|
||||
In the example above, the beacon chain binary has been created in `bazel-bin/beacon-chain/linux_amd64_stripped/beacon-chain`.
|
||||
|
||||
## Running an Ethereum 2.0 Beacon Node
|
||||
To understand the role that both the beacon node and validator play in Prysm, see [this section of our documentation](https://prysmaticlabs.gitbook.io/prysm/how-prysm-works/overview-technical).
|
||||
**NOTE:** It is recommended to open up port 13000 on your local router to improve connectivity and receive more peers from the network. To do so, navigate to `192.168.0.1` in your browser and login if required. Follow along with the interface to modify your routers firewall settings. When this task is completed, append the parameter`--p2p-host-ip=$(curl -s ident.me)` to your selected beacon startup command presented in this section to use the newly opened port.
|
||||
|
||||
### Running via Docker
|
||||
|
||||
**Docker on Linux/Mac:**
|
||||
#### **Docker on Linux/macOS:**
|
||||
|
||||
To start your beacon node, issue the following command:
|
||||
|
||||
```
|
||||
docker run -v $HOME/prysm-data:/data -p 4000:4000 \
|
||||
--name beacon-node \
|
||||
```text
|
||||
docker run -it -v $HOME/prysm:/data -p 4000:4000 -p 13000:13000 --name beacon-node \
|
||||
gcr.io/prysmaticlabs/prysm/beacon-chain:latest \
|
||||
--no-genesis-delay \
|
||||
--datadir=/data
|
||||
```
|
||||
|
||||
(Optional) If you want to enable gRPC, then run this command instead of the one above:
|
||||
The beacon node can be halted by either using `Ctrl+c` or with the command:
|
||||
|
||||
```
|
||||
docker run -v $HOME/prysm-data:/data -p 4000:4000 -p 7000:7000 \
|
||||
--name beacon-node \
|
||||
gcr.io/prysmaticlabs/prysm/beacon-chain:latest \
|
||||
--datadir=/data \
|
||||
--no-genesis-delay \
|
||||
--grpc-gateway-port=7000
|
||||
```
|
||||
|
||||
You can stop the beacon node using `Ctrl+c` or with the following command:
|
||||
=======
|
||||
|
||||
```
|
||||
```text
|
||||
docker stop beacon-node
|
||||
```
|
||||
|
||||
To restart the beacon node, issue the command:
|
||||
To restart the beacon node, issue the following command:
|
||||
|
||||
```
|
||||
```text
|
||||
docker start -ai beacon-node
|
||||
```
|
||||
|
||||
To delete a corrupted container, issue the command:
|
||||
To delete a corrupted container, issue the following command:
|
||||
|
||||
```
|
||||
```text
|
||||
docker rm beacon-node
|
||||
```
|
||||
|
||||
To recreate a deleted container and refresh the chain database, issue the start command with an additional `--force-clear-db` parameter:
|
||||
To recreate a deleted container and refresh the chain database, issue the start command with an additional `--clear-db` parameter:
|
||||
|
||||
```
|
||||
docker run -it -v $HOME/prysm-data:/data -p 4000:4000 --name beacon-node \
|
||||
```text
|
||||
docker run -it -v $HOME/prysm:/data -p 4000:4000 -p 13000:13000 --name beacon-node \
|
||||
gcr.io/prysmaticlabs/prysm/beacon-chain:latest \
|
||||
--datadir=/data \
|
||||
--force-clear-db
|
||||
--clear-db
|
||||
```
|
||||
|
||||
**Docker on Windows:**
|
||||
#### **Docker on Windows:**
|
||||
|
||||
1) You will need to share the local drive you wish to mount to to container (e.g. C:).
|
||||
1. Enter Docker settings (right click the tray icon)
|
||||
2. Click 'Shared Drives'
|
||||
3. Select a drive to share
|
||||
4. Click 'Apply'
|
||||
1. You will need to 'share' the local drive you wish to mount to \(e.g. C:\).
|
||||
1. Enter Docker settings \(right click the tray icon\)
|
||||
2. Click 'Shared Drives'
|
||||
3. Select a drive to share
|
||||
4. Click 'Apply'
|
||||
|
||||
2. You will next need to create a directory named `/prysm/` within your selected shared Drive. This folder will be used as a local data directory for Beacon Node chain data as well as account and keystore information required by the validator. Docker will **not** create this directory if it does not exist already. For the purposes of these instructions, it is assumed that `C:` is your prior-selected shared Drive.
|
||||
3. To run the beacon node, issue the following command:
|
||||
|
||||
2) You will next need to create a directory named ```/tmp/prysm-data/``` within your selected shared Drive. This folder will be used as a local data directory for Beacon Node chain data as well as account and keystore information required by the validator. Docker will **not** create this directory if it does not exist already. For the purposes of these instructions, it is assumed that ```C:``` is your prior-selected shared Drive.
|
||||
|
||||
4) To run the beacon node, issue the command:
|
||||
```
|
||||
docker run -it -v c:/tmp/prysm-data:/data -p 4000:4000 gcr.io/prysmaticlabs/prysm/beacon-chain:latest --datadir=/data
|
||||
```text
|
||||
docker run -it -v c:/prysm/:/data -p 4000:4000 -p 13000:13000 --name beacon-node gcr.io/prysmaticlabs/prysm/beacon-chain:latest --datadir=/data --clear-db
|
||||
```
|
||||
|
||||
### Running via Bazel
|
||||
|
||||
1) To start your Beacon Node with Bazel, issue the command:
|
||||
To start your Beacon Node with Bazel, issue the following command:
|
||||
|
||||
```text
|
||||
bazel run //beacon-chain -- --clear-db --datadir=$HOME/prysm
|
||||
```
|
||||
bazel run //beacon-chain -- --datadir=/tmp/prysm-data
|
||||
```
|
||||
This will sync up the Beacon Node with the latest head block in the network. Note that the beacon node must be **completely synced** before attempting to initialise a validator client, otherwise the validator will not be able to complete the deposit and funds will be lost.
|
||||
|
||||
This will sync up the beacon node with the latest head block in the network.
|
||||
|
||||
|
||||
## Staking ETH: Running a Validator Client
|
||||
**NOTE:** The beacon node must be **completely synced** before attempting to initialise a validator client, otherwise the validator will not be able to complete the deposit and **funds will lost**.
|
||||
|
||||
Once your beacon node is up, the chain will be waiting for you to deposit 3.2 Goerli ETH into the Validator Deposit Contract to activate your validator (discussed in the section below). First though, you will need to create a validator client to connect to this node in order to stake and participate. Each validator represents 3.2 Goerli ETH being staked in the system, and it is possible to spin up as many as you desire in order to have more stake in the network.
|
||||
|
||||
### Activating Your Validator: Depositing 3.2 Goerli ETH
|
||||
## Staking ETH: Running a validator client
|
||||
|
||||
Using your validator deposit data from the previous step, follow the instructions found on https://prylabs.net/participate to make a deposit.
|
||||
Once your beacon node is up, the chain will be waiting for you to deposit 3.2 Goerli ETH into a [validator deposit contract](https://docs.prylabs.network/docs/how-prysm-works/validator-deposit-contract) in order to activate your validator \(discussed in the section below\). First though, you will need to create this validator and connect to this node to participate in consensus.
|
||||
|
||||
It will take a while for the nodes in the network to process your deposit, but once your node is active, the validator will begin doing its responsibility. In your validator client, you will be able to frequently see your validator balance as it goes up over time. Note that, should your node ever go offline for a long period, you'll start gradually losing your deposit until you are removed from the system.
|
||||
Each validator represents 3.2 Goerli ETH being staked in the system, and it is possible to spin up as many as you desire in order to have more stake in the network.
|
||||
|
||||
### Starting the validator with Bazel
|
||||
### Activating your validator: depositing 3.2 Göerli ETH
|
||||
|
||||
To begin setting up a validator, follow the instructions found on [prylabs.net](https://prylabs.net) to use the Göerli ETH faucet and make a deposit. For step-by-step assistance with the deposit page, see the [Activating a Validator ](https://docs.prylabs.network/docs/activating-a-validator)section of this documentation.
|
||||
|
||||
It will take a while for the nodes in the network to process a deposit. Once the node is active, the validator will immediately begin performing its responsibilities.
|
||||
|
||||
In your validator client, you will be able to frequently see your validator balance as it goes up over time. Note that, should your node ever go offline for a long period, a validator will start gradually losing its deposit until it is removed from the network entirely.
|
||||
|
||||
1. Open another terminal window. Enter your Prysm directory and run the validator by issuing the following command:
|
||||
```
|
||||
cd prysm
|
||||
bazel run //validator
|
||||
```
|
||||
**Congratulations, you are now running Ethereum 2.0 Phase 0!**
|
||||
|
||||
## Setting up a local ETH2 development chain
|
||||
|
||||
This section outlines the process of setting up Prysm for local testing with other Ethereum 2.0 client implementations. See the [INTEROP.md](https://github.com/prysmaticlabs/prysm/blob/master/INTEROP.md) file for advanced configuration options. For more background information on interoperability development, see [this blog post](https://blog.ethereum.org/2019/09/19/eth2-interop-in-review/).
|
||||
|
||||
### Installation and dependencies
|
||||
|
||||
To begin setting up a local ETH2 development chain, follow the **Bazel** instructions found in the [dependencies](https://github.com/prysmaticlabs/prysm#dependencies) and [installation](https://github.com/prysmaticlabs/prysm#installation) sections respectively.
|
||||
|
||||
### Running a local beacon node and validator client
|
||||
|
||||
The example below will generate a beacon genesis state and initiate Prysm with 64 validators with the genesis time set to your machines UNIX time.
|
||||
|
||||
Open up two terminal windows. In the first, issue the command:
|
||||
|
||||
```text
|
||||
bazel run //beacon-chain -- \
|
||||
--custom-genesis-delay=0 \
|
||||
--bootstrap-node= \
|
||||
--deposit-contract $(curl https://prylabs.net/contract) \
|
||||
--clear-db \
|
||||
--interop-num-validators 64 \
|
||||
--interop-eth1data-votes
|
||||
```
|
||||
|
||||
Wait a moment for the beacon chain to start. In the other terminal, issue the command:
|
||||
|
||||
```text
|
||||
bazel run //validator -- --keymanager=interop --keymanageropts='{"keys":64}'
|
||||
```
|
||||
|
||||
This command will kickstart the system with your 64 validators performing their duties accordingly.
|
||||
|
||||
## Testing Prysm
|
||||
|
||||
To run the unit tests of our system, issue the command:
|
||||
```
|
||||
|
||||
```text
|
||||
bazel test //...
|
||||
```
|
||||
|
||||
To run the linter, make sure you have [golangci-lint](https://github.com/golangci/golangci-lint) installed and then issue the command:
|
||||
```
|
||||
To run our linter, make sure you have [golangci-lint](https://github.com/golangci/golangci-lint) installed and then issue the command:
|
||||
|
||||
```text
|
||||
golangci-lint run
|
||||
```
|
||||
|
||||
|
||||
## Contributing
|
||||
We have put all of our contribution guidelines into [CONTRIBUTING.md](https://github.com/prysmaticlabs/prysm/blob/master/CONTRIBUTING.md)! Check it out to get started.
|
||||
Want to get involved? Check out our [Contribution Guide](https://docs.prylabs.network/docs/contribute/contribution-guidelines/) to learn more!
|
||||
|
||||
## License
|
||||
[GNU General Public License v3.0](https://www.gnu.org/licenses/gpl-3.0.en.html)
|
||||
|
||||
499
WORKSPACE
499
WORKSPACE
@@ -1,6 +1,48 @@
|
||||
workspace(name = "prysm")
|
||||
|
||||
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
|
||||
load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")
|
||||
|
||||
http_archive(
|
||||
name = "bazel_toolchains",
|
||||
sha256 = "b5a8039df7119d618402472f3adff8a1bd0ae9d5e253f53fcc4c47122e91a3d2",
|
||||
strip_prefix = "bazel-toolchains-2.1.1",
|
||||
urls = [
|
||||
"https://github.com/bazelbuild/bazel-toolchains/releases/download/2.1.1/bazel-toolchains-2.1.1.tar.gz",
|
||||
"https://mirror.bazel.build/github.com/bazelbuild/bazel-toolchains/archive/2.1.1.tar.gz",
|
||||
],
|
||||
)
|
||||
|
||||
http_archive(
|
||||
name = "com_grail_bazel_toolchain",
|
||||
sha256 = "0bec89e35d8a141c87f28cfc506d6d344785c8eb2ff3a453140a1fe972ada79d",
|
||||
strip_prefix = "bazel-toolchain-77a87103145f86f03f90475d19c2c8854398a444",
|
||||
urls = ["https://github.com/grailbio/bazel-toolchain/archive/77a87103145f86f03f90475d19c2c8854398a444.tar.gz"],
|
||||
)
|
||||
|
||||
load("@com_grail_bazel_toolchain//toolchain:deps.bzl", "bazel_toolchain_dependencies")
|
||||
|
||||
bazel_toolchain_dependencies()
|
||||
|
||||
load("@com_grail_bazel_toolchain//toolchain:rules.bzl", "llvm_toolchain")
|
||||
|
||||
llvm_toolchain(
|
||||
name = "llvm_toolchain",
|
||||
llvm_version = "9.0.0",
|
||||
)
|
||||
|
||||
load("@llvm_toolchain//:toolchains.bzl", "llvm_register_toolchains")
|
||||
|
||||
llvm_register_toolchains()
|
||||
|
||||
load("@prysm//tools/cross-toolchain:prysm_toolchains.bzl", "configure_prysm_toolchains")
|
||||
|
||||
configure_prysm_toolchains()
|
||||
|
||||
load("@prysm//tools/cross-toolchain:rbe_toolchains_config.bzl", "rbe_toolchains_config")
|
||||
|
||||
rbe_toolchains_config()
|
||||
|
||||
http_archive(
|
||||
name = "bazel_skylib",
|
||||
sha256 = "2ea8a5ed2b448baf4a6855d3ce049c4c452a6470b1efd1504fdb7c1c134d220a",
|
||||
@@ -8,21 +50,12 @@ http_archive(
|
||||
url = "https://github.com/bazelbuild/bazel-skylib/archive/0.8.0.tar.gz",
|
||||
)
|
||||
|
||||
http_archive(
|
||||
name = "io_bazel_rules_go",
|
||||
sha256 = "513c12397db1bc9aa46dd62f02dd94b49a9b5d17444d49b5a04c5a89f3053c1c",
|
||||
urls = [
|
||||
"https://storage.googleapis.com/bazel-mirror/github.com/bazelbuild/rules_go/releases/download/v0.19.5/rules_go-v0.19.5.tar.gz",
|
||||
"https://github.com/bazelbuild/rules_go/releases/download/v0.19.5/rules_go-v0.19.5.tar.gz",
|
||||
],
|
||||
)
|
||||
|
||||
http_archive(
|
||||
name = "bazel_gazelle",
|
||||
sha256 = "7fc87f4170011201b1690326e8c16c5d802836e3a0d617d8f75c3af2b23180c4",
|
||||
sha256 = "d8c45ee70ec39a57e7a05e5027c32b1576cc7f16d9dd37135b0eddde45cf1b10",
|
||||
urls = [
|
||||
"https://storage.googleapis.com/bazel-mirror/github.com/bazelbuild/bazel-gazelle/releases/download/0.18.2/bazel-gazelle-0.18.2.tar.gz",
|
||||
"https://github.com/bazelbuild/bazel-gazelle/releases/download/0.18.2/bazel-gazelle-0.18.2.tar.gz",
|
||||
"https://storage.googleapis.com/bazel-mirror/github.com/bazelbuild/bazel-gazelle/releases/download/v0.20.0/bazel-gazelle-v0.20.0.tar.gz",
|
||||
"https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.20.0/bazel-gazelle-v0.20.0.tar.gz",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -35,9 +68,18 @@ http_archive(
|
||||
|
||||
http_archive(
|
||||
name = "io_bazel_rules_docker",
|
||||
sha256 = "9ff889216e28c918811b77999257d4ac001c26c1f7c7fb17a79bc28abf74182e",
|
||||
strip_prefix = "rules_docker-0.10.1",
|
||||
url = "https://github.com/bazelbuild/rules_docker/archive/v0.10.1.tar.gz",
|
||||
sha256 = "dc97fccceacd4c6be14e800b2a00693d5e8d07f69ee187babfd04a80a9f8e250",
|
||||
strip_prefix = "rules_docker-0.14.1",
|
||||
url = "https://github.com/bazelbuild/rules_docker/archive/v0.14.1.tar.gz",
|
||||
)
|
||||
|
||||
http_archive(
|
||||
name = "io_bazel_rules_go",
|
||||
sha256 = "e6a6c016b0663e06fa5fccf1cd8152eab8aa8180c583ec20c872f4f9953a7ac5",
|
||||
urls = [
|
||||
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.22.1/rules_go-v0.22.1.tar.gz",
|
||||
"https://github.com/bazelbuild/rules_go/releases/download/v0.22.1/rules_go-v0.22.1.tar.gz",
|
||||
],
|
||||
)
|
||||
|
||||
http_archive(
|
||||
@@ -50,21 +92,21 @@ git_repository(
|
||||
name = "graknlabs_bazel_distribution",
|
||||
commit = "962f3a7e56942430c0ec120c24f9e9f2a9c2ce1a",
|
||||
remote = "https://github.com/graknlabs/bazel-distribution",
|
||||
shallow_since = "1563544980 +0300",
|
||||
shallow_since = "1569509514 +0300",
|
||||
)
|
||||
|
||||
# Override default import in rules_go with special patch until
|
||||
# https://github.com/gogo/protobuf/pull/582 is merged.
|
||||
git_repository(
|
||||
name = "com_github_gogo_protobuf",
|
||||
commit = "ba06b47c162d49f2af050fb4c75bcbc86a159d5c", # v1.2.1, as of 2019-03-03
|
||||
commit = "5628607bb4c51c3157aacc3a50f0ab707582b805",
|
||||
patch_args = ["-p1"],
|
||||
patches = [
|
||||
"@io_bazel_rules_go//third_party:com_github_gogo_protobuf-gazelle.patch",
|
||||
"//third_party:com_github_gogo_protobuf-equal.patch",
|
||||
],
|
||||
remote = "https://github.com/gogo/protobuf",
|
||||
shallow_since = "1550471403 +0200",
|
||||
shallow_since = "1571033717 +0200",
|
||||
# gazelle args: -go_prefix github.com/gogo/protobuf -proto legacy
|
||||
)
|
||||
|
||||
@@ -75,6 +117,10 @@ load(
|
||||
|
||||
container_repositories()
|
||||
|
||||
load("@prysm//third_party/herumi:herumi.bzl", "bls_dependencies")
|
||||
|
||||
bls_dependencies()
|
||||
|
||||
load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies")
|
||||
|
||||
go_rules_dependencies()
|
||||
@@ -94,8 +140,19 @@ load(
|
||||
_go_image_repos = "repositories",
|
||||
)
|
||||
|
||||
# Golang images
|
||||
# This is using gcr.io/distroless/base
|
||||
_go_image_repos()
|
||||
|
||||
# CC images
|
||||
# This is using gcr.io/distroless/base
|
||||
load(
|
||||
"@io_bazel_rules_docker//cc:image.bzl",
|
||||
_cc_image_repos = "repositories",
|
||||
)
|
||||
|
||||
_cc_image_repos()
|
||||
|
||||
http_archive(
|
||||
name = "prysm_testnet_site",
|
||||
build_file_content = """
|
||||
@@ -104,16 +161,16 @@ proto_library(
|
||||
srcs = ["src/proto/faucet.proto"],
|
||||
visibility = ["//visibility:public"],
|
||||
)""",
|
||||
sha256 = "1184e44a7a9b8b172e68e82c02cc3b15a80122340e05a92bd1edeafe5e68debe",
|
||||
strip_prefix = "prysm-testnet-site-ec6a4a4e421bf4445845969167d06e93ee8d7acc",
|
||||
url = "https://github.com/prestonvanloon/prysm-testnet-site/archive/ec6a4a4e421bf4445845969167d06e93ee8d7acc.tar.gz",
|
||||
sha256 = "29742136ff9faf47343073c4569a7cf21b8ed138f726929e09e3c38ab83544f7",
|
||||
strip_prefix = "prysm-testnet-site-5c711600f0a77fc553b18cf37b880eaffef4afdb",
|
||||
url = "https://github.com/prestonvanloon/prysm-testnet-site/archive/5c711600f0a77fc553b18cf37b880eaffef4afdb.tar.gz",
|
||||
)
|
||||
|
||||
http_archive(
|
||||
name = "io_kubernetes_build",
|
||||
sha256 = "5ab110312cd7665a1940ba0523b67b9fbb6053beb9dd4e147643867bebd7e809",
|
||||
strip_prefix = "repo-infra-db6ceb5f992254db76af7c25db2edc5469b5ea82",
|
||||
url = "https://github.com/kubernetes/repo-infra/archive/db6ceb5f992254db76af7c25db2edc5469b5ea82.tar.gz",
|
||||
sha256 = "b84fbd1173acee9d02a7d3698ad269fdf4f7aa081e9cecd40e012ad0ad8cfa2a",
|
||||
strip_prefix = "repo-infra-6537f2101fb432b679f3d103ee729dd8ac5d30a0",
|
||||
url = "https://github.com/kubernetes/repo-infra/archive/6537f2101fb432b679f3d103ee729dd8ac5d30a0.tar.gz",
|
||||
)
|
||||
|
||||
http_archive(
|
||||
@@ -128,8 +185,8 @@ filegroup(
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
""",
|
||||
sha256 = "5c5b65a961b5e7251435efc9548648b45142a07993ad3e100850c240cb76e9af",
|
||||
url = "https://github.com/ethereum/eth2.0-spec-tests/releases/download/v0.9.0/general.tar.gz",
|
||||
sha256 = "72c6ee3c20d19736b1203f364a6eb0ddee2c173073e20bee2beccd288fdc42be",
|
||||
url = "https://github.com/ethereum/eth2.0-spec-tests/releases/download/v0.9.4/general.tar.gz",
|
||||
)
|
||||
|
||||
http_archive(
|
||||
@@ -144,8 +201,8 @@ filegroup(
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
""",
|
||||
sha256 = "3b5f0168af4331d09da52bebc26609def9d11be3e6c784ce7c3df3596617808d",
|
||||
url = "https://github.com/ethereum/eth2.0-spec-tests/releases/download/v0.9.0/minimal.tar.gz",
|
||||
sha256 = "a3cc860a3679f6f62ee57b65677a9b48a65fdebb151cdcbf50f23852632845ef",
|
||||
url = "https://github.com/ethereum/eth2.0-spec-tests/releases/download/v0.9.4/minimal.tar.gz",
|
||||
)
|
||||
|
||||
http_archive(
|
||||
@@ -160,8 +217,8 @@ filegroup(
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
""",
|
||||
sha256 = "f3ff68508dfe9696f23506daf0ca895cda955e30398741e00cffa33a01b0565c",
|
||||
url = "https://github.com/ethereum/eth2.0-spec-tests/releases/download/v0.9.0/mainnet.tar.gz",
|
||||
sha256 = "8fc1b6220973ca30fa4ddc4ed24d66b1719abadca8bedb5e06c3bd9bc0df28e9",
|
||||
url = "https://github.com/ethereum/eth2.0-spec-tests/releases/download/v0.9.4/mainnet.tar.gz",
|
||||
)
|
||||
|
||||
http_archive(
|
||||
@@ -183,7 +240,7 @@ go_repository(
|
||||
|
||||
git_repository(
|
||||
name = "com_google_protobuf",
|
||||
commit = "09745575a923640154bcf307fba8aedff47f240a",
|
||||
commit = "4cf5bfee9546101d98754d23ff378ff718ba8438",
|
||||
remote = "https://github.com/protocolbuffers/protobuf",
|
||||
shallow_since = "1558721209 -0700",
|
||||
)
|
||||
@@ -192,6 +249,30 @@ load("@com_google_protobuf//:protobuf_deps.bzl", "protobuf_deps")
|
||||
|
||||
protobuf_deps()
|
||||
|
||||
# Group the sources of the library so that CMake rule have access to it
|
||||
all_content = """filegroup(name = "all", srcs = glob(["**"]), visibility = ["//visibility:public"])"""
|
||||
|
||||
http_archive(
|
||||
name = "rules_foreign_cc",
|
||||
sha256 = "450563dc2938f38566a59596bb30a3e905fbbcc35b3fff5a1791b122bc140465",
|
||||
strip_prefix = "rules_foreign_cc-456425521973736ef346d93d3d6ba07d807047df",
|
||||
url = "https://github.com/bazelbuild/rules_foreign_cc/archive/456425521973736ef346d93d3d6ba07d807047df.zip",
|
||||
)
|
||||
|
||||
load("@rules_foreign_cc//:workspace_definitions.bzl", "rules_foreign_cc_dependencies")
|
||||
|
||||
rules_foreign_cc_dependencies([
|
||||
"@prysm//:built_cmake_toolchain",
|
||||
])
|
||||
|
||||
http_archive(
|
||||
name = "librdkafka",
|
||||
build_file_content = all_content,
|
||||
sha256 = "f6be27772babfdacbbf2e4c5432ea46c57ef5b7d82e52a81b885e7b804781fd6",
|
||||
strip_prefix = "librdkafka-1.2.1",
|
||||
urls = ["https://github.com/edenhill/librdkafka/archive/v1.2.1.tar.gz"],
|
||||
)
|
||||
|
||||
# External dependencies
|
||||
|
||||
go_repository(
|
||||
@@ -209,14 +290,12 @@ go_repository(
|
||||
|
||||
go_repository(
|
||||
name = "com_github_prysmaticlabs_go_ssz",
|
||||
commit = "58b2f86b0f02f06e634db06dee0c838ad41849f8",
|
||||
commit = "e24db4d9e9637cf88ee9e4a779e339a1686a84ee",
|
||||
importpath = "github.com/prysmaticlabs/go-ssz",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "com_github_urfave_cli",
|
||||
commit = "e6cf83ec39f6e1158ced1927d4ed14578fda8edb", # v1.21.0
|
||||
importpath = "github.com/urfave/cli",
|
||||
patch_args = ["-p1"],
|
||||
patches = [
|
||||
"//third_party:com_github_prysmaticlabs_go_ssz.patch",
|
||||
],
|
||||
)
|
||||
|
||||
go_repository(
|
||||
@@ -245,7 +324,7 @@ go_repository(
|
||||
|
||||
go_repository(
|
||||
name = "com_github_libp2p_go_libp2p",
|
||||
commit = "c1687281a5c19b61ee5e0dc07fad15697c3bde94", # v0.4.0
|
||||
commit = "76944c4fc848530530f6be36fb22b70431ca506c", # v0.5.1
|
||||
importpath = "github.com/libp2p/go-libp2p",
|
||||
)
|
||||
|
||||
@@ -264,7 +343,7 @@ go_repository(
|
||||
|
||||
go_repository(
|
||||
name = "com_github_multiformats_go_multiaddr",
|
||||
commit = "f96df18bf0c217c77f6cc0f9e810a178cea12f38", # v0.1.1
|
||||
commit = "8c6cee15b340d7210c30a82a19231ee333b69b1d", # v0.2.0
|
||||
importpath = "github.com/multiformats/go-multiaddr",
|
||||
)
|
||||
|
||||
@@ -276,7 +355,7 @@ go_repository(
|
||||
|
||||
go_repository(
|
||||
name = "com_github_multiformats_go_multihash",
|
||||
commit = "249ead2008065c476a2ee45e8e75e8b85d846a72", # v0.0.8
|
||||
commit = "6b39927dce4869bc1726861b65ada415ee1f7fc7", # v0.0.13
|
||||
importpath = "github.com/multiformats/go-multihash",
|
||||
)
|
||||
|
||||
@@ -294,13 +373,13 @@ go_repository(
|
||||
|
||||
go_repository(
|
||||
name = "com_github_libp2p_go_libp2p_peerstore",
|
||||
commit = "f4c9af195c69379f1cf284dba31985482a56f78e", # v0.1.3
|
||||
commit = "dee88d7532302c001604811fa3fbb5a7f83225e7", # v0.1.4
|
||||
importpath = "github.com/libp2p/go-libp2p-peerstore",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "com_github_libp2p_go_libp2p_circuit",
|
||||
commit = "0305622f3f146485f0ff6df0ae6c010787331ca7", # v0.1.3
|
||||
commit = "61af9db0dd78e01e53b9fb044be44dcc7255667e", # v0.1.4
|
||||
importpath = "github.com/libp2p/go-libp2p-circuit",
|
||||
)
|
||||
|
||||
@@ -379,7 +458,7 @@ go_repository(
|
||||
go_repository(
|
||||
name = "com_github_libp2p_go_libp2p_secio",
|
||||
build_file_proto_mode = "disable_global",
|
||||
commit = "7c3f577d99debb69c3b68be35fe14d9445a6569c", # v0.2.0
|
||||
commit = "6f83420d5715a8b1c4082aaf9c5c7785923e702e", # v0.2.1
|
||||
importpath = "github.com/libp2p/go-libp2p-secio",
|
||||
)
|
||||
|
||||
@@ -397,7 +476,7 @@ go_repository(
|
||||
|
||||
go_repository(
|
||||
name = "com_github_jbenet_goprocess",
|
||||
commit = "1dc239722b2ba3784472fb5301f62640fa5a8bc3", # v0.1.3
|
||||
commit = "7f9d9ed286badffcf2122cfeb383ec37daf92508",
|
||||
importpath = "github.com/jbenet/goprocess",
|
||||
)
|
||||
|
||||
@@ -415,7 +494,7 @@ go_repository(
|
||||
|
||||
go_repository(
|
||||
name = "com_github_libp2p_go_libp2p_nat",
|
||||
commit = "c50c291a61bceccb914366d93eb24f58594e9134", # v0.0.4
|
||||
commit = "873ef75f6ab6273821d77197660c1fb3af4cc02e", # v0.0.5
|
||||
importpath = "github.com/libp2p/go-libp2p-nat",
|
||||
)
|
||||
|
||||
@@ -433,7 +512,7 @@ go_repository(
|
||||
|
||||
go_repository(
|
||||
name = "com_github_mattn_go_isatty",
|
||||
commit = "e1f7b56ace729e4a73a29a6b4fac6cd5fcda7ab3", # v0.0.9
|
||||
commit = "7b513a986450394f7bbf1476909911b3aa3a55ce",
|
||||
importpath = "github.com/mattn/go-isatty",
|
||||
)
|
||||
|
||||
@@ -517,7 +596,7 @@ go_repository(
|
||||
|
||||
go_repository(
|
||||
name = "com_github_libp2p_go_flow_metrics",
|
||||
commit = "1f5b3acc846b2c8ce4c4e713296af74f5c24df55", # v0.0.1
|
||||
commit = "e5a6a4db89199d99b2a74b8da198277a826241d8", # v0.0.3
|
||||
importpath = "github.com/libp2p/go-flow-metrics",
|
||||
)
|
||||
|
||||
@@ -541,14 +620,15 @@ go_repository(
|
||||
|
||||
go_repository(
|
||||
name = "com_github_libp2p_go_ws_transport",
|
||||
commit = "8cca0dbc7f3533b122bd2cbeaa4a9b07c2913b9d", # v0.1.2
|
||||
commit = "370d1a3a7420e27423417c37630cad3754ad5702", # v0.2.0
|
||||
importpath = "github.com/libp2p/go-ws-transport",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "org_golang_x_crypto",
|
||||
commit = "4def268fd1a49955bfb3dda92fe3db4f924f2285",
|
||||
importpath = "golang.org/x/crypto",
|
||||
sum = "h1:1ZiEyfaQIg3Qh0EoqpwAakHVhecoE5wlSg5GjnafJGw=",
|
||||
version = "v0.0.0-20200221231518-2aa609cf4a9d",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
@@ -581,6 +661,12 @@ go_repository(
|
||||
importpath = "github.com/syndtr/goleveldb",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "com_github_emicklei_dot",
|
||||
commit = "5810de2f2ab7aac98cd7bcbd59147a7ca6071768",
|
||||
importpath = "github.com/emicklei/dot",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "com_github_libp2p_go_libp2p_blankhost",
|
||||
commit = "da3b45205dfce3ef3926054ffd5dee76f5903382", # v0.1.4
|
||||
@@ -589,22 +675,23 @@ go_repository(
|
||||
|
||||
go_repository(
|
||||
name = "io_opencensus_go",
|
||||
commit = "7bbec1755a8162b5923fc214a494773a701d506a", # v0.22.0
|
||||
importpath = "go.opencensus.io",
|
||||
sum = "h1:75k/FF0Q2YM8QYo07VPddOLBslDt1MZOdEslOHvmzAs=",
|
||||
version = "v0.22.2",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "io_opencensus_go_contrib_exporter_jaeger",
|
||||
commit = "5b8293c22f362562285c2acbc52f4a1870a47a33",
|
||||
importpath = "contrib.go.opencensus.io/exporter/jaeger",
|
||||
remote = "http://github.com/census-ecosystem/opencensus-go-exporter-jaeger",
|
||||
vcs = "git",
|
||||
sum = "h1:nhTv/Ry3lGmqbJ/JGvCjWxBl5ozRfqo86Ngz59UAlfk=",
|
||||
version = "v0.2.0",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "org_golang_google_api",
|
||||
commit = "aac82e61c0c8fe133c297b4b59316b9f481e1f0a", # v0.6.0
|
||||
importpath = "google.golang.org/api",
|
||||
sum = "h1:uMf5uLi4eQMRrMKhCplNik4U4H8Z6C1br3zOtAa/aDE=",
|
||||
version = "v0.14.0",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
@@ -639,19 +726,19 @@ go_repository(
|
||||
|
||||
go_repository(
|
||||
name = "com_github_prometheus_client_model",
|
||||
commit = "fd36f4220a901265f90734c3183c5f0c91daa0b8",
|
||||
commit = "7bc5445566f0fe75b15de23e6b93886e982d7bf9",
|
||||
importpath = "github.com/prometheus/client_model",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "com_github_prometheus_common",
|
||||
commit = "287d3e634a1e550c9e463dd7e5a75a422c614505", # v0.7.0
|
||||
commit = "d978bcb1309602d68bb4ba69cf3f8ed900e07308",
|
||||
importpath = "github.com/prometheus/common",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "com_github_prometheus_procfs",
|
||||
commit = "499c85531f756d1129edd26485a5f73871eeb308", # v0.0.5
|
||||
commit = "6d489fc7f1d9cd890a250f3ea3431b1744b9623f",
|
||||
importpath = "github.com/prometheus/procfs",
|
||||
)
|
||||
|
||||
@@ -667,10 +754,12 @@ go_repository(
|
||||
importpath = "github.com/matttproud/golang_protobuf_extensions",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "com_github_boltdb_bolt",
|
||||
commit = "2f1ce7a837dcb8da3ec595b1dac9d0632f0f99e8", # v1.3.1
|
||||
importpath = "github.com/boltdb/bolt",
|
||||
http_archive(
|
||||
name = "com_github_boltdb_bolt", # v1.3.1
|
||||
build_file = "//third_party:boltdb/bolt.BUILD",
|
||||
sha256 = "95dc5842dab55f7519b7002bbec648321277b5d6f0ad59aab509ee59313b6386",
|
||||
strip_prefix = "bolt-2f1ce7a837dcb8da3ec595b1dac9d0632f0f99e8",
|
||||
urls = ["https://github.com/boltdb/bolt/archive/2f1ce7a837dcb8da3ec595b1dac9d0632f0f99e8.tar.gz"],
|
||||
)
|
||||
|
||||
go_repository(
|
||||
@@ -694,8 +783,9 @@ go_repository(
|
||||
go_repository(
|
||||
name = "com_github_libp2p_go_libp2p_pubsub",
|
||||
build_file_proto_mode = "disable_global",
|
||||
commit = "9f04364996b415168f0e0d7e9fc82272fbed4005", # v0.1.1
|
||||
importpath = "github.com/libp2p/go-libp2p-pubsub",
|
||||
sum = "h1:+Iz8zeI1KO6HX8cexU9g98cCGjae52Vujeg087SkuME=",
|
||||
version = "v0.2.6-0.20191219233527-97846b574895",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
@@ -719,7 +809,7 @@ go_repository(
|
||||
|
||||
go_repository(
|
||||
name = "com_github_ipfs_go_datastore",
|
||||
commit = "d0ca9bc39f9d5b77bd602abe1a897473e105be7f", # v0.1.1
|
||||
commit = "e7a498916ccca1b0b40fb08630659cd4d68a01e8", # v0.3.1
|
||||
importpath = "github.com/ipfs/go-datastore",
|
||||
)
|
||||
|
||||
@@ -731,14 +821,14 @@ go_repository(
|
||||
|
||||
go_repository(
|
||||
name = "com_github_ipfs_go_cid",
|
||||
commit = "9bb7ea69202c6c9553479eb355ab8a8a97d43a2e", # v0.0.3
|
||||
commit = "3da5bbbe45260437a44f777e6b2e5effa2606901", # v0.0.4
|
||||
importpath = "github.com/ipfs/go-cid",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "com_github_libp2p_go_libp2p_record",
|
||||
build_file_proto_mode = "disable_global",
|
||||
commit = "3f535b1abcdf698e11ac16f618c2e64c4e5a114a", # v0.1.1
|
||||
commit = "8ccbca30634f70a8f03d133ac64cbf245d079e1e", # v0.1.2
|
||||
importpath = "github.com/libp2p/go-libp2p-record",
|
||||
)
|
||||
|
||||
@@ -750,7 +840,7 @@ go_repository(
|
||||
|
||||
go_repository(
|
||||
name = "com_github_libp2p_go_libp2p_kbucket",
|
||||
commit = "8b77351e0f784a5f71749d23000897c8aee71a76", # v0.2.1
|
||||
commit = "a0cac6f63c491504b18eeba24be2ac0bbbfa0e5c", # v0.2.3
|
||||
importpath = "github.com/libp2p/go-libp2p-kbucket",
|
||||
)
|
||||
|
||||
@@ -774,7 +864,7 @@ go_repository(
|
||||
|
||||
go_repository(
|
||||
name = "com_github_hashicorp_golang_lru",
|
||||
commit = "7f827b33c0f158ec5dfbba01bb0b14a4541fd81d", # v0.5.3
|
||||
commit = "14eae340515388ca95aa8e7b86f0de668e981f54", # v0.5.4
|
||||
importpath = "github.com/hashicorp/golang-lru",
|
||||
)
|
||||
|
||||
@@ -786,13 +876,14 @@ go_repository(
|
||||
|
||||
go_repository(
|
||||
name = "com_github_libp2p_go_libp2p_discovery",
|
||||
commit = "d248d63b0af8c023307da18ad7000a12020e06f0", # v0.1.0
|
||||
importpath = "github.com/libp2p/go-libp2p-discovery",
|
||||
sum = "h1:1p3YSOq7VsgaL+xVHPi8XAmtGyas6D2J6rWBEfz/aiY=",
|
||||
version = "v0.2.0",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "com_github_libp2p_go_libp2p_autonat",
|
||||
commit = "3464f9b4f7bfbd7bb008813eacb626c7ab7fb9a3", # v0.1.0
|
||||
commit = "60bf479cf6bc73c939f4db97ad711756e949e522", # v0.1.1
|
||||
importpath = "github.com/libp2p/go-libp2p-autonat",
|
||||
)
|
||||
|
||||
@@ -818,7 +909,7 @@ go_repository(
|
||||
go_repository(
|
||||
name = "io_k8s_apimachinery",
|
||||
build_file_proto_mode = "disable_global",
|
||||
commit = "bfcf53abc9f82bad3e534fcb1c36599d3c989ebf",
|
||||
commit = "79c2a76c473a20cdc4ce59cae4b72529b5d9d16b", # v0.17.2
|
||||
importpath = "k8s.io/apimachinery",
|
||||
)
|
||||
|
||||
@@ -830,8 +921,9 @@ go_repository(
|
||||
|
||||
go_repository(
|
||||
name = "com_github_google_gofuzz",
|
||||
commit = "f140a6486e521aad38f5917de355cbf147cc0496", # v1.0.0
|
||||
importpath = "github.com/google/gofuzz",
|
||||
sum = "h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=",
|
||||
version = "v1.0.0",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
@@ -899,7 +991,7 @@ go_repository(
|
||||
|
||||
go_repository(
|
||||
name = "com_google_cloud_go",
|
||||
commit = "264def2dd949cdb8a803bb9f50fa29a67b798a6a", # v0.46.3
|
||||
commit = "6daa679260d92196ffca2362d652c924fdcb7a22", # v0.52.0
|
||||
importpath = "cloud.google.com/go",
|
||||
)
|
||||
|
||||
@@ -941,7 +1033,7 @@ go_repository(
|
||||
|
||||
go_repository(
|
||||
name = "com_github_pkg_errors",
|
||||
commit = "ba968bfe8b2f7e042a574c888954fccecfa385b4", # v0.8.1
|
||||
commit = "614d223910a179a466c1767a985424175c39b465", # v0.9.1
|
||||
importpath = "github.com/pkg/errors",
|
||||
)
|
||||
|
||||
@@ -983,7 +1075,7 @@ go_repository(
|
||||
|
||||
go_repository(
|
||||
name = "com_github_apache_thrift",
|
||||
commit = "384647d290e2e4a55a14b1b7ef1b7e66293a2c33", # v0.12.0
|
||||
commit = "cecee50308fc7e6f77f55b3fd906c1c6c471fa2f", # v0.13.0
|
||||
importpath = "github.com/apache/thrift",
|
||||
)
|
||||
|
||||
@@ -993,15 +1085,9 @@ go_repository(
|
||||
importpath = "github.com/grpc-ecosystem/go-grpc-prometheus",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "com_github_karlseguin_ccache",
|
||||
commit = "ec06cd93a07565b373789b0078ba88fe697fddd9", # v2.0.3
|
||||
importpath = "github.com/karlseguin/ccache",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "com_github_libp2p_go_libp2p_connmgr",
|
||||
commit = "b46e9bdbcd8436b4fe4b30a53ec913c07e5e09c9", # v0.1.1
|
||||
commit = "273839464339f1885413b385feee35301c5cb76f", # v0.2.1
|
||||
importpath = "github.com/libp2p/go-libp2p-connmgr",
|
||||
)
|
||||
|
||||
@@ -1032,13 +1118,13 @@ go_repository(
|
||||
go_repository(
|
||||
name = "com_github_libp2p_go_libp2p_core",
|
||||
build_file_proto_mode = "disable_global",
|
||||
commit = "26b960839df84e2783f8f6125fa822a9978c2b8f", # v0.2.3
|
||||
commit = "f7f724862d85ec9f9ee7c58b0f79836abdee8cd9", # v0.3.0
|
||||
importpath = "github.com/libp2p/go-libp2p-core",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "com_github_libp2p_go_libp2p_testing",
|
||||
commit = "1fa303da162dc57872d8fc553497f7602aa11c10", # v0.1.0
|
||||
commit = "82713a62880a5fe72d438bd58d737f0d3c4b7f36", # v0.1.1
|
||||
importpath = "github.com/libp2p/go-libp2p-testing",
|
||||
)
|
||||
|
||||
@@ -1066,6 +1152,12 @@ go_repository(
|
||||
importpath = "github.com/multiformats/go-multiaddr-fmt",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "com_github_multiformats_go_varint",
|
||||
commit = "0aa688902217dff2cba0f678c7e4a0f547b4983e",
|
||||
importpath = "github.com/multiformats/go-varint",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "com_github_libp2p_go_yamux",
|
||||
commit = "663972181d409e7263040f0b668462f87c85e1bd", # v1.2.3
|
||||
@@ -1074,7 +1166,7 @@ go_repository(
|
||||
|
||||
go_repository(
|
||||
name = "com_github_libp2p_go_nat",
|
||||
commit = "d13fdefb3bbb2fde2c6fc090a7ea992cec8b26df", # v0.0.3
|
||||
commit = "4b355d438085545df006ad9349686f30d8d37a27", # v0.0.4
|
||||
importpath = "github.com/libp2p/go-nat",
|
||||
)
|
||||
|
||||
@@ -1109,8 +1201,9 @@ go_ssz_dependencies()
|
||||
go_repository(
|
||||
name = "org_golang_google_grpc",
|
||||
build_file_proto_mode = "disable",
|
||||
commit = "1d89a3c832915b2314551c1d2a506874d62e53f7", # v1.22.0
|
||||
importpath = "google.golang.org/grpc",
|
||||
sum = "h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk=",
|
||||
version = "v1.27.1",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
@@ -1139,7 +1232,7 @@ go_repository(
|
||||
|
||||
go_repository(
|
||||
name = "com_github_googleapis_gnostic",
|
||||
commit = "ab0dd09aa10e2952b28e12ecd35681b20463ebab", # v0.3.1
|
||||
commit = "896953e6749863beec38e27029c804e88c3144b8", # v0.4.1
|
||||
importpath = "github.com/googleapis/gnostic",
|
||||
)
|
||||
|
||||
@@ -1163,7 +1256,7 @@ go_repository(
|
||||
|
||||
go_repository(
|
||||
name = "com_github_google_go_cmp",
|
||||
commit = "2d0692c2e9617365a95b295612ac0d4415ba4627", # v0.3.1
|
||||
commit = "5a6f75716e1203a923a78c9efb94089d857df0f6", # v0.4.0
|
||||
importpath = "github.com/google/go-cmp",
|
||||
)
|
||||
|
||||
@@ -1194,21 +1287,26 @@ go_repository(
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "com_github_googleapis_gnostic",
|
||||
commit = "25d8b0b6698593f520d9d8dc5a88e6b16ca9ecc0",
|
||||
importpath = "github.com/googleapis/gnostic",
|
||||
name = "com_github_patrickmn_go_cache",
|
||||
commit = "46f407853014144407b6c2ec7ccc76bf67958d93",
|
||||
importpath = "github.com/patrickmn/go-cache",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "com_github_prysmaticlabs_ethereumapis",
|
||||
commit = "c7f1fd03716c94dcc287a0d35905ed35b8a0afe1",
|
||||
commit = "25f267e475788bf8e5e01cb9d73cfd0c87020822",
|
||||
importpath = "github.com/prysmaticlabs/ethereumapis",
|
||||
patch_args = ["-p1"],
|
||||
patches = [
|
||||
"//third_party:com_github_prysmaticlabs_ethereumapis-tags.patch",
|
||||
],
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "com_github_cloudflare_roughtime",
|
||||
commit = "d41fdcee702eb3e5c3296288a453b9340184d37e",
|
||||
importpath = "github.com/cloudflare/roughtime",
|
||||
sum = "h1:jeSxE3fepJdhASERvBHI6RFkMhISv6Ir2JUybYLIVXs=",
|
||||
version = "v0.0.0-20200205191924-a69ef1dab727",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
@@ -1238,13 +1336,6 @@ go_repository(
|
||||
version = "v0.0.0-20161005185022-dfcf01d20ee9",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "com_github_kilic_bls12-381",
|
||||
importpath = "github.com/kilic/bls12-381",
|
||||
sum = "h1:hCD4IWWYsETkACK7U+isYppKfB/6d54sBkCDk3k+w2U=",
|
||||
version = "v0.0.0-20191005202515-c798d6202457",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "com_github_minio_highwayhash",
|
||||
importpath = "github.com/minio/highwayhash",
|
||||
@@ -1259,6 +1350,15 @@ go_repository(
|
||||
version = "v0.0.0-20191002040644-a1355ae1e2c3",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "in_gopkg_confluentinc_confluent_kafka_go_v1",
|
||||
importpath = "gopkg.in/confluentinc/confluent-kafka-go.v1",
|
||||
patch_args = ["-p1"],
|
||||
patches = ["//third_party:in_gopkg_confluentinc_confluent_kafka_go_v1.patch"],
|
||||
sum = "h1:roy97m/3wj9/o8OuU3sZ5wildk30ep38k2x8nhNbKrI=",
|
||||
version = "v1.1.0",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "com_github_naoina_toml",
|
||||
importpath = "github.com/naoina/toml",
|
||||
@@ -1273,13 +1373,6 @@ go_repository(
|
||||
version = "v0.10.5",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "in_gopkg_urfave_cli_v1",
|
||||
importpath = "gopkg.in/urfave/cli.v1",
|
||||
sum = "h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0=",
|
||||
version = "v1.20.0",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "com_github_naoina_go_stringutil",
|
||||
importpath = "github.com/naoina/go-stringutil",
|
||||
@@ -1349,3 +1442,195 @@ go_repository(
|
||||
sum = "h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=",
|
||||
version = "v1.7.0",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "com_github_protolambda_zssz",
|
||||
commit = "632f11e5e281660402bd0ac58f76090f3503def0",
|
||||
importpath = "github.com/protolambda/zssz",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "com_github_googleapis_gnostic",
|
||||
commit = "25d8b0b6698593f520d9d8dc5a88e6b16ca9ecc0",
|
||||
importpath = "github.com/googleapis/gnostic",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "com_github_googleapis_gax_go_v2",
|
||||
importpath = "github.com/googleapis/gax-go/v2",
|
||||
sum = "h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=",
|
||||
version = "v2.0.5",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "com_github_golang_groupcache",
|
||||
importpath = "github.com/golang/groupcache",
|
||||
sum = "h1:uHTyIjqVhYRhLbJ8nIiOJHkEZZ+5YoOsAbD3sk82NiE=",
|
||||
version = "v0.0.0-20191027212112-611e8accdfc9",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "com_github_uber_jaeger_client_go",
|
||||
importpath = "github.com/uber/jaeger-client-go",
|
||||
sum = "h1:HgqpYBng0n7tLJIlyT4kPCIv5XgCsF+kai1NnnrJzEU=",
|
||||
version = "v2.20.1+incompatible",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "com_github_dgraph_io_ristretto",
|
||||
commit = "99d1bbbf28e64530eb246be0568fc7709a35ebdd", # v0.0.1
|
||||
importpath = "github.com/dgraph-io/ristretto",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "com_github_cespare_xxhash",
|
||||
commit = "d7df74196a9e781ede915320c11c378c1b2f3a1f",
|
||||
importpath = "github.com/cespare/xxhash",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "com_github_ipfs_go_detect_race",
|
||||
importpath = "github.com/ipfs/go-detect-race",
|
||||
sum = "h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk=",
|
||||
version = "v0.0.1",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "com_github_kevinms_leakybucket_go",
|
||||
importpath = "github.com/kevinms/leakybucket-go",
|
||||
sum = "h1:oq6BiN7v0MfWCRcJAxSV+hesVMAAV8COrQbTjYNnso4=",
|
||||
version = "v0.0.0-20190611015032-8a3d0352aa79",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "com_github_wealdtech_go_eth2_wallet",
|
||||
commit = "6970d62e60d86fdae3c3e510e800e8a60d755a7d",
|
||||
importpath = "github.com/wealdtech/go-eth2-wallet",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "com_github_wealdtech_go_eth2_wallet_hd",
|
||||
commit = "ce0a252a01c621687e9786a64899cfbfe802ba73",
|
||||
importpath = "github.com/wealdtech/go-eth2-wallet-hd",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "com_github_wealdtech_go_eth2_wallet_nd",
|
||||
commit = "12c8c41cdbd16797ff292e27f58e126bb89e9706",
|
||||
importpath = "github.com/wealdtech/go-eth2-wallet-nd",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "com_github_wealdtech_go_eth2_wallet_store_filesystem",
|
||||
commit = "1eea6a48d75380047d2ebe7c8c4bd8985bcfdeca",
|
||||
importpath = "github.com/wealdtech/go-eth2-wallet-store-filesystem",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "com_github_wealdtech_go_eth2_wallet_store_s3",
|
||||
commit = "1c821b5161f7bb0b3efa2030eff687eea5e70e53",
|
||||
importpath = "github.com/wealdtech/go-eth2-wallet-store-s3",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "com_github_wealdtech_go_eth2_wallet_encryptor_keystorev4",
|
||||
commit = "0c11c07b9544eb662210fadded94f40f309d8c8f",
|
||||
importpath = "github.com/wealdtech/go-eth2-wallet-encryptor-keystorev4",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "com_github_wealdtech_go_eth2_wallet_types",
|
||||
commit = "af67d8101be61e7c4dd8126d2b3eba20cff5dab2",
|
||||
importpath = "github.com/wealdtech/go-eth2-wallet-types",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "com_github_wealdtech_go_eth2_types",
|
||||
commit = "f9c31ddf180537dd5712d5998a3d56c45864d71f",
|
||||
importpath = "github.com/wealdtech/go-eth2-types",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "com_github_wealdtech_go_eth2_util",
|
||||
commit = "326ebb1755651131bb8f4506ea9a23be6d9ad1dd",
|
||||
importpath = "github.com/wealdtech/go-eth2-util",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "com_github_wealdtech_go_ecodec",
|
||||
commit = "7473d835445a3490e61a5fcf48fe4e9755a37957",
|
||||
importpath = "github.com/wealdtech/go-ecodec",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "com_github_wealdtech_go_bytesutil",
|
||||
commit = "e564d0ade555b9f97494f0f669196ddcc6bc531d",
|
||||
importpath = "github.com/wealdtech/go-bytesutil",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "com_github_wealdtech_go_indexer",
|
||||
commit = "334862c32b1e3a5c6738a2618f5c0a8ebeb8cd51",
|
||||
importpath = "github.com/wealdtech/go-indexer",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "com_github_shibukawa_configdir",
|
||||
commit = "e180dbdc8da04c4fa04272e875ce64949f38bd3e",
|
||||
importpath = "github.com/shibukawa/configdir",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "com_github_libp2p_go_libp2p_noise",
|
||||
importpath = "github.com/libp2p/go-libp2p-noise",
|
||||
sum = "h1:J1gHJRNFEk7NdiaPQQqAvxEy+7hhCsVv3uzduWybmqY=",
|
||||
version = "v0.0.0-20200302201340-8c54356e12c9",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "com_github_ferranbt_fastssz",
|
||||
commit = "06015a5d84f9e4eefe2c21377ca678fa8f1a1b09",
|
||||
importpath = "github.com/ferranbt/fastssz",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "com_github_burntsushi_toml",
|
||||
importpath = "github.com/BurntSushi/toml",
|
||||
sum = "h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=",
|
||||
version = "v0.3.1",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "com_github_cpuguy83_go_md2man_v2",
|
||||
importpath = "github.com/cpuguy83/go-md2man/v2",
|
||||
sum = "h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM=",
|
||||
version = "v2.0.0",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "com_github_russross_blackfriday_v2",
|
||||
importpath = "github.com/russross/blackfriday/v2",
|
||||
sum = "h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=",
|
||||
version = "v2.0.1",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "com_github_shurcool_sanitized_anchor_name",
|
||||
importpath = "github.com/shurcooL/sanitized_anchor_name",
|
||||
sum = "h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=",
|
||||
version = "v1.0.0",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "in_gopkg_urfave_cli_v2",
|
||||
importpath = "gopkg.in/urfave/cli.v2",
|
||||
sum = "h1:OvXt/p4cdwNl+mwcWMq/AxaKFkhdxcjx+tx+qf4EOvY=",
|
||||
version = "v2.0.0-20190806201727-b62605953717",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "in_gopkg_urfave_cli_v1",
|
||||
importpath = "gopkg.in/urfave/cli.v1",
|
||||
sum = "h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0=",
|
||||
version = "v1.20.0",
|
||||
)
|
||||
|
||||
11
bazel.sh
Executable file
11
bazel.sh
Executable file
@@ -0,0 +1,11 @@
|
||||
#!/bin/bash
|
||||
|
||||
# This script serves as a wrapper around bazel to limit the scope of environment variables that
|
||||
# may change the action output. Using this script should result in a higher cache hit ratio for
|
||||
# cached actions with a more heremtic build.
|
||||
|
||||
env -i \
|
||||
PATH=/usr/bin:/bin \
|
||||
HOME=$HOME \
|
||||
GOOGLE_APPLICATION_CREDENTIALS=$GOOGLE_APPLICATION_CREDENTIALS \
|
||||
bazel "$@"
|
||||
@@ -1,7 +1,7 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library", "go_test")
|
||||
load("@io_bazel_rules_docker//go:image.bzl", "go_image")
|
||||
load("@io_bazel_rules_docker//container:container.bzl", "container_bundle")
|
||||
load("//tools:binary_targets.bzl", "binary_targets")
|
||||
load("//tools:binary_targets.bzl", "binary_targets", "go_image_debug")
|
||||
load("@io_bazel_rules_docker//contrib:push-all.bzl", "docker_push")
|
||||
|
||||
go_library(
|
||||
@@ -23,9 +23,10 @@ go_library(
|
||||
"@com_github_ipfs_go_log//:go_default_library",
|
||||
"@com_github_joonix_log//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_urfave_cli//:go_default_library",
|
||||
"@com_github_whyrusleeping_go_logging//:go_default_library",
|
||||
"@com_github_x_cray_logrus_prefixed_formatter//:go_default_library",
|
||||
"@in_gopkg_urfave_cli_v2//:go_default_library",
|
||||
"@in_gopkg_urfave_cli_v2//altsrc:go_default_library",
|
||||
"@org_uber_go_automaxprocs//:go_default_library",
|
||||
],
|
||||
)
|
||||
@@ -36,6 +37,7 @@ go_image(
|
||||
"main.go",
|
||||
"usage.go",
|
||||
],
|
||||
base = "//tools:cc_image",
|
||||
goarch = "amd64",
|
||||
goos = "linux",
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain",
|
||||
@@ -54,9 +56,10 @@ go_image(
|
||||
"@com_github_ipfs_go_log//:go_default_library",
|
||||
"@com_github_joonix_log//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_urfave_cli//:go_default_library",
|
||||
"@com_github_whyrusleeping_go_logging//:go_default_library",
|
||||
"@com_github_x_cray_logrus_prefixed_formatter//:go_default_library",
|
||||
"@in_gopkg_urfave_cli_v2//:go_default_library",
|
||||
"@in_gopkg_urfave_cli_v2//altsrc:go_default_library",
|
||||
"@org_uber_go_automaxprocs//:go_default_library",
|
||||
],
|
||||
)
|
||||
@@ -70,16 +73,39 @@ container_bundle(
|
||||
tags = ["manual"],
|
||||
)
|
||||
|
||||
go_image_debug(
|
||||
name = "image_debug",
|
||||
image = ":image",
|
||||
)
|
||||
|
||||
container_bundle(
|
||||
name = "image_bundle_debug",
|
||||
images = {
|
||||
"gcr.io/prysmaticlabs/prysm/beacon-chain:latest-debug": ":image_debug",
|
||||
"gcr.io/prysmaticlabs/prysm/beacon-chain:{DOCKER_TAG}-debug": ":image_debug",
|
||||
},
|
||||
tags = ["manual"],
|
||||
)
|
||||
|
||||
docker_push(
|
||||
name = "push_images",
|
||||
bundle = ":image_bundle",
|
||||
tags = ["manual"],
|
||||
)
|
||||
|
||||
docker_push(
|
||||
name = "push_images_debug",
|
||||
bundle = ":image_bundle_debug",
|
||||
tags = ["manual"],
|
||||
)
|
||||
|
||||
go_binary(
|
||||
name = "beacon-chain",
|
||||
embed = [":go_default_library"],
|
||||
visibility = ["//beacon-chain:__subpackages__"],
|
||||
visibility = [
|
||||
"//beacon-chain:__subpackages__",
|
||||
"//endtoend:__pkg__",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
@@ -87,7 +113,10 @@ go_test(
|
||||
size = "small",
|
||||
srcs = ["usage_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = ["@com_github_urfave_cli//:go_default_library"],
|
||||
deps = [
|
||||
"//shared/featureconfig:go_default_library",
|
||||
"@in_gopkg_urfave_cli_v2//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
[go_binary(
|
||||
|
||||
@@ -5,6 +5,6 @@ This is the main project folder for the beacon chain implementation of Ethereum
|
||||
[](https://discord.gg/KSA7rPr)
|
||||
[](https://gitter.im/prysmaticlabs/prysm?badge&utm_medium=badge&utm_campaign=pr-badge)
|
||||
|
||||
Also, read the latest beacon chain [design spec](https://github.com/ethereum/eth2.0-specs/blob/dev/specs/core/0_beacon-chain.md), this design spec serves as a source of truth for the beacon chain implementation we follow at prysmatic labs.
|
||||
Also, read the latest beacon chain [design spec](https://github.com/ethereum/eth2.0-specs/blob/dev/specs/phase0/beacon-chain.md), this design spec serves as a source of truth for the beacon chain implementation we follow at prysmatic labs.
|
||||
Check out the [FAQs](https://notes.ethereum.org/9MMuzWeFTTSg-3Tz_YeiBA?view). Refer this page on [why](http://email.mg2.substack.com/c/eJwlj9GOhCAMRb9G3jRQQPGBh5mM8xsbhKrsDGIAM9m_X9xN2qZtbpt7rCm4xvSjj5gLOTOmL-809CMbKXFaOKakIl4DZYr2AGyQIGjHOnWH22OiYnoIxmDijaBhhS6fcy7GvjobA9m0mSXOcnZq5GBqLkilXBZhBsus5ZK89VbKkRt-a-BZI6DzZ7iur1lQ953KJ9bemnxgahuQU9XJu6pFPdu8meT8vragzEjpMCwMGLlgLo6h5z1JumQTu4IJd4v15xqMf_8ZLP_Y1bSLdbnrD-LL71i2Kj7DLxaWWF4)
|
||||
we are combining sharding and casper together.
|
||||
|
||||
@@ -7,14 +7,16 @@ go_library(
|
||||
visibility = ["//beacon-chain:__subpackages__"],
|
||||
deps = [
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/core/epoch:go_default_library",
|
||||
"//beacon-chain/core/feed:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/validators:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//proto/beacon/p2p/v1:go_default_library",
|
||||
"//proto/eth/v1alpha1:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
)
|
||||
@@ -25,14 +27,18 @@ go_test(
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/blockchain/testing:go_default_library",
|
||||
"//beacon-chain/core/epoch/precompute:go_default_library",
|
||||
"//beacon-chain/core/feed:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/db/testing:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//proto/beacon/p2p/v1:go_default_library",
|
||||
"//proto/eth/v1alpha1:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
"//shared/testutil:go_default_library",
|
||||
"@com_github_gogo_protobuf//proto:go_default_library",
|
||||
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
|
||||
|
||||
@@ -5,13 +5,15 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/validators"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
@@ -21,31 +23,33 @@ var log = logrus.WithField("prefix", "archiver")
|
||||
// Service defining archiver functionality for persisting checkpointed
|
||||
// beacon chain information to a database backend for historical purposes.
|
||||
type Service struct {
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
beaconDB db.Database
|
||||
headFetcher blockchain.HeadFetcher
|
||||
newHeadNotifier blockchain.NewHeadNotifier
|
||||
newHeadRootChan chan [32]byte
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
beaconDB db.NoHeadAccessDatabase
|
||||
headFetcher blockchain.HeadFetcher
|
||||
participationFetcher blockchain.ParticipationFetcher
|
||||
stateNotifier statefeed.Notifier
|
||||
lastArchivedEpoch uint64
|
||||
}
|
||||
|
||||
// Config options for the archiver service.
|
||||
type Config struct {
|
||||
BeaconDB db.Database
|
||||
HeadFetcher blockchain.HeadFetcher
|
||||
NewHeadNotifier blockchain.NewHeadNotifier
|
||||
BeaconDB db.NoHeadAccessDatabase
|
||||
HeadFetcher blockchain.HeadFetcher
|
||||
ParticipationFetcher blockchain.ParticipationFetcher
|
||||
StateNotifier statefeed.Notifier
|
||||
}
|
||||
|
||||
// NewArchiverService initializes the service from configuration options.
|
||||
func NewArchiverService(ctx context.Context, cfg *Config) *Service {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
return &Service{
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
beaconDB: cfg.BeaconDB,
|
||||
headFetcher: cfg.HeadFetcher,
|
||||
newHeadNotifier: cfg.NewHeadNotifier,
|
||||
newHeadRootChan: make(chan [32]byte, 1),
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
beaconDB: cfg.BeaconDB,
|
||||
headFetcher: cfg.HeadFetcher,
|
||||
participationFetcher: cfg.ParticipationFetcher,
|
||||
stateNotifier: cfg.StateNotifier,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -67,41 +71,46 @@ func (s *Service) Status() error {
|
||||
}
|
||||
|
||||
// We archive committee information pertaining to the head state's epoch.
|
||||
func (s *Service) archiveCommitteeInfo(ctx context.Context, headState *pb.BeaconState) error {
|
||||
currentEpoch := helpers.SlotToEpoch(headState.Slot)
|
||||
proposerSeed, err := helpers.Seed(headState, currentEpoch, params.BeaconConfig().DomainBeaconProposer)
|
||||
func (s *Service) archiveCommitteeInfo(ctx context.Context, headState *state.BeaconState, epoch uint64) error {
|
||||
proposerSeed, err := helpers.Seed(headState, epoch, params.BeaconConfig().DomainBeaconProposer)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not generate seed")
|
||||
}
|
||||
attesterSeed, err := helpers.Seed(headState, currentEpoch, params.BeaconConfig().DomainBeaconAttester)
|
||||
attesterSeed, err := helpers.Seed(headState, epoch, params.BeaconConfig().DomainBeaconAttester)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not generate seed")
|
||||
}
|
||||
|
||||
info := ðpb.ArchivedCommitteeInfo{
|
||||
info := &pb.ArchivedCommitteeInfo{
|
||||
ProposerSeed: proposerSeed[:],
|
||||
AttesterSeed: attesterSeed[:],
|
||||
}
|
||||
if err := s.beaconDB.SaveArchivedCommitteeInfo(ctx, currentEpoch, info); err != nil {
|
||||
if err := s.beaconDB.SaveArchivedCommitteeInfo(ctx, epoch, info); err != nil {
|
||||
return errors.Wrap(err, "could not archive committee info")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// We archive active validator set changes that happened during the epoch.
|
||||
func (s *Service) archiveActiveSetChanges(ctx context.Context, headState *pb.BeaconState) error {
|
||||
activations := validators.ActivatedValidatorIndices(headState)
|
||||
slashings := validators.SlashedValidatorIndices(headState)
|
||||
exited, err := validators.ExitedValidatorIndices(headState)
|
||||
// We archive active validator set changes that happened during the previous epoch.
|
||||
func (s *Service) archiveActiveSetChanges(ctx context.Context, headState *state.BeaconState, epoch uint64) error {
|
||||
prevEpoch := epoch - 1
|
||||
vals := headState.Validators()
|
||||
activations := validators.ActivatedValidatorIndices(prevEpoch, vals)
|
||||
slashings := validators.SlashedValidatorIndices(prevEpoch, vals)
|
||||
activeValidatorCount, err := helpers.ActiveValidatorCount(headState, prevEpoch)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get active validator count")
|
||||
}
|
||||
exited, err := validators.ExitedValidatorIndices(prevEpoch, vals, activeValidatorCount)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not determine exited validator indices")
|
||||
}
|
||||
activeSetChanges := ðpb.ArchivedActiveSetChanges{
|
||||
activeSetChanges := &pb.ArchivedActiveSetChanges{
|
||||
Activated: activations,
|
||||
Exited: exited,
|
||||
Slashed: slashings,
|
||||
}
|
||||
if err := s.beaconDB.SaveArchivedActiveValidatorChanges(ctx, helpers.CurrentEpoch(headState), activeSetChanges); err != nil {
|
||||
if err := s.beaconDB.SaveArchivedActiveValidatorChanges(ctx, prevEpoch, activeSetChanges); err != nil {
|
||||
return errors.Wrap(err, "could not archive active validator set changes")
|
||||
}
|
||||
return nil
|
||||
@@ -109,60 +118,78 @@ func (s *Service) archiveActiveSetChanges(ctx context.Context, headState *pb.Bea
|
||||
|
||||
// We compute participation metrics by first retrieving the head state and
|
||||
// matching validator attestations during the epoch.
|
||||
func (s *Service) archiveParticipation(ctx context.Context, headState *pb.BeaconState) error {
|
||||
participation, err := epoch.ComputeValidatorParticipation(headState, helpers.SlotToEpoch(headState.Slot))
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not compute participation")
|
||||
func (s *Service) archiveParticipation(ctx context.Context, epoch uint64) error {
|
||||
p := s.participationFetcher.Participation(epoch)
|
||||
participation := ðpb.ValidatorParticipation{}
|
||||
if p != nil {
|
||||
participation = ðpb.ValidatorParticipation{
|
||||
EligibleEther: p.PrevEpoch,
|
||||
VotedEther: p.PrevEpochTargetAttesters,
|
||||
GlobalParticipationRate: float32(p.PrevEpochTargetAttesters) / float32(p.PrevEpoch),
|
||||
}
|
||||
}
|
||||
return s.beaconDB.SaveArchivedValidatorParticipation(ctx, helpers.SlotToEpoch(headState.Slot), participation)
|
||||
return s.beaconDB.SaveArchivedValidatorParticipation(ctx, epoch, participation)
|
||||
}
|
||||
|
||||
// We archive validator balances and active indices.
|
||||
func (s *Service) archiveBalances(ctx context.Context, headState *pb.BeaconState) error {
|
||||
balances := headState.Balances
|
||||
currentEpoch := helpers.CurrentEpoch(headState)
|
||||
if err := s.beaconDB.SaveArchivedBalances(ctx, currentEpoch, balances); err != nil {
|
||||
func (s *Service) archiveBalances(ctx context.Context, balances []uint64, epoch uint64) error {
|
||||
if err := s.beaconDB.SaveArchivedBalances(ctx, epoch, balances); err != nil {
|
||||
return errors.Wrap(err, "could not archive balances")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) run(ctx context.Context) {
|
||||
sub := s.newHeadNotifier.HeadUpdatedFeed().Subscribe(s.newHeadRootChan)
|
||||
defer sub.Unsubscribe()
|
||||
stateChannel := make(chan *feed.Event, 1)
|
||||
stateSub := s.stateNotifier.StateFeed().Subscribe(stateChannel)
|
||||
defer stateSub.Unsubscribe()
|
||||
for {
|
||||
select {
|
||||
case r := <-s.newHeadRootChan:
|
||||
log.WithField("headRoot", fmt.Sprintf("%#x", r)).Debug("New chain head event")
|
||||
headState := s.headFetcher.HeadState()
|
||||
if !helpers.IsEpochEnd(headState.Slot) {
|
||||
continue
|
||||
case event := <-stateChannel:
|
||||
if event.Type == statefeed.BlockProcessed {
|
||||
data := event.Data.(*statefeed.BlockProcessedData)
|
||||
log.WithField("headRoot", fmt.Sprintf("%#x", data.BlockRoot)).Debug("Received block processed event")
|
||||
headState, err := s.headFetcher.HeadState(ctx)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Head state is not available")
|
||||
continue
|
||||
}
|
||||
slot := headState.Slot()
|
||||
currentEpoch := helpers.SlotToEpoch(slot)
|
||||
if !helpers.IsEpochEnd(slot) && currentEpoch <= s.lastArchivedEpoch {
|
||||
continue
|
||||
}
|
||||
epochToArchive := currentEpoch
|
||||
if !helpers.IsEpochEnd(slot) {
|
||||
epochToArchive--
|
||||
}
|
||||
if err := s.archiveCommitteeInfo(ctx, headState, epochToArchive); err != nil {
|
||||
log.WithError(err).Error("Could not archive committee info")
|
||||
continue
|
||||
}
|
||||
if err := s.archiveActiveSetChanges(ctx, headState, epochToArchive); err != nil {
|
||||
log.WithError(err).Error("Could not archive active validator set changes")
|
||||
continue
|
||||
}
|
||||
if err := s.archiveParticipation(ctx, epochToArchive); err != nil {
|
||||
log.WithError(err).Error("Could not archive validator participation")
|
||||
continue
|
||||
}
|
||||
if err := s.archiveBalances(ctx, headState.Balances(), epochToArchive); err != nil {
|
||||
log.WithError(err).Error("Could not archive validator balances and active indices")
|
||||
continue
|
||||
}
|
||||
log.WithField(
|
||||
"epoch",
|
||||
epochToArchive,
|
||||
).Debug("Successfully archived beacon chain data during epoch")
|
||||
s.lastArchivedEpoch = epochToArchive
|
||||
}
|
||||
if err := s.archiveCommitteeInfo(ctx, headState); err != nil {
|
||||
log.WithError(err).Error("Could not archive committee info")
|
||||
continue
|
||||
}
|
||||
if err := s.archiveActiveSetChanges(ctx, headState); err != nil {
|
||||
log.WithError(err).Error("Could not archive active validator set changes")
|
||||
continue
|
||||
}
|
||||
if err := s.archiveParticipation(ctx, headState); err != nil {
|
||||
log.WithError(err).Error("Could not archive validator participation")
|
||||
continue
|
||||
}
|
||||
if err := s.archiveBalances(ctx, headState); err != nil {
|
||||
log.WithError(err).Error("Could not archive validator balances and active indices")
|
||||
continue
|
||||
}
|
||||
log.WithField(
|
||||
"epoch",
|
||||
helpers.CurrentEpoch(headState),
|
||||
).Debug("Successfully archived beacon chain data during epoch")
|
||||
case <-s.ctx.Done():
|
||||
log.Debug("Context closed, exiting goroutine")
|
||||
return
|
||||
case err := <-sub.Err():
|
||||
log.WithError(err).Error("Subscription to new chain head notifier failed")
|
||||
case err := <-stateSub.Err():
|
||||
log.WithError(err).Error("Subscription to state feed notifier failed")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
@@ -8,13 +8,17 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db"
|
||||
dbutil "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
"github.com/sirupsen/logrus"
|
||||
@@ -24,20 +28,32 @@ import (
|
||||
func init() {
|
||||
logrus.SetLevel(logrus.DebugLevel)
|
||||
logrus.SetOutput(ioutil.Discard)
|
||||
params.OverrideBeaconConfig(params.MinimalSpecConfig())
|
||||
}
|
||||
|
||||
func TestArchiverService_ReceivesNewChainHeadEvent(t *testing.T) {
|
||||
func TestArchiverService_ReceivesBlockProcessedEvent(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
svc, beaconDB := setupService(t)
|
||||
defer dbutil.TeardownDB(t, beaconDB)
|
||||
svc.headFetcher = &mock.ChainService{
|
||||
State: &pb.BeaconState{Slot: 1},
|
||||
st, err := stateTrie.InitializeFromProto(&pb.BeaconState{
|
||||
Slot: 1,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
headRoot := [32]byte{1, 2, 3}
|
||||
triggerNewHeadEvent(t, svc, headRoot)
|
||||
testutil.AssertLogsContain(t, hook, fmt.Sprintf("%#x", headRoot))
|
||||
testutil.AssertLogsContain(t, hook, "New chain head event")
|
||||
svc.headFetcher = &mock.ChainService{
|
||||
State: st,
|
||||
}
|
||||
|
||||
event := &feed.Event{
|
||||
Type: statefeed.BlockProcessed,
|
||||
Data: &statefeed.BlockProcessedData{
|
||||
BlockRoot: [32]byte{1, 2, 3},
|
||||
Verified: true,
|
||||
},
|
||||
}
|
||||
triggerStateEvent(t, svc, event)
|
||||
testutil.AssertLogsContain(t, hook, fmt.Sprintf("%#x", event.Data.(*statefeed.BlockProcessedData).BlockRoot))
|
||||
testutil.AssertLogsContain(t, hook, "Received block processed event")
|
||||
}
|
||||
|
||||
func TestArchiverService_OnlyArchiveAtEpochEnd(t *testing.T) {
|
||||
@@ -45,33 +61,112 @@ func TestArchiverService_OnlyArchiveAtEpochEnd(t *testing.T) {
|
||||
svc, beaconDB := setupService(t)
|
||||
defer dbutil.TeardownDB(t, beaconDB)
|
||||
// The head state is NOT an epoch end.
|
||||
svc.headFetcher = &mock.ChainService{
|
||||
State: &pb.BeaconState{Slot: params.BeaconConfig().SlotsPerEpoch - 3},
|
||||
st, err := stateTrie.InitializeFromProto(&pb.BeaconState{
|
||||
Slot: params.BeaconConfig().SlotsPerEpoch - 2,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
triggerNewHeadEvent(t, svc, [32]byte{})
|
||||
svc.headFetcher = &mock.ChainService{
|
||||
State: st,
|
||||
}
|
||||
event := &feed.Event{
|
||||
Type: statefeed.BlockProcessed,
|
||||
Data: &statefeed.BlockProcessedData{
|
||||
BlockRoot: [32]byte{1, 2, 3},
|
||||
Verified: true,
|
||||
},
|
||||
}
|
||||
triggerStateEvent(t, svc, event)
|
||||
|
||||
// The context should have been canceled.
|
||||
if svc.ctx.Err() != context.Canceled {
|
||||
t.Error("context was not canceled")
|
||||
}
|
||||
testutil.AssertLogsContain(t, hook, "New chain head event")
|
||||
testutil.AssertLogsContain(t, hook, "Received block processed event")
|
||||
// The service should ONLY log any archival logs if we receive a
|
||||
// head slot that is an epoch end.
|
||||
testutil.AssertLogsDoNotContain(t, hook, "Successfully archived")
|
||||
}
|
||||
|
||||
func TestArchiverService_ArchivesEvenThroughSkipSlot(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
svc, beaconDB := setupService(t)
|
||||
validatorCount := uint64(100)
|
||||
headState, err := setupState(validatorCount)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer dbutil.TeardownDB(t, beaconDB)
|
||||
event := &feed.Event{
|
||||
Type: statefeed.BlockProcessed,
|
||||
Data: &statefeed.BlockProcessedData{
|
||||
BlockRoot: [32]byte{1, 2, 3},
|
||||
Verified: true,
|
||||
},
|
||||
}
|
||||
|
||||
exitRoutine := make(chan bool)
|
||||
go func() {
|
||||
svc.run(svc.ctx)
|
||||
<-exitRoutine
|
||||
}()
|
||||
|
||||
// Send out an event every slot, skipping the end slot of the epoch.
|
||||
for i := uint64(0); i < params.BeaconConfig().SlotsPerEpoch+1; i++ {
|
||||
if err := headState.SetSlot(i); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
svc.headFetcher = &mock.ChainService{
|
||||
State: headState,
|
||||
}
|
||||
if helpers.IsEpochEnd(i) {
|
||||
continue
|
||||
}
|
||||
// Send in a loop to ensure it is delivered (busy wait for the service to subscribe to the state feed).
|
||||
for sent := 0; sent == 0; {
|
||||
sent = svc.stateNotifier.StateFeed().Send(event)
|
||||
}
|
||||
}
|
||||
if err := svc.Stop(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
exitRoutine <- true
|
||||
|
||||
// The context should have been canceled.
|
||||
if svc.ctx.Err() != context.Canceled {
|
||||
t.Error("context was not canceled")
|
||||
}
|
||||
|
||||
testutil.AssertLogsContain(t, hook, "Received block processed event")
|
||||
// Even though there was a skip slot, we should still be able to archive
|
||||
// upon the next block event afterwards.
|
||||
testutil.AssertLogsContain(t, hook, "Successfully archived")
|
||||
}
|
||||
|
||||
func TestArchiverService_ComputesAndSavesParticipation(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
validatorCount := uint64(100)
|
||||
headState := setupState(t, validatorCount)
|
||||
headState, err := setupState(validatorCount)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
svc, beaconDB := setupService(t)
|
||||
defer dbutil.TeardownDB(t, beaconDB)
|
||||
svc.headFetcher = &mock.ChainService{
|
||||
State: headState,
|
||||
}
|
||||
triggerNewHeadEvent(t, svc, [32]byte{})
|
||||
event := &feed.Event{
|
||||
Type: statefeed.BlockProcessed,
|
||||
Data: &statefeed.BlockProcessedData{
|
||||
BlockRoot: [32]byte{1, 2, 3},
|
||||
Verified: true,
|
||||
},
|
||||
}
|
||||
triggerStateEvent(t, svc, event)
|
||||
|
||||
attestedBalance := uint64(1)
|
||||
|
||||
currentEpoch := helpers.CurrentEpoch(headState)
|
||||
wanted := ðpb.ValidatorParticipation{
|
||||
VotedEther: attestedBalance,
|
||||
@@ -85,7 +180,7 @@ func TestArchiverService_ComputesAndSavesParticipation(t *testing.T) {
|
||||
}
|
||||
|
||||
if !proto.Equal(wanted, retrieved) {
|
||||
t.Errorf("Wanted participation for epoch %d %v, retrieved %v", currentEpoch, wanted, retrieved)
|
||||
t.Errorf("Wanted participation for epoch %d %v, retrieved %v", currentEpoch-1, wanted, retrieved)
|
||||
}
|
||||
testutil.AssertLogsContain(t, hook, "Successfully archived")
|
||||
}
|
||||
@@ -93,23 +188,33 @@ func TestArchiverService_ComputesAndSavesParticipation(t *testing.T) {
|
||||
func TestArchiverService_SavesIndicesAndBalances(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
validatorCount := uint64(100)
|
||||
headState := setupState(t, validatorCount)
|
||||
headState, err := setupState(validatorCount)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
svc, beaconDB := setupService(t)
|
||||
defer dbutil.TeardownDB(t, beaconDB)
|
||||
svc.headFetcher = &mock.ChainService{
|
||||
State: headState,
|
||||
}
|
||||
triggerNewHeadEvent(t, svc, [32]byte{})
|
||||
event := &feed.Event{
|
||||
Type: statefeed.BlockProcessed,
|
||||
Data: &statefeed.BlockProcessedData{
|
||||
BlockRoot: [32]byte{1, 2, 3},
|
||||
Verified: true,
|
||||
},
|
||||
}
|
||||
triggerStateEvent(t, svc, event)
|
||||
|
||||
retrieved, err := svc.beaconDB.ArchivedBalances(svc.ctx, helpers.CurrentEpoch(headState))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(headState.Balances, retrieved) {
|
||||
if !reflect.DeepEqual(headState.Balances(), retrieved) {
|
||||
t.Errorf(
|
||||
"Wanted balances for epoch %d %v, retrieved %v",
|
||||
helpers.CurrentEpoch(headState),
|
||||
headState.Balances,
|
||||
headState.Balances(),
|
||||
retrieved,
|
||||
)
|
||||
}
|
||||
@@ -119,13 +224,23 @@ func TestArchiverService_SavesIndicesAndBalances(t *testing.T) {
|
||||
func TestArchiverService_SavesCommitteeInfo(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
validatorCount := uint64(100)
|
||||
headState := setupState(t, validatorCount)
|
||||
headState, err := setupState(validatorCount)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
svc, beaconDB := setupService(t)
|
||||
defer dbutil.TeardownDB(t, beaconDB)
|
||||
svc.headFetcher = &mock.ChainService{
|
||||
State: headState,
|
||||
}
|
||||
triggerNewHeadEvent(t, svc, [32]byte{})
|
||||
event := &feed.Event{
|
||||
Type: statefeed.BlockProcessed,
|
||||
Data: &statefeed.BlockProcessedData{
|
||||
BlockRoot: [32]byte{1, 2, 3},
|
||||
Verified: true,
|
||||
},
|
||||
}
|
||||
triggerStateEvent(t, svc, event)
|
||||
|
||||
currentEpoch := helpers.CurrentEpoch(headState)
|
||||
proposerSeed, err := helpers.Seed(headState, currentEpoch, params.BeaconConfig().DomainBeaconProposer)
|
||||
@@ -136,7 +251,7 @@ func TestArchiverService_SavesCommitteeInfo(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
wanted := ðpb.ArchivedCommitteeInfo{
|
||||
wanted := &pb.ArchivedCommitteeInfo{
|
||||
ProposerSeed: proposerSeed[:],
|
||||
AttesterSeed: attesterSeed[:],
|
||||
}
|
||||
@@ -159,22 +274,49 @@ func TestArchiverService_SavesCommitteeInfo(t *testing.T) {
|
||||
func TestArchiverService_SavesActivatedValidatorChanges(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
validatorCount := uint64(100)
|
||||
headState := setupState(t, validatorCount)
|
||||
headState, err := setupState(validatorCount)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
svc, beaconDB := setupService(t)
|
||||
defer dbutil.TeardownDB(t, beaconDB)
|
||||
svc.headFetcher = &mock.ChainService{
|
||||
State: headState,
|
||||
}
|
||||
currentEpoch := helpers.CurrentEpoch(headState)
|
||||
delayedActEpoch := helpers.DelayedActivationExitEpoch(currentEpoch)
|
||||
headState.Validators[4].ActivationEpoch = delayedActEpoch
|
||||
headState.Validators[5].ActivationEpoch = delayedActEpoch
|
||||
triggerNewHeadEvent(t, svc, [32]byte{})
|
||||
|
||||
retrieved, err := beaconDB.ArchivedActiveValidatorChanges(svc.ctx, currentEpoch)
|
||||
prevEpoch := helpers.PrevEpoch(headState)
|
||||
delayedActEpoch := helpers.ActivationExitEpoch(prevEpoch)
|
||||
val1, err := headState.ValidatorAtIndex(4)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
val1.ActivationEpoch = delayedActEpoch
|
||||
val2, err := headState.ValidatorAtIndex(5)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
val2.ActivationEpoch = delayedActEpoch
|
||||
if err := headState.UpdateValidatorAtIndex(4, val1); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := headState.UpdateValidatorAtIndex(5, val1); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
event := &feed.Event{
|
||||
Type: statefeed.BlockProcessed,
|
||||
Data: &statefeed.BlockProcessedData{
|
||||
BlockRoot: [32]byte{1, 2, 3},
|
||||
Verified: true,
|
||||
},
|
||||
}
|
||||
triggerStateEvent(t, svc, event)
|
||||
|
||||
retrieved, err := beaconDB.ArchivedActiveValidatorChanges(svc.ctx, prevEpoch)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if retrieved == nil {
|
||||
t.Fatal("Retrieved indices are nil")
|
||||
}
|
||||
if !reflect.DeepEqual(retrieved.Activated, []uint64{4, 5}) {
|
||||
t.Errorf("Wanted indices 4 5 activated, received %v", retrieved.Activated)
|
||||
}
|
||||
@@ -184,21 +326,48 @@ func TestArchiverService_SavesActivatedValidatorChanges(t *testing.T) {
|
||||
func TestArchiverService_SavesSlashedValidatorChanges(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
validatorCount := uint64(100)
|
||||
headState := setupState(t, validatorCount)
|
||||
headState, err := setupState(validatorCount)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
svc, beaconDB := setupService(t)
|
||||
defer dbutil.TeardownDB(t, beaconDB)
|
||||
svc.headFetcher = &mock.ChainService{
|
||||
State: headState,
|
||||
}
|
||||
currentEpoch := helpers.CurrentEpoch(headState)
|
||||
headState.Validators[95].Slashed = true
|
||||
headState.Validators[96].Slashed = true
|
||||
triggerNewHeadEvent(t, svc, [32]byte{})
|
||||
|
||||
retrieved, err := beaconDB.ArchivedActiveValidatorChanges(svc.ctx, currentEpoch)
|
||||
prevEpoch := helpers.PrevEpoch(headState)
|
||||
val1, err := headState.ValidatorAtIndex(95)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
val1.Slashed = true
|
||||
val2, err := headState.ValidatorAtIndex(96)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
val2.Slashed = true
|
||||
if err := headState.UpdateValidatorAtIndex(95, val1); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := headState.UpdateValidatorAtIndex(96, val1); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
event := &feed.Event{
|
||||
Type: statefeed.BlockProcessed,
|
||||
Data: &statefeed.BlockProcessedData{
|
||||
BlockRoot: [32]byte{1, 2, 3},
|
||||
Verified: true,
|
||||
},
|
||||
}
|
||||
triggerStateEvent(t, svc, event)
|
||||
|
||||
retrieved, err := beaconDB.ArchivedActiveValidatorChanges(svc.ctx, prevEpoch)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if retrieved == nil {
|
||||
t.Fatal("Retrieved indices are nil")
|
||||
}
|
||||
if !reflect.DeepEqual(retrieved.Slashed, []uint64{95, 96}) {
|
||||
t.Errorf("Wanted indices 95, 96 slashed, received %v", retrieved.Slashed)
|
||||
}
|
||||
@@ -208,28 +377,47 @@ func TestArchiverService_SavesSlashedValidatorChanges(t *testing.T) {
|
||||
func TestArchiverService_SavesExitedValidatorChanges(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
validatorCount := uint64(100)
|
||||
headState := setupState(t, validatorCount)
|
||||
headState, err := setupState(validatorCount)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
svc, beaconDB := setupService(t)
|
||||
defer dbutil.TeardownDB(t, beaconDB)
|
||||
svc.headFetcher = &mock.ChainService{
|
||||
State: headState,
|
||||
}
|
||||
currentEpoch := helpers.CurrentEpoch(headState)
|
||||
headState.Validators[95].ExitEpoch = currentEpoch + 1
|
||||
headState.Validators[95].WithdrawableEpoch = currentEpoch + 1 + params.BeaconConfig().MinValidatorWithdrawabilityDelay
|
||||
triggerNewHeadEvent(t, svc, [32]byte{})
|
||||
|
||||
retrieved, err := beaconDB.ArchivedActiveValidatorChanges(svc.ctx, currentEpoch)
|
||||
prevEpoch := helpers.PrevEpoch(headState)
|
||||
val, err := headState.ValidatorAtIndex(95)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
val.ExitEpoch = prevEpoch
|
||||
val.WithdrawableEpoch = prevEpoch + params.BeaconConfig().MinValidatorWithdrawabilityDelay
|
||||
if err := headState.UpdateValidatorAtIndex(95, val); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
event := &feed.Event{
|
||||
Type: statefeed.BlockProcessed,
|
||||
Data: &statefeed.BlockProcessedData{
|
||||
BlockRoot: [32]byte{1, 2, 3},
|
||||
Verified: true,
|
||||
},
|
||||
}
|
||||
triggerStateEvent(t, svc, event)
|
||||
testutil.AssertLogsContain(t, hook, "Successfully archived")
|
||||
retrieved, err := beaconDB.ArchivedActiveValidatorChanges(svc.ctx, prevEpoch)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if retrieved == nil {
|
||||
t.Fatal("Retrieved indices are nil")
|
||||
}
|
||||
if !reflect.DeepEqual(retrieved.Exited, []uint64{95}) {
|
||||
t.Errorf("Wanted indices 95 exited, received %v", retrieved.Exited)
|
||||
}
|
||||
testutil.AssertLogsContain(t, hook, "Successfully archived")
|
||||
}
|
||||
|
||||
func setupState(t *testing.T, validatorCount uint64) *pb.BeaconState {
|
||||
func setupState(validatorCount uint64) (*stateTrie.BeaconState, error) {
|
||||
validators := make([]*ethpb.Validator, validatorCount)
|
||||
balances := make([]uint64, validatorCount)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
@@ -245,40 +433,47 @@ func setupState(t *testing.T, validatorCount uint64) *pb.BeaconState {
|
||||
|
||||
// We initialize a head state that has attestations from participated
|
||||
// validators in a simulated fashion.
|
||||
return &pb.BeaconState{
|
||||
return stateTrie.InitializeFromProto(&pb.BeaconState{
|
||||
Slot: (2 * params.BeaconConfig().SlotsPerEpoch) - 1,
|
||||
Validators: validators,
|
||||
Balances: balances,
|
||||
BlockRoots: make([][]byte, 128),
|
||||
BlockRoots: make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot),
|
||||
Slashings: []uint64{0, 1e9, 1e9},
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
CurrentEpochAttestations: atts,
|
||||
FinalizedCheckpoint: ðpb.Checkpoint{},
|
||||
JustificationBits: bitfield.Bitvector4{0x00},
|
||||
CurrentJustifiedCheckpoint: ðpb.Checkpoint{},
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func setupService(t *testing.T) (*Service, db.Database) {
|
||||
beaconDB := dbutil.SetupDB(t)
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
validatorCount := uint64(100)
|
||||
totalBalance := validatorCount * params.BeaconConfig().MaxEffectiveBalance
|
||||
mockChainService := &mock.ChainService{}
|
||||
return &Service{
|
||||
beaconDB: beaconDB,
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
newHeadRootChan: make(chan [32]byte, 0),
|
||||
newHeadNotifier: &mock.ChainService{},
|
||||
beaconDB: beaconDB,
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
stateNotifier: mockChainService.StateNotifier(),
|
||||
participationFetcher: &mock.ChainService{
|
||||
Balance: &precompute.Balance{PrevEpoch: totalBalance, PrevEpochTargetAttesters: 1}},
|
||||
}, beaconDB
|
||||
}
|
||||
|
||||
func triggerNewHeadEvent(t *testing.T, svc *Service, headRoot [32]byte) {
|
||||
func triggerStateEvent(t *testing.T, svc *Service, event *feed.Event) {
|
||||
exitRoutine := make(chan bool)
|
||||
go func() {
|
||||
svc.run(svc.ctx)
|
||||
<-exitRoutine
|
||||
}()
|
||||
|
||||
svc.newHeadRootChan <- headRoot
|
||||
// Send in a loop to ensure it is delivered (busy wait for the service to subscribe to the state feed).
|
||||
for sent := 0; sent == 0; {
|
||||
sent = svc.stateNotifier.StateFeed().Send(event)
|
||||
}
|
||||
if err := svc.Stop(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
@@ -4,9 +4,15 @@ go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"chain_info.go",
|
||||
"head.go",
|
||||
"info.go",
|
||||
"init_sync_process_block.go",
|
||||
"log.go",
|
||||
"metrics.go",
|
||||
"process_attestation.go",
|
||||
"process_attestation_helpers.go",
|
||||
"process_block.go",
|
||||
"process_block_helpers.go",
|
||||
"receive_attestation.go",
|
||||
"receive_block.go",
|
||||
"service.go",
|
||||
@@ -14,26 +20,38 @@ go_library(
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/blockchain",
|
||||
visibility = ["//beacon-chain:__subpackages__"],
|
||||
deps = [
|
||||
"//beacon-chain/blockchain/forkchoice:go_default_library",
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/cache/depositcache:go_default_library",
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
"//beacon-chain/core/epoch/precompute:go_default_library",
|
||||
"//beacon-chain/core/feed:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/state:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/operations:go_default_library",
|
||||
"//beacon-chain/db/filters:go_default_library",
|
||||
"//beacon-chain/flags:go_default_library",
|
||||
"//beacon-chain/forkchoice:go_default_library",
|
||||
"//beacon-chain/forkchoice/protoarray:go_default_library",
|
||||
"//beacon-chain/operations/attestations:go_default_library",
|
||||
"//beacon-chain/operations/slashings:go_default_library",
|
||||
"//beacon-chain/operations/voluntaryexits:go_default_library",
|
||||
"//beacon-chain/p2p:go_default_library",
|
||||
"//beacon-chain/powchain:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/stategen:go_default_library",
|
||||
"//proto/beacon/p2p/v1:go_default_library",
|
||||
"//proto/eth/v1alpha1:go_default_library",
|
||||
"//shared/attestationutil:go_default_library",
|
||||
"//shared/bytesutil:go_default_library",
|
||||
"//shared/event:go_default_library",
|
||||
"//shared/featureconfig:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
"//shared/slotutil:go_default_library",
|
||||
"//shared/traceutil:go_default_library",
|
||||
"@com_github_gogo_protobuf//proto:go_default_library",
|
||||
"@com_github_emicklei_dot//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_ssz//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@io_opencensus_go//trace:go_default_library",
|
||||
@@ -53,8 +71,11 @@ go_test(
|
||||
size = "medium",
|
||||
srcs = [
|
||||
"chain_info_test.go",
|
||||
"head_test.go",
|
||||
"init_sync_process_block_test.go",
|
||||
"process_attestation_test.go",
|
||||
"process_block_test.go",
|
||||
"receive_attestation_test.go",
|
||||
"receive_block_test.go",
|
||||
"service_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
@@ -67,8 +88,9 @@ go_test(
|
||||
"//beacon-chain/db/testing:go_default_library",
|
||||
"//beacon-chain/p2p:go_default_library",
|
||||
"//beacon-chain/powchain:go_default_library",
|
||||
"//beacon-chain/state/stateutil:go_default_library",
|
||||
"//proto/beacon/db:go_default_library",
|
||||
"//proto/beacon/p2p/v1:go_default_library",
|
||||
"//proto/eth/v1alpha1:go_default_library",
|
||||
"//shared/bytesutil:go_default_library",
|
||||
"//shared/event:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
@@ -77,9 +99,11 @@ go_test(
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
|
||||
"@com_github_gogo_protobuf//proto:go_default_library",
|
||||
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_ssz//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
|
||||
"@in_gopkg_d4l3k_messagediff_v1//:go_default_library",
|
||||
"@org_golang_x_net//context:go_default_library",
|
||||
],
|
||||
)
|
||||
@@ -91,6 +115,13 @@ go_test(
|
||||
"service_norace_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
gc_goopts = [
|
||||
# Go 1.14 enables checkptr by default when building with -race or -msan. There is a pointer
|
||||
# issue in boltdb, so must disable checkptr at compile time. This flag can be removed once
|
||||
# the project is migrated to etcd's version of boltdb and the issue has been fixed.
|
||||
# See: https://github.com/etcd-io/bbolt/issues/187.
|
||||
"-d=checkptr=0",
|
||||
],
|
||||
race = "on",
|
||||
tags = ["race_on"],
|
||||
deps = [
|
||||
@@ -103,7 +134,6 @@ go_test(
|
||||
"//beacon-chain/p2p:go_default_library",
|
||||
"//beacon-chain/powchain:go_default_library",
|
||||
"//proto/beacon/p2p/v1:go_default_library",
|
||||
"//proto/eth/v1alpha1:go_default_library",
|
||||
"//shared/bytesutil:go_default_library",
|
||||
"//shared/event:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
@@ -112,6 +142,7 @@ go_test(
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
|
||||
"@com_github_gogo_protobuf//proto:go_default_library",
|
||||
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_ssz//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
|
||||
|
||||
@@ -1,11 +1,16 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/go-ssz"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
)
|
||||
|
||||
@@ -13,28 +18,24 @@ import (
|
||||
// directly retrieves chain info related data.
|
||||
type ChainInfoFetcher interface {
|
||||
HeadFetcher
|
||||
CanonicalRootFetcher
|
||||
FinalizationFetcher
|
||||
}
|
||||
|
||||
// GenesisTimeFetcher retrieves the Eth2 genesis timestamp.
|
||||
type GenesisTimeFetcher interface {
|
||||
// TimeFetcher retrieves the Eth2 data that's related to time.
|
||||
type TimeFetcher interface {
|
||||
GenesisTime() time.Time
|
||||
CurrentSlot() uint64
|
||||
}
|
||||
|
||||
// HeadFetcher defines a common interface for methods in blockchain service which
|
||||
// directly retrieves head related data.
|
||||
type HeadFetcher interface {
|
||||
HeadSlot() uint64
|
||||
HeadRoot() []byte
|
||||
HeadBlock() *ethpb.BeaconBlock
|
||||
HeadState() *pb.BeaconState
|
||||
}
|
||||
|
||||
// CanonicalRootFetcher defines a common interface for methods in blockchain service which
|
||||
// directly retrieves canonical roots related data.
|
||||
type CanonicalRootFetcher interface {
|
||||
CanonicalRoot(slot uint64) []byte
|
||||
HeadRoot(ctx context.Context) ([]byte, error)
|
||||
HeadBlock(ctx context.Context) (*ethpb.SignedBeaconBlock, error)
|
||||
HeadState(ctx context.Context) (*state.BeaconState, error)
|
||||
HeadValidatorsIndices(epoch uint64) ([]uint64, error)
|
||||
HeadSeed(epoch uint64) ([32]byte, error)
|
||||
}
|
||||
|
||||
// ForkFetcher retrieves the current fork information of the Ethereum beacon chain.
|
||||
@@ -43,64 +44,133 @@ type ForkFetcher interface {
|
||||
}
|
||||
|
||||
// FinalizationFetcher defines a common interface for methods in blockchain service which
|
||||
// directly retrieves finalization related data.
|
||||
// directly retrieves finalization and justification related data.
|
||||
type FinalizationFetcher interface {
|
||||
FinalizedCheckpt() *ethpb.Checkpoint
|
||||
CurrentJustifiedCheckpt() *ethpb.Checkpoint
|
||||
PreviousJustifiedCheckpt() *ethpb.Checkpoint
|
||||
}
|
||||
|
||||
// FinalizedCheckpt returns the latest finalized checkpoint tracked in fork choice service.
|
||||
// ParticipationFetcher defines a common interface for methods in blockchain service which
|
||||
// directly retrieves validator participation related data.
|
||||
type ParticipationFetcher interface {
|
||||
Participation(epoch uint64) *precompute.Balance
|
||||
}
|
||||
|
||||
// FinalizedCheckpt returns the latest finalized checkpoint from head state.
|
||||
func (s *Service) FinalizedCheckpt() *ethpb.Checkpoint {
|
||||
cp := s.forkChoiceStore.FinalizedCheckpt()
|
||||
if cp != nil {
|
||||
return cp
|
||||
if s.finalizedCheckpt == nil {
|
||||
return ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
}
|
||||
|
||||
return ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
// If head state exists but there hasn't been a finalized check point,
|
||||
// the check point's root should refer to genesis block root.
|
||||
if bytes.Equal(s.finalizedCheckpt.Root, params.BeaconConfig().ZeroHash[:]) {
|
||||
return ðpb.Checkpoint{Root: s.genesisRoot[:]}
|
||||
}
|
||||
|
||||
return state.CopyCheckpoint(s.finalizedCheckpt)
|
||||
}
|
||||
|
||||
// CurrentJustifiedCheckpt returns the current justified checkpoint from head state.
|
||||
func (s *Service) CurrentJustifiedCheckpt() *ethpb.Checkpoint {
|
||||
if s.justifiedCheckpt == nil {
|
||||
return ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
}
|
||||
|
||||
// If head state exists but there hasn't been a justified check point,
|
||||
// the check point root should refer to genesis block root.
|
||||
if bytes.Equal(s.justifiedCheckpt.Root, params.BeaconConfig().ZeroHash[:]) {
|
||||
return ðpb.Checkpoint{Root: s.genesisRoot[:]}
|
||||
}
|
||||
|
||||
return state.CopyCheckpoint(s.justifiedCheckpt)
|
||||
}
|
||||
|
||||
// PreviousJustifiedCheckpt returns the previous justified checkpoint from head state.
|
||||
func (s *Service) PreviousJustifiedCheckpt() *ethpb.Checkpoint {
|
||||
if s.prevJustifiedCheckpt == nil {
|
||||
return ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
}
|
||||
|
||||
// If head state exists but there hasn't been a justified check point,
|
||||
// the check point root should refer to genesis block root.
|
||||
if bytes.Equal(s.prevJustifiedCheckpt.Root, params.BeaconConfig().ZeroHash[:]) {
|
||||
return ðpb.Checkpoint{Root: s.genesisRoot[:]}
|
||||
}
|
||||
|
||||
return state.CopyCheckpoint(s.prevJustifiedCheckpt)
|
||||
}
|
||||
|
||||
// HeadSlot returns the slot of the head of the chain.
|
||||
func (s *Service) HeadSlot() uint64 {
|
||||
s.headLock.RLock()
|
||||
defer s.headLock.RUnlock()
|
||||
if !s.hasHeadState() {
|
||||
return 0
|
||||
}
|
||||
|
||||
return s.headSlot
|
||||
return s.headSlot()
|
||||
}
|
||||
|
||||
// HeadRoot returns the root of the head of the chain.
|
||||
func (s *Service) HeadRoot() []byte {
|
||||
s.headLock.RLock()
|
||||
defer s.headLock.RUnlock()
|
||||
|
||||
root := s.canonicalRoots[s.headSlot]
|
||||
if len(root) != 0 {
|
||||
return root
|
||||
func (s *Service) HeadRoot(ctx context.Context) ([]byte, error) {
|
||||
if s.headRoot() != params.BeaconConfig().ZeroHash {
|
||||
r := s.headRoot()
|
||||
return r[:], nil
|
||||
}
|
||||
|
||||
return params.BeaconConfig().ZeroHash[:]
|
||||
b, err := s.beaconDB.HeadBlock(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if b == nil {
|
||||
return params.BeaconConfig().ZeroHash[:], nil
|
||||
}
|
||||
|
||||
r, err := ssz.HashTreeRoot(b.Block)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return r[:], nil
|
||||
}
|
||||
|
||||
// HeadBlock returns the head block of the chain.
|
||||
func (s *Service) HeadBlock() *ethpb.BeaconBlock {
|
||||
s.headLock.RLock()
|
||||
defer s.headLock.RUnlock()
|
||||
// If the head state is nil from service struct,
|
||||
// it will attempt to get the head block from DB.
|
||||
func (s *Service) HeadBlock(ctx context.Context) (*ethpb.SignedBeaconBlock, error) {
|
||||
if s.hasHeadState() {
|
||||
return s.headBlock(), nil
|
||||
}
|
||||
|
||||
return proto.Clone(s.headBlock).(*ethpb.BeaconBlock)
|
||||
return s.beaconDB.HeadBlock(ctx)
|
||||
}
|
||||
|
||||
// HeadState returns the head state of the chain.
|
||||
func (s *Service) HeadState() *pb.BeaconState {
|
||||
s.headLock.RLock()
|
||||
defer s.headLock.RUnlock()
|
||||
// If the head state is nil from service struct,
|
||||
// it will attempt to get the head state from DB.
|
||||
func (s *Service) HeadState(ctx context.Context) (*state.BeaconState, error) {
|
||||
if s.hasHeadState() {
|
||||
return s.headState(), nil
|
||||
}
|
||||
|
||||
return proto.Clone(s.headState).(*pb.BeaconState)
|
||||
return s.beaconDB.HeadState(ctx)
|
||||
}
|
||||
|
||||
// CanonicalRoot returns the canonical root of a given slot.
|
||||
func (s *Service) CanonicalRoot(slot uint64) []byte {
|
||||
s.headLock.RLock()
|
||||
defer s.headLock.RUnlock()
|
||||
// HeadValidatorsIndices returns a list of active validator indices from the head view of a given epoch.
|
||||
func (s *Service) HeadValidatorsIndices(epoch uint64) ([]uint64, error) {
|
||||
if !s.hasHeadState() {
|
||||
return []uint64{}, nil
|
||||
}
|
||||
return helpers.ActiveValidatorIndices(s.headState(), epoch)
|
||||
}
|
||||
|
||||
return s.canonicalRoots[slot]
|
||||
// HeadSeed returns the seed from the head view of a given epoch.
|
||||
func (s *Service) HeadSeed(epoch uint64) ([32]byte, error) {
|
||||
if !s.hasHeadState() {
|
||||
return [32]byte{}, nil
|
||||
}
|
||||
|
||||
return helpers.Seed(s.headState(), epoch, params.BeaconConfig().DomainBeaconAttester)
|
||||
}
|
||||
|
||||
// GenesisTime returns the genesis time of beacon chain.
|
||||
@@ -110,11 +180,19 @@ func (s *Service) GenesisTime() time.Time {
|
||||
|
||||
// CurrentFork retrieves the latest fork information of the beacon chain.
|
||||
func (s *Service) CurrentFork() *pb.Fork {
|
||||
if s.headState == nil {
|
||||
if !s.hasHeadState() {
|
||||
return &pb.Fork{
|
||||
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
}
|
||||
}
|
||||
return proto.Clone(s.headState.Fork).(*pb.Fork)
|
||||
return s.head.state.Fork()
|
||||
}
|
||||
|
||||
// Participation returns the participation stats of a given epoch.
|
||||
func (s *Service) Participation(epoch uint64) *precompute.Balance {
|
||||
s.epochParticipationLock.RLock()
|
||||
defer s.epochParticipationLock.RUnlock()
|
||||
|
||||
return s.epochParticipation[epoch]
|
||||
}
|
||||
|
||||
@@ -4,21 +4,19 @@ import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
)
|
||||
|
||||
func TestHeadSlot_DataRace(t *testing.T) {
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
s := &Service{
|
||||
beaconDB: db,
|
||||
canonicalRoots: make(map[uint64][]byte),
|
||||
beaconDB: db,
|
||||
}
|
||||
go func() {
|
||||
s.saveHead(
|
||||
context.Background(),
|
||||
ðpb.BeaconBlock{Slot: 777},
|
||||
[32]byte{},
|
||||
)
|
||||
}()
|
||||
@@ -29,49 +27,47 @@ func TestHeadRoot_DataRace(t *testing.T) {
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
s := &Service{
|
||||
beaconDB: db,
|
||||
canonicalRoots: make(map[uint64][]byte),
|
||||
beaconDB: db,
|
||||
head: &head{root: [32]byte{'A'}},
|
||||
}
|
||||
go func() {
|
||||
s.saveHead(
|
||||
context.Background(),
|
||||
ðpb.BeaconBlock{Slot: 777},
|
||||
[32]byte{},
|
||||
)
|
||||
}()
|
||||
s.HeadRoot()
|
||||
if _, err := s.HeadRoot(context.Background()); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHeadBlock_DataRace(t *testing.T) {
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
s := &Service{
|
||||
beaconDB: db,
|
||||
canonicalRoots: make(map[uint64][]byte),
|
||||
beaconDB: db,
|
||||
head: &head{block: ðpb.SignedBeaconBlock{}},
|
||||
}
|
||||
go func() {
|
||||
s.saveHead(
|
||||
context.Background(),
|
||||
ðpb.BeaconBlock{Slot: 777},
|
||||
[32]byte{},
|
||||
)
|
||||
}()
|
||||
s.HeadBlock()
|
||||
s.HeadBlock(context.Background())
|
||||
}
|
||||
|
||||
func TestHeadState_DataRace(t *testing.T) {
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
s := &Service{
|
||||
beaconDB: db,
|
||||
canonicalRoots: make(map[uint64][]byte),
|
||||
beaconDB: db,
|
||||
}
|
||||
go func() {
|
||||
s.saveHead(
|
||||
context.Background(),
|
||||
ðpb.BeaconBlock{Slot: 777},
|
||||
[32]byte{},
|
||||
)
|
||||
}()
|
||||
s.HeadState()
|
||||
s.HeadState(context.Background())
|
||||
}
|
||||
|
||||
@@ -7,27 +7,37 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
)
|
||||
|
||||
// Ensure Service implements chain info interface.
|
||||
var _ = ChainInfoFetcher(&Service{})
|
||||
var _ = GenesisTimeFetcher(&Service{})
|
||||
var _ = TimeFetcher(&Service{})
|
||||
var _ = ForkFetcher(&Service{})
|
||||
|
||||
func TestFinalizedCheckpt_Nil(t *testing.T) {
|
||||
c := setupBeaconChain(t, nil)
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
c := setupBeaconChain(t, db)
|
||||
if !bytes.Equal(c.FinalizedCheckpt().Root, params.BeaconConfig().ZeroHash[:]) {
|
||||
t.Error("Incorrect pre chain start value")
|
||||
}
|
||||
}
|
||||
|
||||
func TestHeadRoot_Nil(t *testing.T) {
|
||||
c := setupBeaconChain(t, nil)
|
||||
if !bytes.Equal(c.HeadRoot(), params.BeaconConfig().ZeroHash[:]) {
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
c := setupBeaconChain(t, db)
|
||||
headRoot, err := c.HeadRoot(context.Background())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !bytes.Equal(headRoot, params.BeaconConfig().ZeroHash[:]) {
|
||||
t.Error("Incorrect pre chain start value")
|
||||
}
|
||||
}
|
||||
@@ -35,48 +45,132 @@ func TestHeadRoot_Nil(t *testing.T) {
|
||||
func TestFinalizedCheckpt_CanRetrieve(t *testing.T) {
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
ctx := context.Background()
|
||||
|
||||
cp := ðpb.Checkpoint{Epoch: 5, Root: []byte("foo")}
|
||||
c := setupBeaconChain(t, db)
|
||||
c.finalizedCheckpt = cp
|
||||
|
||||
if err := c.forkChoiceStore.GenesisStore(ctx, ðpb.Checkpoint{}, ðpb.Checkpoint{}); err != nil {
|
||||
t.Fatal(err)
|
||||
if c.FinalizedCheckpt().Epoch != cp.Epoch {
|
||||
t.Errorf("Finalized epoch at genesis should be %d, got: %d", cp.Epoch, c.FinalizedCheckpt().Epoch)
|
||||
}
|
||||
}
|
||||
|
||||
if c.FinalizedCheckpt().Epoch != 0 {
|
||||
t.Errorf("Finalized epoch at genesis should be 0, got: %d", c.FinalizedCheckpt().Epoch)
|
||||
func TestFinalizedCheckpt_GenesisRootOk(t *testing.T) {
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
genesisRoot := [32]byte{'A'}
|
||||
cp := ðpb.Checkpoint{Root: genesisRoot[:]}
|
||||
c := setupBeaconChain(t, db)
|
||||
c.finalizedCheckpt = cp
|
||||
c.genesisRoot = genesisRoot
|
||||
|
||||
if !bytes.Equal(c.FinalizedCheckpt().Root, c.genesisRoot[:]) {
|
||||
t.Errorf("Got: %v, wanted: %v", c.FinalizedCheckpt().Root, c.genesisRoot[:])
|
||||
}
|
||||
}
|
||||
|
||||
func TestCurrentJustifiedCheckpt_CanRetrieve(t *testing.T) {
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
cp := ðpb.Checkpoint{Epoch: 6, Root: []byte("foo")}
|
||||
c := setupBeaconChain(t, db)
|
||||
c.justifiedCheckpt = cp
|
||||
|
||||
if c.CurrentJustifiedCheckpt().Epoch != cp.Epoch {
|
||||
t.Errorf("Current Justifiied epoch at genesis should be %d, got: %d", cp.Epoch, c.CurrentJustifiedCheckpt().Epoch)
|
||||
}
|
||||
}
|
||||
|
||||
func TestJustifiedCheckpt_GenesisRootOk(t *testing.T) {
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
genesisRoot := [32]byte{'B'}
|
||||
cp := ðpb.Checkpoint{Root: genesisRoot[:]}
|
||||
c := setupBeaconChain(t, db)
|
||||
c.justifiedCheckpt = cp
|
||||
c.genesisRoot = genesisRoot
|
||||
|
||||
if !bytes.Equal(c.CurrentJustifiedCheckpt().Root, c.genesisRoot[:]) {
|
||||
t.Errorf("Got: %v, wanted: %v", c.CurrentJustifiedCheckpt().Root, c.genesisRoot[:])
|
||||
}
|
||||
}
|
||||
|
||||
func TestPreviousJustifiedCheckpt_CanRetrieve(t *testing.T) {
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
cp := ðpb.Checkpoint{Epoch: 7, Root: []byte("foo")}
|
||||
c := setupBeaconChain(t, db)
|
||||
c.prevJustifiedCheckpt = cp
|
||||
|
||||
if c.PreviousJustifiedCheckpt().Epoch != cp.Epoch {
|
||||
t.Errorf("Previous Justifiied epoch at genesis should be %d, got: %d", cp.Epoch, c.PreviousJustifiedCheckpt().Epoch)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPrevJustifiedCheckpt_GenesisRootOk(t *testing.T) {
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
genesisRoot := [32]byte{'C'}
|
||||
cp := ðpb.Checkpoint{Root: genesisRoot[:]}
|
||||
c := setupBeaconChain(t, db)
|
||||
c.prevJustifiedCheckpt = cp
|
||||
c.genesisRoot = genesisRoot
|
||||
|
||||
if !bytes.Equal(c.PreviousJustifiedCheckpt().Root, c.genesisRoot[:]) {
|
||||
t.Errorf("Got: %v, wanted: %v", c.PreviousJustifiedCheckpt().Root, c.genesisRoot[:])
|
||||
}
|
||||
}
|
||||
|
||||
func TestHeadSlot_CanRetrieve(t *testing.T) {
|
||||
c := &Service{}
|
||||
c.headSlot = 100
|
||||
s, _ := state.InitializeFromProto(&pb.BeaconState{})
|
||||
c.head = &head{slot: 100, state: s}
|
||||
if c.HeadSlot() != 100 {
|
||||
t.Errorf("Wanted head slot: %d, got: %d", 100, c.HeadSlot())
|
||||
}
|
||||
}
|
||||
|
||||
func TestHeadRoot_CanRetrieve(t *testing.T) {
|
||||
c := &Service{canonicalRoots: make(map[uint64][]byte)}
|
||||
c.headSlot = 100
|
||||
c.canonicalRoots[c.headSlot] = []byte{'A'}
|
||||
if !bytes.Equal([]byte{'A'}, c.HeadRoot()) {
|
||||
t.Errorf("Wanted head root: %v, got: %d", []byte{'A'}, c.HeadRoot())
|
||||
c := &Service{}
|
||||
c.head = &head{root: [32]byte{'A'}}
|
||||
if [32]byte{'A'} != c.headRoot() {
|
||||
t.Errorf("Wanted head root: %v, got: %d", []byte{'A'}, c.headRoot())
|
||||
}
|
||||
}
|
||||
|
||||
func TestHeadBlock_CanRetrieve(t *testing.T) {
|
||||
b := ðpb.BeaconBlock{Slot: 1}
|
||||
c := &Service{headBlock: b}
|
||||
if !reflect.DeepEqual(b, c.HeadBlock()) {
|
||||
b := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: 1}}
|
||||
s, _ := state.InitializeFromProto(&pb.BeaconState{})
|
||||
c := &Service{}
|
||||
c.head = &head{block: b, state: s}
|
||||
|
||||
recevied, err := c.HeadBlock(context.Background())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(b, recevied) {
|
||||
t.Error("incorrect head block received")
|
||||
}
|
||||
}
|
||||
|
||||
func TestHeadState_CanRetrieve(t *testing.T) {
|
||||
s := &pb.BeaconState{Slot: 2}
|
||||
c := &Service{headState: s}
|
||||
if !reflect.DeepEqual(s, c.HeadState()) {
|
||||
s, err := state.InitializeFromProto(&pb.BeaconState{Slot: 2})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
c := &Service{}
|
||||
c.head = &head{state: s}
|
||||
headState, err := c.HeadState(context.Background())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(s.InnerStateUnsafe(), headState.InnerStateUnsafe()) {
|
||||
t.Error("incorrect head state received")
|
||||
}
|
||||
}
|
||||
@@ -91,19 +185,13 @@ func TestGenesisTime_CanRetrieve(t *testing.T) {
|
||||
|
||||
func TestCurrentFork_CanRetrieve(t *testing.T) {
|
||||
f := &pb.Fork{Epoch: 999}
|
||||
s := &pb.BeaconState{Fork: f}
|
||||
c := &Service{headState: s}
|
||||
if !reflect.DeepEqual(c.CurrentFork(), f) {
|
||||
t.Error("Recieved incorrect fork version")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCanonicalRoot_CanRetrieve(t *testing.T) {
|
||||
c := &Service{canonicalRoots: make(map[uint64][]byte)}
|
||||
slot := uint64(123)
|
||||
r := []byte{'B'}
|
||||
c.canonicalRoots[slot] = r
|
||||
if !bytes.Equal(r, c.CanonicalRoot(slot)) {
|
||||
t.Errorf("Wanted head root: %v, got: %d", []byte{'A'}, c.CanonicalRoot(slot))
|
||||
s, err := state.InitializeFromProto(&pb.BeaconState{Fork: f})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
c := &Service{}
|
||||
c.head = &head{state: s}
|
||||
if !proto.Equal(c.CurrentFork(), f) {
|
||||
t.Error("Received incorrect fork version")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,178 +0,0 @@
|
||||
package forkchoice
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
)
|
||||
|
||||
func BenchmarkForkChoiceTree1(b *testing.B) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(b)
|
||||
defer testDB.TeardownDB(b, db)
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
|
||||
roots, err := blockTree1(db)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
// Benchmark fork choice with 1024 validators
|
||||
validators := make([]*ethpb.Validator, 1024)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
validators[i] = ðpb.Validator{ExitEpoch: 2, EffectiveBalance: 1e9}
|
||||
}
|
||||
s := &pb.BeaconState{Validators: validators}
|
||||
|
||||
if err := store.GenesisStore(ctx, ðpb.Checkpoint{}, ðpb.Checkpoint{}); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
store.justifiedCheckpt.Root = roots[0]
|
||||
if err := store.db.SaveState(ctx, s, bytesutil.ToBytes32(roots[0])); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
if err := store.checkpointState.AddCheckpointState(&cache.CheckpointState{
|
||||
Checkpoint: store.justifiedCheckpt,
|
||||
State: s,
|
||||
}); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
// Spread out the votes evenly for all 3 leaf nodes
|
||||
for i := 0; i < len(validators); i++ {
|
||||
switch {
|
||||
case i < 256:
|
||||
if err := store.db.SaveValidatorLatestVote(ctx, uint64(i), &pb.ValidatorLatestVote{Root: roots[1]}); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
case i > 768:
|
||||
if err := store.db.SaveValidatorLatestVote(ctx, uint64(i), &pb.ValidatorLatestVote{Root: roots[7]}); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
default:
|
||||
if err := store.db.SaveValidatorLatestVote(ctx, uint64(i), &pb.ValidatorLatestVote{Root: roots[8]}); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, err := store.Head(ctx)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkForkChoiceTree2(b *testing.B) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(b)
|
||||
defer testDB.TeardownDB(b, db)
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
|
||||
roots, err := blockTree2(db)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
// Benchmark fork choice with 1024 validators
|
||||
validators := make([]*ethpb.Validator, 1024)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
validators[i] = ðpb.Validator{ExitEpoch: 2, EffectiveBalance: 1e9}
|
||||
}
|
||||
s := &pb.BeaconState{Validators: validators}
|
||||
if err := store.GenesisStore(ctx, ðpb.Checkpoint{}, ðpb.Checkpoint{}); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
store.justifiedCheckpt.Root = roots[0]
|
||||
if err := store.db.SaveState(ctx, s, bytesutil.ToBytes32(roots[0])); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
if err := store.checkpointState.AddCheckpointState(&cache.CheckpointState{
|
||||
Checkpoint: store.justifiedCheckpt,
|
||||
State: s,
|
||||
}); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
// Spread out the votes evenly for all the leaf nodes. 8 to 15
|
||||
nodeIndex := 8
|
||||
for i := 0; i < len(validators); i++ {
|
||||
if err := store.db.SaveValidatorLatestVote(ctx, uint64(i), &pb.ValidatorLatestVote{Root: roots[nodeIndex]}); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
if i%155 == 0 {
|
||||
nodeIndex++
|
||||
}
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, err := store.Head(ctx)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkForkChoiceTree3(b *testing.B) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(b)
|
||||
defer testDB.TeardownDB(b, db)
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
|
||||
roots, err := blockTree3(db)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
// Benchmark fork choice with 1024 validators
|
||||
validators := make([]*ethpb.Validator, 1024)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
validators[i] = ðpb.Validator{ExitEpoch: 2, EffectiveBalance: 1e9}
|
||||
}
|
||||
s := &pb.BeaconState{Validators: validators}
|
||||
if err := store.GenesisStore(ctx, ðpb.Checkpoint{}, ðpb.Checkpoint{}); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
store.justifiedCheckpt.Root = roots[0]
|
||||
if err := store.db.SaveState(ctx, s, bytesutil.ToBytes32(roots[0])); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
if err := store.checkpointState.AddCheckpointState(&cache.CheckpointState{
|
||||
Checkpoint: store.justifiedCheckpt,
|
||||
State: s,
|
||||
}); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
// All validators vote on the same head
|
||||
for i := 0; i < len(validators); i++ {
|
||||
if err := store.db.SaveValidatorLatestVote(ctx, uint64(i), &pb.ValidatorLatestVote{Root: roots[len(roots)-1]}); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, err := store.Head(ctx)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,9 +0,0 @@
|
||||
/*
|
||||
Package forkchoice implements the Latest Message Driven GHOST (Greediest Heaviest Observed
|
||||
Sub-Tree) algorithm as the Ethereum Serenity beacon chain fork choice rule. This algorithm is designed to
|
||||
properly detect the canonical chain based on validator votes even in the presence of high network
|
||||
latency, network partitions, and many conflicting blocks. To read more about fork choice, read the
|
||||
official accompanying document:
|
||||
https://github.com/ethereum/eth2.0-specs/blob/v0.8.3/specs/core/0_fork-choice.md
|
||||
*/
|
||||
package forkchoice
|
||||
@@ -1,59 +0,0 @@
|
||||
test_cases:
|
||||
# GHOST chooses b3 with the heaviest weight
|
||||
- blocks:
|
||||
- id: 'b0'
|
||||
parent: 'b0'
|
||||
- id: 'b1'
|
||||
parent: 'b0'
|
||||
- id: 'b2'
|
||||
parent: 'b1'
|
||||
- id: 'b3'
|
||||
parent: 'b1'
|
||||
weights:
|
||||
b0: 0
|
||||
b1: 0
|
||||
b2: 5
|
||||
b3: 10
|
||||
head: 'b3'
|
||||
# GHOST chooses b1 with the heaviest weight
|
||||
- blocks:
|
||||
- id: 'b0'
|
||||
parent: 'b0'
|
||||
- id: 'b1'
|
||||
parent: 'b0'
|
||||
- id: 'b2'
|
||||
parent: 'b0'
|
||||
- id: 'b3'
|
||||
parent: 'b0'
|
||||
weights:
|
||||
b1: 5
|
||||
b2: 4
|
||||
b3: 3
|
||||
head: 'b1'
|
||||
# Equal weights children, GHOST chooses b2 because it is higher lexicographically than b3
|
||||
- blocks:
|
||||
- id: 'b0'
|
||||
parent: 'b0'
|
||||
- id: 'b1'
|
||||
parent: 'b0'
|
||||
- id: 'b2'
|
||||
parent: 'b0'
|
||||
- id: 'b3'
|
||||
parent: 'b0'
|
||||
weights:
|
||||
b1: 5
|
||||
b2: 6
|
||||
b3: 6
|
||||
head: 'b3'
|
||||
# Equal weights children, GHOST chooses b2 because it is higher lexicographically than b1
|
||||
- blocks:
|
||||
- id: 'b0'
|
||||
parent: 'b0'
|
||||
- id: 'b1'
|
||||
parent: 'b0'
|
||||
- id: 'b2'
|
||||
parent: 'b0'
|
||||
weights:
|
||||
b1: 0
|
||||
b2: 0
|
||||
head: 'b2'
|
||||
@@ -1,140 +0,0 @@
|
||||
package forkchoice
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"io/ioutil"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/go-ssz"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
type Config struct {
|
||||
TestCases []struct {
|
||||
Blocks []struct {
|
||||
ID string `yaml:"id"`
|
||||
Parent string `yaml:"parent"`
|
||||
} `yaml:"blocks"`
|
||||
Weights map[string]int `yaml:"weights"`
|
||||
Head string `yaml:"head"`
|
||||
} `yaml:"test_cases"`
|
||||
}
|
||||
|
||||
func TestGetHeadFromYaml(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
filename, _ := filepath.Abs("./lmd_ghost_test.yaml")
|
||||
yamlFile, err := ioutil.ReadFile(filename)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
var c *Config
|
||||
err = yaml.Unmarshal(yamlFile, &c)
|
||||
|
||||
for _, test := range c.TestCases {
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
blksRoot := make(map[int][]byte)
|
||||
// Construct block tree from yaml.
|
||||
for _, blk := range test.Blocks {
|
||||
// genesis block condition
|
||||
if blk.ID == blk.Parent {
|
||||
b := ðpb.BeaconBlock{Slot: 0, ParentRoot: []byte{'g'}}
|
||||
if err := db.SaveBlock(ctx, b); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
root, err := ssz.SigningRoot(b)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
blksRoot[0] = root[:]
|
||||
} else {
|
||||
slot, err := strconv.Atoi(blk.ID[1:])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
parentSlot, err := strconv.Atoi(blk.Parent[1:])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
b := ðpb.BeaconBlock{Slot: uint64(slot), ParentRoot: blksRoot[parentSlot]}
|
||||
if err := db.SaveBlock(ctx, b); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
root, err := ssz.SigningRoot(b)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
blksRoot[slot] = root[:]
|
||||
}
|
||||
}
|
||||
|
||||
// Assign validator votes to the blocks as weights.
|
||||
count := 0
|
||||
for blk, votes := range test.Weights {
|
||||
slot, err := strconv.Atoi(blk[1:])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
max := count + votes
|
||||
for i := count; i < max; i++ {
|
||||
if err := db.SaveValidatorLatestVote(ctx, uint64(i), &pb.ValidatorLatestVote{Root: blksRoot[slot]}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
count++
|
||||
}
|
||||
}
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
validators := make([]*ethpb.Validator, count)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
validators[i] = ðpb.Validator{ExitEpoch: 2, EffectiveBalance: 1e9}
|
||||
}
|
||||
|
||||
s := &pb.BeaconState{Validators: validators}
|
||||
|
||||
if err := store.GenesisStore(ctx, ðpb.Checkpoint{}, ðpb.Checkpoint{}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
store.justifiedCheckpt.Root = blksRoot[0]
|
||||
if err := store.db.SaveState(ctx, s, bytesutil.ToBytes32(blksRoot[0])); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := store.checkpointState.AddCheckpointState(&cache.CheckpointState{
|
||||
Checkpoint: store.justifiedCheckpt,
|
||||
State: s,
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
head, err := store.Head(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
headSlot, err := strconv.Atoi(test.Head[1:])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
wantedHead := blksRoot[headSlot]
|
||||
|
||||
if !bytes.Equal(head, wantedHead) {
|
||||
t.Errorf("wanted root %#x, got root %#x", wantedHead, head)
|
||||
}
|
||||
|
||||
helpers.ClearAllCaches()
|
||||
testDB.TeardownDB(t, db)
|
||||
|
||||
}
|
||||
}
|
||||
@@ -1,40 +0,0 @@
|
||||
package forkchoice
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var log = logrus.WithField("prefix", "forkchoice")
|
||||
|
||||
// logs epoch related data during epoch boundary.
|
||||
func logEpochData(beaconState *pb.BeaconState) {
|
||||
log.WithFields(logrus.Fields{
|
||||
"epoch": helpers.CurrentEpoch(beaconState),
|
||||
"finalizedEpoch": beaconState.FinalizedCheckpoint.Epoch,
|
||||
"justifiedEpoch": beaconState.CurrentJustifiedCheckpoint.Epoch,
|
||||
"previousJustifiedEpoch": beaconState.PreviousJustifiedCheckpoint.Epoch,
|
||||
}).Info("Starting next epoch")
|
||||
activeVals, err := helpers.ActiveValidatorIndices(beaconState, helpers.CurrentEpoch(beaconState))
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get active validator indices")
|
||||
return
|
||||
}
|
||||
log.WithFields(logrus.Fields{
|
||||
"totalValidators": len(beaconState.Validators),
|
||||
"activeValidators": len(activeVals),
|
||||
"averageBalance": fmt.Sprintf("%.5f ETH", averageBalance(beaconState.Balances)),
|
||||
}).Info("Validator registry information")
|
||||
}
|
||||
|
||||
func averageBalance(balances []uint64) float64 {
|
||||
total := uint64(0)
|
||||
for i := 0; i < len(balances); i++ {
|
||||
total += balances[i]
|
||||
}
|
||||
return float64(total) / float64(len(balances)) / float64(params.BeaconConfig().GweiPerEth)
|
||||
}
|
||||
@@ -1,92 +0,0 @@
|
||||
package forkchoice
|
||||
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
)
|
||||
|
||||
var (
|
||||
beaconFinalizedEpoch = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "beacon_finalized_epoch",
|
||||
Help: "Last finalized epoch of the processed state",
|
||||
})
|
||||
beaconFinalizedRoot = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "beacon_finalized_root",
|
||||
Help: "Last finalized root of the processed state",
|
||||
})
|
||||
beaconCurrentJustifiedEpoch = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "beacon_current_justified_epoch",
|
||||
Help: "Current justified epoch of the processed state",
|
||||
})
|
||||
beaconCurrentJustifiedRoot = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "beacon_current_justified_root",
|
||||
Help: "Current justified root of the processed state",
|
||||
})
|
||||
beaconPrevJustifiedEpoch = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "beacon_previous_justified_epoch",
|
||||
Help: "Previous justified epoch of the processed state",
|
||||
})
|
||||
beaconPrevJustifiedRoot = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "beacon_previous_justified_root",
|
||||
Help: "Previous justified root of the processed state",
|
||||
})
|
||||
activeValidatorsGauge = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "state_active_validators",
|
||||
Help: "Total number of active validators",
|
||||
})
|
||||
slashedValidatorsGauge = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "state_slashed_validators",
|
||||
Help: "Total slashed validators",
|
||||
})
|
||||
withdrawnValidatorsGauge = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "state_withdrawn_validators",
|
||||
Help: "Total withdrawn validators",
|
||||
})
|
||||
totalValidatorsGauge = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "beacon_current_validators",
|
||||
Help: "Number of status=pending|active|exited|withdrawable validators in current epoch",
|
||||
})
|
||||
)
|
||||
|
||||
func reportEpochMetrics(state *pb.BeaconState) {
|
||||
currentEpoch := state.Slot / params.BeaconConfig().SlotsPerEpoch
|
||||
|
||||
// Validator counts
|
||||
var active float64
|
||||
var slashed float64
|
||||
var withdrawn float64
|
||||
for _, v := range state.Validators {
|
||||
if v.ActivationEpoch <= currentEpoch && currentEpoch < v.ExitEpoch {
|
||||
active++
|
||||
}
|
||||
if v.Slashed {
|
||||
slashed++
|
||||
}
|
||||
if currentEpoch >= v.ExitEpoch {
|
||||
withdrawn++
|
||||
}
|
||||
}
|
||||
activeValidatorsGauge.Set(active)
|
||||
slashedValidatorsGauge.Set(slashed)
|
||||
withdrawnValidatorsGauge.Set(withdrawn)
|
||||
totalValidatorsGauge.Set(float64(len(state.Validators)))
|
||||
|
||||
// Last justified slot
|
||||
if state.CurrentJustifiedCheckpoint != nil {
|
||||
beaconCurrentJustifiedEpoch.Set(float64(state.CurrentJustifiedCheckpoint.Epoch))
|
||||
beaconCurrentJustifiedRoot.Set(float64(bytesutil.ToLowInt64(state.CurrentJustifiedCheckpoint.Root)))
|
||||
}
|
||||
// Last previous justified slot
|
||||
if state.PreviousJustifiedCheckpoint != nil {
|
||||
beaconPrevJustifiedEpoch.Set(float64(state.PreviousJustifiedCheckpoint.Epoch))
|
||||
beaconPrevJustifiedRoot.Set(float64(bytesutil.ToLowInt64(state.PreviousJustifiedCheckpoint.Root)))
|
||||
}
|
||||
// Last finalized slot
|
||||
if state.FinalizedCheckpoint != nil {
|
||||
beaconFinalizedEpoch.Set(float64(state.FinalizedCheckpoint.Epoch))
|
||||
beaconFinalizedRoot.Set(float64(bytesutil.ToLowInt64(state.FinalizedCheckpoint.Root)))
|
||||
}
|
||||
}
|
||||
@@ -1,293 +0,0 @@
|
||||
package forkchoice
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/go-ssz"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/hashutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// OnAttestation is called whenever an attestation is received, it updates validators latest vote,
|
||||
// as well as the fork choice store struct.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
// def on_attestation(store: Store, attestation: Attestation) -> None:
|
||||
// target = attestation.data.target
|
||||
//
|
||||
// # Cannot calculate the current shuffling if have not seen the target
|
||||
// assert target.root in store.blocks
|
||||
//
|
||||
// # Attestations cannot be from future epochs. If they are, delay consideration until the epoch arrives
|
||||
// base_state = store.block_states[target.root].copy()
|
||||
// assert store.time >= base_state.genesis_time + compute_start_slot_of_epoch(target.epoch) * SECONDS_PER_SLOT
|
||||
//
|
||||
// # Store target checkpoint state if not yet seen
|
||||
// if target not in store.checkpoint_states:
|
||||
// process_slots(base_state, compute_start_slot_of_epoch(target.epoch))
|
||||
// store.checkpoint_states[target] = base_state
|
||||
// target_state = store.checkpoint_states[target]
|
||||
//
|
||||
// # Attestations can only affect the fork choice of subsequent slots.
|
||||
// # Delay consideration in the fork choice until their slot is in the past.
|
||||
// attestation_slot = get_attestation_data_slot(target_state, attestation.data)
|
||||
// assert store.time >= (attestation_slot + 1) * SECONDS_PER_SLOT
|
||||
//
|
||||
// # Get state at the `target` to validate attestation and calculate the committees
|
||||
// indexed_attestation = get_indexed_attestation(target_state, attestation)
|
||||
// assert is_valid_indexed_attestation(target_state, indexed_attestation)
|
||||
//
|
||||
// # Update latest messages
|
||||
// for i in indexed_attestation.custody_bit_0_indices + indexed_attestation.custody_bit_1_indices:
|
||||
// if i not in store.latest_messages or target.epoch > store.latest_messages[i].epoch:
|
||||
// store.latest_messages[i] = LatestMessage(epoch=target.epoch, root=attestation.data.beacon_block_root)
|
||||
func (s *Store) OnAttestation(ctx context.Context, a *ethpb.Attestation) (uint64, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "forkchoice.onAttestation")
|
||||
defer span.End()
|
||||
|
||||
tgt := proto.Clone(a.Data.Target).(*ethpb.Checkpoint)
|
||||
tgtSlot := helpers.StartSlot(tgt.Epoch)
|
||||
|
||||
// Verify beacon node has seen the target block before.
|
||||
if !s.db.HasBlock(ctx, bytesutil.ToBytes32(tgt.Root)) {
|
||||
return 0, fmt.Errorf("target root %#x does not exist in db", bytesutil.Trunc(tgt.Root))
|
||||
}
|
||||
|
||||
// Verify attestation target has had a valid pre state produced by the target block.
|
||||
baseState, err := s.verifyAttPreState(ctx, tgt)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// Verify Attestations cannot be from future epochs.
|
||||
if err := helpers.VerifySlotTime(baseState.GenesisTime, tgtSlot); err != nil {
|
||||
return 0, errors.Wrap(err, "could not verify attestation target slot")
|
||||
}
|
||||
|
||||
// Store target checkpoint state if not yet seen.
|
||||
baseState, err = s.saveCheckpointState(ctx, baseState, tgt)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// Delay attestation processing until the subsequent slot.
|
||||
if err := s.waitForAttInclDelay(ctx, a, baseState); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// Verify attestations can only affect the fork choice of subsequent slots.
|
||||
if err := helpers.VerifySlotTime(baseState.GenesisTime, a.Data.Slot+1); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
s.attsQueueLock.Lock()
|
||||
defer s.attsQueueLock.Unlock()
|
||||
atts := make([]*ethpb.Attestation, 0, len(s.attsQueue))
|
||||
for root, a := range s.attsQueue {
|
||||
log := log.WithFields(logrus.Fields{
|
||||
"AggregatedBitfield": fmt.Sprintf("%08b", a.AggregationBits),
|
||||
"Root": fmt.Sprintf("%#x", root),
|
||||
})
|
||||
log.Debug("Updating latest votes")
|
||||
|
||||
// Use the target state to to validate attestation and calculate the committees.
|
||||
indexedAtt, err := s.verifyAttestation(ctx, baseState, a)
|
||||
if err != nil {
|
||||
log.WithError(err).Warn("Removing attestation from queue.")
|
||||
delete(s.attsQueue, root)
|
||||
continue
|
||||
}
|
||||
|
||||
// Update every validator's latest vote.
|
||||
if err := s.updateAttVotes(ctx, indexedAtt, tgt.Root, tgt.Epoch); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// Mark attestation as seen we don't update votes when it appears in block.
|
||||
if err := s.setSeenAtt(a); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
delete(s.attsQueue, root)
|
||||
att, err := s.aggregatedAttestations(ctx, a)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
atts = append(atts, att...)
|
||||
}
|
||||
|
||||
if err := s.db.SaveAttestations(ctx, atts); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return tgtSlot, nil
|
||||
}
|
||||
|
||||
// verifyAttPreState validates input attested check point has a valid pre-state.
|
||||
func (s *Store) verifyAttPreState(ctx context.Context, c *ethpb.Checkpoint) (*pb.BeaconState, error) {
|
||||
baseState, err := s.db.State(ctx, bytesutil.ToBytes32(c.Root))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get pre state for slot %d", helpers.StartSlot(c.Epoch))
|
||||
}
|
||||
if baseState == nil {
|
||||
return nil, fmt.Errorf("pre state of target block %d does not exist", helpers.StartSlot(c.Epoch))
|
||||
}
|
||||
return baseState, nil
|
||||
}
|
||||
|
||||
// saveCheckpointState saves and returns the processed state with the associated check point.
|
||||
func (s *Store) saveCheckpointState(ctx context.Context, baseState *pb.BeaconState, c *ethpb.Checkpoint) (*pb.BeaconState, error) {
|
||||
s.checkpointStateLock.Lock()
|
||||
defer s.checkpointStateLock.Unlock()
|
||||
cachedState, err := s.checkpointState.StateByCheckpoint(c)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get cached checkpoint state")
|
||||
}
|
||||
if cachedState != nil {
|
||||
return cachedState, nil
|
||||
}
|
||||
|
||||
// Advance slots only when it's higher than current state slot.
|
||||
if helpers.StartSlot(c.Epoch) > baseState.Slot {
|
||||
stateCopy := proto.Clone(baseState).(*pb.BeaconState)
|
||||
baseState, err = state.ProcessSlots(ctx, stateCopy, helpers.StartSlot(c.Epoch))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not process slots up to %d", helpers.StartSlot(c.Epoch))
|
||||
}
|
||||
}
|
||||
|
||||
if err := s.checkpointState.AddCheckpointState(&cache.CheckpointState{
|
||||
Checkpoint: c,
|
||||
State: baseState,
|
||||
}); err != nil {
|
||||
return nil, errors.Wrap(err, "could not saved checkpoint state to cache")
|
||||
}
|
||||
|
||||
return baseState, nil
|
||||
}
|
||||
|
||||
// waitForAttInclDelay waits until the next slot because attestation can only affect
|
||||
// fork choice of subsequent slot. This is to delay attestation inclusion for fork choice
|
||||
// until the attested slot is in the past.
|
||||
func (s *Store) waitForAttInclDelay(ctx context.Context, a *ethpb.Attestation, targetState *pb.BeaconState) error {
|
||||
ctx, span := trace.StartSpan(ctx, "beacon-chain.forkchoice.waitForAttInclDelay")
|
||||
defer span.End()
|
||||
|
||||
nextSlot := a.Data.Slot + 1
|
||||
duration := time.Duration(nextSlot*params.BeaconConfig().SecondsPerSlot) * time.Second
|
||||
timeToInclude := time.Unix(int64(targetState.GenesisTime), 0).Add(duration)
|
||||
|
||||
if err := s.aggregateAttestation(ctx, a); err != nil {
|
||||
return errors.Wrap(err, "could not aggregate attestation")
|
||||
}
|
||||
|
||||
time.Sleep(time.Until(timeToInclude))
|
||||
return nil
|
||||
}
|
||||
|
||||
// aggregateAttestation aggregates the attestations in the pending queue.
|
||||
func (s *Store) aggregateAttestation(ctx context.Context, att *ethpb.Attestation) error {
|
||||
s.attsQueueLock.Lock()
|
||||
defer s.attsQueueLock.Unlock()
|
||||
root, err := ssz.HashTreeRoot(att.Data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if a, ok := s.attsQueue[root]; ok {
|
||||
a, err := helpers.AggregateAttestation(a, att)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
s.attsQueue[root] = a
|
||||
return nil
|
||||
}
|
||||
s.attsQueue[root] = proto.Clone(att).(*ethpb.Attestation)
|
||||
return nil
|
||||
}
|
||||
|
||||
// verifyAttestation validates input attestation is valid.
|
||||
func (s *Store) verifyAttestation(ctx context.Context, baseState *pb.BeaconState, a *ethpb.Attestation) (*ethpb.IndexedAttestation, error) {
|
||||
indexedAtt, err := blocks.ConvertToIndexed(ctx, baseState, a)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not convert attestation to indexed attestation")
|
||||
}
|
||||
if err := blocks.VerifyIndexedAttestation(ctx, baseState, indexedAtt); err != nil {
|
||||
return nil, errors.Wrap(err, "could not verify indexed attestation")
|
||||
}
|
||||
return indexedAtt, nil
|
||||
}
|
||||
|
||||
// updateAttVotes updates validator's latest votes based on the incoming attestation.
|
||||
func (s *Store) updateAttVotes(
|
||||
ctx context.Context,
|
||||
indexedAtt *ethpb.IndexedAttestation,
|
||||
tgtRoot []byte,
|
||||
tgtEpoch uint64) error {
|
||||
|
||||
indices := append(indexedAtt.CustodyBit_0Indices, indexedAtt.CustodyBit_1Indices...)
|
||||
newVoteIndices := make([]uint64, 0, len(indices))
|
||||
newVotes := make([]*pb.ValidatorLatestVote, 0, len(indices))
|
||||
for _, i := range indices {
|
||||
vote, err := s.db.ValidatorLatestVote(ctx, i)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "could not get latest vote for validator %d", i)
|
||||
}
|
||||
if vote == nil || tgtEpoch > vote.Epoch {
|
||||
newVotes = append(newVotes, &pb.ValidatorLatestVote{
|
||||
Epoch: tgtEpoch,
|
||||
Root: tgtRoot,
|
||||
})
|
||||
newVoteIndices = append(newVoteIndices, i)
|
||||
}
|
||||
}
|
||||
return s.db.SaveValidatorLatestVotes(ctx, newVoteIndices, newVotes)
|
||||
}
|
||||
|
||||
// setSeenAtt sets the attestation hash in seen attestation map to true.
|
||||
func (s *Store) setSeenAtt(a *ethpb.Attestation) error {
|
||||
s.seenAttsLock.Lock()
|
||||
defer s.seenAttsLock.Unlock()
|
||||
|
||||
r, err := hashutil.HashProto(a)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s.seenAtts[r] = true
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// aggregatedAttestation returns the aggregated attestation after checking saved one in db.
|
||||
func (s *Store) aggregatedAttestations(ctx context.Context, att *ethpb.Attestation) ([]*ethpb.Attestation, error) {
|
||||
r, err := ssz.HashTreeRoot(att.Data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
saved, err := s.db.AttestationsByDataRoot(ctx, r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if saved == nil {
|
||||
return []*ethpb.Attestation{att}, nil
|
||||
}
|
||||
|
||||
aggregated, err := helpers.AggregateAttestations(append(saved, att))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return aggregated, nil
|
||||
}
|
||||
@@ -1,260 +0,0 @@
|
||||
package forkchoice
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/go-ssz"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
)
|
||||
|
||||
func TestStore_OnAttestation(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
|
||||
_, err := blockTree1(db)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
BlkWithOutState := ðpb.BeaconBlock{Slot: 0}
|
||||
if err := db.SaveBlock(ctx, BlkWithOutState); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
BlkWithOutStateRoot, _ := ssz.SigningRoot(BlkWithOutState)
|
||||
|
||||
BlkWithStateBadAtt := ðpb.BeaconBlock{Slot: 1}
|
||||
if err := db.SaveBlock(ctx, BlkWithStateBadAtt); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
BlkWithStateBadAttRoot, _ := ssz.SigningRoot(BlkWithStateBadAtt)
|
||||
if err := store.db.SaveState(ctx, &pb.BeaconState{}, BlkWithStateBadAttRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
BlkWithValidState := ðpb.BeaconBlock{Slot: 2}
|
||||
if err := db.SaveBlock(ctx, BlkWithValidState); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
BlkWithValidStateRoot, _ := ssz.SigningRoot(BlkWithValidState)
|
||||
if err := store.db.SaveState(ctx, &pb.BeaconState{
|
||||
Fork: &pb.Fork{
|
||||
Epoch: 0,
|
||||
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
},
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
}, BlkWithValidStateRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
a *ethpb.Attestation
|
||||
s *pb.BeaconState
|
||||
wantErr bool
|
||||
wantErrString string
|
||||
}{
|
||||
{
|
||||
name: "attestation's target root not in db",
|
||||
a: ðpb.Attestation{Data: ðpb.AttestationData{Target: ðpb.Checkpoint{Root: []byte{'A'}}}},
|
||||
s: &pb.BeaconState{},
|
||||
wantErr: true,
|
||||
wantErrString: "target root 0x41 does not exist in db",
|
||||
},
|
||||
{
|
||||
name: "no pre state for attestations's target block",
|
||||
a: ðpb.Attestation{Data: ðpb.AttestationData{Target: ðpb.Checkpoint{Root: BlkWithOutStateRoot[:]}}},
|
||||
s: &pb.BeaconState{},
|
||||
wantErr: true,
|
||||
wantErrString: "pre state of target block 0 does not exist",
|
||||
},
|
||||
{
|
||||
name: "process attestation from future epoch",
|
||||
a: ðpb.Attestation{Data: ðpb.AttestationData{Target: ðpb.Checkpoint{Epoch: params.BeaconConfig().FarFutureEpoch,
|
||||
Root: BlkWithStateBadAttRoot[:]}}},
|
||||
s: &pb.BeaconState{},
|
||||
wantErr: true,
|
||||
wantErrString: "could not process slot from the future",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if err := store.GenesisStore(ctx, ðpb.Checkpoint{}, ðpb.Checkpoint{}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
_, err := store.OnAttestation(ctx, tt.a)
|
||||
if tt.wantErr {
|
||||
if !strings.Contains(err.Error(), tt.wantErrString) {
|
||||
t.Errorf("Store.OnAttestation() error = %v, wantErr = %v", err, tt.wantErrString)
|
||||
}
|
||||
} else {
|
||||
t.Error(err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestStore_SaveCheckpointState(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
params.UseDemoBeaconConfig()
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
|
||||
s := &pb.BeaconState{
|
||||
Fork: &pb.Fork{
|
||||
Epoch: 0,
|
||||
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
},
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
StateRoots: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
BlockRoots: make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot),
|
||||
LatestBlockHeader: ðpb.BeaconBlockHeader{},
|
||||
JustificationBits: []byte{0},
|
||||
Slashings: make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector),
|
||||
FinalizedCheckpoint: ðpb.Checkpoint{},
|
||||
}
|
||||
if err := store.GenesisStore(ctx, ðpb.Checkpoint{}, ðpb.Checkpoint{}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
cp1 := ðpb.Checkpoint{Epoch: 1, Root: []byte{'A'}}
|
||||
s1, err := store.saveCheckpointState(ctx, s, cp1)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if s1.Slot != 1*params.BeaconConfig().SlotsPerEpoch {
|
||||
t.Errorf("Wanted state slot: %d, got: %d", 1*params.BeaconConfig().SlotsPerEpoch, s1.Slot)
|
||||
}
|
||||
|
||||
cp2 := ðpb.Checkpoint{Epoch: 2, Root: []byte{'B'}}
|
||||
s2, err := store.saveCheckpointState(ctx, s, cp2)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if s2.Slot != 2*params.BeaconConfig().SlotsPerEpoch {
|
||||
t.Errorf("Wanted state slot: %d, got: %d", 2*params.BeaconConfig().SlotsPerEpoch, s2.Slot)
|
||||
}
|
||||
|
||||
s1, err = store.saveCheckpointState(ctx, nil, cp1)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if s1.Slot != 1*params.BeaconConfig().SlotsPerEpoch {
|
||||
t.Errorf("Wanted state slot: %d, got: %d", 1*params.BeaconConfig().SlotsPerEpoch, s1.Slot)
|
||||
}
|
||||
|
||||
s1, err = store.checkpointState.StateByCheckpoint(cp1)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if s1.Slot != 1*params.BeaconConfig().SlotsPerEpoch {
|
||||
t.Errorf("Wanted state slot: %d, got: %d", 1*params.BeaconConfig().SlotsPerEpoch, s1.Slot)
|
||||
}
|
||||
|
||||
s2, err = store.checkpointState.StateByCheckpoint(cp2)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if s2.Slot != 2*params.BeaconConfig().SlotsPerEpoch {
|
||||
t.Errorf("Wanted state slot: %d, got: %d", 2*params.BeaconConfig().SlotsPerEpoch, s2.Slot)
|
||||
}
|
||||
|
||||
s.Slot = params.BeaconConfig().SlotsPerEpoch + 1
|
||||
if err := store.GenesisStore(ctx, ðpb.Checkpoint{}, ðpb.Checkpoint{}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
cp3 := ðpb.Checkpoint{Epoch: 1, Root: []byte{'C'}}
|
||||
s3, err := store.saveCheckpointState(ctx, s, cp3)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if s3.Slot != s.Slot {
|
||||
t.Errorf("Wanted state slot: %d, got: %d", s.Slot, s3.Slot)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStore_AggregateAttestation(t *testing.T) {
|
||||
_, _, privKeys := testutil.SetupInitialDeposits(t, 100)
|
||||
f := &pb.Fork{
|
||||
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
Epoch: 0,
|
||||
}
|
||||
domain := helpers.Domain(f, 0, params.BeaconConfig().DomainBeaconAttester)
|
||||
sig := privKeys[0].Sign([]byte{}, domain)
|
||||
|
||||
store := &Store{attsQueue: make(map[[32]byte]*ethpb.Attestation)}
|
||||
|
||||
b1 := bitfield.NewBitlist(8)
|
||||
b1.SetBitAt(0, true)
|
||||
a := ðpb.Attestation{Data: ðpb.AttestationData{}, AggregationBits: b1, Signature: sig.Marshal()}
|
||||
|
||||
if err := store.aggregateAttestation(context.Background(), a); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
r, _ := ssz.HashTreeRoot(a.Data)
|
||||
if !bytes.Equal(store.attsQueue[r].AggregationBits, b1) {
|
||||
t.Error("Received incorrect aggregation bitfield")
|
||||
}
|
||||
|
||||
b2 := bitfield.NewBitlist(8)
|
||||
b2.SetBitAt(1, true)
|
||||
a = ðpb.Attestation{Data: ðpb.AttestationData{}, AggregationBits: b2, Signature: sig.Marshal()}
|
||||
if err := store.aggregateAttestation(context.Background(), a); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !bytes.Equal(store.attsQueue[r].AggregationBits, []byte{3, 1}) {
|
||||
t.Error("Received incorrect aggregation bitfield")
|
||||
}
|
||||
|
||||
b3 := bitfield.NewBitlist(8)
|
||||
b3.SetBitAt(7, true)
|
||||
a = ðpb.Attestation{Data: ðpb.AttestationData{}, AggregationBits: b3, Signature: sig.Marshal()}
|
||||
if err := store.aggregateAttestation(context.Background(), a); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !bytes.Equal(store.attsQueue[r].AggregationBits, []byte{131, 1}) {
|
||||
t.Error("Received incorrect aggregation bitfield")
|
||||
}
|
||||
}
|
||||
|
||||
func TestStore_ReturnAggregatedAttestation(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
a1 := ðpb.Attestation{Data: ðpb.AttestationData{}, AggregationBits: bitfield.Bitlist{0x02}}
|
||||
err := store.db.SaveAttestation(ctx, a1)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
a2 := ðpb.Attestation{Data: ðpb.AttestationData{}, AggregationBits: bitfield.Bitlist{0x03}}
|
||||
saved, err := store.aggregatedAttestations(ctx, a2)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual([]*ethpb.Attestation{a2}, saved) {
|
||||
t.Error("did not retrieve saved attestation")
|
||||
}
|
||||
}
|
||||
@@ -1,442 +0,0 @@
|
||||
package forkchoice
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/go-ssz"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db/filters"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/featureconfig"
|
||||
"github.com/prysmaticlabs/prysm/shared/hashutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/traceutil"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// OnBlock is called when a gossip block is received. It runs regular state transition on the block and
|
||||
// update fork choice store.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
// def on_block(store: Store, block: BeaconBlock) -> None:
|
||||
// # Make a copy of the state to avoid mutability issues
|
||||
// assert block.parent_root in store.block_states
|
||||
// pre_state = store.block_states[block.parent_root].copy()
|
||||
// # Blocks cannot be in the future. If they are, their consideration must be delayed until the are in the past.
|
||||
// assert store.time >= pre_state.genesis_time + block.slot * SECONDS_PER_SLOT
|
||||
// # Add new block to the store
|
||||
// store.blocks[signing_root(block)] = block
|
||||
// # Check block is a descendant of the finalized block
|
||||
// assert (
|
||||
// get_ancestor(store, signing_root(block), store.blocks[store.finalized_checkpoint.root].slot) ==
|
||||
// store.finalized_checkpoint.root
|
||||
// )
|
||||
// # Check that block is later than the finalized epoch slot
|
||||
// assert block.slot > compute_start_slot_of_epoch(store.finalized_checkpoint.epoch)
|
||||
// # Check the block is valid and compute the post-state
|
||||
// state = state_transition(pre_state, block)
|
||||
// # Add new state for this block to the store
|
||||
// store.block_states[signing_root(block)] = state
|
||||
//
|
||||
// # Update justified checkpoint
|
||||
// if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch:
|
||||
// store.justified_checkpoint = state.current_justified_checkpoint
|
||||
//
|
||||
// # Update finalized checkpoint
|
||||
// if state.finalized_checkpoint.epoch > store.finalized_checkpoint.epoch:
|
||||
// store.finalized_checkpoint = state.finalized_checkpoint
|
||||
func (s *Store) OnBlock(ctx context.Context, b *ethpb.BeaconBlock) error {
|
||||
ctx, span := trace.StartSpan(ctx, "forkchoice.onBlock")
|
||||
defer span.End()
|
||||
|
||||
// Retrieve incoming block's pre state.
|
||||
preState, err := s.getBlockPreState(ctx, b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
preStateValidatorCount := len(preState.Validators)
|
||||
|
||||
root, err := ssz.SigningRoot(b)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "could not get signing root of block %d", b.Slot)
|
||||
}
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": b.Slot,
|
||||
"root": fmt.Sprintf("0x%s...", hex.EncodeToString(root[:])[:8]),
|
||||
}).Info("Executing state transition on block")
|
||||
postState, err := state.ExecuteStateTransition(ctx, preState, b)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not execute state transition")
|
||||
}
|
||||
if err := s.updateBlockAttestationsVotes(ctx, b.Body.Attestations); err != nil {
|
||||
return errors.Wrap(err, "could not update votes for attestations in block")
|
||||
}
|
||||
|
||||
if err := s.db.SaveBlock(ctx, b); err != nil {
|
||||
return errors.Wrapf(err, "could not save block from slot %d", b.Slot)
|
||||
}
|
||||
if err := s.db.SaveState(ctx, postState, root); err != nil {
|
||||
return errors.Wrap(err, "could not save state")
|
||||
}
|
||||
|
||||
// Update justified check point.
|
||||
if postState.CurrentJustifiedCheckpoint.Epoch > s.JustifiedCheckpt().Epoch {
|
||||
s.justifiedCheckpt = postState.CurrentJustifiedCheckpoint
|
||||
if err := s.db.SaveJustifiedCheckpoint(ctx, postState.CurrentJustifiedCheckpoint); err != nil {
|
||||
return errors.Wrap(err, "could not save justified checkpoint")
|
||||
}
|
||||
}
|
||||
|
||||
// Update finalized check point.
|
||||
// Prune the block cache and helper caches on every new finalized epoch.
|
||||
if postState.FinalizedCheckpoint.Epoch > s.finalizedCheckpt.Epoch {
|
||||
s.clearSeenAtts()
|
||||
helpers.ClearAllCaches()
|
||||
if err := s.db.SaveFinalizedCheckpoint(ctx, postState.FinalizedCheckpoint); err != nil {
|
||||
return errors.Wrap(err, "could not save finalized checkpoint")
|
||||
}
|
||||
|
||||
startSlot := helpers.StartSlot(s.prevFinalizedCheckpt.Epoch) + 1
|
||||
endSlot := helpers.StartSlot(s.finalizedCheckpt.Epoch)
|
||||
if err := s.rmStatesOlderThanLastFinalized(ctx, startSlot, endSlot); err != nil {
|
||||
return errors.Wrapf(err, "could not delete states prior to finalized check point, range: %d, %d",
|
||||
startSlot, endSlot+params.BeaconConfig().SlotsPerEpoch)
|
||||
}
|
||||
|
||||
s.prevFinalizedCheckpt = s.finalizedCheckpt
|
||||
s.finalizedCheckpt = postState.FinalizedCheckpoint
|
||||
}
|
||||
|
||||
// Update validator indices in database as needed.
|
||||
if err := s.saveNewValidators(ctx, preStateValidatorCount, postState); err != nil {
|
||||
return errors.Wrap(err, "could not save finalized checkpoint")
|
||||
}
|
||||
// Save the unseen attestations from block to db.
|
||||
if err := s.saveNewBlockAttestations(ctx, b.Body.Attestations); err != nil {
|
||||
return errors.Wrap(err, "could not save attestations")
|
||||
}
|
||||
|
||||
// Epoch boundary bookkeeping such as logging epoch summaries.
|
||||
if helpers.IsEpochStart(postState.Slot) {
|
||||
logEpochData(postState)
|
||||
reportEpochMetrics(postState)
|
||||
|
||||
// Update committee shuffled indices at the end of every epoch
|
||||
if featureconfig.Get().EnableNewCache {
|
||||
if err := helpers.UpdateCommitteeCache(postState); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// OnBlockNoVerifyStateTransition is called when an initial sync block is received.
|
||||
// It runs state transition on the block and without any BLS verification. The BLS verification
|
||||
// includes proposer signature, randao and attestation's aggregated signature.
|
||||
func (s *Store) OnBlockNoVerifyStateTransition(ctx context.Context, b *ethpb.BeaconBlock) error {
|
||||
ctx, span := trace.StartSpan(ctx, "forkchoice.onBlock")
|
||||
defer span.End()
|
||||
|
||||
// Retrieve incoming block's pre state.
|
||||
preState, err := s.getBlockPreState(ctx, b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
preStateValidatorCount := len(preState.Validators)
|
||||
|
||||
log.WithField("slot", b.Slot).Debug("Executing state transition on block")
|
||||
|
||||
postState, err := state.ExecuteStateTransitionNoVerify(ctx, preState, b)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not execute state transition")
|
||||
}
|
||||
|
||||
if err := s.db.SaveBlock(ctx, b); err != nil {
|
||||
return errors.Wrapf(err, "could not save block from slot %d", b.Slot)
|
||||
}
|
||||
root, err := ssz.SigningRoot(b)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "could not get signing root of block %d", b.Slot)
|
||||
}
|
||||
if err := s.db.SaveState(ctx, postState, root); err != nil {
|
||||
return errors.Wrap(err, "could not save state")
|
||||
}
|
||||
|
||||
// Update justified check point.
|
||||
if postState.CurrentJustifiedCheckpoint.Epoch > s.JustifiedCheckpt().Epoch {
|
||||
s.justifiedCheckpt = postState.CurrentJustifiedCheckpoint
|
||||
if err := s.db.SaveJustifiedCheckpoint(ctx, postState.CurrentJustifiedCheckpoint); err != nil {
|
||||
return errors.Wrap(err, "could not save justified checkpoint")
|
||||
}
|
||||
}
|
||||
|
||||
// Update finalized check point.
|
||||
// Prune the block cache and helper caches on every new finalized epoch.
|
||||
if postState.FinalizedCheckpoint.Epoch > s.finalizedCheckpt.Epoch {
|
||||
s.clearSeenAtts()
|
||||
helpers.ClearAllCaches()
|
||||
|
||||
startSlot := helpers.StartSlot(s.prevFinalizedCheckpt.Epoch) + 1
|
||||
endSlot := helpers.StartSlot(s.finalizedCheckpt.Epoch)
|
||||
if err := s.rmStatesOlderThanLastFinalized(ctx, startSlot, endSlot); err != nil {
|
||||
return errors.Wrapf(err, "could not delete states prior to finalized check point, range: %d, %d",
|
||||
startSlot, endSlot+params.BeaconConfig().SlotsPerEpoch)
|
||||
}
|
||||
|
||||
if err := s.db.SaveFinalizedCheckpoint(ctx, postState.FinalizedCheckpoint); err != nil {
|
||||
return errors.Wrap(err, "could not save finalized checkpoint")
|
||||
}
|
||||
|
||||
s.prevFinalizedCheckpt = s.finalizedCheckpt
|
||||
s.finalizedCheckpt = postState.FinalizedCheckpoint
|
||||
}
|
||||
|
||||
// Update validator indices in database as needed.
|
||||
if err := s.saveNewValidators(ctx, preStateValidatorCount, postState); err != nil {
|
||||
return errors.Wrap(err, "could not save finalized checkpoint")
|
||||
}
|
||||
// Save the unseen attestations from block to db.
|
||||
if err := s.saveNewBlockAttestations(ctx, b.Body.Attestations); err != nil {
|
||||
return errors.Wrap(err, "could not save attestations")
|
||||
}
|
||||
|
||||
// Epoch boundary bookkeeping such as logging epoch summaries.
|
||||
if helpers.IsEpochStart(postState.Slot) {
|
||||
reportEpochMetrics(postState)
|
||||
|
||||
// Update committee shuffled indices at the end of every epoch
|
||||
if featureconfig.Get().EnableNewCache {
|
||||
if err := helpers.UpdateCommitteeCache(postState); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// getBlockPreState returns the pre state of an incoming block. It uses the parent root of the block
// to retrieve the state in DB. It verifies the pre state's validity and the incoming block
// is in the correct time window.
func (s *Store) getBlockPreState(ctx context.Context, b *ethpb.BeaconBlock) (*pb.BeaconState, error) {
	ctx, span := trace.StartSpan(ctx, "forkchoice.getBlockPreState")
	defer span.End()

	// Verify incoming block has a valid pre state.
	preState, err := s.verifyBlkPreState(ctx, b)
	if err != nil {
		return nil, err
	}

	// Verify block slot time is not from the future.
	if err := helpers.VerifySlotTime(preState.GenesisTime, b.Slot); err != nil {
		return nil, err
	}

	// Verify block is a descendent of a finalized block.
	if err := s.verifyBlkDescendant(ctx, bytesutil.ToBytes32(b.ParentRoot), b.Slot); err != nil {
		return nil, err
	}

	// Verify block is later than the finalized epoch slot.
	if err := s.verifyBlkFinalizedSlot(b); err != nil {
		return nil, err
	}

	return preState, nil
}
|
||||
|
||||
// updateBlockAttestationsVotes checks the attestations in block and filter out the seen ones,
|
||||
// the unseen ones get passed to updateBlockAttestationVote for updating fork choice votes.
|
||||
func (s *Store) updateBlockAttestationsVotes(ctx context.Context, atts []*ethpb.Attestation) error {
|
||||
s.seenAttsLock.Lock()
|
||||
defer s.seenAttsLock.Unlock()
|
||||
|
||||
for _, att := range atts {
|
||||
// If we have not seen the attestation yet
|
||||
r, err := hashutil.HashProto(att)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if s.seenAtts[r] {
|
||||
continue
|
||||
}
|
||||
if err := s.updateBlockAttestationVote(ctx, att); err != nil {
|
||||
log.WithError(err).Warn("Attestation failed to update vote")
|
||||
}
|
||||
s.seenAtts[r] = true
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// updateBlockAttestationVote checks the attestation to update validator's latest votes.
func (s *Store) updateBlockAttestationVote(ctx context.Context, att *ethpb.Attestation) error {
	tgt := att.Data.Target
	// The target state is needed to resolve committees when converting the
	// attestation to its indexed form.
	baseState, err := s.db.State(ctx, bytesutil.ToBytes32(tgt.Root))
	if err != nil {
		return errors.Wrap(err, "could not get state for attestation tgt root")
	}
	if baseState == nil {
		return errors.New("no state found in db with attestation tgt root")
	}
	indexedAtt, err := blocks.ConvertToIndexed(ctx, baseState, att)
	if err != nil {
		return errors.Wrap(err, "could not convert attestation to indexed attestation")
	}
	// A validator's latest vote only advances: it is replaced when there is no
	// prior vote or when this attestation targets a strictly newer epoch.
	for _, i := range append(indexedAtt.CustodyBit_0Indices, indexedAtt.CustodyBit_1Indices...) {
		vote, err := s.db.ValidatorLatestVote(ctx, i)
		if err != nil {
			return errors.Wrapf(err, "could not get latest vote for validator %d", i)
		}
		if vote == nil || tgt.Epoch > vote.Epoch {
			if err := s.db.SaveValidatorLatestVote(ctx, i, &pb.ValidatorLatestVote{
				Epoch: tgt.Epoch,
				Root:  tgt.Root,
			}); err != nil {
				return errors.Wrapf(err, "could not save latest vote for validator %d", i)
			}
		}
	}
	return nil
}
|
||||
|
||||
// verifyBlkPreState validates input block has a valid pre-state.
|
||||
func (s *Store) verifyBlkPreState(ctx context.Context, b *ethpb.BeaconBlock) (*pb.BeaconState, error) {
|
||||
preState, err := s.db.State(ctx, bytesutil.ToBytes32(b.ParentRoot))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get pre state for slot %d", b.Slot)
|
||||
}
|
||||
if preState == nil {
|
||||
return nil, fmt.Errorf("pre state of slot %d does not exist", b.Slot)
|
||||
}
|
||||
return preState, nil
|
||||
}
|
||||
|
||||
// verifyBlkDescendant validates input block root is a descendant of the
|
||||
// current finalized block root.
|
||||
func (s *Store) verifyBlkDescendant(ctx context.Context, root [32]byte, slot uint64) error {
|
||||
ctx, span := trace.StartSpan(ctx, "forkchoice.verifyBlkDescendant")
|
||||
defer span.End()
|
||||
|
||||
finalizedBlk, err := s.db.Block(ctx, bytesutil.ToBytes32(s.finalizedCheckpt.Root))
|
||||
if err != nil || finalizedBlk == nil {
|
||||
return errors.Wrap(err, "could not get finalized block")
|
||||
}
|
||||
|
||||
bFinalizedRoot, err := s.ancestor(ctx, root[:], finalizedBlk.Slot)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get finalized block root")
|
||||
}
|
||||
if !bytes.Equal(bFinalizedRoot, s.finalizedCheckpt.Root) {
|
||||
err := fmt.Errorf("block from slot %d is not a descendent of the current finalized block slot %d, %#x != %#x",
|
||||
slot, finalizedBlk.Slot, bytesutil.Trunc(bFinalizedRoot), bytesutil.Trunc(s.finalizedCheckpt.Root))
|
||||
traceutil.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// verifyBlkFinalizedSlot validates input block is not less than or equal
|
||||
// to current finalized slot.
|
||||
func (s *Store) verifyBlkFinalizedSlot(b *ethpb.BeaconBlock) error {
|
||||
finalizedSlot := helpers.StartSlot(s.finalizedCheckpt.Epoch)
|
||||
if finalizedSlot >= b.Slot {
|
||||
return fmt.Errorf("block is equal or earlier than finalized block, slot %d < slot %d", b.Slot, finalizedSlot)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// saveNewValidators saves newly added validator index from state to db. Does nothing if validator count has not
|
||||
// changed.
|
||||
func (s *Store) saveNewValidators(ctx context.Context, preStateValidatorCount int, postState *pb.BeaconState) error {
|
||||
postStateValidatorCount := len(postState.Validators)
|
||||
if preStateValidatorCount != postStateValidatorCount {
|
||||
for i := preStateValidatorCount; i < postStateValidatorCount; i++ {
|
||||
pubKey := postState.Validators[i].PublicKey
|
||||
if err := s.db.SaveValidatorIndex(ctx, bytesutil.ToBytes48(pubKey), uint64(i)); err != nil {
|
||||
return errors.Wrapf(err, "could not save activated validator: %d", i)
|
||||
}
|
||||
log.WithFields(logrus.Fields{
|
||||
"index": i,
|
||||
"pubKey": hex.EncodeToString(bytesutil.Trunc(pubKey)),
|
||||
"totalValidatorCount": i + 1,
|
||||
}).Info("New validator index saved in DB")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// saveNewBlockAttestations saves the new attestations in block to DB.
|
||||
func (s *Store) saveNewBlockAttestations(ctx context.Context, atts []*ethpb.Attestation) error {
|
||||
attestations := make([]*ethpb.Attestation, 0, len(atts))
|
||||
for _, att := range atts {
|
||||
aggregated, err := s.aggregatedAttestations(ctx, att)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
attestations = append(attestations, aggregated...)
|
||||
}
|
||||
if err := s.db.SaveAttestations(ctx, atts); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// clearSeenAtts clears seen attestations map, it gets called upon new finalization.
|
||||
func (s *Store) clearSeenAtts() {
|
||||
s.seenAttsLock.Lock()
|
||||
s.seenAttsLock.Unlock()
|
||||
s.seenAtts = make(map[[32]byte]bool)
|
||||
}
|
||||
|
||||
// rmStatesOlderThanLastFinalized deletes the states in db since last finalized check point.
func (s *Store) rmStatesOlderThanLastFinalized(ctx context.Context, startSlot uint64, endSlot uint64) error {
	ctx, span := trace.StartSpan(ctx, "forkchoice.rmStatesBySlots")
	defer span.End()

	// Pruning is gated behind a feature flag; without it this is a no-op.
	if !featureconfig.Get().PruneFinalizedStates {
		return nil
	}

	// Make sure finalized slot is not a skipped slot.
	// Walk endSlot downward to the most recent slot that actually has a
	// block, then prune strictly before that slot.
	for i := endSlot; i > 0; i-- {
		filter := filters.NewFilter().SetStartSlot(i).SetEndSlot(i)
		b, err := s.db.Blocks(ctx, filter)
		if err != nil {
			return err
		}
		if len(b) > 0 {
			endSlot = i - 1
			break
		}
	}

	// Do not remove genesis state
	if startSlot == 0 {
		startSlot++
	}

	// Do not remove finalized state that's in the middle of slot ranges.
	filter := filters.NewFilter().SetStartSlot(startSlot).SetEndSlot(endSlot)
	roots, err := s.db.BlockRoots(ctx, filter)
	if err != nil {
		return err
	}

	if err := s.db.DeleteStates(ctx, roots); err != nil {
		return err
	}

	return nil
}
|
||||
@@ -1,343 +0,0 @@
|
||||
package forkchoice
|
||||
|
||||
import (
|
||||
"context"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/go-ssz"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/featureconfig"
|
||||
"github.com/prysmaticlabs/prysm/shared/hashutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
)
|
||||
|
||||
func init() {
|
||||
fc := featureconfig.Get()
|
||||
fc.PruneFinalizedStates = true
|
||||
featureconfig.Init(fc)
|
||||
}
|
||||
|
||||
// TestStore_OnBlock exercises the pre-state validation failure paths of
// Store.OnBlock against a small pre-built block tree (blockTree1).
func TestStore_OnBlock(t *testing.T) {
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)

	store := NewForkChoiceService(ctx, db)

	roots, err := blockTree1(db)
	if err != nil {
		t.Fatal(err)
	}

	// Save empty states for the parent roots so blocks referencing them pass
	// the pre-state lookup and fail on a later validation step instead.
	randomParentRoot := []byte{'a'}
	if err := store.db.SaveState(ctx, &pb.BeaconState{}, bytesutil.ToBytes32(randomParentRoot)); err != nil {
		t.Fatal(err)
	}
	randomParentRoot2 := roots[1]
	if err := store.db.SaveState(ctx, &pb.BeaconState{}, bytesutil.ToBytes32(randomParentRoot2)); err != nil {
		t.Fatal(err)
	}
	validGenesisRoot := []byte{'g'}
	if err := store.db.SaveState(ctx, &pb.BeaconState{}, bytesutil.ToBytes32(validGenesisRoot)); err != nil {
		t.Fatal(err)
	}

	tests := []struct {
		name          string
		blk           *ethpb.BeaconBlock
		s             *pb.BeaconState
		time          uint64
		wantErrString string
	}{
		{
			name:          "parent block root does not have a state",
			blk:           &ethpb.BeaconBlock{},
			s:             &pb.BeaconState{},
			wantErrString: "pre state of slot 0 does not exist",
		},
		{
			name:          "block is from the feature",
			blk:           &ethpb.BeaconBlock{ParentRoot: randomParentRoot, Slot: params.BeaconConfig().FarFutureEpoch},
			s:             &pb.BeaconState{},
			wantErrString: "could not process slot from the future",
		},
		{
			name:          "could not get finalized block",
			blk:           &ethpb.BeaconBlock{ParentRoot: randomParentRoot},
			s:             &pb.BeaconState{},
			wantErrString: "block from slot 0 is not a descendent of the current finalized block",
		},
		{
			name:          "same slot as finalized block",
			blk:           &ethpb.BeaconBlock{Slot: 0, ParentRoot: randomParentRoot2},
			s:             &pb.BeaconState{},
			wantErrString: "block is equal or earlier than finalized block, slot 0 < slot 0",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Reset the store's checkpoints for every case; the finalized root
			// is pointed at the tree's genesis block.
			if err := store.GenesisStore(ctx, &ethpb.Checkpoint{}, &ethpb.Checkpoint{}); err != nil {
				t.Fatal(err)
			}
			store.finalizedCheckpt.Root = roots[0]

			err := store.OnBlock(ctx, tt.blk)
			if !strings.Contains(err.Error(), tt.wantErrString) {
				t.Errorf("Store.OnBlock() error = %v, wantErr = %v", err, tt.wantErrString)
			}
		})
	}
}
|
||||
|
||||
func TestStore_SaveNewValidators(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
preCount := 2 // validators 0 and validators 1
|
||||
s := &pb.BeaconState{Validators: []*ethpb.Validator{
|
||||
{PublicKey: []byte{0}}, {PublicKey: []byte{1}},
|
||||
{PublicKey: []byte{2}}, {PublicKey: []byte{3}},
|
||||
}}
|
||||
if err := store.saveNewValidators(ctx, preCount, s); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !db.HasValidatorIndex(ctx, bytesutil.ToBytes48([]byte{2})) {
|
||||
t.Error("Wanted validator saved in db")
|
||||
}
|
||||
if !db.HasValidatorIndex(ctx, bytesutil.ToBytes48([]byte{3})) {
|
||||
t.Error("Wanted validator saved in db")
|
||||
}
|
||||
if db.HasValidatorIndex(ctx, bytesutil.ToBytes48([]byte{1})) {
|
||||
t.Error("validator not suppose to be saved in db")
|
||||
}
|
||||
}
|
||||
|
||||
// TestStore_UpdateBlockAttestationVote verifies that processing one
// attestation records its target root as the latest vote for every
// attesting validator index.
func TestStore_UpdateBlockAttestationVote(t *testing.T) {
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)
	params.UseMinimalConfig()

	deposits, _, _ := testutil.SetupInitialDeposits(t, 100)
	beaconState, err := state.GenesisBeaconState(deposits, uint64(0), &ethpb.Eth1Data{BlockHash: make([]byte, 32)})
	if err != nil {
		t.Fatal(err)
	}

	store := NewForkChoiceService(ctx, db)
	r := [32]byte{'A'}
	att := &ethpb.Attestation{
		Data: &ethpb.AttestationData{
			Source: &ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]},
			Target: &ethpb.Checkpoint{Epoch: 0, Root: r[:]},
		},
		AggregationBits: []byte{255},
		CustodyBits:     []byte{255},
	}
	// The vote update resolves the target state by root, so it must be in DB.
	if err := store.db.SaveState(ctx, beaconState, r); err != nil {
		t.Fatal(err)
	}

	indices, err := blocks.ConvertToIndexed(ctx, beaconState, att)
	if err != nil {
		t.Fatal(err)
	}

	// Collect every attesting index from both custody bit groups.
	var attestedIndices []uint64
	for _, k := range append(indices.CustodyBit_0Indices, indices.CustodyBit_1Indices...) {
		attestedIndices = append(attestedIndices, k)
	}

	if err := store.updateBlockAttestationVote(ctx, att); err != nil {
		t.Fatal(err)
	}
	// Every attesting validator's latest vote must now point at the target root.
	for _, i := range attestedIndices {
		v, err := store.db.ValidatorLatestVote(ctx, i)
		if err != nil {
			t.Fatal(err)
		}
		if !reflect.DeepEqual(v.Root, r[:]) {
			t.Error("Attested roots don't match")
		}
	}
}
|
||||
|
||||
// TestStore_UpdateBlockAttestationsVote verifies that batch-processing block
// attestations marks each attestation hash as seen.
func TestStore_UpdateBlockAttestationsVote(t *testing.T) {
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)
	params.UseMinimalConfig()

	deposits, _, _ := testutil.SetupInitialDeposits(t, 100)
	beaconState, err := state.GenesisBeaconState(deposits, uint64(0), &ethpb.Eth1Data{BlockHash: make([]byte, 32)})
	if err != nil {
		t.Fatal(err)
	}

	store := NewForkChoiceService(ctx, db)
	r := [32]byte{'A'}
	// Build five attestations (identical target) and record their proto hashes
	// for the seen-map assertion below.
	atts := make([]*ethpb.Attestation, 5)
	hashes := make([][32]byte, 5)
	for i := 0; i < len(atts); i++ {
		atts[i] = &ethpb.Attestation{
			Data: &ethpb.AttestationData{
				Source: &ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]},
				Target: &ethpb.Checkpoint{Epoch: 0, Root: r[:]},
			},
			AggregationBits: []byte{255},
			CustodyBits:     []byte{255},
		}
		h, _ := hashutil.HashProto(atts[i])
		hashes[i] = h
	}

	// The target state must exist so the vote update can resolve committees.
	if err := store.db.SaveState(ctx, beaconState, r); err != nil {
		t.Fatal(err)
	}

	if err := store.updateBlockAttestationsVotes(ctx, atts); err != nil {
		t.Fatal(err)
	}

	for _, h := range hashes {
		if !store.seenAtts[h] {
			t.Error("Seen attestation did not get recorded")
		}
	}
}
|
||||
|
||||
// TestStore_SavesNewBlockAttestations verifies that attestations from a block
// are persisted and retrievable by their data root, both for fresh
// attestations and for ones replacing previously saved (subset) aggregations.
func TestStore_SavesNewBlockAttestations(t *testing.T) {
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)

	store := NewForkChoiceService(ctx, db)
	a1 := &ethpb.Attestation{Data: &ethpb.AttestationData{}, AggregationBits: bitfield.Bitlist{0b101}, CustodyBits: bitfield.NewBitlist(2)}
	a2 := &ethpb.Attestation{Data: &ethpb.AttestationData{BeaconBlockRoot: []byte{'A'}}, AggregationBits: bitfield.Bitlist{0b110}, CustodyBits: bitfield.NewBitlist(2)}
	r1, _ := ssz.HashTreeRoot(a1.Data)
	r2, _ := ssz.HashTreeRoot(a2.Data)

	if err := store.saveNewBlockAttestations(ctx, []*ethpb.Attestation{a1, a2}); err != nil {
		t.Fatal(err)
	}

	// Each attestation must be retrievable via its data root.
	saved, err := store.db.AttestationsByDataRoot(ctx, r1)
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual([]*ethpb.Attestation{a1}, saved) {
		t.Error("did not retrieve saved attestation")
	}

	saved, err = store.db.AttestationsByDataRoot(ctx, r2)
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual([]*ethpb.Attestation{a2}, saved) {
		t.Error("did not retrieve saved attestation")
	}

	// Save again with fuller aggregation bits over the same data roots.
	a1 = &ethpb.Attestation{Data: &ethpb.AttestationData{}, AggregationBits: bitfield.Bitlist{0b111}, CustodyBits: bitfield.NewBitlist(2)}
	a2 = &ethpb.Attestation{Data: &ethpb.AttestationData{BeaconBlockRoot: []byte{'A'}}, AggregationBits: bitfield.Bitlist{0b111}, CustodyBits: bitfield.NewBitlist(2)}

	if err := store.saveNewBlockAttestations(ctx, []*ethpb.Attestation{a1, a2}); err != nil {
		t.Fatal(err)
	}

	saved, err = store.db.AttestationsByDataRoot(ctx, r1)
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual([]*ethpb.Attestation{a1}, saved) {
		t.Error("did not retrieve saved attestation")
	}

	saved, err = store.db.AttestationsByDataRoot(ctx, r2)
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual([]*ethpb.Attestation{a2}, saved) {
		t.Error("did not retrieve saved attestation")
	}
}
|
||||
|
||||
// TestRemoveStateSinceLastFinalized verifies that pruning removes states in
// the finalized range while keeping the genesis state and finalized-slot
// states intact, across two successive finalization advances.
func TestRemoveStateSinceLastFinalized(t *testing.T) {
	ctx := context.Background()
	db := testDB.SetupDB(t)
	defer testDB.TeardownDB(t, db)
	params.UseMinimalConfig()
	defer params.UseMainnetConfig()

	store := NewForkChoiceService(ctx, db)

	// Save 100 blocks in DB, each has a state.
	numBlocks := 100
	totalBlocks := make([]*ethpb.BeaconBlock, numBlocks)
	blockRoots := make([][32]byte, 0)
	for i := 0; i < len(totalBlocks); i++ {
		totalBlocks[i] = &ethpb.BeaconBlock{
			Slot: uint64(i),
		}
		r, err := ssz.SigningRoot(totalBlocks[i])
		if err != nil {
			t.Fatal(err)
		}
		if err := store.db.SaveState(ctx, &pb.BeaconState{Slot: uint64(i)}, r); err != nil {
			t.Fatal(err)
		}
		if err := store.db.SaveBlock(ctx, totalBlocks[i]); err != nil {
			t.Fatal(err)
		}
		blockRoots = append(blockRoots, r)
	}

	// New finalized epoch: 1
	finalizedEpoch := uint64(1)
	finalizedSlot := finalizedEpoch * params.BeaconConfig().SlotsPerEpoch
	endSlot := helpers.StartSlot(finalizedEpoch+1) - 1 // Inclusive
	if err := store.rmStatesOlderThanLastFinalized(ctx, 0, endSlot); err != nil {
		t.Fatal(err)
	}
	for _, r := range blockRoots {
		s, err := store.db.State(ctx, r)
		if err != nil {
			t.Fatal(err)
		}
		// Also verifies genesis state didnt get deleted
		if s != nil && s.Slot != finalizedSlot && s.Slot != 0 && s.Slot < endSlot {
			t.Errorf("State with slot %d should not be in DB", s.Slot)
		}
	}

	// New finalized epoch: 5
	newFinalizedEpoch := uint64(5)
	newFinalizedSlot := newFinalizedEpoch * params.BeaconConfig().SlotsPerEpoch
	endSlot = helpers.StartSlot(newFinalizedEpoch+1) - 1 // Inclusive
	if err := store.rmStatesOlderThanLastFinalized(ctx, helpers.StartSlot(finalizedEpoch+1)-1, endSlot); err != nil {
		t.Fatal(err)
	}
	for _, r := range blockRoots {
		s, err := store.db.State(ctx, r)
		if err != nil {
			t.Fatal(err)
		}
		// Also verifies genesis state didnt get deleted
		if s != nil && s.Slot != newFinalizedSlot && s.Slot != finalizedSlot && s.Slot != 0 && s.Slot < endSlot {
			t.Errorf("State with slot %d should not be in DB", s.Slot)
		}
	}
}
|
||||
@@ -1,257 +0,0 @@
|
||||
package forkchoice
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"sync"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db/filters"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// ForkChoicer defines a common interface for methods useful for directly applying fork choice
// to beacon blocks to compute head.
type ForkChoicer interface {
	// Head returns the current head block root of the beacon chain.
	Head(ctx context.Context) ([]byte, error)
	// OnBlock processes an incoming block with the full state transition.
	OnBlock(ctx context.Context, b *ethpb.BeaconBlock) error
	// OnBlockNoVerifyStateTransition processes an initial-sync block without
	// BLS verification.
	OnBlockNoVerifyStateTransition(ctx context.Context, b *ethpb.BeaconBlock) error
	// OnAttestation processes an attestation for fork choice. The meaning of
	// the returned uint64 is not visible here — confirm at the implementation.
	OnAttestation(ctx context.Context, a *ethpb.Attestation) (uint64, error)
	// GenesisStore initializes the store's checkpoints before the chain advances.
	GenesisStore(ctx context.Context, justifiedCheckpoint *ethpb.Checkpoint, finalizedCheckpoint *ethpb.Checkpoint) error
	// FinalizedCheckpt returns the latest finalized checkpoint tracked by the store.
	FinalizedCheckpt() *ethpb.Checkpoint
}
|
||||
|
||||
// Store represents a service struct that handles the forkchoice
// logic of managing the full PoS beacon chain.
type Store struct {
	ctx    context.Context
	cancel context.CancelFunc
	db     db.Database
	// Latest justified/finalized checkpoints tracked by fork choice; the
	// previous finalized checkpoint bounds state pruning ranges.
	justifiedCheckpt     *ethpb.Checkpoint
	finalizedCheckpt     *ethpb.Checkpoint
	prevFinalizedCheckpt *ethpb.Checkpoint
	// Checkpoint-to-state cache, guarded by checkpointStateLock.
	checkpointState     *cache.CheckpointStateCache
	checkpointStateLock sync.Mutex
	// Attestations queued for later processing, guarded by attsQueueLock.
	attsQueue     map[[32]byte]*ethpb.Attestation
	attsQueueLock sync.Mutex
	// Proto hashes of attestations already applied to fork choice votes,
	// guarded by seenAttsLock; cleared on each new finalization.
	seenAtts     map[[32]byte]bool
	seenAttsLock sync.Mutex
}
|
||||
|
||||
// NewForkChoiceService instantiates a new service instance that will
|
||||
// be registered into a running beacon node.
|
||||
func NewForkChoiceService(ctx context.Context, db db.Database) *Store {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
return &Store{
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
db: db,
|
||||
checkpointState: cache.NewCheckpointStateCache(),
|
||||
attsQueue: make(map[[32]byte]*ethpb.Attestation),
|
||||
seenAtts: make(map[[32]byte]bool),
|
||||
}
|
||||
}
|
||||
|
||||
// GenesisStore initializes the store struct before beacon chain
// starts to advance.
//
// Spec pseudocode definition:
//   def get_genesis_store(genesis_state: BeaconState) -> Store:
//    genesis_block = BeaconBlock(state_root=hash_tree_root(genesis_state))
//    root = signing_root(genesis_block)
//    justified_checkpoint = Checkpoint(epoch=GENESIS_EPOCH, root=root)
//    finalized_checkpoint = Checkpoint(epoch=GENESIS_EPOCH, root=root)
//    return Store(
//        time=genesis_state.genesis_time,
//        justified_checkpoint=justified_checkpoint,
//        finalized_checkpoint=finalized_checkpoint,
//        blocks={root: genesis_block},
//        block_states={root: genesis_state.copy()},
//        checkpoint_states={justified_checkpoint: genesis_state.copy()},
//    )
func (s *Store) GenesisStore(
	ctx context.Context,
	justifiedCheckpoint *ethpb.Checkpoint,
	finalizedCheckpoint *ethpb.Checkpoint) error {

	// Clone the checkpoints so later mutations of the store's copies cannot
	// affect the caller's values.
	s.justifiedCheckpt = proto.Clone(justifiedCheckpoint).(*ethpb.Checkpoint)
	s.finalizedCheckpt = proto.Clone(finalizedCheckpoint).(*ethpb.Checkpoint)
	s.prevFinalizedCheckpt = proto.Clone(finalizedCheckpoint).(*ethpb.Checkpoint)

	justifiedState, err := s.db.State(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root))
	if err != nil {
		return errors.Wrap(err, "could not retrieve last justified state")
	}

	// Warm the checkpoint-state cache with the justified state so fork choice
	// can resolve it without a DB round trip.
	if err := s.checkpointState.AddCheckpointState(&cache.CheckpointState{
		Checkpoint: s.justifiedCheckpt,
		State:      justifiedState,
	}); err != nil {
		return errors.Wrap(err, "could not save genesis state in check point cache")
	}

	return nil
}
|
||||
|
||||
// ancestor returns the block root of an ancestry block from the input block root.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
// def get_ancestor(store: Store, root: Hash, slot: Slot) -> Hash:
|
||||
// block = store.blocks[root]
|
||||
// if block.slot > slot:
|
||||
// return get_ancestor(store, block.parent_root, slot)
|
||||
// elif block.slot == slot:
|
||||
// return root
|
||||
// else:
|
||||
// return Bytes32() # root is older than queried slot: no results.
|
||||
func (s *Store) ancestor(ctx context.Context, root []byte, slot uint64) ([]byte, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "forkchoice.ancestor")
|
||||
defer span.End()
|
||||
|
||||
b, err := s.db.Block(ctx, bytesutil.ToBytes32(root))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get ancestor block")
|
||||
}
|
||||
|
||||
// If we dont have the ancestor in the DB, simply return nil so rest of fork choice
|
||||
// operation can proceed. This is not an error condition.
|
||||
if b == nil || b.Slot < slot {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if b.Slot == slot {
|
||||
return root, nil
|
||||
}
|
||||
|
||||
return s.ancestor(ctx, b.ParentRoot, slot)
|
||||
}
|
||||
|
||||
// latestAttestingBalance returns the staked balance of a block from the input block root.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
// def get_latest_attesting_balance(store: Store, root: Hash) -> Gwei:
|
||||
// state = store.checkpoint_states[store.justified_checkpoint]
|
||||
// active_indices = get_active_validator_indices(state, get_current_epoch(state))
|
||||
// return Gwei(sum(
|
||||
// state.validators[i].effective_balance for i in active_indices
|
||||
// if (i in store.latest_messages
|
||||
// and get_ancestor(store, store.latest_messages[i].root, store.blocks[root].slot) == root)
|
||||
// ))
|
||||
func (s *Store) latestAttestingBalance(ctx context.Context, root []byte) (uint64, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "forkchoice.latestAttestingBalance")
|
||||
defer span.End()
|
||||
|
||||
lastJustifiedState, err := s.checkpointState.StateByCheckpoint(s.JustifiedCheckpt())
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "could not retrieve cached state via last justified check point")
|
||||
}
|
||||
if lastJustifiedState == nil {
|
||||
return 0, errors.Wrapf(err, "could not get justified state at epoch %d", s.JustifiedCheckpt().Epoch)
|
||||
}
|
||||
|
||||
lastJustifiedEpoch := helpers.CurrentEpoch(lastJustifiedState)
|
||||
activeIndices, err := helpers.ActiveValidatorIndices(lastJustifiedState, lastJustifiedEpoch)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "could not get active indices for last justified checkpoint")
|
||||
}
|
||||
|
||||
wantedBlk, err := s.db.Block(ctx, bytesutil.ToBytes32(root))
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "could not get target block")
|
||||
}
|
||||
|
||||
balances := uint64(0)
|
||||
for _, i := range activeIndices {
|
||||
vote, err := s.db.ValidatorLatestVote(ctx, i)
|
||||
if err != nil {
|
||||
return 0, errors.Wrapf(err, "could not get validator %d's latest vote", i)
|
||||
}
|
||||
if vote == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
wantedRoot, err := s.ancestor(ctx, vote.Root, wantedBlk.Slot)
|
||||
if err != nil {
|
||||
return 0, errors.Wrapf(err, "could not get ancestor root for slot %d", wantedBlk.Slot)
|
||||
}
|
||||
if bytes.Equal(wantedRoot, root) {
|
||||
balances += lastJustifiedState.Validators[i].EffectiveBalance
|
||||
}
|
||||
}
|
||||
return balances, nil
|
||||
}
|
||||
|
||||
// Head returns the head of the beacon chain.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
// def get_head(store: Store) -> Hash:
|
||||
// # Execute the LMD-GHOST fork choice
|
||||
// head = store.justified_checkpoint.root
|
||||
// justified_slot = compute_start_slot_of_epoch(store.justified_checkpoint.epoch)
|
||||
// while True:
|
||||
// children = [
|
||||
// root for root in store.blocks.keys()
|
||||
// if store.blocks[root].parent_root == head and store.blocks[root].slot > justified_slot
|
||||
// ]
|
||||
// if len(children) == 0:
|
||||
// return head
|
||||
// # Sort by latest attesting balance with ties broken lexicographically
|
||||
// head = max(children, key=lambda root: (get_latest_attesting_balance(store, root), root))
|
||||
func (s *Store) Head(ctx context.Context) ([]byte, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "forkchoice.head")
|
||||
defer span.End()
|
||||
|
||||
head := s.JustifiedCheckpt().Root
|
||||
|
||||
for {
|
||||
startSlot := s.JustifiedCheckpt().Epoch * params.BeaconConfig().SlotsPerEpoch
|
||||
filter := filters.NewFilter().SetParentRoot(head).SetStartSlot(startSlot)
|
||||
children, err := s.db.BlockRoots(ctx, filter)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not retrieve children info")
|
||||
}
|
||||
|
||||
if len(children) == 0 {
|
||||
return head, nil
|
||||
}
|
||||
|
||||
// if a block has one child, then we don't have to lookup anything to
|
||||
// know that this child will be the best child.
|
||||
head = children[0][:]
|
||||
if len(children) > 1 {
|
||||
highest, err := s.latestAttestingBalance(ctx, head)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get latest balance")
|
||||
}
|
||||
for _, child := range children[1:] {
|
||||
balance, err := s.latestAttestingBalance(ctx, child[:])
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get latest balance")
|
||||
}
|
||||
// When there's a tie, it's broken lexicographically to favor the higher one.
|
||||
if balance > highest ||
|
||||
balance == highest && bytes.Compare(child[:], head) > 0 {
|
||||
highest = balance
|
||||
head = child[:]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// JustifiedCheckpt returns the latest justified check point from fork choice store.
|
||||
func (s *Store) JustifiedCheckpt() *ethpb.Checkpoint {
|
||||
return proto.Clone(s.justifiedCheckpt).(*ethpb.Checkpoint)
|
||||
}
|
||||
|
||||
// FinalizedCheckpt returns the latest finalized check point from fork choice store.
|
||||
func (s *Store) FinalizedCheckpt() *ethpb.Checkpoint {
|
||||
return proto.Clone(s.finalizedCheckpt).(*ethpb.Checkpoint)
|
||||
}
|
||||
@@ -1,346 +0,0 @@
|
||||
package forkchoice
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/prysmaticlabs/go-ssz"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db/filters"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
)
|
||||
|
||||
func TestStore_GenesisStoreOk(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
|
||||
genesisTime := time.Unix(9999, 0)
|
||||
genesisState := &pb.BeaconState{GenesisTime: uint64(genesisTime.Unix())}
|
||||
genesisStateRoot, err := ssz.HashTreeRoot(genesisState)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
genesisBlk := blocks.NewGenesisBlock(genesisStateRoot[:])
|
||||
genesisBlkRoot, err := ssz.SigningRoot(genesisBlk)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := db.SaveState(ctx, genesisState, genesisBlkRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
checkPoint := ðpb.Checkpoint{Root: genesisBlkRoot[:]}
|
||||
if err := store.GenesisStore(ctx, checkPoint, checkPoint); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(store.justifiedCheckpt, checkPoint) {
|
||||
t.Error("Justified check point from genesis store did not match")
|
||||
}
|
||||
if !reflect.DeepEqual(store.finalizedCheckpt, checkPoint) {
|
||||
t.Error("Finalized check point from genesis store did not match")
|
||||
}
|
||||
|
||||
cachedState, err := store.checkpointState.StateByCheckpoint(checkPoint)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(cachedState, genesisState) {
|
||||
t.Error("Incorrect genesis state cached")
|
||||
}
|
||||
}
|
||||
|
||||
func TestStore_AncestorOk(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
|
||||
roots, err := blockTree1(db)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
type args struct {
|
||||
root []byte
|
||||
slot uint64
|
||||
}
|
||||
|
||||
// /- B1
|
||||
// B0 /- B5 - B7
|
||||
// \- B3 - B4 - B6 - B8
|
||||
tests := []struct {
|
||||
args *args
|
||||
want []byte
|
||||
}{
|
||||
{args: &args{roots[1], 0}, want: roots[0]},
|
||||
{args: &args{roots[8], 0}, want: roots[0]},
|
||||
{args: &args{roots[8], 4}, want: roots[4]},
|
||||
{args: &args{roots[7], 4}, want: roots[4]},
|
||||
{args: &args{roots[7], 0}, want: roots[0]},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
got, err := store.ancestor(ctx, tt.args.root, tt.args.slot)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("Store.ancestor(ctx, ) = %v, want %v", got, tt.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestStore_AncestorNotPartOfTheChain(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
|
||||
roots, err := blockTree1(db)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// /- B1
|
||||
// B0 /- B5 - B7
|
||||
// \- B3 - B4 - B6 - B8
|
||||
root, err := store.ancestor(ctx, roots[8], 1)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if root != nil {
|
||||
t.Error("block at slot 1 is not part of the chain")
|
||||
}
|
||||
root, err = store.ancestor(ctx, roots[8], 2)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if root != nil {
|
||||
t.Error("block at slot 2 is not part of the chain")
|
||||
}
|
||||
}
|
||||
|
||||
func TestStore_LatestAttestingBalance(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
|
||||
roots, err := blockTree1(db)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
validators := make([]*ethpb.Validator, 100)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
validators[i] = ðpb.Validator{ExitEpoch: 2, EffectiveBalance: 1e9}
|
||||
}
|
||||
|
||||
s := &pb.BeaconState{Validators: validators}
|
||||
stateRoot, err := ssz.HashTreeRoot(s)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
b := blocks.NewGenesisBlock(stateRoot[:])
|
||||
blkRoot, err := ssz.SigningRoot(b)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := db.SaveState(ctx, s, blkRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
checkPoint := ðpb.Checkpoint{Root: blkRoot[:]}
|
||||
if err := store.GenesisStore(ctx, checkPoint, checkPoint); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// /- B1 (33 votes)
|
||||
// B0 /- B5 - B7 (33 votes)
|
||||
// \- B3 - B4 - B6 - B8 (34 votes)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
switch {
|
||||
case i < 33:
|
||||
if err := store.db.SaveValidatorLatestVote(ctx, uint64(i), &pb.ValidatorLatestVote{Root: roots[1]}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
case i > 66:
|
||||
if err := store.db.SaveValidatorLatestVote(ctx, uint64(i), &pb.ValidatorLatestVote{Root: roots[7]}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
default:
|
||||
if err := store.db.SaveValidatorLatestVote(ctx, uint64(i), &pb.ValidatorLatestVote{Root: roots[8]}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
root []byte
|
||||
want uint64
|
||||
}{
|
||||
{root: roots[0], want: 100 * 1e9},
|
||||
{root: roots[1], want: 33 * 1e9},
|
||||
{root: roots[3], want: 67 * 1e9},
|
||||
{root: roots[4], want: 67 * 1e9},
|
||||
{root: roots[7], want: 33 * 1e9},
|
||||
{root: roots[8], want: 34 * 1e9},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
got, err := store.latestAttestingBalance(ctx, tt.root)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if got != tt.want {
|
||||
t.Errorf("Store.latestAttestingBalance(ctx, ) = %v, want %v", got, tt.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestStore_ChildrenBlocksFromParentRoot(t *testing.T) {
|
||||
helpers.ClearAllCaches()
|
||||
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
|
||||
roots, err := blockTree1(db)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
filter := filters.NewFilter().SetParentRoot(roots[0]).SetStartSlot(0)
|
||||
children, err := store.db.BlockRoots(ctx, filter)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(children, [][32]byte{bytesutil.ToBytes32(roots[1]), bytesutil.ToBytes32(roots[3])}) {
|
||||
t.Error("Did not receive correct children roots")
|
||||
}
|
||||
|
||||
filter = filters.NewFilter().SetParentRoot(roots[0]).SetStartSlot(2)
|
||||
children, err = store.db.BlockRoots(ctx, filter)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(children, [][32]byte{bytesutil.ToBytes32(roots[3])}) {
|
||||
t.Error("Did not receive correct children roots")
|
||||
}
|
||||
}
|
||||
|
||||
func TestStore_GetHead(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
store := NewForkChoiceService(ctx, db)
|
||||
|
||||
roots, err := blockTree1(db)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
validators := make([]*ethpb.Validator, 100)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
validators[i] = ðpb.Validator{ExitEpoch: 2, EffectiveBalance: 1e9}
|
||||
}
|
||||
|
||||
s := &pb.BeaconState{Validators: validators}
|
||||
stateRoot, err := ssz.HashTreeRoot(s)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
b := blocks.NewGenesisBlock(stateRoot[:])
|
||||
blkRoot, err := ssz.SigningRoot(b)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
checkPoint := ðpb.Checkpoint{Root: blkRoot[:]}
|
||||
|
||||
if err := store.GenesisStore(ctx, checkPoint, checkPoint); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := store.db.SaveState(ctx, s, bytesutil.ToBytes32(roots[0])); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
store.justifiedCheckpt.Root = roots[0]
|
||||
if err := store.checkpointState.AddCheckpointState(&cache.CheckpointState{
|
||||
Checkpoint: store.justifiedCheckpt,
|
||||
State: s,
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// /- B1 (33 votes)
|
||||
// B0 /- B5 - B7 (33 votes)
|
||||
// \- B3 - B4 - B6 - B8 (34 votes)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
switch {
|
||||
case i < 33:
|
||||
if err := store.db.SaveValidatorLatestVote(ctx, uint64(i), &pb.ValidatorLatestVote{Root: roots[1]}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
case i > 66:
|
||||
if err := store.db.SaveValidatorLatestVote(ctx, uint64(i), &pb.ValidatorLatestVote{Root: roots[7]}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
default:
|
||||
if err := store.db.SaveValidatorLatestVote(ctx, uint64(i), &pb.ValidatorLatestVote{Root: roots[8]}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Default head is B8
|
||||
head, err := store.Head(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !bytes.Equal(head, roots[8]) {
|
||||
t.Error("Incorrect head")
|
||||
}
|
||||
|
||||
// 1 validator switches vote to B7 to gain 34%, enough to switch head
|
||||
if err := store.db.SaveValidatorLatestVote(ctx, 50, &pb.ValidatorLatestVote{Root: roots[7]}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
head, err = store.Head(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !bytes.Equal(head, roots[7]) {
|
||||
t.Error("Incorrect head")
|
||||
}
|
||||
|
||||
// 18 validators switches vote to B1 to gain 51%, enough to switch head
|
||||
for i := 0; i < 18; i++ {
|
||||
idx := 50 + uint64(i)
|
||||
if err := store.db.SaveValidatorLatestVote(ctx, idx, &pb.ValidatorLatestVote{Root: roots[1]}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
head, err = store.Head(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !bytes.Equal(head, roots[1]) {
|
||||
t.Log(head)
|
||||
t.Error("Incorrect head")
|
||||
}
|
||||
}
|
||||
@@ -1,144 +0,0 @@
|
||||
package forkchoice
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/prysmaticlabs/go-ssz"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
)
|
||||
|
||||
// blockTree1 constructs the following tree:
|
||||
// /- B1
|
||||
// B0 /- B5 - B7
|
||||
// \- B3 - B4 - B6 - B8
|
||||
// (B1, and B3 are all from the same slots)
|
||||
func blockTree1(db db.Database) ([][]byte, error) {
|
||||
b0 := ðpb.BeaconBlock{Slot: 0, ParentRoot: []byte{'g'}}
|
||||
r0, _ := ssz.SigningRoot(b0)
|
||||
b1 := ðpb.BeaconBlock{Slot: 1, ParentRoot: r0[:]}
|
||||
r1, _ := ssz.SigningRoot(b1)
|
||||
b3 := ðpb.BeaconBlock{Slot: 3, ParentRoot: r0[:]}
|
||||
r3, _ := ssz.SigningRoot(b3)
|
||||
b4 := ðpb.BeaconBlock{Slot: 4, ParentRoot: r3[:]}
|
||||
r4, _ := ssz.SigningRoot(b4)
|
||||
b5 := ðpb.BeaconBlock{Slot: 5, ParentRoot: r4[:]}
|
||||
r5, _ := ssz.SigningRoot(b5)
|
||||
b6 := ðpb.BeaconBlock{Slot: 6, ParentRoot: r4[:]}
|
||||
r6, _ := ssz.SigningRoot(b6)
|
||||
b7 := ðpb.BeaconBlock{Slot: 7, ParentRoot: r5[:]}
|
||||
r7, _ := ssz.SigningRoot(b7)
|
||||
b8 := ðpb.BeaconBlock{Slot: 8, ParentRoot: r6[:]}
|
||||
r8, _ := ssz.SigningRoot(b8)
|
||||
for _, b := range []*ethpb.BeaconBlock{b0, b1, b3, b4, b5, b6, b7, b8} {
|
||||
if err := db.SaveBlock(context.Background(), b); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := db.SaveState(context.Background(), &pb.BeaconState{}, bytesutil.ToBytes32(b.ParentRoot)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return [][]byte{r0[:], r1[:], nil, r3[:], r4[:], r5[:], r6[:], r7[:], r8[:]}, nil
|
||||
}
|
||||
|
||||
// blockTree2 constructs the following tree:
|
||||
// Scenario graph: shorturl.at/loyP6
|
||||
//
|
||||
//digraph G {
|
||||
// rankdir=LR;
|
||||
// node [shape="none"];
|
||||
//
|
||||
// subgraph blocks {
|
||||
// rankdir=LR;
|
||||
// node [shape="box"];
|
||||
// a->b;
|
||||
// a->c;
|
||||
// b->d;
|
||||
// b->e;
|
||||
// c->f;
|
||||
// c->g;
|
||||
// d->h
|
||||
// d->i
|
||||
// d->j
|
||||
// d->k
|
||||
// h->l
|
||||
// h->m
|
||||
// g->n
|
||||
// g->o
|
||||
// e->p
|
||||
// }
|
||||
//}
|
||||
func blockTree2(db db.Database) ([][]byte, error) {
|
||||
b0 := ðpb.BeaconBlock{Slot: 0, ParentRoot: []byte{'g'}}
|
||||
r0, _ := ssz.SigningRoot(b0)
|
||||
b1 := ðpb.BeaconBlock{Slot: 1, ParentRoot: r0[:]}
|
||||
r1, _ := ssz.SigningRoot(b1)
|
||||
b2 := ðpb.BeaconBlock{Slot: 2, ParentRoot: r0[:]}
|
||||
r2, _ := ssz.SigningRoot(b2)
|
||||
b3 := ðpb.BeaconBlock{Slot: 3, ParentRoot: r1[:]}
|
||||
r3, _ := ssz.SigningRoot(b3)
|
||||
b4 := ðpb.BeaconBlock{Slot: 4, ParentRoot: r1[:]}
|
||||
r4, _ := ssz.SigningRoot(b4)
|
||||
b5 := ðpb.BeaconBlock{Slot: 5, ParentRoot: r2[:]}
|
||||
r5, _ := ssz.SigningRoot(b5)
|
||||
b6 := ðpb.BeaconBlock{Slot: 6, ParentRoot: r2[:]}
|
||||
r6, _ := ssz.SigningRoot(b6)
|
||||
b7 := ðpb.BeaconBlock{Slot: 7, ParentRoot: r3[:]}
|
||||
r7, _ := ssz.SigningRoot(b7)
|
||||
b8 := ðpb.BeaconBlock{Slot: 8, ParentRoot: r3[:]}
|
||||
r8, _ := ssz.SigningRoot(b8)
|
||||
b9 := ðpb.BeaconBlock{Slot: 9, ParentRoot: r3[:]}
|
||||
r9, _ := ssz.SigningRoot(b9)
|
||||
b10 := ðpb.BeaconBlock{Slot: 10, ParentRoot: r3[:]}
|
||||
r10, _ := ssz.SigningRoot(b10)
|
||||
b11 := ðpb.BeaconBlock{Slot: 11, ParentRoot: r4[:]}
|
||||
r11, _ := ssz.SigningRoot(b11)
|
||||
b12 := ðpb.BeaconBlock{Slot: 12, ParentRoot: r6[:]}
|
||||
r12, _ := ssz.SigningRoot(b12)
|
||||
b13 := ðpb.BeaconBlock{Slot: 13, ParentRoot: r6[:]}
|
||||
r13, _ := ssz.SigningRoot(b13)
|
||||
b14 := ðpb.BeaconBlock{Slot: 14, ParentRoot: r7[:]}
|
||||
r14, _ := ssz.SigningRoot(b14)
|
||||
b15 := ðpb.BeaconBlock{Slot: 15, ParentRoot: r7[:]}
|
||||
r15, _ := ssz.SigningRoot(b15)
|
||||
for _, b := range []*ethpb.BeaconBlock{b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15} {
|
||||
if err := db.SaveBlock(context.Background(), b); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := db.SaveState(context.Background(), &pb.BeaconState{}, bytesutil.ToBytes32(b.ParentRoot)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return [][]byte{r0[:], r1[:], r2[:], r3[:], r4[:], r5[:], r6[:], r7[:], r8[:], r9[:], r10[:], r11[:], r12[:], r13[:], r14[:], r15[:]}, nil
|
||||
}
|
||||
|
||||
// blockTree3 constructs a tree that is 512 blocks in a row.
|
||||
// B0 - B1 - B2 - B3 - .... - B512
|
||||
func blockTree3(db db.Database) ([][]byte, error) {
|
||||
blkCount := 512
|
||||
roots := make([][]byte, 0, blkCount)
|
||||
blks := make([]*ethpb.BeaconBlock, 0, blkCount)
|
||||
b0 := ðpb.BeaconBlock{Slot: 0, ParentRoot: []byte{'g'}}
|
||||
r0, _ := ssz.SigningRoot(b0)
|
||||
roots = append(roots, r0[:])
|
||||
blks = append(blks, b0)
|
||||
|
||||
for i := 1; i < blkCount; i++ {
|
||||
b := ðpb.BeaconBlock{Slot: uint64(i), ParentRoot: roots[len(roots)-1]}
|
||||
r, _ := ssz.SigningRoot(b)
|
||||
roots = append(roots, r[:])
|
||||
blks = append(blks, b)
|
||||
}
|
||||
|
||||
for _, b := range blks {
|
||||
if err := db.SaveBlock(context.Background(), b); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := db.SaveState(context.Background(), &pb.BeaconState{}, bytesutil.ToBytes32(b.ParentRoot)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return roots, nil
|
||||
}
|
||||
211
beacon-chain/blockchain/head.go
Normal file
211
beacon-chain/blockchain/head.go
Normal file
@@ -0,0 +1,211 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/featureconfig"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// This defines the current chain service's view of head.
|
||||
type head struct {
|
||||
slot uint64 // current head slot.
|
||||
root [32]byte // current head root.
|
||||
block *ethpb.SignedBeaconBlock // current head block.
|
||||
state *state.BeaconState // current head state.
|
||||
}
|
||||
|
||||
// This gets head from the fork choice service and saves head related items
|
||||
// (ie root, block, state) to the local service cache.
|
||||
func (s *Service) updateHead(ctx context.Context, balances []uint64) error {
|
||||
ctx, span := trace.StartSpan(ctx, "blockchain.updateHead")
|
||||
defer span.End()
|
||||
|
||||
// To get the proper head update, a node first checks its best justified
|
||||
// can become justified. This is designed to prevent bounce attack and
|
||||
// ensure head gets its best justified info.
|
||||
if s.bestJustifiedCheckpt.Epoch > s.justifiedCheckpt.Epoch {
|
||||
s.justifiedCheckpt = s.bestJustifiedCheckpt
|
||||
}
|
||||
|
||||
// Get head from the fork choice service.
|
||||
f := s.finalizedCheckpt
|
||||
j := s.justifiedCheckpt
|
||||
headRoot, err := s.forkChoiceStore.Head(ctx, j.Epoch, bytesutil.ToBytes32(j.Root), balances, f.Epoch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Save head to the local service cache.
|
||||
return s.saveHead(ctx, headRoot)
|
||||
}
|
||||
|
||||
// This saves head info to the local service cache, it also saves the
|
||||
// new head root to the DB.
|
||||
func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error {
|
||||
ctx, span := trace.StartSpan(ctx, "blockchain.saveHead")
|
||||
defer span.End()
|
||||
|
||||
// Do nothing if head hasn't changed.
|
||||
if headRoot == s.headRoot() {
|
||||
return nil
|
||||
}
|
||||
|
||||
// If the head state is not available, just return nil.
|
||||
// There's nothing to cache
|
||||
if featureconfig.Get().NewStateMgmt {
|
||||
if !s.stateGen.StateSummaryExists(ctx, headRoot) {
|
||||
return nil
|
||||
}
|
||||
} else {
|
||||
_, cached := s.initSyncState[headRoot]
|
||||
if !cached && !s.beaconDB.HasState(ctx, headRoot) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Get the new head block from DB.
|
||||
newHeadBlock, err := s.beaconDB.Block(ctx, headRoot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if newHeadBlock == nil || newHeadBlock.Block == nil {
|
||||
return errors.New("cannot save nil head block")
|
||||
}
|
||||
|
||||
// Get the new head state from cached state or DB.
|
||||
var newHeadState *state.BeaconState
|
||||
if featureconfig.Get().NewStateMgmt {
|
||||
newHeadState, err = s.stateGen.StateByRoot(ctx, headRoot)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not retrieve head state in DB")
|
||||
}
|
||||
} else {
|
||||
var exists bool
|
||||
newHeadState, exists = s.initSyncState[headRoot]
|
||||
if !exists {
|
||||
newHeadState, err = s.beaconDB.State(ctx, headRoot)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not retrieve head state in DB")
|
||||
}
|
||||
}
|
||||
}
|
||||
if newHeadState == nil {
|
||||
return errors.New("cannot save nil head state")
|
||||
}
|
||||
|
||||
// Cache the new head info.
|
||||
s.setHead(headRoot, newHeadBlock, newHeadState)
|
||||
|
||||
// Save the new head root to DB.
|
||||
if err := s.beaconDB.SaveHeadBlockRoot(ctx, headRoot); err != nil {
|
||||
return errors.Wrap(err, "could not save head root in DB")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// This gets called to update canonical root mapping. It does not save head block
|
||||
// root in DB. With the inception of inital-sync-cache-state flag, it uses finalized
|
||||
// check point as anchors to resume sync therefore head is no longer needed to be saved on per slot basis.
|
||||
func (s *Service) saveHeadNoDB(ctx context.Context, b *ethpb.SignedBeaconBlock, r [32]byte) error {
|
||||
if b == nil || b.Block == nil {
|
||||
return errors.New("cannot save nil head block")
|
||||
}
|
||||
|
||||
var headState *state.BeaconState
|
||||
var err error
|
||||
if featureconfig.Get().NewStateMgmt {
|
||||
headState, err = s.stateGen.StateByRoot(ctx, r)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not retrieve head state in DB")
|
||||
}
|
||||
} else {
|
||||
headState, err = s.beaconDB.State(ctx, r)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not retrieve head state in DB")
|
||||
}
|
||||
if headState == nil {
|
||||
s.initSyncStateLock.RLock()
|
||||
cachedHeadState, ok := s.initSyncState[r]
|
||||
if ok {
|
||||
headState = cachedHeadState
|
||||
}
|
||||
s.initSyncStateLock.RUnlock()
|
||||
}
|
||||
}
|
||||
if headState == nil {
|
||||
return errors.New("nil head state")
|
||||
}
|
||||
|
||||
s.setHead(r, stateTrie.CopySignedBeaconBlock(b), headState)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// This sets head view object which is used to track the head slot, root, block and state.
|
||||
func (s *Service) setHead(root [32]byte, block *ethpb.SignedBeaconBlock, state *state.BeaconState) {
|
||||
s.headLock.Lock()
|
||||
defer s.headLock.Unlock()
|
||||
|
||||
// This does a full copy of the block and state.
|
||||
s.head = &head{
|
||||
slot: block.Block.Slot,
|
||||
root: root,
|
||||
block: stateTrie.CopySignedBeaconBlock(block),
|
||||
state: state.Copy(),
|
||||
}
|
||||
}
|
||||
|
||||
// This returns the head slot.
|
||||
func (s *Service) headSlot() uint64 {
|
||||
s.headLock.RLock()
|
||||
defer s.headLock.RUnlock()
|
||||
|
||||
return s.head.slot
|
||||
}
|
||||
|
||||
// This returns the head root.
|
||||
// It does a full copy on head root for immutability.
|
||||
func (s *Service) headRoot() [32]byte {
|
||||
if s.head == nil {
|
||||
return params.BeaconConfig().ZeroHash
|
||||
}
|
||||
|
||||
s.headLock.RLock()
|
||||
defer s.headLock.RUnlock()
|
||||
|
||||
return s.head.root
|
||||
}
|
||||
|
||||
// This returns the head block.
|
||||
// It does a full copy on head block for immutability.
|
||||
func (s *Service) headBlock() *ethpb.SignedBeaconBlock {
|
||||
s.headLock.RLock()
|
||||
defer s.headLock.RUnlock()
|
||||
|
||||
return stateTrie.CopySignedBeaconBlock(s.head.block)
|
||||
}
|
||||
|
||||
// This returns the head state.
|
||||
// It does a full copy on head state for immutability.
|
||||
func (s *Service) headState() *state.BeaconState {
|
||||
s.headLock.RLock()
|
||||
defer s.headLock.RUnlock()
|
||||
|
||||
return s.head.state.Copy()
|
||||
}
|
||||
|
||||
// Returns true if head state exists.
|
||||
func (s *Service) hasHeadState() bool {
|
||||
s.headLock.RLock()
|
||||
defer s.headLock.RUnlock()
|
||||
|
||||
return s.head != nil && s.head.state != nil
|
||||
}
|
||||
72
beacon-chain/blockchain/head_test.go
Normal file
72
beacon-chain/blockchain/head_test.go
Normal file
@@ -0,0 +1,72 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/go-ssz"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
)
|
||||
|
||||
func TestSaveHead_Same(t *testing.T) {
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
service := setupBeaconChain(t, db)
|
||||
|
||||
r := [32]byte{'A'}
|
||||
service.head = &head{slot: 0, root: r}
|
||||
|
||||
if err := service.saveHead(context.Background(), r); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if service.headSlot() != 0 {
|
||||
t.Error("Head did not stay the same")
|
||||
}
|
||||
|
||||
if service.headRoot() != r {
|
||||
t.Error("Head did not stay the same")
|
||||
}
|
||||
}
|
||||
|
||||
func TestSaveHead_Different(t *testing.T) {
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
service := setupBeaconChain(t, db)
|
||||
|
||||
oldRoot := [32]byte{'A'}
|
||||
service.head = &head{slot: 0, root: oldRoot}
|
||||
|
||||
newHeadBlock := ðpb.BeaconBlock{Slot: 1}
|
||||
newHeadSignedBlock := ðpb.SignedBeaconBlock{Block: newHeadBlock}
|
||||
service.beaconDB.SaveBlock(context.Background(), newHeadSignedBlock)
|
||||
newRoot, _ := ssz.HashTreeRoot(newHeadBlock)
|
||||
headState, _ := state.InitializeFromProto(&pb.BeaconState{Slot: 1})
|
||||
service.beaconDB.SaveState(context.Background(), headState, newRoot)
|
||||
if err := service.saveHead(context.Background(), newRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if service.HeadSlot() != 1 {
|
||||
t.Error("Head did not change")
|
||||
}
|
||||
|
||||
cachedRoot, err := service.HeadRoot(context.Background())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !bytes.Equal(cachedRoot, newRoot[:]) {
|
||||
t.Error("Head did not change")
|
||||
}
|
||||
if !reflect.DeepEqual(service.headBlock(), newHeadSignedBlock) {
|
||||
t.Error("Head did not change")
|
||||
}
|
||||
if !reflect.DeepEqual(service.headState().CloneInnerState(), headState.CloneInnerState()) {
|
||||
t.Error("Head did not change")
|
||||
}
|
||||
}
|
||||
@@ -1,57 +1,83 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"sort"
|
||||
"strconv"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/emicklei/dot"
|
||||
)
|
||||
|
||||
const latestSlotCount = 10
|
||||
const template = `<html>
|
||||
<head>
|
||||
<script src="//cdnjs.cloudflare.com/ajax/libs/viz.js/2.1.2/viz.js"></script>
|
||||
<script src="//cdnjs.cloudflare.com/ajax/libs/viz.js/2.1.2/full.render.js"></script>
|
||||
<body>
|
||||
<script type="application/javascript">
|
||||
var graph = ` + "`%s`;" + `
|
||||
var viz = new Viz();
|
||||
viz.renderSVGElement(graph) // reading the graph.
|
||||
.then(function(element) {
|
||||
document.body.appendChild(element); // appends to document.
|
||||
})
|
||||
.catch(error => {
|
||||
// Create a new Viz instance (@see Caveats page for more info)
|
||||
viz = new Viz();
|
||||
// Possibly display the error
|
||||
console.error(error);
|
||||
});
|
||||
</script>
|
||||
</head>
|
||||
</body>
|
||||
</html>`
|
||||
|
||||
// HeadsHandler is a handler to serve /heads page in metrics.
|
||||
func (s *Service) HeadsHandler(w http.ResponseWriter, _ *http.Request) {
|
||||
buf := new(bytes.Buffer)
|
||||
|
||||
if _, err := fmt.Fprintf(w, "\n %s\t%s\t", "Head slot", "Head root"); err != nil {
|
||||
logrus.WithError(err).Error("Failed to render chain heads page")
|
||||
return
|
||||
// TreeHandler is a handler to serve /tree page in metrics.
|
||||
func (s *Service) TreeHandler(w http.ResponseWriter, _ *http.Request) {
|
||||
if s.headState() == nil {
|
||||
if _, err := w.Write([]byte("Unavailable during initial syncing")); err != nil {
|
||||
log.WithError(err).Error("Failed to render p2p info page")
|
||||
}
|
||||
}
|
||||
|
||||
if _, err := fmt.Fprintf(w, "\n %s\t%s\t", "---------", "---------"); err != nil {
|
||||
logrus.WithError(err).Error("Failed to render chain heads page")
|
||||
return
|
||||
nodes := s.forkChoiceStore.Nodes()
|
||||
|
||||
graph := dot.NewGraph(dot.Directed)
|
||||
graph.Attr("rankdir", "RL")
|
||||
graph.Attr("labeljust", "l")
|
||||
|
||||
dotNodes := make([]*dot.Node, len(nodes))
|
||||
avgBalance := uint64(averageBalance(s.headState().Balances()))
|
||||
|
||||
for i := len(nodes) - 1; i >= 0; i-- {
|
||||
// Construct label for each node.
|
||||
slot := strconv.Itoa(int(nodes[i].Slot))
|
||||
weight := strconv.Itoa(int(nodes[i].Weight / 1e9)) // Convert unit Gwei to unit ETH.
|
||||
votes := strconv.Itoa(int(nodes[i].Weight / 1e9 / avgBalance))
|
||||
bestDescendent := strconv.Itoa(int(nodes[i].BestDescendent))
|
||||
index := strconv.Itoa(int(i))
|
||||
label := "slot: " + slot + "\n index: " + index + "\n bestDescendent: " + bestDescendent + "\n votes: " + votes + "\n weight: " + weight
|
||||
var dotN dot.Node
|
||||
if nodes[i].Parent != ^uint64(0) {
|
||||
dotN = graph.Node(index).Box().Attr("label", label)
|
||||
}
|
||||
|
||||
if nodes[i].Slot == s.headSlot() &&
|
||||
nodes[i].BestDescendent == ^uint64(0) {
|
||||
dotN = dotN.Attr("color", "green")
|
||||
}
|
||||
|
||||
dotNodes[i] = &dotN
|
||||
}
|
||||
|
||||
slots := s.latestHeadSlots()
|
||||
for _, slot := range slots {
|
||||
r := hex.EncodeToString(bytesutil.Trunc(s.canonicalRoots[uint64(slot)]))
|
||||
if _, err := fmt.Fprintf(w, "\n %d\t\t%s\t", slot, r); err != nil {
|
||||
logrus.WithError(err).Error("Failed to render chain heads page")
|
||||
return
|
||||
for i := len(nodes) - 1; i >= 0; i-- {
|
||||
if nodes[i].Parent != ^uint64(0) && nodes[i].Parent < uint64(len(dotNodes)) {
|
||||
graph.Edge(*dotNodes[i], *dotNodes[nodes[i].Parent])
|
||||
}
|
||||
}
|
||||
|
||||
w.WriteHeader(http.StatusOK)
|
||||
if _, err := w.Write(buf.Bytes()); err != nil {
|
||||
log.WithError(err).Error("Failed to render chain heads page")
|
||||
w.Header().Set("Content-Type", "text/html")
|
||||
if _, err := fmt.Fprintf(w, template, graph.String()); err != nil {
|
||||
log.WithError(err).Error("Failed to render p2p info page")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// This returns the latest head slots in a slice and up to latestSlotCount
|
||||
func (s *Service) latestHeadSlots() []int {
|
||||
slots := make([]int, 0, len(s.canonicalRoots))
|
||||
for k := range s.canonicalRoots {
|
||||
slots = append(slots, int(k))
|
||||
}
|
||||
sort.Ints(slots)
|
||||
if (len(slots)) > latestSlotCount {
|
||||
return slots[len(slots)-latestSlotCount:]
|
||||
}
|
||||
return slots
|
||||
}
|
||||
|
||||
191
beacon-chain/blockchain/init_sync_process_block.go
Normal file
191
beacon-chain/blockchain/init_sync_process_block.go
Normal file
@@ -0,0 +1,191 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sort"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
)
|
||||
|
||||
const maxCacheSize = 70
|
||||
const initialSyncCacheSize = 45
|
||||
const minimumCacheSize = initialSyncCacheSize / 3
|
||||
|
||||
func (s *Service) persistCachedStates(ctx context.Context, numOfStates int) error {
|
||||
oldStates := make([]*stateTrie.BeaconState, 0, numOfStates)
|
||||
|
||||
// Add slots to the map and add epoch boundary states to the slice.
|
||||
for _, rt := range s.boundaryRoots[:numOfStates-minimumCacheSize] {
|
||||
oldStates = append(oldStates, s.initSyncState[rt])
|
||||
}
|
||||
|
||||
err := s.beaconDB.SaveStates(ctx, oldStates, s.boundaryRoots[:numOfStates-minimumCacheSize])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, rt := range s.boundaryRoots[:numOfStates-minimumCacheSize] {
|
||||
delete(s.initSyncState, rt)
|
||||
}
|
||||
s.boundaryRoots = s.boundaryRoots[numOfStates-minimumCacheSize:]
|
||||
return nil
|
||||
}
|
||||
|
||||
// filter out boundary candidates from our currently processed batch of states.
|
||||
func (s *Service) filterBoundaryCandidates(ctx context.Context, root [32]byte, postState *stateTrie.BeaconState) {
|
||||
// Only trigger on epoch start.
|
||||
if !helpers.IsEpochStart(postState.Slot()) {
|
||||
return
|
||||
}
|
||||
|
||||
stateSlice := make([][32]byte, 0, len(s.initSyncState))
|
||||
// Add epoch boundary roots to slice.
|
||||
for rt := range s.initSyncState {
|
||||
stateSlice = append(stateSlice, rt)
|
||||
}
|
||||
|
||||
sort.Slice(stateSlice, func(i int, j int) bool {
|
||||
return s.initSyncState[stateSlice[i]].Slot() < s.initSyncState[stateSlice[j]].Slot()
|
||||
})
|
||||
epochLength := params.BeaconConfig().SlotsPerEpoch
|
||||
|
||||
if len(s.boundaryRoots) > 0 {
|
||||
// Retrieve previous boundary root.
|
||||
previousBoundaryRoot := s.boundaryRoots[len(s.boundaryRoots)-1]
|
||||
previousState, ok := s.initSyncState[previousBoundaryRoot]
|
||||
if !ok {
|
||||
// Remove the non-existent root and exit filtering.
|
||||
s.boundaryRoots = s.boundaryRoots[:len(s.boundaryRoots)-1]
|
||||
return
|
||||
}
|
||||
previousSlot := previousState.Slot()
|
||||
|
||||
// Round up slot number to account for skipped slots.
|
||||
previousSlot = helpers.RoundUpToNearestEpoch(previousSlot)
|
||||
if postState.Slot()-previousSlot >= epochLength {
|
||||
targetSlot := postState.Slot()
|
||||
tempRoots := s.loopThroughCandidates(stateSlice, previousBoundaryRoot, previousSlot, targetSlot)
|
||||
s.boundaryRoots = append(s.boundaryRoots, tempRoots...)
|
||||
}
|
||||
}
|
||||
s.boundaryRoots = append(s.boundaryRoots, root)
|
||||
s.pruneOldStates()
|
||||
s.pruneNonBoundaryStates()
|
||||
}
|
||||
|
||||
// loop-through the provided candidate roots to filter out which would be appropriate boundary roots.
|
||||
func (s *Service) loopThroughCandidates(stateSlice [][32]byte, previousBoundaryRoot [32]byte,
|
||||
previousSlot uint64, targetSlot uint64) [][32]byte {
|
||||
tempRoots := [][32]byte{}
|
||||
epochLength := params.BeaconConfig().SlotsPerEpoch
|
||||
|
||||
// Loop through current states to filter for valid boundary states.
|
||||
for i := len(stateSlice) - 1; stateSlice[i] != previousBoundaryRoot && i >= 0; i-- {
|
||||
currentSlot := s.initSyncState[stateSlice[i]].Slot()
|
||||
// Skip if the current slot is larger than the previous epoch
|
||||
// boundary.
|
||||
if currentSlot > targetSlot-epochLength {
|
||||
continue
|
||||
}
|
||||
tempRoots = append(tempRoots, stateSlice[i])
|
||||
|
||||
// Switch target slot if the current slot is greater than
|
||||
// 1 epoch boundary from the previously saved boundary slot.
|
||||
if currentSlot > previousSlot+epochLength {
|
||||
currentSlot = helpers.RoundUpToNearestEpoch(currentSlot)
|
||||
targetSlot = currentSlot
|
||||
continue
|
||||
}
|
||||
break
|
||||
}
|
||||
// Reverse to append the roots in ascending order corresponding
|
||||
// to the respective slots.
|
||||
tempRoots = bytesutil.ReverseBytes32Slice(tempRoots)
|
||||
return tempRoots
|
||||
}
|
||||
|
||||
// prune for states past the current finalized checkpoint.
|
||||
func (s *Service) pruneOldStates() {
|
||||
prunedBoundaryRoots := [][32]byte{}
|
||||
for _, rt := range s.boundaryRoots {
|
||||
st, ok := s.initSyncState[rt]
|
||||
// Skip non-existent roots.
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
if st.Slot() < helpers.StartSlot(s.FinalizedCheckpt().Epoch) {
|
||||
delete(s.initSyncState, rt)
|
||||
continue
|
||||
}
|
||||
prunedBoundaryRoots = append(prunedBoundaryRoots, rt)
|
||||
}
|
||||
s.boundaryRoots = prunedBoundaryRoots
|
||||
}
|
||||
|
||||
// prune cache for non-boundary states.
|
||||
func (s *Service) pruneNonBoundaryStates() {
|
||||
boundaryMap := make(map[[32]byte]bool)
|
||||
for i := range s.boundaryRoots {
|
||||
boundaryMap[s.boundaryRoots[i]] = true
|
||||
}
|
||||
for rt := range s.initSyncState {
|
||||
if !boundaryMap[rt] {
|
||||
delete(s.initSyncState, rt)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) pruneOldNonFinalizedStates() {
|
||||
stateSlice := make([][32]byte, 0, len(s.initSyncState))
|
||||
// Add epoch boundary roots to slice.
|
||||
for rt := range s.initSyncState {
|
||||
stateSlice = append(stateSlice, rt)
|
||||
}
|
||||
|
||||
// Sort by slots.
|
||||
sort.Slice(stateSlice, func(i int, j int) bool {
|
||||
return s.initSyncState[stateSlice[i]].Slot() < s.initSyncState[stateSlice[j]].Slot()
|
||||
})
|
||||
|
||||
boundaryMap := make(map[[32]byte]bool)
|
||||
for i := range s.boundaryRoots {
|
||||
boundaryMap[s.boundaryRoots[i]] = true
|
||||
}
|
||||
for _, rt := range stateSlice[:initialSyncCacheSize] {
|
||||
if boundaryMap[rt] {
|
||||
continue
|
||||
}
|
||||
delete(s.initSyncState, rt)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) generateState(ctx context.Context, startRoot [32]byte, endRoot [32]byte) (*stateTrie.BeaconState, error) {
|
||||
preState, err := s.beaconDB.State(ctx, startRoot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if preState == nil {
|
||||
return nil, errors.New("finalized state does not exist in db")
|
||||
}
|
||||
endBlock, err := s.beaconDB.Block(ctx, endRoot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if endBlock == nil {
|
||||
return nil, errors.New("provided block root does not have block saved in the db")
|
||||
}
|
||||
log.Warnf("Generating missing state of slot %d and root %#x", endBlock.Block.Slot, endRoot)
|
||||
|
||||
blocks, err := s.stateGen.LoadBlocks(ctx, preState.Slot()+1, endBlock.Block.Slot, endRoot)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not load the required blocks")
|
||||
}
|
||||
postState, err := s.stateGen.ReplayBlocks(ctx, preState, blocks, endBlock.Block.Slot)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not replay the blocks to generate the resultant state")
|
||||
}
|
||||
return postState, nil
|
||||
}
|
||||
280
beacon-chain/blockchain/init_sync_process_block_test.go
Normal file
280
beacon-chain/blockchain/init_sync_process_block_test.go
Normal file
@@ -0,0 +1,280 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/go-ssz"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
messagediff "gopkg.in/d4l3k/messagediff.v1"
|
||||
)
|
||||
|
||||
func TestFilterBoundaryCandidates_FilterCorrect(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
cfg := &Config{BeaconDB: db}
|
||||
service, err := NewService(ctx, cfg)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
st, _ := stateTrie.InitializeFromProtoUnsafe(&pb.BeaconState{})
|
||||
|
||||
for i := uint64(0); i < 500; i++ {
|
||||
st.SetSlot(i)
|
||||
root := [32]byte{}
|
||||
copy(root[:], bytesutil.Bytes32(i))
|
||||
service.initSyncState[root] = st.Copy()
|
||||
if helpers.IsEpochStart(i) {
|
||||
service.boundaryRoots = append(service.boundaryRoots, root)
|
||||
}
|
||||
}
|
||||
lastIndex := len(service.boundaryRoots) - 1
|
||||
for i := uint64(500); i < 2000; i++ {
|
||||
st.SetSlot(i)
|
||||
root := [32]byte{}
|
||||
copy(root[:], bytesutil.Bytes32(i))
|
||||
service.initSyncState[root] = st.Copy()
|
||||
}
|
||||
// Set current state.
|
||||
latestSlot := helpers.RoundUpToNearestEpoch(2000)
|
||||
st.SetSlot(latestSlot)
|
||||
lastRoot := [32]byte{}
|
||||
copy(lastRoot[:], bytesutil.Bytes32(latestSlot))
|
||||
|
||||
service.initSyncState[lastRoot] = st.Copy()
|
||||
service.finalizedCheckpt = ðpb.Checkpoint{
|
||||
Epoch: 0,
|
||||
Root: []byte{},
|
||||
}
|
||||
service.filterBoundaryCandidates(context.Background(), lastRoot, st)
|
||||
if len(service.boundaryRoots[lastIndex+1:]) == 0 {
|
||||
t.Fatal("Wanted non zero added boundary roots")
|
||||
}
|
||||
for _, rt := range service.boundaryRoots[lastIndex+1:] {
|
||||
st, ok := service.initSyncState[rt]
|
||||
if !ok {
|
||||
t.Error("Root doen't exist in cache map")
|
||||
continue
|
||||
}
|
||||
if !(helpers.IsEpochStart(st.Slot()) || helpers.IsEpochStart(st.Slot()-1) || helpers.IsEpochStart(st.Slot()+1)) {
|
||||
t.Errorf("boundary roots not validly stored. They have slot %d", st.Slot())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFilterBoundaryCandidates_HandleSkippedSlots(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
cfg := &Config{BeaconDB: db}
|
||||
service, err := NewService(ctx, cfg)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
st, _ := stateTrie.InitializeFromProtoUnsafe(&pb.BeaconState{})
|
||||
|
||||
for i := uint64(0); i < 500; i++ {
|
||||
st.SetSlot(i)
|
||||
root := [32]byte{}
|
||||
copy(root[:], bytesutil.Bytes32(i))
|
||||
service.initSyncState[root] = st.Copy()
|
||||
if helpers.IsEpochStart(i) {
|
||||
service.boundaryRoots = append(service.boundaryRoots, root)
|
||||
}
|
||||
}
|
||||
lastIndex := len(service.boundaryRoots) - 1
|
||||
for i := uint64(500); i < 2000; i++ {
|
||||
st.SetSlot(i)
|
||||
root := [32]byte{}
|
||||
copy(root[:], bytesutil.Bytes32(i))
|
||||
// save only for offsetted slots
|
||||
if helpers.IsEpochStart(i + 10) {
|
||||
service.initSyncState[root] = st.Copy()
|
||||
}
|
||||
}
|
||||
// Set current state.
|
||||
latestSlot := helpers.RoundUpToNearestEpoch(2000)
|
||||
st.SetSlot(latestSlot)
|
||||
lastRoot := [32]byte{}
|
||||
copy(lastRoot[:], bytesutil.Bytes32(latestSlot))
|
||||
|
||||
service.initSyncState[lastRoot] = st.Copy()
|
||||
service.finalizedCheckpt = ðpb.Checkpoint{
|
||||
Epoch: 0,
|
||||
Root: []byte{},
|
||||
}
|
||||
service.filterBoundaryCandidates(context.Background(), lastRoot, st)
|
||||
if len(service.boundaryRoots[lastIndex+1:]) == 0 {
|
||||
t.Fatal("Wanted non zero added boundary roots")
|
||||
}
|
||||
for _, rt := range service.boundaryRoots[lastIndex+1:] {
|
||||
st, ok := service.initSyncState[rt]
|
||||
if !ok {
|
||||
t.Error("Root doen't exist in cache map")
|
||||
continue
|
||||
}
|
||||
if st.Slot() >= 500 {
|
||||
// Ignore head boundary root.
|
||||
if st.Slot() == 2016 {
|
||||
continue
|
||||
}
|
||||
if !helpers.IsEpochStart(st.Slot() + 10) {
|
||||
t.Errorf("boundary roots not validly stored. They have slot %d "+
|
||||
"instead of the offset from epoch start", st.Slot())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestPruneOldStates_AlreadyFinalized(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
cfg := &Config{BeaconDB: db}
|
||||
service, err := NewService(ctx, cfg)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
st, _ := stateTrie.InitializeFromProtoUnsafe(&pb.BeaconState{})
|
||||
|
||||
for i := uint64(100); i < 200; i++ {
|
||||
st.SetSlot(i)
|
||||
root := [32]byte{}
|
||||
copy(root[:], bytesutil.Bytes32(i))
|
||||
service.initSyncState[root] = st.Copy()
|
||||
service.boundaryRoots = append(service.boundaryRoots, root)
|
||||
}
|
||||
finalizedEpoch := uint64(5)
|
||||
service.finalizedCheckpt = ðpb.Checkpoint{Epoch: finalizedEpoch}
|
||||
service.pruneOldStates()
|
||||
for _, rt := range service.boundaryRoots {
|
||||
st, ok := service.initSyncState[rt]
|
||||
if !ok {
|
||||
t.Error("Root doen't exist in cache map")
|
||||
continue
|
||||
}
|
||||
if st.Slot() < helpers.StartSlot(finalizedEpoch) {
|
||||
t.Errorf("State with slot %d still exists and not pruned", st.Slot())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestPruneNonBoundary_CanPrune(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
cfg := &Config{BeaconDB: db}
|
||||
service, err := NewService(ctx, cfg)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
st, _ := stateTrie.InitializeFromProtoUnsafe(&pb.BeaconState{})
|
||||
|
||||
for i := uint64(0); i < 2000; i++ {
|
||||
st.SetSlot(i)
|
||||
root := [32]byte{}
|
||||
copy(root[:], bytesutil.Bytes32(i))
|
||||
service.initSyncState[root] = st.Copy()
|
||||
if helpers.IsEpochStart(i) {
|
||||
service.boundaryRoots = append(service.boundaryRoots, root)
|
||||
}
|
||||
}
|
||||
service.pruneNonBoundaryStates()
|
||||
for _, rt := range service.boundaryRoots {
|
||||
st, ok := service.initSyncState[rt]
|
||||
if !ok {
|
||||
t.Error("Root doesn't exist in cache map")
|
||||
continue
|
||||
}
|
||||
if !helpers.IsEpochStart(st.Slot()) {
|
||||
t.Errorf("Non boundary state with slot %d still exists and not pruned", st.Slot())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGenerateState_CorrectlyGenerated(t *testing.T) {
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
cfg := &Config{BeaconDB: db, StateGen: stategen.New(db)}
|
||||
service, err := NewService(context.Background(), cfg)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
beaconState, privs := testutil.DeterministicGenesisState(t, 32)
|
||||
genesisBlock := blocks.NewGenesisBlock([]byte{})
|
||||
bodyRoot, err := ssz.HashTreeRoot(genesisBlock.Block)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
beaconState.SetLatestBlockHeader(ðpb.BeaconBlockHeader{
|
||||
Slot: genesisBlock.Block.Slot,
|
||||
ParentRoot: genesisBlock.Block.ParentRoot,
|
||||
StateRoot: params.BeaconConfig().ZeroHash[:],
|
||||
BodyRoot: bodyRoot[:],
|
||||
})
|
||||
beaconState.SetSlashings(make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector))
|
||||
cp := beaconState.CurrentJustifiedCheckpoint()
|
||||
mockRoot := [32]byte{}
|
||||
copy(mockRoot[:], "hello-world")
|
||||
cp.Root = mockRoot[:]
|
||||
beaconState.SetCurrentJustifiedCheckpoint(cp)
|
||||
beaconState.SetCurrentEpochAttestations([]*pb.PendingAttestation{})
|
||||
err = db.SaveBlock(context.Background(), genesisBlock)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
genRoot, err := ssz.HashTreeRoot(genesisBlock)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
err = db.SaveState(context.Background(), beaconState, genRoot)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
lastBlock := ðpb.SignedBeaconBlock{}
|
||||
for i := uint64(1); i < 10; i++ {
|
||||
block, err := testutil.GenerateFullBlock(beaconState, privs, testutil.DefaultBlockGenConfig(), i)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
beaconState, err = state.ExecuteStateTransition(context.Background(), beaconState, block)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
err = db.SaveBlock(context.Background(), block)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
lastBlock = block
|
||||
}
|
||||
root, err := ssz.HashTreeRoot(lastBlock.Block)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
newState, err := service.generateState(context.Background(), genRoot, root)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !ssz.DeepEqual(newState.InnerStateUnsafe(), beaconState.InnerStateUnsafe()) {
|
||||
diff, _ := messagediff.PrettyDiff(newState.InnerStateUnsafe(), beaconState.InnerStateUnsafe())
|
||||
t.Errorf("Generated state is different from what is expected: %s", diff)
|
||||
}
|
||||
}
|
||||
@@ -1,17 +1,50 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"fmt"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var log = logrus.WithField("prefix", "blockchain")
|
||||
|
||||
// logs state transition related data every slot.
|
||||
func logStateTransitionData(b *ethpb.BeaconBlock, r []byte) {
|
||||
func logStateTransitionData(b *ethpb.BeaconBlock) {
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": b.Slot,
|
||||
"attestations": len(b.Body.Attestations),
|
||||
"deposits": len(b.Body.Deposits),
|
||||
"slot": b.Slot,
|
||||
"attestations": len(b.Body.Attestations),
|
||||
"deposits": len(b.Body.Deposits),
|
||||
"attesterSlashings": len(b.Body.AttesterSlashings),
|
||||
}).Info("Finished applying state transition")
|
||||
}
|
||||
|
||||
func logEpochData(beaconState *stateTrie.BeaconState) {
|
||||
log.WithFields(logrus.Fields{
|
||||
"epoch": helpers.CurrentEpoch(beaconState),
|
||||
"finalizedEpoch": beaconState.FinalizedCheckpointEpoch(),
|
||||
"justifiedEpoch": beaconState.CurrentJustifiedCheckpoint().Epoch,
|
||||
"previousJustifiedEpoch": beaconState.PreviousJustifiedCheckpoint().Epoch,
|
||||
}).Info("Starting next epoch")
|
||||
activeVals, err := helpers.ActiveValidatorIndices(beaconState, helpers.CurrentEpoch(beaconState))
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get active validator indices")
|
||||
return
|
||||
}
|
||||
log.WithFields(logrus.Fields{
|
||||
"totalValidators": len(beaconState.Validators()),
|
||||
"activeValidators": len(activeVals),
|
||||
"averageBalance": fmt.Sprintf("%.5f ETH", averageBalance(beaconState.Balances())),
|
||||
}).Info("Validator registry information")
|
||||
}
|
||||
|
||||
func averageBalance(balances []uint64) float64 {
|
||||
total := uint64(0)
|
||||
for i := 0; i < len(balances); i++ {
|
||||
total += balances[i]
|
||||
}
|
||||
return float64(total) / float64(len(balances)) / float64(params.BeaconConfig().GweiPerEth)
|
||||
}
|
||||
|
||||
@@ -3,7 +3,11 @@ package blockchain
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
|
||||
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -15,42 +19,162 @@ var (
|
||||
Name: "beacon_head_slot",
|
||||
Help: "Slot of the head block of the beacon chain",
|
||||
})
|
||||
beaconHeadRoot = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "beacon_head_root",
|
||||
Help: "Root of the head block of the beacon chain, it returns the lowest 8 bytes interpreted as little endian",
|
||||
})
|
||||
competingAtts = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "competing_attestations",
|
||||
Help: "The # of attestations received and processed from a competing chain",
|
||||
})
|
||||
competingBlks = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "competing_blocks",
|
||||
Help: "The # of blocks received and processed from a competing chain",
|
||||
})
|
||||
processedBlkNoPubsub = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "processed_no_pubsub_block_counter",
|
||||
Help: "The # of processed block without pubsub, this usually means the blocks from sync",
|
||||
headFinalizedEpoch = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "head_finalized_epoch",
|
||||
Help: "Last finalized epoch of the head state",
|
||||
})
|
||||
processedBlkNoPubsubForkchoice = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "processed_no_pubsub_forkchoice_block_counter",
|
||||
Help: "The # of processed block without pubsub and forkchoice, this means indicate blocks from initial sync",
|
||||
headFinalizedRoot = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "head_finalized_root",
|
||||
Help: "Last finalized root of the head state",
|
||||
})
|
||||
processedBlk = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "processed_block_counter",
|
||||
Help: "The # of total processed in block chain service, with fork choice and pubsub",
|
||||
beaconFinalizedEpoch = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "beacon_finalized_epoch",
|
||||
Help: "Last finalized epoch of the processed state",
|
||||
})
|
||||
processedAttNoPubsub = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "processed_no_pubsub_attestation_counter",
|
||||
Help: "The # of processed attestation without pubsub, this usually means the attestations from sync",
|
||||
beaconFinalizedRoot = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "beacon_finalized_root",
|
||||
Help: "Last finalized root of the processed state",
|
||||
})
|
||||
processedAtt = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "processed_attestation_counter",
|
||||
Help: "The # of processed attestation with pubsub and fork choice, this ususally means attestations from rpc",
|
||||
beaconCurrentJustifiedEpoch = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "beacon_current_justified_epoch",
|
||||
Help: "Current justified epoch of the processed state",
|
||||
})
|
||||
beaconCurrentJustifiedRoot = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "beacon_current_justified_root",
|
||||
Help: "Current justified root of the processed state",
|
||||
})
|
||||
beaconPrevJustifiedEpoch = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "beacon_previous_justified_epoch",
|
||||
Help: "Previous justified epoch of the processed state",
|
||||
})
|
||||
beaconPrevJustifiedRoot = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "beacon_previous_justified_root",
|
||||
Help: "Previous justified root of the processed state",
|
||||
})
|
||||
validatorsCount = promauto.NewGaugeVec(prometheus.GaugeOpts{
|
||||
Name: "validator_count",
|
||||
Help: "The total number of validators",
|
||||
}, []string{"state"})
|
||||
validatorsBalance = promauto.NewGaugeVec(prometheus.GaugeOpts{
|
||||
Name: "validators_total_balance",
|
||||
Help: "The total balance of validators, in GWei",
|
||||
}, []string{"state"})
|
||||
validatorsEffectiveBalance = promauto.NewGaugeVec(prometheus.GaugeOpts{
|
||||
Name: "validators_total_effective_balance",
|
||||
Help: "The total effective balance of validators, in GWei",
|
||||
}, []string{"state"})
|
||||
currentEth1DataDepositCount = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "current_eth1_data_deposit_count",
|
||||
Help: "The current eth1 deposit count in the last processed state eth1data field.",
|
||||
})
|
||||
totalEligibleBalances = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "total_eligible_balances",
|
||||
Help: "The total amount of ether, in gwei, that has been used in voting attestation target of previous epoch",
|
||||
})
|
||||
totalVotedTargetBalances = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "total_voted_target_balances",
|
||||
Help: "The total amount of ether, in gwei, that is eligible for voting of previous epoch",
|
||||
})
|
||||
)
|
||||
|
||||
func (s *Service) reportSlotMetrics(currentSlot uint64) {
|
||||
// reportSlotMetrics reports slot related metrics.
|
||||
func reportSlotMetrics(currentSlot uint64, headSlot uint64, finalizedCheckpoint *ethpb.Checkpoint) {
|
||||
beaconSlot.Set(float64(currentSlot))
|
||||
beaconHeadSlot.Set(float64(s.HeadSlot()))
|
||||
beaconHeadRoot.Set(float64(bytesutil.ToLowInt64(s.HeadRoot())))
|
||||
beaconHeadSlot.Set(float64(headSlot))
|
||||
if finalizedCheckpoint != nil {
|
||||
headFinalizedEpoch.Set(float64(finalizedCheckpoint.Epoch))
|
||||
headFinalizedRoot.Set(float64(bytesutil.ToLowInt64(finalizedCheckpoint.Root)))
|
||||
}
|
||||
}
|
||||
|
||||
// reportEpochMetrics reports epoch related metrics.
|
||||
func reportEpochMetrics(state *stateTrie.BeaconState) {
|
||||
currentEpoch := state.Slot() / params.BeaconConfig().SlotsPerEpoch
|
||||
|
||||
// Validator instances
|
||||
pendingInstances := 0
|
||||
activeInstances := 0
|
||||
slashingInstances := 0
|
||||
slashedInstances := 0
|
||||
exitingInstances := 0
|
||||
exitedInstances := 0
|
||||
// Validator balances
|
||||
pendingBalance := uint64(0)
|
||||
activeBalance := uint64(0)
|
||||
activeEffectiveBalance := uint64(0)
|
||||
exitingBalance := uint64(0)
|
||||
exitingEffectiveBalance := uint64(0)
|
||||
slashingBalance := uint64(0)
|
||||
slashingEffectiveBalance := uint64(0)
|
||||
|
||||
for i, validator := range state.Validators() {
|
||||
bal, err := state.BalanceAtIndex(uint64(i))
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if validator.Slashed {
|
||||
if currentEpoch < validator.ExitEpoch {
|
||||
slashingInstances++
|
||||
slashingBalance += bal
|
||||
slashingEffectiveBalance += validator.EffectiveBalance
|
||||
} else {
|
||||
slashedInstances++
|
||||
}
|
||||
continue
|
||||
}
|
||||
if validator.ExitEpoch != params.BeaconConfig().FarFutureEpoch {
|
||||
if currentEpoch < validator.ExitEpoch {
|
||||
exitingInstances++
|
||||
exitingBalance += bal
|
||||
exitingEffectiveBalance += validator.EffectiveBalance
|
||||
} else {
|
||||
exitedInstances++
|
||||
}
|
||||
continue
|
||||
}
|
||||
if currentEpoch < validator.ActivationEpoch {
|
||||
pendingInstances++
|
||||
pendingBalance += bal
|
||||
continue
|
||||
}
|
||||
activeInstances++
|
||||
activeBalance += bal
|
||||
activeEffectiveBalance += validator.EffectiveBalance
|
||||
}
|
||||
validatorsCount.WithLabelValues("Pending").Set(float64(pendingInstances))
|
||||
validatorsCount.WithLabelValues("Active").Set(float64(activeInstances))
|
||||
validatorsCount.WithLabelValues("Exiting").Set(float64(exitingInstances))
|
||||
validatorsCount.WithLabelValues("Exited").Set(float64(exitedInstances))
|
||||
validatorsCount.WithLabelValues("Slashing").Set(float64(slashingInstances))
|
||||
validatorsCount.WithLabelValues("Slashed").Set(float64(slashedInstances))
|
||||
validatorsBalance.WithLabelValues("Pending").Set(float64(pendingBalance))
|
||||
validatorsBalance.WithLabelValues("Active").Set(float64(activeBalance))
|
||||
validatorsBalance.WithLabelValues("Exiting").Set(float64(exitingBalance))
|
||||
validatorsBalance.WithLabelValues("Slashing").Set(float64(slashingBalance))
|
||||
validatorsEffectiveBalance.WithLabelValues("Active").Set(float64(activeEffectiveBalance))
|
||||
validatorsEffectiveBalance.WithLabelValues("Exiting").Set(float64(exitingEffectiveBalance))
|
||||
validatorsEffectiveBalance.WithLabelValues("Slashing").Set(float64(slashingEffectiveBalance))
|
||||
|
||||
// Last justified slot
|
||||
beaconCurrentJustifiedEpoch.Set(float64(state.CurrentJustifiedCheckpoint().Epoch))
|
||||
beaconCurrentJustifiedRoot.Set(float64(bytesutil.ToLowInt64(state.CurrentJustifiedCheckpoint().Root)))
|
||||
|
||||
// Last previous justified slot
|
||||
beaconPrevJustifiedEpoch.Set(float64(state.PreviousJustifiedCheckpoint().Epoch))
|
||||
beaconPrevJustifiedRoot.Set(float64(bytesutil.ToLowInt64(state.PreviousJustifiedCheckpoint().Root)))
|
||||
|
||||
// Last finalized slot
|
||||
beaconFinalizedEpoch.Set(float64(state.FinalizedCheckpointEpoch()))
|
||||
beaconFinalizedRoot.Set(float64(bytesutil.ToLowInt64(state.FinalizedCheckpoint().Root)))
|
||||
|
||||
currentEth1DataDepositCount.Set(float64(state.Eth1Data().DepositCount))
|
||||
|
||||
if precompute.Balances != nil {
|
||||
totalEligibleBalances.Set(float64(precompute.Balances.PrevEpoch))
|
||||
totalVotedTargetBalances.Set(float64(precompute.Balances.PrevEpochTargetAttesters))
|
||||
}
|
||||
}
|
||||
|
||||
132
beacon-chain/blockchain/process_attestation.go
Normal file
132
beacon-chain/blockchain/process_attestation.go
Normal file
@@ -0,0 +1,132 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/flags"
|
||||
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// ErrTargetRootNotInDB returns when the target block root of an attestation cannot be found in the
|
||||
// beacon database.
|
||||
var ErrTargetRootNotInDB = errors.New("target root does not exist in db")
|
||||
|
||||
// onAttestation is called whenever an attestation is received, verifies the attestation is valid and saves
|
||||
/// it to the DB.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
// def on_attestation(store: Service, attestation: Attestation) -> None:
|
||||
// """
|
||||
// Run ``on_attestation`` upon receiving a new ``attestation`` from either within a block or directly on the wire.
|
||||
//
|
||||
// An ``attestation`` that is asserted as invalid may be valid at a later time,
|
||||
// consider scheduling it for later processing in such case.
|
||||
// """
|
||||
// target = attestation.data.target
|
||||
//
|
||||
// # Attestations must be from the current or previous epoch
|
||||
// current_epoch = compute_epoch_at_slot(get_current_slot(store))
|
||||
// # Use GENESIS_EPOCH for previous when genesis to avoid underflow
|
||||
// previous_epoch = current_epoch - 1 if current_epoch > GENESIS_EPOCH else GENESIS_EPOCH
|
||||
// assert target.epoch in [current_epoch, previous_epoch]
|
||||
// assert target.epoch == compute_epoch_at_slot(attestation.data.slot)
|
||||
//
|
||||
// # Attestations target be for a known block. If target block is unknown, delay consideration until the block is found
|
||||
// assert target.root in store.blocks
|
||||
// # Attestations cannot be from future epochs. If they are, delay consideration until the epoch arrives
|
||||
// base_state = store.block_states[target.root].copy()
|
||||
// assert store.time >= base_state.genesis_time + compute_start_slot_at_epoch(target.epoch) * SECONDS_PER_SLOT
|
||||
//
|
||||
// # Attestations must be for a known block. If block is unknown, delay consideration until the block is found
|
||||
// assert attestation.data.beacon_block_root in store.blocks
|
||||
// # Attestations must not be for blocks in the future. If not, the attestation should not be considered
|
||||
// assert store.blocks[attestation.data.beacon_block_root].slot <= attestation.data.slot
|
||||
//
|
||||
// # Service target checkpoint state if not yet seen
|
||||
// if target not in store.checkpoint_states:
|
||||
// process_slots(base_state, compute_start_slot_at_epoch(target.epoch))
|
||||
// store.checkpoint_states[target] = base_state
|
||||
// target_state = store.checkpoint_states[target]
|
||||
//
|
||||
// # Attestations can only affect the fork choice of subsequent slots.
|
||||
// # Delay consideration in the fork choice until their slot is in the past.
|
||||
// assert store.time >= (attestation.data.slot + 1) * SECONDS_PER_SLOT
|
||||
//
|
||||
// # Get state at the `target` to validate attestation and calculate the committees
|
||||
// indexed_attestation = get_indexed_attestation(target_state, attestation)
|
||||
// assert is_valid_indexed_attestation(target_state, indexed_attestation)
|
||||
//
|
||||
// # Update latest messages
|
||||
// for i in indexed_attestation.attesting_indices:
|
||||
// if i not in store.latest_messages or target.epoch > store.latest_messages[i].epoch:
|
||||
// store.latest_messages[i] = LatestMessage(epoch=target.epoch, root=attestation.data.beacon_block_root)
|
||||
func (s *Service) onAttestation(ctx context.Context, a *ethpb.Attestation) ([]uint64, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "blockchain.onAttestation")
|
||||
defer span.End()
|
||||
|
||||
tgt := stateTrie.CopyCheckpoint(a.Data.Target)
|
||||
tgtSlot := helpers.StartSlot(tgt.Epoch)
|
||||
|
||||
if helpers.SlotToEpoch(a.Data.Slot) != a.Data.Target.Epoch {
|
||||
return nil, fmt.Errorf("data slot is not in the same epoch as target %d != %d", helpers.SlotToEpoch(a.Data.Slot), a.Data.Target.Epoch)
|
||||
}
|
||||
|
||||
// Verify beacon node has seen the target block before.
|
||||
if !s.hasBlock(ctx, bytesutil.ToBytes32(tgt.Root)) {
|
||||
return nil, ErrTargetRootNotInDB
|
||||
}
|
||||
|
||||
// Retrieve attestation's data beacon block pre state. Advance pre state to latest epoch if necessary and
|
||||
// save it to the cache.
|
||||
baseState, err := s.getAttPreState(ctx, tgt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
genesisTime := baseState.GenesisTime()
|
||||
|
||||
// Verify attestation target is from current epoch or previous epoch.
|
||||
if err := s.verifyAttTargetEpoch(ctx, genesisTime, uint64(time.Now().Unix()), tgt); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Verify Attestations cannot be from future epochs.
|
||||
if err := helpers.VerifySlotTime(genesisTime, tgtSlot); err != nil {
|
||||
return nil, errors.Wrap(err, "could not verify attestation target slot")
|
||||
}
|
||||
|
||||
// Verify attestation beacon block is known and not from the future.
|
||||
if err := s.verifyBeaconBlock(ctx, a.Data); err != nil {
|
||||
return nil, errors.Wrap(err, "could not verify attestation beacon block")
|
||||
}
|
||||
|
||||
// Verify attestations can only affect the fork choice of subsequent slots.
|
||||
if err := helpers.VerifySlotTime(genesisTime, a.Data.Slot+1); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Use the target state to to validate attestation and calculate the committees.
|
||||
indexedAtt, err := s.verifyAttestation(ctx, baseState, a)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Only save attestation in DB for archival node.
|
||||
if flags.Get().EnableArchive {
|
||||
if err := s.beaconDB.SaveAttestation(ctx, a); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// Update forkchoice store with the new attestation for updating weight.
|
||||
s.forkChoiceStore.ProcessAttestation(ctx, indexedAtt.AttestingIndices, bytesutil.ToBytes32(a.Data.BeaconBlockRoot), a.Data.Target.Epoch)
|
||||
|
||||
return indexedAtt.AttestingIndices, nil
|
||||
}
|
||||
164
beacon-chain/blockchain/process_attestation_helpers.go
Normal file
164
beacon-chain/blockchain/process_attestation_helpers.go
Normal file
@@ -0,0 +1,164 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
|
||||
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/shared/attestationutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/featureconfig"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
)
|
||||
|
||||
// getAttPreState retrieves the att pre state by either from the cache or the DB.
|
||||
func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (*stateTrie.BeaconState, error) {
|
||||
s.checkpointStateLock.Lock()
|
||||
defer s.checkpointStateLock.Unlock()
|
||||
cachedState, err := s.checkpointState.StateByCheckpoint(c)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get cached checkpoint state")
|
||||
}
|
||||
if cachedState != nil {
|
||||
return cachedState, nil
|
||||
}
|
||||
|
||||
var baseState *stateTrie.BeaconState
|
||||
if featureconfig.Get().NewStateMgmt {
|
||||
baseState, err = s.stateGen.StateByRoot(ctx, bytesutil.ToBytes32(c.Root))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get pre state for slot %d", helpers.StartSlot(c.Epoch))
|
||||
}
|
||||
} else {
|
||||
if featureconfig.Get().CheckHeadState {
|
||||
headRoot, err := s.HeadRoot(ctx)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get head root")
|
||||
}
|
||||
if bytes.Equal(headRoot, c.Root) {
|
||||
st, err := s.HeadState(ctx)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get head state")
|
||||
}
|
||||
if err := s.checkpointState.AddCheckpointState(&cache.CheckpointState{
|
||||
Checkpoint: c,
|
||||
State: st.Copy(),
|
||||
}); err != nil {
|
||||
return nil, errors.Wrap(err, "could not saved checkpoint state to cache")
|
||||
}
|
||||
return st, nil
|
||||
}
|
||||
}
|
||||
|
||||
baseState, err = s.beaconDB.State(ctx, bytesutil.ToBytes32(c.Root))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get pre state for slot %d", helpers.StartSlot(c.Epoch))
|
||||
}
|
||||
}
|
||||
|
||||
if baseState == nil {
|
||||
return nil, fmt.Errorf("pre state of target block %d does not exist", helpers.StartSlot(c.Epoch))
|
||||
}
|
||||
|
||||
if helpers.StartSlot(c.Epoch) > baseState.Slot() {
|
||||
baseState, err = state.ProcessSlots(ctx, baseState, helpers.StartSlot(c.Epoch))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not process slots up to %d", helpers.StartSlot(c.Epoch))
|
||||
}
|
||||
}
|
||||
|
||||
if err := s.checkpointState.AddCheckpointState(&cache.CheckpointState{
|
||||
Checkpoint: c,
|
||||
State: baseState.Copy(),
|
||||
}); err != nil {
|
||||
return nil, errors.Wrap(err, "could not saved checkpoint state to cache")
|
||||
}
|
||||
|
||||
return baseState, nil
|
||||
}
|
||||
|
||||
// verifyAttTargetEpoch validates attestation is from the current or previous epoch.
|
||||
func (s *Service) verifyAttTargetEpoch(ctx context.Context, genesisTime uint64, nowTime uint64, c *ethpb.Checkpoint) error {
|
||||
currentSlot := (nowTime - genesisTime) / params.BeaconConfig().SecondsPerSlot
|
||||
currentEpoch := helpers.SlotToEpoch(currentSlot)
|
||||
var prevEpoch uint64
|
||||
// Prevents previous epoch under flow
|
||||
if currentEpoch > 1 {
|
||||
prevEpoch = currentEpoch - 1
|
||||
}
|
||||
if c.Epoch != prevEpoch && c.Epoch != currentEpoch {
|
||||
return fmt.Errorf("target epoch %d does not match current epoch %d or prev epoch %d", c.Epoch, currentEpoch, prevEpoch)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// verifyBeaconBlock verifies beacon head block is known and not from the future.
|
||||
func (s *Service) verifyBeaconBlock(ctx context.Context, data *ethpb.AttestationData) error {
|
||||
b, err := s.beaconDB.Block(ctx, bytesutil.ToBytes32(data.BeaconBlockRoot))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if b == nil || b.Block == nil {
|
||||
return fmt.Errorf("beacon block %#x does not exist", bytesutil.Trunc(data.BeaconBlockRoot))
|
||||
}
|
||||
if b.Block.Slot > data.Slot {
|
||||
return fmt.Errorf("could not process attestation for future block, %d > %d", b.Block.Slot, data.Slot)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// verifyAttestation validates input attestation is valid.
|
||||
func (s *Service) verifyAttestation(ctx context.Context, baseState *stateTrie.BeaconState, a *ethpb.Attestation) (*ethpb.IndexedAttestation, error) {
|
||||
committee, err := helpers.BeaconCommitteeFromState(baseState, a.Data.Slot, a.Data.CommitteeIndex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
indexedAtt, err := attestationutil.ConvertToIndexed(ctx, a, committee)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not convert attestation to indexed attestation")
|
||||
}
|
||||
|
||||
if err := blocks.VerifyIndexedAttestation(ctx, baseState, indexedAtt); err != nil {
|
||||
if err == blocks.ErrSigFailedToVerify {
|
||||
// When sig fails to verify, check if there's a differences in committees due to
|
||||
// different seeds.
|
||||
var aState *stateTrie.BeaconState
|
||||
var err error
|
||||
if featureconfig.Get().NewStateMgmt {
|
||||
aState, err = s.stateGen.StateByRoot(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
aState, err = s.beaconDB.State(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
epoch := helpers.SlotToEpoch(a.Data.Slot)
|
||||
origSeed, err := helpers.Seed(baseState, epoch, params.BeaconConfig().DomainBeaconAttester)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get original seed")
|
||||
}
|
||||
|
||||
aSeed, err := helpers.Seed(aState, epoch, params.BeaconConfig().DomainBeaconAttester)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get attester's seed")
|
||||
}
|
||||
if origSeed != aSeed {
|
||||
return nil, fmt.Errorf("could not verify indexed attestation due to differences in seeds: %v != %v",
|
||||
hex.EncodeToString(bytesutil.Trunc(origSeed[:])), hex.EncodeToString(bytesutil.Trunc(aSeed[:])))
|
||||
}
|
||||
}
|
||||
return nil, errors.Wrap(err, "could not verify indexed attestation")
|
||||
}
|
||||
|
||||
return indexedAtt, nil
|
||||
}
|
||||
397
beacon-chain/blockchain/process_attestation_test.go
Normal file
397
beacon-chain/blockchain/process_attestation_test.go
Normal file
@@ -0,0 +1,397 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"context"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/go-ssz"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
|
||||
beaconstate "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
)
|
||||
|
||||
func TestStore_OnAttestation(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
cfg := &Config{BeaconDB: db, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
|
||||
service, err := NewService(ctx, cfg)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
_, err = blockTree1(db, []byte{'g'})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
BlkWithOutState := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: 0}}
|
||||
if err := db.SaveBlock(ctx, BlkWithOutState); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
BlkWithOutStateRoot, _ := ssz.HashTreeRoot(BlkWithOutState.Block)
|
||||
|
||||
BlkWithStateBadAtt := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: 1}}
|
||||
if err := db.SaveBlock(ctx, BlkWithStateBadAtt); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
BlkWithStateBadAttRoot, _ := ssz.HashTreeRoot(BlkWithStateBadAtt.Block)
|
||||
|
||||
s, err := beaconstate.InitializeFromProto(&pb.BeaconState{})
|
||||
if err := s.SetSlot(100 * params.BeaconConfig().SlotsPerEpoch); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := service.beaconDB.SaveState(ctx, s, BlkWithStateBadAttRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
BlkWithValidState := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: 2}}
|
||||
if err := db.SaveBlock(ctx, BlkWithValidState); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
BlkWithValidStateRoot, _ := ssz.HashTreeRoot(BlkWithValidState.Block)
|
||||
s, _ = stateTrie.InitializeFromProto(&pb.BeaconState{
|
||||
Fork: &pb.Fork{
|
||||
Epoch: 0,
|
||||
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
},
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
})
|
||||
if err := service.beaconDB.SaveState(ctx, s, BlkWithValidStateRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
a *ethpb.Attestation
|
||||
s *pb.BeaconState
|
||||
wantErr bool
|
||||
wantErrString string
|
||||
}{
|
||||
{
|
||||
name: "attestation's data slot not aligned with target vote",
|
||||
a: ðpb.Attestation{Data: ðpb.AttestationData{Slot: params.BeaconConfig().SlotsPerEpoch, Target: ðpb.Checkpoint{}}},
|
||||
s: &pb.BeaconState{},
|
||||
wantErr: true,
|
||||
wantErrString: "data slot is not in the same epoch as target 1 != 0",
|
||||
},
|
||||
{
|
||||
name: "attestation's target root not in db",
|
||||
a: ðpb.Attestation{Data: ðpb.AttestationData{Target: ðpb.Checkpoint{Root: []byte{'A'}}}},
|
||||
s: &pb.BeaconState{},
|
||||
wantErr: true,
|
||||
wantErrString: "target root does not exist in db",
|
||||
},
|
||||
{
|
||||
name: "no pre state for attestations's target block",
|
||||
a: ðpb.Attestation{Data: ðpb.AttestationData{Target: ðpb.Checkpoint{Root: BlkWithOutStateRoot[:]}}},
|
||||
s: &pb.BeaconState{},
|
||||
wantErr: true,
|
||||
wantErrString: "pre state of target block 0 does not exist",
|
||||
},
|
||||
{
|
||||
name: "process attestation doesn't match current epoch",
|
||||
a: ðpb.Attestation{Data: ðpb.AttestationData{Slot: 100 * params.BeaconConfig().SlotsPerEpoch, Target: ðpb.Checkpoint{Epoch: 100,
|
||||
Root: BlkWithStateBadAttRoot[:]}}},
|
||||
s: &pb.BeaconState{Slot: 100 * params.BeaconConfig().SlotsPerEpoch},
|
||||
wantErr: true,
|
||||
wantErrString: "does not match current epoch",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
_, err := service.onAttestation(ctx, tt.a)
|
||||
if tt.wantErr {
|
||||
if !strings.Contains(err.Error(), tt.wantErrString) {
|
||||
t.Errorf("Store.onAttestation() error = %v, wantErr = %v", err, tt.wantErrString)
|
||||
}
|
||||
} else {
|
||||
t.Error(err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestStore_SaveCheckpointState(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
params.UseDemoBeaconConfig()
|
||||
|
||||
cfg := &Config{BeaconDB: db}
|
||||
service, err := NewService(ctx, cfg)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
s, _ := stateTrie.InitializeFromProto(&pb.BeaconState{
|
||||
Fork: &pb.Fork{
|
||||
Epoch: 0,
|
||||
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
},
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
StateRoots: make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot),
|
||||
BlockRoots: make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot),
|
||||
LatestBlockHeader: ðpb.BeaconBlockHeader{},
|
||||
JustificationBits: []byte{0},
|
||||
Slashings: make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector),
|
||||
FinalizedCheckpoint: ðpb.Checkpoint{},
|
||||
})
|
||||
r := [32]byte{'g'}
|
||||
if err := service.beaconDB.SaveState(ctx, s, r); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
service.justifiedCheckpt = ðpb.Checkpoint{Root: r[:]}
|
||||
service.bestJustifiedCheckpt = ðpb.Checkpoint{Root: r[:]}
|
||||
service.finalizedCheckpt = ðpb.Checkpoint{Root: r[:]}
|
||||
service.prevFinalizedCheckpt = ðpb.Checkpoint{Root: r[:]}
|
||||
|
||||
cp1 := ðpb.Checkpoint{Epoch: 1, Root: []byte{'A'}}
|
||||
service.beaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'A'}))
|
||||
s1, err := service.getAttPreState(ctx, cp1)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if s1.Slot() != 1*params.BeaconConfig().SlotsPerEpoch {
|
||||
t.Errorf("Wanted state slot: %d, got: %d", 1*params.BeaconConfig().SlotsPerEpoch, s1.Slot())
|
||||
}
|
||||
|
||||
cp2 := ðpb.Checkpoint{Epoch: 2, Root: []byte{'B'}}
|
||||
service.beaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'B'}))
|
||||
s2, err := service.getAttPreState(ctx, cp2)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if s2.Slot() != 2*params.BeaconConfig().SlotsPerEpoch {
|
||||
t.Errorf("Wanted state slot: %d, got: %d", 2*params.BeaconConfig().SlotsPerEpoch, s2.Slot())
|
||||
}
|
||||
|
||||
s1, err = service.getAttPreState(ctx, cp1)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if s1.Slot() != 1*params.BeaconConfig().SlotsPerEpoch {
|
||||
t.Errorf("Wanted state slot: %d, got: %d", 1*params.BeaconConfig().SlotsPerEpoch, s1.Slot())
|
||||
}
|
||||
|
||||
s1, err = service.checkpointState.StateByCheckpoint(cp1)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if s1.Slot() != 1*params.BeaconConfig().SlotsPerEpoch {
|
||||
t.Errorf("Wanted state slot: %d, got: %d", 1*params.BeaconConfig().SlotsPerEpoch, s1.Slot())
|
||||
}
|
||||
|
||||
s2, err = service.checkpointState.StateByCheckpoint(cp2)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if s2.Slot() != 2*params.BeaconConfig().SlotsPerEpoch {
|
||||
t.Errorf("Wanted state slot: %d, got: %d", 2*params.BeaconConfig().SlotsPerEpoch, s2.Slot())
|
||||
}
|
||||
|
||||
s.SetSlot(params.BeaconConfig().SlotsPerEpoch + 1)
|
||||
service.justifiedCheckpt = ðpb.Checkpoint{Root: r[:]}
|
||||
service.bestJustifiedCheckpt = ðpb.Checkpoint{Root: r[:]}
|
||||
service.finalizedCheckpt = ðpb.Checkpoint{Root: r[:]}
|
||||
service.prevFinalizedCheckpt = ðpb.Checkpoint{Root: r[:]}
|
||||
cp3 := ðpb.Checkpoint{Epoch: 1, Root: []byte{'C'}}
|
||||
service.beaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'C'}))
|
||||
s3, err := service.getAttPreState(ctx, cp3)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if s3.Slot() != s.Slot() {
|
||||
t.Errorf("Wanted state slot: %d, got: %d", s.Slot(), s3.Slot())
|
||||
}
|
||||
}
|
||||
|
||||
func TestStore_UpdateCheckpointState(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
cfg := &Config{BeaconDB: db}
|
||||
service, err := NewService(ctx, cfg)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
epoch := uint64(1)
|
||||
baseState, _ := testutil.DeterministicGenesisState(t, 1)
|
||||
baseState.SetSlot(epoch * params.BeaconConfig().SlotsPerEpoch)
|
||||
checkpoint := ðpb.Checkpoint{Epoch: epoch}
|
||||
service.beaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(checkpoint.Root))
|
||||
returned, err := service.getAttPreState(ctx, checkpoint)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if baseState.Slot() != returned.Slot() {
|
||||
t.Error("Incorrectly returned base state")
|
||||
}
|
||||
|
||||
cached, err := service.checkpointState.StateByCheckpoint(checkpoint)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if cached == nil {
|
||||
t.Error("State should have been cached")
|
||||
}
|
||||
|
||||
epoch = uint64(2)
|
||||
newCheckpoint := ðpb.Checkpoint{Epoch: epoch}
|
||||
service.beaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(newCheckpoint.Root))
|
||||
returned, err = service.getAttPreState(ctx, newCheckpoint)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
baseState, err = state.ProcessSlots(ctx, baseState, helpers.StartSlot(newCheckpoint.Epoch))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if baseState.Slot() != returned.Slot() {
|
||||
t.Error("Incorrectly returned base state")
|
||||
}
|
||||
|
||||
cached, err = service.checkpointState.StateByCheckpoint(newCheckpoint)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(returned, cached) {
|
||||
t.Error("Incorrectly cached base state")
|
||||
}
|
||||
}
|
||||
|
||||
func TestAttEpoch_MatchPrevEpoch(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
cfg := &Config{BeaconDB: db}
|
||||
service, err := NewService(ctx, cfg)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := service.verifyAttTargetEpoch(
|
||||
ctx,
|
||||
0,
|
||||
params.BeaconConfig().SlotsPerEpoch*params.BeaconConfig().SecondsPerSlot,
|
||||
ðpb.Checkpoint{}); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAttEpoch_MatchCurrentEpoch(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
cfg := &Config{BeaconDB: db}
|
||||
service, err := NewService(ctx, cfg)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := service.verifyAttTargetEpoch(
|
||||
ctx,
|
||||
0,
|
||||
params.BeaconConfig().SlotsPerEpoch*params.BeaconConfig().SecondsPerSlot,
|
||||
ðpb.Checkpoint{Epoch: 1}); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAttEpoch_NotMatch(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
cfg := &Config{BeaconDB: db}
|
||||
service, err := NewService(ctx, cfg)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := service.verifyAttTargetEpoch(
|
||||
ctx,
|
||||
0,
|
||||
2*params.BeaconConfig().SlotsPerEpoch*params.BeaconConfig().SecondsPerSlot,
|
||||
ðpb.Checkpoint{}); !strings.Contains(err.Error(),
|
||||
"target epoch 0 does not match current epoch 2 or prev epoch 1") {
|
||||
t.Error("Did not receive wanted error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestVerifyBeaconBlock_NoBlock(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
cfg := &Config{BeaconDB: db}
|
||||
service, err := NewService(ctx, cfg)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
d := ðpb.AttestationData{}
|
||||
if err := service.verifyBeaconBlock(ctx, d); !strings.Contains(err.Error(), "beacon block does not exist") {
|
||||
t.Error("Did not receive the wanted error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestVerifyBeaconBlock_futureBlock(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
cfg := &Config{BeaconDB: db}
|
||||
service, err := NewService(ctx, cfg)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
b := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: 2}}
|
||||
service.beaconDB.SaveBlock(ctx, b)
|
||||
r, _ := ssz.HashTreeRoot(b.Block)
|
||||
d := ðpb.AttestationData{Slot: 1, BeaconBlockRoot: r[:]}
|
||||
|
||||
if err := service.verifyBeaconBlock(ctx, d); !strings.Contains(err.Error(), "could not process attestation for future block") {
|
||||
t.Error("Did not receive the wanted error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestVerifyBeaconBlock_OK(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
cfg := &Config{BeaconDB: db}
|
||||
service, err := NewService(ctx, cfg)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
b := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: 2}}
|
||||
service.beaconDB.SaveBlock(ctx, b)
|
||||
r, _ := ssz.HashTreeRoot(b.Block)
|
||||
d := ðpb.AttestationData{Slot: 2, BeaconBlockRoot: r[:]}
|
||||
|
||||
if err := service.verifyBeaconBlock(ctx, d); err != nil {
|
||||
t.Error("Did not receive the wanted error")
|
||||
}
|
||||
}
|
||||
360
beacon-chain/blockchain/process_block.go
Normal file
360
beacon-chain/blockchain/process_block.go
Normal file
@@ -0,0 +1,360 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/go-ssz"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/flags"
|
||||
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/shared/attestationutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/featureconfig"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// onBlock is called when a gossip block is received. It runs regular state transition on the block.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
// onBlock processes an incoming, fully verified block: it runs the state
// transition, persists block and post-state, feeds fork choice, and advances
// the justified/finalized checkpoints per the spec's on_block handler.
//
// Spec pseudocode definition:
//   def on_block(store: Store, block: BeaconBlock) -> None:
//    # Make a copy of the state to avoid mutability issues
//    assert block.parent_root in store.block_states
//    pre_state = store.block_states[block.parent_root].copy()
//    # Blocks cannot be in the future. If they are, their consideration must be delayed until the are in the past.
//    assert store.time >= pre_state.genesis_time + block.slot * SECONDS_PER_SLOT
//    # Add new block to the store
//    store.blocks[signing_root(block)] = block
//    # Check block is a descendant of the finalized block
//    assert (
//        get_ancestor(store, signing_root(block), store.blocks[store.finalized_checkpoint.root].slot) ==
//        store.finalized_checkpoint.root
//    )
//    # Check that block is later than the finalized epoch slot
//    assert block.slot > compute_start_slot_of_epoch(store.finalized_checkpoint.epoch)
//    # Check the block is valid and compute the post-state
//    state = state_transition(pre_state, block)
//    # Add new state for this block to the store
//    store.block_states[signing_root(block)] = state
//
//    # Update justified checkpoint
//    if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch:
//        if state.current_justified_checkpoint.epoch > store.best_justified_checkpoint.epoch:
//            store.best_justified_checkpoint = state.current_justified_checkpoint
//
//    # Update finalized checkpoint
//    if state.finalized_checkpoint.epoch > store.finalized_checkpoint.epoch:
//        store.finalized_checkpoint = state.finalized_checkpoint
func (s *Service) onBlock(ctx context.Context, signed *ethpb.SignedBeaconBlock) (*stateTrie.BeaconState, error) {
	ctx, span := trace.StartSpan(ctx, "blockchain.onBlock")
	defer span.End()

	if signed == nil || signed.Block == nil {
		return nil, errors.New("nil block")
	}

	b := signed.Block

	// Retrieve incoming block's pre state. This also enforces the spec's
	// pre-state, future-slot, descendant, and finalized-slot assertions.
	preState, err := s.getBlockPreState(ctx, b)
	if err != nil {
		return nil, err
	}
	// Captured before the transition so new validators can be detected afterwards.
	preStateValidatorCount := preState.NumValidators()

	root, err := ssz.HashTreeRoot(b)
	if err != nil {
		return nil, errors.Wrapf(err, "could not get signing root of block %d", b.Slot)
	}
	log.WithFields(logrus.Fields{
		"slot": b.Slot,
		"root": fmt.Sprintf("0x%s...", hex.EncodeToString(root[:])[:8]),
	}).Info("Executing state transition on block")

	postState, err := state.ExecuteStateTransition(ctx, preState, signed)
	if err != nil {
		return nil, errors.Wrap(err, "could not execute state transition")
	}

	if err := s.beaconDB.SaveBlock(ctx, signed); err != nil {
		return nil, errors.Wrapf(err, "could not save block from slot %d", b.Slot)
	}

	if err := s.insertBlockToForkChoiceStore(ctx, b, root, postState); err != nil {
		return nil, errors.Wrapf(err, "could not insert block %d to fork choice store", b.Slot)
	}

	// Post-state persistence path depends on the state management feature flag.
	if featureconfig.Get().NewStateMgmt {
		if err := s.stateGen.SaveState(ctx, root, postState); err != nil {
			return nil, errors.Wrap(err, "could not save state")
		}
	} else {
		if err := s.beaconDB.SaveState(ctx, postState, root); err != nil {
			return nil, errors.Wrap(err, "could not save state")
		}
	}

	// Update justified check point.
	if postState.CurrentJustifiedCheckpoint().Epoch > s.justifiedCheckpt.Epoch {
		if err := s.updateJustified(ctx, postState); err != nil {
			return nil, err
		}
	}

	// Update finalized check point. Prune the block cache and helper caches on every new finalized epoch.
	if postState.FinalizedCheckpointEpoch() > s.finalizedCheckpt.Epoch {
		if err := s.beaconDB.SaveFinalizedCheckpoint(ctx, postState.FinalizedCheckpoint()); err != nil {
			return nil, errors.Wrap(err, "could not save finalized checkpoint")
		}

		if !featureconfig.Get().NewStateMgmt {
			// NOTE: this deliberately reads the checkpoints *before* they are
			// advanced below, deleting states for the previously finalized span.
			startSlot := helpers.StartSlot(s.prevFinalizedCheckpt.Epoch)
			endSlot := helpers.StartSlot(s.finalizedCheckpt.Epoch)
			if endSlot > startSlot {
				if err := s.rmStatesOlderThanLastFinalized(ctx, startSlot, endSlot); err != nil {
					return nil, errors.Wrapf(err, "could not delete states prior to finalized check point, range: %d, %d",
						startSlot, endSlot)
				}
			}
		}
		fRoot := bytesutil.ToBytes32(postState.FinalizedCheckpoint().Root)

		// Prune proto array fork choice nodes, all nodes before finalized check point will
		// be pruned.
		s.forkChoiceStore.Prune(ctx, fRoot)

		s.prevFinalizedCheckpt = s.finalizedCheckpt
		s.finalizedCheckpt = postState.FinalizedCheckpoint()

		if err := s.finalizedImpliesNewJustified(ctx, postState); err != nil {
			return nil, errors.Wrap(err, "could not save new justified")
		}

		if featureconfig.Get().NewStateMgmt {
			finalizedState, err := s.stateGen.StateByRoot(ctx, fRoot)
			if err != nil {
				return nil, err
			}
			if err := s.stateGen.MigrateToCold(ctx, finalizedState, fRoot); err != nil {
				return nil, err
			}
		}
	}

	// Update validator indices in database as needed.
	if err := s.saveNewValidators(ctx, preStateValidatorCount, postState); err != nil {
		return nil, errors.Wrap(err, "could not save new validators")
	}

	// Epoch boundary bookkeeping such as logging epoch summaries.
	if postState.Slot() >= s.nextEpochBoundarySlot {
		logEpochData(postState)
		reportEpochMetrics(postState)

		// Update committees cache at epoch boundary slot.
		if err := helpers.UpdateCommitteeCache(postState, helpers.CurrentEpoch(postState)); err != nil {
			return nil, err
		}
		if err := helpers.UpdateProposerIndicesInCache(postState, helpers.CurrentEpoch(postState)); err != nil {
			return nil, err
		}

		s.nextEpochBoundarySlot = helpers.StartSlot(helpers.NextEpoch(postState))
	}

	// Delete the processed block attestations from attestation pool.
	if err := s.deletePoolAtts(b.Body.Attestations); err != nil {
		return nil, err
	}

	// Delete the processed block attester slashings from slashings pool.
	for i := 0; i < len(b.Body.AttesterSlashings); i++ {
		s.slashingPool.MarkIncludedAttesterSlashing(b.Body.AttesterSlashings[i])
	}

	return postState, nil
}
|
||||
|
||||
// onBlockInitialSyncStateTransition is called when an initial sync block is received.
// It runs state transition on the block and without any BLS verification. The excluded BLS verification
// includes attestation's aggregated signature. It also does not save attestations.
// Unlike onBlock, intermediate post-states are held in an in-memory cache
// (initSyncState) rather than written to the DB on every block, unless the
// new state management feature flag is enabled.
func (s *Service) onBlockInitialSyncStateTransition(ctx context.Context, signed *ethpb.SignedBeaconBlock) error {
	ctx, span := trace.StartSpan(ctx, "blockchain.onBlock")
	defer span.End()

	if signed == nil || signed.Block == nil {
		return errors.New("nil block")
	}

	b := signed.Block

	// Retrieve incoming block's pre state.
	preState, err := s.verifyBlkPreState(ctx, b)
	if err != nil {
		return err
	}

	// Exit early if the pre state slot is higher than incoming block's slot.
	if preState.Slot() >= signed.Block.Slot {
		return nil
	}

	preStateValidatorCount := preState.NumValidators()
	postState, err := state.ExecuteStateTransitionNoVerifyAttSigs(ctx, preState, signed)
	if err != nil {
		return errors.Wrap(err, "could not execute state transition")
	}

	if err := s.beaconDB.SaveBlock(ctx, signed); err != nil {
		return errors.Wrapf(err, "could not save block from slot %d", b.Slot)
	}
	root, err := ssz.HashTreeRoot(b)
	if err != nil {
		return errors.Wrapf(err, "could not get signing root of block %d", b.Slot)
	}

	if err := s.insertBlockToForkChoiceStore(ctx, b, root, postState); err != nil {
		return errors.Wrapf(err, "could not insert block %d to fork choice store", b.Slot)
	}

	if featureconfig.Get().NewStateMgmt {
		if err := s.stateGen.SaveState(ctx, root, postState); err != nil {
			return errors.Wrap(err, "could not save state")
		}
	} else {
		// The lock is held until this function returns (deferred unlock),
		// guarding both the cache write here and the later cache reads below.
		s.initSyncStateLock.Lock()
		defer s.initSyncStateLock.Unlock()
		s.initSyncState[root] = postState.Copy()
		s.filterBoundaryCandidates(ctx, root, postState)
	}

	if flags.Get().EnableArchive {
		atts := signed.Block.Body.Attestations
		if err := s.beaconDB.SaveAttestations(ctx, atts); err != nil {
			return errors.Wrapf(err, "could not save block attestations from slot %d", b.Slot)
		}
	}

	// Update justified check point.
	if postState.CurrentJustifiedCheckpoint().Epoch > s.justifiedCheckpt.Epoch {
		if err := s.updateJustified(ctx, postState); err != nil {
			return err
		}
	}

	// Update finalized check point. Prune the block cache and helper caches on every new finalized epoch.
	if postState.FinalizedCheckpointEpoch() > s.finalizedCheckpt.Epoch {
		if !featureconfig.Get().NewStateMgmt {
			// Reads the checkpoints before they are advanced below, so the
			// deletion range covers the previously finalized span.
			startSlot := helpers.StartSlot(s.prevFinalizedCheckpt.Epoch)
			endSlot := helpers.StartSlot(s.finalizedCheckpt.Epoch)
			if endSlot > startSlot {
				if err := s.rmStatesOlderThanLastFinalized(ctx, startSlot, endSlot); err != nil {
					return errors.Wrapf(err, "could not delete states prior to finalized check point, range: %d, %d",
						startSlot, endSlot)
				}
			}

			if err := s.saveInitState(ctx, postState); err != nil {
				return errors.Wrap(err, "could not save init sync finalized state")
			}
		}

		if err := s.beaconDB.SaveFinalizedCheckpoint(ctx, postState.FinalizedCheckpoint()); err != nil {
			return errors.Wrap(err, "could not save finalized checkpoint")
		}

		s.prevFinalizedCheckpt = s.finalizedCheckpt
		s.finalizedCheckpt = postState.FinalizedCheckpoint()

		if err := s.finalizedImpliesNewJustified(ctx, postState); err != nil {
			return errors.Wrap(err, "could not save new justified")
		}

		if featureconfig.Get().NewStateMgmt {
			fRoot := bytesutil.ToBytes32(postState.FinalizedCheckpoint().Root)
			finalizedState, err := s.stateGen.StateByRoot(ctx, fRoot)
			if err != nil {
				return errors.Wrap(err, "could not get state by root for migration")
			}
			if err := s.stateGen.MigrateToCold(ctx, finalizedState, fRoot); err != nil {
				return errors.Wrap(err, "could not migrate with new finalized root")

			}
		}
	}

	// Update validator indices in database as needed.
	if err := s.saveNewValidators(ctx, preStateValidatorCount, postState); err != nil {
		return errors.Wrap(err, "could not save new validators")
	}

	// Bound the in-memory caches: persist boundary states once the boundary
	// root count exceeds initialSyncCacheSize, and prune non-finalized states
	// once the state cache exceeds maxCacheSize.
	if !featureconfig.Get().NewStateMgmt {
		numOfStates := len(s.boundaryRoots)
		if numOfStates > initialSyncCacheSize {
			if err = s.persistCachedStates(ctx, numOfStates); err != nil {
				return err
			}
		}
		if len(s.initSyncState) > maxCacheSize {
			s.pruneOldNonFinalizedStates()
		}
	}

	// Epoch boundary bookkeeping such as logging epoch summaries.
	if postState.Slot() >= s.nextEpochBoundarySlot {
		reportEpochMetrics(postState)
		s.nextEpochBoundarySlot = helpers.StartSlot(helpers.NextEpoch(postState))

		// Update committees cache at epoch boundary slot.
		if err := helpers.UpdateCommitteeCache(postState, helpers.CurrentEpoch(postState)); err != nil {
			return err
		}
		if err := helpers.UpdateProposerIndicesInCache(postState, helpers.CurrentEpoch(postState)); err != nil {
			return err
		}

		// Persist epoch-start states even during initial sync so restarts can
		// resume from a recent boundary.
		if !featureconfig.Get().NewStateMgmt && helpers.IsEpochStart(postState.Slot()) {
			if err := s.beaconDB.SaveState(ctx, postState, root); err != nil {
				return errors.Wrap(err, "could not save state")
			}
		}
	}

	return nil
}
|
||||
|
||||
// This feeds in the block and block's attestations to fork choice store. It's allows fork choice store
// to gain information on the most current chain.
// Missing ancestor blocks (e.g. received out of order during sync) are first
// back-filled from the DB so the block's parent exists in the store.
func (s *Service) insertBlockToForkChoiceStore(ctx context.Context, blk *ethpb.BeaconBlock, root [32]byte, state *stateTrie.BeaconState) error {
	if err := s.fillInForkChoiceMissingBlocks(ctx, blk, state); err != nil {
		return err
	}

	// Feed in block to fork choice store.
	if err := s.forkChoiceStore.ProcessBlock(ctx,
		blk.Slot, root, bytesutil.ToBytes32(blk.ParentRoot),
		state.CurrentJustifiedCheckpoint().Epoch,
		state.FinalizedCheckpointEpoch()); err != nil {
		return errors.Wrap(err, "could not process block for proto array fork choice")
	}

	// Feed in block's attestations to fork choice store.
	for _, a := range blk.Body.Attestations {
		// Resolve the attesting validator indices from the committee assignment
		// before registering the votes.
		committee, err := helpers.BeaconCommitteeFromState(state, a.Data.Slot, a.Data.CommitteeIndex)
		if err != nil {
			return err
		}
		indices, err := attestationutil.AttestingIndices(a.AggregationBits, committee)
		if err != nil {
			return err
		}
		s.forkChoiceStore.ProcessAttestation(ctx, indices, bytesutil.ToBytes32(a.Data.BeaconBlockRoot), a.Data.Target.Epoch)
	}

	return nil
}
|
||||
483
beacon-chain/blockchain/process_block_helpers.go
Normal file
483
beacon-chain/blockchain/process_block_helpers.go
Normal file
@@ -0,0 +1,483 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/go-ssz"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db/filters"
|
||||
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/featureconfig"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/traceutil"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// CurrentSlot returns the current slot based on time.
|
||||
func (s *Service) CurrentSlot() uint64 {
|
||||
return uint64(time.Now().Unix()-s.genesisTime.Unix()) / params.BeaconConfig().SecondsPerSlot
|
||||
}
|
||||
|
||||
// getBlockPreState returns the pre state of an incoming block. It uses the parent root of the block
// to retrieve the state in DB. It verifies the pre state's validity and the incoming block
// is in the correct time window.
func (s *Service) getBlockPreState(ctx context.Context, b *ethpb.BeaconBlock) (*stateTrie.BeaconState, error) {
	ctx, span := trace.StartSpan(ctx, "forkchoice.getBlockPreState")
	defer span.End()

	// Verify incoming block has a valid pre state.
	preState, err := s.verifyBlkPreState(ctx, b)
	if err != nil {
		return nil, err
	}

	// Verify block slot time is not from the future.
	if err := helpers.VerifySlotTime(preState.GenesisTime(), b.Slot); err != nil {
		return nil, err
	}

	// Verify block is a descendent of a finalized block.
	if err := s.verifyBlkDescendant(ctx, bytesutil.ToBytes32(b.ParentRoot), b.Slot); err != nil {
		return nil, err
	}

	// Verify block is later than the finalized epoch slot.
	if err := s.verifyBlkFinalizedSlot(b); err != nil {
		return nil, err
	}

	return preState, nil
}
|
||||
|
||||
// verifyBlkPreState validates input block has a valid pre-state.
|
||||
func (s *Service) verifyBlkPreState(ctx context.Context, b *ethpb.BeaconBlock) (*stateTrie.BeaconState, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "chainService.verifyBlkPreState")
|
||||
defer span.End()
|
||||
|
||||
if featureconfig.Get().NewStateMgmt {
|
||||
preState, err := s.stateGen.StateByRoot(ctx, bytesutil.ToBytes32(b.ParentRoot))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get pre state for slot %d", b.Slot)
|
||||
}
|
||||
if preState == nil {
|
||||
return nil, errors.Wrapf(err, "nil pre state for slot %d", b.Slot)
|
||||
}
|
||||
return preState, nil // No copy needed from newly hydrated state gen object.
|
||||
}
|
||||
|
||||
preState := s.initSyncState[bytesutil.ToBytes32(b.ParentRoot)]
|
||||
var err error
|
||||
if preState == nil {
|
||||
if featureconfig.Get().CheckHeadState {
|
||||
headRoot, err := s.HeadRoot(ctx)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get head root")
|
||||
}
|
||||
if bytes.Equal(headRoot, b.ParentRoot) {
|
||||
return s.HeadState(ctx)
|
||||
}
|
||||
}
|
||||
preState, err = s.beaconDB.State(ctx, bytesutil.ToBytes32(b.ParentRoot))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get pre state for slot %d", b.Slot)
|
||||
}
|
||||
if preState == nil {
|
||||
if bytes.Equal(s.finalizedCheckpt.Root, b.ParentRoot) {
|
||||
return nil, fmt.Errorf("pre state of slot %d does not exist", b.Slot)
|
||||
}
|
||||
preState, err = s.generateState(ctx, bytesutil.ToBytes32(s.finalizedCheckpt.Root), bytesutil.ToBytes32(b.ParentRoot))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return preState, nil // No copy needed from newly hydrated DB object.
|
||||
}
|
||||
return preState.Copy(), nil
|
||||
}
|
||||
|
||||
// verifyBlkDescendant validates input block root is a descendant of the
|
||||
// current finalized block root.
|
||||
func (s *Service) verifyBlkDescendant(ctx context.Context, root [32]byte, slot uint64) error {
|
||||
ctx, span := trace.StartSpan(ctx, "forkchoice.verifyBlkDescendant")
|
||||
defer span.End()
|
||||
|
||||
finalizedBlkSigned, err := s.beaconDB.Block(ctx, bytesutil.ToBytes32(s.finalizedCheckpt.Root))
|
||||
if err != nil || finalizedBlkSigned == nil || finalizedBlkSigned.Block == nil {
|
||||
return errors.Wrap(err, "could not get finalized block")
|
||||
}
|
||||
finalizedBlk := finalizedBlkSigned.Block
|
||||
|
||||
bFinalizedRoot, err := s.ancestor(ctx, root[:], finalizedBlk.Slot)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get finalized block root")
|
||||
}
|
||||
if bFinalizedRoot == nil {
|
||||
return fmt.Errorf("no finalized block known for block from slot %d", slot)
|
||||
}
|
||||
|
||||
if !bytes.Equal(bFinalizedRoot, s.finalizedCheckpt.Root) {
|
||||
err := fmt.Errorf("block from slot %d is not a descendent of the current finalized block slot %d, %#x != %#x",
|
||||
slot, finalizedBlk.Slot, bytesutil.Trunc(bFinalizedRoot), bytesutil.Trunc(s.finalizedCheckpt.Root))
|
||||
traceutil.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// verifyBlkFinalizedSlot validates input block is not less than or equal
|
||||
// to current finalized slot.
|
||||
func (s *Service) verifyBlkFinalizedSlot(b *ethpb.BeaconBlock) error {
|
||||
finalizedSlot := helpers.StartSlot(s.finalizedCheckpt.Epoch)
|
||||
if finalizedSlot >= b.Slot {
|
||||
return fmt.Errorf("block is equal or earlier than finalized block, slot %d < slot %d", b.Slot, finalizedSlot)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// saveNewValidators saves newly added validator indices from the state to db.
|
||||
// Does nothing if validator count has not changed.
|
||||
func (s *Service) saveNewValidators(ctx context.Context, preStateValidatorCount int, postState *stateTrie.BeaconState) error {
|
||||
postStateValidatorCount := postState.NumValidators()
|
||||
if preStateValidatorCount != postStateValidatorCount {
|
||||
indices := make([]uint64, 0)
|
||||
pubKeys := make([][48]byte, 0)
|
||||
for i := preStateValidatorCount; i < postStateValidatorCount; i++ {
|
||||
indices = append(indices, uint64(i))
|
||||
pubKeys = append(pubKeys, postState.PubkeyAtIndex(uint64(i)))
|
||||
}
|
||||
if err := s.beaconDB.SaveValidatorIndices(ctx, pubKeys, indices); err != nil {
|
||||
return errors.Wrapf(err, "could not save activated validators: %v", indices)
|
||||
}
|
||||
log.WithFields(logrus.Fields{
|
||||
"indices": indices,
|
||||
"totalValidatorCount": postStateValidatorCount - preStateValidatorCount,
|
||||
}).Trace("Validator indices saved in DB")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// rmStatesOlderThanLastFinalized deletes the states in db since last finalized check point.
// Both range endpoints are first snapped off skipped slots so the filter only
// covers slots that actually have blocks; the genesis state and the current
// head/finalized states are never deleted.
func (s *Service) rmStatesOlderThanLastFinalized(ctx context.Context, startSlot uint64, endSlot uint64) error {
	ctx, span := trace.StartSpan(ctx, "forkchoice.rmStatesBySlots")
	defer span.End()

	// Make sure start slot is not a skipped slot
	for i := startSlot; i > 0; i-- {
		filter := filters.NewFilter().SetStartSlot(i).SetEndSlot(i)
		b, err := s.beaconDB.Blocks(ctx, filter)
		if err != nil {
			return err
		}
		if len(b) > 0 {
			startSlot = i
			break
		}
	}

	// Make sure finalized slot is not a skipped slot.
	for i := endSlot; i > 0; i-- {
		filter := filters.NewFilter().SetStartSlot(i).SetEndSlot(i)
		b, err := s.beaconDB.Blocks(ctx, filter)
		if err != nil {
			return err
		}
		if len(b) > 0 {
			// End one slot before the last occupied slot so that state is kept.
			endSlot = i - 1
			break
		}
	}

	// Do not remove genesis state
	if startSlot == 0 {
		startSlot++
	}
	// If end slot comes less than start slot
	if endSlot < startSlot {
		endSlot = startSlot
	}

	filter := filters.NewFilter().SetStartSlot(startSlot).SetEndSlot(endSlot)
	roots, err := s.beaconDB.BlockRoots(ctx, filter)
	if err != nil {
		return err
	}

	// Last line of defence: never delete the head or finalized state roots.
	roots, err = s.filterBlockRoots(ctx, roots)
	if err != nil {
		return err
	}

	if err := s.beaconDB.DeleteStates(ctx, roots); err != nil {
		return err
	}

	return nil
}
|
||||
|
||||
// shouldUpdateCurrentJustified prevents bouncing attack, by only update conflicting justified
// checkpoints in the fork choice if in the early slots of the epoch.
// Otherwise, delay incorporation of new justified checkpoint until next epoch boundary.
// See https://ethresear.ch/t/prevention-of-bouncing-attack-on-ffg/6114 for more detailed analysis and discussion.
func (s *Service) shouldUpdateCurrentJustified(ctx context.Context, newJustifiedCheckpt *ethpb.Checkpoint) (bool, error) {
	// Early in the epoch any update is safe.
	if helpers.SlotsSinceEpochStarts(s.CurrentSlot()) < params.BeaconConfig().SafeSlotsToUpdateJustified {
		return true, nil
	}
	newJustifiedBlockSigned, err := s.beaconDB.Block(ctx, bytesutil.ToBytes32(newJustifiedCheckpt.Root))
	if err != nil {
		return false, err
	}
	if newJustifiedBlockSigned == nil || newJustifiedBlockSigned.Block == nil {
		return false, errors.New("nil new justified block")
	}
	newJustifiedBlock := newJustifiedBlockSigned.Block
	// The candidate must be strictly newer than the current justified epoch's start.
	if newJustifiedBlock.Slot <= helpers.StartSlot(s.justifiedCheckpt.Epoch) {
		return false, nil
	}
	justifiedBlockSigned, err := s.beaconDB.Block(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root))
	if err != nil {
		return false, err
	}
	if justifiedBlockSigned == nil || justifiedBlockSigned.Block == nil {
		return false, errors.New("nil justified block")
	}
	justifiedBlock := justifiedBlockSigned.Block
	// The candidate must also be a descendant of the current justified block.
	b, err := s.ancestor(ctx, newJustifiedCheckpt.Root, justifiedBlock.Slot)
	if err != nil {
		return false, err
	}
	if !bytes.Equal(b, s.justifiedCheckpt.Root) {
		return false, nil
	}
	return true, nil
}
|
||||
|
||||
// updateJustified tracks the best justified checkpoint, conditionally advances
// the service's current justified checkpoint (subject to the bouncing-attack
// guard in shouldUpdateCurrentJustified), persists the justified state when
// running without new state management, and saves the checkpoint to the DB.
func (s *Service) updateJustified(ctx context.Context, state *stateTrie.BeaconState) error {
	cpt := state.CurrentJustifiedCheckpoint()
	if cpt.Epoch > s.bestJustifiedCheckpt.Epoch {
		s.bestJustifiedCheckpt = cpt
	}
	canUpdate, err := s.shouldUpdateCurrentJustified(ctx, cpt)
	if err != nil {
		return err
	}
	if canUpdate {
		s.prevJustifiedCheckpt = s.justifiedCheckpt
		s.justifiedCheckpt = cpt
	}

	if !featureconfig.Get().NewStateMgmt {
		justifiedRoot := bytesutil.ToBytes32(cpt.Root)

		justifiedState := s.initSyncState[justifiedRoot]
		// If justified state is nil, resume back to normal syncing process and save
		// justified check point.
		if justifiedState == nil {
			if s.beaconDB.HasState(ctx, justifiedRoot) {
				return s.beaconDB.SaveJustifiedCheckpoint(ctx, cpt)
			}
			justifiedState, err = s.generateState(ctx, bytesutil.ToBytes32(s.finalizedCheckpt.Root), justifiedRoot)
			if err != nil {
				// Best effort: log the regeneration failure but still persist
				// the checkpoint itself.
				log.Error(err)
				return s.beaconDB.SaveJustifiedCheckpoint(ctx, cpt)
			}
		}
		if err := s.beaconDB.SaveState(ctx, justifiedState, justifiedRoot); err != nil {
			return errors.Wrap(err, "could not save justified state")
		}
	}

	return s.beaconDB.SaveJustifiedCheckpoint(ctx, cpt)
}
|
||||
|
||||
// This saves every finalized state in DB during initial sync, needed as part of optimization to
// use cache state during initial sync in case of restart.
// The finalized state is sourced, in order of preference, from the initial
// sync cache, the DB, or regeneration from the previous finalized root.
func (s *Service) saveInitState(ctx context.Context, state *stateTrie.BeaconState) error {
	cpt := state.FinalizedCheckpoint()
	finalizedRoot := bytesutil.ToBytes32(cpt.Root)
	fs := s.initSyncState[finalizedRoot]
	if fs == nil {
		var err error
		fs, err = s.beaconDB.State(ctx, finalizedRoot)
		if err != nil {
			return err
		}
		if fs == nil {
			fs, err = s.generateState(ctx, bytesutil.ToBytes32(s.prevFinalizedCheckpt.Root), finalizedRoot)
			if err != nil {
				// This might happen if the client was in sync and is now re-syncing for whatever reason.
				log.Warn("Initial sync cache did not have finalized state root cached")
				return err
			}
		}
	}

	if err := s.beaconDB.SaveState(ctx, fs, finalizedRoot); err != nil {
		return errors.Wrap(err, "could not save state")
	}
	return nil
}
|
||||
|
||||
// This filters block roots that are not known as head root and finalized root in DB.
|
||||
// It serves as the last line of defence before we prune states.
|
||||
func (s *Service) filterBlockRoots(ctx context.Context, roots [][32]byte) ([][32]byte, error) {
|
||||
f, err := s.beaconDB.FinalizedCheckpoint(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fRoot := f.Root
|
||||
h, err := s.beaconDB.HeadBlock(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hRoot, err := ssz.HashTreeRoot(h.Block)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
filtered := make([][32]byte, 0, len(roots))
|
||||
for _, root := range roots {
|
||||
if bytes.Equal(root[:], fRoot[:]) || bytes.Equal(root[:], hRoot[:]) {
|
||||
continue
|
||||
}
|
||||
filtered = append(filtered, root)
|
||||
}
|
||||
|
||||
return filtered, nil
|
||||
}
|
||||
|
||||
// ancestor returns the block root of an ancestry block from the input block root.
//
// Spec pseudocode definition:
//   def get_ancestor(store: Store, root: Hash, slot: Slot) -> Hash:
//    block = store.blocks[root]
//    if block.slot > slot:
//      return get_ancestor(store, block.parent_root, slot)
//    elif block.slot == slot:
//      return root
//    else:
//      return Bytes32()  # root is older than queried slot: no results.
func (s *Service) ancestor(ctx context.Context, root []byte, slot uint64) ([]byte, error) {
	ctx, span := trace.StartSpan(ctx, "forkchoice.ancestor")
	defer span.End()

	// Stop recursive ancestry lookup if context is cancelled.
	if ctx.Err() != nil {
		return nil, ctx.Err()
	}

	signed, err := s.beaconDB.Block(ctx, bytesutil.ToBytes32(root))
	if err != nil {
		return nil, errors.Wrap(err, "could not get ancestor block")
	}
	if signed == nil || signed.Block == nil {
		return nil, errors.New("nil block")
	}
	b := signed.Block

	// If we don't have the ancestor in the DB, simply return nil so rest of fork choice
	// operation can proceed. This is not an error condition.
	// (The b == nil half of this check is unreachable: it was rejected above.)
	if b == nil || b.Slot < slot {
		return nil, nil
	}

	if b.Slot == slot {
		return root, nil
	}

	// Recurse one parent closer to the target slot.
	return s.ancestor(ctx, b.ParentRoot, slot)
}
|
||||
|
||||
// This updates justified check point in store, if the new justified is later than stored justified or
|
||||
// the store's justified is not in chain with finalized check point.
|
||||
//
|
||||
// Spec definition:
|
||||
// if (
|
||||
// state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch
|
||||
// or get_ancestor(store, store.justified_checkpoint.root, finalized_slot) != store.finalized_checkpoint.root
|
||||
// ):
|
||||
// store.justified_checkpoint = state.current_justified_checkpoint
|
||||
func (s *Service) finalizedImpliesNewJustified(ctx context.Context, state *stateTrie.BeaconState) error {
|
||||
finalizedBlkSigned, err := s.beaconDB.Block(ctx, bytesutil.ToBytes32(s.finalizedCheckpt.Root))
|
||||
if err != nil || finalizedBlkSigned == nil || finalizedBlkSigned.Block == nil {
|
||||
return errors.Wrap(err, "could not get finalized block")
|
||||
}
|
||||
finalizedBlk := finalizedBlkSigned.Block
|
||||
|
||||
anc, err := s.ancestor(ctx, s.justifiedCheckpt.Root, finalizedBlk.Slot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Either the new justified is later than stored justified or not in chain with finalized check pint.
|
||||
if cpt := state.CurrentJustifiedCheckpoint(); cpt != nil && cpt.Epoch > s.justifiedCheckpt.Epoch || !bytes.Equal(anc, s.finalizedCheckpt.Root) {
|
||||
s.justifiedCheckpt = state.CurrentJustifiedCheckpoint()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// This retrieves missing blocks from DB (ie. the blocks that couldn't received over sync) and inserts them to fork choice store.
// This is useful for block tree visualizer and additional vote accounting.
// It walks parent roots backwards until it reaches a block already in the fork
// choice store, a block missing from the DB, or the finalized slot, then
// replays the collected ancestors into the store in chain order.
func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk *ethpb.BeaconBlock, state *stateTrie.BeaconState) error {
	pendingNodes := make([]*ethpb.BeaconBlock, 0)

	parentRoot := bytesutil.ToBytes32(blk.ParentRoot)
	slot := blk.Slot
	// Fork choice only matters from last finalized slot.
	higherThanFinalized := slot > helpers.StartSlot(s.finalizedCheckpt.Epoch)
	// As long as parent node is not in fork choice store, and parent node is in DB.
	for !s.forkChoiceStore.HasNode(parentRoot) && s.beaconDB.HasBlock(ctx, parentRoot) && higherThanFinalized {
		b, err := s.beaconDB.Block(ctx, parentRoot)
		if err != nil {
			return err
		}

		pendingNodes = append(pendingNodes, b.Block)
		parentRoot = bytesutil.ToBytes32(b.Block.ParentRoot)
		slot = b.Block.Slot
		higherThanFinalized = slot > helpers.StartSlot(s.finalizedCheckpt.Epoch)
	}

	// Insert parent nodes to fork choice store in reverse order.
	// Lower slots should be at the end of the list.
	for i := len(pendingNodes) - 1; i >= 0; i-- {
		b := pendingNodes[i]
		r, err := ssz.HashTreeRoot(b)
		if err != nil {
			return err
		}

		// NOTE(review): the justified/finalized epochs here come from the
		// incoming block's state, not each ancestor's own state.
		if err := s.forkChoiceStore.ProcessBlock(ctx,
			b.Slot, r, bytesutil.ToBytes32(b.ParentRoot),
			state.CurrentJustifiedCheckpoint().Epoch,
			state.FinalizedCheckpointEpoch()); err != nil {
			return errors.Wrap(err, "could not process block for proto array fork choice")
		}
	}

	return nil
}
|
||||
|
||||
// The deletes input attestations from the attestation pool, so proposers don't include them in a block for the future.
|
||||
func (s *Service) deletePoolAtts(atts []*ethpb.Attestation) error {
|
||||
for _, att := range atts {
|
||||
if helpers.IsAggregated(att) {
|
||||
if err := s.attPool.DeleteAggregatedAttestation(att); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if err := s.attPool.DeleteUnaggregatedAttestation(att); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
701
beacon-chain/blockchain/process_block_test.go
Normal file
701
beacon-chain/blockchain/process_block_test.go
Normal file
@@ -0,0 +1,701 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"context"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/go-ssz"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
|
||||
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
)
|
||||
|
||||
func TestStore_OnBlock(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
cfg := &Config{BeaconDB: db}
|
||||
service, err := NewService(ctx, cfg)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
genesisStateRoot := [32]byte{}
|
||||
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
|
||||
if err := db.SaveBlock(ctx, genesis); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
validGenesisRoot, err := ssz.HashTreeRoot(genesis.Block)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
st, err := stateTrie.InitializeFromProtoUnsafe(&pb.BeaconState{})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := service.beaconDB.SaveState(ctx, st.Copy(), validGenesisRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
roots, err := blockTree1(db, validGenesisRoot[:])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
random := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: 1, ParentRoot: validGenesisRoot[:]}}
|
||||
if err := db.SaveBlock(ctx, random); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
randomParentRoot, err := ssz.HashTreeRoot(random.Block)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if err := service.beaconDB.SaveState(ctx, st.Copy(), randomParentRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
randomParentRoot2 := roots[1]
|
||||
if err := service.beaconDB.SaveState(ctx, st.Copy(), bytesutil.ToBytes32(randomParentRoot2)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
blk *ethpb.BeaconBlock
|
||||
s *stateTrie.BeaconState
|
||||
time uint64
|
||||
wantErrString string
|
||||
}{
|
||||
{
|
||||
name: "parent block root does not have a state",
|
||||
blk: ðpb.BeaconBlock{},
|
||||
s: st.Copy(),
|
||||
wantErrString: "provided block root does not have block saved in the db",
|
||||
},
|
||||
{
|
||||
name: "block is from the feature",
|
||||
blk: ðpb.BeaconBlock{ParentRoot: randomParentRoot[:], Slot: params.BeaconConfig().FarFutureEpoch},
|
||||
s: st.Copy(),
|
||||
wantErrString: "could not process slot from the future",
|
||||
},
|
||||
{
|
||||
name: "could not get finalized block",
|
||||
blk: ðpb.BeaconBlock{ParentRoot: randomParentRoot[:]},
|
||||
s: st.Copy(),
|
||||
wantErrString: "block from slot 0 is not a descendent of the current finalized block",
|
||||
},
|
||||
{
|
||||
name: "same slot as finalized block",
|
||||
blk: ðpb.BeaconBlock{Slot: 0, ParentRoot: randomParentRoot2},
|
||||
s: st.Copy(),
|
||||
wantErrString: "block is equal or earlier than finalized block, slot 0 < slot 0",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
service.justifiedCheckpt = ðpb.Checkpoint{Root: validGenesisRoot[:]}
|
||||
service.bestJustifiedCheckpt = ðpb.Checkpoint{Root: validGenesisRoot[:]}
|
||||
service.finalizedCheckpt = ðpb.Checkpoint{Root: validGenesisRoot[:]}
|
||||
service.prevFinalizedCheckpt = ðpb.Checkpoint{Root: validGenesisRoot[:]}
|
||||
service.finalizedCheckpt.Root = roots[0]
|
||||
|
||||
_, err := service.onBlock(ctx, ðpb.SignedBeaconBlock{Block: tt.blk})
|
||||
if !strings.Contains(err.Error(), tt.wantErrString) {
|
||||
t.Errorf("Store.OnBlock() error = %v, wantErr = %v", err, tt.wantErrString)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestStore_SaveNewValidators(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
cfg := &Config{BeaconDB: db}
|
||||
service, err := NewService(ctx, cfg)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
preCount := 2 // validators 0 and validators 1
|
||||
s, _ := stateTrie.InitializeFromProto(&pb.BeaconState{Validators: []*ethpb.Validator{
|
||||
{PublicKey: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
|
||||
{PublicKey: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}},
|
||||
{PublicKey: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2}},
|
||||
{PublicKey: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3}},
|
||||
}})
|
||||
if err := service.saveNewValidators(ctx, preCount, s); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !db.HasValidatorIndex(ctx, []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2}) {
|
||||
t.Error("Wanted validator saved in db")
|
||||
}
|
||||
if !db.HasValidatorIndex(ctx, []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3}) {
|
||||
t.Error("Wanted validator saved in db")
|
||||
}
|
||||
if db.HasValidatorIndex(ctx, []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}) {
|
||||
t.Error("validator not suppose to be saved in db")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRemoveStateSinceLastFinalized(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
params.UseMinimalConfig()
|
||||
defer params.UseMainnetConfig()
|
||||
|
||||
cfg := &Config{BeaconDB: db}
|
||||
service, err := NewService(ctx, cfg)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Save 100 blocks in DB, each has a state.
|
||||
numBlocks := 100
|
||||
totalBlocks := make([]*ethpb.SignedBeaconBlock, numBlocks)
|
||||
blockRoots := make([][32]byte, 0)
|
||||
for i := 0; i < len(totalBlocks); i++ {
|
||||
totalBlocks[i] = ðpb.SignedBeaconBlock{
|
||||
Block: ðpb.BeaconBlock{
|
||||
Slot: uint64(i),
|
||||
},
|
||||
}
|
||||
r, err := ssz.HashTreeRoot(totalBlocks[i].Block)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
s, _ := stateTrie.InitializeFromProto(&pb.BeaconState{Slot: uint64(i)})
|
||||
if err := service.beaconDB.SaveState(ctx, s, r); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := service.beaconDB.SaveBlock(ctx, totalBlocks[i]); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
blockRoots = append(blockRoots, r)
|
||||
if err := service.beaconDB.SaveHeadBlockRoot(ctx, r); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// New finalized epoch: 1
|
||||
finalizedEpoch := uint64(1)
|
||||
finalizedSlot := finalizedEpoch * params.BeaconConfig().SlotsPerEpoch
|
||||
endSlot := helpers.StartSlot(finalizedEpoch+1) - 1 // Inclusive
|
||||
if err := service.rmStatesOlderThanLastFinalized(ctx, 0, endSlot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
for _, r := range blockRoots {
|
||||
s, err := service.beaconDB.State(ctx, r)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Also verifies genesis state didnt get deleted
|
||||
if s != nil && s.Slot() != finalizedSlot && s.Slot() != 0 && s.Slot() < endSlot {
|
||||
t.Errorf("State with slot %d should not be in DB", s.Slot())
|
||||
}
|
||||
}
|
||||
|
||||
// New finalized epoch: 5
|
||||
newFinalizedEpoch := uint64(5)
|
||||
newFinalizedSlot := newFinalizedEpoch * params.BeaconConfig().SlotsPerEpoch
|
||||
endSlot = helpers.StartSlot(newFinalizedEpoch+1) - 1 // Inclusive
|
||||
if err := service.rmStatesOlderThanLastFinalized(ctx, helpers.StartSlot(finalizedEpoch+1)-1, endSlot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
for _, r := range blockRoots {
|
||||
s, err := service.beaconDB.State(ctx, r)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Also verifies genesis state didnt get deleted
|
||||
if s != nil && s.Slot() != newFinalizedSlot && s.Slot() != finalizedSlot && s.Slot() != 0 && s.Slot() < endSlot {
|
||||
t.Errorf("State with slot %d should not be in DB", s.Slot())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRemoveStateSinceLastFinalized_EmptyStartSlot(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
params.UseMinimalConfig()
|
||||
defer params.UseMainnetConfig()
|
||||
|
||||
cfg := &Config{BeaconDB: db}
|
||||
service, err := NewService(ctx, cfg)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
service.genesisTime = time.Now()
|
||||
|
||||
update, err := service.shouldUpdateCurrentJustified(ctx, ðpb.Checkpoint{})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !update {
|
||||
t.Error("Should be able to update justified, received false")
|
||||
}
|
||||
|
||||
lastJustifiedBlk := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{ParentRoot: []byte{'G'}}}
|
||||
lastJustifiedRoot, _ := ssz.HashTreeRoot(lastJustifiedBlk.Block)
|
||||
newJustifiedBlk := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: 1, ParentRoot: lastJustifiedRoot[:]}}
|
||||
newJustifiedRoot, _ := ssz.HashTreeRoot(newJustifiedBlk.Block)
|
||||
if err := service.beaconDB.SaveBlock(ctx, newJustifiedBlk); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := service.beaconDB.SaveBlock(ctx, lastJustifiedBlk); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
diff := (params.BeaconConfig().SlotsPerEpoch - 1) * params.BeaconConfig().SecondsPerSlot
|
||||
service.genesisTime = time.Unix(time.Now().Unix()-int64(diff), 0)
|
||||
service.justifiedCheckpt = ðpb.Checkpoint{Root: lastJustifiedRoot[:]}
|
||||
update, err = service.shouldUpdateCurrentJustified(ctx, ðpb.Checkpoint{Root: newJustifiedRoot[:]})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !update {
|
||||
t.Error("Should be able to update justified, received false")
|
||||
}
|
||||
}
|
||||
|
||||
func TestShouldUpdateJustified_ReturnFalse(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
params.UseMinimalConfig()
|
||||
defer params.UseMainnetConfig()
|
||||
|
||||
cfg := &Config{BeaconDB: db}
|
||||
service, err := NewService(ctx, cfg)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
lastJustifiedBlk := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{ParentRoot: []byte{'G'}}}
|
||||
lastJustifiedRoot, _ := ssz.HashTreeRoot(lastJustifiedBlk.Block)
|
||||
newJustifiedBlk := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{ParentRoot: lastJustifiedRoot[:]}}
|
||||
newJustifiedRoot, _ := ssz.HashTreeRoot(newJustifiedBlk.Block)
|
||||
if err := service.beaconDB.SaveBlock(ctx, newJustifiedBlk); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := service.beaconDB.SaveBlock(ctx, lastJustifiedBlk); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
diff := (params.BeaconConfig().SlotsPerEpoch - 1) * params.BeaconConfig().SecondsPerSlot
|
||||
service.genesisTime = time.Unix(time.Now().Unix()-int64(diff), 0)
|
||||
service.justifiedCheckpt = ðpb.Checkpoint{Root: lastJustifiedRoot[:]}
|
||||
|
||||
update, err := service.shouldUpdateCurrentJustified(ctx, ðpb.Checkpoint{Root: newJustifiedRoot[:]})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if update {
|
||||
t.Error("Should not be able to update justified, received true")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCachedPreState_CanGetFromCache(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
cfg := &Config{BeaconDB: db}
|
||||
service, err := NewService(ctx, cfg)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
s, _ := stateTrie.InitializeFromProto(&pb.BeaconState{Slot: 1})
|
||||
r := [32]byte{'A'}
|
||||
b := ðpb.BeaconBlock{Slot: 1, ParentRoot: r[:]}
|
||||
service.initSyncState[r] = s
|
||||
|
||||
received, err := service.verifyBlkPreState(ctx, b)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(s.InnerStateUnsafe(), received.InnerStateUnsafe()) {
|
||||
t.Error("cached state not the same")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCachedPreState_CanGetFromDB(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
cfg := &Config{BeaconDB: db}
|
||||
service, err := NewService(ctx, cfg)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
r := [32]byte{'A'}
|
||||
b := ðpb.BeaconBlock{Slot: 1, ParentRoot: r[:]}
|
||||
|
||||
service.finalizedCheckpt = ðpb.Checkpoint{Root: r[:]}
|
||||
_, err = service.verifyBlkPreState(ctx, b)
|
||||
wanted := "pre state of slot 1 does not exist"
|
||||
if err.Error() != wanted {
|
||||
t.Error("Did not get wanted error")
|
||||
}
|
||||
|
||||
s, _ := stateTrie.InitializeFromProto(&pb.BeaconState{Slot: 1})
|
||||
service.beaconDB.SaveState(ctx, s, r)
|
||||
|
||||
received, err := service.verifyBlkPreState(ctx, b)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(s, received) {
|
||||
t.Error("cached state not the same")
|
||||
}
|
||||
}
|
||||
|
||||
func TestSaveInitState_CanSaveDelete(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
cfg := &Config{BeaconDB: db}
|
||||
service, err := NewService(ctx, cfg)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for i := uint64(0); i < 64; i++ {
|
||||
b := ðpb.BeaconBlock{Slot: i}
|
||||
s, _ := stateTrie.InitializeFromProto(&pb.BeaconState{Slot: i})
|
||||
r, _ := ssz.HashTreeRoot(b)
|
||||
service.initSyncState[r] = s
|
||||
}
|
||||
|
||||
// Set finalized root as slot 32
|
||||
finalizedRoot, _ := ssz.HashTreeRoot(ðpb.BeaconBlock{Slot: 32})
|
||||
|
||||
s, _ := stateTrie.InitializeFromProto(&pb.BeaconState{FinalizedCheckpoint: ðpb.Checkpoint{
|
||||
Epoch: 1, Root: finalizedRoot[:]}})
|
||||
if err := service.saveInitState(ctx, s); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Verify finalized state is saved in DB
|
||||
finalizedState, err := service.beaconDB.State(ctx, finalizedRoot)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if finalizedState == nil {
|
||||
t.Error("finalized state can't be nil")
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateJustified_CouldUpdateBest(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
cfg := &Config{BeaconDB: db}
|
||||
service, err := NewService(ctx, cfg)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
signedBlock := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{}}
|
||||
if err := db.SaveBlock(ctx, signedBlock); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
r, err := ssz.HashTreeRoot(signedBlock.Block)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
service.justifiedCheckpt = ðpb.Checkpoint{Root: []byte{'A'}}
|
||||
service.bestJustifiedCheckpt = ðpb.Checkpoint{Root: []byte{'A'}}
|
||||
st, err := stateTrie.InitializeFromProtoUnsafe(&pb.BeaconState{})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
service.initSyncState[r] = st.Copy()
|
||||
if err := db.SaveState(ctx, st.Copy(), r); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Could update
|
||||
s, _ := stateTrie.InitializeFromProto(&pb.BeaconState{CurrentJustifiedCheckpoint: ðpb.Checkpoint{Epoch: 1, Root: r[:]}})
|
||||
if err := service.updateJustified(context.Background(), s); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if service.bestJustifiedCheckpt.Epoch != s.CurrentJustifiedCheckpoint().Epoch {
|
||||
t.Error("Incorrect justified epoch in service")
|
||||
}
|
||||
|
||||
// Could not update
|
||||
service.bestJustifiedCheckpt.Epoch = 2
|
||||
if err := service.updateJustified(context.Background(), s); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if service.bestJustifiedCheckpt.Epoch != 2 {
|
||||
t.Error("Incorrect justified epoch in service")
|
||||
}
|
||||
}
|
||||
|
||||
func TestFilterBlockRoots_CanFilter(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
cfg := &Config{BeaconDB: db}
|
||||
service, err := NewService(ctx, cfg)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
fBlock := ðpb.BeaconBlock{}
|
||||
fRoot, _ := ssz.HashTreeRoot(fBlock)
|
||||
hBlock := ðpb.BeaconBlock{Slot: 1}
|
||||
headRoot, _ := ssz.HashTreeRoot(hBlock)
|
||||
st, _ := stateTrie.InitializeFromProtoUnsafe(&pb.BeaconState{})
|
||||
if err := service.beaconDB.SaveBlock(ctx, ðpb.SignedBeaconBlock{Block: fBlock}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := service.beaconDB.SaveState(ctx, st.Copy(), fRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := service.beaconDB.SaveFinalizedCheckpoint(ctx, ðpb.Checkpoint{Root: fRoot[:]}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := service.beaconDB.SaveBlock(ctx, ðpb.SignedBeaconBlock{Block: hBlock}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := service.beaconDB.SaveState(ctx, st.Copy(), headRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := service.beaconDB.SaveHeadBlockRoot(ctx, headRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
roots := [][32]byte{{'C'}, {'D'}, headRoot, {'E'}, fRoot, {'F'}}
|
||||
wanted := [][32]byte{{'C'}, {'D'}, {'E'}, {'F'}}
|
||||
|
||||
received, err := service.filterBlockRoots(ctx, roots)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(wanted, received) {
|
||||
t.Error("Did not filter correctly")
|
||||
}
|
||||
}
|
||||
|
||||
func TestPersistCache_CanSave(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
cfg := &Config{BeaconDB: db}
|
||||
service, err := NewService(ctx, cfg)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
st, _ := stateTrie.InitializeFromProtoUnsafe(&pb.BeaconState{})
|
||||
|
||||
for i := uint64(0); i < initialSyncCacheSize; i++ {
|
||||
st.SetSlot(i)
|
||||
root := [32]byte{}
|
||||
copy(root[:], bytesutil.Bytes32(i))
|
||||
service.initSyncState[root] = st.Copy()
|
||||
service.boundaryRoots = append(service.boundaryRoots, root)
|
||||
}
|
||||
|
||||
if err = service.persistCachedStates(ctx, initialSyncCacheSize); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for i := uint64(0); i < initialSyncCacheSize-minimumCacheSize; i++ {
|
||||
root := [32]byte{}
|
||||
copy(root[:], bytesutil.Bytes32(i))
|
||||
state, err := db.State(context.Background(), root)
|
||||
if err != nil {
|
||||
t.Errorf("State with root of %#x , could not be retrieved: %v", root, err)
|
||||
}
|
||||
if state == nil {
|
||||
t.Errorf("State with root of %#x , does not exist", root)
|
||||
}
|
||||
if state.Slot() != i {
|
||||
t.Errorf("Incorrect slot retrieved. Wanted %d but got %d", i, state.Slot())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFillForkChoiceMissingBlocks_CanSave(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
cfg := &Config{BeaconDB: db}
|
||||
service, err := NewService(ctx, cfg)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
service.forkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
|
||||
service.finalizedCheckpt = ðpb.Checkpoint{}
|
||||
|
||||
genesisStateRoot := [32]byte{}
|
||||
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
|
||||
if err := db.SaveBlock(ctx, genesis); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
validGenesisRoot, err := ssz.HashTreeRoot(genesis.Block)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
st, _ := stateTrie.InitializeFromProtoUnsafe(&pb.BeaconState{})
|
||||
if err := service.beaconDB.SaveState(ctx, st.Copy(), validGenesisRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
roots, err := blockTree1(db, validGenesisRoot[:])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
beaconState, _ := testutil.DeterministicGenesisState(t, 32)
|
||||
block := ðpb.BeaconBlock{Slot: 9, ParentRoot: roots[8]}
|
||||
if err := service.fillInForkChoiceMissingBlocks(context.Background(), block, beaconState); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// 5 nodes from the block tree 1. B0 - B3 - B4 - B6 - B8
|
||||
if len(service.forkChoiceStore.Nodes()) != 5 {
|
||||
t.Error("Miss match nodes")
|
||||
}
|
||||
|
||||
if !service.forkChoiceStore.HasNode(bytesutil.ToBytes32(roots[4])) {
|
||||
t.Error("Didn't save node")
|
||||
}
|
||||
if !service.forkChoiceStore.HasNode(bytesutil.ToBytes32(roots[6])) {
|
||||
t.Error("Didn't save node")
|
||||
}
|
||||
if !service.forkChoiceStore.HasNode(bytesutil.ToBytes32(roots[8])) {
|
||||
t.Error("Didn't save node")
|
||||
}
|
||||
}
|
||||
|
||||
func TestFillForkChoiceMissingBlocks_FilterFinalized(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
|
||||
cfg := &Config{BeaconDB: db}
|
||||
service, err := NewService(ctx, cfg)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
service.forkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
|
||||
// Set finalized epoch to 1.
|
||||
service.finalizedCheckpt = ðpb.Checkpoint{Epoch: 1}
|
||||
|
||||
genesisStateRoot := [32]byte{}
|
||||
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
|
||||
if err := db.SaveBlock(ctx, genesis); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
validGenesisRoot, err := ssz.HashTreeRoot(genesis.Block)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
st, _ := stateTrie.InitializeFromProtoUnsafe(&pb.BeaconState{})
|
||||
if err := service.beaconDB.SaveState(ctx, st.Copy(), validGenesisRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Define a tree branch, slot 63 <- 64 <- 65
|
||||
b63 := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: 63}}
|
||||
if err := service.beaconDB.SaveBlock(ctx, b63); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
r63, _ := ssz.HashTreeRoot(b63.Block)
|
||||
b64 := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: 64, ParentRoot: r63[:]}}
|
||||
if err := service.beaconDB.SaveBlock(ctx, b64); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
r64, _ := ssz.HashTreeRoot(b64.Block)
|
||||
b65 := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: 65, ParentRoot: r64[:]}}
|
||||
if err := service.beaconDB.SaveBlock(ctx, b65); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
beaconState, _ := testutil.DeterministicGenesisState(t, 32)
|
||||
if err := service.fillInForkChoiceMissingBlocks(context.Background(), b65.Block, beaconState); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// There should be 2 nodes, block 65 and block 64.
|
||||
if len(service.forkChoiceStore.Nodes()) != 2 {
|
||||
t.Error("Miss match nodes")
|
||||
}
|
||||
|
||||
// Block with slot 63 should be in fork choice because it's less than finalized epoch 1.
|
||||
if !service.forkChoiceStore.HasNode(r63) {
|
||||
t.Error("Didn't save node")
|
||||
}
|
||||
}
|
||||
|
||||
// blockTree1 constructs the following tree:
|
||||
// /- B1
|
||||
// B0 /- B5 - B7
|
||||
// \- B3 - B4 - B6 - B8
|
||||
// (B1, and B3 are all from the same slots)
|
||||
func blockTree1(db db.Database, genesisRoot []byte) ([][]byte, error) {
|
||||
b0 := ðpb.BeaconBlock{Slot: 0, ParentRoot: genesisRoot}
|
||||
r0, _ := ssz.HashTreeRoot(b0)
|
||||
b1 := ðpb.BeaconBlock{Slot: 1, ParentRoot: r0[:]}
|
||||
r1, _ := ssz.HashTreeRoot(b1)
|
||||
b3 := ðpb.BeaconBlock{Slot: 3, ParentRoot: r0[:]}
|
||||
r3, _ := ssz.HashTreeRoot(b3)
|
||||
b4 := ðpb.BeaconBlock{Slot: 4, ParentRoot: r3[:]}
|
||||
r4, _ := ssz.HashTreeRoot(b4)
|
||||
b5 := ðpb.BeaconBlock{Slot: 5, ParentRoot: r4[:]}
|
||||
r5, _ := ssz.HashTreeRoot(b5)
|
||||
b6 := ðpb.BeaconBlock{Slot: 6, ParentRoot: r4[:]}
|
||||
r6, _ := ssz.HashTreeRoot(b6)
|
||||
b7 := ðpb.BeaconBlock{Slot: 7, ParentRoot: r5[:]}
|
||||
r7, _ := ssz.HashTreeRoot(b7)
|
||||
b8 := ðpb.BeaconBlock{Slot: 8, ParentRoot: r6[:]}
|
||||
r8, _ := ssz.HashTreeRoot(b8)
|
||||
st, err := stateTrie.InitializeFromProtoUnsafe(&pb.BeaconState{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, b := range []*ethpb.BeaconBlock{b0, b1, b3, b4, b5, b6, b7, b8} {
|
||||
if err := db.SaveBlock(context.Background(), ðpb.SignedBeaconBlock{Block: b}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := db.SaveState(context.Background(), st.Copy(), bytesutil.ToBytes32(b.ParentRoot)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if err := db.SaveState(context.Background(), st.Copy(), r1); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := db.SaveState(context.Background(), st.Copy(), r7); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := db.SaveState(context.Background(), st.Copy(), r8); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return [][]byte{r0[:], r1[:], nil, r3[:], r4[:], r5[:], r6[:], r7[:], r8[:]}, nil
|
||||
}
|
||||
@@ -1,57 +1,27 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/go-ssz"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/featureconfig"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/slotutil"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// AttestationReceiver interface defines the methods of chain service receive and processing new attestations.
|
||||
type AttestationReceiver interface {
|
||||
ReceiveAttestation(ctx context.Context, att *ethpb.Attestation) error
|
||||
ReceiveAttestationNoPubsub(ctx context.Context, att *ethpb.Attestation) error
|
||||
}
|
||||
|
||||
// ReceiveAttestation is a function that defines the operations that are preformed on
|
||||
// attestation that is received from regular sync. The operations consist of:
|
||||
// 1. Gossip attestation to other peers
|
||||
// 2. Validate attestation, update validator's latest vote
|
||||
// 3. Apply fork choice to the processed attestation
|
||||
// 4. Save latest head info
|
||||
func (s *Service) ReceiveAttestation(ctx context.Context, att *ethpb.Attestation) error {
|
||||
ctx, span := trace.StartSpan(ctx, "beacon-chain.blockchain.ReceiveAttestation")
|
||||
defer span.End()
|
||||
|
||||
// Broadcast the new attestation to the network.
|
||||
if err := s.p2p.Broadcast(ctx, att); err != nil {
|
||||
return errors.Wrap(err, "could not broadcast attestation")
|
||||
}
|
||||
|
||||
attDataRoot, err := ssz.HashTreeRoot(att.Data)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to hash attestation")
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"attRoot": fmt.Sprintf("%#x", attDataRoot),
|
||||
"blockRoot": fmt.Sprintf("%#x", att.Data.BeaconBlockRoot),
|
||||
}).Debug("Broadcasting attestation")
|
||||
|
||||
if err := s.ReceiveAttestationNoPubsub(ctx, att); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
processedAtt.Inc()
|
||||
return nil
|
||||
IsValidAttestation(ctx context.Context, att *ethpb.Attestation) bool
|
||||
}
|
||||
|
||||
// ReceiveAttestationNoPubsub is a function that defines the operations that are preformed on
|
||||
@@ -63,50 +33,113 @@ func (s *Service) ReceiveAttestationNoPubsub(ctx context.Context, att *ethpb.Att
|
||||
ctx, span := trace.StartSpan(ctx, "beacon-chain.blockchain.ReceiveAttestationNoPubsub")
|
||||
defer span.End()
|
||||
|
||||
// Update forkchoice store for the new attestation
|
||||
attSlot, err := s.forkChoiceStore.OnAttestation(ctx, att)
|
||||
_, err := s.onAttestation(ctx, att)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not process attestation from fork choice service")
|
||||
return errors.Wrap(err, "could not process attestation")
|
||||
}
|
||||
|
||||
// Run fork choice for head block after updating fork choice store.
|
||||
headRoot, err := s.forkChoiceStore.Head(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get head from fork choice service")
|
||||
}
|
||||
// Only save head if it's different than the current head.
|
||||
if !bytes.Equal(headRoot, s.HeadRoot()) {
|
||||
headBlk, err := s.beaconDB.Block(ctx, bytesutil.ToBytes32(headRoot))
|
||||
if !featureconfig.Get().DisableUpdateHeadPerAttestation {
|
||||
baseState, err := s.getAttPreState(ctx, att.Data.Target)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not compute state from block head")
|
||||
return err
|
||||
}
|
||||
if err := s.saveHead(ctx, headBlk, bytesutil.ToBytes32(headRoot)); err != nil {
|
||||
return errors.Wrap(err, "could not save head")
|
||||
|
||||
// This updates fork choice head, if a new head could not be updated due to
|
||||
// long range or intermediate forking. It simply logs a warning and returns nil
|
||||
// as that's more appropriate than returning errors.
|
||||
if err := s.updateHead(ctx, baseState.Balances()); err != nil {
|
||||
log.Warnf("Resolving fork due to new attestation: %v", err)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Skip checking for competing attestation's target roots at epoch boundary.
|
||||
if !helpers.IsEpochStart(attSlot) {
|
||||
s.headLock.RLock()
|
||||
defer s.headLock.RUnlock()
|
||||
targetRoot, err := helpers.BlockRoot(s.headState, att.Data.Target.Epoch)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "could not get target root for epoch %d", att.Data.Target.Epoch)
|
||||
}
|
||||
isCompetingAtts(targetRoot, att.Data.Target.Root[:])
|
||||
}
|
||||
|
||||
processedAttNoPubsub.Inc()
|
||||
return nil
|
||||
}
|
||||
|
||||
// This checks if the attestation is from a competing chain, emits warning and updates metrics.
|
||||
func isCompetingAtts(headTargetRoot []byte, attTargetRoot []byte) {
|
||||
if !bytes.Equal(attTargetRoot, headTargetRoot) {
|
||||
log.WithFields(logrus.Fields{
|
||||
"attTargetRoot": hex.EncodeToString(attTargetRoot),
|
||||
"headTargetRoot": hex.EncodeToString(headTargetRoot),
|
||||
}).Warn("target heads different from new attestation")
|
||||
competingAtts.Inc()
|
||||
// IsValidAttestation returns true if the attestation can be verified against its pre-state.
|
||||
func (s *Service) IsValidAttestation(ctx context.Context, att *ethpb.Attestation) bool {
|
||||
baseState, err := s.getAttPreState(ctx, att.Data.Target)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to validate attestation")
|
||||
return false
|
||||
}
|
||||
|
||||
if err := blocks.VerifyAttestation(ctx, baseState, att); err != nil {
|
||||
log.WithError(err).Error("Failed to validate attestation")
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// This processes attestations from the attestation pool to account for validator votes and fork choice.
|
||||
func (s *Service) processAttestation(subscribedToStateEvents chan struct{}) {
|
||||
// Wait for state to be initialized.
|
||||
stateChannel := make(chan *feed.Event, 1)
|
||||
stateSub := s.stateNotifier.StateFeed().Subscribe(stateChannel)
|
||||
subscribedToStateEvents <- struct{}{}
|
||||
<-stateChannel
|
||||
stateSub.Unsubscribe()
|
||||
|
||||
st := slotutil.GetSlotTicker(s.genesisTime, params.BeaconConfig().SecondsPerSlot)
|
||||
for {
|
||||
select {
|
||||
case <-s.ctx.Done():
|
||||
return
|
||||
case <-st.C():
|
||||
ctx := context.Background()
|
||||
atts := s.attPool.ForkchoiceAttestations()
|
||||
for _, a := range atts {
|
||||
var hasState bool
|
||||
if featureconfig.Get().NewStateMgmt {
|
||||
hasState = s.stateGen.StateSummaryExists(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot))
|
||||
} else {
|
||||
hasState = s.beaconDB.HasState(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot)) && s.beaconDB.HasState(ctx, bytesutil.ToBytes32(a.Data.Target.Root))
|
||||
}
|
||||
|
||||
hasBlock := s.hasBlock(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot))
|
||||
if !(hasState && hasBlock) {
|
||||
continue
|
||||
}
|
||||
|
||||
if err := s.attPool.DeleteForkchoiceAttestation(a); err != nil {
|
||||
log.WithError(err).Error("Could not delete fork choice attestation in pool")
|
||||
}
|
||||
|
||||
if !s.verifyCheckpointEpoch(a.Data.Target) {
|
||||
continue
|
||||
}
|
||||
|
||||
if err := s.ReceiveAttestationNoPubsub(ctx, a); err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": a.Data.Slot,
|
||||
"committeeIndex": a.Data.CommitteeIndex,
|
||||
"beaconBlockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(a.Data.BeaconBlockRoot)),
|
||||
"targetRoot": fmt.Sprintf("%#x", bytesutil.Trunc(a.Data.Target.Root)),
|
||||
"aggregationCount": a.AggregationBits.Count(),
|
||||
}).WithError(err).Warn("Could not receive attestation in chain service")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// This verifies the epoch of input checkpoint is within current epoch and previous epoch
|
||||
// with respect to current time. Returns true if it's within, false if it's not.
|
||||
func (s *Service) verifyCheckpointEpoch(c *ethpb.Checkpoint) bool {
|
||||
now := uint64(time.Now().Unix())
|
||||
genesisTime := uint64(s.genesisTime.Unix())
|
||||
currentSlot := (now - genesisTime) / params.BeaconConfig().SecondsPerSlot
|
||||
currentEpoch := helpers.SlotToEpoch(currentSlot)
|
||||
|
||||
var prevEpoch uint64
|
||||
if currentEpoch > 1 {
|
||||
prevEpoch = currentEpoch - 1
|
||||
}
|
||||
|
||||
if c.Epoch != prevEpoch && c.Epoch != currentEpoch {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
@@ -2,112 +2,24 @@ package blockchain
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/prysmaticlabs/go-ssz"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
func TestReceiveAttestation_ProcessCorrectly(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
func TestVerifyCheckpointEpoch_Ok(t *testing.T) {
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
ctx := context.Background()
|
||||
|
||||
chainService := setupBeaconChain(t, db)
|
||||
r, _ := ssz.SigningRoot(ðpb.BeaconBlock{})
|
||||
chainService.forkChoiceStore = &store{headRoot: r[:]}
|
||||
chainService.genesisTime = time.Now()
|
||||
|
||||
b := ðpb.BeaconBlock{}
|
||||
if err := chainService.beaconDB.SaveBlock(ctx, b); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
root, err := ssz.SigningRoot(b)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := chainService.beaconDB.SaveState(ctx, &pb.BeaconState{}, root); err != nil {
|
||||
t.Fatal(err)
|
||||
if !chainService.verifyCheckpointEpoch(ðpb.Checkpoint{}) {
|
||||
t.Error("Wanted true, got false")
|
||||
}
|
||||
|
||||
a := ðpb.Attestation{Data: ðpb.AttestationData{
|
||||
Target: ðpb.Checkpoint{Root: root[:]},
|
||||
}}
|
||||
if err := chainService.ReceiveAttestation(ctx, a); err != nil {
|
||||
t.Fatal(err)
|
||||
if chainService.verifyCheckpointEpoch(ðpb.Checkpoint{Epoch: 1}) {
|
||||
t.Error("Wanted false, got true")
|
||||
}
|
||||
|
||||
testutil.AssertLogsContain(t, hook, "Saved new head info")
|
||||
testutil.AssertLogsContain(t, hook, "Broadcasting attestation")
|
||||
}
|
||||
|
||||
func TestReceiveAttestation_SameHead(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
ctx := context.Background()
|
||||
|
||||
chainService := setupBeaconChain(t, db)
|
||||
r, _ := ssz.SigningRoot(ðpb.BeaconBlock{})
|
||||
chainService.forkChoiceStore = &store{headRoot: r[:]}
|
||||
chainService.canonicalRoots[0] = r[:]
|
||||
|
||||
b := ðpb.BeaconBlock{}
|
||||
if err := chainService.beaconDB.SaveBlock(ctx, b); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
root, err := ssz.SigningRoot(b)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := chainService.beaconDB.SaveState(ctx, &pb.BeaconState{}, root); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
a := ðpb.Attestation{Data: ðpb.AttestationData{
|
||||
Target: ðpb.Checkpoint{Root: root[:]},
|
||||
}}
|
||||
if err := chainService.ReceiveAttestation(ctx, a); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
testutil.AssertLogsDoNotContain(t, hook, "Saved new head info")
|
||||
testutil.AssertLogsContain(t, hook, "Broadcasting attestation")
|
||||
}
|
||||
|
||||
func TestReceiveAttestationNoPubsub_ProcessCorrectly(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
ctx := context.Background()
|
||||
|
||||
chainService := setupBeaconChain(t, db)
|
||||
r, _ := ssz.SigningRoot(ðpb.BeaconBlock{})
|
||||
chainService.forkChoiceStore = &store{headRoot: r[:]}
|
||||
|
||||
b := ðpb.BeaconBlock{}
|
||||
if err := chainService.beaconDB.SaveBlock(ctx, b); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
root, err := ssz.SigningRoot(b)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := chainService.beaconDB.SaveState(ctx, &pb.BeaconState{}, root); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
a := ðpb.Attestation{Data: ðpb.AttestationData{
|
||||
Target: ðpb.Checkpoint{Root: root[:]},
|
||||
}}
|
||||
if err := chainService.ReceiveAttestationNoPubsub(ctx, a); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
testutil.AssertLogsContain(t, hook, "Saved new head info")
|
||||
testutil.AssertLogsDoNotContain(t, hook, "Broadcasting attestation")
|
||||
}
|
||||
|
||||
@@ -5,11 +5,15 @@ import (
|
||||
"context"
|
||||
"encoding/hex"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/pkg/errors"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/go-ssz"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/shared/featureconfig"
|
||||
"github.com/prysmaticlabs/prysm/shared/traceutil"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
@@ -17,10 +21,10 @@ import (
|
||||
|
||||
// BlockReceiver interface defines the methods of chain service receive and processing new blocks.
|
||||
type BlockReceiver interface {
|
||||
ReceiveBlock(ctx context.Context, block *ethpb.BeaconBlock) error
|
||||
ReceiveBlockNoPubsub(ctx context.Context, block *ethpb.BeaconBlock) error
|
||||
ReceiveBlockNoPubsubForkchoice(ctx context.Context, block *ethpb.BeaconBlock) error
|
||||
ReceiveBlockNoVerify(ctx context.Context, block *ethpb.BeaconBlock) error
|
||||
ReceiveBlock(ctx context.Context, block *ethpb.SignedBeaconBlock) error
|
||||
ReceiveBlockNoPubsub(ctx context.Context, block *ethpb.SignedBeaconBlock) error
|
||||
ReceiveBlockNoPubsubForkchoice(ctx context.Context, block *ethpb.SignedBeaconBlock) error
|
||||
ReceiveBlockNoVerify(ctx context.Context, block *ethpb.SignedBeaconBlock) error
|
||||
}
|
||||
|
||||
// ReceiveBlock is a function that defines the operations that are preformed on
|
||||
@@ -29,11 +33,11 @@ type BlockReceiver interface {
|
||||
// 2. Validate block, apply state transition and update check points
|
||||
// 3. Apply fork choice to the processed block
|
||||
// 4. Save latest head info
|
||||
func (s *Service) ReceiveBlock(ctx context.Context, block *ethpb.BeaconBlock) error {
|
||||
func (s *Service) ReceiveBlock(ctx context.Context, block *ethpb.SignedBeaconBlock) error {
|
||||
ctx, span := trace.StartSpan(ctx, "beacon-chain.blockchain.ReceiveBlock")
|
||||
defer span.End()
|
||||
|
||||
root, err := ssz.SigningRoot(block)
|
||||
root, err := ssz.HashTreeRoot(block.Block)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get signing root on received block")
|
||||
}
|
||||
@@ -50,7 +54,6 @@ func (s *Service) ReceiveBlock(ctx context.Context, block *ethpb.BeaconBlock) er
|
||||
return err
|
||||
}
|
||||
|
||||
processedBlk.Inc()
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -59,57 +62,63 @@ func (s *Service) ReceiveBlock(ctx context.Context, block *ethpb.BeaconBlock) er
|
||||
// 1. Validate block, apply state transition and update check points
|
||||
// 2. Apply fork choice to the processed block
|
||||
// 3. Save latest head info
|
||||
func (s *Service) ReceiveBlockNoPubsub(ctx context.Context, block *ethpb.BeaconBlock) error {
|
||||
func (s *Service) ReceiveBlockNoPubsub(ctx context.Context, block *ethpb.SignedBeaconBlock) error {
|
||||
ctx, span := trace.StartSpan(ctx, "beacon-chain.blockchain.ReceiveBlockNoPubsub")
|
||||
defer span.End()
|
||||
blockCopy := proto.Clone(block).(*ethpb.BeaconBlock)
|
||||
blockCopy := stateTrie.CopySignedBeaconBlock(block)
|
||||
|
||||
// Apply state transition on the new block.
|
||||
if err := s.forkChoiceStore.OnBlock(ctx, blockCopy); err != nil {
|
||||
err := errors.Wrap(err, "could not process block from fork choice service")
|
||||
postState, err := s.onBlock(ctx, blockCopy)
|
||||
if err != nil {
|
||||
err := errors.Wrap(err, "could not process block")
|
||||
traceutil.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
root, err := ssz.SigningRoot(blockCopy)
|
||||
|
||||
// Add attestations from the block to the pool for fork choice.
|
||||
if err := s.attPool.SaveBlockAttestations(blockCopy.Block.Body.Attestations); err != nil {
|
||||
log.Errorf("Could not save attestation for fork choice: %v", err)
|
||||
return nil
|
||||
}
|
||||
for _, exit := range block.Block.Body.VoluntaryExits {
|
||||
s.exitPool.MarkIncluded(exit)
|
||||
}
|
||||
|
||||
s.epochParticipationLock.Lock()
|
||||
defer s.epochParticipationLock.Unlock()
|
||||
s.epochParticipation[helpers.SlotToEpoch(blockCopy.Block.Slot)] = precompute.Balances
|
||||
|
||||
root, err := ssz.HashTreeRoot(blockCopy.Block)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get signing root on received block")
|
||||
}
|
||||
|
||||
// Run fork choice after applying state transition on the new block.
|
||||
headRoot, err := s.forkChoiceStore.Head(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get head from fork choice service")
|
||||
}
|
||||
headBlk, err := s.beaconDB.Block(ctx, bytesutil.ToBytes32(headRoot))
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not compute state from block head")
|
||||
}
|
||||
|
||||
// Only save head if it's different than the current head.
|
||||
if !bytes.Equal(headRoot, s.HeadRoot()) {
|
||||
if err := s.saveHead(ctx, headBlk, bytesutil.ToBytes32(headRoot)); err != nil {
|
||||
if featureconfig.Get().DisableForkChoice && block.Block.Slot > s.headSlot() {
|
||||
if err := s.saveHead(ctx, root); err != nil {
|
||||
return errors.Wrap(err, "could not save head")
|
||||
}
|
||||
} else {
|
||||
if err := s.updateHead(ctx, postState.Balances()); err != nil {
|
||||
return errors.Wrap(err, "could not save head")
|
||||
}
|
||||
}
|
||||
|
||||
// Remove block's contained deposits, attestations, and other operations from persistent storage.
|
||||
if err := s.cleanupBlockOperations(ctx, blockCopy); err != nil {
|
||||
return errors.Wrap(err, "could not clean up block deposits, attestations, and other operations")
|
||||
}
|
||||
// Send notification of the processed block to the state feed.
|
||||
s.stateNotifier.StateFeed().Send(&feed.Event{
|
||||
Type: statefeed.BlockProcessed,
|
||||
Data: &statefeed.BlockProcessedData{
|
||||
Slot: blockCopy.Block.Slot,
|
||||
BlockRoot: root,
|
||||
Verified: true,
|
||||
},
|
||||
})
|
||||
|
||||
// Reports on block and fork choice metrics.
|
||||
s.reportSlotMetrics(blockCopy.Slot)
|
||||
|
||||
// Log if block is a competing block.
|
||||
isCompetingBlock(root[:], blockCopy.Slot, headRoot, headBlk.Slot)
|
||||
reportSlotMetrics(blockCopy.Block.Slot, s.headSlot(), s.finalizedCheckpt)
|
||||
|
||||
// Log state transition data.
|
||||
logStateTransitionData(blockCopy, root[:])
|
||||
logStateTransitionData(blockCopy.Block)
|
||||
|
||||
processedBlkNoPubsub.Inc()
|
||||
|
||||
// We write the latest saved head root to a feed for consumption by other services.
|
||||
s.headUpdatedFeed.Send(bytesutil.ToBytes32(headRoot))
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -117,111 +126,112 @@ func (s *Service) ReceiveBlockNoPubsub(ctx context.Context, block *ethpb.BeaconB
|
||||
// that are preformed blocks that is received from initial sync service. The operations consists of:
|
||||
// 1. Validate block, apply state transition and update check points
|
||||
// 2. Save latest head info
|
||||
func (s *Service) ReceiveBlockNoPubsubForkchoice(ctx context.Context, block *ethpb.BeaconBlock) error {
|
||||
func (s *Service) ReceiveBlockNoPubsubForkchoice(ctx context.Context, block *ethpb.SignedBeaconBlock) error {
|
||||
ctx, span := trace.StartSpan(ctx, "beacon-chain.blockchain.ReceiveBlockNoForkchoice")
|
||||
defer span.End()
|
||||
blockCopy := proto.Clone(block).(*ethpb.BeaconBlock)
|
||||
blockCopy := stateTrie.CopySignedBeaconBlock(block)
|
||||
|
||||
// Apply state transition on the incoming newly received block.
|
||||
if err := s.forkChoiceStore.OnBlock(ctx, blockCopy); err != nil {
|
||||
err := errors.Wrap(err, "could not process block from fork choice service")
|
||||
// Apply state transition on the new block.
|
||||
_, err := s.onBlock(ctx, blockCopy)
|
||||
if err != nil {
|
||||
err := errors.Wrap(err, "could not process block")
|
||||
traceutil.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
root, err := ssz.SigningRoot(blockCopy)
|
||||
|
||||
root, err := ssz.HashTreeRoot(blockCopy.Block)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get signing root on received block")
|
||||
}
|
||||
|
||||
if !bytes.Equal(root[:], s.HeadRoot()) {
|
||||
if err := s.saveHead(ctx, blockCopy, root); err != nil {
|
||||
cachedHeadRoot, err := s.HeadRoot(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get head root from cache")
|
||||
}
|
||||
if !bytes.Equal(root[:], cachedHeadRoot) {
|
||||
if err := s.saveHead(ctx, root); err != nil {
|
||||
return errors.Wrap(err, "could not save head")
|
||||
}
|
||||
}
|
||||
|
||||
// Remove block's contained deposits, attestations, and other operations from persistent storage.
|
||||
if err := s.cleanupBlockOperations(ctx, blockCopy); err != nil {
|
||||
return errors.Wrap(err, "could not clean up block deposits, attestations, and other operations")
|
||||
}
|
||||
// Send notification of the processed block to the state feed.
|
||||
s.stateNotifier.StateFeed().Send(&feed.Event{
|
||||
Type: statefeed.BlockProcessed,
|
||||
Data: &statefeed.BlockProcessedData{
|
||||
Slot: blockCopy.Block.Slot,
|
||||
BlockRoot: root,
|
||||
Verified: true,
|
||||
},
|
||||
})
|
||||
|
||||
// Reports on block and fork choice metrics.
|
||||
s.reportSlotMetrics(blockCopy.Slot)
|
||||
reportSlotMetrics(blockCopy.Block.Slot, s.headSlot(), s.finalizedCheckpt)
|
||||
|
||||
// Log state transition data.
|
||||
logStateTransitionData(blockCopy, root[:])
|
||||
logStateTransitionData(blockCopy.Block)
|
||||
|
||||
s.epochParticipationLock.Lock()
|
||||
defer s.epochParticipationLock.Unlock()
|
||||
s.epochParticipation[helpers.SlotToEpoch(blockCopy.Block.Slot)] = precompute.Balances
|
||||
|
||||
// We write the latest saved head root to a feed for consumption by other services.
|
||||
s.headUpdatedFeed.Send(root)
|
||||
processedBlkNoPubsubForkchoice.Inc()
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReceiveBlockNoVerify runs state transition on a input block without verifying the block's BLS contents.
|
||||
// Depends on the security model, this is the "minimal" work a node can do to sync the chain.
|
||||
// It simulates light client behavior and assumes 100% trust with the syncing peer.
|
||||
func (s *Service) ReceiveBlockNoVerify(ctx context.Context, block *ethpb.BeaconBlock) error {
|
||||
func (s *Service) ReceiveBlockNoVerify(ctx context.Context, block *ethpb.SignedBeaconBlock) error {
|
||||
ctx, span := trace.StartSpan(ctx, "beacon-chain.blockchain.ReceiveBlockNoVerify")
|
||||
defer span.End()
|
||||
blockCopy := proto.Clone(block).(*ethpb.BeaconBlock)
|
||||
blockCopy := stateTrie.CopySignedBeaconBlock(block)
|
||||
|
||||
// Apply state transition on the incoming newly received blockCopy without verifying its BLS contents.
|
||||
if err := s.forkChoiceStore.OnBlockNoVerifyStateTransition(ctx, blockCopy); err != nil {
|
||||
return errors.Wrap(err, "could not process blockCopy from fork choice service")
|
||||
if err := s.onBlockInitialSyncStateTransition(ctx, blockCopy); err != nil {
|
||||
err := errors.Wrap(err, "could not process block")
|
||||
traceutil.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
root, err := ssz.SigningRoot(blockCopy)
|
||||
|
||||
root, err := ssz.HashTreeRoot(blockCopy.Block)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get signing root on received blockCopy")
|
||||
}
|
||||
|
||||
if !bytes.Equal(root[:], s.HeadRoot()) {
|
||||
if err := s.saveHead(ctx, blockCopy, root); err != nil {
|
||||
cachedHeadRoot, err := s.HeadRoot(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get head root from cache")
|
||||
}
|
||||
|
||||
if !bytes.Equal(root[:], cachedHeadRoot) {
|
||||
if err := s.saveHeadNoDB(ctx, blockCopy, root); err != nil {
|
||||
err := errors.Wrap(err, "could not save head")
|
||||
traceutil.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Send notification of the processed block to the state feed.
|
||||
s.stateNotifier.StateFeed().Send(&feed.Event{
|
||||
Type: statefeed.BlockProcessed,
|
||||
Data: &statefeed.BlockProcessedData{
|
||||
Slot: blockCopy.Block.Slot,
|
||||
BlockRoot: root,
|
||||
Verified: false,
|
||||
},
|
||||
})
|
||||
|
||||
// Reports on blockCopy and fork choice metrics.
|
||||
s.reportSlotMetrics(blockCopy.Slot)
|
||||
reportSlotMetrics(blockCopy.Block.Slot, s.headSlot(), s.finalizedCheckpt)
|
||||
|
||||
// Log state transition data.
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": blockCopy.Slot,
|
||||
"attestations": len(blockCopy.Body.Attestations),
|
||||
"deposits": len(blockCopy.Body.Deposits),
|
||||
"slot": blockCopy.Block.Slot,
|
||||
"attestations": len(blockCopy.Block.Body.Attestations),
|
||||
"deposits": len(blockCopy.Block.Body.Deposits),
|
||||
}).Debug("Finished applying state transition")
|
||||
|
||||
// We write the latest saved head root to a feed for consumption by other services.
|
||||
s.headUpdatedFeed.Send(root)
|
||||
s.epochParticipationLock.Lock()
|
||||
defer s.epochParticipationLock.Unlock()
|
||||
s.epochParticipation[helpers.SlotToEpoch(blockCopy.Block.Slot)] = precompute.Balances
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// cleanupBlockOperations processes and cleans up any block operations relevant to the beacon node
|
||||
// such as attestations, exits, and deposits. We update the latest seen attestation by validator
|
||||
// in the local node's runtime, cleanup and remove pending deposits which have been included in the block
|
||||
// from our node's local cache, and process validator exits and more.
|
||||
func (s *Service) cleanupBlockOperations(ctx context.Context, block *ethpb.BeaconBlock) error {
|
||||
// Forward processed block to operation pool to remove individual operation from DB.
|
||||
if s.opsPoolService.IncomingProcessedBlockFeed().Send(block) == 0 {
|
||||
log.Error("Sent processed block to no subscribers")
|
||||
}
|
||||
|
||||
// Remove pending deposits from the deposit queue.
|
||||
for _, dep := range block.Body.Deposits {
|
||||
s.depositCache.RemovePendingDeposit(ctx, dep)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// This checks if the block is from a competing chain, emits warning and updates metrics.
|
||||
func isCompetingBlock(root []byte, slot uint64, headRoot []byte, headSlot uint64) {
|
||||
if !bytes.Equal(root[:], headRoot) {
|
||||
log.WithFields(logrus.Fields{
|
||||
"blkSlot": slot,
|
||||
"blkRoot": hex.EncodeToString(root[:]),
|
||||
"headSlot": headSlot,
|
||||
"headRoot": hex.EncodeToString(headRoot),
|
||||
}).Warn("Calculated head diffs from new block")
|
||||
competingBlks.Inc()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,275 +0,0 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/go-ssz"
|
||||
b "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
|
||||
func TestReceiveBlock_ProcessCorrectly(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
ctx := context.Background()
|
||||
|
||||
chainService := setupBeaconChain(t, db)
|
||||
deposits, _, privKeys := testutil.SetupInitialDeposits(t, 100)
|
||||
beaconState, err := state.GenesisBeaconState(deposits, 0, ðpb.Eth1Data{BlockHash: make([]byte, 32)})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
beaconState.Eth1Data.BlockHash = nil
|
||||
beaconState.Eth1DepositIndex = 100
|
||||
stateRoot, err := ssz.HashTreeRoot(beaconState)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
genesis := b.NewGenesisBlock(stateRoot[:])
|
||||
bodyRoot, err := ssz.HashTreeRoot(genesis.Body)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
genesisBlkRoot, err := ssz.SigningRoot(genesis)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
cp := ðpb.Checkpoint{Root: genesisBlkRoot[:]}
|
||||
if err := chainService.forkChoiceStore.GenesisStore(ctx, cp, cp); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
beaconState.LatestBlockHeader = ðpb.BeaconBlockHeader{
|
||||
Slot: genesis.Slot,
|
||||
ParentRoot: genesis.ParentRoot,
|
||||
BodyRoot: bodyRoot[:],
|
||||
StateRoot: genesis.StateRoot,
|
||||
}
|
||||
if err := chainService.beaconDB.SaveBlock(ctx, genesis); err != nil {
|
||||
t.Fatalf("Could not save block to db: %v", err)
|
||||
}
|
||||
parentRoot, err := ssz.SigningRoot(genesis)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := db.SaveState(ctx, beaconState, parentRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
slot := beaconState.Slot + 1
|
||||
epoch := helpers.SlotToEpoch(slot)
|
||||
beaconState.Slot++
|
||||
randaoReveal, err := testutil.CreateRandaoReveal(beaconState, epoch, privKeys)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
beaconState.Slot--
|
||||
|
||||
block := ðpb.BeaconBlock{
|
||||
Slot: slot,
|
||||
ParentRoot: parentRoot[:],
|
||||
Body: ðpb.BeaconBlockBody{
|
||||
Eth1Data: ðpb.Eth1Data{
|
||||
DepositCount: uint64(len(deposits)),
|
||||
DepositRoot: []byte("a"),
|
||||
BlockHash: []byte("b"),
|
||||
},
|
||||
RandaoReveal: randaoReveal[:],
|
||||
Attestations: nil,
|
||||
},
|
||||
}
|
||||
|
||||
stateRootCandidate, err := state.ExecuteStateTransitionNoVerify(context.Background(), beaconState, block)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
stateRoot, err = ssz.HashTreeRoot(stateRootCandidate)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
block.StateRoot = stateRoot[:]
|
||||
|
||||
block, err = testutil.SignBlock(beaconState, block, privKeys)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if err := chainService.beaconDB.SaveBlock(ctx, block); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := chainService.ReceiveBlock(context.Background(), block); err != nil {
|
||||
t.Errorf("Block failed processing: %v", err)
|
||||
}
|
||||
testutil.AssertLogsContain(t, hook, "Finished applying state transition")
|
||||
}
|
||||
|
||||
func TestReceiveReceiveBlockNoPubsub_CanSaveHeadInfo(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
ctx := context.Background()
|
||||
|
||||
chainService := setupBeaconChain(t, db)
|
||||
|
||||
headBlk := ðpb.BeaconBlock{Slot: 100}
|
||||
if err := db.SaveBlock(ctx, headBlk); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
r, err := ssz.SigningRoot(headBlk)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
chainService.forkChoiceStore = &store{headRoot: r[:]}
|
||||
|
||||
if err := chainService.ReceiveBlockNoPubsub(ctx, ðpb.BeaconBlock{
|
||||
Slot: 1,
|
||||
Body: ðpb.BeaconBlockBody{}}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(r[:], chainService.HeadRoot()) {
|
||||
t.Error("Incorrect head root saved")
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(headBlk, chainService.HeadBlock()) {
|
||||
t.Error("Incorrect head block saved")
|
||||
}
|
||||
|
||||
testutil.AssertLogsContain(t, hook, "Saved new head info")
|
||||
}
|
||||
|
||||
func TestReceiveReceiveBlockNoPubsub_SameHead(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
ctx := context.Background()
|
||||
|
||||
chainService := setupBeaconChain(t, db)
|
||||
|
||||
headBlk := ðpb.BeaconBlock{}
|
||||
if err := db.SaveBlock(ctx, headBlk); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
newBlk := ðpb.BeaconBlock{
|
||||
Slot: 1,
|
||||
Body: ðpb.BeaconBlockBody{}}
|
||||
newRoot, _ := ssz.SigningRoot(newBlk)
|
||||
if err := db.SaveBlock(ctx, newBlk); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
chainService.forkChoiceStore = &store{headRoot: newRoot[:]}
|
||||
chainService.canonicalRoots[0] = newRoot[:]
|
||||
|
||||
if err := chainService.ReceiveBlockNoPubsub(ctx, newBlk); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
testutil.AssertLogsDoNotContain(t, hook, "Saved new head info")
|
||||
}
|
||||
|
||||
func TestReceiveBlockNoPubsubForkchoice_ProcessCorrectly(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
ctx := context.Background()
|
||||
|
||||
chainService := setupBeaconChain(t, db)
|
||||
deposits, _, privKeys := testutil.SetupInitialDeposits(t, 100)
|
||||
beaconState, err := state.GenesisBeaconState(deposits, 0, ðpb.Eth1Data{BlockHash: make([]byte, 32)})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
beaconState.Eth1DepositIndex = 100
|
||||
stateRoot, err := ssz.HashTreeRoot(beaconState)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
genesis := b.NewGenesisBlock(stateRoot[:])
|
||||
bodyRoot, err := ssz.HashTreeRoot(genesis.Body)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := chainService.forkChoiceStore.GenesisStore(ctx, ðpb.Checkpoint{}, ðpb.Checkpoint{}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
beaconState.LatestBlockHeader = ðpb.BeaconBlockHeader{
|
||||
Slot: genesis.Slot,
|
||||
ParentRoot: genesis.ParentRoot,
|
||||
BodyRoot: bodyRoot[:],
|
||||
StateRoot: genesis.StateRoot,
|
||||
}
|
||||
if err := chainService.beaconDB.SaveBlock(ctx, genesis); err != nil {
|
||||
t.Fatalf("Could not save block to db: %v", err)
|
||||
}
|
||||
parentRoot, err := ssz.SigningRoot(genesis)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := db.SaveState(ctx, beaconState, parentRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
slot := beaconState.Slot + 1
|
||||
epoch := helpers.SlotToEpoch(slot)
|
||||
beaconState.Slot++
|
||||
randaoReveal, err := testutil.CreateRandaoReveal(beaconState, epoch, privKeys)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
beaconState.Slot--
|
||||
|
||||
block := ðpb.BeaconBlock{
|
||||
Slot: slot,
|
||||
ParentRoot: parentRoot[:],
|
||||
Body: ðpb.BeaconBlockBody{
|
||||
Eth1Data: ðpb.Eth1Data{
|
||||
DepositCount: uint64(len(deposits)),
|
||||
DepositRoot: []byte("a"),
|
||||
BlockHash: []byte("b"),
|
||||
},
|
||||
RandaoReveal: randaoReveal[:],
|
||||
Attestations: nil,
|
||||
},
|
||||
}
|
||||
|
||||
stateRootCandidate, err := state.ExecuteStateTransitionNoVerify(context.Background(), beaconState, block)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
stateRoot, err = ssz.HashTreeRoot(stateRootCandidate)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
block.StateRoot = stateRoot[:]
|
||||
|
||||
block, err = testutil.SignBlock(beaconState, block, privKeys)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if err := chainService.beaconDB.SaveBlock(ctx, block); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := chainService.ReceiveBlockNoPubsubForkchoice(context.Background(), block); err != nil {
|
||||
t.Errorf("Block failed processing: %v", err)
|
||||
}
|
||||
testutil.AssertLogsContain(t, hook, "Finished applying state transition")
|
||||
testutil.AssertLogsDoNotContain(t, hook, "Finished fork choice")
|
||||
}
|
||||
@@ -11,90 +11,112 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/go-ssz"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain/forkchoice"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db/filters"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/flags"
|
||||
f "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/slashings"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/voluntaryexits"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
|
||||
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/event"
|
||||
"github.com/prysmaticlabs/prysm/shared/featureconfig"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// ChainFeeds interface defines the methods of the Service which provide state related
|
||||
// information feeds to consumers.
|
||||
type ChainFeeds interface {
|
||||
StateInitializedFeed() *event.Feed
|
||||
}
|
||||
|
||||
// NewHeadNotifier defines a struct which can notify many consumers of a new,
|
||||
// canonical chain head event occuring in the node.
|
||||
type NewHeadNotifier interface {
|
||||
HeadUpdatedFeed() *event.Feed
|
||||
}
|
||||
|
||||
// Service represents a service that handles the internal
|
||||
// logic of managing the full PoS beacon chain.
|
||||
type Service struct {
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
beaconDB db.Database
|
||||
depositCache *depositcache.DepositCache
|
||||
chainStartFetcher powchain.ChainStartFetcher
|
||||
opsPoolService operations.OperationFeeds
|
||||
forkChoiceStore forkchoice.ForkChoicer
|
||||
chainStartChan chan time.Time
|
||||
genesisTime time.Time
|
||||
stateInitializedFeed *event.Feed
|
||||
headUpdatedFeed *event.Feed
|
||||
p2p p2p.Broadcaster
|
||||
maxRoutines int64
|
||||
headSlot uint64
|
||||
headBlock *ethpb.BeaconBlock
|
||||
headState *pb.BeaconState
|
||||
canonicalRoots map[uint64][]byte
|
||||
headLock sync.RWMutex
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
beaconDB db.HeadAccessDatabase
|
||||
depositCache *depositcache.DepositCache
|
||||
chainStartFetcher powchain.ChainStartFetcher
|
||||
attPool attestations.Pool
|
||||
slashingPool *slashings.Pool
|
||||
exitPool *voluntaryexits.Pool
|
||||
genesisTime time.Time
|
||||
p2p p2p.Broadcaster
|
||||
maxRoutines int64
|
||||
head *head
|
||||
headLock sync.RWMutex
|
||||
stateNotifier statefeed.Notifier
|
||||
genesisRoot [32]byte
|
||||
epochParticipation map[uint64]*precompute.Balance
|
||||
epochParticipationLock sync.RWMutex
|
||||
forkChoiceStore f.ForkChoicer
|
||||
justifiedCheckpt *ethpb.Checkpoint
|
||||
prevJustifiedCheckpt *ethpb.Checkpoint
|
||||
bestJustifiedCheckpt *ethpb.Checkpoint
|
||||
finalizedCheckpt *ethpb.Checkpoint
|
||||
prevFinalizedCheckpt *ethpb.Checkpoint
|
||||
nextEpochBoundarySlot uint64
|
||||
voteLock sync.RWMutex
|
||||
initSyncState map[[32]byte]*stateTrie.BeaconState
|
||||
boundaryRoots [][32]byte
|
||||
initSyncStateLock sync.RWMutex
|
||||
checkpointState *cache.CheckpointStateCache
|
||||
checkpointStateLock sync.Mutex
|
||||
stateGen *stategen.State
|
||||
opsService *attestations.Service
|
||||
}
|
||||
|
||||
// Config options for the service.
|
||||
type Config struct {
|
||||
BeaconBlockBuf int
|
||||
ChainStartFetcher powchain.ChainStartFetcher
|
||||
BeaconDB db.Database
|
||||
BeaconDB db.HeadAccessDatabase
|
||||
DepositCache *depositcache.DepositCache
|
||||
OpsPoolService operations.OperationFeeds
|
||||
AttPool attestations.Pool
|
||||
ExitPool *voluntaryexits.Pool
|
||||
SlashingPool *slashings.Pool
|
||||
P2p p2p.Broadcaster
|
||||
MaxRoutines int64
|
||||
StateNotifier statefeed.Notifier
|
||||
ForkChoiceStore f.ForkChoicer
|
||||
OpsService *attestations.Service
|
||||
StateGen *stategen.State
|
||||
}
|
||||
|
||||
// NewService instantiates a new block service instance that will
|
||||
// be registered into a running beacon node.
|
||||
func NewService(ctx context.Context, cfg *Config) (*Service, error) {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
store := forkchoice.NewForkChoiceService(ctx, cfg.BeaconDB)
|
||||
return &Service{
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
beaconDB: cfg.BeaconDB,
|
||||
depositCache: cfg.DepositCache,
|
||||
chainStartFetcher: cfg.ChainStartFetcher,
|
||||
opsPoolService: cfg.OpsPoolService,
|
||||
forkChoiceStore: store,
|
||||
chainStartChan: make(chan time.Time),
|
||||
stateInitializedFeed: new(event.Feed),
|
||||
headUpdatedFeed: new(event.Feed),
|
||||
p2p: cfg.P2p,
|
||||
canonicalRoots: make(map[uint64][]byte),
|
||||
maxRoutines: cfg.MaxRoutines,
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
beaconDB: cfg.BeaconDB,
|
||||
depositCache: cfg.DepositCache,
|
||||
chainStartFetcher: cfg.ChainStartFetcher,
|
||||
attPool: cfg.AttPool,
|
||||
exitPool: cfg.ExitPool,
|
||||
slashingPool: cfg.SlashingPool,
|
||||
p2p: cfg.P2p,
|
||||
maxRoutines: cfg.MaxRoutines,
|
||||
stateNotifier: cfg.StateNotifier,
|
||||
epochParticipation: make(map[uint64]*precompute.Balance),
|
||||
forkChoiceStore: cfg.ForkChoiceStore,
|
||||
initSyncState: make(map[[32]byte]*stateTrie.BeaconState),
|
||||
boundaryRoots: [][32]byte{},
|
||||
checkpointState: cache.NewCheckpointStateCache(),
|
||||
opsService: cfg.OpsService,
|
||||
stateGen: cfg.StateGen,
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -105,10 +127,37 @@ func (s *Service) Start() {
|
||||
if err != nil {
|
||||
log.Fatalf("Could not fetch beacon state: %v", err)
|
||||
}
|
||||
|
||||
// For running initial sync with state cache, in an event of restart, we use
|
||||
// last finalized check point as start point to sync instead of head
|
||||
// state. This is because we no longer save state every slot during sync.
|
||||
cp, err := s.beaconDB.FinalizedCheckpoint(ctx)
|
||||
if err != nil {
|
||||
log.Fatalf("Could not fetch finalized cp: %v", err)
|
||||
}
|
||||
|
||||
if beaconState == nil {
|
||||
if featureconfig.Get().NewStateMgmt {
|
||||
beaconState, err = s.stateGen.StateByRoot(ctx, bytesutil.ToBytes32(cp.Root))
|
||||
if err != nil {
|
||||
log.Fatalf("Could not fetch beacon state: %v", err)
|
||||
}
|
||||
} else {
|
||||
beaconState, err = s.beaconDB.State(ctx, bytesutil.ToBytes32(cp.Root))
|
||||
if err != nil {
|
||||
log.Fatalf("Could not fetch beacon state: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Make sure that attestation processor is subscribed and ready for state initializing event.
|
||||
attestationProcessorSubscribed := make(chan struct{}, 1)
|
||||
|
||||
// If the chain has already been initialized, simply start the block processing routine.
|
||||
if beaconState != nil {
|
||||
log.Info("Blockchain data already exists in DB, initializing...")
|
||||
s.genesisTime = time.Unix(int64(beaconState.GenesisTime), 0)
|
||||
s.genesisTime = time.Unix(int64(beaconState.GenesisTime()), 0)
|
||||
s.opsService.SetGenesisTime(beaconState.GenesisTime())
|
||||
if err := s.initializeChainInfo(ctx); err != nil {
|
||||
log.Fatalf("Could not set up chain info: %v", err)
|
||||
}
|
||||
@@ -120,34 +169,74 @@ func (s *Service) Start() {
|
||||
if err != nil {
|
||||
log.Fatalf("Could not get finalized checkpoint: %v", err)
|
||||
}
|
||||
if err := s.forkChoiceStore.GenesisStore(ctx, justifiedCheckpoint, finalizedCheckpoint); err != nil {
|
||||
log.Fatalf("Could not start fork choice service: %v", err)
|
||||
|
||||
// Resume fork choice.
|
||||
s.justifiedCheckpt = stateTrie.CopyCheckpoint(justifiedCheckpoint)
|
||||
s.prevJustifiedCheckpt = stateTrie.CopyCheckpoint(justifiedCheckpoint)
|
||||
s.bestJustifiedCheckpt = stateTrie.CopyCheckpoint(justifiedCheckpoint)
|
||||
s.finalizedCheckpt = stateTrie.CopyCheckpoint(finalizedCheckpoint)
|
||||
s.prevFinalizedCheckpt = stateTrie.CopyCheckpoint(finalizedCheckpoint)
|
||||
s.resumeForkChoice(justifiedCheckpoint, finalizedCheckpoint)
|
||||
|
||||
if finalizedCheckpoint.Epoch > 1 {
|
||||
if err := s.pruneGarbageState(ctx, helpers.StartSlot(finalizedCheckpoint.Epoch)-params.BeaconConfig().SlotsPerEpoch); err != nil {
|
||||
log.WithError(err).Warn("Could not prune old states")
|
||||
}
|
||||
}
|
||||
s.stateInitializedFeed.Send(s.genesisTime)
|
||||
|
||||
s.stateNotifier.StateFeed().Send(&feed.Event{
|
||||
Type: statefeed.Initialized,
|
||||
Data: &statefeed.InitializedData{
|
||||
StartTime: s.genesisTime,
|
||||
},
|
||||
})
|
||||
} else {
|
||||
log.Info("Waiting to reach the validator deposit threshold to start the beacon chain...")
|
||||
if s.chainStartFetcher == nil {
|
||||
log.Fatal("Not configured web3Service for POW chain")
|
||||
return // return need for TestStartUninitializedChainWithoutConfigPOWChain.
|
||||
}
|
||||
subChainStart := s.chainStartFetcher.ChainStartFeed().Subscribe(s.chainStartChan)
|
||||
go func() {
|
||||
genesisTime := <-s.chainStartChan
|
||||
s.processChainStartTime(ctx, genesisTime, subChainStart)
|
||||
return
|
||||
stateChannel := make(chan *feed.Event, 1)
|
||||
stateSub := s.stateNotifier.StateFeed().Subscribe(stateChannel)
|
||||
defer stateSub.Unsubscribe()
|
||||
<-attestationProcessorSubscribed
|
||||
for {
|
||||
select {
|
||||
case event := <-stateChannel:
|
||||
if event.Type == statefeed.ChainStarted {
|
||||
data := event.Data.(*statefeed.ChainStartedData)
|
||||
log.WithField("starttime", data.StartTime).Debug("Received chain start event")
|
||||
s.processChainStartTime(ctx, data.StartTime)
|
||||
return
|
||||
}
|
||||
case <-s.ctx.Done():
|
||||
log.Debug("Context closed, exiting goroutine")
|
||||
return
|
||||
case err := <-stateSub.Err():
|
||||
log.WithError(err).Error("Subscription to state notifier failed")
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
go s.processAttestation(attestationProcessorSubscribed)
|
||||
}
|
||||
|
||||
// processChainStartTime initializes a series of deposits from the ChainStart deposits in the eth1
|
||||
// deposit contract, initializes the beacon chain's state, and kicks off the beacon chain.
|
||||
func (s *Service) processChainStartTime(ctx context.Context, genesisTime time.Time, chainStartSub event.Subscription) {
|
||||
initialDeposits := s.chainStartFetcher.ChainStartDeposits()
|
||||
if err := s.initializeBeaconChain(ctx, genesisTime, initialDeposits, s.chainStartFetcher.ChainStartEth1Data()); err != nil {
|
||||
func (s *Service) processChainStartTime(ctx context.Context, genesisTime time.Time) {
|
||||
preGenesisState := s.chainStartFetcher.PreGenesisState()
|
||||
if err := s.initializeBeaconChain(ctx, genesisTime, preGenesisState, s.chainStartFetcher.ChainStartEth1Data()); err != nil {
|
||||
log.Fatalf("Could not initialize beacon chain: %v", err)
|
||||
}
|
||||
s.stateInitializedFeed.Send(genesisTime)
|
||||
chainStartSub.Unsubscribe()
|
||||
s.stateNotifier.StateFeed().Send(&feed.Event{
|
||||
Type: statefeed.Initialized,
|
||||
Data: &statefeed.InitializedData{
|
||||
StartTime: genesisTime,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// initializes the state and genesis block of the beacon chain to persistent storage
|
||||
@@ -156,15 +245,14 @@ func (s *Service) processChainStartTime(ctx context.Context, genesisTime time.Ti
|
||||
func (s *Service) initializeBeaconChain(
|
||||
ctx context.Context,
|
||||
genesisTime time.Time,
|
||||
deposits []*ethpb.Deposit,
|
||||
preGenesisState *stateTrie.BeaconState,
|
||||
eth1data *ethpb.Eth1Data) error {
|
||||
_, span := trace.StartSpan(context.Background(), "beacon-chain.Service.initializeBeaconChain")
|
||||
defer span.End()
|
||||
log.Info("Genesis time reached, starting the beacon chain")
|
||||
s.genesisTime = genesisTime
|
||||
unixTime := uint64(genesisTime.Unix())
|
||||
|
||||
genesisState, err := state.GenesisBeaconState(deposits, unixTime, eth1data)
|
||||
genesisState, err := state.OptimizedGenesisBeaconState(unixTime, preGenesisState, eth1data)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not initialize genesis state")
|
||||
}
|
||||
@@ -173,12 +261,20 @@ func (s *Service) initializeBeaconChain(
|
||||
return errors.Wrap(err, "could not save genesis data")
|
||||
}
|
||||
|
||||
log.Info("Initialized beacon chain genesis state")
|
||||
|
||||
// Clear out all pre-genesis data now that the state is initialized.
|
||||
s.chainStartFetcher.ClearPreGenesisData()
|
||||
|
||||
// Update committee shuffled indices for genesis epoch.
|
||||
if featureconfig.Get().EnableNewCache {
|
||||
if err := helpers.UpdateCommitteeCache(genesisState); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := helpers.UpdateCommitteeCache(genesisState, 0 /* genesis epoch */); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := helpers.UpdateProposerIndicesInCache(genesisState, 0 /* genesis epoch */); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.opsService.SetGenesisTime(genesisState.GenesisTime())
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -198,66 +294,32 @@ func (s *Service) Status() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// StateInitializedFeed returns a feed that is written to
|
||||
// when the beacon state is first initialized.
|
||||
func (s *Service) StateInitializedFeed() *event.Feed {
|
||||
return s.stateInitializedFeed
|
||||
// ClearCachedStates removes all stored caches states. This is done after the node
|
||||
// is synced.
|
||||
func (s *Service) ClearCachedStates() {
|
||||
s.initSyncState = map[[32]byte]*stateTrie.BeaconState{}
|
||||
}
|
||||
|
||||
// HeadUpdatedFeed is a feed containing the head block root and
|
||||
// is written to when a new head block is saved to DB.
|
||||
func (s *Service) HeadUpdatedFeed() *event.Feed {
|
||||
return s.headUpdatedFeed
|
||||
}
|
||||
// This gets called when beacon chain is first initialized to save validator indices and public keys in db.
|
||||
func (s *Service) saveGenesisValidators(ctx context.Context, state *stateTrie.BeaconState) error {
|
||||
pubkeys := make([][48]byte, state.NumValidators())
|
||||
indices := make([]uint64, state.NumValidators())
|
||||
|
||||
// This gets called to update canonical root mapping.
|
||||
func (s *Service) saveHead(ctx context.Context, b *ethpb.BeaconBlock, r [32]byte) error {
|
||||
s.headLock.Lock()
|
||||
defer s.headLock.Unlock()
|
||||
|
||||
s.headSlot = b.Slot
|
||||
|
||||
s.canonicalRoots[b.Slot] = r[:]
|
||||
|
||||
if err := s.beaconDB.SaveHeadBlockRoot(ctx, r); err != nil {
|
||||
return errors.Wrap(err, "could not save head root in DB")
|
||||
for i := 0; i < state.NumValidators(); i++ {
|
||||
pubkeys[i] = state.PubkeyAtIndex(uint64(i))
|
||||
indices[i] = uint64(i)
|
||||
}
|
||||
s.headBlock = b
|
||||
return s.beaconDB.SaveValidatorIndices(ctx, pubkeys, indices)
|
||||
}
|
||||
|
||||
headState, err := s.beaconDB.State(ctx, r)
|
||||
// This gets called when beacon chain is first initialized to save genesis data (state, block, and more) in db.
|
||||
func (s *Service) saveGenesisData(ctx context.Context, genesisState *stateTrie.BeaconState) error {
|
||||
stateRoot, err := genesisState.HashTreeRoot()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not retrieve head state in DB")
|
||||
}
|
||||
s.headState = headState
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": b.Slot,
|
||||
"headRoot": fmt.Sprintf("%#x", r),
|
||||
}).Debug("Saved new head info")
|
||||
return nil
|
||||
}
|
||||
|
||||
// This gets called when beacon chain is first initialized to save validator indices and pubkeys in db
|
||||
func (s *Service) saveGenesisValidators(ctx context.Context, state *pb.BeaconState) error {
|
||||
for i, v := range state.Validators {
|
||||
if err := s.beaconDB.SaveValidatorIndex(ctx, bytesutil.ToBytes48(v.PublicKey), uint64(i)); err != nil {
|
||||
return errors.Wrapf(err, "could not save validator index: %d", i)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// This gets called when beacon chain is first initialized to save genesis data (state, block, and more) in db
|
||||
func (s *Service) saveGenesisData(ctx context.Context, genesisState *pb.BeaconState) error {
|
||||
s.headLock.Lock()
|
||||
defer s.headLock.Unlock()
|
||||
|
||||
stateRoot, err := ssz.HashTreeRoot(genesisState)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not tree hash genesis state")
|
||||
return err
|
||||
}
|
||||
genesisBlk := blocks.NewGenesisBlock(stateRoot[:])
|
||||
genesisBlkRoot, err := ssz.SigningRoot(genesisBlk)
|
||||
genesisBlkRoot, err := ssz.HashTreeRoot(genesisBlk.Block)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get genesis block root")
|
||||
}
|
||||
@@ -265,35 +327,86 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState *pb.BeaconSt
|
||||
if err := s.beaconDB.SaveBlock(ctx, genesisBlk); err != nil {
|
||||
return errors.Wrap(err, "could not save genesis block")
|
||||
}
|
||||
if featureconfig.Get().NewStateMgmt {
|
||||
if err := s.stateGen.SaveState(ctx, genesisBlkRoot, genesisState); err != nil {
|
||||
return errors.Wrap(err, "could not save genesis state")
|
||||
}
|
||||
if err := s.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{
|
||||
Slot: 0,
|
||||
Root: genesisBlkRoot[:],
|
||||
BoundaryRoot: genesisBlkRoot[:],
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if err := s.beaconDB.SaveState(ctx, genesisState, genesisBlkRoot); err != nil {
|
||||
return errors.Wrap(err, "could not save genesis state")
|
||||
}
|
||||
}
|
||||
if err := s.beaconDB.SaveHeadBlockRoot(ctx, genesisBlkRoot); err != nil {
|
||||
return errors.Wrap(err, "could not save head block root")
|
||||
}
|
||||
if err := s.beaconDB.SaveGenesisBlockRoot(ctx, genesisBlkRoot); err != nil {
|
||||
return errors.Wrap(err, "could save genesis block root")
|
||||
}
|
||||
if err := s.beaconDB.SaveState(ctx, genesisState, genesisBlkRoot); err != nil {
|
||||
return errors.Wrap(err, "could not save genesis state")
|
||||
}
|
||||
if err := s.saveGenesisValidators(ctx, genesisState); err != nil {
|
||||
return errors.Wrap(err, "could not save genesis validators")
|
||||
}
|
||||
|
||||
genesisCheckpoint := ðpb.Checkpoint{Root: genesisBlkRoot[:]}
|
||||
if err := s.forkChoiceStore.GenesisStore(ctx, genesisCheckpoint, genesisCheckpoint); err != nil {
|
||||
return errors.Wrap(err, "Could not start fork choice service: %v")
|
||||
|
||||
// Add the genesis block to the fork choice store.
|
||||
s.justifiedCheckpt = stateTrie.CopyCheckpoint(genesisCheckpoint)
|
||||
s.prevJustifiedCheckpt = stateTrie.CopyCheckpoint(genesisCheckpoint)
|
||||
s.bestJustifiedCheckpt = stateTrie.CopyCheckpoint(genesisCheckpoint)
|
||||
s.finalizedCheckpt = stateTrie.CopyCheckpoint(genesisCheckpoint)
|
||||
s.prevFinalizedCheckpt = stateTrie.CopyCheckpoint(genesisCheckpoint)
|
||||
|
||||
if err := s.forkChoiceStore.ProcessBlock(ctx,
|
||||
genesisBlk.Block.Slot,
|
||||
genesisBlkRoot,
|
||||
params.BeaconConfig().ZeroHash,
|
||||
genesisCheckpoint.Epoch,
|
||||
genesisCheckpoint.Epoch); err != nil {
|
||||
log.Fatalf("Could not process genesis block for fork choice: %v", err)
|
||||
}
|
||||
|
||||
s.headBlock = genesisBlk
|
||||
s.headState = genesisState
|
||||
s.canonicalRoots[genesisState.Slot] = genesisBlkRoot[:]
|
||||
s.setHead(genesisBlkRoot, genesisBlk, genesisState)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// This gets called to initialize chain info variables using the finalized checkpoint stored in DB
|
||||
func (s *Service) initializeChainInfo(ctx context.Context) error {
|
||||
s.headLock.Lock()
|
||||
defer s.headLock.Unlock()
|
||||
genesisBlock, err := s.beaconDB.GenesisBlock(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get genesis block from db")
|
||||
}
|
||||
if genesisBlock == nil {
|
||||
return errors.New("no genesis block in db")
|
||||
}
|
||||
genesisBlkRoot, err := ssz.HashTreeRoot(genesisBlock.Block)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get signing root of genesis block")
|
||||
}
|
||||
s.genesisRoot = genesisBlkRoot
|
||||
|
||||
if flags.Get().UnsafeSync {
|
||||
headBlock, err := s.beaconDB.HeadBlock(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not retrieve head block")
|
||||
}
|
||||
headRoot, err := ssz.HashTreeRoot(headBlock.Block)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not hash head block")
|
||||
}
|
||||
headState, err := s.beaconDB.HeadState(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not retrieve head state")
|
||||
}
|
||||
s.setHead(headRoot, headBlock, headState)
|
||||
return nil
|
||||
}
|
||||
|
||||
finalized, err := s.beaconDB.FinalizedCheckpoint(ctx)
|
||||
if err != nil {
|
||||
@@ -304,17 +417,71 @@ func (s *Service) initializeChainInfo(ctx context.Context) error {
|
||||
// would be the genesis state and block.
|
||||
return errors.New("no finalized epoch in the database")
|
||||
}
|
||||
s.headState, err = s.beaconDB.State(ctx, bytesutil.ToBytes32(finalized.Root))
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get finalized state from db")
|
||||
finalizedRoot := bytesutil.ToBytes32(finalized.Root)
|
||||
var finalizedState *stateTrie.BeaconState
|
||||
if featureconfig.Get().NewStateMgmt {
|
||||
finalizedRoot = s.beaconDB.LastArchivedIndexRoot(ctx)
|
||||
finalizedState, err = s.stateGen.Resume(ctx, finalizedRoot)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get finalized state from db")
|
||||
}
|
||||
if finalizedRoot == params.BeaconConfig().ZeroHash {
|
||||
finalizedRoot = bytesutil.ToBytes32(finalized.Root)
|
||||
}
|
||||
} else {
|
||||
finalizedState, err = s.beaconDB.State(ctx, bytesutil.ToBytes32(finalized.Root))
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get finalized state from db")
|
||||
}
|
||||
}
|
||||
s.headBlock, err = s.beaconDB.Block(ctx, bytesutil.ToBytes32(finalized.Root))
|
||||
|
||||
finalizedBlock, err := s.beaconDB.Block(ctx, finalizedRoot)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get finalized block from db")
|
||||
}
|
||||
|
||||
s.headSlot = s.headState.Slot
|
||||
s.canonicalRoots[s.headSlot] = finalized.Root
|
||||
if finalizedState == nil || finalizedBlock == nil {
|
||||
return errors.New("finalized state and block can't be nil")
|
||||
}
|
||||
s.setHead(finalizedRoot, finalizedBlock, finalizedState)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// This is called when a client starts from a non-genesis slot. It deletes the states in DB
|
||||
// from slot 1 (avoid genesis state) to `slot`.
|
||||
func (s *Service) pruneGarbageState(ctx context.Context, slot uint64) error {
|
||||
if featureconfig.Get().DontPruneStateStartUp {
|
||||
return nil
|
||||
}
|
||||
|
||||
filter := filters.NewFilter().SetStartSlot(1).SetEndSlot(slot)
|
||||
roots, err := s.beaconDB.BlockRoots(ctx, filter)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.beaconDB.DeleteStates(ctx, roots); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// This is called when a client starts from non-genesis slot. This passes last justified and finalized
|
||||
// information to fork choice service to initializes fork choice store.
|
||||
func (s *Service) resumeForkChoice(justifiedCheckpoint *ethpb.Checkpoint, finalizedCheckpoint *ethpb.Checkpoint) {
|
||||
store := protoarray.New(justifiedCheckpoint.Epoch, finalizedCheckpoint.Epoch, bytesutil.ToBytes32(finalizedCheckpoint.Root))
|
||||
s.forkChoiceStore = store
|
||||
}
|
||||
|
||||
// This returns true if block has been processed before. Two ways to verify the block has been processed:
|
||||
// 1.) Check fork choice store.
|
||||
// 2.) Check DB.
|
||||
// Checking 1.) is ten times faster than checking 2.)
|
||||
func (s *Service) hasBlock(ctx context.Context, root [32]byte) bool {
|
||||
if s.forkChoiceStore.HasNode(root) {
|
||||
return true
|
||||
}
|
||||
|
||||
return s.beaconDB.HasBlock(ctx, root)
|
||||
}
|
||||
|
||||
@@ -6,7 +6,6 @@ import (
|
||||
"testing"
|
||||
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
@@ -19,19 +18,16 @@ func TestChainService_SaveHead_DataRace(t *testing.T) {
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
s := &Service{
|
||||
beaconDB: db,
|
||||
canonicalRoots: make(map[uint64][]byte),
|
||||
beaconDB: db,
|
||||
}
|
||||
go func() {
|
||||
s.saveHead(
|
||||
context.Background(),
|
||||
ðpb.BeaconBlock{Slot: 777},
|
||||
[32]byte{},
|
||||
)
|
||||
}()
|
||||
s.saveHead(
|
||||
context.Background(),
|
||||
ðpb.BeaconBlock{Slot: 888},
|
||||
[32]byte{},
|
||||
)
|
||||
}
|
||||
|
||||
@@ -4,28 +4,31 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"math/big"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
ethereum "github.com/ethereum/go-ethereum"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
gethTypes "github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/gogo/protobuf/proto"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
ssz "github.com/prysmaticlabs/go-ssz"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
|
||||
b "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db/filters"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
|
||||
beaconstate "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
protodb "github.com/prysmaticlabs/prysm/proto/beacon/db"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/event"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
@@ -33,10 +36,6 @@ import (
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
|
||||
// Ensure Service implements interfaces.
|
||||
var _ = ChainFeeds(&Service{})
|
||||
var _ = NewHeadNotifier(&Service{})
|
||||
|
||||
func init() {
|
||||
logrus.SetLevel(logrus.DebugLevel)
|
||||
logrus.SetOutput(ioutil.Discard)
|
||||
@@ -46,16 +45,20 @@ type store struct {
|
||||
headRoot []byte
|
||||
}
|
||||
|
||||
func (s *store) OnBlock(ctx context.Context, b *ethpb.BeaconBlock) error {
|
||||
return nil
|
||||
func (s *store) OnBlock(ctx context.Context, b *ethpb.SignedBeaconBlock) (*beaconstate.BeaconState, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (s *store) OnBlockNoVerifyStateTransition(ctx context.Context, b *ethpb.BeaconBlock) error {
|
||||
return nil
|
||||
func (s *store) OnBlockCacheFilteredTree(ctx context.Context, b *ethpb.SignedBeaconBlock) (*beaconstate.BeaconState, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (s *store) OnAttestation(ctx context.Context, a *ethpb.Attestation) (uint64, error) {
|
||||
return 0, nil
|
||||
func (s *store) OnBlockInitialSyncStateTransition(ctx context.Context, b *ethpb.SignedBeaconBlock) (*beaconstate.BeaconState, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (s *store) OnAttestation(ctx context.Context, a *ethpb.Attestation) ([]uint64, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (s *store) GenesisStore(ctx context.Context, justifiedCheckpoint *ethpb.Checkpoint, finalizedCheckpoint *ethpb.Checkpoint) error {
|
||||
@@ -66,109 +69,24 @@ func (s *store) FinalizedCheckpt() *ethpb.Checkpoint {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *store) JustifiedCheckpt() *ethpb.Checkpoint {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *store) Head(ctx context.Context) ([]byte, error) {
|
||||
return s.headRoot, nil
|
||||
}
|
||||
|
||||
type mockOperationService struct{}
|
||||
|
||||
func (ms *mockOperationService) IncomingProcessedBlockFeed() *event.Feed {
|
||||
return new(event.Feed)
|
||||
type mockBeaconNode struct {
|
||||
stateFeed *event.Feed
|
||||
}
|
||||
|
||||
func (ms *mockOperationService) IncomingAttFeed() *event.Feed {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ms *mockOperationService) IncomingExitFeed() *event.Feed {
|
||||
return nil
|
||||
}
|
||||
|
||||
type mockClient struct{}
|
||||
|
||||
func (m *mockClient) SubscribeNewHead(ctx context.Context, ch chan<- *gethTypes.Header) (ethereum.Subscription, error) {
|
||||
return new(event.Feed).Subscribe(ch), nil
|
||||
}
|
||||
|
||||
func (m *mockClient) BlockByHash(ctx context.Context, hash common.Hash) (*gethTypes.Block, error) {
|
||||
head := &gethTypes.Header{Number: big.NewInt(0), Difficulty: big.NewInt(100)}
|
||||
return gethTypes.NewBlockWithHeader(head), nil
|
||||
}
|
||||
|
||||
func (m *mockClient) BlockByNumber(ctx context.Context, number *big.Int) (*gethTypes.Block, error) {
|
||||
head := &gethTypes.Header{Number: big.NewInt(0), Difficulty: big.NewInt(100)}
|
||||
return gethTypes.NewBlockWithHeader(head), nil
|
||||
}
|
||||
|
||||
func (m *mockClient) HeaderByNumber(ctx context.Context, number *big.Int) (*gethTypes.Header, error) {
|
||||
return &gethTypes.Header{Number: big.NewInt(0), Difficulty: big.NewInt(100)}, nil
|
||||
}
|
||||
|
||||
func (m *mockClient) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- gethTypes.Log) (ethereum.Subscription, error) {
|
||||
return new(event.Feed).Subscribe(ch), nil
|
||||
}
|
||||
|
||||
func (m *mockClient) CallContract(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) {
|
||||
return []byte{'t', 'e', 's', 't'}, nil
|
||||
}
|
||||
|
||||
func (m *mockClient) CodeAt(ctx context.Context, account common.Address, blockNumber *big.Int) ([]byte, error) {
|
||||
return []byte{'t', 'e', 's', 't'}, nil
|
||||
}
|
||||
|
||||
func (m *mockClient) FilterLogs(ctx context.Context, q ethereum.FilterQuery) ([]gethTypes.Log, error) {
|
||||
logs := make([]gethTypes.Log, 3)
|
||||
for i := 0; i < len(logs); i++ {
|
||||
logs[i].Address = common.Address{}
|
||||
logs[i].Topics = make([]common.Hash, 5)
|
||||
logs[i].Topics[0] = common.Hash{'a'}
|
||||
logs[i].Topics[1] = common.Hash{'b'}
|
||||
logs[i].Topics[2] = common.Hash{'c'}
|
||||
|
||||
// StateFeed mocks the same method in the beacon node.
|
||||
func (mbn *mockBeaconNode) StateFeed() *event.Feed {
|
||||
if mbn.stateFeed == nil {
|
||||
mbn.stateFeed = new(event.Feed)
|
||||
}
|
||||
return logs, nil
|
||||
}
|
||||
|
||||
func (m *mockClient) LatestBlockHash() common.Hash {
|
||||
return common.BytesToHash([]byte{'A'})
|
||||
}
|
||||
|
||||
type faultyClient struct{}
|
||||
|
||||
func (f *faultyClient) SubscribeNewHead(ctx context.Context, ch chan<- *gethTypes.Header) (ethereum.Subscription, error) {
|
||||
return new(event.Feed).Subscribe(ch), nil
|
||||
}
|
||||
|
||||
func (f *faultyClient) BlockByHash(ctx context.Context, hash common.Hash) (*gethTypes.Block, error) {
|
||||
return nil, errors.New("failed")
|
||||
}
|
||||
|
||||
func (f *faultyClient) BlockByNumber(ctx context.Context, number *big.Int) (*gethTypes.Block, error) {
|
||||
return nil, errors.New("failed")
|
||||
}
|
||||
|
||||
func (f *faultyClient) HeaderByNumber(ctx context.Context, number *big.Int) (*gethTypes.Header, error) {
|
||||
return nil, errors.New("failed")
|
||||
}
|
||||
|
||||
func (f *faultyClient) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- gethTypes.Log) (ethereum.Subscription, error) {
|
||||
return new(event.Feed).Subscribe(ch), nil
|
||||
}
|
||||
|
||||
func (f *faultyClient) FilterLogs(ctx context.Context, q ethereum.FilterQuery) ([]gethTypes.Log, error) {
|
||||
return nil, errors.New("unable to retrieve logs")
|
||||
}
|
||||
|
||||
func (f *faultyClient) CallContract(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) {
|
||||
return []byte{}, errors.New("unable to retrieve contract code")
|
||||
}
|
||||
|
||||
func (f *faultyClient) CodeAt(ctx context.Context, account common.Address, blockNumber *big.Int) ([]byte, error) {
|
||||
return []byte{}, errors.New("unable to retrieve contract code")
|
||||
}
|
||||
|
||||
func (f *faultyClient) LatestBlockHash() common.Hash {
|
||||
return common.BytesToHash([]byte{'A'})
|
||||
return mbn.stateFeed
|
||||
}
|
||||
|
||||
type mockBroadcaster struct {
|
||||
@@ -182,24 +100,32 @@ func (mb *mockBroadcaster) Broadcast(_ context.Context, _ proto.Message) error {
|
||||
|
||||
var _ = p2p.Broadcaster(&mockBroadcaster{})
|
||||
|
||||
func setupGenesisBlock(t *testing.T, cs *Service) ([32]byte, *ethpb.BeaconBlock) {
|
||||
genesis := b.NewGenesisBlock([]byte{})
|
||||
if err := cs.beaconDB.SaveBlock(context.Background(), genesis); err != nil {
|
||||
t.Fatalf("could not save block to db: %v", err)
|
||||
}
|
||||
parentHash, err := ssz.SigningRoot(genesis)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to get tree hash root of canonical head: %v", err)
|
||||
}
|
||||
return parentHash, genesis
|
||||
}
|
||||
|
||||
func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
|
||||
endpoint := "ws://127.0.0.1"
|
||||
ctx := context.Background()
|
||||
var web3Service *powchain.Service
|
||||
var err error
|
||||
bState, _ := testutil.DeterministicGenesisState(t, 10)
|
||||
err = beaconDB.SavePowchainData(ctx, &protodb.ETH1ChainData{
|
||||
BeaconState: bState.InnerStateUnsafe(),
|
||||
Trie: &protodb.SparseMerkleTrie{},
|
||||
CurrentEth1Data: &protodb.LatestETH1Data{
|
||||
BlockHash: make([]byte, 32),
|
||||
},
|
||||
ChainstartData: &protodb.ChainStartData{
|
||||
Eth1Data: ðpb.Eth1Data{
|
||||
DepositRoot: make([]byte, 32),
|
||||
DepositCount: 0,
|
||||
BlockHash: make([]byte, 32),
|
||||
},
|
||||
},
|
||||
DepositContainers: []*protodb.DepositContainer{},
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
web3Service, err = powchain.NewService(ctx, &powchain.Web3ServiceConfig{
|
||||
BeaconDB: beaconDB,
|
||||
ETH1Endpoint: endpoint,
|
||||
DepositContract: common.Address{},
|
||||
})
|
||||
@@ -207,52 +133,76 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
|
||||
t.Fatalf("unable to set up web3 service: %v", err)
|
||||
}
|
||||
|
||||
opsService, err := attestations.NewService(ctx, &attestations.Config{Pool: attestations.NewPool()})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
cfg := &Config{
|
||||
BeaconBlockBuf: 0,
|
||||
BeaconDB: beaconDB,
|
||||
DepositCache: depositcache.NewDepositCache(),
|
||||
ChainStartFetcher: web3Service,
|
||||
OpsPoolService: &mockOperationService{},
|
||||
P2p: &mockBroadcaster{},
|
||||
StateNotifier: &mockBeaconNode{},
|
||||
AttPool: attestations.NewPool(),
|
||||
ForkChoiceStore: protoarray.New(0, 0, params.BeaconConfig().ZeroHash),
|
||||
OpsService: opsService,
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatalf("could not register blockchain service: %v", err)
|
||||
}
|
||||
|
||||
chainService, err := NewService(ctx, cfg)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to setup chain service: %v", err)
|
||||
}
|
||||
chainService.genesisTime = time.Unix(1, 0) // non-zero time
|
||||
|
||||
return chainService
|
||||
}
|
||||
|
||||
func TestChainStartStop_Uninitialized(t *testing.T) {
|
||||
helpers.ClearAllCaches()
|
||||
hook := logTest.NewGlobal()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
chainService := setupBeaconChain(t, db)
|
||||
|
||||
// Test the start function.
|
||||
genesisChan := make(chan time.Time, 0)
|
||||
sub := chainService.stateInitializedFeed.Subscribe(genesisChan)
|
||||
defer sub.Unsubscribe()
|
||||
// Listen for state events.
|
||||
stateSubChannel := make(chan *feed.Event, 1)
|
||||
stateSub := chainService.stateNotifier.StateFeed().Subscribe(stateSubChannel)
|
||||
|
||||
// Test the chain start state notifier.
|
||||
genesisTime := time.Unix(1, 0)
|
||||
chainService.Start()
|
||||
chainService.chainStartChan <- time.Unix(0, 0)
|
||||
genesisTime := <-genesisChan
|
||||
if genesisTime != time.Unix(0, 0) {
|
||||
t.Errorf(
|
||||
"Expected genesis time to equal chainstart time (%v), received %v",
|
||||
time.Unix(0, 0),
|
||||
genesisTime,
|
||||
)
|
||||
event := &feed.Event{
|
||||
Type: statefeed.ChainStarted,
|
||||
Data: &statefeed.ChainStartedData{
|
||||
StartTime: genesisTime,
|
||||
},
|
||||
}
|
||||
// Send in a loop to ensure it is delivered (busy wait for the service to subscribe to the state feed).
|
||||
for sent := 1; sent == 1; {
|
||||
sent = chainService.stateNotifier.StateFeed().Send(event)
|
||||
if sent == 1 {
|
||||
// Flush our local subscriber.
|
||||
<-stateSubChannel
|
||||
}
|
||||
}
|
||||
|
||||
// Now wait for notification the state is ready.
|
||||
for stateInitialized := false; stateInitialized == false; {
|
||||
recv := <-stateSubChannel
|
||||
if recv.Type == statefeed.Initialized {
|
||||
stateInitialized = true
|
||||
}
|
||||
}
|
||||
stateSub.Unsubscribe()
|
||||
|
||||
beaconState, err := db.HeadState(context.Background())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if beaconState == nil || beaconState.Slot != 0 {
|
||||
if beaconState == nil || beaconState.Slot() != 0 {
|
||||
t.Error("Expected canonical state feed to send a state with genesis block")
|
||||
}
|
||||
if err := chainService.Stop(); err != nil {
|
||||
@@ -263,7 +213,7 @@ func TestChainStartStop_Uninitialized(t *testing.T) {
|
||||
t.Error("Context was not canceled")
|
||||
}
|
||||
testutil.AssertLogsContain(t, hook, "Waiting")
|
||||
testutil.AssertLogsContain(t, hook, "Genesis time reached")
|
||||
testutil.AssertLogsContain(t, hook, "Initialized beacon chain genesis state")
|
||||
}
|
||||
|
||||
func TestChainStartStop_Initialized(t *testing.T) {
|
||||
@@ -275,22 +225,26 @@ func TestChainStartStop_Initialized(t *testing.T) {
|
||||
chainService := setupBeaconChain(t, db)
|
||||
|
||||
genesisBlk := b.NewGenesisBlock([]byte{})
|
||||
blkRoot, err := ssz.SigningRoot(genesisBlk)
|
||||
blkRoot, err := ssz.HashTreeRoot(genesisBlk.Block)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := db.SaveBlock(ctx, genesisBlk); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
s, err := beaconstate.InitializeFromProto(&pb.BeaconState{Slot: 1})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := db.SaveState(ctx, s, blkRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := db.SaveHeadBlockRoot(ctx, blkRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := db.SaveGenesisBlockRoot(ctx, blkRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := db.SaveState(ctx, &pb.BeaconState{Slot: 1}, blkRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := db.SaveJustifiedCheckpoint(ctx, ðpb.Checkpoint{Root: blkRoot[:]}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -315,33 +269,57 @@ func TestChainService_InitializeBeaconChain(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
bc := setupBeaconChain(t, db)
|
||||
var err error
|
||||
|
||||
// Set up 10 deposits pre chain start for validators to register
|
||||
count := uint64(10)
|
||||
deposits, _, _ := testutil.SetupInitialDeposits(t, count)
|
||||
if err := bc.initializeBeaconChain(ctx, time.Unix(0, 0), deposits, ðpb.Eth1Data{}); err != nil {
|
||||
deposits, _, _ := testutil.DeterministicDepositsAndKeys(count)
|
||||
trie, _, err := testutil.DepositTrieFromDeposits(deposits)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
hashTreeRoot := trie.HashTreeRoot()
|
||||
genState, err := state.EmptyGenesisState()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
genState.SetEth1Data(ðpb.Eth1Data{
|
||||
DepositRoot: hashTreeRoot[:],
|
||||
DepositCount: uint64(len(deposits)),
|
||||
})
|
||||
genState, err = b.ProcessDeposits(ctx, genState, ðpb.BeaconBlockBody{Deposits: deposits})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := bc.initializeBeaconChain(ctx, time.Unix(0, 0), genState, ðpb.Eth1Data{
|
||||
DepositRoot: hashTreeRoot[:],
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
s, err := bc.beaconDB.State(ctx, bytesutil.ToBytes32(bc.canonicalRoots[0]))
|
||||
s, err := bc.beaconDB.State(ctx, bc.headRoot())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for _, v := range s.Validators {
|
||||
if !db.HasValidatorIndex(ctx, bytesutil.ToBytes48(v.PublicKey)) {
|
||||
for _, v := range s.Validators() {
|
||||
if !db.HasValidatorIndex(ctx, v.PublicKey) {
|
||||
t.Errorf("Validator %s missing from db", hex.EncodeToString(v.PublicKey))
|
||||
}
|
||||
}
|
||||
|
||||
if bc.HeadState() == nil {
|
||||
if _, err := bc.HeadState(ctx); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
headBlk, err := bc.HeadBlock(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if headBlk == nil {
|
||||
t.Error("Head state can't be nil after initialize beacon chain")
|
||||
}
|
||||
if bc.HeadBlock() == nil {
|
||||
t.Error("Head state can't be nil after initialize beacon chain")
|
||||
}
|
||||
if bc.CanonicalRoot(0) == nil {
|
||||
t.Error("Canonical root for slot 0 can't be nil after initialize beacon chain")
|
||||
if bc.headRoot() == params.BeaconConfig().ZeroHash {
|
||||
t.Error("Canonical root for slot 0 can't be zeros after initialize beacon chain")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -351,7 +329,7 @@ func TestChainService_InitializeChainInfo(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
genesis := b.NewGenesisBlock([]byte{})
|
||||
genesisRoot, err := ssz.SigningRoot(genesis)
|
||||
genesisRoot, err := ssz.HashTreeRoot(genesis.Block)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -363,9 +341,12 @@ func TestChainService_InitializeChainInfo(t *testing.T) {
|
||||
}
|
||||
|
||||
finalizedSlot := params.BeaconConfig().SlotsPerEpoch*2 + 1
|
||||
headBlock := ðpb.BeaconBlock{Slot: finalizedSlot, ParentRoot: genesisRoot[:]}
|
||||
headState := &pb.BeaconState{Slot: finalizedSlot}
|
||||
headRoot, _ := ssz.SigningRoot(headBlock)
|
||||
headBlock := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: finalizedSlot, ParentRoot: genesisRoot[:]}}
|
||||
headState, err := beaconstate.InitializeFromProto(&pb.BeaconState{Slot: finalizedSlot})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
headRoot, _ := ssz.HashTreeRoot(headBlock.Block)
|
||||
if err := db.SaveState(ctx, headState, headRoot); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -381,20 +362,181 @@ func TestChainService_InitializeChainInfo(t *testing.T) {
|
||||
if err := db.SaveBlock(ctx, headBlock); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
c := &Service{beaconDB: db, canonicalRoots: make(map[uint64][]byte)}
|
||||
c := &Service{beaconDB: db}
|
||||
if err := c.initializeChainInfo(ctx); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(c.HeadBlock(), headBlock) {
|
||||
headBlk, err := c.HeadBlock(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(headBlk, headBlock) {
|
||||
t.Error("head block incorrect")
|
||||
}
|
||||
if !reflect.DeepEqual(c.HeadState(), headState) {
|
||||
t.Error("head block incorrect")
|
||||
s, err := c.HeadState(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if headBlock.Slot != c.HeadSlot() {
|
||||
if !reflect.DeepEqual(s.InnerStateUnsafe(), headState.InnerStateUnsafe()) {
|
||||
t.Error("head state incorrect")
|
||||
}
|
||||
if headBlock.Block.Slot != c.HeadSlot() {
|
||||
t.Error("head slot incorrect")
|
||||
}
|
||||
if !bytes.Equal(headRoot[:], c.HeadRoot()) {
|
||||
r, err := c.HeadRoot(context.Background())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !bytes.Equal(headRoot[:], r) {
|
||||
t.Error("head slot incorrect")
|
||||
}
|
||||
if c.genesisRoot != genesisRoot {
|
||||
t.Error("genesis block root incorrect")
|
||||
}
|
||||
}
|
||||
|
||||
func TestChainService_SaveHeadNoDB(t *testing.T) {
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
ctx := context.Background()
|
||||
s := &Service{
|
||||
beaconDB: db,
|
||||
}
|
||||
b := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: 1}}
|
||||
r, _ := ssz.HashTreeRoot(b)
|
||||
state := &pb.BeaconState{}
|
||||
newState, err := beaconstate.InitializeFromProto(state)
|
||||
s.beaconDB.SaveState(ctx, newState, r)
|
||||
if err := s.saveHeadNoDB(ctx, b, r); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
newB, err := s.beaconDB.HeadBlock(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if reflect.DeepEqual(newB, b) {
|
||||
t.Error("head block should not be equal")
|
||||
}
|
||||
}
|
||||
|
||||
func TestChainService_PruneOldStates(t *testing.T) {
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
ctx := context.Background()
|
||||
s := &Service{
|
||||
beaconDB: db,
|
||||
}
|
||||
|
||||
for i := 0; i < 100; i++ {
|
||||
block := ðpb.BeaconBlock{Slot: uint64(i)}
|
||||
if err := s.beaconDB.SaveBlock(ctx, ðpb.SignedBeaconBlock{Block: block}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
r, err := ssz.HashTreeRoot(block)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
state := &pb.BeaconState{Slot: uint64(i)}
|
||||
newState, err := beaconstate.InitializeFromProto(state)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := s.beaconDB.SaveState(ctx, newState, r); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Delete half of the states.
|
||||
if err := s.pruneGarbageState(ctx, 50); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
filter := filters.NewFilter().SetStartSlot(1).SetEndSlot(100)
|
||||
roots, err := s.beaconDB.BlockRoots(ctx, filter)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for i := 1; i < 50; i++ {
|
||||
s, err := s.beaconDB.State(ctx, roots[i])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if s != nil {
|
||||
t.Errorf("wanted nil for slot %d", i)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestHasBlock_ForkChoiceAndDB(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
defer testDB.TeardownDB(t, db)
|
||||
s := &Service{
|
||||
forkChoiceStore: protoarray.New(0, 0, [32]byte{}),
|
||||
finalizedCheckpt: ðpb.Checkpoint{},
|
||||
beaconDB: db,
|
||||
}
|
||||
block := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Body: ðpb.BeaconBlockBody{}}}
|
||||
r, _ := ssz.HashTreeRoot(block.Block)
|
||||
bs := &pb.BeaconState{FinalizedCheckpoint: ðpb.Checkpoint{}, CurrentJustifiedCheckpoint: ðpb.Checkpoint{}}
|
||||
state, _ := beaconstate.InitializeFromProto(bs)
|
||||
if err := s.insertBlockToForkChoiceStore(ctx, block.Block, r, state); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if s.hasBlock(ctx, [32]byte{}) {
|
||||
t.Error("Should not have block")
|
||||
}
|
||||
|
||||
if !s.hasBlock(ctx, r) {
|
||||
t.Error("Should have block")
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkHasBlockDB(b *testing.B) {
|
||||
db := testDB.SetupDB(b)
|
||||
defer testDB.TeardownDB(b, db)
|
||||
ctx := context.Background()
|
||||
s := &Service{
|
||||
beaconDB: db,
|
||||
}
|
||||
block := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{}}
|
||||
if err := s.beaconDB.SaveBlock(ctx, block); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
r, _ := ssz.HashTreeRoot(block.Block)
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
if !s.beaconDB.HasBlock(ctx, r) {
|
||||
b.Fatal("Block is not in DB")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkHasBlockForkChoiceStore(b *testing.B) {
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(b)
|
||||
defer testDB.TeardownDB(b, db)
|
||||
s := &Service{
|
||||
forkChoiceStore: protoarray.New(0, 0, [32]byte{}),
|
||||
finalizedCheckpt: ðpb.Checkpoint{},
|
||||
beaconDB: db,
|
||||
}
|
||||
block := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Body: ðpb.BeaconBlockBody{}}}
|
||||
r, _ := ssz.HashTreeRoot(block.Block)
|
||||
bs := &pb.BeaconState{FinalizedCheckpoint: ðpb.Checkpoint{}, CurrentJustifiedCheckpoint: ðpb.Checkpoint{}}
|
||||
state, _ := beaconstate.InitializeFromProto(bs)
|
||||
if err := s.insertBlockToForkChoiceStore(ctx, block.Block, r, state); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
if !s.forkChoiceStore.HasNode(r) {
|
||||
b.Fatal("Block is not in fork choice store")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -7,11 +7,18 @@ go_library(
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing",
|
||||
visibility = ["//beacon-chain:__subpackages__"],
|
||||
deps = [
|
||||
"//beacon-chain/core/epoch/precompute:go_default_library",
|
||||
"//beacon-chain/core/feed/block:go_default_library",
|
||||
"//beacon-chain/core/feed/operation:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//proto/beacon/p2p/v1:go_default_library",
|
||||
"//proto/eth/v1alpha1:go_default_library",
|
||||
"//shared/event:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_ssz//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
|
||||
@@ -6,53 +6,131 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/go-ssz"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
|
||||
blockfeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/block"
|
||||
opfeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/operation"
|
||||
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db"
|
||||
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/event"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// ChainService defines the mock interface for testing
|
||||
type ChainService struct {
|
||||
State *pb.BeaconState
|
||||
Root []byte
|
||||
Block *ethpb.BeaconBlock
|
||||
FinalizedCheckPoint *ethpb.Checkpoint
|
||||
StateFeed *event.Feed
|
||||
BlocksReceived []*ethpb.BeaconBlock
|
||||
Genesis time.Time
|
||||
Fork *pb.Fork
|
||||
DB db.Database
|
||||
State *stateTrie.BeaconState
|
||||
Root []byte
|
||||
Block *ethpb.SignedBeaconBlock
|
||||
FinalizedCheckPoint *ethpb.Checkpoint
|
||||
CurrentJustifiedCheckPoint *ethpb.Checkpoint
|
||||
PreviousJustifiedCheckPoint *ethpb.Checkpoint
|
||||
BlocksReceived []*ethpb.SignedBeaconBlock
|
||||
Balance *precompute.Balance
|
||||
Genesis time.Time
|
||||
Fork *pb.Fork
|
||||
DB db.Database
|
||||
stateNotifier statefeed.Notifier
|
||||
blockNotifier blockfeed.Notifier
|
||||
opNotifier opfeed.Notifier
|
||||
ValidAttestation bool
|
||||
}
|
||||
|
||||
// StateNotifier mocks the same method in the chain service.
|
||||
func (ms *ChainService) StateNotifier() statefeed.Notifier {
|
||||
if ms.stateNotifier == nil {
|
||||
ms.stateNotifier = &MockStateNotifier{}
|
||||
}
|
||||
return ms.stateNotifier
|
||||
}
|
||||
|
||||
// BlockNotifier mocks the same method in the chain service.
|
||||
func (ms *ChainService) BlockNotifier() blockfeed.Notifier {
|
||||
if ms.blockNotifier == nil {
|
||||
ms.blockNotifier = &MockBlockNotifier{}
|
||||
}
|
||||
return ms.blockNotifier
|
||||
}
|
||||
|
||||
// MockBlockNotifier mocks the block notifier.
|
||||
type MockBlockNotifier struct {
|
||||
feed *event.Feed
|
||||
}
|
||||
|
||||
// BlockFeed returns a block feed.
|
||||
func (msn *MockBlockNotifier) BlockFeed() *event.Feed {
|
||||
if msn.feed == nil {
|
||||
msn.feed = new(event.Feed)
|
||||
}
|
||||
return msn.feed
|
||||
}
|
||||
|
||||
// MockStateNotifier mocks the state notifier.
|
||||
type MockStateNotifier struct {
|
||||
feed *event.Feed
|
||||
}
|
||||
|
||||
// StateFeed returns a state feed.
|
||||
func (msn *MockStateNotifier) StateFeed() *event.Feed {
|
||||
if msn.feed == nil {
|
||||
msn.feed = new(event.Feed)
|
||||
}
|
||||
return msn.feed
|
||||
}
|
||||
|
||||
// OperationNotifier mocks the same method in the chain service.
|
||||
func (ms *ChainService) OperationNotifier() opfeed.Notifier {
|
||||
if ms.opNotifier == nil {
|
||||
ms.opNotifier = &MockOperationNotifier{}
|
||||
}
|
||||
return ms.opNotifier
|
||||
}
|
||||
|
||||
// MockOperationNotifier mocks the operation notifier.
|
||||
type MockOperationNotifier struct {
|
||||
feed *event.Feed
|
||||
}
|
||||
|
||||
// OperationFeed returns an operation feed.
|
||||
func (mon *MockOperationNotifier) OperationFeed() *event.Feed {
|
||||
if mon.feed == nil {
|
||||
mon.feed = new(event.Feed)
|
||||
}
|
||||
return mon.feed
|
||||
}
|
||||
|
||||
// ReceiveBlock mocks ReceiveBlock method in chain service.
|
||||
func (ms *ChainService) ReceiveBlock(ctx context.Context, block *ethpb.BeaconBlock) error {
|
||||
func (ms *ChainService) ReceiveBlock(ctx context.Context, block *ethpb.SignedBeaconBlock) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReceiveBlockNoVerify mocks ReceiveBlockNoVerify method in chain service.
|
||||
func (ms *ChainService) ReceiveBlockNoVerify(ctx context.Context, block *ethpb.BeaconBlock) error {
|
||||
func (ms *ChainService) ReceiveBlockNoVerify(ctx context.Context, block *ethpb.SignedBeaconBlock) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReceiveBlockNoPubsub mocks ReceiveBlockNoPubsub method in chain service.
|
||||
func (ms *ChainService) ReceiveBlockNoPubsub(ctx context.Context, block *ethpb.BeaconBlock) error {
|
||||
func (ms *ChainService) ReceiveBlockNoPubsub(ctx context.Context, block *ethpb.SignedBeaconBlock) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReceiveBlockNoPubsubForkchoice mocks ReceiveBlockNoPubsubForkchoice method in chain service.
|
||||
func (ms *ChainService) ReceiveBlockNoPubsubForkchoice(ctx context.Context, block *ethpb.BeaconBlock) error {
|
||||
func (ms *ChainService) ReceiveBlockNoPubsubForkchoice(ctx context.Context, block *ethpb.SignedBeaconBlock) error {
|
||||
if ms.State == nil {
|
||||
ms.State = &pb.BeaconState{}
|
||||
ms.State = &stateTrie.BeaconState{}
|
||||
}
|
||||
if !bytes.Equal(ms.Root, block.ParentRoot) {
|
||||
return errors.Errorf("wanted %#x but got %#x", ms.Root, block.ParentRoot)
|
||||
if !bytes.Equal(ms.Root, block.Block.ParentRoot) {
|
||||
return errors.Errorf("wanted %#x but got %#x", ms.Root, block.Block.ParentRoot)
|
||||
}
|
||||
if err := ms.State.SetSlot(block.Block.Slot); err != nil {
|
||||
return err
|
||||
}
|
||||
ms.State.Slot = block.Slot
|
||||
ms.BlocksReceived = append(ms.BlocksReceived, block)
|
||||
signingRoot, err := ssz.SigningRoot(block)
|
||||
signingRoot, err := ssz.HashTreeRoot(block.Block)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -60,7 +138,7 @@ func (ms *ChainService) ReceiveBlockNoPubsubForkchoice(ctx context.Context, bloc
|
||||
if err := ms.DB.SaveBlock(ctx, block); err != nil {
|
||||
return err
|
||||
}
|
||||
logrus.Infof("Saved block with root: %#x at slot %d", signingRoot, block.Slot)
|
||||
logrus.Infof("Saved block with root: %#x at slot %d", signingRoot, block.Block.Slot)
|
||||
}
|
||||
ms.Root = signingRoot[:]
|
||||
ms.Block = block
|
||||
@@ -69,24 +147,26 @@ func (ms *ChainService) ReceiveBlockNoPubsubForkchoice(ctx context.Context, bloc
|
||||
|
||||
// HeadSlot mocks HeadSlot method in chain service.
|
||||
func (ms *ChainService) HeadSlot() uint64 {
|
||||
return ms.State.Slot
|
||||
|
||||
if ms.State == nil {
|
||||
return 0
|
||||
}
|
||||
return ms.State.Slot()
|
||||
}
|
||||
|
||||
// HeadRoot mocks HeadRoot method in chain service.
|
||||
func (ms *ChainService) HeadRoot() []byte {
|
||||
return ms.Root
|
||||
func (ms *ChainService) HeadRoot(ctx context.Context) ([]byte, error) {
|
||||
return ms.Root, nil
|
||||
|
||||
}
|
||||
|
||||
// HeadBlock mocks HeadBlock method in chain service.
|
||||
func (ms *ChainService) HeadBlock() *ethpb.BeaconBlock {
|
||||
return ms.Block
|
||||
func (ms *ChainService) HeadBlock(context.Context) (*ethpb.SignedBeaconBlock, error) {
|
||||
return ms.Block, nil
|
||||
}
|
||||
|
||||
// HeadState mocks HeadState method in chain service.
|
||||
func (ms *ChainService) HeadState() *pb.BeaconState {
|
||||
return ms.State
|
||||
func (ms *ChainService) HeadState(context.Context) (*stateTrie.BeaconState, error) {
|
||||
return ms.State, nil
|
||||
}
|
||||
|
||||
// CurrentFork mocks HeadState method in chain service.
|
||||
@@ -99,6 +179,16 @@ func (ms *ChainService) FinalizedCheckpt() *ethpb.Checkpoint {
|
||||
return ms.FinalizedCheckPoint
|
||||
}
|
||||
|
||||
// CurrentJustifiedCheckpt mocks CurrentJustifiedCheckpt method in chain service.
|
||||
func (ms *ChainService) CurrentJustifiedCheckpt() *ethpb.Checkpoint {
|
||||
return ms.CurrentJustifiedCheckPoint
|
||||
}
|
||||
|
||||
// PreviousJustifiedCheckpt mocks PreviousJustifiedCheckpt method in chain service.
|
||||
func (ms *ChainService) PreviousJustifiedCheckpt() *ethpb.Checkpoint {
|
||||
return ms.PreviousJustifiedCheckPoint
|
||||
}
|
||||
|
||||
// ReceiveAttestation mocks ReceiveAttestation method in chain service.
|
||||
func (ms *ChainService) ReceiveAttestation(context.Context, *ethpb.Attestation) error {
|
||||
return nil
|
||||
@@ -109,21 +199,38 @@ func (ms *ChainService) ReceiveAttestationNoPubsub(context.Context, *ethpb.Attes
|
||||
return nil
|
||||
}
|
||||
|
||||
// StateInitializedFeed mocks the same method in the chain service.
|
||||
func (ms *ChainService) StateInitializedFeed() *event.Feed {
|
||||
if ms.StateFeed != nil {
|
||||
return ms.StateFeed
|
||||
// HeadValidatorsIndices mocks the same method in the chain service.
|
||||
func (ms *ChainService) HeadValidatorsIndices(epoch uint64) ([]uint64, error) {
|
||||
if ms.State == nil {
|
||||
return []uint64{}, nil
|
||||
}
|
||||
ms.StateFeed = new(event.Feed)
|
||||
return ms.StateFeed
|
||||
return helpers.ActiveValidatorIndices(ms.State, epoch)
|
||||
}
|
||||
|
||||
// HeadUpdatedFeed mocks the same method in the chain service.
|
||||
func (ms *ChainService) HeadUpdatedFeed() *event.Feed {
|
||||
return new(event.Feed)
|
||||
// HeadSeed mocks the same method in the chain service.
|
||||
func (ms *ChainService) HeadSeed(epoch uint64) ([32]byte, error) {
|
||||
return helpers.Seed(ms.State, epoch, params.BeaconConfig().DomainBeaconAttester)
|
||||
}
|
||||
|
||||
// GenesisTime mocks the same method in the chain service.
|
||||
func (ms *ChainService) GenesisTime() time.Time {
|
||||
return ms.Genesis
|
||||
}
|
||||
|
||||
// CurrentSlot mocks the same method in the chain service.
|
||||
func (ms *ChainService) CurrentSlot() uint64 {
|
||||
return ms.HeadSlot()
|
||||
}
|
||||
|
||||
// Participation mocks the same method in the chain service.
|
||||
func (ms *ChainService) Participation(epoch uint64) *precompute.Balance {
|
||||
return ms.Balance
|
||||
}
|
||||
|
||||
// IsValidAttestation always returns true.
|
||||
func (ms *ChainService) IsValidAttestation(ctx context.Context, att *ethpb.Attestation) bool {
|
||||
return ms.ValidAttestation
|
||||
}
|
||||
|
||||
// ClearCachedStates does nothing.
|
||||
func (ms *ChainService) ClearCachedStates() {}
|
||||
|
||||
25
beacon-chain/cache/BUILD.bazel
vendored
25
beacon-chain/cache/BUILD.bazel
vendored
@@ -3,27 +3,27 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"active_count.go",
|
||||
"active_indices.go",
|
||||
"attestation_data.go",
|
||||
"checkpoint_state.go",
|
||||
"committee.go",
|
||||
"committee_ids.go",
|
||||
"common.go",
|
||||
"eth1_data.go",
|
||||
"hot_state_cache.go",
|
||||
"skip_slot_cache.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/cache",
|
||||
visibility = ["//beacon-chain:__subpackages__"],
|
||||
deps = [
|
||||
"//proto/beacon/p2p/v1:go_default_library",
|
||||
"//proto/beacon/rpc/v1:go_default_library",
|
||||
"//proto/eth/v1alpha1:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//shared/featureconfig:go_default_library",
|
||||
"//shared/hashutil:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
"//shared/sliceutil:go_default_library",
|
||||
"@com_github_gogo_protobuf//proto:go_default_library",
|
||||
"@com_github_hashicorp_golang_lru//:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
|
||||
"@io_k8s_client_go//tools/cache:go_default_library",
|
||||
],
|
||||
)
|
||||
@@ -32,24 +32,25 @@ go_test(
|
||||
name = "go_default_test",
|
||||
size = "small",
|
||||
srcs = [
|
||||
"active_count_test.go",
|
||||
"active_indices_test.go",
|
||||
"attestation_data_test.go",
|
||||
"benchmarks_test.go",
|
||||
"checkpoint_state_test.go",
|
||||
"committee_fuzz_test.go",
|
||||
"committee_test.go",
|
||||
"eth1_data_test.go",
|
||||
"feature_flag_test.go",
|
||||
"hot_state_cache_test.go",
|
||||
"skip_slot_cache_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
race = "on",
|
||||
deps = [
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//proto/beacon/p2p/v1:go_default_library",
|
||||
"//proto/beacon/rpc/v1:go_default_library",
|
||||
"//proto/eth/v1alpha1:go_default_library",
|
||||
"//shared/bytesutil:go_default_library",
|
||||
"//shared/featureconfig:go_default_library",
|
||||
"//shared/hashutil:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
"@com_github_gogo_protobuf//proto:go_default_library",
|
||||
"@com_github_google_gofuzz//:go_default_library",
|
||||
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
102
beacon-chain/cache/active_count.go
vendored
102
beacon-chain/cache/active_count.go
vendored
@@ -1,102 +0,0 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"strconv"
|
||||
"sync"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"github.com/prysmaticlabs/prysm/shared/featureconfig"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrNotActiveCountInfo will be returned when a cache object is not a pointer to
|
||||
// a ActiveCountByEpoch struct.
|
||||
ErrNotActiveCountInfo = errors.New("object is not a active count obj")
|
||||
|
||||
// maxActiveCountListSize defines the max number of active count can cache.
|
||||
maxActiveCountListSize = 1000
|
||||
|
||||
// Metrics.
|
||||
activeCountCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "active_validator_count_cache_miss",
|
||||
Help: "The number of active validator count requests that aren't present in the cache.",
|
||||
})
|
||||
activeCountCacheHit = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "active_validator_count_cache_hit",
|
||||
Help: "The number of active validator count requests that are present in the cache.",
|
||||
})
|
||||
)
|
||||
|
||||
// ActiveCountByEpoch defines the active validator count per epoch.
|
||||
type ActiveCountByEpoch struct {
|
||||
Epoch uint64
|
||||
ActiveCount uint64
|
||||
}
|
||||
|
||||
// ActiveCountCache is a struct with 1 queue for looking up active count by epoch.
|
||||
type ActiveCountCache struct {
|
||||
activeCountCache *cache.FIFO
|
||||
lock sync.RWMutex
|
||||
}
|
||||
|
||||
// activeCountKeyFn takes the epoch as the key for the active count of a given epoch.
|
||||
func activeCountKeyFn(obj interface{}) (string, error) {
|
||||
aInfo, ok := obj.(*ActiveCountByEpoch)
|
||||
if !ok {
|
||||
return "", ErrNotActiveCountInfo
|
||||
}
|
||||
|
||||
return strconv.Itoa(int(aInfo.Epoch)), nil
|
||||
}
|
||||
|
||||
// NewActiveCountCache creates a new active count cache for storing/accessing active validator count.
|
||||
func NewActiveCountCache() *ActiveCountCache {
|
||||
return &ActiveCountCache{
|
||||
activeCountCache: cache.NewFIFO(activeCountKeyFn),
|
||||
}
|
||||
}
|
||||
|
||||
// ActiveCountInEpoch fetches ActiveCountByEpoch by epoch. Returns true with a
|
||||
// reference to the ActiveCountInEpoch info, if exists. Otherwise returns false, nil.
|
||||
func (c *ActiveCountCache) ActiveCountInEpoch(epoch uint64) (uint64, error) {
|
||||
if !featureconfig.Get().EnableActiveCountCache {
|
||||
return params.BeaconConfig().FarFutureEpoch, nil
|
||||
}
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
obj, exists, err := c.activeCountCache.GetByKey(strconv.Itoa(int(epoch)))
|
||||
if err != nil {
|
||||
return params.BeaconConfig().FarFutureEpoch, err
|
||||
}
|
||||
|
||||
if exists {
|
||||
activeCountCacheHit.Inc()
|
||||
} else {
|
||||
activeCountCacheMiss.Inc()
|
||||
return params.BeaconConfig().FarFutureEpoch, nil
|
||||
}
|
||||
|
||||
aInfo, ok := obj.(*ActiveCountByEpoch)
|
||||
if !ok {
|
||||
return params.BeaconConfig().FarFutureEpoch, ErrNotActiveCountInfo
|
||||
}
|
||||
|
||||
return aInfo.ActiveCount, nil
|
||||
}
|
||||
|
||||
// AddActiveCount adds ActiveCountByEpoch object to the cache. This method also trims the least
|
||||
// recently added ActiveCountByEpoch object if the cache size has ready the max cache size limit.
|
||||
func (c *ActiveCountCache) AddActiveCount(activeCount *ActiveCountByEpoch) error {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
if err := c.activeCountCache.AddIfNotPresent(activeCount); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
trim(c.activeCountCache, maxActiveCountListSize)
|
||||
return nil
|
||||
}
|
||||
83
beacon-chain/cache/active_count_test.go
vendored
83
beacon-chain/cache/active_count_test.go
vendored
@@ -1,83 +0,0 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
)
|
||||
|
||||
func TestActiveCountKeyFn_OK(t *testing.T) {
|
||||
aInfo := &ActiveCountByEpoch{
|
||||
Epoch: 999,
|
||||
ActiveCount: 10,
|
||||
}
|
||||
|
||||
key, err := activeCountKeyFn(aInfo)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if key != strconv.Itoa(int(aInfo.Epoch)) {
|
||||
t.Errorf("Incorrect hash key: %s, expected %s", key, strconv.Itoa(int(aInfo.Epoch)))
|
||||
}
|
||||
}
|
||||
|
||||
func TestActiveCountKeyFn_InvalidObj(t *testing.T) {
|
||||
_, err := activeCountKeyFn("bad")
|
||||
if err != ErrNotActiveCountInfo {
|
||||
t.Errorf("Expected error %v, got %v", ErrNotActiveCountInfo, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestActiveCountCache_ActiveCountByEpoch(t *testing.T) {
|
||||
cache := NewActiveCountCache()
|
||||
|
||||
aInfo := &ActiveCountByEpoch{
|
||||
Epoch: 99,
|
||||
ActiveCount: 11,
|
||||
}
|
||||
activeCount, err := cache.ActiveCountInEpoch(aInfo.Epoch)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if activeCount != params.BeaconConfig().FarFutureEpoch {
|
||||
t.Error("Expected active count not to exist in empty cache")
|
||||
}
|
||||
|
||||
if err := cache.AddActiveCount(aInfo); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
activeCount, err = cache.ActiveCountInEpoch(aInfo.Epoch)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(activeCount, aInfo.ActiveCount) {
|
||||
t.Errorf(
|
||||
"Expected fetched active count to be %v, got %v",
|
||||
aInfo.ActiveCount,
|
||||
activeCount,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
func TestActiveCount_MaxSize(t *testing.T) {
|
||||
cache := NewActiveCountCache()
|
||||
|
||||
for i := uint64(0); i < 1001; i++ {
|
||||
aInfo := &ActiveCountByEpoch{
|
||||
Epoch: i,
|
||||
}
|
||||
if err := cache.AddActiveCount(aInfo); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
if len(cache.activeCountCache.ListKeys()) != maxActiveCountListSize {
|
||||
t.Errorf(
|
||||
"Expected hash cache key size to be %d, got %d",
|
||||
maxActiveCountListSize,
|
||||
len(cache.activeCountCache.ListKeys()),
|
||||
)
|
||||
}
|
||||
}
|
||||
106
beacon-chain/cache/active_indices.go
vendored
106
beacon-chain/cache/active_indices.go
vendored
@@ -1,106 +0,0 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"strconv"
|
||||
"sync"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"github.com/prysmaticlabs/prysm/shared/featureconfig"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrNotActiveIndicesInfo will be returned when a cache object is not a pointer to
|
||||
// a ActiveIndicesByEpoch struct.
|
||||
ErrNotActiveIndicesInfo = errors.New("object is not a active indices list")
|
||||
|
||||
// maxActiveIndicesListSize defines the max number of active indices can cache.
|
||||
maxActiveIndicesListSize = 4
|
||||
|
||||
// Metrics.
|
||||
activeIndicesCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "active_validator_indices_cache_miss",
|
||||
Help: "The number of active validator indices requests that aren't present in the cache.",
|
||||
})
|
||||
activeIndicesCacheHit = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "active_validator_indices_cache_hit",
|
||||
Help: "The number of active validator indices requests that are present in the cache.",
|
||||
})
|
||||
)
|
||||
|
||||
// ActiveIndicesByEpoch defines the active validator indices per epoch.
|
||||
type ActiveIndicesByEpoch struct {
|
||||
Epoch uint64
|
||||
ActiveIndices []uint64
|
||||
}
|
||||
|
||||
// ActiveIndicesCache is a struct with 1 queue for looking up active indices by epoch.
|
||||
type ActiveIndicesCache struct {
|
||||
activeIndicesCache *cache.FIFO
|
||||
lock sync.RWMutex
|
||||
}
|
||||
|
||||
// activeIndicesKeyFn takes the epoch as the key for the active indices of a given epoch.
|
||||
func activeIndicesKeyFn(obj interface{}) (string, error) {
|
||||
aInfo, ok := obj.(*ActiveIndicesByEpoch)
|
||||
if !ok {
|
||||
return "", ErrNotActiveIndicesInfo
|
||||
}
|
||||
|
||||
return strconv.Itoa(int(aInfo.Epoch)), nil
|
||||
}
|
||||
|
||||
// NewActiveIndicesCache creates a new active indices cache for storing/accessing active validator indices.
|
||||
func NewActiveIndicesCache() *ActiveIndicesCache {
|
||||
return &ActiveIndicesCache{
|
||||
activeIndicesCache: cache.NewFIFO(activeIndicesKeyFn),
|
||||
}
|
||||
}
|
||||
|
||||
// ActiveIndicesInEpoch fetches ActiveIndicesByEpoch by epoch. Returns true with a
|
||||
// reference to the ActiveIndicesInEpoch info, if exists. Otherwise returns false, nil.
|
||||
func (c *ActiveIndicesCache) ActiveIndicesInEpoch(epoch uint64) ([]uint64, error) {
|
||||
if !featureconfig.Get().EnableActiveIndicesCache {
|
||||
return nil, nil
|
||||
}
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
obj, exists, err := c.activeIndicesCache.GetByKey(strconv.Itoa(int(epoch)))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if exists {
|
||||
activeIndicesCacheHit.Inc()
|
||||
} else {
|
||||
activeIndicesCacheMiss.Inc()
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
aInfo, ok := obj.(*ActiveIndicesByEpoch)
|
||||
if !ok {
|
||||
return nil, ErrNotActiveIndicesInfo
|
||||
}
|
||||
|
||||
return aInfo.ActiveIndices, nil
|
||||
}
|
||||
|
||||
// AddActiveIndicesList adds ActiveIndicesByEpoch object to the cache. This method also trims the least
|
||||
// recently added ActiveIndicesByEpoch object if the cache size has ready the max cache size limit.
|
||||
func (c *ActiveIndicesCache) AddActiveIndicesList(activeIndices *ActiveIndicesByEpoch) error {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
if err := c.activeIndicesCache.AddIfNotPresent(activeIndices); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
trim(c.activeIndicesCache, maxActiveIndicesListSize)
|
||||
return nil
|
||||
}
|
||||
|
||||
// ActiveIndicesKeys returns the keys of the active indices cache.
|
||||
func (c *ActiveIndicesCache) ActiveIndicesKeys() []string {
|
||||
return c.activeIndicesCache.ListKeys()
|
||||
}
|
||||
82
beacon-chain/cache/active_indices_test.go
vendored
82
beacon-chain/cache/active_indices_test.go
vendored
@@ -1,82 +0,0 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"strconv"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestActiveIndicesKeyFn_OK(t *testing.T) {
|
||||
aInfo := &ActiveIndicesByEpoch{
|
||||
Epoch: 999,
|
||||
ActiveIndices: []uint64{1, 2, 3, 4, 5},
|
||||
}
|
||||
|
||||
key, err := activeIndicesKeyFn(aInfo)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if key != strconv.Itoa(int(aInfo.Epoch)) {
|
||||
t.Errorf("Incorrect hash key: %s, expected %s", key, strconv.Itoa(int(aInfo.Epoch)))
|
||||
}
|
||||
}
|
||||
|
||||
func TestActiveIndicesKeyFn_InvalidObj(t *testing.T) {
|
||||
_, err := activeIndicesKeyFn("bad")
|
||||
if err != ErrNotActiveIndicesInfo {
|
||||
t.Errorf("Expected error %v, got %v", ErrNotActiveIndicesInfo, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestActiveIndicesCache_ActiveIndicesByEpoch(t *testing.T) {
|
||||
cache := NewActiveIndicesCache()
|
||||
|
||||
aInfo := &ActiveIndicesByEpoch{
|
||||
Epoch: 99,
|
||||
ActiveIndices: []uint64{1, 2, 3, 4},
|
||||
}
|
||||
|
||||
activeIndices, err := cache.ActiveIndicesInEpoch(aInfo.Epoch)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if activeIndices != nil {
|
||||
t.Error("Expected active indices not to exist in empty cache")
|
||||
}
|
||||
|
||||
if err := cache.AddActiveIndicesList(aInfo); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
activeIndices, err = cache.ActiveIndicesInEpoch(aInfo.Epoch)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(activeIndices, aInfo.ActiveIndices) {
|
||||
t.Errorf(
|
||||
"Expected fetched active indices to be %v, got %v",
|
||||
aInfo.ActiveIndices,
|
||||
activeIndices,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
func TestActiveIndices_MaxSize(t *testing.T) {
|
||||
cache := NewActiveIndicesCache()
|
||||
|
||||
for i := uint64(0); i < 100; i++ {
|
||||
aInfo := &ActiveIndicesByEpoch{
|
||||
Epoch: i,
|
||||
}
|
||||
if err := cache.AddActiveIndicesList(aInfo); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
if len(cache.activeIndicesCache.ListKeys()) != maxActiveIndicesListSize {
|
||||
t.Errorf(
|
||||
"Expected hash cache key size to be %d, got %d",
|
||||
maxActiveIndicesListSize,
|
||||
len(cache.activeIndicesCache.ListKeys()),
|
||||
)
|
||||
}
|
||||
}
|
||||
38
beacon-chain/cache/attestation_data.go
vendored
38
beacon-chain/cache/attestation_data.go
vendored
@@ -10,9 +10,7 @@ import (
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/rpc/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/featureconfig"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
@@ -59,13 +57,7 @@ func NewAttestationCache() *AttestationCache {
|
||||
|
||||
// Get waits for any in progress calculation to complete before returning a
|
||||
// cached response, if any.
|
||||
func (c *AttestationCache) Get(ctx context.Context, req *pb.AttestationRequest) (*ethpb.AttestationData, error) {
|
||||
if !featureconfig.Get().EnableAttestationCache {
|
||||
// Return a miss result if cache is not enabled.
|
||||
attestationCacheMiss.Inc()
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (c *AttestationCache) Get(ctx context.Context, req *ethpb.AttestationDataRequest) (*ethpb.AttestationData, error) {
|
||||
if req == nil {
|
||||
return nil, errors.New("nil attestation data request")
|
||||
}
|
||||
@@ -113,11 +105,7 @@ func (c *AttestationCache) Get(ctx context.Context, req *pb.AttestationRequest)
|
||||
|
||||
// MarkInProgress a request so that any other similar requests will block on
|
||||
// Get until MarkNotInProgress is called.
|
||||
func (c *AttestationCache) MarkInProgress(req *pb.AttestationRequest) error {
|
||||
if !featureconfig.Get().EnableAttestationCache {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *AttestationCache) MarkInProgress(req *ethpb.AttestationDataRequest) error {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
s, e := reqToKey(req)
|
||||
@@ -127,19 +115,13 @@ func (c *AttestationCache) MarkInProgress(req *pb.AttestationRequest) error {
|
||||
if c.inProgress[s] {
|
||||
return ErrAlreadyInProgress
|
||||
}
|
||||
if featureconfig.Get().EnableAttestationCache {
|
||||
c.inProgress[s] = true
|
||||
}
|
||||
c.inProgress[s] = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarkNotInProgress will release the lock on a given request. This should be
|
||||
// called after put.
|
||||
func (c *AttestationCache) MarkNotInProgress(req *pb.AttestationRequest) error {
|
||||
if !featureconfig.Get().EnableAttestationCache {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *AttestationCache) MarkNotInProgress(req *ethpb.AttestationDataRequest) error {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
s, e := reqToKey(req)
|
||||
@@ -151,11 +133,7 @@ func (c *AttestationCache) MarkNotInProgress(req *pb.AttestationRequest) error {
|
||||
}
|
||||
|
||||
// Put the response in the cache.
|
||||
func (c *AttestationCache) Put(ctx context.Context, req *pb.AttestationRequest, res *ethpb.AttestationData) error {
|
||||
if !featureconfig.Get().EnableAttestationCache {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *AttestationCache) Put(ctx context.Context, req *ethpb.AttestationDataRequest, res *ethpb.AttestationData) error {
|
||||
data := &attestationReqResWrapper{
|
||||
req,
|
||||
res,
|
||||
@@ -180,11 +158,11 @@ func wrapperToKey(i interface{}) (string, error) {
|
||||
return reqToKey(w.req)
|
||||
}
|
||||
|
||||
func reqToKey(req *pb.AttestationRequest) (string, error) {
|
||||
func reqToKey(req *ethpb.AttestationDataRequest) (string, error) {
|
||||
return fmt.Sprintf("%d-%d", req.CommitteeIndex, req.Slot), nil
|
||||
}
|
||||
|
||||
type attestationReqResWrapper struct {
|
||||
req *pb.AttestationRequest
|
||||
req *ethpb.AttestationDataRequest
|
||||
res *ethpb.AttestationData
|
||||
}
|
||||
|
||||
5
beacon-chain/cache/attestation_data_test.go
vendored
5
beacon-chain/cache/attestation_data_test.go
vendored
@@ -5,16 +5,15 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/rpc/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
)
|
||||
|
||||
func TestAttestationCache_RoundTrip(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
c := cache.NewAttestationCache()
|
||||
|
||||
req := &pb.AttestationRequest{
|
||||
req := ðpb.AttestationDataRequest{
|
||||
CommitteeIndex: 0,
|
||||
Slot: 1,
|
||||
}
|
||||
|
||||
45
beacon-chain/cache/benchmarks_test.go
vendored
45
beacon-chain/cache/benchmarks_test.go
vendored
@@ -1,45 +0,0 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
var indices300k = createIndices(300000)
|
||||
var epoch = uint64(1)
|
||||
|
||||
func createIndices(count int) *ActiveIndicesByEpoch {
|
||||
indices := make([]uint64, 0, count)
|
||||
for i := 0; i < count; i++ {
|
||||
indices = append(indices, uint64(i))
|
||||
}
|
||||
return &ActiveIndicesByEpoch{
|
||||
Epoch: epoch,
|
||||
ActiveIndices: indices,
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkCachingAddRetrieve(b *testing.B) {
|
||||
|
||||
c := NewActiveIndicesCache()
|
||||
|
||||
b.Run("ADD300K", func(b *testing.B) {
|
||||
b.N = 10
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
if err := c.AddActiveIndicesList(indices300k); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
b.Run("RETR300K", func(b *testing.B) {
|
||||
b.N = 10
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
if _, err := c.ActiveIndicesInEpoch(epoch); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
}
|
||||
20
beacon-chain/cache/checkpoint_state.go
vendored
20
beacon-chain/cache/checkpoint_state.go
vendored
@@ -4,11 +4,10 @@ import (
|
||||
"errors"
|
||||
"sync"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/shared/hashutil"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
)
|
||||
@@ -19,7 +18,9 @@ var (
|
||||
ErrNotCheckpointState = errors.New("object is not a state by check point struct")
|
||||
|
||||
// maxCheckpointStateSize defines the max number of entries check point to state cache can contain.
|
||||
maxCheckpointStateSize = 4
|
||||
// Choosing 10 to account for multiple forks, this allows 5 forks per epoch boundary with 2 epochs
|
||||
// window to accept attestation based on latest spec.
|
||||
maxCheckpointStateSize = 10
|
||||
|
||||
// Metrics.
|
||||
checkpointStateMiss = promauto.NewCounter(prometheus.CounterOpts{
|
||||
@@ -35,7 +36,7 @@ var (
|
||||
// CheckpointState defines the active validator indices per epoch.
|
||||
type CheckpointState struct {
|
||||
Checkpoint *ethpb.Checkpoint
|
||||
State *pb.BeaconState
|
||||
State *stateTrie.BeaconState
|
||||
}
|
||||
|
||||
// CheckpointStateCache is a struct with 1 queue for looking up state by checkpoint.
|
||||
@@ -67,7 +68,7 @@ func NewCheckpointStateCache() *CheckpointStateCache {
|
||||
|
||||
// StateByCheckpoint fetches state by checkpoint. Returns true with a
|
||||
// reference to the CheckpointState info, if exists. Otherwise returns false, nil.
|
||||
func (c *CheckpointStateCache) StateByCheckpoint(cp *ethpb.Checkpoint) (*pb.BeaconState, error) {
|
||||
func (c *CheckpointStateCache) StateByCheckpoint(cp *ethpb.Checkpoint) (*stateTrie.BeaconState, error) {
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
h, err := hashutil.HashProto(cp)
|
||||
@@ -92,7 +93,7 @@ func (c *CheckpointStateCache) StateByCheckpoint(cp *ethpb.Checkpoint) (*pb.Beac
|
||||
return nil, ErrNotCheckpointState
|
||||
}
|
||||
|
||||
return proto.Clone(info.State).(*pb.BeaconState), nil
|
||||
return info.State.Copy(), nil
|
||||
}
|
||||
|
||||
// AddCheckpointState adds CheckpointState object to the cache. This method also trims the least
|
||||
@@ -100,7 +101,10 @@ func (c *CheckpointStateCache) StateByCheckpoint(cp *ethpb.Checkpoint) (*pb.Beac
|
||||
func (c *CheckpointStateCache) AddCheckpointState(cp *CheckpointState) error {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
if err := c.cache.AddIfNotPresent(cp); err != nil {
|
||||
if err := c.cache.AddIfNotPresent(&CheckpointState{
|
||||
Checkpoint: stateTrie.CopyCheckpoint(cp.Checkpoint),
|
||||
State: cp.State.Copy(),
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
||||
47
beacon-chain/cache/checkpoint_state_test.go
vendored
47
beacon-chain/cache/checkpoint_state_test.go
vendored
@@ -4,16 +4,23 @@ import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/hashutil"
|
||||
)
|
||||
|
||||
func TestCheckpointStateCacheKeyFn_OK(t *testing.T) {
|
||||
cp := ðpb.Checkpoint{Epoch: 1, Root: []byte{'A'}}
|
||||
st, err := stateTrie.InitializeFromProto(&pb.BeaconState{
|
||||
Slot: 64,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
info := &CheckpointState{
|
||||
Checkpoint: cp,
|
||||
State: &pb.BeaconState{Slot: 64},
|
||||
State: st,
|
||||
}
|
||||
key, err := checkpointState(info)
|
||||
if err != nil {
|
||||
@@ -39,9 +46,15 @@ func TestCheckpointStateCache_StateByCheckpoint(t *testing.T) {
|
||||
cache := NewCheckpointStateCache()
|
||||
|
||||
cp1 := ðpb.Checkpoint{Epoch: 1, Root: []byte{'A'}}
|
||||
st, err := stateTrie.InitializeFromProto(&pb.BeaconState{
|
||||
Slot: 64,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
info1 := &CheckpointState{
|
||||
Checkpoint: cp1,
|
||||
State: &pb.BeaconState{Slot: 64},
|
||||
State: st,
|
||||
}
|
||||
state, err := cache.StateByCheckpoint(cp1)
|
||||
if err != nil {
|
||||
@@ -58,14 +71,20 @@ func TestCheckpointStateCache_StateByCheckpoint(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(state, info1.State) {
|
||||
if !reflect.DeepEqual(state.InnerStateUnsafe(), info1.State.InnerStateUnsafe()) {
|
||||
t.Error("incorrectly cached state")
|
||||
}
|
||||
|
||||
cp2 := ðpb.Checkpoint{Epoch: 2, Root: []byte{'B'}}
|
||||
st2, err := stateTrie.InitializeFromProto(&pb.BeaconState{
|
||||
Slot: 128,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
info2 := &CheckpointState{
|
||||
Checkpoint: cp2,
|
||||
State: &pb.BeaconState{Slot: 128},
|
||||
State: st2,
|
||||
}
|
||||
if err := cache.AddCheckpointState(info2); err != nil {
|
||||
t.Fatal(err)
|
||||
@@ -74,7 +93,7 @@ func TestCheckpointStateCache_StateByCheckpoint(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(state, info2.State) {
|
||||
if !reflect.DeepEqual(state.CloneInnerState(), info2.State.CloneInnerState()) {
|
||||
t.Error("incorrectly cached state")
|
||||
}
|
||||
|
||||
@@ -82,18 +101,26 @@ func TestCheckpointStateCache_StateByCheckpoint(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(state, info1.State) {
|
||||
if !reflect.DeepEqual(state.CloneInnerState(), info1.State.CloneInnerState()) {
|
||||
t.Error("incorrectly cached state")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCheckpointStateCache__MaxSize(t *testing.T) {
|
||||
func TestCheckpointStateCache_MaxSize(t *testing.T) {
|
||||
c := NewCheckpointStateCache()
|
||||
|
||||
st, err := stateTrie.InitializeFromProto(&pb.BeaconState{
|
||||
Slot: 0,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
for i := 0; i < maxCheckpointStateSize+100; i++ {
|
||||
if err := st.SetSlot(uint64(i)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
info := &CheckpointState{
|
||||
Checkpoint: ðpb.Checkpoint{Epoch: uint64(i)},
|
||||
State: &pb.BeaconState{Slot: uint64(i)},
|
||||
State: st,
|
||||
}
|
||||
if err := c.AddCheckpointState(info); err != nil {
|
||||
t.Fatal(err)
|
||||
|
||||
193
beacon-chain/cache/committee.go
vendored
193
beacon-chain/cache/committee.go
vendored
@@ -2,12 +2,10 @@ package cache
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"strconv"
|
||||
"sync"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"github.com/prysmaticlabs/prysm/shared/featureconfig"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/sliceutil"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
@@ -18,9 +16,10 @@ var (
|
||||
// a Committee struct.
|
||||
ErrNotCommittee = errors.New("object is not a committee struct")
|
||||
|
||||
// maxShuffledIndicesSize defines the max number of shuffled indices list can cache.
|
||||
// 3 for previous, current epoch and next epoch.
|
||||
maxShuffledIndicesSize = 3
|
||||
// maxCommitteesCacheSize defines the max number of shuffled committees on per randao basis can cache.
|
||||
// Due to reorgs, it's good to keep the old cache around for quickly switch over. 10 is a generous
|
||||
// cache size as it considers 3 concurrent branches over 3 epochs.
|
||||
maxCommitteesCacheSize = 10
|
||||
|
||||
// CommitteeCacheMiss tracks the number of committee requests that aren't present in the cache.
|
||||
CommitteeCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
|
||||
@@ -34,47 +33,45 @@ var (
|
||||
})
|
||||
)
|
||||
|
||||
// Committee defines the committee per epoch and index.
|
||||
type Committee struct {
|
||||
CommitteeCount uint64
|
||||
Epoch uint64
|
||||
Committee []uint64
|
||||
// Committees defines the shuffled committees seed.
|
||||
type Committees struct {
|
||||
CommitteeCount uint64
|
||||
Seed [32]byte
|
||||
ShuffledIndices []uint64
|
||||
SortedIndices []uint64
|
||||
ProposerIndices []uint64
|
||||
}
|
||||
|
||||
// CommitteeCache is a struct with 1 queue for looking up shuffled indices list by epoch and committee index.
|
||||
// CommitteeCache is a struct with 1 queue for looking up shuffled indices list by seed.
|
||||
type CommitteeCache struct {
|
||||
CommitteeCache *cache.FIFO
|
||||
lock sync.RWMutex
|
||||
}
|
||||
|
||||
// committeeKeyFn takes the epoch as the key to retrieve shuffled indices of a committee in a given epoch.
|
||||
// committeeKeyFn takes the seed as the key to retrieve shuffled indices of a committee in a given epoch.
|
||||
func committeeKeyFn(obj interface{}) (string, error) {
|
||||
info, ok := obj.(*Committee)
|
||||
info, ok := obj.(*Committees)
|
||||
if !ok {
|
||||
return "", ErrNotCommittee
|
||||
}
|
||||
|
||||
return strconv.Itoa(int(info.Epoch)), nil
|
||||
return key(info.Seed), nil
|
||||
}
|
||||
|
||||
// NewCommitteeCache creates a new committee cache for storing/accessing shuffled indices of a committee.
|
||||
func NewCommitteeCache() *CommitteeCache {
|
||||
// NewCommitteesCache creates a new committee cache for storing/accessing shuffled indices of a committee.
|
||||
func NewCommitteesCache() *CommitteeCache {
|
||||
return &CommitteeCache{
|
||||
CommitteeCache: cache.NewFIFO(committeeKeyFn),
|
||||
}
|
||||
}
|
||||
|
||||
// ShuffledIndices fetches the shuffled indices by slot and committee index. Every list of indices
|
||||
// Committee fetches the shuffled indices by slot and committee index. Every list of indices
|
||||
// represent one committee. Returns true if the list exists with slot and committee index. Otherwise returns false, nil.
|
||||
func (c *CommitteeCache) ShuffledIndices(slot uint64, index uint64) ([]uint64, error) {
|
||||
if !featureconfig.Get().EnableShuffledIndexCache && !featureconfig.Get().EnableNewCache {
|
||||
return nil, nil
|
||||
}
|
||||
func (c *CommitteeCache) Committee(slot uint64, seed [32]byte, index uint64) ([]uint64, error) {
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
|
||||
epoch := int(slot / params.BeaconConfig().SlotsPerEpoch)
|
||||
obj, exists, err := c.CommitteeCache.GetByKey(strconv.Itoa(epoch))
|
||||
obj, exists, err := c.CommitteeCache.GetByKey(key(seed))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -86,7 +83,7 @@ func (c *CommitteeCache) ShuffledIndices(slot uint64, index uint64) ([]uint64, e
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
item, ok := obj.(*Committee)
|
||||
item, ok := obj.(*Committees)
|
||||
if !ok {
|
||||
return nil, ErrNotCommittee
|
||||
}
|
||||
@@ -98,100 +95,61 @@ func (c *CommitteeCache) ShuffledIndices(slot uint64, index uint64) ([]uint64, e
|
||||
|
||||
indexOffSet := index + (slot%params.BeaconConfig().SlotsPerEpoch)*committeeCountPerSlot
|
||||
start, end := startEndIndices(item, indexOffSet)
|
||||
return item.Committee[start:end], nil
|
||||
|
||||
if int(end) > len(item.ShuffledIndices) || end < start {
|
||||
return nil, errors.New("requested index out of bound")
|
||||
}
|
||||
|
||||
return item.ShuffledIndices[start:end], nil
|
||||
}
|
||||
|
||||
// AddCommitteeShuffledList adds Committee shuffled list object to the cache. T
|
||||
// his method also trims the least recently list if the cache size has ready the max cache size limit.
|
||||
func (c *CommitteeCache) AddCommitteeShuffledList(committee *Committee) error {
|
||||
if !featureconfig.Get().EnableShuffledIndexCache && !featureconfig.Get().EnableNewCache {
|
||||
return nil
|
||||
}
|
||||
func (c *CommitteeCache) AddCommitteeShuffledList(committees *Committees) error {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
if err := c.CommitteeCache.AddIfNotPresent(committee); err != nil {
|
||||
|
||||
if err := c.CommitteeCache.AddIfNotPresent(committees); err != nil {
|
||||
return err
|
||||
}
|
||||
trim(c.CommitteeCache, maxShuffledIndicesSize)
|
||||
trim(c.CommitteeCache, maxCommitteesCacheSize)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Epochs returns the epochs stored in the committee cache. These are the keys to the cache.
|
||||
func (c *CommitteeCache) Epochs() ([]uint64, error) {
|
||||
if !featureconfig.Get().EnableShuffledIndexCache {
|
||||
return nil, nil
|
||||
}
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
// AddProposerIndicesList updates the committee shuffled list with proposer indices.
|
||||
func (c *CommitteeCache) AddProposerIndicesList(seed [32]byte, indices []uint64) error {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
epochs := make([]uint64, len(c.CommitteeCache.ListKeys()))
|
||||
for i, s := range c.CommitteeCache.ListKeys() {
|
||||
epoch, err := strconv.Atoi(s)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
epochs[i] = uint64(epoch)
|
||||
}
|
||||
return epochs, nil
|
||||
}
|
||||
|
||||
// EpochInCache returns true if an input epoch is part of keys in cache.
|
||||
func (c *CommitteeCache) EpochInCache(wantedEpoch uint64) (bool, error) {
|
||||
if !featureconfig.Get().EnableShuffledIndexCache && !featureconfig.Get().EnableNewCache {
|
||||
return false, nil
|
||||
}
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
|
||||
for _, s := range c.CommitteeCache.ListKeys() {
|
||||
epoch, err := strconv.Atoi(s)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if wantedEpoch == uint64(epoch) {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// CommitteeCountPerSlot returns the number of committees in a given slot as stored in cache.
|
||||
func (c *CommitteeCache) CommitteeCountPerSlot(slot uint64) (uint64, bool, error) {
|
||||
if !featureconfig.Get().EnableShuffledIndexCache && !featureconfig.Get().EnableNewCache {
|
||||
return 0, false, nil
|
||||
}
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
epoch := int(slot / params.BeaconConfig().SlotsPerEpoch)
|
||||
obj, exists, err := c.CommitteeCache.GetByKey(strconv.Itoa(int(epoch)))
|
||||
obj, exists, err := c.CommitteeCache.GetByKey(key(seed))
|
||||
if err != nil {
|
||||
return 0, false, err
|
||||
return err
|
||||
}
|
||||
|
||||
if exists {
|
||||
CommitteeCacheHit.Inc()
|
||||
if !exists {
|
||||
committees := &Committees{ProposerIndices: indices}
|
||||
if err := c.CommitteeCache.Add(committees); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
CommitteeCacheMiss.Inc()
|
||||
return 0, false, nil
|
||||
committees, ok := obj.(*Committees)
|
||||
if !ok {
|
||||
return ErrNotCommittee
|
||||
}
|
||||
committees.ProposerIndices = indices
|
||||
if err := c.CommitteeCache.Add(committees); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
item, ok := obj.(*Committee)
|
||||
if !ok {
|
||||
return 0, false, ErrNotCommittee
|
||||
}
|
||||
|
||||
return item.CommitteeCount / params.BeaconConfig().SlotsPerEpoch, true, nil
|
||||
trim(c.CommitteeCache, maxCommitteesCacheSize)
|
||||
return nil
|
||||
}
|
||||
|
||||
// ActiveIndices returns the active indices of a given epoch stored in cache.
|
||||
func (c *CommitteeCache) ActiveIndices(epoch uint64) ([]uint64, error) {
|
||||
if !featureconfig.Get().EnableShuffledIndexCache && !featureconfig.Get().EnableNewCache {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// ActiveIndices returns the active indices of a given seed stored in cache.
|
||||
func (c *CommitteeCache) ActiveIndices(seed [32]byte) ([]uint64, error) {
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
obj, exists, err := c.CommitteeCache.GetByKey(strconv.Itoa(int(epoch)))
|
||||
obj, exists, err := c.CommitteeCache.GetByKey(key(seed))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -203,18 +161,49 @@ func (c *CommitteeCache) ActiveIndices(epoch uint64) ([]uint64, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
item, ok := obj.(*Committee)
|
||||
item, ok := obj.(*Committees)
|
||||
if !ok {
|
||||
return nil, ErrNotCommittee
|
||||
}
|
||||
|
||||
return item.Committee, nil
|
||||
return item.SortedIndices, nil
|
||||
}
|
||||
|
||||
func startEndIndices(c *Committee, index uint64) (uint64, uint64) {
|
||||
validatorCount := uint64(len(c.Committee))
|
||||
// ProposerIndices returns the proposer indices of a given seed.
|
||||
func (c *CommitteeCache) ProposerIndices(seed [32]byte) ([]uint64, error) {
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
obj, exists, err := c.CommitteeCache.GetByKey(key(seed))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if exists {
|
||||
CommitteeCacheHit.Inc()
|
||||
} else {
|
||||
CommitteeCacheMiss.Inc()
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
item, ok := obj.(*Committees)
|
||||
if !ok {
|
||||
return nil, ErrNotCommittee
|
||||
}
|
||||
|
||||
return item.ProposerIndices, nil
|
||||
}
|
||||
|
||||
func startEndIndices(c *Committees, index uint64) (uint64, uint64) {
|
||||
validatorCount := uint64(len(c.ShuffledIndices))
|
||||
start := sliceutil.SplitOffset(validatorCount, c.CommitteeCount, index)
|
||||
end := sliceutil.SplitOffset(validatorCount, c.CommitteeCount, index+1)
|
||||
|
||||
return start, end
|
||||
}
|
||||
|
||||
// Using seed as source for key to handle reorgs in the same epoch.
|
||||
// The seed is derived from state's array of randao mixes and epoch value
|
||||
// hashed together. This avoids collisions on different validator set. Spec definition:
|
||||
// https://github.com/ethereum/eth2.0-specs/blob/v0.9.3/specs/core/0_beacon-chain.md#get_seed
|
||||
func key(seed [32]byte) string {
|
||||
return string(seed[:])
|
||||
}
|
||||
|
||||
68
beacon-chain/cache/committee_fuzz_test.go
vendored
Normal file
68
beacon-chain/cache/committee_fuzz_test.go
vendored
Normal file
@@ -0,0 +1,68 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
fuzz "github.com/google/gofuzz"
|
||||
)
|
||||
|
||||
func TestCommitteeKeyFuzz_OK(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
c := &Committees{}
|
||||
|
||||
for i := 0; i < 100000; i++ {
|
||||
fuzzer.Fuzz(c)
|
||||
k, err := committeeKeyFn(c)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if k != key(c.Seed) {
|
||||
t.Errorf("Incorrect hash k: %s, expected %s", k, key(c.Seed))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCommitteeCache_FuzzCommitteesByEpoch(t *testing.T) {
|
||||
cache := NewCommitteesCache()
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
c := &Committees{}
|
||||
|
||||
for i := 0; i < 100000; i++ {
|
||||
fuzzer.Fuzz(c)
|
||||
if err := cache.AddCommitteeShuffledList(c); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err := cache.Committee(0, c.Seed, 0); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
if len(cache.CommitteeCache.ListKeys()) != maxCommitteesCacheSize {
|
||||
t.Error("Incorrect key size")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCommitteeCache_FuzzActiveIndices(t *testing.T) {
|
||||
cache := NewCommitteesCache()
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
c := &Committees{}
|
||||
|
||||
for i := 0; i < 100000; i++ {
|
||||
fuzzer.Fuzz(c)
|
||||
if err := cache.AddCommitteeShuffledList(c); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
indices, err := cache.ActiveIndices(c.Seed)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(indices, c.SortedIndices) {
|
||||
t.Error("Saved indices not the same")
|
||||
}
|
||||
}
|
||||
|
||||
if len(cache.CommitteeCache.ListKeys()) != maxCommitteesCacheSize {
|
||||
t.Error("Incorrect key size")
|
||||
}
|
||||
}
|
||||
44
beacon-chain/cache/committee_ids.go
vendored
Normal file
44
beacon-chain/cache/committee_ids.go
vendored
Normal file
@@ -0,0 +1,44 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
lru "github.com/hashicorp/golang-lru"
|
||||
"github.com/prysmaticlabs/prysm/shared/sliceutil"
|
||||
)
|
||||
|
||||
type committeeIDs struct {
|
||||
cache *lru.Cache
|
||||
lock sync.RWMutex
|
||||
}
|
||||
|
||||
// CommitteeIDs for attestations.
|
||||
var CommitteeIDs = newCommitteeIDs()
|
||||
|
||||
func newCommitteeIDs() *committeeIDs {
|
||||
cache, err := lru.New(8)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return &committeeIDs{cache: cache}
|
||||
}
|
||||
|
||||
// AddIDs to the cache for attestation committees by epoch.
|
||||
func (t *committeeIDs) AddIDs(indices []uint64, epoch uint64) {
|
||||
t.lock.Lock()
|
||||
defer t.lock.Unlock()
|
||||
val, exists := t.cache.Get(epoch)
|
||||
if exists {
|
||||
indices = sliceutil.UnionUint64(append(indices, val.([]uint64)...))
|
||||
}
|
||||
t.cache.Add(epoch, indices)
|
||||
}
|
||||
|
||||
// GetIDs from the cache for attestation committees by epoch.
|
||||
func (t *committeeIDs) GetIDs(epoch uint64) []uint64 {
|
||||
val, exists := t.cache.Get(epoch)
|
||||
if !exists {
|
||||
return []uint64{}
|
||||
}
|
||||
return val.([]uint64)
|
||||
}
|
||||
218
beacon-chain/cache/committee_test.go
vendored
218
beacon-chain/cache/committee_test.go
vendored
@@ -1,26 +1,29 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"math"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
)
|
||||
|
||||
func TestCommitteeKeyFn_OK(t *testing.T) {
|
||||
item := &Committee{
|
||||
Epoch: 999,
|
||||
CommitteeCount: 1,
|
||||
Committee: []uint64{1, 2, 3, 4, 5},
|
||||
item := &Committees{
|
||||
CommitteeCount: 1,
|
||||
Seed: [32]byte{'A'},
|
||||
ShuffledIndices: []uint64{1, 2, 3, 4, 5},
|
||||
}
|
||||
|
||||
key, err := committeeKeyFn(item)
|
||||
k, err := committeeKeyFn(item)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if key != strconv.Itoa(int(item.Epoch)) {
|
||||
t.Errorf("Incorrect hash key: %s, expected %s", key, strconv.Itoa(int(item.Epoch)))
|
||||
if k != key(item.Seed) {
|
||||
t.Errorf("Incorrect hash k: %s, expected %s", k, key(item.Seed))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -32,17 +35,17 @@ func TestCommitteeKeyFn_InvalidObj(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestCommitteeCache_CommitteesByEpoch(t *testing.T) {
|
||||
cache := NewCommitteeCache()
|
||||
cache := NewCommitteesCache()
|
||||
|
||||
item := &Committee{
|
||||
Epoch: 1,
|
||||
Committee: []uint64{1, 2, 3, 4, 5, 6},
|
||||
CommitteeCount: 3,
|
||||
item := &Committees{
|
||||
ShuffledIndices: []uint64{1, 2, 3, 4, 5, 6},
|
||||
Seed: [32]byte{'A'},
|
||||
CommitteeCount: 3,
|
||||
}
|
||||
|
||||
slot := uint64(item.Epoch * params.BeaconConfig().SlotsPerEpoch)
|
||||
slot := params.BeaconConfig().SlotsPerEpoch
|
||||
committeeIndex := uint64(1)
|
||||
indices, err := cache.ShuffledIndices(slot, committeeIndex)
|
||||
indices, err := cache.Committee(slot, item.Seed, committeeIndex)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -54,102 +57,26 @@ func TestCommitteeCache_CommitteesByEpoch(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
wantedIndex := uint64(0)
|
||||
indices, err = cache.ShuffledIndices(slot, wantedIndex)
|
||||
indices, err = cache.Committee(slot, item.Seed, wantedIndex)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
start, end := startEndIndices(item, wantedIndex)
|
||||
if !reflect.DeepEqual(indices, item.Committee[start:end]) {
|
||||
if !reflect.DeepEqual(indices, item.ShuffledIndices[start:end]) {
|
||||
t.Errorf(
|
||||
"Expected fetched active indices to be %v, got %v",
|
||||
indices,
|
||||
item.Committee[start:end],
|
||||
item.ShuffledIndices[start:end],
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCommitteeCache_CanRotate(t *testing.T) {
|
||||
cache := NewCommitteeCache()
|
||||
item1 := &Committee{Epoch: 1}
|
||||
if err := cache.AddCommitteeShuffledList(item1); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
item2 := &Committee{Epoch: 2}
|
||||
if err := cache.AddCommitteeShuffledList(item2); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
epochs, err := cache.Epochs()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
wanted := item1.Epoch + item2.Epoch
|
||||
if sum(epochs) != wanted {
|
||||
t.Errorf("Wanted: %v, got: %v", wanted, sum(epochs))
|
||||
}
|
||||
|
||||
item3 := &Committee{Epoch: 4}
|
||||
if err := cache.AddCommitteeShuffledList(item3); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
epochs, err = cache.Epochs()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
wanted = item1.Epoch + item2.Epoch + item3.Epoch
|
||||
if sum(epochs) != wanted {
|
||||
t.Errorf("Wanted: %v, got: %v", wanted, sum(epochs))
|
||||
}
|
||||
|
||||
item4 := &Committee{Epoch: 6}
|
||||
if err := cache.AddCommitteeShuffledList(item4); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
epochs, err = cache.Epochs()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
wanted = item2.Epoch + item3.Epoch + item4.Epoch
|
||||
if sum(epochs) != wanted {
|
||||
t.Errorf("Wanted: %v, got: %v", wanted, sum(epochs))
|
||||
}
|
||||
}
|
||||
|
||||
func TestCommitteeCache_EpochInCache(t *testing.T) {
|
||||
cache := NewCommitteeCache()
|
||||
if err := cache.AddCommitteeShuffledList(&Committee{Epoch: 1}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := cache.AddCommitteeShuffledList(&Committee{Epoch: 2}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := cache.AddCommitteeShuffledList(&Committee{Epoch: 99}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := cache.AddCommitteeShuffledList(&Committee{Epoch: 100}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
inCache, err := cache.EpochInCache(1)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if inCache {
|
||||
t.Error("Epoch shouldn't be in cache")
|
||||
}
|
||||
inCache, err = cache.EpochInCache(100)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !inCache {
|
||||
t.Error("Epoch should be in cache")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCommitteeCache_ActiveIndices(t *testing.T) {
|
||||
cache := NewCommitteeCache()
|
||||
cache := NewCommitteesCache()
|
||||
|
||||
item := &Committee{Epoch: 1, Committee: []uint64{1, 2, 3, 4, 5, 6}}
|
||||
indices, err := cache.ActiveIndices(1)
|
||||
item := &Committees{Seed: [32]byte{'A'}, SortedIndices: []uint64{1, 2, 3, 4, 5, 6}}
|
||||
indices, err := cache.ActiveIndices(item.Seed)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -161,19 +88,104 @@ func TestCommitteeCache_ActiveIndices(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
indices, err = cache.ActiveIndices(1)
|
||||
indices, err = cache.ActiveIndices(item.Seed)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(indices, item.Committee) {
|
||||
if !reflect.DeepEqual(indices, item.SortedIndices) {
|
||||
t.Error("Did not receive correct active indices from cache")
|
||||
}
|
||||
}
|
||||
|
||||
func sum(values []uint64) uint64 {
|
||||
sum := uint64(0)
|
||||
for _, v := range values {
|
||||
sum = v + sum
|
||||
func TestCommitteeCache_AddProposerIndicesList(t *testing.T) {
|
||||
cache := NewCommitteesCache()
|
||||
|
||||
seed := [32]byte{'A'}
|
||||
indices := []uint64{1, 2, 3, 4, 5}
|
||||
indices, err := cache.ProposerIndices(seed)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if indices != nil {
|
||||
t.Error("Expected committee count not to exist in empty cache")
|
||||
}
|
||||
if err := cache.AddProposerIndicesList(seed, indices); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
received, err := cache.ProposerIndices(seed)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(indices, received) {
|
||||
t.Error("Did not receive correct proposer indices from cache")
|
||||
}
|
||||
|
||||
item := &Committees{Seed: [32]byte{'B'}, SortedIndices: []uint64{1, 2, 3, 4, 5, 6}}
|
||||
if err := cache.AddCommitteeShuffledList(item); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
indices, err = cache.ProposerIndices(item.Seed)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if indices != nil {
|
||||
t.Error("Expected committee count not to exist in empty cache")
|
||||
}
|
||||
if err := cache.AddProposerIndicesList(item.Seed, indices); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
received, err = cache.ProposerIndices(item.Seed)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(indices, received) {
|
||||
t.Error("Did not receive correct proposer indices from cache")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestCommitteeCache_CanRotate(t *testing.T) {
|
||||
cache := NewCommitteesCache()
|
||||
|
||||
// Should rotate out all the epochs except 190 through 199.
|
||||
for i := 100; i < 200; i++ {
|
||||
s := []byte(strconv.Itoa(i))
|
||||
item := &Committees{Seed: bytesutil.ToBytes32(s)}
|
||||
if err := cache.AddCommitteeShuffledList(item); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
k := cache.CommitteeCache.ListKeys()
|
||||
if len(k) != maxCommitteesCacheSize {
|
||||
t.Errorf("wanted: %d, got: %d", maxCommitteesCacheSize, len(k))
|
||||
}
|
||||
|
||||
sort.Slice(k, func(i, j int) bool {
|
||||
return k[i] < k[j]
|
||||
})
|
||||
s := bytesutil.ToBytes32([]byte(strconv.Itoa(190)))
|
||||
if k[0] != key(s) {
|
||||
t.Error("incorrect key received for slot 190")
|
||||
}
|
||||
s = bytesutil.ToBytes32([]byte(strconv.Itoa(199)))
|
||||
if k[len(k)-1] != key(s) {
|
||||
t.Error("incorrect key received for slot 199")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCommitteeCacheOutOfRange(t *testing.T) {
|
||||
cache := NewCommitteesCache()
|
||||
seed := bytesutil.ToBytes32([]byte("foo"))
|
||||
cache.CommitteeCache.Add(&Committees{
|
||||
CommitteeCount: 1,
|
||||
Seed: seed,
|
||||
ShuffledIndices: []uint64{0},
|
||||
SortedIndices: []uint64{},
|
||||
ProposerIndices: []uint64{},
|
||||
})
|
||||
_, err := cache.Committee(0, seed, math.MaxUint64) // Overflow!
|
||||
if err == nil {
|
||||
t.Fatal("Did not fail as expected")
|
||||
}
|
||||
return sum
|
||||
}
|
||||
|
||||
7
beacon-chain/cache/depositcache/BUILD.bazel
vendored
7
beacon-chain/cache/depositcache/BUILD.bazel
vendored
@@ -9,10 +9,12 @@ go_library(
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache",
|
||||
visibility = ["//beacon-chain:__subpackages__"],
|
||||
deps = [
|
||||
"//proto/eth/v1alpha1:go_default_library",
|
||||
"//proto/beacon/db:go_default_library",
|
||||
"//shared/bytesutil:go_default_library",
|
||||
"//shared/hashutil:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@io_opencensus_go//trace:go_default_library",
|
||||
],
|
||||
@@ -26,9 +28,10 @@ go_test(
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//proto/eth/v1alpha1:go_default_library",
|
||||
"//proto/beacon/db:go_default_library",
|
||||
"//shared/bytesutil:go_default_library",
|
||||
"@com_github_gogo_protobuf//proto:go_default_library",
|
||||
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
|
||||
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -10,7 +10,9 @@ import (
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
dbpb "github.com/prysmaticlabs/prysm/proto/beacon/db"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
@@ -33,28 +35,19 @@ type DepositFetcher interface {
|
||||
// stores all the deposit related data that is required by the beacon-node.
|
||||
type DepositCache struct {
|
||||
// Beacon chain deposits in memory.
|
||||
pendingDeposits []*DepositContainer
|
||||
deposits []*DepositContainer
|
||||
pendingDeposits []*dbpb.DepositContainer
|
||||
deposits []*dbpb.DepositContainer
|
||||
depositsLock sync.RWMutex
|
||||
chainStartDeposits []*ethpb.Deposit
|
||||
chainstartPubkeys map[string]bool
|
||||
chainstartPubkeysLock sync.RWMutex
|
||||
}
|
||||
|
||||
// DepositContainer object for holding the deposit and a reference to the block in
|
||||
// which the deposit transaction was included in the proof of work chain.
|
||||
type DepositContainer struct {
|
||||
Deposit *ethpb.Deposit
|
||||
Block *big.Int
|
||||
Index int
|
||||
depositRoot [32]byte
|
||||
}
|
||||
|
||||
// NewDepositCache instantiates a new deposit cache
|
||||
func NewDepositCache() *DepositCache {
|
||||
return &DepositCache{
|
||||
pendingDeposits: []*DepositContainer{},
|
||||
deposits: []*DepositContainer{},
|
||||
pendingDeposits: []*dbpb.DepositContainer{},
|
||||
deposits: []*dbpb.DepositContainer{},
|
||||
chainstartPubkeys: make(map[string]bool),
|
||||
chainStartDeposits: make([]*ethpb.Deposit, 0),
|
||||
}
|
||||
@@ -62,10 +55,10 @@ func NewDepositCache() *DepositCache {
|
||||
|
||||
// InsertDeposit into the database. If deposit or block number are nil
|
||||
// then this method does nothing.
|
||||
func (dc *DepositCache) InsertDeposit(ctx context.Context, d *ethpb.Deposit, blockNum *big.Int, index int, depositRoot [32]byte) {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.InsertDeposit")
|
||||
func (dc *DepositCache) InsertDeposit(ctx context.Context, d *ethpb.Deposit, blockNum uint64, index int64, depositRoot [32]byte) {
|
||||
ctx, span := trace.StartSpan(ctx, "DepositsCache.InsertDeposit")
|
||||
defer span.End()
|
||||
if d == nil || blockNum == nil {
|
||||
if d == nil {
|
||||
log.WithFields(log.Fields{
|
||||
"block": blockNum,
|
||||
"deposit": d,
|
||||
@@ -78,14 +71,36 @@ func (dc *DepositCache) InsertDeposit(ctx context.Context, d *ethpb.Deposit, blo
|
||||
defer dc.depositsLock.Unlock()
|
||||
// keep the slice sorted on insertion in order to avoid costly sorting on retrival.
|
||||
heightIdx := sort.Search(len(dc.deposits), func(i int) bool { return dc.deposits[i].Index >= index })
|
||||
newDeposits := append([]*DepositContainer{{Deposit: d, Block: blockNum, depositRoot: depositRoot, Index: index}}, dc.deposits[heightIdx:]...)
|
||||
newDeposits := append([]*dbpb.DepositContainer{{Deposit: d, Eth1BlockHeight: blockNum, DepositRoot: depositRoot[:], Index: index}}, dc.deposits[heightIdx:]...)
|
||||
dc.deposits = append(dc.deposits[:heightIdx], newDeposits...)
|
||||
historicalDepositsCount.Inc()
|
||||
}
|
||||
|
||||
// InsertDepositContainers inserts a set of deposit containers into our deposit cache.
|
||||
func (dc *DepositCache) InsertDepositContainers(ctx context.Context, ctrs []*dbpb.DepositContainer) {
|
||||
ctx, span := trace.StartSpan(ctx, "DepositsCache.InsertDepositContainers")
|
||||
defer span.End()
|
||||
dc.depositsLock.Lock()
|
||||
defer dc.depositsLock.Unlock()
|
||||
|
||||
sort.SliceStable(ctrs, func(i int, j int) bool { return ctrs[i].Index < ctrs[j].Index })
|
||||
dc.deposits = ctrs
|
||||
historicalDepositsCount.Add(float64(len(ctrs)))
|
||||
}
|
||||
|
||||
// AllDepositContainers returns a list of deposits all historical deposit containers until the given block number.
|
||||
func (dc *DepositCache) AllDepositContainers(ctx context.Context) []*dbpb.DepositContainer {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.AllDepositContainers")
|
||||
defer span.End()
|
||||
dc.depositsLock.RLock()
|
||||
defer dc.depositsLock.RUnlock()
|
||||
|
||||
return dc.deposits
|
||||
}
|
||||
|
||||
// MarkPubkeyForChainstart sets the pubkey deposit status to true.
|
||||
func (dc *DepositCache) MarkPubkeyForChainstart(ctx context.Context, pubkey string) {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.MarkPubkeyForChainstart")
|
||||
ctx, span := trace.StartSpan(ctx, "DepositsCache.MarkPubkeyForChainstart")
|
||||
defer span.End()
|
||||
dc.chainstartPubkeysLock.Lock()
|
||||
defer dc.chainstartPubkeysLock.Unlock()
|
||||
@@ -94,7 +109,7 @@ func (dc *DepositCache) MarkPubkeyForChainstart(ctx context.Context, pubkey stri
|
||||
|
||||
// PubkeyInChainstart returns bool for whether the pubkey passed in has deposited.
|
||||
func (dc *DepositCache) PubkeyInChainstart(ctx context.Context, pubkey string) bool {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.PubkeyInChainstart")
|
||||
ctx, span := trace.StartSpan(ctx, "DepositsCache.PubkeyInChainstart")
|
||||
defer span.End()
|
||||
dc.chainstartPubkeysLock.Lock()
|
||||
defer dc.chainstartPubkeysLock.Unlock()
|
||||
@@ -108,14 +123,14 @@ func (dc *DepositCache) PubkeyInChainstart(ctx context.Context, pubkey string) b
|
||||
// AllDeposits returns a list of deposits all historical deposits until the given block number
|
||||
// (inclusive). If no block is specified then this method returns all historical deposits.
|
||||
func (dc *DepositCache) AllDeposits(ctx context.Context, beforeBlk *big.Int) []*ethpb.Deposit {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.AllDeposits")
|
||||
ctx, span := trace.StartSpan(ctx, "DepositsCache.AllDeposits")
|
||||
defer span.End()
|
||||
dc.depositsLock.RLock()
|
||||
defer dc.depositsLock.RUnlock()
|
||||
|
||||
var deposits []*ethpb.Deposit
|
||||
for _, ctnr := range dc.deposits {
|
||||
if beforeBlk == nil || beforeBlk.Cmp(ctnr.Block) > -1 {
|
||||
if beforeBlk == nil || beforeBlk.Uint64() >= ctnr.Eth1BlockHeight {
|
||||
deposits = append(deposits, ctnr.Deposit)
|
||||
}
|
||||
}
|
||||
@@ -125,23 +140,23 @@ func (dc *DepositCache) AllDeposits(ctx context.Context, beforeBlk *big.Int) []*
|
||||
// DepositsNumberAndRootAtHeight returns number of deposits made prior to blockheight and the
|
||||
// root that corresponds to the latest deposit at that blockheight.
|
||||
func (dc *DepositCache) DepositsNumberAndRootAtHeight(ctx context.Context, blockHeight *big.Int) (uint64, [32]byte) {
|
||||
ctx, span := trace.StartSpan(ctx, "Beacondb.DepositsNumberAndRootAtHeight")
|
||||
ctx, span := trace.StartSpan(ctx, "DepositsCache.DepositsNumberAndRootAtHeight")
|
||||
defer span.End()
|
||||
dc.depositsLock.RLock()
|
||||
defer dc.depositsLock.RUnlock()
|
||||
heightIdx := sort.Search(len(dc.deposits), func(i int) bool { return dc.deposits[i].Block.Cmp(blockHeight) > 0 })
|
||||
heightIdx := sort.Search(len(dc.deposits), func(i int) bool { return dc.deposits[i].Eth1BlockHeight > blockHeight.Uint64() })
|
||||
// send the deposit root of the empty trie, if eth1follow distance is greater than the time of the earliest
|
||||
// deposit.
|
||||
if heightIdx == 0 {
|
||||
return 0, [32]byte{}
|
||||
}
|
||||
return uint64(heightIdx), dc.deposits[heightIdx-1].depositRoot
|
||||
return uint64(heightIdx), bytesutil.ToBytes32(dc.deposits[heightIdx-1].DepositRoot)
|
||||
}
|
||||
|
||||
// DepositByPubkey looks through historical deposits and finds one which contains
|
||||
// a certain public key within its deposit data.
|
||||
func (dc *DepositCache) DepositByPubkey(ctx context.Context, pubKey []byte) (*ethpb.Deposit, *big.Int) {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.DepositByPubkey")
|
||||
ctx, span := trace.StartSpan(ctx, "DepositsCache.DepositByPubkey")
|
||||
defer span.End()
|
||||
dc.depositsLock.RLock()
|
||||
defer dc.depositsLock.RUnlock()
|
||||
@@ -151,7 +166,7 @@ func (dc *DepositCache) DepositByPubkey(ctx context.Context, pubKey []byte) (*et
|
||||
for _, ctnr := range dc.deposits {
|
||||
if bytes.Equal(ctnr.Deposit.Data.PublicKey, pubKey) {
|
||||
deposit = ctnr.Deposit
|
||||
blockNum = ctnr.Block
|
||||
blockNum = big.NewInt(int64(ctnr.Eth1BlockHeight))
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
149
beacon-chain/cache/depositcache/deposits_test.go
vendored
149
beacon-chain/cache/depositcache/deposits_test.go
vendored
@@ -6,7 +6,8 @@ import (
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
dbpb "github.com/prysmaticlabs/prysm/proto/beacon/db"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
@@ -19,21 +20,7 @@ func TestBeaconDB_InsertDeposit_LogsOnNilDepositInsertion(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
dc := DepositCache{}
|
||||
|
||||
dc.InsertDeposit(context.Background(), nil, big.NewInt(1), 0, [32]byte{})
|
||||
|
||||
if len(dc.deposits) != 0 {
|
||||
t.Fatal("Number of deposits changed")
|
||||
}
|
||||
if hook.LastEntry().Message != nilDepositErr {
|
||||
t.Errorf("Did not log correct message, wanted \"Ignoring nil deposit insertion\", got \"%s\"", hook.LastEntry().Message)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBeaconDB_InsertDeposit_LogsOnNilBlockNumberInsertion(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
dc := DepositCache{}
|
||||
|
||||
dc.InsertDeposit(context.Background(), ðpb.Deposit{}, nil, 0, [32]byte{})
|
||||
dc.InsertDeposit(context.Background(), nil, 1, 0, [32]byte{})
|
||||
|
||||
if len(dc.deposits) != 0 {
|
||||
t.Fatal("Number of deposits changed")
|
||||
@@ -47,27 +34,27 @@ func TestBeaconDB_InsertDeposit_MaintainsSortedOrderByIndex(t *testing.T) {
|
||||
dc := DepositCache{}
|
||||
|
||||
insertions := []struct {
|
||||
blkNum *big.Int
|
||||
blkNum uint64
|
||||
deposit *ethpb.Deposit
|
||||
index int
|
||||
index int64
|
||||
}{
|
||||
{
|
||||
blkNum: big.NewInt(0),
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{},
|
||||
index: 0,
|
||||
},
|
||||
{
|
||||
blkNum: big.NewInt(0),
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{},
|
||||
index: 3,
|
||||
},
|
||||
{
|
||||
blkNum: big.NewInt(0),
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{},
|
||||
index: 1,
|
||||
},
|
||||
{
|
||||
blkNum: big.NewInt(0),
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{},
|
||||
index: 4,
|
||||
},
|
||||
@@ -77,7 +64,7 @@ func TestBeaconDB_InsertDeposit_MaintainsSortedOrderByIndex(t *testing.T) {
|
||||
dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{})
|
||||
}
|
||||
|
||||
expectedIndices := []int{0, 1, 3, 4}
|
||||
expectedIndices := []int64{0, 1, 3, 4}
|
||||
for i, ei := range expectedIndices {
|
||||
if dc.deposits[i].Index != ei {
|
||||
t.Errorf("dc.deposits[%d].Index = %d, wanted %d", i, dc.deposits[i].Index, ei)
|
||||
@@ -88,34 +75,34 @@ func TestBeaconDB_InsertDeposit_MaintainsSortedOrderByIndex(t *testing.T) {
|
||||
func TestBeaconDB_AllDeposits_ReturnsAllDeposits(t *testing.T) {
|
||||
dc := DepositCache{}
|
||||
|
||||
deposits := []*DepositContainer{
|
||||
deposits := []*dbpb.DepositContainer{
|
||||
{
|
||||
Block: big.NewInt(10),
|
||||
Deposit: ðpb.Deposit{},
|
||||
Eth1BlockHeight: 10,
|
||||
Deposit: ðpb.Deposit{},
|
||||
},
|
||||
{
|
||||
Block: big.NewInt(10),
|
||||
Deposit: ðpb.Deposit{},
|
||||
Eth1BlockHeight: 10,
|
||||
Deposit: ðpb.Deposit{},
|
||||
},
|
||||
{
|
||||
Block: big.NewInt(10),
|
||||
Deposit: ðpb.Deposit{},
|
||||
Eth1BlockHeight: 10,
|
||||
Deposit: ðpb.Deposit{},
|
||||
},
|
||||
{
|
||||
Block: big.NewInt(11),
|
||||
Deposit: ðpb.Deposit{},
|
||||
Eth1BlockHeight: 11,
|
||||
Deposit: ðpb.Deposit{},
|
||||
},
|
||||
{
|
||||
Block: big.NewInt(11),
|
||||
Deposit: ðpb.Deposit{},
|
||||
Eth1BlockHeight: 11,
|
||||
Deposit: ðpb.Deposit{},
|
||||
},
|
||||
{
|
||||
Block: big.NewInt(12),
|
||||
Deposit: ðpb.Deposit{},
|
||||
Eth1BlockHeight: 12,
|
||||
Deposit: ðpb.Deposit{},
|
||||
},
|
||||
{
|
||||
Block: big.NewInt(12),
|
||||
Deposit: ðpb.Deposit{},
|
||||
Eth1BlockHeight: 12,
|
||||
Deposit: ðpb.Deposit{},
|
||||
},
|
||||
}
|
||||
dc.deposits = deposits
|
||||
@@ -129,34 +116,34 @@ func TestBeaconDB_AllDeposits_ReturnsAllDeposits(t *testing.T) {
|
||||
func TestBeaconDB_AllDeposits_FiltersDepositUpToAndIncludingBlockNumber(t *testing.T) {
|
||||
dc := DepositCache{}
|
||||
|
||||
deposits := []*DepositContainer{
|
||||
deposits := []*dbpb.DepositContainer{
|
||||
{
|
||||
Block: big.NewInt(10),
|
||||
Deposit: ðpb.Deposit{},
|
||||
Eth1BlockHeight: 10,
|
||||
Deposit: ðpb.Deposit{},
|
||||
},
|
||||
{
|
||||
Block: big.NewInt(10),
|
||||
Deposit: ðpb.Deposit{},
|
||||
Eth1BlockHeight: 10,
|
||||
Deposit: ðpb.Deposit{},
|
||||
},
|
||||
{
|
||||
Block: big.NewInt(10),
|
||||
Deposit: ðpb.Deposit{},
|
||||
Eth1BlockHeight: 10,
|
||||
Deposit: ðpb.Deposit{},
|
||||
},
|
||||
{
|
||||
Block: big.NewInt(11),
|
||||
Deposit: ðpb.Deposit{},
|
||||
Eth1BlockHeight: 11,
|
||||
Deposit: ðpb.Deposit{},
|
||||
},
|
||||
{
|
||||
Block: big.NewInt(11),
|
||||
Deposit: ðpb.Deposit{},
|
||||
Eth1BlockHeight: 11,
|
||||
Deposit: ðpb.Deposit{},
|
||||
},
|
||||
{
|
||||
Block: big.NewInt(12),
|
||||
Deposit: ðpb.Deposit{},
|
||||
Eth1BlockHeight: 12,
|
||||
Deposit: ðpb.Deposit{},
|
||||
},
|
||||
{
|
||||
Block: big.NewInt(12),
|
||||
Deposit: ðpb.Deposit{},
|
||||
Eth1BlockHeight: 12,
|
||||
Deposit: ðpb.Deposit{},
|
||||
},
|
||||
}
|
||||
dc.deposits = deposits
|
||||
@@ -171,35 +158,35 @@ func TestBeaconDB_AllDeposits_FiltersDepositUpToAndIncludingBlockNumber(t *testi
|
||||
func TestBeaconDB_DepositsNumberAndRootAtHeight_ReturnsAppropriateCountAndRoot(t *testing.T) {
|
||||
dc := DepositCache{}
|
||||
|
||||
dc.deposits = []*DepositContainer{
|
||||
dc.deposits = []*dbpb.DepositContainer{
|
||||
{
|
||||
Block: big.NewInt(10),
|
||||
Deposit: ðpb.Deposit{},
|
||||
Eth1BlockHeight: 10,
|
||||
Deposit: ðpb.Deposit{},
|
||||
},
|
||||
{
|
||||
Block: big.NewInt(10),
|
||||
Deposit: ðpb.Deposit{},
|
||||
Eth1BlockHeight: 10,
|
||||
Deposit: ðpb.Deposit{},
|
||||
},
|
||||
{
|
||||
Block: big.NewInt(10),
|
||||
Deposit: ðpb.Deposit{},
|
||||
Eth1BlockHeight: 10,
|
||||
Deposit: ðpb.Deposit{},
|
||||
},
|
||||
{
|
||||
Block: big.NewInt(11),
|
||||
Deposit: ðpb.Deposit{},
|
||||
Eth1BlockHeight: 10,
|
||||
Deposit: ðpb.Deposit{},
|
||||
},
|
||||
{
|
||||
Block: big.NewInt(11),
|
||||
Deposit: ðpb.Deposit{},
|
||||
depositRoot: bytesutil.ToBytes32([]byte("root")),
|
||||
Eth1BlockHeight: 11,
|
||||
Deposit: ðpb.Deposit{},
|
||||
DepositRoot: []byte("root"),
|
||||
},
|
||||
{
|
||||
Block: big.NewInt(12),
|
||||
Deposit: ðpb.Deposit{},
|
||||
Eth1BlockHeight: 12,
|
||||
Deposit: ðpb.Deposit{},
|
||||
},
|
||||
{
|
||||
Block: big.NewInt(12),
|
||||
Deposit: ðpb.Deposit{},
|
||||
Eth1BlockHeight: 12,
|
||||
Deposit: ðpb.Deposit{},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -216,16 +203,16 @@ func TestBeaconDB_DepositsNumberAndRootAtHeight_ReturnsAppropriateCountAndRoot(t
|
||||
func TestBeaconDB_DepositsNumberAndRootAtHeight_ReturnsEmptyTrieIfBlockHeightLessThanOldestDeposit(t *testing.T) {
|
||||
dc := DepositCache{}
|
||||
|
||||
dc.deposits = []*DepositContainer{
|
||||
dc.deposits = []*dbpb.DepositContainer{
|
||||
{
|
||||
Block: big.NewInt(10),
|
||||
Deposit: ðpb.Deposit{},
|
||||
depositRoot: bytesutil.ToBytes32([]byte("root")),
|
||||
Eth1BlockHeight: 10,
|
||||
Deposit: ðpb.Deposit{},
|
||||
DepositRoot: []byte("root"),
|
||||
},
|
||||
{
|
||||
Block: big.NewInt(11),
|
||||
Deposit: ðpb.Deposit{},
|
||||
depositRoot: bytesutil.ToBytes32([]byte("root")),
|
||||
Eth1BlockHeight: 11,
|
||||
Deposit: ðpb.Deposit{},
|
||||
DepositRoot: []byte("root"),
|
||||
},
|
||||
}
|
||||
|
||||
@@ -242,9 +229,9 @@ func TestBeaconDB_DepositsNumberAndRootAtHeight_ReturnsEmptyTrieIfBlockHeightLes
|
||||
func TestBeaconDB_DepositByPubkey_ReturnsFirstMatchingDeposit(t *testing.T) {
|
||||
dc := DepositCache{}
|
||||
|
||||
dc.deposits = []*DepositContainer{
|
||||
dc.deposits = []*dbpb.DepositContainer{
|
||||
{
|
||||
Block: big.NewInt(9),
|
||||
Eth1BlockHeight: 9,
|
||||
Deposit: ðpb.Deposit{
|
||||
Data: ðpb.Deposit_Data{
|
||||
PublicKey: []byte("pk0"),
|
||||
@@ -252,7 +239,7 @@ func TestBeaconDB_DepositByPubkey_ReturnsFirstMatchingDeposit(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
Block: big.NewInt(10),
|
||||
Eth1BlockHeight: 10,
|
||||
Deposit: ðpb.Deposit{
|
||||
Data: ðpb.Deposit_Data{
|
||||
PublicKey: []byte("pk1"),
|
||||
@@ -260,7 +247,7 @@ func TestBeaconDB_DepositByPubkey_ReturnsFirstMatchingDeposit(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
Block: big.NewInt(11),
|
||||
Eth1BlockHeight: 11,
|
||||
Deposit: ðpb.Deposit{
|
||||
Data: ðpb.Deposit_Data{
|
||||
PublicKey: []byte("pk1"),
|
||||
@@ -268,7 +255,7 @@ func TestBeaconDB_DepositByPubkey_ReturnsFirstMatchingDeposit(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
Block: big.NewInt(12),
|
||||
Eth1BlockHeight: 12,
|
||||
Deposit: ðpb.Deposit{
|
||||
Data: ðpb.Deposit_Data{
|
||||
PublicKey: []byte("pk2"),
|
||||
|
||||
@@ -7,7 +7,8 @@ import (
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
dbpb "github.com/prysmaticlabs/prysm/proto/beacon/db"
|
||||
"github.com/prysmaticlabs/prysm/shared/hashutil"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
@@ -23,15 +24,15 @@ var (
|
||||
// PendingDepositsFetcher specifically outlines a struct that can retrieve deposits
|
||||
// which have not yet been included in the chain.
|
||||
type PendingDepositsFetcher interface {
|
||||
PendingContainers(ctx context.Context, beforeBlk *big.Int) []*DepositContainer
|
||||
PendingContainers(ctx context.Context, beforeBlk *big.Int) []*dbpb.DepositContainer
|
||||
}
|
||||
|
||||
// InsertPendingDeposit into the database. If deposit or block number are nil
|
||||
// then this method does nothing.
|
||||
func (dc *DepositCache) InsertPendingDeposit(ctx context.Context, d *ethpb.Deposit, blockNum *big.Int, index int, depositRoot [32]byte) {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.InsertPendingDeposit")
|
||||
func (dc *DepositCache) InsertPendingDeposit(ctx context.Context, d *ethpb.Deposit, blockNum uint64, index int64, depositRoot [32]byte) {
|
||||
ctx, span := trace.StartSpan(ctx, "DepositsCache.InsertPendingDeposit")
|
||||
defer span.End()
|
||||
if d == nil || blockNum == nil {
|
||||
if d == nil {
|
||||
log.WithFields(log.Fields{
|
||||
"block": blockNum,
|
||||
"deposit": d,
|
||||
@@ -40,7 +41,8 @@ func (dc *DepositCache) InsertPendingDeposit(ctx context.Context, d *ethpb.Depos
|
||||
}
|
||||
dc.depositsLock.Lock()
|
||||
defer dc.depositsLock.Unlock()
|
||||
dc.pendingDeposits = append(dc.pendingDeposits, &DepositContainer{Deposit: d, Block: blockNum, Index: index, depositRoot: depositRoot})
|
||||
dc.pendingDeposits = append(dc.pendingDeposits,
|
||||
&dbpb.DepositContainer{Deposit: d, Eth1BlockHeight: blockNum, Index: index, DepositRoot: depositRoot[:]})
|
||||
pendingDepositsCount.Inc()
|
||||
span.AddAttributes(trace.Int64Attribute("count", int64(len(dc.pendingDeposits))))
|
||||
}
|
||||
@@ -54,9 +56,9 @@ func (dc *DepositCache) PendingDeposits(ctx context.Context, beforeBlk *big.Int)
|
||||
dc.depositsLock.RLock()
|
||||
defer dc.depositsLock.RUnlock()
|
||||
|
||||
var depositCntrs []*DepositContainer
|
||||
var depositCntrs []*dbpb.DepositContainer
|
||||
for _, ctnr := range dc.pendingDeposits {
|
||||
if beforeBlk == nil || beforeBlk.Cmp(ctnr.Block) > -1 {
|
||||
if beforeBlk == nil || beforeBlk.Uint64() >= ctnr.Eth1BlockHeight {
|
||||
depositCntrs = append(depositCntrs, ctnr)
|
||||
}
|
||||
}
|
||||
@@ -77,15 +79,15 @@ func (dc *DepositCache) PendingDeposits(ctx context.Context, beforeBlk *big.Int)
|
||||
|
||||
// PendingContainers returns a list of deposit containers until the given block number
|
||||
// (inclusive).
|
||||
func (dc *DepositCache) PendingContainers(ctx context.Context, beforeBlk *big.Int) []*DepositContainer {
|
||||
func (dc *DepositCache) PendingContainers(ctx context.Context, beforeBlk *big.Int) []*dbpb.DepositContainer {
|
||||
ctx, span := trace.StartSpan(ctx, "DepositsCache.PendingDeposits")
|
||||
defer span.End()
|
||||
dc.depositsLock.RLock()
|
||||
defer dc.depositsLock.RUnlock()
|
||||
|
||||
var depositCntrs []*DepositContainer
|
||||
var depositCntrs []*dbpb.DepositContainer
|
||||
for _, ctnr := range dc.pendingDeposits {
|
||||
if beforeBlk == nil || beforeBlk.Cmp(ctnr.Block) > -1 {
|
||||
if beforeBlk == nil || beforeBlk.Uint64() >= ctnr.Eth1BlockHeight {
|
||||
depositCntrs = append(depositCntrs, ctnr)
|
||||
}
|
||||
}
|
||||
@@ -151,9 +153,9 @@ func (dc *DepositCache) PrunePendingDeposits(ctx context.Context, merkleTreeInde
|
||||
dc.depositsLock.Lock()
|
||||
defer dc.depositsLock.Unlock()
|
||||
|
||||
var cleanDeposits []*DepositContainer
|
||||
var cleanDeposits []*dbpb.DepositContainer
|
||||
for _, dp := range dc.pendingDeposits {
|
||||
if dp.Index >= merkleTreeIndex {
|
||||
if dp.Index >= int64(merkleTreeIndex) {
|
||||
cleanDeposits = append(cleanDeposits, dp)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -7,14 +7,15 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
dbpb "github.com/prysmaticlabs/prysm/proto/beacon/db"
|
||||
)
|
||||
|
||||
var _ = PendingDepositsFetcher(&DepositCache{})
|
||||
|
||||
func TestInsertPendingDeposit_OK(t *testing.T) {
|
||||
dc := DepositCache{}
|
||||
dc.InsertPendingDeposit(context.Background(), ðpb.Deposit{}, big.NewInt(111), 100, [32]byte{})
|
||||
dc.InsertPendingDeposit(context.Background(), ðpb.Deposit{}, 111, 100, [32]byte{})
|
||||
|
||||
if len(dc.pendingDeposits) != 1 {
|
||||
t.Error("Deposit not inserted")
|
||||
@@ -23,7 +24,7 @@ func TestInsertPendingDeposit_OK(t *testing.T) {
|
||||
|
||||
func TestInsertPendingDeposit_ignoresNilDeposit(t *testing.T) {
|
||||
dc := DepositCache{}
|
||||
dc.InsertPendingDeposit(context.Background(), nil /*deposit*/, nil /*blockNum*/, 0, [32]byte{})
|
||||
dc.InsertPendingDeposit(context.Background(), nil /*deposit*/, 0 /*blockNum*/, 0, [32]byte{})
|
||||
|
||||
if len(dc.pendingDeposits) > 0 {
|
||||
t.Error("Unexpected deposit insertion")
|
||||
@@ -34,7 +35,7 @@ func TestRemovePendingDeposit_OK(t *testing.T) {
|
||||
db := DepositCache{}
|
||||
depToRemove := ðpb.Deposit{Proof: [][]byte{[]byte("A")}}
|
||||
otherDep := ðpb.Deposit{Proof: [][]byte{[]byte("B")}}
|
||||
db.pendingDeposits = []*DepositContainer{
|
||||
db.pendingDeposits = []*dbpb.DepositContainer{
|
||||
{Deposit: depToRemove, Index: 1},
|
||||
{Deposit: otherDep, Index: 5},
|
||||
}
|
||||
@@ -47,7 +48,7 @@ func TestRemovePendingDeposit_OK(t *testing.T) {
|
||||
|
||||
func TestRemovePendingDeposit_IgnoresNilDeposit(t *testing.T) {
|
||||
dc := DepositCache{}
|
||||
dc.pendingDeposits = []*DepositContainer{{Deposit: ðpb.Deposit{}}}
|
||||
dc.pendingDeposits = []*dbpb.DepositContainer{{Deposit: ðpb.Deposit{}}}
|
||||
dc.RemovePendingDeposit(context.Background(), nil /*deposit*/)
|
||||
if len(dc.pendingDeposits) != 1 {
|
||||
t.Errorf("Deposit unexpectedly removed")
|
||||
@@ -57,7 +58,7 @@ func TestRemovePendingDeposit_IgnoresNilDeposit(t *testing.T) {
|
||||
func TestPendingDeposit_RoundTrip(t *testing.T) {
|
||||
dc := DepositCache{}
|
||||
dep := ðpb.Deposit{Proof: [][]byte{[]byte("A")}}
|
||||
dc.InsertPendingDeposit(context.Background(), dep, big.NewInt(111), 100, [32]byte{})
|
||||
dc.InsertPendingDeposit(context.Background(), dep, 111, 100, [32]byte{})
|
||||
dc.RemovePendingDeposit(context.Background(), dep)
|
||||
if len(dc.pendingDeposits) != 0 {
|
||||
t.Error("Failed to insert & delete a pending deposit")
|
||||
@@ -67,10 +68,10 @@ func TestPendingDeposit_RoundTrip(t *testing.T) {
|
||||
func TestPendingDeposits_OK(t *testing.T) {
|
||||
dc := DepositCache{}
|
||||
|
||||
dc.pendingDeposits = []*DepositContainer{
|
||||
{Block: big.NewInt(2), Deposit: ðpb.Deposit{Proof: [][]byte{[]byte("A")}}},
|
||||
{Block: big.NewInt(4), Deposit: ðpb.Deposit{Proof: [][]byte{[]byte("B")}}},
|
||||
{Block: big.NewInt(6), Deposit: ðpb.Deposit{Proof: [][]byte{[]byte("c")}}},
|
||||
dc.pendingDeposits = []*dbpb.DepositContainer{
|
||||
{Eth1BlockHeight: 2, Deposit: ðpb.Deposit{Proof: [][]byte{[]byte("A")}}},
|
||||
{Eth1BlockHeight: 4, Deposit: ðpb.Deposit{Proof: [][]byte{[]byte("B")}}},
|
||||
{Eth1BlockHeight: 6, Deposit: ðpb.Deposit{Proof: [][]byte{[]byte("c")}}},
|
||||
}
|
||||
|
||||
deposits := dc.PendingDeposits(context.Background(), big.NewInt(4))
|
||||
@@ -92,25 +93,24 @@ func TestPendingDeposits_OK(t *testing.T) {
|
||||
func TestPrunePendingDeposits_ZeroMerkleIndex(t *testing.T) {
|
||||
dc := DepositCache{}
|
||||
|
||||
dc.pendingDeposits = []*DepositContainer{
|
||||
{Block: big.NewInt(2), Index: 2},
|
||||
{Block: big.NewInt(4), Index: 4},
|
||||
{Block: big.NewInt(6), Index: 6},
|
||||
{Block: big.NewInt(8), Index: 8},
|
||||
{Block: big.NewInt(10), Index: 10},
|
||||
{Block: big.NewInt(12), Index: 12},
|
||||
dc.pendingDeposits = []*dbpb.DepositContainer{
|
||||
{Eth1BlockHeight: 2, Index: 2},
|
||||
{Eth1BlockHeight: 4, Index: 4},
|
||||
{Eth1BlockHeight: 6, Index: 6},
|
||||
{Eth1BlockHeight: 8, Index: 8},
|
||||
{Eth1BlockHeight: 10, Index: 10},
|
||||
{Eth1BlockHeight: 12, Index: 12},
|
||||
}
|
||||
|
||||
dc.PrunePendingDeposits(context.Background(), 0)
|
||||
expected := []*DepositContainer{
|
||||
{Block: big.NewInt(2), Index: 2},
|
||||
{Block: big.NewInt(4), Index: 4},
|
||||
{Block: big.NewInt(6), Index: 6},
|
||||
{Block: big.NewInt(8), Index: 8},
|
||||
{Block: big.NewInt(10), Index: 10},
|
||||
{Block: big.NewInt(12), Index: 12},
|
||||
expected := []*dbpb.DepositContainer{
|
||||
{Eth1BlockHeight: 2, Index: 2},
|
||||
{Eth1BlockHeight: 4, Index: 4},
|
||||
{Eth1BlockHeight: 6, Index: 6},
|
||||
{Eth1BlockHeight: 8, Index: 8},
|
||||
{Eth1BlockHeight: 10, Index: 10},
|
||||
{Eth1BlockHeight: 12, Index: 12},
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(dc.pendingDeposits, expected) {
|
||||
t.Errorf("Unexpected deposits. got=%+v want=%+v", dc.pendingDeposits, expected)
|
||||
}
|
||||
@@ -119,40 +119,40 @@ func TestPrunePendingDeposits_ZeroMerkleIndex(t *testing.T) {
|
||||
func TestPrunePendingDeposits_OK(t *testing.T) {
|
||||
dc := DepositCache{}
|
||||
|
||||
dc.pendingDeposits = []*DepositContainer{
|
||||
{Block: big.NewInt(2), Index: 2},
|
||||
{Block: big.NewInt(4), Index: 4},
|
||||
{Block: big.NewInt(6), Index: 6},
|
||||
{Block: big.NewInt(8), Index: 8},
|
||||
{Block: big.NewInt(10), Index: 10},
|
||||
{Block: big.NewInt(12), Index: 12},
|
||||
dc.pendingDeposits = []*dbpb.DepositContainer{
|
||||
{Eth1BlockHeight: 2, Index: 2},
|
||||
{Eth1BlockHeight: 4, Index: 4},
|
||||
{Eth1BlockHeight: 6, Index: 6},
|
||||
{Eth1BlockHeight: 8, Index: 8},
|
||||
{Eth1BlockHeight: 10, Index: 10},
|
||||
{Eth1BlockHeight: 12, Index: 12},
|
||||
}
|
||||
|
||||
dc.PrunePendingDeposits(context.Background(), 6)
|
||||
expected := []*DepositContainer{
|
||||
{Block: big.NewInt(6), Index: 6},
|
||||
{Block: big.NewInt(8), Index: 8},
|
||||
{Block: big.NewInt(10), Index: 10},
|
||||
{Block: big.NewInt(12), Index: 12},
|
||||
expected := []*dbpb.DepositContainer{
|
||||
{Eth1BlockHeight: 6, Index: 6},
|
||||
{Eth1BlockHeight: 8, Index: 8},
|
||||
{Eth1BlockHeight: 10, Index: 10},
|
||||
{Eth1BlockHeight: 12, Index: 12},
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(dc.pendingDeposits, expected) {
|
||||
t.Errorf("Unexpected deposits. got=%+v want=%+v", dc.pendingDeposits, expected)
|
||||
}
|
||||
|
||||
dc.pendingDeposits = []*DepositContainer{
|
||||
{Block: big.NewInt(2), Index: 2},
|
||||
{Block: big.NewInt(4), Index: 4},
|
||||
{Block: big.NewInt(6), Index: 6},
|
||||
{Block: big.NewInt(8), Index: 8},
|
||||
{Block: big.NewInt(10), Index: 10},
|
||||
{Block: big.NewInt(12), Index: 12},
|
||||
dc.pendingDeposits = []*dbpb.DepositContainer{
|
||||
{Eth1BlockHeight: 2, Index: 2},
|
||||
{Eth1BlockHeight: 4, Index: 4},
|
||||
{Eth1BlockHeight: 6, Index: 6},
|
||||
{Eth1BlockHeight: 8, Index: 8},
|
||||
{Eth1BlockHeight: 10, Index: 10},
|
||||
{Eth1BlockHeight: 12, Index: 12},
|
||||
}
|
||||
|
||||
dc.PrunePendingDeposits(context.Background(), 10)
|
||||
expected = []*DepositContainer{
|
||||
{Block: big.NewInt(10), Index: 10},
|
||||
{Block: big.NewInt(12), Index: 12},
|
||||
expected = []*dbpb.DepositContainer{
|
||||
{Eth1BlockHeight: 10, Index: 10},
|
||||
{Eth1BlockHeight: 12, Index: 12},
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(dc.pendingDeposits, expected) {
|
||||
|
||||
2
beacon-chain/cache/eth1_data_test.go
vendored
2
beacon-chain/cache/eth1_data_test.go
vendored
@@ -91,7 +91,7 @@ func TestEth1Data_MaxSize(t *testing.T) {
|
||||
|
||||
for i := 0; i < maxEth1DataVoteSize+1; i++ {
|
||||
var hash [32]byte
|
||||
copy(hash[:], []byte(strconv.Itoa(i)))
|
||||
copy(hash[:], strconv.Itoa(i))
|
||||
eInfo := &Eth1DataVote{
|
||||
Eth1DataHash: hash,
|
||||
}
|
||||
|
||||
9
beacon-chain/cache/feature_flag_test.go
vendored
9
beacon-chain/cache/feature_flag_test.go
vendored
@@ -3,12 +3,7 @@ package cache
|
||||
import "github.com/prysmaticlabs/prysm/shared/featureconfig"
|
||||
|
||||
func init() {
|
||||
featureconfig.Init(&featureconfig.Flag{
|
||||
EnableAttestationCache: true,
|
||||
EnableEth1DataVoteCache: true,
|
||||
EnableShuffledIndexCache: true,
|
||||
EnableCommitteeCache: true,
|
||||
EnableActiveCountCache: true,
|
||||
EnableActiveIndicesCache: true,
|
||||
featureconfig.Init(&featureconfig.Flags{
|
||||
EnableEth1DataVoteCache: true,
|
||||
})
|
||||
}
|
||||
|
||||
61
beacon-chain/cache/hot_state_cache.go
vendored
Normal file
61
beacon-chain/cache/hot_state_cache.go
vendored
Normal file
@@ -0,0 +1,61 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
lru "github.com/hashicorp/golang-lru"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
)
|
||||
|
||||
var (
|
||||
// hotStateCacheSize defines the max number of hot state this can cache.
|
||||
hotStateCacheSize = 16
|
||||
// Metrics
|
||||
hotStateCacheHit = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "hot_state_cache_hit",
|
||||
Help: "The total number of cache hits on the hot state cache.",
|
||||
})
|
||||
hotStateCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "hot_state_cache_miss",
|
||||
Help: "The total number of cache misses on the hot state cache.",
|
||||
})
|
||||
)
|
||||
|
||||
// HotStateCache is used to store the processed beacon state after finalized check point..
|
||||
type HotStateCache struct {
|
||||
cache *lru.Cache
|
||||
}
|
||||
|
||||
// NewHotStateCache initializes the map and underlying cache.
|
||||
func NewHotStateCache() *HotStateCache {
|
||||
cache, err := lru.New(hotStateCacheSize)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return &HotStateCache{
|
||||
cache: cache,
|
||||
}
|
||||
}
|
||||
|
||||
// Get returns a cached response via input block root, if any.
|
||||
// The response is copied by default.
|
||||
func (c *HotStateCache) Get(root [32]byte) *stateTrie.BeaconState {
|
||||
item, exists := c.cache.Get(root)
|
||||
|
||||
if exists && item != nil {
|
||||
hotStateCacheHit.Inc()
|
||||
return item.(*stateTrie.BeaconState).Copy()
|
||||
}
|
||||
hotStateCacheMiss.Inc()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Put the response in the cache.
|
||||
func (c *HotStateCache) Put(root [32]byte, state *stateTrie.BeaconState) {
|
||||
c.cache.Add(root, state)
|
||||
}
|
||||
|
||||
// Has returns true if the key exists in the cache.
|
||||
func (c *HotStateCache) Has(root [32]byte) bool {
|
||||
return c.cache.Contains(root)
|
||||
}
|
||||
41
beacon-chain/cache/hot_state_cache_test.go
vendored
Normal file
41
beacon-chain/cache/hot_state_cache_test.go
vendored
Normal file
@@ -0,0 +1,41 @@
|
||||
package cache_test
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
|
||||
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
)
|
||||
|
||||
func TestHotStateCache_RoundTrip(t *testing.T) {
|
||||
c := cache.NewHotStateCache()
|
||||
root := [32]byte{'A'}
|
||||
state := c.Get(root)
|
||||
if state != nil {
|
||||
t.Errorf("Empty cache returned an object: %v", state)
|
||||
}
|
||||
if c.Has(root) {
|
||||
t.Error("Empty cache has an object")
|
||||
}
|
||||
|
||||
state, err := stateTrie.InitializeFromProto(&pb.BeaconState{
|
||||
Slot: 10,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
c.Put(root, state)
|
||||
|
||||
if !c.Has(root) {
|
||||
t.Error("Empty cache does not have an object")
|
||||
}
|
||||
res := c.Get(root)
|
||||
if state == nil {
|
||||
t.Errorf("Empty cache returned an object: %v", state)
|
||||
}
|
||||
if !reflect.DeepEqual(state.CloneInnerState(), res.CloneInnerState()) {
|
||||
t.Error("Expected equal protos to return from cache")
|
||||
}
|
||||
}
|
||||
130
beacon-chain/cache/skip_slot_cache.go
vendored
Normal file
130
beacon-chain/cache/skip_slot_cache.go
vendored
Normal file
@@ -0,0 +1,130 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
lru "github.com/hashicorp/golang-lru"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/shared/featureconfig"
|
||||
)
|
||||
|
||||
var (
|
||||
// Metrics
|
||||
skipSlotCacheHit = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "skip_slot_cache_hit",
|
||||
Help: "The total number of cache hits on the skip slot cache.",
|
||||
})
|
||||
skipSlotCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "skip_slot_cache_miss",
|
||||
Help: "The total number of cache misses on the skip slot cache.",
|
||||
})
|
||||
)
|
||||
|
||||
// SkipSlotCache is used to store the cached results of processing skip slots in state.ProcessSlots.
|
||||
type SkipSlotCache struct {
|
||||
cache *lru.Cache
|
||||
lock sync.RWMutex
|
||||
inProgress map[uint64]bool
|
||||
}
|
||||
|
||||
// NewSkipSlotCache initializes the map and underlying cache.
|
||||
func NewSkipSlotCache() *SkipSlotCache {
|
||||
cache, err := lru.New(8)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return &SkipSlotCache{
|
||||
cache: cache,
|
||||
inProgress: make(map[uint64]bool),
|
||||
}
|
||||
}
|
||||
|
||||
// Get waits for any in progress calculation to complete before returning a
|
||||
// cached response, if any.
|
||||
func (c *SkipSlotCache) Get(ctx context.Context, slot uint64) (*stateTrie.BeaconState, error) {
|
||||
if !featureconfig.Get().EnableSkipSlotsCache {
|
||||
// Return a miss result if cache is not enabled.
|
||||
skipSlotCacheMiss.Inc()
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
delay := minDelay
|
||||
|
||||
// Another identical request may be in progress already. Let's wait until
|
||||
// any in progress request resolves or our timeout is exceeded.
|
||||
for {
|
||||
if ctx.Err() != nil {
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
|
||||
c.lock.RLock()
|
||||
if !c.inProgress[slot] {
|
||||
c.lock.RUnlock()
|
||||
break
|
||||
}
|
||||
c.lock.RUnlock()
|
||||
|
||||
// This increasing backoff is to decrease the CPU cycles while waiting
|
||||
// for the in progress boolean to flip to false.
|
||||
time.Sleep(time.Duration(delay) * time.Nanosecond)
|
||||
delay *= delayFactor
|
||||
delay = math.Min(delay, maxDelay)
|
||||
}
|
||||
|
||||
item, exists := c.cache.Get(slot)
|
||||
|
||||
if exists && item != nil {
|
||||
skipSlotCacheHit.Inc()
|
||||
return item.(*stateTrie.BeaconState).Copy(), nil
|
||||
}
|
||||
skipSlotCacheMiss.Inc()
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// MarkInProgress a request so that any other similar requests will block on
|
||||
// Get until MarkNotInProgress is called.
|
||||
func (c *SkipSlotCache) MarkInProgress(slot uint64) error {
|
||||
if !featureconfig.Get().EnableSkipSlotsCache {
|
||||
return nil
|
||||
}
|
||||
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
if c.inProgress[slot] {
|
||||
return ErrAlreadyInProgress
|
||||
}
|
||||
c.inProgress[slot] = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarkNotInProgress will release the lock on a given request. This should be
|
||||
// called after put.
|
||||
func (c *SkipSlotCache) MarkNotInProgress(slot uint64) error {
|
||||
if !featureconfig.Get().EnableSkipSlotsCache {
|
||||
return nil
|
||||
}
|
||||
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
delete(c.inProgress, slot)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Put the response in the cache.
|
||||
func (c *SkipSlotCache) Put(ctx context.Context, slot uint64, state *stateTrie.BeaconState) error {
|
||||
if !featureconfig.Get().EnableSkipSlotsCache {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Copy state so cached value is not mutated.
|
||||
c.cache.Add(slot, state.Copy())
|
||||
|
||||
return nil
|
||||
}
|
||||
57
beacon-chain/cache/skip_slot_cache_test.go
vendored
Normal file
57
beacon-chain/cache/skip_slot_cache_test.go
vendored
Normal file
@@ -0,0 +1,57 @@
|
||||
package cache_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
|
||||
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/featureconfig"
|
||||
)
|
||||
|
||||
func TestSkipSlotCache_RoundTrip(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
c := cache.NewSkipSlotCache()
|
||||
fc := featureconfig.Get()
|
||||
fc.EnableSkipSlotsCache = true
|
||||
featureconfig.Init(fc)
|
||||
|
||||
state, err := c.Get(ctx, 5)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if state != nil {
|
||||
t.Errorf("Empty cache returned an object: %v", state)
|
||||
}
|
||||
|
||||
if err := c.MarkInProgress(5); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
state, err = stateTrie.InitializeFromProto(&pb.BeaconState{
|
||||
Slot: 10,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err = c.Put(ctx, 5, state); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if err := c.MarkNotInProgress(5); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
res, err := c.Get(ctx, 5)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(state.CloneInnerState(), res.CloneInnerState()) {
|
||||
t.Error("Expected equal protos to return from cache")
|
||||
}
|
||||
}
|
||||
@@ -14,19 +14,22 @@ go_library(
|
||||
deps = [
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/state/stateutils:go_default_library",
|
||||
"//beacon-chain/core/validators:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/stateutil:go_default_library",
|
||||
"//proto/beacon/p2p/v1:go_default_library",
|
||||
"//proto/eth/v1alpha1:go_default_library",
|
||||
"//shared/attestationutil:go_default_library",
|
||||
"//shared/bls:go_default_library",
|
||||
"//shared/bytesutil:go_default_library",
|
||||
"//shared/featureconfig:go_default_library",
|
||||
"//shared/hashutil:go_default_library",
|
||||
"//shared/mathutil:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
"//shared/sliceutil:go_default_library",
|
||||
"//shared/trieutil:go_default_library",
|
||||
"@com_github_gogo_protobuf//proto:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_ssz//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@io_opencensus_go//trace:go_default_library",
|
||||
@@ -37,6 +40,7 @@ go_test(
|
||||
name = "go_default_test",
|
||||
size = "medium",
|
||||
srcs = [
|
||||
"block_operations_fuzz_test.go",
|
||||
"block_operations_test.go",
|
||||
"block_test.go",
|
||||
"eth1_data_test.go",
|
||||
@@ -44,17 +48,17 @@ go_test(
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/state:go_default_library",
|
||||
"//beacon-chain/core/state/stateutils:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//proto/beacon/p2p/v1:go_default_library",
|
||||
"//proto/eth/v1alpha1:go_default_library",
|
||||
"//shared/attestationutil:go_default_library",
|
||||
"//shared/bls:go_default_library",
|
||||
"//shared/bytesutil:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
"//shared/testutil:go_default_library",
|
||||
"//shared/trieutil:go_default_library",
|
||||
"@com_github_gogo_protobuf//proto:go_default_library",
|
||||
"@com_github_phoreproject_bls//:go_default_library",
|
||||
"@com_github_google_gofuzz//:go_default_library",
|
||||
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_ssz//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
|
||||
@@ -4,18 +4,20 @@
|
||||
package blocks
|
||||
|
||||
import (
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
)
|
||||
|
||||
// NewGenesisBlock returns the canonical, genesis block for the beacon chain protocol.
|
||||
func NewGenesisBlock(stateRoot []byte) *ethpb.BeaconBlock {
|
||||
func NewGenesisBlock(stateRoot []byte) *ethpb.SignedBeaconBlock {
|
||||
zeroHash := params.BeaconConfig().ZeroHash[:]
|
||||
genBlock := ðpb.BeaconBlock{
|
||||
ParentRoot: zeroHash,
|
||||
StateRoot: stateRoot,
|
||||
Body: ðpb.BeaconBlockBody{},
|
||||
Signature: params.BeaconConfig().EmptySignature[:],
|
||||
}
|
||||
return genBlock
|
||||
return ðpb.SignedBeaconBlock{
|
||||
Block: genBlock,
|
||||
Signature: params.BeaconConfig().EmptySignature[:],
|
||||
}
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
441
beacon-chain/core/blocks/block_operations_fuzz_test.go
Normal file
441
beacon-chain/core/blocks/block_operations_fuzz_test.go
Normal file
@@ -0,0 +1,441 @@
|
||||
package blocks
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
eth "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
|
||||
fuzz "github.com/google/gofuzz"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
|
||||
//"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
beaconstate "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
ethereum_beacon_p2p_v1 "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
)
|
||||
|
||||
func TestFuzzProcessAttestationNoVerify_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
ctx := context.Background()
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
att := ð.Attestation{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(att)
|
||||
s, _ := beaconstate.InitializeFromProtoUnsafe(state)
|
||||
_, _ = ProcessAttestationNoVerify(ctx, s, att)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFuzzProcessBlockHeader_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
block := ð.SignedBeaconBlock{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(block)
|
||||
|
||||
s, _ := beaconstate.InitializeFromProtoUnsafe(state)
|
||||
_, _ = ProcessBlockHeader(s, block)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFuzzverifySigningRoot_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
pubkey := [48]byte{}
|
||||
sig := [96]byte{}
|
||||
domain := [4]byte{}
|
||||
p := []byte{}
|
||||
s := []byte{}
|
||||
d := uint64(0)
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(&pubkey)
|
||||
fuzzer.Fuzz(&sig)
|
||||
fuzzer.Fuzz(&domain)
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(&p)
|
||||
fuzzer.Fuzz(&s)
|
||||
fuzzer.Fuzz(&d)
|
||||
domain := bytesutil.FromBytes4(domain[:])
|
||||
verifySigningRoot(state, pubkey[:], sig[:], domain)
|
||||
verifySigningRoot(state, p, s, d)
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
func TestFuzzverifyDepositDataSigningRoot_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
ba := []byte{}
|
||||
pubkey := [48]byte{}
|
||||
sig := [96]byte{}
|
||||
domain := [4]byte{}
|
||||
p := []byte{}
|
||||
s := []byte{}
|
||||
d := uint64(0)
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(&ba)
|
||||
fuzzer.Fuzz(&pubkey)
|
||||
fuzzer.Fuzz(&sig)
|
||||
fuzzer.Fuzz(&domain)
|
||||
fuzzer.Fuzz(&p)
|
||||
fuzzer.Fuzz(&s)
|
||||
fuzzer.Fuzz(&d)
|
||||
domain := bytesutil.FromBytes4(domain[:])
|
||||
verifySignature(ba, pubkey[:], sig[:], domain)
|
||||
verifySignature(ba, p, s, d)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFuzzProcessEth1DataInBlock_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
block := ð.BeaconBlock{}
|
||||
state := &stateTrie.BeaconState{}
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(block)
|
||||
s, err := ProcessEth1DataInBlock(state, block)
|
||||
if err != nil && s != nil {
|
||||
t.Fatalf("state should be nil on err. found: %v on error: %v for state: %v and block: %v", s, err, state, block)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFuzzareEth1DataEqual_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
eth1data := ð.Eth1Data{}
|
||||
eth1data2 := ð.Eth1Data{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(eth1data)
|
||||
fuzzer.Fuzz(eth1data2)
|
||||
areEth1DataEqual(eth1data, eth1data2)
|
||||
areEth1DataEqual(eth1data, eth1data)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFuzzEth1DataHasEnoughSupport_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
eth1data := ð.Eth1Data{}
|
||||
stateVotes := []*eth.Eth1Data{}
|
||||
for i := 0; i < 100000; i++ {
|
||||
fuzzer.Fuzz(eth1data)
|
||||
fuzzer.Fuzz(&stateVotes)
|
||||
s, _ := beaconstate.InitializeFromProto(ðereum_beacon_p2p_v1.BeaconState{
|
||||
Eth1DataVotes: stateVotes,
|
||||
})
|
||||
Eth1DataHasEnoughSupport(s, eth1data)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestFuzzProcessBlockHeaderNoVerify_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
block := ð.BeaconBlock{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(block)
|
||||
s, _ := beaconstate.InitializeFromProtoUnsafe(state)
|
||||
_, _ = ProcessBlockHeaderNoVerify(s, block)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFuzzProcessRandao_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
blockBody := ð.BeaconBlockBody{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(blockBody)
|
||||
s, _ := beaconstate.InitializeFromProtoUnsafe(state)
|
||||
r, err := ProcessRandao(s, blockBody)
|
||||
if err != nil && r != nil {
|
||||
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, blockBody)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFuzzProcessRandaoNoVerify_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
blockBody := ð.BeaconBlockBody{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(blockBody)
|
||||
s, _ := beaconstate.InitializeFromProtoUnsafe(state)
|
||||
r, err := ProcessRandaoNoVerify(s, blockBody)
|
||||
if err != nil && r != nil {
|
||||
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, blockBody)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFuzzProcessProposerSlashings_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
blockBody := ð.BeaconBlockBody{}
|
||||
ctx := context.Background()
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(blockBody)
|
||||
s, _ := beaconstate.InitializeFromProtoUnsafe(state)
|
||||
r, err := ProcessProposerSlashings(ctx, s, blockBody)
|
||||
if err != nil && r != nil {
|
||||
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, blockBody)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFuzzVerifyProposerSlashing_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
proposerSlashing := ð.ProposerSlashing{}
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(proposerSlashing)
|
||||
s, _ := beaconstate.InitializeFromProtoUnsafe(state)
|
||||
VerifyProposerSlashing(s, proposerSlashing)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFuzzProcessAttesterSlashings_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
blockBody := ð.BeaconBlockBody{}
|
||||
ctx := context.Background()
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(blockBody)
|
||||
s, _ := beaconstate.InitializeFromProtoUnsafe(state)
|
||||
r, err := ProcessAttesterSlashings(ctx, s, blockBody)
|
||||
if err != nil && r != nil {
|
||||
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, blockBody)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFuzzVerifyAttesterSlashing_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
attesterSlashing := ð.AttesterSlashing{}
|
||||
ctx := context.Background()
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(attesterSlashing)
|
||||
s, _ := beaconstate.InitializeFromProtoUnsafe(state)
|
||||
VerifyAttesterSlashing(ctx, s, attesterSlashing)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFuzzIsSlashableAttestationData_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
attestationData := ð.AttestationData{}
|
||||
attestationData2 := ð.AttestationData{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(attestationData)
|
||||
fuzzer.Fuzz(attestationData2)
|
||||
IsSlashableAttestationData(attestationData, attestationData2)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFuzzslashableAttesterIndices_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
attesterSlashing := ð.AttesterSlashing{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(attesterSlashing)
|
||||
slashableAttesterIndices(attesterSlashing)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFuzzProcessAttestations_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
blockBody := ð.BeaconBlockBody{}
|
||||
ctx := context.Background()
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(blockBody)
|
||||
s, _ := beaconstate.InitializeFromProtoUnsafe(state)
|
||||
r, err := ProcessAttestations(ctx, s, blockBody)
|
||||
if err != nil && r != nil {
|
||||
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, blockBody)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFuzzProcessAttestationsNoVerify_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
blockBody := ð.BeaconBlockBody{}
|
||||
ctx := context.Background()
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(blockBody)
|
||||
s, _ := beaconstate.InitializeFromProtoUnsafe(state)
|
||||
r, err := ProcessAttestationsNoVerify(ctx, s, blockBody)
|
||||
if err != nil && r != nil {
|
||||
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, blockBody)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFuzzProcessAttestation_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
attestation := ð.Attestation{}
|
||||
ctx := context.Background()
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(attestation)
|
||||
s, _ := beaconstate.InitializeFromProtoUnsafe(state)
|
||||
r, err := ProcessAttestation(ctx, s, attestation)
|
||||
if err != nil && r != nil {
|
||||
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, attestation)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFuzzVerifyIndexedAttestationn_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
idxAttestation := ð.IndexedAttestation{}
|
||||
ctx := context.Background()
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(idxAttestation)
|
||||
s, _ := beaconstate.InitializeFromProtoUnsafe(state)
|
||||
VerifyIndexedAttestation(ctx, s, idxAttestation)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFuzzVerifyAttestation_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
attestation := ð.Attestation{}
|
||||
ctx := context.Background()
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(attestation)
|
||||
s, _ := beaconstate.InitializeFromProtoUnsafe(state)
|
||||
VerifyAttestation(ctx, s, attestation)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFuzzProcessDeposits_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
blockBody := ð.BeaconBlockBody{}
|
||||
ctx := context.Background()
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(blockBody)
|
||||
s, _ := beaconstate.InitializeFromProtoUnsafe(state)
|
||||
r, err := ProcessDeposits(ctx, s, blockBody)
|
||||
if err != nil && r != nil {
|
||||
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, blockBody)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFuzzProcessPreGenesisDeposit_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
deposit := ð.Deposit{}
|
||||
ctx := context.Background()
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(deposit)
|
||||
s, _ := beaconstate.InitializeFromProtoUnsafe(state)
|
||||
r, err := ProcessPreGenesisDeposit(ctx, s, deposit)
|
||||
if err != nil && r != nil {
|
||||
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, deposit)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFuzzProcessDeposit_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
deposit := ð.Deposit{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(deposit)
|
||||
s, _ := beaconstate.InitializeFromProtoUnsafe(state)
|
||||
r, err := ProcessDeposit(s, deposit)
|
||||
if err != nil && r != nil {
|
||||
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, deposit)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFuzzverifyDeposit_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
deposit := ð.Deposit{}
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(deposit)
|
||||
s, _ := beaconstate.InitializeFromProtoUnsafe(state)
|
||||
verifyDeposit(s, deposit)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFuzzProcessVoluntaryExits_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
blockBody := ð.BeaconBlockBody{}
|
||||
ctx := context.Background()
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(blockBody)
|
||||
s, _ := beaconstate.InitializeFromProtoUnsafe(state)
|
||||
r, err := ProcessVoluntaryExits(ctx, s, blockBody)
|
||||
if err != nil && r != nil {
|
||||
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, blockBody)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFuzzProcessVoluntaryExitsNoVerify_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
blockBody := ð.BeaconBlockBody{}
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(blockBody)
|
||||
s, _ := beaconstate.InitializeFromProtoUnsafe(state)
|
||||
r, err := ProcessVoluntaryExitsNoVerify(s, blockBody)
|
||||
if err != nil && r != nil {
|
||||
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, blockBody)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFuzzVerifyExit_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
ve := ð.SignedVoluntaryExit{}
|
||||
val := ð.Validator{}
|
||||
fork := &pb.Fork{}
|
||||
var slot uint64
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(ve)
|
||||
fuzzer.Fuzz(val)
|
||||
fuzzer.Fuzz(fork)
|
||||
fuzzer.Fuzz(&slot)
|
||||
VerifyExit(val, slot, fork, ve)
|
||||
}
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -11,11 +11,11 @@ func TestGenesisBlock_InitializedCorrectly(t *testing.T) {
|
||||
stateHash := []byte{0}
|
||||
b1 := blocks.NewGenesisBlock(stateHash)
|
||||
|
||||
if b1.ParentRoot == nil {
|
||||
if b1.Block.ParentRoot == nil {
|
||||
t.Error("genesis block missing ParentHash field")
|
||||
}
|
||||
|
||||
if !bytes.Equal(b1.StateRoot, stateHash) {
|
||||
if !bytes.Equal(b1.Block.StateRoot, stateHash) {
|
||||
t.Error("genesis block StateRootHash32 isn't initialized correctly")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,9 +4,10 @@ import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
beaconstate "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
)
|
||||
|
||||
@@ -92,9 +93,9 @@ func TestEth1DataHasEnoughSupport(t *testing.T) {
|
||||
c.SlotsPerEth1VotingPeriod = tt.votingPeriodLength
|
||||
params.OverrideBeaconConfig(c)
|
||||
|
||||
s := &pb.BeaconState{
|
||||
s, _ := beaconstate.InitializeFromProto(&pb.BeaconState{
|
||||
Eth1DataVotes: tt.stateVotes,
|
||||
}
|
||||
})
|
||||
result, err := blocks.Eth1DataHasEnoughSupport(s, tt.data)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
|
||||
@@ -35,11 +35,12 @@ go_test(
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/state:go_default_library",
|
||||
"//beacon-chain/core/state/stateutils:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//proto/beacon/p2p/v1:go_default_library",
|
||||
"//proto/eth/v1alpha1:go_default_library",
|
||||
"//shared/params/spectest:go_default_library",
|
||||
"//shared/testutil:go_default_library",
|
||||
"@com_github_gogo_protobuf//proto:go_default_library",
|
||||
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_ssz//:go_default_library",
|
||||
"@in_gopkg_d4l3k_messagediff_v1//:go_default_library",
|
||||
"@io_bazel_rules_go//go/tools/bazel:go_default_library",
|
||||
@@ -68,11 +69,12 @@ go_test(
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/state:go_default_library",
|
||||
"//beacon-chain/core/state/stateutils:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//proto/beacon/p2p/v1:go_default_library",
|
||||
"//proto/eth/v1alpha1:go_default_library",
|
||||
"//shared/params/spectest:go_default_library",
|
||||
"//shared/testutil:go_default_library",
|
||||
"@com_github_gogo_protobuf//proto:go_default_library",
|
||||
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_ssz//:go_default_library",
|
||||
"@in_gopkg_d4l3k_messagediff_v1//:go_default_library",
|
||||
"@io_bazel_rules_go//go/tools/bazel:go_default_library",
|
||||
|
||||
@@ -4,9 +4,9 @@ import (
|
||||
"path"
|
||||
"testing"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/go-ssz"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/params/spectest"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
)
|
||||
|
||||
@@ -4,9 +4,9 @@ import (
|
||||
"path"
|
||||
"testing"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/go-ssz"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/params/spectest"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
)
|
||||
|
||||
@@ -8,11 +8,11 @@ import (
|
||||
|
||||
"github.com/bazelbuild/rules_go/go/tools/bazel"
|
||||
"github.com/gogo/protobuf/proto"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/go-ssz"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
beaconstate "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/params/spectest"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
"gopkg.in/d4l3k/messagediff.v1"
|
||||
@@ -26,7 +26,6 @@ func runBlockHeaderTest(t *testing.T, config string) {
|
||||
testFolders, testsFolderPath := testutil.TestFolders(t, config, "operations/block_header/pyspec_tests")
|
||||
for _, folder := range testFolders {
|
||||
t.Run(folder.Name(), func(t *testing.T) {
|
||||
helpers.ClearAllCaches()
|
||||
blockFile, err := testutil.BazelFileBytes(testsFolderPath, folder.Name(), "block.ssz")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
@@ -40,10 +39,14 @@ func runBlockHeaderTest(t *testing.T, config string) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
preBeaconState := &pb.BeaconState{}
|
||||
if err := ssz.Unmarshal(preBeaconStateFile, preBeaconState); err != nil {
|
||||
preBeaconStateBase := &pb.BeaconState{}
|
||||
if err := ssz.Unmarshal(preBeaconStateFile, preBeaconStateBase); err != nil {
|
||||
t.Fatalf("Failed to unmarshal: %v", err)
|
||||
}
|
||||
preBeaconState, err := beaconstate.InitializeFromProto(preBeaconStateBase)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// If the post.ssz is not present, it means the test should fail on our end.
|
||||
postSSZFilepath, err := bazel.Runfile(path.Join(testsFolderPath, folder.Name(), "post.ssz"))
|
||||
@@ -54,7 +57,8 @@ func runBlockHeaderTest(t *testing.T, config string) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
beaconState, err := blocks.ProcessBlockHeader(preBeaconState, block)
|
||||
// Spectest blocks are not signed, so we'll call NoVerify to skip sig verification.
|
||||
beaconState, err := blocks.ProcessBlockHeaderNoVerify(preBeaconState, block)
|
||||
if postSSZExists {
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error: %v", err)
|
||||
@@ -69,9 +73,8 @@ func runBlockHeaderTest(t *testing.T, config string) {
|
||||
if err := ssz.Unmarshal(postBeaconStateFile, postBeaconState); err != nil {
|
||||
t.Fatalf("Failed to unmarshal: %v", err)
|
||||
}
|
||||
|
||||
if !proto.Equal(beaconState, postBeaconState) {
|
||||
diff, _ := messagediff.PrettyDiff(beaconState, postBeaconState)
|
||||
if !proto.Equal(beaconState.CloneInnerState(), postBeaconState) {
|
||||
diff, _ := messagediff.PrettyDiff(beaconState.CloneInnerState(), postBeaconState)
|
||||
t.Log(diff)
|
||||
t.Fatal("Post state does not match expected")
|
||||
}
|
||||
|
||||
@@ -10,11 +10,12 @@ import (
|
||||
|
||||
"github.com/bazelbuild/rules_go/go/tools/bazel"
|
||||
"github.com/gogo/protobuf/proto"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/go-ssz"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
|
||||
beaconstate "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/params/spectest"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
"gopkg.in/d4l3k/messagediff.v1"
|
||||
@@ -28,15 +29,19 @@ func runBlockProcessingTest(t *testing.T, config string) {
|
||||
testFolders, testsFolderPath := testutil.TestFolders(t, config, "sanity/blocks/pyspec_tests")
|
||||
for _, folder := range testFolders {
|
||||
t.Run(folder.Name(), func(t *testing.T) {
|
||||
helpers.ClearAllCaches()
|
||||
helpers.ClearCache()
|
||||
preBeaconStateFile, err := testutil.BazelFileBytes(testsFolderPath, folder.Name(), "pre.ssz")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
beaconState := &pb.BeaconState{}
|
||||
if err := ssz.Unmarshal(preBeaconStateFile, beaconState); err != nil {
|
||||
beaconStateBase := &pb.BeaconState{}
|
||||
if err := ssz.Unmarshal(preBeaconStateFile, beaconStateBase); err != nil {
|
||||
t.Fatalf("Failed to unmarshal: %v", err)
|
||||
}
|
||||
beaconState, err := beaconstate.InitializeFromProto(beaconStateBase)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
file, err := testutil.BazelFileBytes(testsFolderPath, folder.Name(), "meta.yaml")
|
||||
if err != nil {
|
||||
@@ -55,7 +60,7 @@ func runBlockProcessingTest(t *testing.T, config string) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
block := ðpb.BeaconBlock{}
|
||||
block := ðpb.SignedBeaconBlock{}
|
||||
if err := ssz.Unmarshal(blockFile, block); err != nil {
|
||||
t.Fatalf("Failed to unmarshal: %v", err)
|
||||
}
|
||||
@@ -89,8 +94,8 @@ func runBlockProcessingTest(t *testing.T, config string) {
|
||||
t.Fatalf("Failed to unmarshal: %v", err)
|
||||
}
|
||||
|
||||
if !proto.Equal(beaconState, postBeaconState) {
|
||||
diff, _ := messagediff.PrettyDiff(beaconState, postBeaconState)
|
||||
if !proto.Equal(beaconState.CloneInnerState(), postBeaconState) {
|
||||
diff, _ := messagediff.PrettyDiff(beaconState.CloneInnerState(), postBeaconState)
|
||||
t.Log(diff)
|
||||
t.Fatal("Post state does not match expected")
|
||||
}
|
||||
|
||||
@@ -4,9 +4,9 @@ import (
|
||||
"path"
|
||||
"testing"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/go-ssz"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/params/spectest"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
)
|
||||
|
||||
@@ -4,9 +4,9 @@ import (
|
||||
"path"
|
||||
"testing"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/go-ssz"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/params/spectest"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
)
|
||||
|
||||
@@ -4,9 +4,9 @@ import (
|
||||
"path"
|
||||
"testing"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/go-ssz"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/params/spectest"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
)
|
||||
@@ -24,12 +24,12 @@ func runVoluntaryExitTest(t *testing.T, config string) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
voluntaryExit := ðpb.VoluntaryExit{}
|
||||
voluntaryExit := ðpb.SignedVoluntaryExit{}
|
||||
if err := ssz.Unmarshal(exitFile, voluntaryExit); err != nil {
|
||||
t.Fatalf("Failed to unmarshal: %v", err)
|
||||
}
|
||||
|
||||
body := ðpb.BeaconBlockBody{VoluntaryExits: []*ethpb.VoluntaryExit{voluntaryExit}}
|
||||
body := ðpb.BeaconBlockBody{VoluntaryExits: []*ethpb.SignedVoluntaryExit{voluntaryExit}}
|
||||
testutil.RunBlockOperationTest(t, folderPath, body, blocks.ProcessVoluntaryExits)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -2,20 +2,19 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"epoch_processing.go",
|
||||
"participation.go",
|
||||
],
|
||||
srcs = ["epoch_processing.go"],
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/core/epoch",
|
||||
visibility = ["//beacon-chain:__subpackages__"],
|
||||
deps = [
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/validators:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//proto/beacon/p2p/v1:go_default_library",
|
||||
"//proto/eth/v1alpha1:go_default_library",
|
||||
"//shared/attestationutil:go_default_library",
|
||||
"//shared/mathutil:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_ssz//:go_default_library",
|
||||
],
|
||||
)
|
||||
@@ -24,17 +23,18 @@ go_test(
|
||||
name = "go_default_test",
|
||||
size = "small",
|
||||
srcs = [
|
||||
"epoch_processing_fuzz_test.go",
|
||||
"epoch_processing_test.go",
|
||||
"participation_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//proto/beacon/p2p/v1:go_default_library",
|
||||
"//proto/eth/v1alpha1:go_default_library",
|
||||
"//shared/bytesutil:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
"@com_github_gogo_protobuf//proto:go_default_library",
|
||||
"@com_github_google_gofuzz//:go_default_library",
|
||||
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -5,93 +5,35 @@
|
||||
package epoch
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/go-ssz"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/validators"
|
||||
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/attestationutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/mathutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
)
|
||||
|
||||
// MatchedAttestations is an object that contains the correctly
|
||||
// voted attestations based on source, target and head criteria.
|
||||
type MatchedAttestations struct {
|
||||
source []*pb.PendingAttestation
|
||||
Target []*pb.PendingAttestation
|
||||
head []*pb.PendingAttestation
|
||||
// sortableIndices implements the Sort interface to sort newly activated validator indices
|
||||
// by activation epoch and by index number.
|
||||
type sortableIndices struct {
|
||||
indices []uint64
|
||||
validators []*ethpb.Validator
|
||||
}
|
||||
|
||||
// MatchAttestations matches the attestations gathered in a span of an epoch
|
||||
// and categorize them whether they correctly voted for source, target and head.
|
||||
// We combined the individual helpers from spec for efficiency and to achieve O(N) run time.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
// def get_matching_source_attestations(state: BeaconState, epoch: Epoch) -> List[PendingAttestation]:
|
||||
// assert epoch in (get_current_epoch(state), get_previous_epoch(state))
|
||||
// return state.current_epoch_attestations if epoch == get_current_epoch(state) else state.previous_epoch_attestations
|
||||
//
|
||||
// def get_matching_target_attestations(state: BeaconState, epoch: Epoch) -> List[PendingAttestation]:
|
||||
// return [
|
||||
// a for a in get_matching_source_attestations(state, epoch)
|
||||
// if a.data.target_root == get_block_root(state, epoch)
|
||||
// ]
|
||||
//
|
||||
// def get_matching_head_attestations(state: BeaconState, epoch: Epoch) -> List[PendingAttestation]:
|
||||
// return [
|
||||
// a for a in get_matching_source_attestations(state, epoch)
|
||||
// if a.data.beacon_block_root == get_block_root_at_slot(state, get_attestation_data_slot(state, a.data))
|
||||
// ]
|
||||
func MatchAttestations(state *pb.BeaconState, epoch uint64) (*MatchedAttestations, error) {
|
||||
currentEpoch := helpers.CurrentEpoch(state)
|
||||
previousEpoch := helpers.PrevEpoch(state)
|
||||
|
||||
// Input epoch for matching the source attestations has to be within range
|
||||
// of current epoch & previous epoch.
|
||||
if epoch != currentEpoch && epoch != previousEpoch {
|
||||
return nil, fmt.Errorf("input epoch: %d != current epoch: %d or previous epoch: %d",
|
||||
epoch, currentEpoch, previousEpoch)
|
||||
func (s sortableIndices) Len() int { return len(s.indices) }
|
||||
func (s sortableIndices) Swap(i, j int) { s.indices[i], s.indices[j] = s.indices[j], s.indices[i] }
|
||||
func (s sortableIndices) Less(i, j int) bool {
|
||||
if s.validators[s.indices[i]].ActivationEligibilityEpoch == s.validators[s.indices[j]].ActivationEligibilityEpoch {
|
||||
return s.indices[i] < s.indices[j]
|
||||
}
|
||||
|
||||
// Decide if the source attestations are coming from current or previous epoch.
|
||||
var srcAtts []*pb.PendingAttestation
|
||||
if epoch == currentEpoch {
|
||||
srcAtts = state.CurrentEpochAttestations
|
||||
} else {
|
||||
srcAtts = state.PreviousEpochAttestations
|
||||
}
|
||||
targetRoot, err := helpers.BlockRoot(state, epoch)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get block root for epoch %d", epoch)
|
||||
}
|
||||
|
||||
tgtAtts := make([]*pb.PendingAttestation, 0, len(srcAtts))
|
||||
headAtts := make([]*pb.PendingAttestation, 0, len(srcAtts))
|
||||
for _, srcAtt := range srcAtts {
|
||||
// If the target root matches attestation's target root,
|
||||
// then we know this attestation has correctly voted for target.
|
||||
if bytes.Equal(srcAtt.Data.Target.Root, targetRoot) {
|
||||
tgtAtts = append(tgtAtts, srcAtt)
|
||||
}
|
||||
headRoot, err := helpers.BlockRootAtSlot(state, srcAtt.Data.Slot)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get block root for slot %d", srcAtt.Data.Slot)
|
||||
}
|
||||
if bytes.Equal(srcAtt.Data.BeaconBlockRoot, headRoot) {
|
||||
headAtts = append(headAtts, srcAtt)
|
||||
}
|
||||
}
|
||||
|
||||
return &MatchedAttestations{
|
||||
source: srcAtts,
|
||||
Target: tgtAtts,
|
||||
head: headAtts,
|
||||
}, nil
|
||||
return s.validators[s.indices[i]].ActivationEligibilityEpoch < s.validators[s.indices[j]].ActivationEligibilityEpoch
|
||||
}
|
||||
|
||||
// AttestingBalance returns the total balance from all the attesting indices.
|
||||
@@ -103,7 +45,7 @@ func MatchAttestations(state *pb.BeaconState, epoch uint64) (*MatchedAttestation
|
||||
// Spec pseudocode definition:
|
||||
// def get_attesting_balance(state: BeaconState, attestations: List[PendingAttestation]) -> Gwei:
|
||||
// return get_total_balance(state, get_unslashed_attesting_indices(state, attestations))
|
||||
func AttestingBalance(state *pb.BeaconState, atts []*pb.PendingAttestation) (uint64, error) {
|
||||
func AttestingBalance(state *stateTrie.BeaconState, atts []*pb.PendingAttestation) (uint64, error) {
|
||||
indices, err := unslashedAttestingIndices(state, atts)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "could not get attesting indices")
|
||||
@@ -111,183 +53,42 @@ func AttestingBalance(state *pb.BeaconState, atts []*pb.PendingAttestation) (uin
|
||||
return helpers.TotalBalance(state, indices), nil
|
||||
}
|
||||
|
||||
// ProcessJustificationAndFinalization processes justification and finalization during
|
||||
// epoch processing. This is where a beacon node can justify and finalize a new epoch.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
// def process_justification_and_finalization(state: BeaconState) -> None:
|
||||
// if get_current_epoch(state) <= GENESIS_EPOCH + 1:
|
||||
// return
|
||||
//
|
||||
// previous_epoch = get_previous_epoch(state)
|
||||
// current_epoch = get_current_epoch(state)
|
||||
// old_previous_justified_checkpoint = state.previous_justified_checkpoint
|
||||
// old_current_justified_checkpoint = state.current_justified_checkpoint
|
||||
//
|
||||
// # Process justifications
|
||||
// state.previous_justified_checkpoint = state.current_justified_checkpoint
|
||||
// state.justification_bits[1:] = state.justification_bits[:-1]
|
||||
// state.justification_bits[0] = 0b0
|
||||
// matching_target_attestations = get_matching_target_attestations(state, previous_epoch) # Previous epoch
|
||||
// if get_attesting_balance(state, matching_target_attestations) * 3 >= get_total_active_balance(state) * 2:
|
||||
// state.current_justified_checkpoint = Checkpoint(epoch=previous_epoch,
|
||||
// root=get_block_root(state, previous_epoch))
|
||||
// state.justification_bits[1] = 0b1
|
||||
// matching_target_attestations = get_matching_target_attestations(state, current_epoch) # Current epoch
|
||||
// if get_attesting_balance(state, matching_target_attestations) * 3 >= get_total_active_balance(state) * 2:
|
||||
// state.current_justified_checkpoint = Checkpoint(epoch=current_epoch,
|
||||
// root=get_block_root(state, current_epoch))
|
||||
// state.justification_bits[0] = 0b1
|
||||
//
|
||||
// # Process finalizations
|
||||
// bits = state.justification_bits
|
||||
// # The 2nd/3rd/4th most recent epochs are justified, the 2nd using the 4th as source
|
||||
// if all(bits[1:4]) and old_previous_justified_checkpoint.epoch + 3 == current_epoch:
|
||||
// state.finalized_checkpoint = old_previous_justified_checkpoint
|
||||
// # The 2nd/3rd most recent epochs are justified, the 2nd using the 3rd as source
|
||||
// if all(bits[1:3]) and old_previous_justified_checkpoint.epoch + 2 == current_epoch:
|
||||
// state.finalized_checkpoint = old_previous_justified_checkpoint
|
||||
// # The 1st/2nd/3rd most recent epochs are justified, the 1st using the 3rd as source
|
||||
// if all(bits[0:3]) and old_current_justified_checkpoint.epoch + 2 == current_epoch:
|
||||
// state.finalized_checkpoint = old_current_justified_checkpoint
|
||||
// # The 1st/2nd most recent epochs are justified, the 1st using the 2nd as source
|
||||
// if all(bits[0:2]) and old_current_justified_checkpoint.epoch + 1 == current_epoch:
|
||||
// state.finalized_checkpoint = old_current_justified_checkpoint
|
||||
func ProcessJustificationAndFinalization(state *pb.BeaconState, prevAttestedBal uint64, currAttestedBal uint64) (*pb.BeaconState, error) {
|
||||
if state.Slot <= helpers.StartSlot(2) {
|
||||
return state, nil
|
||||
}
|
||||
|
||||
prevEpoch := helpers.PrevEpoch(state)
|
||||
currentEpoch := helpers.CurrentEpoch(state)
|
||||
oldPrevJustifiedCheckpoint := state.PreviousJustifiedCheckpoint
|
||||
oldCurrJustifiedCheckpoint := state.CurrentJustifiedCheckpoint
|
||||
|
||||
totalBal, err := helpers.TotalActiveBalance(state)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get total balance")
|
||||
}
|
||||
|
||||
// Process justifications
|
||||
state.PreviousJustifiedCheckpoint = state.CurrentJustifiedCheckpoint
|
||||
state.JustificationBits.Shift(1)
|
||||
|
||||
// Note: the spec refers to the bit index position starting at 1 instead of starting at zero.
|
||||
// We will use that paradigm here for consistency with the godoc spec definition.
|
||||
|
||||
// If 2/3 or more of total balance attested in the previous epoch.
|
||||
if 3*prevAttestedBal >= 2*totalBal {
|
||||
blockRoot, err := helpers.BlockRoot(state, prevEpoch)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get block root for previous epoch %d", prevEpoch)
|
||||
}
|
||||
state.CurrentJustifiedCheckpoint = ðpb.Checkpoint{Epoch: prevEpoch, Root: blockRoot}
|
||||
state.JustificationBits.SetBitAt(1, true)
|
||||
}
|
||||
|
||||
// If 2/3 or more of the total balance attested in the current epoch.
|
||||
if 3*currAttestedBal >= 2*totalBal {
|
||||
blockRoot, err := helpers.BlockRoot(state, currentEpoch)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get block root for current epoch %d", prevEpoch)
|
||||
}
|
||||
state.CurrentJustifiedCheckpoint = ðpb.Checkpoint{Epoch: currentEpoch, Root: blockRoot}
|
||||
state.JustificationBits.SetBitAt(0, true)
|
||||
}
|
||||
|
||||
// Process finalization according to ETH2.0 specifications.
|
||||
justification := state.JustificationBits.Bytes()[0]
|
||||
|
||||
// 2nd/3rd/4th (0b1110) most recent epochs are justified, the 2nd using the 4th as source.
|
||||
if justification&0x0E == 0x0E && (oldPrevJustifiedCheckpoint.Epoch+3) == currentEpoch {
|
||||
state.FinalizedCheckpoint = oldPrevJustifiedCheckpoint
|
||||
}
|
||||
|
||||
// 2nd/3rd (0b0110) most recent epochs are justified, the 2nd using the 3rd as source.
|
||||
if justification&0x06 == 0x06 && (oldPrevJustifiedCheckpoint.Epoch+2) == currentEpoch {
|
||||
state.FinalizedCheckpoint = oldPrevJustifiedCheckpoint
|
||||
}
|
||||
|
||||
// 1st/2nd/3rd (0b0111) most recent epochs are justified, the 1st using the 3rd as source.
|
||||
if justification&0x07 == 0x07 && (oldCurrJustifiedCheckpoint.Epoch+2) == currentEpoch {
|
||||
state.FinalizedCheckpoint = oldCurrJustifiedCheckpoint
|
||||
}
|
||||
|
||||
// The 1st/2nd (0b0011) most recent epochs are justified, the 1st using the 2nd as source
|
||||
if justification&0x03 == 0x03 && (oldCurrJustifiedCheckpoint.Epoch+1) == currentEpoch {
|
||||
state.FinalizedCheckpoint = oldCurrJustifiedCheckpoint
|
||||
}
|
||||
|
||||
return state, nil
|
||||
}
|
||||
|
||||
// ProcessRewardsAndPenalties processes the rewards and penalties of individual validator.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
// def process_rewards_and_penalties(state: BeaconState) -> None:
|
||||
// if get_current_epoch(state) == GENESIS_EPOCH:
|
||||
// return
|
||||
//
|
||||
// rewards1, penalties1 = get_attestation_deltas(state)
|
||||
// rewards2, penalties2 = get_crosslink_deltas(state)
|
||||
// for i in range(len(state.validator_registry)):
|
||||
// increase_balance(state, i, rewards1[i] + rewards2[i])
|
||||
// decrease_balance(state, i, penalties1[i] + penalties2[i])
|
||||
func ProcessRewardsAndPenalties(state *pb.BeaconState) (*pb.BeaconState, error) {
|
||||
// Can't process rewards and penalties in genesis epoch.
|
||||
if helpers.CurrentEpoch(state) == 0 {
|
||||
return state, nil
|
||||
}
|
||||
attsRewards, attsPenalties, err := attestationDelta(state)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get attestation delta")
|
||||
}
|
||||
|
||||
for i := 0; i < len(state.Validators); i++ {
|
||||
state = helpers.IncreaseBalance(state, uint64(i), attsRewards[i])
|
||||
state = helpers.DecreaseBalance(state, uint64(i), attsPenalties[i])
|
||||
}
|
||||
return state, nil
|
||||
}
|
||||
|
||||
// ProcessRegistryUpdates rotates validators in and out of active pool.
|
||||
// the amount to rotate is determined churn limit.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
// def process_registry_updates(state: BeaconState) -> None:
|
||||
// # Process activation eligibility and ejections
|
||||
// for index, validator in enumerate(state.validator_registry):
|
||||
// if (
|
||||
// validator.activation_eligibility_epoch == FAR_FUTURE_EPOCH and
|
||||
// validator.effective_balance >= MAX_EFFECTIVE_BALANCE
|
||||
// ):
|
||||
// validator.activation_eligibility_epoch = get_current_epoch(state)
|
||||
// for index, validator in enumerate(state.validators):
|
||||
// if is_eligible_for_activation_queue(validator):
|
||||
// validator.activation_eligibility_epoch = get_current_epoch(state) + 1
|
||||
//
|
||||
// if is_active_validator(validator, get_current_epoch(state)) and validator.effective_balance <= EJECTION_BALANCE:
|
||||
// initiate_validator_exit(state, index)
|
||||
// initiate_validator_exit(state, ValidatorIndex(index))
|
||||
//
|
||||
// # Queue validators eligible for activation and not dequeued for activation prior to finalized epoch
|
||||
// # Queue validators eligible for activation and not yet dequeued for activation
|
||||
// activation_queue = sorted([
|
||||
// index for index, validator in enumerate(state.validator_registry) if
|
||||
// validator.activation_eligibility_epoch != FAR_FUTURE_EPOCH and
|
||||
// validator.activation_epoch >= get_delayed_activation_exit_epoch(state.finalized_epoch)
|
||||
// ], key=lambda index: state.validator_registry[index].activation_eligibility_epoch)
|
||||
// # Dequeued validators for activation up to churn limit (without resetting activation epoch)
|
||||
// for index in activation_queue[:get_churn_limit(state)]:
|
||||
// validator = state.validator_registry[index]
|
||||
// if validator.activation_epoch == FAR_FUTURE_EPOCH:
|
||||
// validator.activation_epoch = get_delayed_activation_exit_epoch(get_current_epoch(state))
|
||||
func ProcessRegistryUpdates(state *pb.BeaconState) (*pb.BeaconState, error) {
|
||||
// index for index, validator in enumerate(state.validators)
|
||||
// if is_eligible_for_activation(state, validator)
|
||||
// # Order by the sequence of activation_eligibility_epoch setting and then index
|
||||
// ], key=lambda index: (state.validators[index].activation_eligibility_epoch, index))
|
||||
// # Dequeued validators for activation up to churn limit
|
||||
// for index in activation_queue[:get_validator_churn_limit(state)]:
|
||||
// validator = state.validators[index]
|
||||
// validator.activation_epoch = compute_activation_exit_epoch(get_current_epoch(state))
|
||||
func ProcessRegistryUpdates(state *stateTrie.BeaconState) (*stateTrie.BeaconState, error) {
|
||||
currentEpoch := helpers.CurrentEpoch(state)
|
||||
|
||||
vals := state.Validators()
|
||||
var err error
|
||||
for idx, validator := range state.Validators {
|
||||
for idx, validator := range vals {
|
||||
// Process the validators for activation eligibility.
|
||||
eligibleToActivate := validator.ActivationEligibilityEpoch == params.BeaconConfig().FarFutureEpoch
|
||||
properBalance := validator.EffectiveBalance >= params.BeaconConfig().MaxEffectiveBalance
|
||||
if eligibleToActivate && properBalance {
|
||||
validator.ActivationEligibilityEpoch = currentEpoch
|
||||
if helpers.IsEligibleForActivationQueue(validator) {
|
||||
validator.ActivationEligibilityEpoch = helpers.CurrentEpoch(state) + 1
|
||||
if err := state.UpdateValidatorAtIndex(uint64(idx), validator); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// Process the validators for ejection.
|
||||
isActive := helpers.IsActiveValidator(validator, currentEpoch)
|
||||
belowEjectionBalance := validator.EffectiveBalance <= params.BeaconConfig().EjectionBalance
|
||||
@@ -299,22 +100,24 @@ func ProcessRegistryUpdates(state *pb.BeaconState) (*pb.BeaconState, error) {
|
||||
}
|
||||
}
|
||||
|
||||
// Queue the validators whose eligible to activate and sort them by activation eligibility epoch number
|
||||
// Queue validators eligible for activation and not yet dequeued for activation.
|
||||
var activationQ []uint64
|
||||
for idx, validator := range state.Validators {
|
||||
eligibleActivated := validator.ActivationEligibilityEpoch != params.BeaconConfig().FarFutureEpoch
|
||||
canBeActive := validator.ActivationEpoch >= helpers.DelayedActivationExitEpoch(state.FinalizedCheckpoint.Epoch)
|
||||
if eligibleActivated && canBeActive {
|
||||
for idx, validator := range vals {
|
||||
if helpers.IsEligibleForActivation(state, validator) {
|
||||
activationQ = append(activationQ, uint64(idx))
|
||||
}
|
||||
}
|
||||
sort.Slice(activationQ, func(i, j int) bool {
|
||||
return state.Validators[i].ActivationEligibilityEpoch < state.Validators[j].ActivationEligibilityEpoch
|
||||
})
|
||||
|
||||
sort.Sort(sortableIndices{indices: activationQ, validators: vals})
|
||||
|
||||
// Only activate just enough validators according to the activation churn limit.
|
||||
limit := len(activationQ)
|
||||
churnLimit, err := helpers.ValidatorChurnLimit(state)
|
||||
activeValidatorCount, err := helpers.ActiveValidatorCount(state, currentEpoch)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get active validator count")
|
||||
}
|
||||
|
||||
churnLimit, err := helpers.ValidatorChurnLimit(activeValidatorCount)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get churn limit")
|
||||
}
|
||||
@@ -323,10 +126,15 @@ func ProcessRegistryUpdates(state *pb.BeaconState) (*pb.BeaconState, error) {
|
||||
if int(churnLimit) < limit {
|
||||
limit = int(churnLimit)
|
||||
}
|
||||
|
||||
for _, index := range activationQ[:limit] {
|
||||
validator := state.Validators[index]
|
||||
if validator.ActivationEpoch == params.BeaconConfig().FarFutureEpoch {
|
||||
validator.ActivationEpoch = helpers.DelayedActivationExitEpoch(currentEpoch)
|
||||
validator, err := state.ValidatorAtIndex(index)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
validator.ActivationEpoch = helpers.ActivationExitEpoch(currentEpoch)
|
||||
if err := state.UpdateValidatorAtIndex(index, validator); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return state, nil
|
||||
@@ -343,7 +151,7 @@ func ProcessRegistryUpdates(state *pb.BeaconState) (*pb.BeaconState, error) {
|
||||
// penalty_numerator = validator.effective_balance // increment * min(sum(state.slashings) * 3, total_balance)
|
||||
// penalty = penalty_numerator // total_balance * increment
|
||||
// decrease_balance(state, ValidatorIndex(index), penalty)
|
||||
func ProcessSlashings(state *pb.BeaconState) (*pb.BeaconState, error) {
|
||||
func ProcessSlashings(state *stateTrie.BeaconState) (*stateTrie.BeaconState, error) {
|
||||
currentEpoch := helpers.CurrentEpoch(state)
|
||||
totalBalance, err := helpers.TotalActiveBalance(state)
|
||||
if err != nil {
|
||||
@@ -354,22 +162,28 @@ func ProcessSlashings(state *pb.BeaconState) (*pb.BeaconState, error) {
|
||||
exitLength := params.BeaconConfig().EpochsPerSlashingsVector
|
||||
|
||||
// Compute the sum of state slashings
|
||||
slashings := state.Slashings()
|
||||
totalSlashing := uint64(0)
|
||||
for _, slashing := range state.Slashings {
|
||||
for _, slashing := range slashings {
|
||||
totalSlashing += slashing
|
||||
}
|
||||
|
||||
// Compute slashing for each validator.
|
||||
for index, validator := range state.Validators {
|
||||
correctEpoch := (currentEpoch + exitLength/2) == validator.WithdrawableEpoch
|
||||
if validator.Slashed && correctEpoch {
|
||||
// a callback is used here to apply the following actions to all validators
|
||||
// below equally.
|
||||
err = state.ApplyToEveryValidator(func(idx int, val *ethpb.Validator) (bool, error) {
|
||||
correctEpoch := (currentEpoch + exitLength/2) == val.WithdrawableEpoch
|
||||
if val.Slashed && correctEpoch {
|
||||
minSlashing := mathutil.Min(totalSlashing*3, totalBalance)
|
||||
increment := params.BeaconConfig().EffectiveBalanceIncrement
|
||||
penaltyNumerator := validator.EffectiveBalance / increment * minSlashing
|
||||
penaltyNumerator := val.EffectiveBalance / increment * minSlashing
|
||||
penalty := penaltyNumerator / totalBalance * increment
|
||||
state = helpers.DecreaseBalance(state, uint64(index), penalty)
|
||||
if err := helpers.DecreaseBalance(state, uint64(idx), penalty); err != nil {
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
})
|
||||
return state, err
|
||||
}
|
||||
|
||||
@@ -409,54 +223,97 @@ func ProcessSlashings(state *pb.BeaconState) (*pb.BeaconState, error) {
|
||||
// # Rotate current/previous epoch attestations
|
||||
// state.previous_epoch_attestations = state.current_epoch_attestations
|
||||
// state.current_epoch_attestations = []
|
||||
func ProcessFinalUpdates(state *pb.BeaconState) (*pb.BeaconState, error) {
|
||||
func ProcessFinalUpdates(state *stateTrie.BeaconState) (*stateTrie.BeaconState, error) {
|
||||
currentEpoch := helpers.CurrentEpoch(state)
|
||||
nextEpoch := currentEpoch + 1
|
||||
|
||||
// Reset ETH1 data votes.
|
||||
if (state.Slot+1)%params.BeaconConfig().SlotsPerEth1VotingPeriod == 0 {
|
||||
state.Eth1DataVotes = []*ethpb.Eth1Data{}
|
||||
if (state.Slot()+1)%params.BeaconConfig().SlotsPerEth1VotingPeriod == 0 {
|
||||
if err := state.SetEth1DataVotes([]*ethpb.Eth1Data{}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
bals := state.Balances()
|
||||
// Update effective balances with hysteresis.
|
||||
for i, v := range state.Validators {
|
||||
balance := state.Balances[i]
|
||||
halfInc := params.BeaconConfig().EffectiveBalanceIncrement / 2
|
||||
if balance < v.EffectiveBalance || v.EffectiveBalance+3*halfInc < balance {
|
||||
v.EffectiveBalance = params.BeaconConfig().MaxEffectiveBalance
|
||||
if v.EffectiveBalance > balance-balance%params.BeaconConfig().EffectiveBalanceIncrement {
|
||||
v.EffectiveBalance = balance - balance%params.BeaconConfig().EffectiveBalanceIncrement
|
||||
}
|
||||
validatorFunc := func(idx int, val *ethpb.Validator) (bool, error) {
|
||||
if val == nil {
|
||||
return false, fmt.Errorf("validator %d is nil in state", idx)
|
||||
}
|
||||
if idx >= len(bals) {
|
||||
return false, fmt.Errorf("validator index exceeds validator length in state %d >= %d", idx, len(state.Balances()))
|
||||
}
|
||||
balance := bals[idx]
|
||||
halfInc := params.BeaconConfig().EffectiveBalanceIncrement / 2
|
||||
if balance < val.EffectiveBalance || val.EffectiveBalance+3*halfInc < balance {
|
||||
val.EffectiveBalance = params.BeaconConfig().MaxEffectiveBalance
|
||||
if val.EffectiveBalance > balance-balance%params.BeaconConfig().EffectiveBalanceIncrement {
|
||||
val.EffectiveBalance = balance - balance%params.BeaconConfig().EffectiveBalanceIncrement
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
if err := state.ApplyToEveryValidator(validatorFunc); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Set total slashed balances.
|
||||
slashedExitLength := params.BeaconConfig().EpochsPerSlashingsVector
|
||||
state.Slashings[nextEpoch%slashedExitLength] = 0
|
||||
slashedEpoch := int(nextEpoch % slashedExitLength)
|
||||
slashings := state.Slashings()
|
||||
if len(slashings) != int(slashedExitLength) {
|
||||
return nil, fmt.Errorf(
|
||||
"state slashing length %d different than EpochsPerHistoricalVector %d",
|
||||
len(slashings),
|
||||
slashedExitLength,
|
||||
)
|
||||
}
|
||||
if err := state.UpdateSlashingsAtIndex(uint64(slashedEpoch) /* index */, 0 /* value */); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Set RANDAO mix.
|
||||
randaoMixLength := params.BeaconConfig().EpochsPerHistoricalVector
|
||||
mix := helpers.RandaoMix(state, currentEpoch)
|
||||
state.RandaoMixes[nextEpoch%randaoMixLength] = mix
|
||||
if state.RandaoMixesLength() != int(randaoMixLength) {
|
||||
return nil, fmt.Errorf(
|
||||
"state randao length %d different than EpochsPerHistoricalVector %d",
|
||||
state.RandaoMixesLength(),
|
||||
randaoMixLength,
|
||||
)
|
||||
}
|
||||
mix, err := helpers.RandaoMix(state, currentEpoch)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := state.UpdateRandaoMixesAtIndex(mix, nextEpoch%randaoMixLength); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Set historical root accumulator.
|
||||
epochsPerHistoricalRoot := params.BeaconConfig().SlotsPerHistoricalRoot / params.BeaconConfig().SlotsPerEpoch
|
||||
if nextEpoch%epochsPerHistoricalRoot == 0 {
|
||||
historicalBatch := &pb.HistoricalBatch{
|
||||
BlockRoots: state.BlockRoots,
|
||||
StateRoots: state.StateRoots,
|
||||
BlockRoots: state.BlockRoots(),
|
||||
StateRoots: state.StateRoots(),
|
||||
}
|
||||
batchRoot, err := ssz.HashTreeRoot(historicalBatch)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not hash historical batch")
|
||||
}
|
||||
state.HistoricalRoots = append(state.HistoricalRoots, batchRoot[:])
|
||||
if err := state.AppendHistoricalRoots(batchRoot); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// Rotate current and previous epoch attestations.
|
||||
state.PreviousEpochAttestations = state.CurrentEpochAttestations
|
||||
state.CurrentEpochAttestations = []*pb.PendingAttestation{}
|
||||
|
||||
if err := state.SetPreviousEpochAttestations(state.CurrentEpochAttestations()); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := state.SetCurrentEpochAttestations([]*pb.PendingAttestation{}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return state, nil
|
||||
}
|
||||
|
||||
@@ -470,11 +327,16 @@ func ProcessFinalUpdates(state *pb.BeaconState) (*pb.BeaconState, error) {
|
||||
// for a in attestations:
|
||||
// output = output.union(get_attesting_indices(state, a.data, a.aggregation_bits))
|
||||
// return set(filter(lambda index: not state.validators[index].slashed, output))
|
||||
func unslashedAttestingIndices(state *pb.BeaconState, atts []*pb.PendingAttestation) ([]uint64, error) {
|
||||
func unslashedAttestingIndices(state *stateTrie.BeaconState, atts []*pb.PendingAttestation) ([]uint64, error) {
|
||||
var setIndices []uint64
|
||||
seen := make(map[uint64]bool)
|
||||
|
||||
for _, att := range atts {
|
||||
attestingIndices, err := helpers.AttestingIndices(state, att.Data, att.AggregationBits)
|
||||
committee, err := helpers.BeaconCommitteeFromState(state, att.Data.Slot, att.Data.CommitteeIndex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
attestingIndices, err := attestationutil.AttestingIndices(att.AggregationBits, committee)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get attester indices")
|
||||
}
|
||||
@@ -492,7 +354,7 @@ func unslashedAttestingIndices(state *pb.BeaconState, atts []*pb.PendingAttestat
|
||||
sort.Slice(setIndices, func(i, j int) bool { return setIndices[i] < setIndices[j] })
|
||||
// Remove the slashed validator indices.
|
||||
for i := 0; i < len(setIndices); i++ {
|
||||
if state.Validators[setIndices[i]].Slashed {
|
||||
if v, _ := state.ValidatorAtIndex(setIndices[i]); v != nil && v.Slashed {
|
||||
setIndices = append(setIndices[:i], setIndices[i+1:]...)
|
||||
}
|
||||
}
|
||||
@@ -511,193 +373,17 @@ func unslashedAttestingIndices(state *pb.BeaconState, atts []*pb.PendingAttestat
|
||||
// total_balance = get_total_active_balance(state)
|
||||
// effective_balance = state.validator_registry[index].effective_balance
|
||||
// return effective_balance * BASE_REWARD_FACTOR // integer_squareroot(total_balance) // BASE_REWARDS_PER_EPOCH
|
||||
func BaseReward(state *pb.BeaconState, index uint64) (uint64, error) {
|
||||
func BaseReward(state *stateTrie.BeaconState, index uint64) (uint64, error) {
|
||||
totalBalance, err := helpers.TotalActiveBalance(state)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "could not calculate active balance")
|
||||
}
|
||||
effectiveBalance := state.Validators[index].EffectiveBalance
|
||||
val, err := state.ValidatorAtIndex(index)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
effectiveBalance := val.EffectiveBalance
|
||||
baseReward := effectiveBalance * params.BeaconConfig().BaseRewardFactor /
|
||||
mathutil.IntegerSquareRoot(totalBalance) / params.BeaconConfig().BaseRewardsPerEpoch
|
||||
return baseReward, nil
|
||||
}
|
||||
|
||||
// attestationDelta calculates the rewards and penalties of individual
|
||||
// validator for voting the correct FFG source, FFG target, and head. It
|
||||
// also calculates proposer delay inclusion and inactivity rewards
|
||||
// and penalties. Individual rewards and penalties are returned in list.
|
||||
//
|
||||
// Note: we calculated adjusted quotient outside of base reward because it's too inefficient
|
||||
// to repeat the same calculation for every validator versus just doing it once.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
// def get_attestation_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], Sequence[Gwei]]:
|
||||
// previous_epoch = get_previous_epoch(state)
|
||||
// total_balance = get_total_active_balance(state)
|
||||
// rewards = [Gwei(0) for _ in range(len(state.validators))]
|
||||
// penalties = [Gwei(0) for _ in range(len(state.validators))]
|
||||
// eligible_validator_indices = [
|
||||
// ValidatorIndex(index) for index, v in enumerate(state.validators)
|
||||
// if is_active_validator(v, previous_epoch) or (v.slashed and previous_epoch + 1 < v.withdrawable_epoch)
|
||||
// ]
|
||||
//
|
||||
// # Micro-incentives for matching FFG source, FFG target, and head
|
||||
// matching_source_attestations = get_matching_source_attestations(state, previous_epoch)
|
||||
// matching_target_attestations = get_matching_target_attestations(state, previous_epoch)
|
||||
// matching_head_attestations = get_matching_head_attestations(state, previous_epoch)
|
||||
// for attestations in (matching_source_attestations, matching_target_attestations, matching_head_attestations):
|
||||
// unslashed_attesting_indices = get_unslashed_attesting_indices(state, attestations)
|
||||
// attesting_balance = get_total_balance(state, unslashed_attesting_indices)
|
||||
// for index in eligible_validator_indices:
|
||||
// if index in unslashed_attesting_indices:
|
||||
// rewards[index] += get_base_reward(state, index) * attesting_balance // total_balance
|
||||
// else:
|
||||
// penalties[index] += get_base_reward(state, index)
|
||||
//
|
||||
// # Proposer and inclusion delay micro-rewards
|
||||
// for index in get_unslashed_attesting_indices(state, matching_source_attestations):
|
||||
// index = ValidatorIndex(index)
|
||||
// attestation = min([
|
||||
// a for a in matching_source_attestations
|
||||
// if index in get_attesting_indices(state, a.data, a.aggregation_bits)
|
||||
// ], key=lambda a: a.inclusion_delay)
|
||||
// proposer_reward = Gwei(get_base_reward(state, index) // PROPOSER_REWARD_QUOTIENT)
|
||||
// rewards[attestation.proposer_index] += proposer_reward
|
||||
// max_attester_reward = get_base_reward(state, index) - proposer_reward
|
||||
// rewards[index] += Gwei(max_attester_reward // attestation.inclusion_delay)
|
||||
//
|
||||
// # Inactivity penalty
|
||||
// finality_delay = previous_epoch - state.finalized_checkpoint.epoch
|
||||
// if finality_delay > MIN_EPOCHS_TO_INACTIVITY_PENALTY:
|
||||
// matching_target_attesting_indices = get_unslashed_attesting_indices(state, matching_target_attestations)
|
||||
// for index in eligible_validator_indices:
|
||||
// index = ValidatorIndex(index)
|
||||
// penalties[index] += Gwei(BASE_REWARDS_PER_EPOCH * get_base_reward(state, index))
|
||||
// if index not in matching_target_attesting_indices:
|
||||
// penalties[index] += Gwei(
|
||||
// state.validators[index].effective_balance * finality_delay // INACTIVITY_PENALTY_QUOTIENT
|
||||
// )
|
||||
//
|
||||
// return rewards, penalties
|
||||
func attestationDelta(state *pb.BeaconState) ([]uint64, []uint64, error) {
|
||||
prevEpoch := helpers.PrevEpoch(state)
|
||||
totalBalance, err := helpers.TotalActiveBalance(state)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not get total active balance")
|
||||
}
|
||||
|
||||
rewards := make([]uint64, len(state.Validators))
|
||||
penalties := make([]uint64, len(state.Validators))
|
||||
|
||||
// Filter out the list of eligible validator indices. The eligible validator
|
||||
// has to be active or slashed but before withdrawn.
|
||||
var eligible []uint64
|
||||
for i, v := range state.Validators {
|
||||
isActive := helpers.IsActiveValidator(v, prevEpoch)
|
||||
isSlashed := v.Slashed && (prevEpoch+1 < v.WithdrawableEpoch)
|
||||
if isActive || isSlashed {
|
||||
eligible = append(eligible, uint64(i))
|
||||
}
|
||||
}
|
||||
|
||||
// Apply rewards and penalties for voting correct source target and head.
|
||||
// Construct a attestations list contains source, target and head attestations.
|
||||
atts, err := MatchAttestations(state, prevEpoch)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not get source, target and head attestations")
|
||||
}
|
||||
var attsPackage [][]*pb.PendingAttestation
|
||||
attsPackage = append(attsPackage, atts.source)
|
||||
attsPackage = append(attsPackage, atts.Target)
|
||||
attsPackage = append(attsPackage, atts.head)
|
||||
|
||||
// Cache the validators who voted correctly for source in a map
|
||||
// to calculate earliest attestation rewards later.
|
||||
attestersVotedSource := make(map[uint64]*pb.PendingAttestation)
|
||||
// Compute rewards / penalties for each attestation in the list and update
|
||||
// the rewards and penalties lists.
|
||||
for i, matchAtt := range attsPackage {
|
||||
indices, err := unslashedAttestingIndices(state, matchAtt)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not get attestation indices")
|
||||
}
|
||||
|
||||
attested := make(map[uint64]bool)
|
||||
// Construct a map to look up validators that voted for source, target or head.
|
||||
for _, index := range indices {
|
||||
if i == 0 {
|
||||
attestersVotedSource[index] = &pb.PendingAttestation{InclusionDelay: params.BeaconConfig().FarFutureEpoch}
|
||||
}
|
||||
attested[index] = true
|
||||
}
|
||||
attestedBalance := helpers.TotalBalance(state, indices)
|
||||
|
||||
// Update rewards and penalties to each eligible validator index.
|
||||
for _, index := range eligible {
|
||||
base, err := BaseReward(state, index)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not get base reward")
|
||||
}
|
||||
if _, ok := attested[index]; ok {
|
||||
rewards[index] += base * attestedBalance / totalBalance
|
||||
} else {
|
||||
penalties[index] += base
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// For every index, filter the matching source attestation that correspond to the index,
|
||||
// sort by inclusion delay and get the one that was included on chain first.
|
||||
for _, att := range atts.source {
|
||||
indices, err := helpers.AttestingIndices(state, att.Data, att.AggregationBits)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not get attester indices")
|
||||
}
|
||||
for _, i := range indices {
|
||||
if _, ok := attestersVotedSource[i]; ok {
|
||||
if attestersVotedSource[i].InclusionDelay > att.InclusionDelay {
|
||||
attestersVotedSource[i] = att
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for i, a := range attestersVotedSource {
|
||||
baseReward, err := BaseReward(state, i)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not get proposer reward")
|
||||
}
|
||||
proposerReward := baseReward / params.BeaconConfig().ProposerRewardQuotient
|
||||
rewards[a.ProposerIndex] += proposerReward
|
||||
attesterReward := baseReward - proposerReward
|
||||
rewards[i] += attesterReward / a.InclusionDelay
|
||||
}
|
||||
|
||||
// Apply penalties for quadratic leaks.
|
||||
// When epoch since finality exceeds inactivity penalty constant, the penalty gets increased
|
||||
// based on the finality delay.
|
||||
finalityDelay := prevEpoch - state.FinalizedCheckpoint.Epoch
|
||||
if finalityDelay > params.BeaconConfig().MinEpochsToInactivityPenalty {
|
||||
targetIndices, err := unslashedAttestingIndices(state, atts.Target)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not get attestation indices")
|
||||
}
|
||||
attestedTarget := make(map[uint64]bool)
|
||||
for _, index := range targetIndices {
|
||||
attestedTarget[index] = true
|
||||
}
|
||||
for _, index := range eligible {
|
||||
base, err := BaseReward(state, index)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not get base reward")
|
||||
}
|
||||
penalties[index] += params.BeaconConfig().BaseRewardsPerEpoch * base
|
||||
if _, ok := attestedTarget[index]; !ok {
|
||||
penalties[index] += state.Validators[index].EffectiveBalance * finalityDelay /
|
||||
params.BeaconConfig().InactivityPenaltyQuotient
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return rewards, penalties, nil
|
||||
}
|
||||
|
||||
23
beacon-chain/core/epoch/epoch_processing_fuzz_test.go
Normal file
23
beacon-chain/core/epoch/epoch_processing_fuzz_test.go
Normal file
@@ -0,0 +1,23 @@
|
||||
package epoch
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
fuzz "github.com/google/gofuzz"
|
||||
beaconstate "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
ethereum_beacon_p2p_v1 "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
)
|
||||
|
||||
func TestFuzzFinalUpdates_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
base := ðereum_beacon_p2p_v1.BeaconState{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(base)
|
||||
s, err := beaconstate.InitializeFromProtoUnsafe(base)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
_, _ = ProcessFinalUpdates(s)
|
||||
}
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,42 +0,0 @@
|
||||
package epoch
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
)
|
||||
|
||||
// ComputeValidatorParticipation by matching validator attestations from the previous epoch,
|
||||
// computing the attesting balance, and how much attested compared to the total balance.
|
||||
func ComputeValidatorParticipation(state *pb.BeaconState, epoch uint64) (*ethpb.ValidatorParticipation, error) {
|
||||
currentEpoch := helpers.CurrentEpoch(state)
|
||||
previousEpoch := helpers.PrevEpoch(state)
|
||||
if epoch != currentEpoch && epoch != previousEpoch {
|
||||
return nil, fmt.Errorf(
|
||||
"requested epoch is not previous epoch %d or current epoch %d, requested %d",
|
||||
previousEpoch,
|
||||
currentEpoch,
|
||||
epoch,
|
||||
)
|
||||
}
|
||||
atts, err := MatchAttestations(state, epoch)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not retrieve head attestations")
|
||||
}
|
||||
attestedBalances, err := AttestingBalance(state, atts.Target)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not retrieve attested balances")
|
||||
}
|
||||
totalBalances, err := helpers.TotalActiveBalance(state)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not retrieve total balances")
|
||||
}
|
||||
return ðpb.ValidatorParticipation{
|
||||
GlobalParticipationRate: float32(attestedBalances) / float32(totalBalances),
|
||||
VotedEther: attestedBalances,
|
||||
EligibleEther: totalBalances,
|
||||
}, nil
|
||||
}
|
||||
@@ -1,166 +0,0 @@
|
||||
package epoch_test
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
)
|
||||
|
||||
func TestComputeValidatorParticipation_PreviousEpoch(t *testing.T) {
|
||||
params.OverrideBeaconConfig(params.MinimalSpecConfig())
|
||||
e := uint64(1)
|
||||
attestedBalance := uint64(20) * params.BeaconConfig().MaxEffectiveBalance
|
||||
validatorCount := uint64(100)
|
||||
|
||||
validators := make([]*ethpb.Validator, validatorCount)
|
||||
balances := make([]uint64, validatorCount)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
}
|
||||
balances[i] = params.BeaconConfig().MaxEffectiveBalance
|
||||
}
|
||||
|
||||
blockRoots := make([][]byte, 256)
|
||||
for i := 0; i < len(blockRoots); i++ {
|
||||
slot := bytesutil.Bytes32(uint64(i))
|
||||
blockRoots[i] = slot
|
||||
}
|
||||
target := ðpb.Checkpoint{
|
||||
Epoch: e,
|
||||
Root: blockRoots[0],
|
||||
}
|
||||
|
||||
atts := []*pb.PendingAttestation{
|
||||
{
|
||||
Data: ðpb.AttestationData{Target: target, Slot: 0},
|
||||
AggregationBits: []byte{0xFF, 0xFF, 0xFF, 0xFF},
|
||||
},
|
||||
{
|
||||
Data: ðpb.AttestationData{Target: target, Slot: 1},
|
||||
AggregationBits: []byte{0xFF, 0xFF, 0xFF, 0xFF},
|
||||
},
|
||||
{
|
||||
Data: ðpb.AttestationData{Target: target, Slot: 2},
|
||||
AggregationBits: []byte{0xFF, 0xFF, 0xFF, 0xFF},
|
||||
},
|
||||
{
|
||||
Data: ðpb.AttestationData{Target: target, Slot: 3},
|
||||
AggregationBits: []byte{0xFF, 0xFF, 0xFF, 0xFF},
|
||||
},
|
||||
{
|
||||
Data: ðpb.AttestationData{Target: target, Slot: 4},
|
||||
AggregationBits: []byte{0xFF, 0xFF, 0xFF, 0xFF},
|
||||
},
|
||||
}
|
||||
|
||||
s := &pb.BeaconState{
|
||||
Slot: e*params.BeaconConfig().SlotsPerEpoch + 1,
|
||||
Validators: validators,
|
||||
Balances: balances,
|
||||
BlockRoots: blockRoots,
|
||||
Slashings: []uint64{0, 1e9, 1e9},
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
PreviousEpochAttestations: atts,
|
||||
FinalizedCheckpoint: ðpb.Checkpoint{},
|
||||
JustificationBits: bitfield.Bitvector4{0x00},
|
||||
PreviousJustifiedCheckpoint: target,
|
||||
}
|
||||
|
||||
res, err := epoch.ComputeValidatorParticipation(s, e-1)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
wanted := ðpb.ValidatorParticipation{
|
||||
VotedEther: attestedBalance,
|
||||
EligibleEther: validatorCount * params.BeaconConfig().MaxEffectiveBalance,
|
||||
GlobalParticipationRate: float32(attestedBalance) / float32(validatorCount*params.BeaconConfig().MaxEffectiveBalance),
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(res, wanted) {
|
||||
t.Errorf("Incorrect validator participation, wanted %v received %v", wanted, res)
|
||||
}
|
||||
}
|
||||
|
||||
func TestComputeValidatorParticipation_CurrentEpoch(t *testing.T) {
|
||||
params.OverrideBeaconConfig(params.MinimalSpecConfig())
|
||||
e := uint64(1)
|
||||
attestedBalance := uint64(16) * params.BeaconConfig().MaxEffectiveBalance
|
||||
validatorCount := uint64(100)
|
||||
|
||||
validators := make([]*ethpb.Validator, validatorCount)
|
||||
balances := make([]uint64, validatorCount)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
}
|
||||
balances[i] = params.BeaconConfig().MaxEffectiveBalance
|
||||
}
|
||||
|
||||
slot := e*params.BeaconConfig().SlotsPerEpoch + 4
|
||||
blockRoots := make([][]byte, 256)
|
||||
for i := 0; i < len(blockRoots); i++ {
|
||||
slot := bytesutil.Bytes32(uint64(i))
|
||||
blockRoots[i] = slot
|
||||
}
|
||||
target := ðpb.Checkpoint{
|
||||
Epoch: e,
|
||||
Root: blockRoots[params.BeaconConfig().SlotsPerEpoch],
|
||||
}
|
||||
|
||||
atts := []*pb.PendingAttestation{
|
||||
{
|
||||
Data: ðpb.AttestationData{Target: target, Slot: slot - 4},
|
||||
AggregationBits: []byte{0xFF, 0xFF, 0xFF, 0xFF},
|
||||
},
|
||||
{
|
||||
Data: ðpb.AttestationData{Target: target, Slot: slot - 3},
|
||||
AggregationBits: []byte{0xFF, 0xFF, 0xFF, 0xFF},
|
||||
},
|
||||
{
|
||||
Data: ðpb.AttestationData{Target: target, Slot: slot - 2},
|
||||
AggregationBits: []byte{0xFF, 0xFF, 0xFF, 0xFF},
|
||||
},
|
||||
{
|
||||
Data: ðpb.AttestationData{Target: target, Slot: slot - 1},
|
||||
AggregationBits: []byte{0xFF, 0xFF, 0xFF, 0xFF},
|
||||
},
|
||||
}
|
||||
|
||||
s := &pb.BeaconState{
|
||||
Slot: slot,
|
||||
Validators: validators,
|
||||
Balances: balances,
|
||||
BlockRoots: blockRoots,
|
||||
Slashings: []uint64{0, 1e9, 1e9},
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
CurrentEpochAttestations: atts,
|
||||
FinalizedCheckpoint: ðpb.Checkpoint{},
|
||||
JustificationBits: bitfield.Bitvector4{0x00},
|
||||
CurrentJustifiedCheckpoint: target,
|
||||
}
|
||||
|
||||
res, err := epoch.ComputeValidatorParticipation(s, e)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
wanted := ðpb.ValidatorParticipation{
|
||||
VotedEther: attestedBalance,
|
||||
EligibleEther: validatorCount * params.BeaconConfig().MaxEffectiveBalance,
|
||||
GlobalParticipationRate: float32(attestedBalance) / float32(validatorCount*params.BeaconConfig().MaxEffectiveBalance),
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(res, wanted) {
|
||||
t.Errorf("Incorrect validator participation, wanted %v received %v", wanted, res)
|
||||
}
|
||||
}
|
||||
@@ -14,12 +14,14 @@ go_library(
|
||||
visibility = ["//beacon-chain:__subpackages__"],
|
||||
deps = [
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//proto/beacon/p2p/v1:go_default_library",
|
||||
"//proto/eth/v1alpha1:go_default_library",
|
||||
"//shared/attestationutil:go_default_library",
|
||||
"//shared/mathutil:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
"//shared/traceutil:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
|
||||
"@io_opencensus_go//trace:go_default_library",
|
||||
],
|
||||
)
|
||||
@@ -37,12 +39,13 @@ go_test(
|
||||
deps = [
|
||||
"//beacon-chain/core/epoch:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/state:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//proto/beacon/p2p/v1:go_default_library",
|
||||
"//proto/eth/v1alpha1:go_default_library",
|
||||
"//shared/attestationutil:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
"//shared/testutil:go_default_library",
|
||||
"@com_github_gogo_protobuf//proto:go_default_library",
|
||||
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user