Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-10 05:47:59 -05:00)

Compare commits: 827 commits, v1.1.0...v2.0.0-rc.
35 .bazelrc

@@ -17,39 +17,26 @@ test --host_force_python=PY2
run --host_force_python=PY2

# Networking is blocked for tests by default, add "requires-network" tag to your test if networking
# is required within the sandbox. This flag is no longer experimental after 0.29.0.
# Network sandboxing only works on linux.
--experimental_sandbox_default_allow_network=false
# is required within the sandbox. Network sandboxing only works on linux.
build --sandbox_default_allow_network=false

# Use mainnet protobufs at runtime
run --define ssz=mainnet
test --define ssz=mainnet
build --define ssz=mainnet
# Stamp binaries with git information
build --workspace_status_command=./hack/workspace_status.sh
build --stamp

# Prevent PATH changes from rebuilding when switching from IDE to command line.
build --incompatible_strict_action_env
test --incompatible_strict_action_env
run --incompatible_strict_action_env

# Disable kafka by default, it takes a long time to build...
build --define kafka_enabled=false
test --define kafka_enabled=false
run --define kafka_enabled=false
build --define blst_disabled=false
test --define blst_disabled=false
run --define blst_disabled=false

# Enable blst by default, we use a config so that our fuzzer stops complaining.
build --config=blst_enabled
test --config=blst_enabled
run --config=blst_enabled

build:kafka_enabled --define kafka_enabled=true
build:kafka_enabled --define gotags=kafka_enabled

build:blst_enabled --define blst_enabled=true
build:blst_enabled --define gotags=blst_enabled
build:blst_disabled --define blst_disabled=true
build:blst_disabled --define gotags=blst_disabled

# Release flags
build:release --workspace_status_command=./scripts/workspace_status.sh
build:release --stamp
build:release --compilation_mode=opt
build:release --config=llvm

@@ -83,7 +70,7 @@ build:fuzz --linkopt -Wl,--no-as-needed
build:fuzz --define=gc_goopts=-d=libfuzzer,checkptr
build:fuzz --run_under=//tools:fuzz_wrapper
build:fuzz --compilation_mode=opt
build:fuzz --define=blst_enabled=false
build:fuzz --define=blst_disabled=true

test:fuzz --local_test_jobs="HOST_CPUS*.5"
@@ -2,7 +2,7 @@
# across machines, developers, and workspaces.
#
# This config is loaded from https://github.com/bazelbuild/bazel-toolchains/blob/master/bazelrc/latest.bazelrc
build:remote-cache --remote_timeout=3600
#build:remote-cache --remote_timeout=3600
#build:remote-cache --spawn_strategy=standalone
#build:remote-cache --strategy=Javac=standalone
#build:remote-cache --strategy=Closure=standalone
@@ -11,11 +11,18 @@ build:remote-cache --remote_timeout=3600
# Prysm specific remote-cache properties.
#build:remote-cache --disk_cache=
build:remote-cache --remote_download_minimal
build:remote-cache --remote_cache=grpc://bazel-remote-cache:9092
build:remote-cache --experimental_remote_downloader=grpc://bazel-remote-cache:9092
build:remote-cache --remote_local_fallback
# Enforce stricter environment rules, which eliminates some non-hermetic
# behavior and therefore improves both the remote cache hit rate and the
# correctness and repeatability of the build.
build:remote-cache --incompatible_strict_action_env=true

# Import workspace options.
import %workspace%/.bazelrc

startup --host_jvm_args=-Xmx1000m --host_jvm_args=-Xms1000m
startup --host_jvm_args=-Xmx2g --host_jvm_args=-Xms2g
query --repository_cache=/tmp/repositorycache
query --experimental_repository_cache_hardlinks
build --repository_cache=/tmp/repositorycache
@@ -36,3 +43,7 @@ build --flaky_test_attempts=5

# Disable flaky test detection for fuzzing.
test:fuzz --flaky_test_attempts=1

# Better caching
build:nostamp --nostamp
build:nostamp --workspace_status_command=./hack/workspace_status_ci.sh
@@ -30,4 +30,4 @@ comment:

ignore:
- "**/*.pb.go"
- "**/*_mock.go"
- "**/testing/**/*"
@@ -1,11 +1,17 @@
version = 1

exclude_patterns = [
"tools/analyzers/**",
"validator/keymanager/remote/keymanager_test.go",
"validator/web/site_data.go"
]

[[analyzers]]
name = "go"
enabled = true

[analyzers.meta]
import_paths = ["github.com/prysmaticlabs/prysm"]
[analyzers.meta]
import_paths = ["github.com/prysmaticlabs/prysm"]

[[analyzers]]
name = "test-coverage"
@@ -14,3 +20,7 @@ enabled = true
[[analyzers]]
name = "shell"
enabled = true

[[analyzers]]
name = "secrets"
enabled = true
12 .github/workflows/go.yml vendored

@@ -24,7 +24,17 @@ jobs:
uses: ./.github/actions/gofmt
with:
path: ./

- name: GoImports checker
id: goimports
uses: Jerome1337/goimports-action@v1.0.2
with:
goimports-path: ./

- name: Gosec security scanner
uses: securego/gosec@master
with:
args: '-exclude-dir=crypto/bls/herumi ./...'
build:
name: Build
runs-on: ubuntu-latest
@@ -44,7 +54,7 @@ jobs:

- name: Build
# Use blst tag to allow go and bazel builds for blst.
run: go build -v --tags=blst_enabled ./...
run: go build -v ./...

# Tests run via Bazel for now...
# - name: Test
29 BUILD.bazel

@@ -14,6 +14,7 @@ exports_files([

# gazelle:prefix github.com/prysmaticlabs/prysm
# gazelle:map_kind go_library go_library @prysm//tools/go:def.bzl
# gazelle:map_kind go_test go_test @prysm//tools/go:def.bzl
# gazelle:map_kind go_repository go_repository @prysm//tools/go:def.bzl
gazelle(
name = "gazelle",
@@ -23,7 +24,16 @@ gazelle(
# Protobuf compiler (non-gRPC)
alias(
name = "proto_compiler",
actual = "@io_bazel_rules_go//proto:gogofast_proto",
actual = "@io_bazel_rules_go//proto:go_proto",
visibility = [
"//proto:__subpackages__",
],
)

# Cast protobuf compiler (non-gRPC)
alias(
name = "cast_proto_compiler",
actual = "@com_github_prysmaticlabs_protoc_gen_go_cast//:go_cast",
visibility = [
"//proto:__subpackages__",
],
@@ -32,21 +42,21 @@ alias(
# Protobuf compiler (gRPC)
alias(
name = "grpc_proto_compiler",
actual = "@io_bazel_rules_go//proto:gogofast_grpc",
actual = "@io_bazel_rules_go//proto:go_grpc",
visibility = ["//visibility:public"],
)

# Protobuf gRPC compiler without gogoproto. Required for gRPC gateway.
# Cast protobuf compiler (gRPC)
alias(
name = "grpc_nogogo_proto_compiler",
actual = "@io_bazel_rules_go//proto:go_grpc",
name = "cast_grpc_proto_compiler",
actual = "@com_github_prysmaticlabs_protoc_gen_go_cast//:go_cast_grpc",
visibility = ["//visibility:public"],
)

# Protobuf gRPC gateway compiler
alias(
name = "grpc_gateway_proto_compiler",
actual = "@com_github_grpc_ecosystem_grpc_gateway//protoc-gen-grpc-gateway:go_gen_grpc_gateway",
actual = "@com_github_grpc_ecosystem_grpc_gateway_v2//protoc-gen-grpc-gateway:go_gen_grpc_gateway",
visibility = ["//visibility:public"],
)

@@ -111,6 +121,7 @@ nogo(
"//tools/analyzers/shadowpredecl:go_tool_library",
"//tools/analyzers/nop:go_tool_library",
"//tools/analyzers/slicedirect:go_tool_library",
"//tools/analyzers/interfacechecker:go_tool_library",
"//tools/analyzers/ineffassign:go_tool_library",
"//tools/analyzers/properpermissions:go_tool_library",
] + select({
@@ -147,3 +158,9 @@ string_setting(
"libfuzzer",
],
)

sh_binary(
name = "prysm_sh",
srcs = ["prysm.sh"],
visibility = ["//visibility:public"],
)
@@ -175,7 +175,7 @@ We consider two types of contributions to our repo and categorize them as follows

### Part-Time Contributors

Anyone can become a part-time contributor and help out on implementing eth2. The responsibilities of a part-time contributor include:
Anyone can become a part-time contributor and help out on implementing Ethereum consensus. The responsibilities of a part-time contributor include:

- Engaging in Gitter conversations, asking the questions on how to begin contributing to the project
- Opening up github issues to express interest in code to implement

@@ -33,8 +33,8 @@ generates SSZ marshal related code based on defined data structures. These gener
also be updated and checked in as frequently.

```bash
./scripts/update-go-pbs.sh
./scripts/update-go-ssz.sh
./hack/update-go-pbs.sh
./hack/update-go-ssz.sh
```

*Recommendation: Use go build only for local development and use bazel build for production.*

@@ -1,6 +1,6 @@
# Prysm Client Interoperability Guide

This README details how to setup Prysm for interop testing for usage with other Ethereum 2.0 clients.
This README details how to setup Prysm for interop testing for usage with other Ethereum consensus clients.

## Installation & Setup
12 README.md

@@ -1,11 +1,11 @@
# Prysm: An Ethereum 2.0 Implementation Written in Go
# Prysm: An Ethereum Consensus Implementation Written in Go

[](https://buildkite.com/prysmatic-labs/prysm)
[](https://goreportcard.com/report/github.com/prysmaticlabs/prysm)
[](https://github.com/ethereum/eth2.0-specs/tree/v1.0.0)
[](https://github.com/ethereum/consensus-specs/tree/v1.0.0)
[](https://discord.gg/CTYGPUJ)

This is the core repository for Prysm, a [Golang](https://golang.org/) implementation of the Ethereum 2.0 specification, developed by [Prysmatic Labs](https://prysmaticlabs.com).
This is the core repository for Prysm, a [Golang](https://golang.org/) implementation of the [Ethereum Consensus](https://ethereum.org/en/eth2/) specification, developed by [Prysmatic Labs](https://prysmaticlabs.com). See the [Changelog](https://github.com/prysmaticlabs/prysm/releases) for details of the latest releases and upcoming breaking changes.

### Getting Started

@@ -13,15 +13,15 @@ A detailed set of installation and usage instructions as well as breakdowns of e

### Staking on Mainnet

To participate in staking, you can join the [official eth2 launchpad](https://launchpad.ethereum.org). The launchpad is the only recommended way to become a validator on mainnet. You can visualize the nodes in the network on [eth2stats.io](https://eth2stats.io), explore validator rewards/penalties via Bitfly's block explorer: [beaconcha.in](https://beaconcha.in), and follow the latest blocks added to the chain on [beaconscan](https://beaconscan.com).
To participate in staking, you can join the [official eth2 launchpad](https://launchpad.ethereum.org). The launchpad is the only recommended way to become a validator on mainnet. You can explore validator rewards/penalties via Bitfly's block explorer: [beaconcha.in](https://beaconcha.in), and follow the latest blocks added to the chain on [beaconscan](https://beaconscan.com).

## Contributing
### Branches
Prysm maintains two permanent branches:

* master: This points to the latest stable release. It is ideal for most users.
* develop: This is used for development, it contains the latest PRs. Developers should base their PRs on this branch.
* [master](https://github.com/prysmaticlabs/prysm/tree/master): This points to the latest stable release. It is ideal for most users.
* [develop](https://github.com/prysmaticlabs/prysm/tree/develop): This is used for development, it contains the latest PRs. Developers should base their PRs on this branch.

### Guide
Want to get involved? Check out our [Contribution Guide](https://docs.prylabs.network/docs/contribute/contribution-guidelines/) to learn more!
11 SECURITY.md Normal file

@@ -0,0 +1,11 @@
# Security Policy

## Supported Versions

[Releases](https://github.com/prysmaticlabs/prysm/releases/) contains all available releases. We recommend using the [most recently released version](https://github.com/prysmaticlabs/prysm/releases/latest).

## Reporting a Vulnerability

Please see our signed [security.txt](https://github.com/prysmaticlabs/prysm/blob/develop/.well-known/security.txt) for preferred encryption and reporting destination.

**Please do not file a public ticket** mentioning the vulnerability, as doing so could increase the likelihood of the vulnerability being used before a fix has been created, released and installed on the network.
@@ -5,7 +5,7 @@ Effective as of Oct 14, 2020
By downloading, accessing or using the Prysm implementation (“Prysm”), you (referenced herein as “you” or the “user”) certify that you have read and agreed to the terms and conditions below (the “Terms”) which form a binding contract between you and Prysmatic Labs (referenced herein as “we” or “us”). If you do not agree to the Terms, do not download or use Prysm.

### About Prysm
Prysm is a client implementation for Ethereum 2.0 Phase 0 protocol for a proof-of-stake blockchain. To participate in the network, a user must send ETH from the Eth1.0 chain into a validator deposit contract, which will queue in the user as a validator in the system. Validators participate in proposing and voting on blocks in the protocol, and the network applies rewards/penalties based on their behavior. A detailed set of installation and usage instructions as well as breakdowns of each individual component are available in the official documentation portal, however, we do not warrant the accuracy, completeness or usefulness of this documentation. Any reliance you place on such information is strictly at your own risk.
Prysm is a client implementation for Ethereum consensus protocol for a proof-of-stake blockchain. To participate in the network, a user must send ETH from the Eth1.0 chain into a validator deposit contract, which will queue in the user as a validator in the system. Validators participate in proposing and voting on blocks in the protocol, and the network applies rewards/penalties based on their behavior. A detailed set of installation and usage instructions as well as breakdowns of each individual component are available in the official documentation portal, however, we do not warrant the accuracy, completeness or usefulness of this documentation. Any reliance you place on such information is strictly at your own risk.

### Licensing Terms
Prysm is a fully open-source software program licensed pursuant to the GNU General Public License v3.0.
@@ -13,7 +13,7 @@ Prysm is a fully open-source software program licensed pursuant to the GNU Gener
The Prysmatic Labs name, the term “Prysm” and all related names, logos, product and service names, designs and slogans are trademarks of Prysmatic Labs or its affiliates and/or licensors. You must not use such marks without our prior written permission.

### Risks of Operating Prysm
The use of Prysm and acting as a validator on the Ethereum 2.0 network can lead to loss of money. Ethereum is still an experimental system and ETH remains a risky investment. You alone are responsible for your actions on Prysm including the security of your ETH and meeting any applicable minimum system requirements.
The use of Prysm and acting as a validator on the Ethereum network can lead to loss of money. Ethereum is still an experimental system and ETH remains a risky investment. You alone are responsible for your actions on Prysm including the security of your ETH and meeting any applicable minimum system requirements.

Use of Prysm and the ability to receive rewards or penalties may be affected at any time by mistakes made by the user or other users, software problems such as bugs, errors, incorrectly constructed transactions, unsafe cryptographic libraries or malware affecting the network, technical failures in the hardware of a user, security problems experienced by a user and/or actions or inactions of third parties and/or events experienced by third parties, among other risks. We cannot and do not guarantee that any user of Prysm will make money, that the Prysm network will operate in accordance with the documentation or that transactions will be effective or secure.
260 WORKSPACE

@@ -15,9 +15,9 @@ http_archive(

http_archive(
name = "com_grail_bazel_toolchain",
sha256 = "b924b102adc0c3368d38a19bd971cb4fa75362a27bc363d0084b90ca6877d3f0",
strip_prefix = "bazel-toolchain-0.5.7",
urls = ["https://github.com/grailbio/bazel-toolchain/archive/0.5.7.tar.gz"],
sha256 = "040b9d00b8a03e8a28e38159ad0f2d0e0de625d93f453a9f226971a8c47e757b",
strip_prefix = "bazel-toolchain-5f82830f9d6a1941c3eb29683c1864ccf2862454",
urls = ["https://github.com/grailbio/bazel-toolchain/archive/5f82830f9d6a1941c3eb29683c1864ccf2862454.tar.gz"],
)

load("@com_grail_bazel_toolchain//toolchain:deps.bzl", "bazel_toolchain_dependencies")
@@ -60,10 +60,10 @@ bazel_skylib_workspace()

http_archive(
name = "bazel_gazelle",
sha256 = "1f4fc1d91826ec436ae04833430626f4cc02c20bb0a813c0c2f3c4c421307b1d",
strip_prefix = "bazel-gazelle-e368a11b76e92932122d824970dc0ce5feb9c349",
sha256 = "62ca106be173579c0a167deb23358fdfe71ffa1e4cfdddf5582af26520f1c66f",
urls = [
"https://github.com/bazelbuild/bazel-gazelle/archive/e368a11b76e92932122d824970dc0ce5feb9c349.tar.gz",
"https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.23.0/bazel-gazelle-v0.23.0.tar.gz",
"https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.23.0/bazel-gazelle-v0.23.0.tar.gz",
],
)

@@ -76,9 +76,9 @@ http_archive(

http_archive(
name = "io_bazel_rules_docker",
sha256 = "1286175a94c0b1335efe1d75d22ea06e89742557d3fac2a0366f242a6eac6f5a",
strip_prefix = "rules_docker-ba4310833230294fa69b7d6ea1787ac684631a7d",
urls = ["https://github.com/bazelbuild/rules_docker/archive/ba4310833230294fa69b7d6ea1787ac684631a7d.tar.gz"],
sha256 = "59d5b42ac315e7eadffa944e86e90c2990110a1c8075f1cd145f487e999d22b3",
strip_prefix = "rules_docker-0.17.0",
urls = ["https://github.com/bazelbuild/rules_docker/releases/download/v0.17.0/rules_docker-v0.17.0.tar.gz"],
)

http_archive(
@@ -88,11 +88,13 @@ http_archive(
# Required until https://github.com/bazelbuild/rules_go/pull/2450 merges otherwise nilness
# nogo check fails for certain third_party dependencies.
"//third_party:io_bazel_rules_go.patch",
# Expose internals of go_test for custom build transitions.
"//third_party:io_bazel_rules_go_test.patch",
],
sha256 = "81eff5df9077783b18e93d0c7ff990d8ad7a3b8b3ca5b785e1c483aacdb342d7",
sha256 = "7c10271940c6bce577d51a075ae77728964db285dac0a46614a7934dc34303e6",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.24.9/rules_go-v0.24.9.tar.gz",
"https://github.com/bazelbuild/rules_go/releases/download/v0.24.9/rules_go-v0.24.9.tar.gz",
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.26.0/rules_go-v0.26.0.tar.gz",
"https://github.com/bazelbuild/rules_go/releases/download/v0.26.0/rules_go-v0.26.0.tar.gz",
],
)

@@ -156,40 +158,10 @@ load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_depe
go_rules_dependencies()

go_register_toolchains(
go_version = "1.15.6",
go_version = "1.16.4",
nogo = "@//:nogo",
)

load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies")

gazelle_dependencies()

load("@io_bazel_rules_go//extras:embed_data_deps.bzl", "go_embed_data_dependencies")

go_embed_data_dependencies()

load("@com_github_atlassian_bazel_tools//gometalinter:deps.bzl", "gometalinter_dependencies")

gometalinter_dependencies()

load(
"@io_bazel_rules_docker//go:image.bzl",
_go_image_repos = "repositories",
)

# Golang images
# This is using gcr.io/distroless/base
_go_image_repos()

# CC images
# This is using gcr.io/distroless/base
load(
"@io_bazel_rules_docker//cc:image.bzl",
_cc_image_repos = "repositories",
)

_cc_image_repos()

http_archive(
name = "prysm_testnet_site",
build_file_content = """
@@ -211,99 +183,136 @@ http_archive(
)

http_archive(
name = "eth2_spec_tests_general",
name = "eip3076_spec_tests",
build_file_content = """
filegroup(
name = "test_data",
srcs = glob([
"**/*.ssz",
"**/*.json",
]),
visibility = ["//visibility:public"],
)
""",
sha256 = "91434d5fd5e1c6eb7b0174fed2afe25e09bddf00e1e4c431db931b2cee4e7773",
url = "https://github.com/eth2-clients/slashing-protection-interchange-tests/archive/b8413ca42dc92308019d0d4db52c87e9e125c4e9.tar.gz",
)

consensus_spec_version = "v1.1.0-beta.4"

bls_test_version = "v0.1.1"

http_archive(
name = "consensus_spec_tests_general",
build_file_content = """
filegroup(
name = "test_data",
srcs = glob([
"**/*.ssz_snappy",
"**/*.yaml",
]),
visibility = ["//visibility:public"],
)
""",
sha256 = "ef5396e4b13995da9776eeb5ae346a2de90970c28da3c4f0dcaa4ab9f0ad1f93",
url = "https://github.com/ethereum/eth2.0-spec-tests/releases/download/v1.0.0/general.tar.gz",
sha256 = "d2d501453cf29777896a5f9ae52e93921f34330e57fcc5b55e4982d4795236b9",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
)

http_archive(
name = "eth2_spec_tests_minimal",
name = "consensus_spec_tests_minimal",
build_file_content = """
filegroup(
name = "test_data",
srcs = glob([
"**/*.ssz",
"**/*.ssz_snappy",
"**/*.yaml",
]),
visibility = ["//visibility:public"],
)
""",
sha256 = "170551b441e7d54b73248372ad9ce8cb6c148810b5f1364637117a63f4f1c085",
url = "https://github.com/ethereum/eth2.0-spec-tests/releases/download/v1.0.0/minimal.tar.gz",
sha256 = "b152f1f03a4fdacd1882fe867bdfbd5067e74ed3c532bd01d81e7adf6cc3737a",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
)

http_archive(
name = "eth2_spec_tests_mainnet",
name = "consensus_spec_tests_mainnet",
build_file_content = """
filegroup(
name = "test_data",
srcs = glob([
"**/*.ssz",
"**/*.ssz_snappy",
"**/*.yaml",
]),
visibility = ["//visibility:public"],
)
""",
sha256 = "b541a9979b4703fa5ee5d2182b0b5313c38efc54ae7eaec2eef793230a52ec83",
url = "https://github.com/ethereum/eth2.0-spec-tests/releases/download/v1.0.0/mainnet.tar.gz",
sha256 = "6046f89c679a9ffda217dee6f021eb7c4fe91aad6ff940fae4d2cf894cc61cbb",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
)

http_archive(
name = "consensus_spec",
build_file_content = """
filegroup(
name = "spec_data",
srcs = glob([
"**/*.yaml",
]),
visibility = ["//visibility:public"],
)
""",
sha256 = "26a0a2c3022a3c5354f3204c7e441580df2e585dbde22a43a1d43f885b3a221c",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)

http_archive(
name = "bls_spec_tests",
build_file_content = """
filegroup(
name = "test_data",
srcs = glob([
"**/*.yaml",
]),
visibility = ["//visibility:public"],
)
""",
sha256 = "93c7d006e7c5b882cbd11dc9ec6c5d0e07f4a8c6b27a32f964eb17cf2db9763a",
url = "https://github.com/ethereum/bls12-381-tests/releases/download/%s/bls_tests_yaml.tar.gz" % bls_test_version,
)

http_archive(
name = "eth2_networks",
build_file_content = """
filegroup(
name = "configs",
srcs = glob([
"shared/**/config.yaml",
]),
visibility = ["//visibility:public"],
)
""",
sha256 = "9dc47bf6b14aed7fac8833e35ab83a69131b43fa5789b3256bf1ac3d4861aeb8",
strip_prefix = "eth2-networks-7fa1b868985ee24aad65567f9250cf7fa86f97b1",
url = "https://github.com/eth2-clients/eth2-networks/archive/7fa1b868985ee24aad65567f9250cf7fa86f97b1.tar.gz",
)

http_archive(
name = "com_github_bazelbuild_buildtools",
sha256 = "b5d7dbc6832f11b6468328a376de05959a1a9e4e9f5622499d3bab509c26b46a",
strip_prefix = "buildtools-bf564b4925ab5876a3f64d8b90fab7f769013d42",
url = "https://github.com/bazelbuild/buildtools/archive/bf564b4925ab5876a3f64d8b90fab7f769013d42.zip",
sha256 = "7a182df18df1debabd9e36ae07c8edfa1378b8424a04561b674d933b965372b3",
strip_prefix = "buildtools-f2aed9ee205d62d45c55cfabbfd26342f8526862",
url = "https://github.com/bazelbuild/buildtools/archive/f2aed9ee205d62d45c55cfabbfd26342f8526862.zip",
)

load("@com_github_bazelbuild_buildtools//buildifier:deps.bzl", "buildifier_dependencies")

buildifier_dependencies()

git_repository(
name = "com_google_protobuf",
commit = "fde7cf7358ec7cd69e8db9be4f1fa6a5c431386a", # v3.13.0
commit = "436bd7880e458532901c58f4d9d1ea23fa7edd52",
remote = "https://github.com/protocolbuffers/protobuf",
shallow_since = "1597443653 -0700",
shallow_since = "1617835118 -0700",
)

load("@com_google_protobuf//:protobuf_deps.bzl", "protobuf_deps")

protobuf_deps()

# Group the sources of the library so that CMake rule have access to it
all_content = """filegroup(name = "all", srcs = glob(["**"]), visibility = ["//visibility:public"])"""

http_archive(
name = "rules_foreign_cc",
sha256 = "b85ce66a3410f7370d1a9a61dfe3a29c7532b7637caeb2877d8d0dfd41d77abb",
strip_prefix = "rules_foreign_cc-3515b20a2417c4dd51c8a4a8cac1f6ecf3c6d934",
url = "https://github.com/bazelbuild/rules_foreign_cc/archive/3515b20a2417c4dd51c8a4a8cac1f6ecf3c6d934.zip",
)

load("@rules_foreign_cc//:workspace_definitions.bzl", "rules_foreign_cc_dependencies")

rules_foreign_cc_dependencies([
"@prysm//:built_cmake_toolchain",
])

http_archive(
name = "librdkafka",
build_file_content = all_content,
sha256 = "3b99a36c082a67ef6295eabd4fb3e32ab0bff7c6b0d397d6352697335f4e57eb",
strip_prefix = "librdkafka-1.4.2",
urls = ["https://github.com/edenhill/librdkafka/archive/v1.4.2.tar.gz"],
)

http_archive(
name = "sigp_beacon_fuzz_corpora",
build_file = "//third_party:beacon-fuzz/corpora.BUILD",
@@ -316,33 +325,6 @@ http_archive(

# External dependencies

http_archive(
name = "sszgen", # Hack because we don't want to build this binary with libfuzzer, but need it to build.
build_file_content = """
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_binary")

go_library(
name = "go_default_library",
srcs = [
"sszgen/main.go",
"sszgen/marshal.go",
"sszgen/size.go",
"sszgen/unmarshal.go",
],
importpath = "github.com/ferranbt/fastssz/sszgen",
visibility = ["//visibility:private"],
)

go_binary(
name = "sszgen",
embed = [":go_default_library"],
visibility = ["//visibility:public"],
)
""",
strip_prefix = "fastssz-06015a5d84f9e4eefe2c21377ca678fa8f1a1b09",
urls = ["https://github.com/ferranbt/fastssz/archive/06015a5d84f9e4eefe2c21377ca678fa8f1a1b09.tar.gz"],
)

http_archive(
name = "prysm_web_ui",
build_file_content = """
@@ -352,9 +334,9 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "edb80f3a695d84f6000f0e05abf7a4bbf207c03abb91219780ec97e7d6ad21c8",
sha256 = "54ce527b83d092da01127f2e3816f4d5cfbab69354caba8537f1ea55889b6d7c",
urls = [
"https://github.com/prysmaticlabs/prysm-web-ui/releases/download/v1.0.0-beta.3/prysm-web-ui.tar.gz",
"https://github.com/prysmaticlabs/prysm-web-ui/releases/download/v1.0.0-beta.4/prysm-web-ui.tar.gz",
],
)

@@ -363,16 +345,50 @@ load("//:deps.bzl", "prysm_deps")
# gazelle:repository_macro deps.bzl%prysm_deps
prysm_deps()

load("@com_github_prysmaticlabs_go_ssz//:deps.bzl", "go_ssz_dependencies")

go_ssz_dependencies()

load("@prysm//third_party/herumi:herumi.bzl", "bls_dependencies")

bls_dependencies()

load(
"@io_bazel_rules_docker//go:image.bzl",
_go_image_repos = "repositories",
)

# Golang images
# This is using gcr.io/distroless/base
_go_image_repos()

# CC images
# This is using gcr.io/distroless/base
load(
"@io_bazel_rules_docker//cc:image.bzl",
_cc_image_repos = "repositories",
)

_cc_image_repos()

load("@com_github_ethereum_go_ethereum//:deps.bzl", "geth_dependencies")

geth_dependencies()

load("@io_bazel_rules_go//extras:embed_data_deps.bzl", "go_embed_data_dependencies")

go_embed_data_dependencies()

load("@com_github_atlassian_bazel_tools//gometalinter:deps.bzl", "gometalinter_dependencies")

gometalinter_dependencies()

load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies")

gazelle_dependencies()

load("@com_github_bazelbuild_buildtools//buildifier:deps.bzl", "buildifier_dependencies")

buildifier_dependencies()

load("@com_google_protobuf//:protobuf_deps.bzl", "protobuf_deps")

protobuf_deps()

# Do NOT add new go dependencies here! Refer to DEPENDENCIES.md!
52 api/gateway/BUILD.bazel Normal file

@@ -0,0 +1,52 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
name = "go_default_library",
srcs = [
"api_middleware.go",
"api_middleware_processing.go",
"api_middleware_structs.go",
"gateway.go",
"log.go",
"param_handling.go",
],
importpath = "github.com/prysmaticlabs/prysm/api/gateway",
visibility = [
"//beacon-chain:__subpackages__",
"//validator:__subpackages__",
],
deps = [
"//api/grpc:go_default_library",
"//runtime:go_default_library",
"//shared/bytesutil:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_gorilla_mux//:go_default_library",
"@com_github_grpc_ecosystem_grpc_gateway_v2//runtime:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_rs_cors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_wealdtech_go_bytesutil//:go_default_library",
"@org_golang_google_grpc//:go_default_library",
"@org_golang_google_grpc//connectivity:go_default_library",
"@org_golang_google_grpc//credentials:go_default_library",
],
)

go_test(
name = "go_default_test",
srcs = [
"api_middleware_processing_test.go",
"gateway_test.go",
"param_handling_test.go",
],
embed = [":go_default_library"],
deps = [
"//api/grpc:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//shared/testutil/assert:go_default_library",
"//shared/testutil/require:go_default_library",
"@com_github_gorilla_mux//:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
"@com_github_urfave_cli_v2//:go_default_library",
],
)
217
api/gateway/api_middleware.go
Normal file
217
api/gateway/api_middleware.go
Normal file
@@ -0,0 +1,217 @@
|
||||
package gateway
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"reflect"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
)
|
||||
|
||||
// ApiProxyMiddleware is a proxy between an Ethereum consensus API HTTP client and grpc-gateway.
|
||||
// The purpose of the proxy is to handle HTTP requests and gRPC responses in such a way that:
|
||||
// - Ethereum consensus API requests can be handled by grpc-gateway correctly
|
||||
// - gRPC responses can be returned as spec-compliant Ethereum consensus API responses
|
||||
type ApiProxyMiddleware struct {
|
||||
GatewayAddress string
|
||||
ProxyAddress string
|
||||
EndpointCreator EndpointFactory
|
||||
router *mux.Router
|
||||
}
|
||||
|
||||
// EndpointFactory is responsible for creating new instances of Endpoint values.
|
||||
type EndpointFactory interface {
|
||||
Create(path string) (*Endpoint, error)
|
||||
Paths() []string
|
||||
IsNil() bool
|
||||
}
|
||||
|
||||
// Endpoint is a representation of an API HTTP endpoint that should be proxied by the middleware.
|
||||
type Endpoint struct {
|
||||
Path string // The path of the HTTP endpoint.
|
||||
PostRequest interface{} // The struct corresponding to the JSON structure used in a POST request.
|
||||
PostResponse interface{} // The struct corresponding to the JSON structure used in a POST response.
|
||||
RequestURLLiterals []string // Names of URL parameters that should not be base64-encoded.
|
||||
RequestQueryParams []QueryParam // Query parameters of the request.
|
||||
GetResponse interface{} // The struct corresponding to the JSON structure used in a GET response.
|
||||
Err ErrorJson // The struct corresponding to the error that should be returned in case of a request failure.
|
||||
Hooks HookCollection // A collection of functions that can be invoked at various stages of the request/response cycle.
|
||||
CustomHandlers []CustomHandler // Functions that will be executed instead of the default request/response behaviour.
|
||||
}
|
||||
|
||||
// DefaultEndpoint returns an Endpoint with default configuration, e.g. DefaultErrorJson for error handling.
|
||||
func DefaultEndpoint() Endpoint {
|
||||
return Endpoint{
|
||||
Err: &DefaultErrorJson{},
|
||||
}
|
||||
}
|
||||
|
||||
// QueryParam represents a single query parameter's metadata.
|
||||
type QueryParam struct {
|
||||
Name string
|
||||
Hex bool
|
||||
Enum bool
|
||||
}
|
||||
|
||||
// Hook is a function that can be invoked at various stages of the request/response cycle, leading to custom behaviour for a specific endpoint.
|
||||
type Hook = func(endpoint Endpoint, w http.ResponseWriter, req *http.Request) ErrorJson
|
||||
|
||||
// CustomHandler is a function that can be invoked at the very beginning of the request,
|
||||
// essentially replacing the whole default request/response logic with custom logic for a specific endpoint.
|
||||
type CustomHandler = func(m *ApiProxyMiddleware, endpoint Endpoint, w http.ResponseWriter, req *http.Request) (handled bool)
|
||||
|
||||
// HookCollection contains hooks that can be used to amend the default request/response cycle with custom logic for a specific endpoint.
|
||||
type HookCollection struct {
|
||||
OnPreDeserializeRequestBodyIntoContainer []Hook
|
||||
OnPostDeserializeRequestBodyIntoContainer []Hook
|
||||
OnPreDeserializeGrpcResponseBodyIntoContainer []func([]byte, interface{}) (bool, ErrorJson)
|
||||
OnPreSerializeMiddlewareResponseIntoJson []func(interface{}) (bool, []byte, ErrorJson)
|
||||
}
|
||||
|
||||
// fieldProcessor applies the processing function f to a value when the tag is present on the field.
|
||||
type fieldProcessor struct {
|
||||
tag string
|
||||
f func(value reflect.Value) error
|
||||
}
|
||||
|
||||
// Run starts the proxy, registering all proxy endpoints on ApiProxyMiddleware.ProxyAddress.
|
||||
func (m *ApiProxyMiddleware) Run() error {
|
||||
m.router = mux.NewRouter()
|
||||
|
||||
for _, path := range m.EndpointCreator.Paths() {
|
||||
m.handleApiPath(path, m.EndpointCreator)
|
||||
}
|
||||
|
||||
return http.ListenAndServe(m.ProxyAddress, m.router)
|
||||
}
func (m *ApiProxyMiddleware) handleApiPath(path string, endpointFactory EndpointFactory) {
	m.router.HandleFunc(path, func(w http.ResponseWriter, req *http.Request) {
		endpoint, err := endpointFactory.Create(path)
		if err != nil {
			errJson := InternalServerErrorWithMessage(err, "could not create endpoint")
			WriteError(w, errJson, nil)
		}

		for _, handler := range endpoint.CustomHandlers {
			if handler(m, *endpoint, w, req) {
				return
			}
		}

		if req.Method == "POST" {
			for _, hook := range endpoint.Hooks.OnPreDeserializeRequestBodyIntoContainer {
				if errJson := hook(*endpoint, w, req); errJson != nil {
					WriteError(w, errJson, nil)
					return
				}
			}

			if errJson := DeserializeRequestBodyIntoContainer(req.Body, endpoint.PostRequest); errJson != nil {
				WriteError(w, errJson, nil)
				return
			}
			for _, hook := range endpoint.Hooks.OnPostDeserializeRequestBodyIntoContainer {
				if errJson := hook(*endpoint, w, req); errJson != nil {
					WriteError(w, errJson, nil)
					return
				}
			}

			if errJson := ProcessRequestContainerFields(endpoint.PostRequest); errJson != nil {
				WriteError(w, errJson, nil)
				return
			}
			if errJson := SetRequestBodyToRequestContainer(endpoint.PostRequest, req); errJson != nil {
				WriteError(w, errJson, nil)
				return
			}
		}

		if errJson := m.PrepareRequestForProxying(*endpoint, req); errJson != nil {
			WriteError(w, errJson, nil)
			return
		}
		grpcResponse, errJson := ProxyRequest(req)
		if errJson != nil {
			WriteError(w, errJson, nil)
			return
		}
		grpcResponseBody, errJson := ReadGrpcResponseBody(grpcResponse.Body)
		if errJson != nil {
			WriteError(w, errJson, nil)
			return
		}

		var responseJson []byte
		if !GrpcResponseIsEmpty(grpcResponseBody) {
			if errJson := DeserializeGrpcResponseBodyIntoErrorJson(endpoint.Err, grpcResponseBody); errJson != nil {
				WriteError(w, errJson, nil)
				return
			}
			if endpoint.Err.Msg() != "" {
				HandleGrpcResponseError(endpoint.Err, grpcResponse, w)
				return
			}
			var response interface{}
			if req.Method == "GET" {
				response = endpoint.GetResponse
			} else {
				response = endpoint.PostResponse
			}

			runDefault := true
			for _, hook := range endpoint.Hooks.OnPreDeserializeGrpcResponseBodyIntoContainer {
				ok, errJson := hook(grpcResponseBody, response)
				if errJson != nil {
					WriteError(w, errJson, nil)
					return
				}
				if ok {
					runDefault = false
					break
				}
			}
			if runDefault {
				if errJson := DeserializeGrpcResponseBodyIntoContainer(grpcResponseBody, response); errJson != nil {
					WriteError(w, errJson, nil)
					return
				}
			}

			if errJson := ProcessMiddlewareResponseFields(response); errJson != nil {
				WriteError(w, errJson, nil)
				return
			}

			var ok bool
			runDefault = true
			for _, hook := range endpoint.Hooks.OnPreSerializeMiddlewareResponseIntoJson {
				ok, responseJson, errJson = hook(response)
				if errJson != nil {
					WriteError(w, errJson, nil)
					return
				}
				if ok {
					runDefault = false
					break
				}
			}
			if runDefault {
				responseJson, errJson = SerializeMiddlewareResponseIntoJson(response)
				if errJson != nil {
					WriteError(w, errJson, nil)
					return
				}
			}
		}

		if errJson := WriteMiddlewareResponseHeadersAndBody(grpcResponse, responseJson, w); errJson != nil {
			WriteError(w, errJson, nil)
			return
		}
		if errJson := Cleanup(grpcResponse.Body); errJson != nil {
			WriteError(w, errJson, nil)
			return
		}
	})
}
api/gateway/api_middleware_processing.go (new file, 320 lines)
@@ -0,0 +1,320 @@
package gateway

import (
	"bytes"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"reflect"
	"strconv"
	"strings"
	"time"

	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/api/grpc"
	"github.com/wealdtech/go-bytesutil"
)

// DeserializeRequestBodyIntoContainer deserializes the request's body into an endpoint-specific struct.
func DeserializeRequestBodyIntoContainer(body io.Reader, requestContainer interface{}) ErrorJson {
	if err := json.NewDecoder(body).Decode(&requestContainer); err != nil {
		return InternalServerErrorWithMessage(err, "could not decode request body")
	}
	return nil
}

// ProcessRequestContainerFields processes fields of an endpoint-specific container according to field tags.
func ProcessRequestContainerFields(requestContainer interface{}) ErrorJson {
	if err := processField(requestContainer, []fieldProcessor{
		{
			tag: "hex",
			f:   hexToBase64Processor,
		},
	}); err != nil {
		return InternalServerErrorWithMessage(err, "could not process request data")
	}
	return nil
}
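// NOTE: The sketch below is illustrative and not part of the original file. Given a request
// container with a `hex:"true"` tag, ProcessRequestContainerFields rewrites the hex string
// into the base64 form that gRPC expects for bytes fields. The container type is hypothetical.
//
//	type exampleRequestJson struct {
//		Root string `json:"root" hex:"true"`
//	}
//
//	c := &exampleRequestJson{Root: "0x666f6f"} // hex encoding of "foo"
//	if errJson := ProcessRequestContainerFields(c); errJson == nil {
//		fmt.Println(c.Root) // "Zm9v", the base64 encoding of "foo"
//	}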
|
||||
|
||||
// SetRequestBodyToRequestContainer makes the endpoint-specific container the new body of the request.
|
||||
func SetRequestBodyToRequestContainer(requestContainer interface{}, req *http.Request) ErrorJson {
|
||||
// Serialize the struct, which now includes a base64-encoded value, into JSON.
|
||||
j, err := json.Marshal(requestContainer)
|
||||
if err != nil {
|
||||
return InternalServerErrorWithMessage(err, "could not marshal request")
|
||||
}
|
||||
// Set the body to the new JSON.
|
||||
req.Body = ioutil.NopCloser(bytes.NewReader(j))
|
||||
req.Header.Set("Content-Length", strconv.Itoa(len(j)))
|
||||
req.ContentLength = int64(len(j))
|
||||
return nil
|
||||
}
|
||||
|
||||
// PrepareRequestForProxying applies additional logic to the request so that it can be correctly proxied to grpc-gateway.
|
||||
func (m *ApiProxyMiddleware) PrepareRequestForProxying(endpoint Endpoint, req *http.Request) ErrorJson {
|
||||
req.URL.Scheme = "http"
|
||||
req.URL.Host = m.GatewayAddress
|
||||
req.RequestURI = ""
|
||||
if errJson := HandleURLParameters(endpoint.Path, req, endpoint.RequestURLLiterals); errJson != nil {
|
||||
return errJson
|
||||
}
|
||||
return HandleQueryParameters(req, endpoint.RequestQueryParams)
|
||||
}
|
||||
|
||||
// ProxyRequest proxies the request to grpc-gateway.
|
||||
func ProxyRequest(req *http.Request) (*http.Response, ErrorJson) {
|
||||
// We do not use http.DefaultClient because it does not have any timeout.
|
||||
netClient := &http.Client{Timeout: time.Minute * 2}
|
||||
grpcResp, err := netClient.Do(req)
|
||||
if err != nil {
|
||||
return nil, InternalServerErrorWithMessage(err, "could not proxy request")
|
||||
}
|
||||
if grpcResp == nil {
|
||||
return nil, &DefaultErrorJson{Message: "nil response from gRPC-gateway", Code: http.StatusInternalServerError}
|
||||
}
|
||||
return grpcResp, nil
|
||||
}
|
||||
|
||||
// ReadGrpcResponseBody reads the body from the grpc-gateway's response.
|
||||
func ReadGrpcResponseBody(r io.Reader) ([]byte, ErrorJson) {
|
||||
body, err := ioutil.ReadAll(r)
|
||||
if err != nil {
|
||||
return nil, InternalServerErrorWithMessage(err, "could not read response body")
|
||||
}
|
||||
return body, nil
|
||||
}
|
||||
|
||||
// DeserializeGrpcResponseBodyIntoErrorJson deserializes the body from the grpc-gateway's response into an error struct.
|
||||
// The struct can be later examined to check if the request resulted in an error.
|
||||
func DeserializeGrpcResponseBodyIntoErrorJson(errJson ErrorJson, body []byte) ErrorJson {
|
||||
if err := json.Unmarshal(body, errJson); err != nil {
|
||||
return InternalServerErrorWithMessage(err, "could not unmarshal error")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// HandleGrpcResponseError acts on an error that resulted from a grpc-gateway's response.
|
||||
func HandleGrpcResponseError(errJson ErrorJson, resp *http.Response, w http.ResponseWriter) {
|
||||
// Something went wrong, but the request completed, meaning we can write headers and the error message.
|
||||
for h, vs := range resp.Header {
|
||||
for _, v := range vs {
|
||||
w.Header().Set(h, v)
|
||||
}
|
||||
}
|
||||
// Set code to HTTP code because unmarshalled body contained gRPC code.
|
||||
errJson.SetCode(resp.StatusCode)
|
||||
WriteError(w, errJson, resp.Header)
|
||||
}
|
||||
|
||||
// GrpcResponseIsEmpty determines whether the grpc-gateway's response body contains no data.
|
||||
func GrpcResponseIsEmpty(grpcResponseBody []byte) bool {
|
||||
return len(grpcResponseBody) == 0 || string(grpcResponseBody) == "{}"
|
||||
}
|
||||
|
||||
// DeserializeGrpcResponseBodyIntoContainer deserializes the grpc-gateway's response body into an endpoint-specific struct.
|
||||
func DeserializeGrpcResponseBodyIntoContainer(body []byte, responseContainer interface{}) ErrorJson {
|
||||
if err := json.Unmarshal(body, &responseContainer); err != nil {
|
||||
return InternalServerErrorWithMessage(err, "could not unmarshal response")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ProcessMiddlewareResponseFields processes fields of an endpoint-specific container according to field tags.
|
||||
func ProcessMiddlewareResponseFields(responseContainer interface{}) ErrorJson {
|
||||
if err := processField(responseContainer, []fieldProcessor{
|
||||
{
|
||||
tag: "hex",
|
||||
f: base64ToHexProcessor,
|
||||
},
|
||||
{
|
||||
tag: "enum",
|
||||
f: enumToLowercaseProcessor,
|
||||
},
|
||||
{
|
||||
tag: "time",
|
||||
f: timeToUnixProcessor,
|
||||
},
|
||||
}); err != nil {
|
||||
return InternalServerErrorWithMessage(err, "could not process response data")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SerializeMiddlewareResponseIntoJson serializes the endpoint-specific response struct into a JSON representation.
|
||||
func SerializeMiddlewareResponseIntoJson(responseContainer interface{}) (jsonResponse []byte, errJson ErrorJson) {
|
||||
j, err := json.Marshal(responseContainer)
|
||||
if err != nil {
|
||||
return nil, InternalServerErrorWithMessage(err, "could not marshal response")
|
||||
}
|
||||
return j, nil
|
||||
}
|
||||
|
||||
// WriteMiddlewareResponseHeadersAndBody populates headers and the body of the final response.
|
||||
func WriteMiddlewareResponseHeadersAndBody(grpcResp *http.Response, responseJson []byte, w http.ResponseWriter) ErrorJson {
|
||||
var statusCodeHeader string
|
||||
for h, vs := range grpcResp.Header {
|
||||
// We don't want to expose any gRPC metadata in the HTTP response, so we skip forwarding metadata headers.
|
||||
if strings.HasPrefix(h, "Grpc-Metadata") {
|
||||
if h == "Grpc-Metadata-"+grpc.HttpCodeMetadataKey {
|
||||
statusCodeHeader = vs[0]
|
||||
}
|
||||
} else {
|
||||
for _, v := range vs {
|
||||
w.Header().Set(h, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
if !GrpcResponseIsEmpty(responseJson) {
|
||||
w.Header().Set("Content-Length", strconv.Itoa(len(responseJson)))
|
||||
if statusCodeHeader != "" {
|
||||
code, err := strconv.Atoi(statusCodeHeader)
|
||||
if err != nil {
|
||||
return InternalServerErrorWithMessage(err, "could not parse status code")
|
||||
}
|
||||
w.WriteHeader(code)
|
||||
} else {
|
||||
w.WriteHeader(grpcResp.StatusCode)
|
||||
}
|
||||
if _, err := io.Copy(w, ioutil.NopCloser(bytes.NewReader(responseJson))); err != nil {
|
||||
return InternalServerErrorWithMessage(err, "could not write response message")
|
||||
}
|
||||
} else {
|
||||
w.Header().Set("Content-Length", "0")
|
||||
w.WriteHeader(grpcResp.StatusCode)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// WriteError writes the error by manipulating headers and the body of the final response.
|
||||
func WriteError(w http.ResponseWriter, errJson ErrorJson, responseHeader http.Header) {
|
||||
// Include custom error in the error JSON.
|
||||
if responseHeader != nil {
|
||||
customError, ok := responseHeader["Grpc-Metadata-"+grpc.CustomErrorMetadataKey]
|
||||
if ok {
|
||||
// Assume header has only one value and read the 0 index.
|
||||
if err := json.Unmarshal([]byte(customError[0]), errJson); err != nil {
|
||||
log.WithError(err).Error("Could not unmarshal custom error message")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
j, err := json.Marshal(errJson)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not marshal error message")
|
||||
return
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Length", strconv.Itoa(len(j)))
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(errJson.StatusCode())
|
||||
if _, err := io.Copy(w, ioutil.NopCloser(bytes.NewReader(j))); err != nil {
|
||||
log.WithError(err).Error("Could not write error message")
|
||||
}
|
||||
}
|
||||
|
||||
// Cleanup performs final cleanup on the initial response from grpc-gateway.
|
||||
func Cleanup(grpcResponseBody io.ReadCloser) ErrorJson {
|
||||
if err := grpcResponseBody.Close(); err != nil {
|
||||
return InternalServerErrorWithMessage(err, "could not close response body")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// processField calls each processor function on any field that has the matching tag set.
|
||||
// It is a recursive function.
|
||||
func processField(s interface{}, processors []fieldProcessor) error {
|
||||
kind := reflect.TypeOf(s).Kind()
|
||||
if kind != reflect.Ptr && kind != reflect.Slice && kind != reflect.Array {
|
||||
return fmt.Errorf("processing fields of kind '%v' is unsupported", kind)
|
||||
}
|
||||
|
||||
t := reflect.TypeOf(s).Elem()
|
||||
v := reflect.Indirect(reflect.ValueOf(s))
|
||||
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
switch v.Field(i).Kind() {
|
||||
case reflect.Slice:
|
||||
sliceElem := t.Field(i).Type.Elem()
|
||||
kind := sliceElem.Kind()
|
||||
// Recursively process slices to struct pointers.
|
||||
if kind == reflect.Ptr && sliceElem.Elem().Kind() == reflect.Struct {
|
||||
for j := 0; j < v.Field(i).Len(); j++ {
|
||||
if err := processField(v.Field(i).Index(j).Interface(), processors); err != nil {
|
||||
return errors.Wrapf(err, "could not process field '%s'", t.Field(i).Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
// Process each string in string slices.
|
||||
if kind == reflect.String {
|
||||
for _, proc := range processors {
|
||||
_, hasTag := t.Field(i).Tag.Lookup(proc.tag)
|
||||
if hasTag {
|
||||
for j := 0; j < v.Field(i).Len(); j++ {
|
||||
if err := proc.f(v.Field(i).Index(j)); err != nil {
|
||||
return errors.Wrapf(err, "could not process field '%s'", t.Field(i).Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
// Recursively process struct pointers.
|
||||
case reflect.Ptr:
|
||||
if v.Field(i).Elem().Kind() == reflect.Struct {
|
||||
if err := processField(v.Field(i).Interface(), processors); err != nil {
|
||||
return errors.Wrapf(err, "could not process field '%s'", t.Field(i).Name)
|
||||
}
|
||||
}
|
||||
default:
|
||||
field := t.Field(i)
|
||||
for _, proc := range processors {
|
||||
if _, hasTag := field.Tag.Lookup(proc.tag); hasTag {
|
||||
if err := proc.f(v.Field(i)); err != nil {
|
||||
return errors.Wrapf(err, "could not process field '%s'", t.Field(i).Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
func hexToBase64Processor(v reflect.Value) error {
	b, err := bytesutil.FromHexString(v.String())
	if err != nil {
		return err
	}
	v.SetString(base64.StdEncoding.EncodeToString(b))
	return nil
}

func base64ToHexProcessor(v reflect.Value) error {
	if v.String() == "" {
		return nil
	}
	b, err := base64.StdEncoding.DecodeString(v.String())
	if err != nil {
		return err
	}
	v.SetString(hexutil.Encode(b))
	return nil
}

func enumToLowercaseProcessor(v reflect.Value) error {
	v.SetString(strings.ToLower(v.String()))
	return nil
}

func timeToUnixProcessor(v reflect.Value) error {
	t, err := time.Parse(time.RFC3339, v.String())
	if err != nil {
		return err
	}
	v.SetString(strconv.FormatUint(uint64(t.Unix()), 10))
	return nil
}
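// NOTE: The sketch below is illustrative and not part of the original file. The response-side
// processors reverse the request-side transformation and normalize enums and timestamps. The
// container type is hypothetical.
//
//	type exampleResponseJson struct {
//		Root    string `json:"root" hex:"true"`
//		Status  string `json:"status" enum:"true"`
//		Genesis string `json:"genesis" time:"true"`
//	}
//
//	c := &exampleResponseJson{Root: "Zm9v", Status: "PENDING_INITIALIZED", Genesis: "2006-01-02T15:04:05Z"}
//	if errJson := ProcessMiddlewareResponseFields(c); errJson == nil {
//		// c.Root is now "0x666f6f", c.Status is "pending_initialized", c.Genesis is "1136214245".
//	}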
|
||||
api/gateway/api_middleware_processing_test.go (new file, 414 lines)
@@ -0,0 +1,414 @@
|
||||
package gateway
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/api/grpc"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
"github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
|
||||
type testRequestContainer struct {
|
||||
TestString string
|
||||
TestHexString string `hex:"true"`
|
||||
}
|
||||
|
||||
func defaultRequestContainer() *testRequestContainer {
|
||||
return &testRequestContainer{
|
||||
TestString: "test string",
|
||||
TestHexString: "0x666F6F", // hex encoding of "foo"
|
||||
}
|
||||
}
|
||||
|
||||
type testResponseContainer struct {
|
||||
TestString string
|
||||
TestHex string `hex:"true"`
|
||||
TestEnum string `enum:"true"`
|
||||
TestTime string `time:"true"`
|
||||
}
|
||||
|
||||
func defaultResponseContainer() *testResponseContainer {
|
||||
return &testResponseContainer{
|
||||
TestString: "test string",
|
||||
TestHex: "Zm9v", // base64 encoding of "foo"
|
||||
TestEnum: "Test Enum",
|
||||
TestTime: "2006-01-02T15:04:05Z",
|
||||
}
|
||||
}
|
||||
|
||||
type testErrorJson struct {
|
||||
Message string
|
||||
Code int
|
||||
CustomField string
|
||||
}
|
||||
|
||||
// StatusCode returns the error's underlying error code.
|
||||
func (e *testErrorJson) StatusCode() int {
|
||||
return e.Code
|
||||
}
|
||||
|
||||
// Msg returns the error's underlying message.
|
||||
func (e *testErrorJson) Msg() string {
|
||||
return e.Message
|
||||
}
|
||||
|
||||
// SetCode sets the error's underlying error code.
|
||||
func (e *testErrorJson) SetCode(code int) {
|
||||
e.Code = code
|
||||
}
|
||||
|
||||
func TestDeserializeRequestBodyIntoContainer(t *testing.T) {
|
||||
t.Run("ok", func(t *testing.T) {
|
||||
var bodyJson bytes.Buffer
|
||||
err := json.NewEncoder(&bodyJson).Encode(defaultRequestContainer())
|
||||
require.NoError(t, err)
|
||||
|
||||
container := &testRequestContainer{}
|
||||
errJson := DeserializeRequestBodyIntoContainer(&bodyJson, container)
|
||||
require.Equal(t, true, errJson == nil)
|
||||
assert.Equal(t, "test string", container.TestString)
|
||||
})
|
||||
|
||||
t.Run("error", func(t *testing.T) {
|
||||
var bodyJson bytes.Buffer
|
||||
bodyJson.Write([]byte("foo"))
|
||||
errJson := DeserializeRequestBodyIntoContainer(&bodyJson, &testRequestContainer{})
|
||||
require.NotNil(t, errJson)
|
||||
assert.Equal(t, true, strings.Contains(errJson.Msg(), "could not decode request body"))
|
||||
assert.Equal(t, http.StatusInternalServerError, errJson.StatusCode())
|
||||
})
|
||||
}
|
||||
|
||||
func TestProcessRequestContainerFields(t *testing.T) {
|
||||
t.Run("ok", func(t *testing.T) {
|
||||
container := defaultRequestContainer()
|
||||
|
||||
errJson := ProcessRequestContainerFields(container)
|
||||
require.Equal(t, true, errJson == nil)
|
||||
assert.Equal(t, "Zm9v", container.TestHexString)
|
||||
})
|
||||
|
||||
t.Run("error", func(t *testing.T) {
|
||||
errJson := ProcessRequestContainerFields("foo")
|
||||
require.NotNil(t, errJson)
|
||||
assert.Equal(t, true, strings.Contains(errJson.Msg(), "could not process request data"))
|
||||
assert.Equal(t, http.StatusInternalServerError, errJson.StatusCode())
|
||||
})
|
||||
}
|
||||
|
||||
func TestSetRequestBodyToRequestContainer(t *testing.T) {
|
||||
var body bytes.Buffer
|
||||
request := httptest.NewRequest("GET", "http://foo.example", &body)
|
||||
|
||||
errJson := SetRequestBodyToRequestContainer(defaultRequestContainer(), request)
|
||||
require.Equal(t, true, errJson == nil)
|
||||
container := &testRequestContainer{}
|
||||
require.NoError(t, json.NewDecoder(request.Body).Decode(container))
|
||||
assert.Equal(t, "test string", container.TestString)
|
||||
contentLengthHeader, ok := request.Header["Content-Length"]
|
||||
require.Equal(t, true, ok)
|
||||
require.Equal(t, 1, len(contentLengthHeader), "wrong number of header values")
|
||||
assert.Equal(t, "55", contentLengthHeader[0])
|
||||
assert.Equal(t, int64(55), request.ContentLength)
|
||||
}
|
||||
|
||||
func TestPrepareRequestForProxying(t *testing.T) {
|
||||
middleware := &ApiProxyMiddleware{
|
||||
GatewayAddress: "http://gateway.example",
|
||||
}
|
||||
// We will set some params to make the request more interesting.
|
||||
endpoint := Endpoint{
|
||||
Path: "/{url_param}",
|
||||
RequestURLLiterals: []string{"url_param"},
|
||||
RequestQueryParams: []QueryParam{{Name: "query_param"}},
|
||||
}
|
||||
var body bytes.Buffer
|
||||
request := httptest.NewRequest("GET", "http://foo.example?query_param=bar", &body)
|
||||
|
||||
errJson := middleware.PrepareRequestForProxying(endpoint, request)
|
||||
require.Equal(t, true, errJson == nil)
|
||||
assert.Equal(t, "http", request.URL.Scheme)
|
||||
assert.Equal(t, middleware.GatewayAddress, request.URL.Host)
|
||||
assert.Equal(t, "", request.RequestURI)
|
||||
}
|
||||
|
||||
func TestReadGrpcResponseBody(t *testing.T) {
|
||||
var b bytes.Buffer
|
||||
b.Write([]byte("foo"))
|
||||
|
||||
body, jsonErr := ReadGrpcResponseBody(&b)
|
||||
require.Equal(t, true, jsonErr == nil)
|
||||
assert.Equal(t, "foo", string(body))
|
||||
}
|
||||
|
||||
func TestDeserializeGrpcResponseBodyIntoErrorJson(t *testing.T) {
|
||||
t.Run("ok", func(t *testing.T) {
|
||||
e := &testErrorJson{
|
||||
Message: "foo",
|
||||
Code: 500,
|
||||
}
|
||||
body, err := json.Marshal(e)
|
||||
require.NoError(t, err)
|
||||
|
||||
eToDeserialize := &testErrorJson{}
|
||||
errJson := DeserializeGrpcResponseBodyIntoErrorJson(eToDeserialize, body)
|
||||
require.Equal(t, true, errJson == nil)
|
||||
assert.Equal(t, "foo", eToDeserialize.Msg())
|
||||
assert.Equal(t, 500, eToDeserialize.StatusCode())
|
||||
})
|
||||
|
||||
t.Run("error", func(t *testing.T) {
|
||||
errJson := DeserializeGrpcResponseBodyIntoErrorJson(nil, nil)
|
||||
require.NotNil(t, errJson)
|
||||
assert.Equal(t, true, strings.Contains(errJson.Msg(), "could not unmarshal error"))
|
||||
})
|
||||
}
|
||||
|
||||
func TestHandleGrpcResponseError(t *testing.T) {
|
||||
response := &http.Response{
|
||||
StatusCode: 400,
|
||||
Header: http.Header{
|
||||
"Foo": []string{"foo"},
|
||||
"Bar": []string{"bar"},
|
||||
},
|
||||
}
|
||||
writer := httptest.NewRecorder()
|
||||
errJson := &testErrorJson{
|
||||
Message: "foo",
|
||||
Code: 500,
|
||||
}
|
||||
|
||||
HandleGrpcResponseError(errJson, response, writer)
|
||||
v, ok := writer.Header()["Foo"]
|
||||
require.Equal(t, true, ok, "header not found")
|
||||
require.Equal(t, 1, len(v), "wrong number of header values")
|
||||
assert.Equal(t, "foo", v[0])
|
||||
v, ok = writer.Header()["Bar"]
|
||||
require.Equal(t, true, ok, "header not found")
|
||||
require.Equal(t, 1, len(v), "wrong number of header values")
|
||||
assert.Equal(t, "bar", v[0])
|
||||
assert.Equal(t, 400, errJson.StatusCode())
|
||||
}
|
||||
|
||||
func TestGrpcResponseIsEmpty(t *testing.T) {
|
||||
t.Run("nil", func(t *testing.T) {
|
||||
assert.Equal(t, true, GrpcResponseIsEmpty(nil))
|
||||
})
|
||||
t.Run("empty_slice", func(t *testing.T) {
|
||||
assert.Equal(t, true, GrpcResponseIsEmpty(make([]byte, 0)))
|
||||
})
|
||||
t.Run("empty_brackets", func(t *testing.T) {
|
||||
assert.Equal(t, true, GrpcResponseIsEmpty([]byte("{}")))
|
||||
})
|
||||
t.Run("non_empty", func(t *testing.T) {
|
||||
assert.Equal(t, false, GrpcResponseIsEmpty([]byte("{\"foo\":\"bar\"})")))
|
||||
})
|
||||
}
|
||||
|
||||
func TestDeserializeGrpcResponseBodyIntoContainer(t *testing.T) {
|
||||
t.Run("ok", func(t *testing.T) {
|
||||
body, err := json.Marshal(defaultRequestContainer())
|
||||
require.NoError(t, err)
|
||||
|
||||
container := &testRequestContainer{}
|
||||
errJson := DeserializeGrpcResponseBodyIntoContainer(body, container)
|
||||
require.Equal(t, true, errJson == nil)
|
||||
assert.Equal(t, "test string", container.TestString)
|
||||
})
|
||||
|
||||
t.Run("error", func(t *testing.T) {
|
||||
var bodyJson bytes.Buffer
|
||||
bodyJson.Write([]byte("foo"))
|
||||
errJson := DeserializeGrpcResponseBodyIntoContainer(bodyJson.Bytes(), &testRequestContainer{})
|
||||
require.NotNil(t, errJson)
|
||||
assert.Equal(t, true, strings.Contains(errJson.Msg(), "could not unmarshal response"))
|
||||
assert.Equal(t, http.StatusInternalServerError, errJson.StatusCode())
|
||||
})
|
||||
}
|
||||
|
||||
func TestProcessMiddlewareResponseFields(t *testing.T) {
|
||||
t.Run("Ok", func(t *testing.T) {
|
||||
container := defaultResponseContainer()
|
||||
|
||||
errJson := ProcessMiddlewareResponseFields(container)
|
||||
require.Equal(t, true, errJson == nil)
|
||||
assert.Equal(t, "0x666f6f", container.TestHex)
|
||||
assert.Equal(t, "test enum", container.TestEnum)
|
||||
assert.Equal(t, "1136214245", container.TestTime)
|
||||
})
|
||||
|
||||
t.Run("error", func(t *testing.T) {
|
||||
errJson := ProcessMiddlewareResponseFields("foo")
|
||||
require.NotNil(t, errJson)
|
||||
assert.Equal(t, true, strings.Contains(errJson.Msg(), "could not process response data"))
|
||||
assert.Equal(t, http.StatusInternalServerError, errJson.StatusCode())
|
||||
})
|
||||
}
|
||||
|
||||
func TestSerializeMiddlewareResponseIntoJson(t *testing.T) {
|
||||
container := defaultResponseContainer()
|
||||
j, errJson := SerializeMiddlewareResponseIntoJson(container)
|
||||
assert.Equal(t, true, errJson == nil)
|
||||
cToDeserialize := &testResponseContainer{}
|
||||
require.NoError(t, json.Unmarshal(j, cToDeserialize))
|
||||
assert.Equal(t, "test string", cToDeserialize.TestString)
|
||||
}
|
||||
|
||||
func TestWriteMiddlewareResponseHeadersAndBody(t *testing.T) {
|
||||
t.Run("GET", func(t *testing.T) {
|
||||
response := &http.Response{
|
||||
Header: http.Header{
|
||||
"Foo": []string{"foo"},
|
||||
"Grpc-Metadata-" + grpc.HttpCodeMetadataKey: []string{"204"},
|
||||
},
|
||||
}
|
||||
container := defaultResponseContainer()
|
||||
responseJson, err := json.Marshal(container)
|
||||
require.NoError(t, err)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
errJson := WriteMiddlewareResponseHeadersAndBody(response, responseJson, writer)
|
||||
require.Equal(t, true, errJson == nil)
|
||||
v, ok := writer.Header()["Foo"]
|
||||
require.Equal(t, true, ok, "header not found")
|
||||
require.Equal(t, 1, len(v), "wrong number of header values")
|
||||
assert.Equal(t, "foo", v[0])
|
||||
v, ok = writer.Header()["Content-Length"]
|
||||
require.Equal(t, true, ok, "header not found")
|
||||
require.Equal(t, 1, len(v), "wrong number of header values")
|
||||
assert.Equal(t, "102", v[0])
|
||||
assert.Equal(t, 204, writer.Code)
|
||||
assert.DeepEqual(t, responseJson, writer.Body.Bytes())
|
||||
})
|
||||
|
||||
t.Run("GET_no_grpc_status_code_header", func(t *testing.T) {
|
||||
response := &http.Response{
|
||||
Header: http.Header{},
|
||||
StatusCode: 204,
|
||||
}
|
||||
container := defaultResponseContainer()
|
||||
responseJson, err := json.Marshal(container)
|
||||
require.NoError(t, err)
|
||||
writer := httptest.NewRecorder()
|
||||
|
||||
errJson := WriteMiddlewareResponseHeadersAndBody(response, responseJson, writer)
|
||||
require.Equal(t, true, errJson == nil)
|
||||
assert.Equal(t, 204, writer.Code)
|
||||
})
|
||||
|
||||
t.Run("GET_invalid_status_code", func(t *testing.T) {
|
||||
response := &http.Response{
|
||||
Header: http.Header{},
|
||||
}
|
||||
|
||||
// Set invalid status code.
|
||||
response.Header["Grpc-Metadata-"+grpc.HttpCodeMetadataKey] = []string{"invalid"}
|
||||
|
||||
container := defaultResponseContainer()
|
||||
responseJson, err := json.Marshal(container)
|
||||
require.NoError(t, err)
|
||||
writer := httptest.NewRecorder()
|
||||
|
||||
errJson := WriteMiddlewareResponseHeadersAndBody(response, responseJson, writer)
|
||||
require.Equal(t, false, errJson == nil)
|
||||
assert.Equal(t, true, strings.Contains(errJson.Msg(), "could not parse status code"))
|
||||
assert.Equal(t, http.StatusInternalServerError, errJson.StatusCode())
|
||||
})
|
||||
|
||||
t.Run("POST", func(t *testing.T) {
|
||||
response := &http.Response{
|
||||
Header: http.Header{},
|
||||
StatusCode: 204,
|
||||
}
|
||||
container := defaultResponseContainer()
|
||||
responseJson, err := json.Marshal(container)
|
||||
require.NoError(t, err)
|
||||
writer := httptest.NewRecorder()
|
||||
|
||||
errJson := WriteMiddlewareResponseHeadersAndBody(response, responseJson, writer)
|
||||
require.Equal(t, true, errJson == nil)
|
||||
assert.Equal(t, 204, writer.Code)
|
||||
})
|
||||
|
||||
t.Run("POST_with_response_body", func(t *testing.T) {
|
||||
response := &http.Response{
|
||||
Header: http.Header{},
|
||||
StatusCode: 204,
|
||||
}
|
||||
container := defaultResponseContainer()
|
||||
responseJson, err := json.Marshal(container)
|
||||
require.NoError(t, err)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
errJson := WriteMiddlewareResponseHeadersAndBody(response, responseJson, writer)
|
||||
require.Equal(t, true, errJson == nil)
|
||||
assert.Equal(t, 204, writer.Code)
|
||||
assert.DeepEqual(t, responseJson, writer.Body.Bytes())
|
||||
})
|
||||
|
||||
t.Run("POST_with_empty_json_body", func(t *testing.T) {
|
||||
response := &http.Response{
|
||||
Header: http.Header{},
|
||||
StatusCode: 204,
|
||||
}
|
||||
responseJson, err := json.Marshal(struct{}{})
|
||||
require.NoError(t, err)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
errJson := WriteMiddlewareResponseHeadersAndBody(response, responseJson, writer)
|
||||
require.Equal(t, true, errJson == nil)
|
||||
assert.Equal(t, 204, writer.Code)
|
||||
assert.DeepEqual(t, []byte(nil), writer.Body.Bytes())
|
||||
assert.Equal(t, "0", writer.Header()["Content-Length"][0])
|
||||
})
|
||||
}
|
||||
|
||||
func TestWriteError(t *testing.T) {
|
||||
t.Run("ok", func(t *testing.T) {
|
||||
responseHeader := http.Header{
|
||||
"Grpc-Metadata-" + grpc.CustomErrorMetadataKey: []string{"{\"CustomField\":\"bar\"}"},
|
||||
}
|
||||
errJson := &testErrorJson{
|
||||
Message: "foo",
|
||||
Code: 500,
|
||||
}
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
WriteError(writer, errJson, responseHeader)
|
||||
v, ok := writer.Header()["Content-Length"]
|
||||
require.Equal(t, true, ok, "header not found")
|
||||
require.Equal(t, 1, len(v), "wrong number of header values")
|
||||
assert.Equal(t, "48", v[0])
|
||||
v, ok = writer.Header()["Content-Type"]
|
||||
require.Equal(t, true, ok, "header not found")
|
||||
require.Equal(t, 1, len(v), "wrong number of header values")
|
||||
assert.Equal(t, "application/json", v[0])
|
||||
assert.Equal(t, 500, writer.Code)
|
||||
eDeserialize := &testErrorJson{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), eDeserialize))
|
||||
assert.Equal(t, "foo", eDeserialize.Message)
|
||||
assert.Equal(t, 500, eDeserialize.Code)
|
||||
assert.Equal(t, "bar", eDeserialize.CustomField)
|
||||
})
|
||||
|
||||
t.Run("invalid_custom_error_header", func(t *testing.T) {
|
||||
logHook := test.NewGlobal()
|
||||
|
||||
responseHeader := http.Header{
|
||||
"Grpc-Metadata-" + grpc.CustomErrorMetadataKey: []string{"invalid"},
|
||||
}
|
||||
|
||||
WriteError(httptest.NewRecorder(), &testErrorJson{}, responseHeader)
|
||||
assert.LogsContain(t, logHook, "Could not unmarshal custom error message")
|
||||
})
|
||||
}
|
||||
api/gateway/api_middleware_structs.go (new file, 56 lines)
@@ -0,0 +1,56 @@
|
||||
package gateway
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// ---------------
|
||||
// Error handling.
|
||||
// ---------------
|
||||
|
||||
// ErrorJson describes common functionality of all JSON error representations.
|
||||
type ErrorJson interface {
|
||||
StatusCode() int
|
||||
SetCode(code int)
|
||||
Msg() string
|
||||
}
|
||||
|
||||
// DefaultErrorJson is a JSON representation of a simple error value, containing only a message and an error code.
|
||||
type DefaultErrorJson struct {
|
||||
Message string `json:"message"`
|
||||
Code int `json:"code"`
|
||||
}
|
||||
|
||||
// InternalServerErrorWithMessage returns a DefaultErrorJson with 500 code and a custom message.
|
||||
func InternalServerErrorWithMessage(err error, message string) *DefaultErrorJson {
|
||||
e := errors.Wrapf(err, message)
|
||||
return &DefaultErrorJson{
|
||||
Message: e.Error(),
|
||||
Code: http.StatusInternalServerError,
|
||||
}
|
||||
}
|
||||
|
||||
// InternalServerError returns a DefaultErrorJson with 500 code.
|
||||
func InternalServerError(err error) *DefaultErrorJson {
|
||||
return &DefaultErrorJson{
|
||||
Message: err.Error(),
|
||||
Code: http.StatusInternalServerError,
|
||||
}
|
||||
}
|
||||
|
||||
// StatusCode returns the error's underlying error code.
|
||||
func (e *DefaultErrorJson) StatusCode() int {
|
||||
return e.Code
|
||||
}
|
||||
|
||||
// Msg returns the error's underlying message.
|
||||
func (e *DefaultErrorJson) Msg() string {
|
||||
return e.Message
|
||||
}
|
||||
|
||||
// SetCode sets the error's underlying error code.
|
||||
func (e *DefaultErrorJson) SetCode(code int) {
|
||||
e.Code = code
|
||||
}
|
||||
api/gateway/gateway.go (new file, 286 lines)
@@ -0,0 +1,286 @@
|
||||
// Package gateway defines a grpc-gateway server that serves HTTP-JSON traffic and acts as a proxy between HTTP and gRPC.
|
||||
package gateway
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
gwruntime "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/runtime"
|
||||
"github.com/rs/cors"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/connectivity"
|
||||
"google.golang.org/grpc/credentials"
|
||||
)
|
||||
|
||||
var _ runtime.Service = (*Gateway)(nil)
|
||||
|
||||
// PbMux serves grpc-gateway requests for selected patterns using registered protobuf handlers.
|
||||
type PbMux struct {
|
||||
Registrations []PbHandlerRegistration // Protobuf registrations to be registered in Mux.
|
||||
Patterns []string // URL patterns that will be handled by Mux.
|
||||
Mux *gwruntime.ServeMux // The mux that will be used for grpc-gateway requests.
|
||||
}
|
||||
|
||||
// PbHandlerRegistration is a function that registers a protobuf handler.
|
||||
type PbHandlerRegistration func(context.Context, *gwruntime.ServeMux, *grpc.ClientConn) error
|
||||
|
||||
// MuxHandler is a function that implements the mux handler functionality.
|
||||
type MuxHandler func(http.Handler, http.ResponseWriter, *http.Request)
|
||||
|
||||
// Gateway is the gRPC gateway to serve HTTP JSON traffic as a proxy and forward it to the gRPC server.
|
||||
type Gateway struct {
|
||||
conn *grpc.ClientConn
|
||||
pbHandlers []PbMux
|
||||
muxHandler MuxHandler
|
||||
maxCallRecvMsgSize uint64
|
||||
mux *http.ServeMux
|
||||
server *http.Server
|
||||
cancel context.CancelFunc
|
||||
remoteCert string
|
||||
gatewayAddr string
|
||||
apiMiddlewareAddr string
|
||||
apiMiddlewareEndpointFactory EndpointFactory
|
||||
ctx context.Context
|
||||
startFailure error
|
||||
remoteAddr string
|
||||
allowedOrigins []string
|
||||
}
|
||||
|
||||
// New returns a new instance of the Gateway.
|
||||
func New(
|
||||
ctx context.Context,
|
||||
pbHandlers []PbMux,
|
||||
muxHandler MuxHandler,
|
||||
remoteAddr,
|
||||
gatewayAddress string,
|
||||
) *Gateway {
|
||||
g := &Gateway{
|
||||
pbHandlers: pbHandlers,
|
||||
muxHandler: muxHandler,
|
||||
mux: http.NewServeMux(),
|
||||
gatewayAddr: gatewayAddress,
|
||||
ctx: ctx,
|
||||
remoteAddr: remoteAddr,
|
||||
allowedOrigins: []string{},
|
||||
}
|
||||
return g
|
||||
}
|
||||
|
||||
// WithMux allows adding a custom http.ServeMux to the gateway.
|
||||
func (g *Gateway) WithMux(m *http.ServeMux) *Gateway {
|
||||
g.mux = m
|
||||
return g
|
||||
}
|
||||
|
||||
// WithAllowedOrigins allows adding a set of allowed origins to the gateway.
|
||||
func (g *Gateway) WithAllowedOrigins(origins []string) *Gateway {
|
||||
g.allowedOrigins = origins
|
||||
return g
|
||||
}
|
||||
|
||||
// WithRemoteCert allows adding a custom certificate to the gateway.
|
||||
func (g *Gateway) WithRemoteCert(cert string) *Gateway {
|
||||
g.remoteCert = cert
|
||||
return g
|
||||
}
|
||||
|
||||
// WithMaxCallRecvMsgSize allows specifying the maximum allowed gRPC message size.
|
||||
func (g *Gateway) WithMaxCallRecvMsgSize(size uint64) *Gateway {
|
||||
g.maxCallRecvMsgSize = size
|
||||
return g
|
||||
}
|
||||
|
||||
// WithApiMiddleware allows adding API Middleware proxy to the gateway.
|
||||
func (g *Gateway) WithApiMiddleware(address string, endpointFactory EndpointFactory) *Gateway {
|
||||
g.apiMiddlewareAddr = address
|
||||
g.apiMiddlewareEndpointFactory = endpointFactory
|
||||
return g
|
||||
}
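// NOTE: The sketch below is illustrative and not part of the original file. The functional
// options above are meant to be chained after New. The addresses and the endpointFactory
// value are hypothetical.
//
//	g := New(ctx, pbHandlers, muxHandler, "127.0.0.1:4000", "127.0.0.1:3500").
//		WithAllowedOrigins([]string{"*"}).
//		WithMaxCallRecvMsgSize(1 << 22).
//		WithApiMiddleware("127.0.0.1:3501", endpointFactory)
//	g.Start()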
|
||||
|
||||
// Start the gateway service.
|
||||
func (g *Gateway) Start() {
|
||||
ctx, cancel := context.WithCancel(g.ctx)
|
||||
g.cancel = cancel
|
||||
|
||||
conn, err := g.dial(ctx, "tcp", g.remoteAddr)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to connect to gRPC server")
|
||||
g.startFailure = err
|
||||
return
|
||||
}
|
||||
g.conn = conn
|
||||
|
||||
for _, h := range g.pbHandlers {
|
||||
for _, r := range h.Registrations {
|
||||
if err := r(ctx, h.Mux, g.conn); err != nil {
|
||||
log.WithError(err).Error("Failed to register handler")
|
||||
g.startFailure = err
|
||||
return
|
||||
}
|
||||
}
|
||||
for _, p := range h.Patterns {
|
||||
g.mux.Handle(p, h.Mux)
|
||||
}
|
||||
}
|
||||
|
||||
corsMux := g.corsMiddleware(g.mux)
|
||||
|
||||
if g.muxHandler != nil {
|
||||
g.mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
|
||||
g.muxHandler(corsMux, w, r)
|
||||
})
|
||||
}
|
||||
|
||||
g.server = &http.Server{
|
||||
Addr: g.gatewayAddr,
|
||||
Handler: corsMux,
|
||||
}
|
||||
|
||||
go func() {
|
||||
log.WithField("address", g.gatewayAddr).Info("Starting gRPC gateway")
|
||||
if err := g.server.ListenAndServe(); err != http.ErrServerClosed {
|
||||
log.WithError(err).Error("Failed to start gRPC gateway")
|
||||
g.startFailure = err
|
||||
return
|
||||
}
|
||||
}()
|
||||
|
||||
if g.apiMiddlewareAddr != "" && g.apiMiddlewareEndpointFactory != nil && !g.apiMiddlewareEndpointFactory.IsNil() {
|
||||
go g.registerApiMiddleware()
|
||||
}
|
||||
}
|
||||
|
||||
// Status of grpc gateway. Returns an error if this service is unhealthy.
|
||||
func (g *Gateway) Status() error {
|
||||
if g.startFailure != nil {
|
||||
return g.startFailure
|
||||
}
|
||||
|
||||
if s := g.conn.GetState(); s != connectivity.Ready {
|
||||
return fmt.Errorf("grpc server is %s", s)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop the gateway with a graceful shutdown.
|
||||
func (g *Gateway) Stop() error {
|
||||
if g.server != nil {
|
||||
shutdownCtx, shutdownCancel := context.WithTimeout(g.ctx, 2*time.Second)
|
||||
defer shutdownCancel()
|
||||
if err := g.server.Shutdown(shutdownCtx); err != nil {
|
||||
if errors.Is(err, context.DeadlineExceeded) {
|
||||
log.Warn("Existing connections terminated")
|
||||
} else {
|
||||
log.WithError(err).Error("Failed to gracefully shut down server")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if g.cancel != nil {
|
||||
g.cancel()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (g *Gateway) corsMiddleware(h http.Handler) http.Handler {
|
||||
c := cors.New(cors.Options{
|
||||
AllowedOrigins: g.allowedOrigins,
|
||||
AllowedMethods: []string{http.MethodPost, http.MethodGet, http.MethodOptions},
|
||||
AllowCredentials: true,
|
||||
MaxAge: 600,
|
||||
AllowedHeaders: []string{"*"},
|
||||
})
|
||||
return c.Handler(h)
|
||||
}
|
||||
|
||||
const swaggerDir = "proto/prysm/v1alpha1/"
|
||||
|
||||
// SwaggerServer returns swagger specification files located under "/swagger/"
|
||||
func SwaggerServer() http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
if !strings.HasSuffix(r.URL.Path, ".swagger.json") {
|
||||
log.Debugf("Not found: %s", r.URL.Path)
|
||||
http.NotFound(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
log.Debugf("Serving %s\n", r.URL.Path)
|
||||
p := strings.TrimPrefix(r.URL.Path, "/swagger/")
|
||||
p = path.Join(swaggerDir, p)
|
||||
http.ServeFile(w, r, p)
|
||||
}
|
||||
}
|
||||
|
||||
// dial the gRPC server.
|
||||
func (g *Gateway) dial(ctx context.Context, network, addr string) (*grpc.ClientConn, error) {
|
||||
switch network {
|
||||
case "tcp":
|
||||
return g.dialTCP(ctx, addr)
|
||||
case "unix":
|
||||
return g.dialUnix(ctx, addr)
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported network type %q", network)
|
||||
}
|
||||
}
|
||||
|
||||
// dialTCP creates a client connection via TCP.
|
||||
// "addr" must be a valid TCP address with a port number.
|
||||
func (g *Gateway) dialTCP(ctx context.Context, addr string) (*grpc.ClientConn, error) {
|
||||
security := grpc.WithInsecure()
|
||||
if len(g.remoteCert) > 0 {
|
||||
creds, err := credentials.NewClientTLSFromFile(g.remoteCert, "")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
security = grpc.WithTransportCredentials(creds)
|
||||
}
|
||||
opts := []grpc.DialOption{
|
||||
security,
|
||||
grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(int(g.maxCallRecvMsgSize))),
|
||||
}
|
||||
|
||||
return grpc.DialContext(ctx, addr, opts...)
|
||||
}
|
||||
|
||||
// dialUnix creates a client connection via a unix domain socket.
|
||||
// "addr" must be a valid path to the socket.
|
||||
func (g *Gateway) dialUnix(ctx context.Context, addr string) (*grpc.ClientConn, error) {
|
||||
d := func(addr string, timeout time.Duration) (net.Conn, error) {
|
||||
return net.DialTimeout("unix", addr, timeout)
|
||||
}
|
||||
f := func(ctx context.Context, addr string) (net.Conn, error) {
|
||||
if deadline, ok := ctx.Deadline(); ok {
|
||||
return d(addr, time.Until(deadline))
|
||||
}
|
||||
return d(addr, 0)
|
||||
}
|
||||
opts := []grpc.DialOption{
|
||||
grpc.WithInsecure(),
|
||||
grpc.WithContextDialer(f),
|
||||
grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(int(g.maxCallRecvMsgSize))),
|
||||
}
|
||||
return grpc.DialContext(ctx, addr, opts...)
|
||||
}
|
||||
|
||||
func (g *Gateway) registerApiMiddleware() {
|
||||
proxy := &ApiProxyMiddleware{
|
||||
GatewayAddress: g.gatewayAddr,
|
||||
ProxyAddress: g.apiMiddlewareAddr,
|
||||
EndpointCreator: g.apiMiddlewareEndpointFactory,
|
||||
}
|
||||
log.WithField("API middleware address", g.apiMiddlewareAddr).Info("Starting API middleware")
|
||||
if err := proxy.Run(); err != http.ErrServerClosed {
|
||||
log.WithError(err).Error("Failed to start API middleware")
|
||||
g.startFailure = err
|
||||
return
|
||||
}
|
||||
}
|
||||
api/gateway/gateway_test.go (new file, 120 lines)
@@ -0,0 +1,120 @@
|
||||
package gateway
|
||||
|
||||
import (
|
||||
"context"
|
||||
"flag"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
type mockEndpointFactory struct {
|
||||
}
|
||||
|
||||
func (*mockEndpointFactory) Paths() []string {
|
||||
return []string{}
|
||||
}
|
||||
|
||||
func (*mockEndpointFactory) Create(_ string) (*Endpoint, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (*mockEndpointFactory) IsNil() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func TestGateway_Customized(t *testing.T) {
|
||||
mux := http.NewServeMux()
|
||||
cert := "cert"
|
||||
origins := []string{"origin"}
|
||||
size := uint64(100)
|
||||
middlewareAddr := "middleware"
|
||||
endpointFactory := &mockEndpointFactory{}
|
||||
|
||||
g := New(
|
||||
context.Background(),
|
||||
[]PbMux{},
|
||||
func(handler http.Handler, writer http.ResponseWriter, request *http.Request) {
|
||||
|
||||
},
|
||||
"",
|
||||
"",
|
||||
).WithMux(mux).
|
||||
WithRemoteCert(cert).
|
||||
WithAllowedOrigins(origins).
|
||||
WithMaxCallRecvMsgSize(size).
|
||||
WithApiMiddleware(middlewareAddr, endpointFactory)
|
||||
|
||||
assert.Equal(t, mux, g.mux)
|
||||
assert.Equal(t, cert, g.remoteCert)
|
||||
require.Equal(t, 1, len(g.allowedOrigins))
|
||||
assert.Equal(t, origins[0], g.allowedOrigins[0])
|
||||
assert.Equal(t, size, g.maxCallRecvMsgSize)
|
||||
assert.Equal(t, middlewareAddr, g.apiMiddlewareAddr)
|
||||
assert.Equal(t, endpointFactory, g.apiMiddlewareEndpointFactory)
|
||||
}
|
||||
|
||||
func TestGateway_StartStop(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
|
||||
app := cli.App{}
|
||||
set := flag.NewFlagSet("test", 0)
|
||||
ctx := cli.NewContext(&app, set, nil)
|
||||
|
||||
gatewayPort := ctx.Int(flags.GRPCGatewayPort.Name)
|
||||
gatewayHost := ctx.String(flags.GRPCGatewayHost.Name)
|
||||
rpcHost := ctx.String(flags.RPCHost.Name)
|
||||
selfAddress := fmt.Sprintf("%s:%d", rpcHost, ctx.Int(flags.RPCPort.Name))
|
||||
gatewayAddress := fmt.Sprintf("%s:%d", gatewayHost, gatewayPort)
|
||||
|
||||
g := New(
|
||||
ctx.Context,
|
||||
[]PbMux{},
|
||||
func(handler http.Handler, writer http.ResponseWriter, request *http.Request) {
|
||||
|
||||
},
|
||||
selfAddress,
|
||||
gatewayAddress,
|
||||
)
|
||||
|
||||
g.Start()
|
||||
go func() {
|
||||
require.LogsContain(t, hook, "Starting gRPC gateway")
|
||||
require.LogsDoNotContain(t, hook, "Starting API middleware")
|
||||
}()
|
||||
|
||||
err := g.Stop()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestGateway_NilHandler_NotFoundHandlerRegistered(t *testing.T) {
|
||||
app := cli.App{}
|
||||
set := flag.NewFlagSet("test", 0)
|
||||
ctx := cli.NewContext(&app, set, nil)
|
||||
|
||||
gatewayPort := ctx.Int(flags.GRPCGatewayPort.Name)
|
||||
gatewayHost := ctx.String(flags.GRPCGatewayHost.Name)
|
||||
rpcHost := ctx.String(flags.RPCHost.Name)
|
||||
selfAddress := fmt.Sprintf("%s:%d", rpcHost, ctx.Int(flags.RPCPort.Name))
|
||||
gatewayAddress := fmt.Sprintf("%s:%d", gatewayHost, gatewayPort)
|
||||
|
||||
g := New(
|
||||
ctx.Context,
|
||||
[]PbMux{},
|
||||
/* muxHandler */ nil,
|
||||
selfAddress,
|
||||
gatewayAddress,
|
||||
)
|
||||
|
||||
writer := httptest.NewRecorder()
|
||||
g.mux.ServeHTTP(writer, &http.Request{Method: "GET", Host: "localhost", URL: &url.URL{Path: "/foo"}})
|
||||
assert.Equal(t, http.StatusNotFound, writer.Code)
|
||||
}
|
||||
api/gateway/param_handling.go (new file, 103 lines)
@@ -0,0 +1,103 @@
package gateway

import (
	"encoding/base64"
	"net/http"
	"net/url"
	"strings"

	"github.com/gorilla/mux"
	butil "github.com/prysmaticlabs/prysm/shared/bytesutil"
	"github.com/wealdtech/go-bytesutil"
)

// HandleURLParameters processes URL parameters, allowing parameterized URLs to be safely and correctly proxied to grpc-gateway.
func HandleURLParameters(url string, req *http.Request, literals []string) ErrorJson {
	segments := strings.Split(url, "/")

segmentsLoop:
	for i, s := range segments {
		// We only care about segments which are parameterized.
		if isRequestParam(s) {
			// Don't do anything with parameters which should be forwarded literally to gRPC.
			for _, l := range literals {
				if s == "{"+l+"}" {
					continue segmentsLoop
				}
			}

			routeVar := mux.Vars(req)[s[1:len(s)-1]]
			bRouteVar := []byte(routeVar)
			if butil.IsHex(bRouteVar) {
				var err error
				bRouteVar, err = bytesutil.FromHexString(string(bRouteVar))
				if err != nil {
					return InternalServerErrorWithMessage(err, "could not process URL parameter")
				}
			}
			// Converting hex to base64 may result in a value which malforms the URL.
			// We use URLEncoding to safely escape such values.
			base64RouteVar := base64.URLEncoding.EncodeToString(bRouteVar)

			// Merge segments back into the full URL.
			splitPath := strings.Split(req.URL.Path, "/")
			splitPath[i] = base64RouteVar
			req.URL.Path = strings.Join(splitPath, "/")
		}
	}
	return nil
}
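// NOTE: The worked example below is illustrative and not part of the original file. For a
// hypothetical template "/states/{state_id}" and a request to "/states/0x626172", the hex
// segment is decoded and re-encoded, so the proxied path becomes "/states/YmFy" (URL-safe
// base64 of "bar"). Passing "state_id" in the literals slice would leave the segment untouched.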
|
||||
|
||||
// HandleQueryParameters processes query parameters, allowing them to be safely and correctly proxied to grpc-gateway.
|
||||
func HandleQueryParameters(req *http.Request, params []QueryParam) ErrorJson {
|
||||
queryParams := req.URL.Query()
|
||||
|
||||
normalizeQueryValues(queryParams)
|
||||
|
||||
for key, vals := range queryParams {
|
||||
for _, p := range params {
|
||||
if key == p.Name {
|
||||
if p.Hex {
|
||||
queryParams.Del(key)
|
||||
for _, v := range vals {
|
||||
b := []byte(v)
|
||||
if butil.IsHex(b) {
|
||||
var err error
|
||||
b, err = bytesutil.FromHexString(v)
|
||||
if err != nil {
|
||||
return InternalServerErrorWithMessage(err, "could not process query parameter")
|
||||
}
|
||||
}
|
||||
queryParams.Add(key, base64.URLEncoding.EncodeToString(b))
|
||||
}
|
||||
}
|
||||
if p.Enum {
|
||||
queryParams.Del(key)
|
||||
for _, v := range vals {
|
||||
// gRPC expects uppercase enum values.
|
||||
queryParams.Add(key, strings.ToUpper(v))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
req.URL.RawQuery = queryParams.Encode()
|
||||
return nil
|
||||
}
|
||||
|
||||
// isRequestParam verifies whether the passed string is a request parameter.
|
||||
// Request parameters are enclosed in { and }.
|
||||
func isRequestParam(s string) bool {
|
||||
return len(s) > 2 && s[0] == '{' && s[len(s)-1] == '}'
|
||||
}
|
||||
|
||||
func normalizeQueryValues(queryParams url.Values) {
|
||||
// Replace comma-separated values with individual values.
|
||||
for key, vals := range queryParams {
|
||||
splitVals := make([]string, 0)
|
||||
for _, v := range vals {
|
||||
splitVals = append(splitVals, strings.Split(v, ",")...)
|
||||
}
|
||||
queryParams[key] = splitVals
|
||||
}
|
||||
}
|
||||
api/gateway/param_handling_test.go (new file, 124 lines)
@@ -0,0 +1,124 @@
|
||||
package gateway
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
)
|
||||
|
||||
func TestHandleURLParameters(t *testing.T) {
|
||||
var body bytes.Buffer
|
||||
|
||||
t.Run("no_params", func(t *testing.T) {
|
||||
request := httptest.NewRequest("GET", "http://foo.example/bar", &body)
|
||||
|
||||
errJson := HandleURLParameters("/not_param", request, []string{})
|
||||
require.Equal(t, true, errJson == nil)
|
||||
assert.Equal(t, "/bar", request.URL.Path)
|
||||
})
|
||||
|
||||
t.Run("with_params", func(t *testing.T) {
|
||||
muxVars := make(map[string]string)
|
||||
muxVars["bar_param"] = "bar"
|
||||
muxVars["quux_param"] = "quux"
|
||||
request := httptest.NewRequest("GET", "http://foo.example/bar/baz/quux", &body)
|
||||
request = mux.SetURLVars(request, muxVars)
|
||||
|
||||
errJson := HandleURLParameters("/{bar_param}/not_param/{quux_param}", request, []string{})
|
||||
require.Equal(t, true, errJson == nil)
|
||||
assert.Equal(t, "/YmFy/baz/cXV1eA==", request.URL.Path)
|
||||
})
|
||||
|
||||
t.Run("with_literal", func(t *testing.T) {
|
||||
muxVars := make(map[string]string)
|
||||
muxVars["bar_param"] = "bar"
|
||||
request := httptest.NewRequest("GET", "http://foo.example/bar/baz", &body)
|
||||
request = mux.SetURLVars(request, muxVars)
|
||||
|
||||
errJson := HandleURLParameters("/{bar_param}/not_param/", request, []string{"bar_param"})
|
||||
require.Equal(t, true, errJson == nil)
|
||||
assert.Equal(t, "/bar/baz", request.URL.Path)
|
||||
})
|
||||
|
||||
t.Run("with_hex", func(t *testing.T) {
|
||||
muxVars := make(map[string]string)
|
||||
muxVars["hex_param"] = "0x626172"
|
||||
request := httptest.NewRequest("GET", "http://foo.example/0x626172/baz", &body)
|
||||
request = mux.SetURLVars(request, muxVars)
|
||||
|
||||
errJson := HandleURLParameters("/{hex_param}/not_param/", request, []string{})
|
||||
require.Equal(t, true, errJson == nil)
|
||||
assert.Equal(t, "/YmFy/baz", request.URL.Path)
|
||||
})
|
||||
}
|
||||
|
||||
func TestHandleQueryParameters(t *testing.T) {
|
||||
var body bytes.Buffer
|
||||
|
||||
t.Run("regular_params", func(t *testing.T) {
|
||||
request := httptest.NewRequest("GET", "http://foo.example?bar=bar&baz=baz", &body)
|
||||
|
||||
errJson := HandleQueryParameters(request, []QueryParam{{Name: "bar"}, {Name: "baz"}})
|
||||
require.Equal(t, true, errJson == nil)
|
||||
query := request.URL.Query()
|
||||
v, ok := query["bar"]
|
||||
require.Equal(t, true, ok, "query param not found")
|
||||
require.Equal(t, 1, len(v), "wrong number of query param values")
|
||||
assert.Equal(t, "bar", v[0])
|
||||
v, ok = query["baz"]
|
||||
require.Equal(t, true, ok, "query param not found")
|
||||
require.Equal(t, 1, len(v), "wrong number of query param values")
|
||||
assert.Equal(t, "baz", v[0])
|
||||
})
|
||||
|
||||
t.Run("hex_and_enum_params", func(t *testing.T) {
|
||||
request := httptest.NewRequest("GET", "http://foo.example?hex=0x626172&baz=baz", &body)
|
||||
|
||||
errJson := HandleQueryParameters(request, []QueryParam{{Name: "hex", Hex: true}, {Name: "baz", Enum: true}})
|
||||
require.Equal(t, true, errJson == nil)
|
||||
query := request.URL.Query()
|
||||
v, ok := query["hex"]
|
||||
require.Equal(t, true, ok, "query param not found")
|
||||
require.Equal(t, 1, len(v), "wrong number of query param values")
|
||||
assert.Equal(t, "YmFy", v[0])
|
||||
v, ok = query["baz"]
|
||||
require.Equal(t, true, ok, "query param not found")
|
||||
require.Equal(t, 1, len(v), "wrong number of query param values")
|
||||
assert.Equal(t, "BAZ", v[0])
|
||||
})
|
||||
}
|
||||
|
||||
func TestIsRequestParam(t *testing.T) {
|
||||
tests := []struct {
|
||||
s string
|
||||
b bool
|
||||
}{
|
||||
{"", false},
|
||||
{"{", false},
|
||||
{"}", false},
|
||||
{"{}", false},
|
||||
{"{x}", true},
|
||||
{"{very_long_parameter_name_with_underscores}", true},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
b := isRequestParam(tt.s)
|
||||
assert.Equal(t, tt.b, b)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNormalizeQueryValues(t *testing.T) {
|
||||
input := make(map[string][]string)
|
||||
input["key"] = []string{"value1", "value2,value3,value4", "value5"}
|
||||
|
||||
normalizeQueryValues(input)
|
||||
require.Equal(t, 5, len(input["key"]))
|
||||
assert.Equal(t, "value1", input["key"][0])
|
||||
assert.Equal(t, "value2", input["key"][1])
|
||||
assert.Equal(t, "value3", input["key"][2])
|
||||
assert.Equal(t, "value4", input["key"][3])
|
||||
assert.Equal(t, "value5", input["key"][4])
|
||||
}
|
||||
api/grpc/BUILD.bazel (new file, 30 lines)
@@ -0,0 +1,30 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"grpcutils.go",
|
||||
"parameters.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/api/grpc",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@org_golang_google_grpc//:go_default_library",
|
||||
"@org_golang_google_grpc//metadata:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["grpcutils_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//shared/testutil/assert:go_default_library",
|
||||
"//shared/testutil/require:go_default_library",
|
||||
"@com_github_grpc_ecosystem_grpc_gateway_v2//runtime:go_default_library",
|
||||
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
|
||||
"@org_golang_google_grpc//:go_default_library",
|
||||
"@org_golang_google_grpc//metadata:go_default_library",
|
||||
],
|
||||
)
|
||||
api/grpc/grpcutils.go (new file, 96 lines)
@@ -0,0 +1,96 @@
package grpc

import (
	"context"
	"encoding/json"
	"fmt"
	"strings"
	"time"

	"github.com/sirupsen/logrus"
	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

// LogRequests logs the gRPC backend as well as request duration when the log level is set to debug
// or higher.
func LogRequests(
	ctx context.Context,
	method string, req,
	reply interface{},
	cc *grpc.ClientConn,
	invoker grpc.UnaryInvoker,
	opts ...grpc.CallOption,
) error {
	// Shortcut when debug logging is not enabled.
	if logrus.GetLevel() < logrus.DebugLevel {
		return invoker(ctx, method, req, reply, cc, opts...)
	}

	var header metadata.MD
	opts = append(
		opts,
		grpc.Header(&header),
	)
	start := time.Now()
	err := invoker(ctx, method, req, reply, cc, opts...)
	logrus.WithField("backend", header["x-backend"]).
		WithField("method", method).WithField("duration", time.Since(start)).
		Debug("gRPC request finished.")
	return err
}

// LogStream prints the method at DEBUG level at the start of the stream.
func LogStream(
	ctx context.Context,
	sd *grpc.StreamDesc,
	conn *grpc.ClientConn,
	method string,
	streamer grpc.Streamer,
	opts ...grpc.CallOption,
) (grpc.ClientStream, error) {
	// Shortcut when debug logging is not enabled.
	if logrus.GetLevel() < logrus.DebugLevel {
		return streamer(ctx, sd, conn, method, opts...)
	}

	var header metadata.MD
	opts = append(
		opts,
		grpc.Header(&header),
	)
	strm, err := streamer(ctx, sd, conn, method, opts...)
	logrus.WithField("backend", header["x-backend"]).
		WithField("method", method).
		Debug("gRPC stream started.")
	return strm, err
}

// AppendHeaders parses the provided GRPC headers
// and attaches them to the provided context.
func AppendHeaders(parent context.Context, headers []string) context.Context {
	for _, h := range headers {
		if h != "" {
			keyValue := strings.Split(h, "=")
			if len(keyValue) < 2 {
				logrus.Warnf("Incorrect gRPC header flag format. Skipping %v", keyValue[0])
				continue
			}
			parent = metadata.AppendToOutgoingContext(parent, keyValue[0], strings.Join(keyValue[1:], "="))
		}
	}
	return parent
}

// AppendCustomErrorHeader sets a CustomErrorMetadataKey gRPC header on the passed in context,
// using the passed in error data as the header's value. The data is serialized as JSON.
func AppendCustomErrorHeader(ctx context.Context, errorData interface{}) error {
	j, err := json.Marshal(errorData)
	if err != nil {
		return fmt.Errorf("could not marshal error data into JSON: %w", err)
	}
	if err := grpc.SetHeader(ctx, metadata.Pairs(CustomErrorMetadataKey, string(j))); err != nil {
		return fmt.Errorf("could not set custom error header: %w", err)
	}
	return nil
}
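LogRequests and LogStream have the standard unary and stream client-interceptor signatures, so they can be passed directly as dial options. The sketch below is a minimal, hedged example of wiring them up together with AppendHeaders; the target address and the header value are placeholders, not taken from this diff.

package main

import (
	"context"
	"log"

	prysmgrpc "github.com/prysmaticlabs/prysm/api/grpc"
	"google.golang.org/grpc"
)

func main() {
	// Attach user-supplied "key=value" headers as outgoing gRPC metadata.
	ctx := prysmgrpc.AppendHeaders(context.Background(), []string{"authorization=Bearer placeholder-token"})

	// Register the debug-level logging interceptors from this package.
	conn, err := grpc.DialContext(ctx, "127.0.0.1:4000",
		grpc.WithInsecure(),
		grpc.WithUnaryInterceptor(prysmgrpc.LogRequests),
		grpc.WithStreamInterceptor(prysmgrpc.LogStream),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	_ = ctx // subsequent RPCs issued with this context carry the appended metadata
}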
78
api/grpc/grpcutils_test.go
Normal file
@@ -0,0 +1,78 @@
package grpc

import (
	"context"
	"encoding/json"
	"strings"
	"testing"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"github.com/prysmaticlabs/prysm/shared/testutil/assert"
	"github.com/prysmaticlabs/prysm/shared/testutil/require"
	logTest "github.com/sirupsen/logrus/hooks/test"
	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

type customErrorData struct {
	Message string `json:"message"`
}

func TestAppendHeaders(t *testing.T) {
	t.Run("one_header", func(t *testing.T) {
		ctx := AppendHeaders(context.Background(), []string{"first=value1"})
		md, ok := metadata.FromOutgoingContext(ctx)
		require.Equal(t, true, ok, "Failed to read context metadata")
		require.Equal(t, 1, md.Len(), "MetadataV0 contains wrong number of values")
		assert.Equal(t, "value1", md.Get("first")[0])
	})

	t.Run("multiple_headers", func(t *testing.T) {
		ctx := AppendHeaders(context.Background(), []string{"first=value1", "second=value2"})
		md, ok := metadata.FromOutgoingContext(ctx)
		require.Equal(t, true, ok, "Failed to read context metadata")
		require.Equal(t, 2, md.Len(), "MetadataV0 contains wrong number of values")
		assert.Equal(t, "value1", md.Get("first")[0])
		assert.Equal(t, "value2", md.Get("second")[0])
	})

	t.Run("one_empty_header", func(t *testing.T) {
		ctx := AppendHeaders(context.Background(), []string{"first=value1", ""})
		md, ok := metadata.FromOutgoingContext(ctx)
		require.Equal(t, true, ok, "Failed to read context metadata")
		require.Equal(t, 1, md.Len(), "MetadataV0 contains wrong number of values")
		assert.Equal(t, "value1", md.Get("first")[0])
	})

	t.Run("incorrect_header", func(t *testing.T) {
		logHook := logTest.NewGlobal()
		ctx := AppendHeaders(context.Background(), []string{"first=value1", "second"})
		md, ok := metadata.FromOutgoingContext(ctx)
		require.Equal(t, true, ok, "Failed to read context metadata")
		require.Equal(t, 1, md.Len(), "MetadataV0 contains wrong number of values")
		assert.Equal(t, "value1", md.Get("first")[0])
		assert.LogsContain(t, logHook, "Skipping second")
	})

	t.Run("header_value_with_equal_sign", func(t *testing.T) {
		ctx := AppendHeaders(context.Background(), []string{"first=value=1"})
		md, ok := metadata.FromOutgoingContext(ctx)
		require.Equal(t, true, ok, "Failed to read context metadata")
		require.Equal(t, 1, md.Len(), "MetadataV0 contains wrong number of values")
		assert.Equal(t, "value=1", md.Get("first")[0])
	})
}

func TestAppendCustomErrorHeader(t *testing.T) {
	stream := &runtime.ServerTransportStream{}
	ctx := grpc.NewContextWithServerTransportStream(context.Background(), stream)
	data := &customErrorData{Message: "foo"}
	require.NoError(t, AppendCustomErrorHeader(ctx, data))
	// The stream used in test setup sets the metadata key in lowercase.
	value, ok := stream.Header()[strings.ToLower(CustomErrorMetadataKey)]
	require.Equal(t, true, ok, "Failed to retrieve custom error metadata value")
	expected, err := json.Marshal(data)
	require.NoError(t, err)
	assert.Equal(t, string(expected), value[0])
}
8
api/grpc/parameters.go
Normal file
@@ -0,0 +1,8 @@
package grpc

// CustomErrorMetadataKey is the name of the metadata key storing additional error information.
// Metadata value is expected to be a byte-encoded JSON object.
const CustomErrorMetadataKey = "Custom-Error"

// HttpCodeMetadataKey is the key to use when setting custom HTTP status codes in gRPC metadata.
const HttpCodeMetadataKey = "X-Http-Code"
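These metadata keys are intended to be read by the HTTP gateway layer when it translates gRPC responses. As a hedged sketch (the gateway wiring itself is outside this diff, and the handler below is a placeholder), a server method could surface a custom HTTP status code together with structured error data like this:

package server // placeholder package for illustration

import (
	"context"

	prysmgrpc "github.com/prysmaticlabs/prysm/api/grpc"
	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

// doSomething shows how both metadata keys could be used from a handler.
func doSomething(ctx context.Context) error {
	// Ask the HTTP gateway to respond with 202 instead of its default status mapping.
	if err := grpc.SetHeader(ctx, metadata.Pairs(prysmgrpc.HttpCodeMetadataKey, "202")); err != nil {
		return err
	}
	// Attach machine-readable error details under CustomErrorMetadataKey (JSON-encoded).
	return prysmgrpc.AppendCustomErrorHeader(ctx, map[string]string{"message": "accepted for later processing"})
}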
@@ -1,10 +1,9 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library")
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_test")
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["pagination.go"],
|
||||
importpath = "github.com/prysmaticlabs/prysm/shared/pagination",
|
||||
importpath = "github.com/prysmaticlabs/prysm/api/pagination",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//shared/params:go_default_library",
|
||||
@@ -3,7 +3,7 @@ package pagination_test
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/shared/pagination"
|
||||
"github.com/prysmaticlabs/prysm/api/pagination"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
)
|
||||
@@ -1,26 +1,30 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library")
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_test")
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"debounce.go",
|
||||
"every.go",
|
||||
"multilock.go",
|
||||
"scatter.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/shared/mputil",
|
||||
importpath = "github.com/prysmaticlabs/prysm/async",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = ["@com_github_sirupsen_logrus//:go_default_library"],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
size = "small",
|
||||
srcs = [
|
||||
"benchmark_test.go",
|
||||
"debounce_test.go",
|
||||
"every_test.go",
|
||||
"multilock_test.go",
|
||||
"scatter_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//shared/testutil:go_default_library",
|
||||
"//shared/testutil/assert:go_default_library",
|
||||
"//shared/testutil/require:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
@@ -1,10 +1,9 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_test")
|
||||
load("@prysm//tools/go:def.bzl", "go_library")
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["abool.go"],
|
||||
importpath = "github.com/prysmaticlabs/prysm/shared/abool",
|
||||
importpath = "github.com/prysmaticlabs/prysm/async/abool",
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
|
||||
@@ -126,7 +126,7 @@ func TestToogleAfterOverflow(t *testing.T) {
|
||||
}
|
||||
|
||||
// make sure overflow happened
|
||||
var valueAfterToggle int32 = *(*int32)(v)
|
||||
var valueAfterToggle = *(*int32)(v)
|
||||
if valueAfterToggle >= valueBeforeToggle {
|
||||
t.Fatalf("Overflow does not happen as expected, before %d, after: %d", valueBeforeToggle, valueAfterToggle)
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
package mputil_test
|
||||
package async_test
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
@@ -6,7 +6,7 @@ import (
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/shared/mputil"
|
||||
"github.com/prysmaticlabs/prysm/async"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
@@ -52,7 +52,7 @@ func BenchmarkHash(b *testing.B) {
|
||||
func BenchmarkHashMP(b *testing.B) {
|
||||
output := make([][]byte, len(input))
|
||||
for i := 0; i < b.N; i++ {
|
||||
workerResults, err := mputil.Scatter(len(input), func(offset int, entries int, _ *sync.RWMutex) (interface{}, error) {
|
||||
workerResults, err := async.Scatter(len(input), func(offset int, entries int, _ *sync.RWMutex) (interface{}, error) {
|
||||
return hash(input[offset : offset+entries]), nil
|
||||
})
|
||||
require.NoError(b, err)
|
||||
@@ -1,4 +1,4 @@
|
||||
package asyncutil
|
||||
package async
|
||||
|
||||
import (
|
||||
"context"
|
||||
@@ -1,4 +1,4 @@
|
||||
package asyncutil
|
||||
package async_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/async"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
@@ -24,7 +25,7 @@ func TestDebounce_NoEvents(t *testing.T) {
|
||||
})
|
||||
}()
|
||||
go func() {
|
||||
Debounce(ctx, interval, eventsChan, func(event interface{}) {
|
||||
async.Debounce(ctx, interval, eventsChan, func(event interface{}) {
|
||||
timesHandled++
|
||||
})
|
||||
wg.Done()
|
||||
@@ -60,7 +61,7 @@ func TestDebounce_CtxClosing(t *testing.T) {
|
||||
})
|
||||
}()
|
||||
go func() {
|
||||
Debounce(ctx, interval, eventsChan, func(event interface{}) {
|
||||
async.Debounce(ctx, interval, eventsChan, func(event interface{}) {
|
||||
timesHandled++
|
||||
})
|
||||
wg.Done()
|
||||
@@ -76,7 +77,7 @@ func TestDebounce_SingleHandlerInvocation(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
interval := time.Second
|
||||
timesHandled := 0
|
||||
go Debounce(ctx, interval, eventsChan, func(event interface{}) {
|
||||
go async.Debounce(ctx, interval, eventsChan, func(event interface{}) {
|
||||
timesHandled++
|
||||
})
|
||||
for i := 0; i < 100; i++ {
|
||||
@@ -94,7 +95,7 @@ func TestDebounce_MultipleHandlerInvocation(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
interval := time.Second
|
||||
timesHandled := 0
|
||||
go Debounce(ctx, interval, eventsChan, func(event interface{}) {
|
||||
go async.Debounce(ctx, interval, eventsChan, func(event interface{}) {
|
||||
timesHandled++
|
||||
})
|
||||
for i := 0; i < 100; i++ {
|
||||
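For reference, the renamed helper keeps the shape these tests exercise: it consumes events from a channel and invokes the handler only once the interval has elapsed, so bursts of events collapse into a single call. A minimal usage sketch, assuming the events channel is a chan interface{} as in the tests:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/prysmaticlabs/prysm/async"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	events := make(chan interface{}, 16)
	go async.Debounce(ctx, 500*time.Millisecond, events, func(event interface{}) {
		// Runs only after the channel has been quiet for the full interval.
		fmt.Println("handled:", event)
	})

	for i := 0; i < 5; i++ {
		events <- i // rapid-fire events are debounced into one handler call
	}
	time.Sleep(time.Second)
}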
@@ -1,5 +1,4 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library")
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_test")
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
@@ -7,9 +6,9 @@ go_library(
|
||||
"feed.go",
|
||||
"subscription.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/shared/event",
|
||||
importpath = "github.com/prysmaticlabs/prysm/async/event",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = ["//shared/mclockutil:go_default_library"],
|
||||
deps = ["//time/mclock:go_default_library"],
|
||||
)
|
||||
|
||||
go_test(
|
||||
@@ -19,7 +19,7 @@ package event_test
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/shared/event"
|
||||
"github.com/prysmaticlabs/prysm/async/event"
|
||||
)
|
||||
|
||||
func ExampleFeed_acknowledgedEvents() {
|
||||
@@ -20,7 +20,7 @@ import (
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/shared/event"
|
||||
"github.com/prysmaticlabs/prysm/async/event"
|
||||
)
|
||||
|
||||
// This example demonstrates how SubscriptionScope can be used to control the lifetime of
|
||||
@@ -19,7 +19,7 @@ package event_test
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/shared/event"
|
||||
"github.com/prysmaticlabs/prysm/async/event"
|
||||
)
|
||||
|
||||
func ExampleNewSubscription() {
|
||||
@@ -98,6 +98,11 @@ func (f *Feed) typecheck(typ reflect.Type) bool {
|
||||
f.etype = typ
|
||||
return true
|
||||
}
|
||||
// In the event the feed's type is an actual interface, we
|
||||
// perform an interface conformance check here.
|
||||
if f.etype.Kind() == reflect.Interface && typ.Implements(f.etype) {
|
||||
return true
|
||||
}
|
||||
return f.etype == typ
|
||||
}
|
||||
|
||||
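The added conformance check means a subscriber may register a channel of an interface type and still receive concrete values that implement it. A hedged sketch of that usage follows; the notification types are invented for illustration and are not part of the diff.

package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/async/event"
)

// notification is an illustrative interface; blockNotification implements it.
type notification interface{ Kind() string }

type blockNotification struct{}

func (blockNotification) Kind() string { return "block" }

func main() {
	var feed event.Feed

	// Subscribing with an interface-typed channel is accepted because the
	// concrete type sent below satisfies the interface.
	ch := make(chan notification, 1)
	sub := feed.Subscribe(ch)
	defer sub.Unsubscribe()

	nsent := feed.Send(blockNotification{})
	fmt.Println(nsent, (<-ch).Kind()) // 1 block
}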
@@ -196,6 +201,7 @@ type feedSub struct {
|
||||
err chan error
|
||||
}
|
||||
|
||||
// Unsubscribe removes the feed subscription.
|
||||
func (sub *feedSub) Unsubscribe() {
|
||||
sub.errOnce.Do(func() {
|
||||
sub.feed.remove(sub)
|
||||
@@ -203,6 +209,7 @@ func (sub *feedSub) Unsubscribe() {
|
||||
})
|
||||
}
|
||||
|
||||
// Err returns error channel.
|
||||
func (sub *feedSub) Err() <-chan error {
|
||||
return sub.err
|
||||
}
|
||||
@@ -159,7 +159,7 @@ func TestFeedSubscribeSameChannel(t *testing.T) {
|
||||
done.Wait()
|
||||
}
|
||||
|
||||
func TestFeedSubscribeBlockedPost(t *testing.T) {
|
||||
func TestFeedSubscribeBlockedPost(_ *testing.T) {
|
||||
var (
|
||||
feed Feed
|
||||
nsends = 2000
|
||||
@@ -192,7 +192,7 @@ func TestFeedSubscribeBlockedPost(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestFeedUnsubscribeBlockedPost(t *testing.T) {
|
||||
func TestFeedUnsubscribeBlockedPost(_ *testing.T) {
|
||||
var (
|
||||
feed Feed
|
||||
nsends = 200
|
||||
@@ -229,7 +229,7 @@ func TestFeedUnsubscribeBlockedPost(t *testing.T) {
|
||||
|
||||
// Checks that unsubscribing a channel during Send works even if that
|
||||
// channel has already been sent on.
|
||||
func TestFeedUnsubscribeSentChan(t *testing.T) {
|
||||
func TestFeedUnsubscribeSentChan(_ *testing.T) {
|
||||
var (
|
||||
feed Feed
|
||||
ch1 = make(chan int)
|
||||
@@ -315,3 +315,193 @@ func BenchmarkFeedSend1000(b *testing.B) {
|
||||
b.StopTimer()
|
||||
done.Wait()
|
||||
}
|
||||
|
||||
func TestFeed_Send(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
evFeed *Feed
|
||||
testSetup func(fd *Feed, t *testing.T, o interface{})
|
||||
obj interface{}
|
||||
expectPanic bool
|
||||
}{
|
||||
{
|
||||
name: "normal struct",
|
||||
evFeed: new(Feed),
|
||||
testSetup: func(fd *Feed, t *testing.T, o interface{}) {
|
||||
testChan := make(chan testFeedWithPointer, 1)
|
||||
fd.Subscribe(testChan)
|
||||
},
|
||||
obj: testFeedWithPointer{
|
||||
a: new(uint64),
|
||||
b: new(string),
|
||||
},
|
||||
expectPanic: false,
|
||||
},
|
||||
{
|
||||
name: "un-implemented interface",
|
||||
evFeed: new(Feed),
|
||||
testSetup: func(fd *Feed, t *testing.T, o interface{}) {
|
||||
testChan := make(chan testFeedIface, 1)
|
||||
fd.Subscribe(testChan)
|
||||
},
|
||||
obj: testFeedWithPointer{
|
||||
a: new(uint64),
|
||||
b: new(string),
|
||||
},
|
||||
expectPanic: true,
|
||||
},
|
||||
{
|
||||
name: "semi-implemented interface",
|
||||
evFeed: new(Feed),
|
||||
testSetup: func(fd *Feed, t *testing.T, o interface{}) {
|
||||
testChan := make(chan testFeedIface, 1)
|
||||
fd.Subscribe(testChan)
|
||||
},
|
||||
obj: testFeed2{
|
||||
a: 0,
|
||||
b: "",
|
||||
c: []byte{'A'},
|
||||
},
|
||||
expectPanic: true,
|
||||
},
|
||||
{
|
||||
name: "fully-implemented interface",
|
||||
evFeed: new(Feed),
|
||||
testSetup: func(fd *Feed, t *testing.T, o interface{}) {
|
||||
testChan := make(chan testFeedIface)
|
||||
// Make it unbuffered to allow message to
|
||||
// pass through
|
||||
go func() {
|
||||
a := <-testChan
|
||||
if !reflect.DeepEqual(a, o) {
|
||||
t.Errorf("Got = %v, want = %v", a, o)
|
||||
}
|
||||
}()
|
||||
fd.Subscribe(testChan)
|
||||
},
|
||||
obj: testFeed{
|
||||
a: 0,
|
||||
b: "",
|
||||
},
|
||||
expectPanic: false,
|
||||
},
|
||||
{
|
||||
name: "fully-implemented interface with additional methods",
|
||||
evFeed: new(Feed),
|
||||
testSetup: func(fd *Feed, t *testing.T, o interface{}) {
|
||||
testChan := make(chan testFeedIface)
|
||||
// Make it unbuffered to allow message to
|
||||
// pass through
|
||||
go func() {
|
||||
a := <-testChan
|
||||
if !reflect.DeepEqual(a, o) {
|
||||
t.Errorf("Got = %v, want = %v", a, o)
|
||||
}
|
||||
}()
|
||||
fd.Subscribe(testChan)
|
||||
},
|
||||
obj: testFeed3{
|
||||
a: 0,
|
||||
b: "",
|
||||
c: []byte{'A'},
|
||||
d: []byte{'B'},
|
||||
},
|
||||
expectPanic: false,
|
||||
},
|
||||
{
|
||||
name: "concrete types implementing the same interface",
|
||||
evFeed: new(Feed),
|
||||
testSetup: func(fd *Feed, t *testing.T, o interface{}) {
|
||||
testChan := make(chan testFeed, 1)
|
||||
// Make it unbuffered to allow message to
|
||||
// pass through
|
||||
go func() {
|
||||
a := <-testChan
|
||||
if !reflect.DeepEqual(a, o) {
|
||||
t.Errorf("Got = %v, want = %v", a, o)
|
||||
}
|
||||
}()
|
||||
fd.Subscribe(testChan)
|
||||
},
|
||||
obj: testFeed3{
|
||||
a: 0,
|
||||
b: "",
|
||||
c: []byte{'A'},
|
||||
d: []byte{'B'},
|
||||
},
|
||||
expectPanic: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
if !tt.expectPanic {
|
||||
t.Errorf("panic triggered when unexpected: %v", r)
|
||||
}
|
||||
} else {
|
||||
if tt.expectPanic {
|
||||
t.Error("panic not triggered when expected")
|
||||
}
|
||||
}
|
||||
}()
|
||||
tt.testSetup(tt.evFeed, t, tt.obj)
|
||||
if gotNsent := tt.evFeed.Send(tt.obj); gotNsent != 1 {
|
||||
t.Errorf("Send() = %v, want %v", gotNsent, 1)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// The following objects below are a collection of different
|
||||
// struct types to test with.
|
||||
type testFeed struct {
|
||||
a uint64
|
||||
b string
|
||||
}
|
||||
|
||||
func (testFeed) method1() {
|
||||
|
||||
}
|
||||
|
||||
func (testFeed) method2() {
|
||||
|
||||
}
|
||||
|
||||
type testFeedWithPointer struct {
|
||||
a *uint64
|
||||
b *string
|
||||
}
|
||||
|
||||
type testFeed2 struct {
|
||||
a uint64
|
||||
b string
|
||||
c []byte
|
||||
}
|
||||
|
||||
func (testFeed2) method1() {
|
||||
|
||||
}
|
||||
|
||||
type testFeed3 struct {
|
||||
a uint64
|
||||
b string
|
||||
c, d []byte
|
||||
}
|
||||
|
||||
func (testFeed3) method1() {
|
||||
|
||||
}
|
||||
|
||||
func (testFeed3) method2() {
|
||||
|
||||
}
|
||||
|
||||
func (testFeed3) method3() {
|
||||
|
||||
}
|
||||
|
||||
type testFeedIface interface {
|
||||
method1()
|
||||
method2()
|
||||
}
|
||||
@@ -21,7 +21,7 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/shared/mclockutil"
|
||||
"github.com/prysmaticlabs/prysm/time/mclock"
|
||||
)
|
||||
|
||||
// waitQuotient is divided against the max backoff time, in order to have N requests based on the full
|
||||
@@ -74,6 +74,7 @@ type funcSub struct {
|
||||
unsubscribed bool
|
||||
}
|
||||
|
||||
// Unsubscribe unsubscribes from subscription.
|
||||
func (s *funcSub) Unsubscribe() {
|
||||
s.mu.Lock()
|
||||
if s.unsubscribed {
|
||||
@@ -87,6 +88,7 @@ func (s *funcSub) Unsubscribe() {
|
||||
<-s.err
|
||||
}
|
||||
|
||||
// Err exposes error channel.
|
||||
func (s *funcSub) Err() <-chan error {
|
||||
return s.err
|
||||
}
|
||||
@@ -118,10 +120,11 @@ type resubscribeSub struct {
|
||||
err chan error
|
||||
unsub chan struct{}
|
||||
unsubOnce sync.Once
|
||||
lastTry mclockutil.AbsTime
|
||||
lastTry mclock.AbsTime
|
||||
waitTime, backoffMax time.Duration
|
||||
}
|
||||
|
||||
// Unsubscribe unsubscribes from subscription.
|
||||
func (s *resubscribeSub) Unsubscribe() {
|
||||
s.unsubOnce.Do(func() {
|
||||
s.unsub <- struct{}{}
|
||||
@@ -129,6 +132,7 @@ func (s *resubscribeSub) Unsubscribe() {
|
||||
})
|
||||
}
|
||||
|
||||
// Err exposes error channel.
|
||||
func (s *resubscribeSub) Err() <-chan error {
|
||||
return s.err
|
||||
}
|
||||
@@ -147,11 +151,11 @@ func (s *resubscribeSub) loop() {
|
||||
}
|
||||
|
||||
func (s *resubscribeSub) subscribe() Subscription {
|
||||
subscribed := make(chan error)
|
||||
subscribed := make(chan error, 1)
|
||||
var sub Subscription
|
||||
retry:
|
||||
for {
|
||||
s.lastTry = mclockutil.Now()
|
||||
s.lastTry = mclock.Now()
|
||||
ctx, cancel := context.WithCancel(context.TODO())
|
||||
go func() {
|
||||
rsub, err := s.fn(ctx)
|
||||
@@ -190,7 +194,7 @@ func (s *resubscribeSub) waitForError(sub Subscription) bool {
|
||||
}
|
||||
|
||||
func (s *resubscribeSub) backoffWait() bool {
|
||||
if time.Duration(mclockutil.Now()-s.lastTry) > s.backoffMax {
|
||||
if time.Duration(mclock.Now()-s.lastTry) > s.backoffMax {
|
||||
s.waitTime = s.backoffMax / waitQuotient
|
||||
} else {
|
||||
s.waitTime *= 2
|
||||
@@ -267,6 +271,7 @@ func (sc *SubscriptionScope) Count() int {
|
||||
return len(sc.subs)
|
||||
}
|
||||
|
||||
// Unsubscribe unsubscribes from subscription.
|
||||
func (s *scopeSub) Unsubscribe() {
|
||||
s.s.Unsubscribe()
|
||||
s.sc.mu.Lock()
|
||||
@@ -274,6 +279,7 @@ func (s *scopeSub) Unsubscribe() {
|
||||
delete(s.sc.subs, s)
|
||||
}
|
||||
|
||||
// Err exposes error channel.
|
||||
func (s *scopeSub) Err() <-chan error {
|
||||
return s.s.Err()
|
||||
}
|
||||
@@ -19,6 +19,7 @@ package event
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"runtime"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -108,3 +109,18 @@ func TestResubscribeAbort(t *testing.T) {
|
||||
sub.Unsubscribe()
|
||||
require.NoError(t, <-done)
|
||||
}
|
||||
|
||||
func TestResubscribeNonBlocking(t *testing.T) {
|
||||
done := make(chan struct{})
|
||||
sub := Resubscribe(0, func(ctx context.Context) (Subscription, error) {
|
||||
<-done
|
||||
return nil, nil
|
||||
})
|
||||
|
||||
resub, ok := sub.(*resubscribeSub)
|
||||
require.Equal(t, true, ok)
|
||||
currNum := runtime.NumGoroutine()
|
||||
resub.unsub <- struct{}{}
|
||||
done <- struct{}{}
|
||||
require.Equal(t, currNum-1, runtime.NumGoroutine())
|
||||
}
|
||||
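Resubscribe retries the supplied establishment function using the backoff logic shown in the hunks above, doubling the wait up to the given maximum. A small, hedged usage sketch, assuming NewSubscription from the same package as shown in its example test:

package main

import (
	"context"
	"time"

	"github.com/prysmaticlabs/prysm/async/event"
)

func main() {
	// connect is a placeholder; it would normally open a real upstream subscription.
	connect := func(ctx context.Context) (event.Subscription, error) {
		return event.NewSubscription(func(quit <-chan struct{}) error {
			<-quit // block until unsubscribed
			return nil
		}), nil
	}

	// If the inner subscription fails, Resubscribe waits (up to 2s) and retries.
	sub := event.Resubscribe(2*time.Second, connect)
	defer sub.Unsubscribe()
	time.Sleep(100 * time.Millisecond)
}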
@@ -1,5 +1,5 @@
|
||||
// Package runutil includes helpers for scheduling runnable, periodic functions.
|
||||
package runutil
|
||||
package async
|
||||
|
||||
import (
|
||||
"context"
|
||||
@@ -1,18 +1,18 @@
|
||||
package runutil_test
|
||||
package async_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/shared/runutil"
|
||||
"github.com/prysmaticlabs/prysm/async"
|
||||
)
|
||||
|
||||
func TestEveryRuns(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
||||
i := 0
|
||||
runutil.RunEvery(ctx, 100*time.Millisecond, func() {
|
||||
async.RunEvery(ctx, 100*time.Millisecond, func() {
|
||||
i++
|
||||
})
|
||||
|
||||
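RunEvery keeps its signature through the rename: it schedules the callback on the given period until the context is cancelled, returning immediately. A minimal sketch of the renamed call:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/prysmaticlabs/prysm/async"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())

	// Fires roughly every 100ms until cancel() is called.
	async.RunEvery(ctx, 100*time.Millisecond, func() {
		fmt.Println("tick")
	})

	time.Sleep(350 * time.Millisecond)
	cancel()
}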
@@ -10,7 +10,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
package mputil
|
||||
package async
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
@@ -10,7 +10,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
package mputil
|
||||
package async
|
||||
|
||||
import (
|
||||
"sync"
|
||||
@@ -312,15 +312,15 @@ func TestSyncCondCompatibility(t *testing.T) {
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(2)
|
||||
cond := sync.NewCond(NewMultilock("A", "C"))
|
||||
|
||||
sharedRsc := "foo"
|
||||
var testValues = [3]string{"foo", "bar", "fizz!"}
|
||||
sharedRsc := testValues[0]
|
||||
|
||||
go func() {
|
||||
cond.L.Lock()
|
||||
for sharedRsc == "foo" {
|
||||
for sharedRsc == testValues[0] {
|
||||
cond.Wait()
|
||||
}
|
||||
sharedRsc = "fizz!"
|
||||
sharedRsc = testValues[2]
|
||||
cond.Broadcast()
|
||||
cond.L.Unlock()
|
||||
wg.Done()
|
||||
@@ -328,9 +328,9 @@ func TestSyncCondCompatibility(t *testing.T) {
|
||||
|
||||
go func() {
|
||||
cond.L.Lock()
|
||||
sharedRsc = "bar"
|
||||
sharedRsc = testValues[1]
|
||||
cond.Broadcast()
|
||||
for sharedRsc == "bar" {
|
||||
for sharedRsc == testValues[1] {
|
||||
cond.Wait()
|
||||
}
|
||||
cond.L.Unlock()
|
||||
@@ -338,5 +338,5 @@ func TestSyncCondCompatibility(t *testing.T) {
|
||||
}()
|
||||
|
||||
wg.Wait()
|
||||
assert.Equal(t, "fizz!", sharedRsc)
|
||||
assert.Equal(t, testValues[2], sharedRsc)
|
||||
}
|
||||
@@ -1,6 +1,6 @@
|
||||
// Package mputil contains useful helpers for converting
|
||||
// multi-processor computation.
|
||||
package mputil
|
||||
package async
|
||||
|
||||
import (
|
||||
"errors"
|
||||
@@ -1,11 +1,11 @@
|
||||
package mputil_test
|
||||
package async_test
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/shared/mputil"
|
||||
"github.com/prysmaticlabs/prysm/async"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
)
|
||||
@@ -46,7 +46,7 @@ func TestDouble(t *testing.T) {
|
||||
inValues[i] = i
|
||||
}
|
||||
outValues := make([]int, test.inValues)
|
||||
workerResults, err := mputil.Scatter(len(inValues), func(offset int, entries int, _ *sync.RWMutex) (interface{}, error) {
|
||||
workerResults, err := async.Scatter(len(inValues), func(offset int, entries int, _ *sync.RWMutex) (interface{}, error) {
|
||||
extent := make([]int, entries)
|
||||
for i := 0; i < entries; i++ {
|
||||
extent[i] = inValues[offset+i] * 2
|
||||
@@ -72,7 +72,7 @@ func TestDouble(t *testing.T) {
|
||||
func TestMutex(t *testing.T) {
|
||||
totalRuns := 1048576
|
||||
val := 0
|
||||
_, err := mputil.Scatter(totalRuns, func(offset int, entries int, mu *sync.RWMutex) (interface{}, error) {
|
||||
_, err := async.Scatter(totalRuns, func(offset int, entries int, mu *sync.RWMutex) (interface{}, error) {
|
||||
for i := 0; i < entries; i++ {
|
||||
mu.Lock()
|
||||
val++
|
||||
@@ -90,7 +90,7 @@ func TestMutex(t *testing.T) {
|
||||
func TestError(t *testing.T) {
|
||||
totalRuns := 1024
|
||||
val := 0
|
||||
_, err := mputil.Scatter(totalRuns, func(offset int, entries int, mu *sync.RWMutex) (interface{}, error) {
|
||||
_, err := async.Scatter(totalRuns, func(offset int, entries int, mu *sync.RWMutex) (interface{}, error) {
|
||||
for i := 0; i < entries; i++ {
|
||||
mu.Lock()
|
||||
val++
|
||||
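Scatter splits an index range across workers; each worker receives its offset and entry count plus a shared lock. The sketch below is modeled on the mutex test above and ignores the per-worker return values, since their container type is not shown in these hunks.

package main

import (
	"fmt"
	"sync"

	"github.com/prysmaticlabs/prysm/async"
)

func main() {
	values := make([]int, 1000)
	for i := range values {
		values[i] = i
	}

	sum := 0
	// Each worker handles values[offset : offset+entries] and updates the shared sum under the lock.
	_, err := async.Scatter(len(values), func(offset int, entries int, mu *sync.RWMutex) (interface{}, error) {
		partial := 0
		for i := 0; i < entries; i++ {
			partial += values[offset+i]
		}
		mu.Lock()
		sum += partial
		mu.Unlock()
		return nil, nil
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(sum) // 499500
}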
4
bazel.sh
@@ -6,6 +6,6 @@

env -i \
  PATH=/usr/bin:/bin \
  HOME=$HOME \
  GOOGLE_APPLICATION_CREDENTIALS=$GOOGLE_APPLICATION_CREDENTIALS \
  HOME="$HOME" \
  GOOGLE_APPLICATION_CREDENTIALS="$GOOGLE_APPLICATION_CREDENTIALS" \
  bazel "$@"

@@ -1,138 +1,19 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library")
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_test")
|
||||
load("@io_bazel_rules_docker//go:image.bzl", "go_image")
|
||||
load("@io_bazel_rules_docker//container:container.bzl", "container_bundle", "container_image")
|
||||
load("//tools:go_image.bzl", "go_image_alpine", "go_image_debug")
|
||||
load("@io_bazel_rules_docker//contrib:push-all.bzl", "docker_push")
|
||||
load("//tools:target_migration.bzl", "moved_targets")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"log.go",
|
||||
"main.go",
|
||||
"usage.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain",
|
||||
visibility = ["//beacon-chain:__subpackages__"],
|
||||
deps = [
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/flags:go_default_library",
|
||||
"//beacon-chain/node:go_default_library",
|
||||
"//shared/cmd:go_default_library",
|
||||
"//shared/debug:go_default_library",
|
||||
"//shared/featureconfig:go_default_library",
|
||||
"//shared/fileutil:go_default_library",
|
||||
"//shared/journald:go_default_library",
|
||||
"//shared/logutil:go_default_library",
|
||||
"//shared/maxprocs:go_default_library",
|
||||
"//shared/tos:go_default_library",
|
||||
"//shared/version:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//log:go_default_library",
|
||||
"@com_github_ipfs_go_log_v2//:go_default_library",
|
||||
"@com_github_joonix_log//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_urfave_cli_v2//:go_default_library",
|
||||
"@com_github_x_cray_logrus_prefixed_formatter//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_image(
|
||||
name = "image",
|
||||
base = select({
|
||||
"//tools:base_image_alpine": "//tools:alpine_cc_image",
|
||||
"//tools:base_image_cc": "//tools:cc_image",
|
||||
"//conditions:default": "//tools:cc_image",
|
||||
}),
|
||||
binary = ":beacon-chain",
|
||||
tags = ["manual"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
container_image(
|
||||
name = "image_with_creation_time",
|
||||
base = "image",
|
||||
stamp = True,
|
||||
)
|
||||
|
||||
container_bundle(
|
||||
name = "image_bundle",
|
||||
images = {
|
||||
"gcr.io/prysmaticlabs/prysm/beacon-chain:latest": ":image_with_creation_time",
|
||||
"gcr.io/prysmaticlabs/prysm/beacon-chain:{DOCKER_TAG}": ":image_with_creation_time",
|
||||
"index.docker.io/prysmaticlabs/prysm-beacon-chain:latest": ":image_with_creation_time",
|
||||
"index.docker.io/prysmaticlabs/prysm-beacon-chain:{DOCKER_TAG}": ":image_with_creation_time",
|
||||
},
|
||||
tags = ["manual"],
|
||||
)
|
||||
|
||||
go_image_debug(
|
||||
name = "image_debug",
|
||||
image = ":image",
|
||||
tags = ["manual"],
|
||||
)
|
||||
|
||||
container_bundle(
|
||||
name = "image_bundle_debug",
|
||||
images = {
|
||||
"gcr.io/prysmaticlabs/prysm/beacon-chain:latest-debug": ":image_debug",
|
||||
"gcr.io/prysmaticlabs/prysm/beacon-chain:{DOCKER_TAG}-debug": ":image_debug",
|
||||
"index.docker.io/prysmaticlabs/prysm-beacon-chain:latest-debug": ":image_debug",
|
||||
"index.docker.io/prysmaticlabs/prysm-beacon-chain:{DOCKER_TAG}-debug": ":image_debug",
|
||||
},
|
||||
tags = ["manual"],
|
||||
)
|
||||
|
||||
go_image_alpine(
|
||||
name = "image_alpine",
|
||||
image = ":image",
|
||||
tags = ["manual"],
|
||||
)
|
||||
|
||||
container_bundle(
|
||||
name = "image_bundle_alpine",
|
||||
images = {
|
||||
"gcr.io/prysmaticlabs/prysm/beacon-chain:latest-alpine": ":image_alpine",
|
||||
"gcr.io/prysmaticlabs/prysm/beacon-chain:{DOCKER_TAG}-alpine": ":image_alpine",
|
||||
"index.docker.io/prysmaticlabs/prysm-beacon-chain:latest-alpine": ":image_alpine",
|
||||
"index.docker.io/prysmaticlabs/prysm-beacon-chain:{DOCKER_TAG}-alpine": ":image_alpine",
|
||||
},
|
||||
tags = ["manual"],
|
||||
)
|
||||
|
||||
docker_push(
|
||||
name = "push_images",
|
||||
bundle = ":image_bundle",
|
||||
tags = ["manual"],
|
||||
)
|
||||
|
||||
docker_push(
|
||||
name = "push_images_debug",
|
||||
bundle = ":image_bundle_debug",
|
||||
tags = ["manual"],
|
||||
)
|
||||
|
||||
docker_push(
|
||||
name = "push_images_alpine",
|
||||
bundle = ":image_bundle_alpine",
|
||||
tags = ["manual"],
|
||||
)
|
||||
|
||||
go_binary(
|
||||
name = "beacon-chain",
|
||||
embed = [":go_default_library"],
|
||||
visibility = [
|
||||
"//beacon-chain:__subpackages__",
|
||||
"//endtoend:__pkg__",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
size = "small",
|
||||
srcs = ["usage_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//shared/featureconfig:go_default_library",
|
||||
"@com_github_urfave_cli_v2//:go_default_library",
|
||||
moved_targets(
|
||||
[
|
||||
":push_images_debug",
|
||||
":push_images_alpine",
|
||||
":push_images",
|
||||
":image_bundle_debug",
|
||||
":image_debug",
|
||||
":image_bundle_alpine",
|
||||
":image_bundle",
|
||||
":image_with_creation_time",
|
||||
":image_alpine",
|
||||
":image",
|
||||
":go_default_test",
|
||||
":beacon-chain",
|
||||
],
|
||||
"//cmd/beacon-chain",
|
||||
)
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
# Prysmatic Labs Beacon Chain Implementation
|
||||
|
||||
This is the main project folder for the beacon chain implementation of eth2 written in Go by [Prysmatic Labs](https://prysmaticlabs.com).
|
||||
This is the main project folder for the beacon chain implementation of Ethereum written in Go by [Prysmatic Labs](https://prysmaticlabs.com).
|
||||
|
||||
You can also read our main [README](https://github.com/prysmaticlabs/prysm/blob/master/README.md) and join our active chat room on Discord.
|
||||
|
||||
[](https://discord.gg/CTYGPUJ)
|
||||
|
||||
Also, read the official beacon chain [specification](https://github.com/ethereum/eth2.0-specs/blob/master/specs/phase0/beacon-chain.md), this design spec serves as a source of truth for the beacon chain implementation we follow at Prysmatic Labs.
|
||||
Also, read the official beacon chain [specification](https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/beacon-chain.md), this design spec serves as a source of truth for the beacon chain implementation we follow at Prysmatic Labs.
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library")
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_test")
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"chain_info.go",
|
||||
"head.go",
|
||||
"head_sync_committee_info.go",
|
||||
"info.go",
|
||||
"init_sync_process_block.go",
|
||||
"log.go",
|
||||
@@ -22,20 +22,21 @@ go_library(
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/blockchain",
|
||||
visibility = [
|
||||
"//beacon-chain:__subpackages__",
|
||||
"//fuzz:__pkg__",
|
||||
"//testing/fuzz:__pkg__",
|
||||
],
|
||||
deps = [
|
||||
"//async:go_default_library",
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/cache/depositcache:go_default_library",
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
"//beacon-chain/core:go_default_library",
|
||||
"//beacon-chain/core/altair:go_default_library",
|
||||
"//beacon-chain/core/epoch/precompute:go_default_library",
|
||||
"//beacon-chain/core/feed:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/state:go_default_library",
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/db/filters:go_default_library",
|
||||
"//beacon-chain/flags:go_default_library",
|
||||
"//beacon-chain/forkchoice:go_default_library",
|
||||
"//beacon-chain/forkchoice/protoarray:go_default_library",
|
||||
"//beacon-chain/operations/attestations:go_default_library",
|
||||
@@ -45,21 +46,24 @@ go_library(
|
||||
"//beacon-chain/powchain:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/stategen:go_default_library",
|
||||
"//proto/beacon/p2p/v1:go_default_library",
|
||||
"//shared/attestationutil:go_default_library",
|
||||
"//shared/bls:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//crypto/bls:go_default_library",
|
||||
"//monitoring/tracing:go_default_library",
|
||||
"//proto/eth/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/attestation:go_default_library",
|
||||
"//proto/prysm/v1alpha1/block:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//shared/bytesutil:go_default_library",
|
||||
"//shared/featureconfig:go_default_library",
|
||||
"//shared/mputil:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
"//shared/slotutil:go_default_library",
|
||||
"//shared/timeutils:go_default_library",
|
||||
"//shared/traceutil:go_default_library",
|
||||
"//time:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_emicklei_dot//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
|
||||
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@io_opencensus_go//trace:go_default_library",
|
||||
],
|
||||
@@ -80,8 +84,11 @@ go_test(
|
||||
"blockchain_test.go",
|
||||
"chain_info_test.go",
|
||||
"checktags_test.go",
|
||||
"head_sync_committee_info_test.go",
|
||||
"head_test.go",
|
||||
"info_test.go",
|
||||
"init_test.go",
|
||||
"log_test.go",
|
||||
"metrics_test.go",
|
||||
"process_attestation_test.go",
|
||||
"process_block_test.go",
|
||||
@@ -93,20 +100,21 @@ go_test(
|
||||
embed = [":go_default_library"],
|
||||
gotags = ["develop"],
|
||||
deps = [
|
||||
"//async/event:go_default_library",
|
||||
"//beacon-chain/blockchain/testing:go_default_library",
|
||||
"//beacon-chain/cache/depositcache:go_default_library",
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/state:go_default_library",
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/db/testing:go_default_library",
|
||||
"//beacon-chain/p2p:go_default_library",
|
||||
"//beacon-chain/powchain:go_default_library",
|
||||
"//beacon-chain/state/stateutil:go_default_library",
|
||||
"//proto/beacon/db:go_default_library",
|
||||
"//proto/beacon/p2p/v1:go_default_library",
|
||||
"//beacon-chain/state/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/wrapper:go_default_library",
|
||||
"//shared/bytesutil:go_default_library",
|
||||
"//shared/event:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
"//shared/testutil:go_default_library",
|
||||
"//shared/testutil/assert:go_default_library",
|
||||
@@ -114,12 +122,10 @@ go_test(
|
||||
"@com_github_ethereum_go_ethereum//:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
|
||||
"@com_github_gogo_protobuf//proto:go_default_library",
|
||||
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_ssz//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
|
||||
"@in_gopkg_d4l3k_messagediff_v1//:go_default_library",
|
||||
"@org_golang_google_protobuf//proto:go_default_library",
|
||||
"@org_golang_x_net//context:go_default_library",
|
||||
],
|
||||
)
|
||||
@@ -129,6 +135,7 @@ go_test(
|
||||
srcs = [
|
||||
"chain_info_norace_test.go",
|
||||
"checktags_test.go",
|
||||
"init_test.go",
|
||||
"receive_block_test.go",
|
||||
"service_norace_test.go",
|
||||
],
|
||||
@@ -144,18 +151,19 @@ go_test(
|
||||
race = "on",
|
||||
tags = ["race_on"],
|
||||
deps = [
|
||||
"//async/event:go_default_library",
|
||||
"//beacon-chain/blockchain/testing:go_default_library",
|
||||
"//beacon-chain/cache/depositcache:go_default_library",
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/state:go_default_library",
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/db/testing:go_default_library",
|
||||
"//beacon-chain/p2p:go_default_library",
|
||||
"//beacon-chain/powchain:go_default_library",
|
||||
"//proto/beacon/p2p/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/wrapper:go_default_library",
|
||||
"//shared/bytesutil:go_default_library",
|
||||
"//shared/event:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
"//shared/testutil:go_default_library",
|
||||
"//shared/testutil/assert:go_default_library",
|
||||
@@ -163,11 +171,9 @@ go_test(
|
||||
"@com_github_ethereum_go_ethereum//:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
|
||||
"@com_github_gogo_protobuf//proto:go_default_library",
|
||||
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_ssz//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
|
||||
"@org_golang_google_protobuf//proto:go_default_library",
|
||||
"@org_golang_x_net//context:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -4,15 +4,15 @@ import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"go.opencensus.io/trace"
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// ChainInfoFetcher defines a common interface for methods in blockchain service which
|
||||
@@ -22,15 +22,18 @@ type ChainInfoFetcher interface {
|
||||
FinalizationFetcher
|
||||
GenesisFetcher
|
||||
CanonicalFetcher
|
||||
ForkFetcher
|
||||
TimeFetcher
|
||||
HeadDomainFetcher
|
||||
}
|
||||
|
||||
// TimeFetcher retrieves the Eth2 data that's related to time.
|
||||
// TimeFetcher retrieves the Ethereum consensus data that's related to time.
|
||||
type TimeFetcher interface {
|
||||
GenesisTime() time.Time
|
||||
CurrentSlot() uint64
|
||||
CurrentSlot() types.Slot
|
||||
}
|
||||
|
||||
// GenesisFetcher retrieves the eth2 data related to its genesis.
|
||||
// GenesisFetcher retrieves the Ethereum consensus data related to its genesis.
|
||||
type GenesisFetcher interface {
|
||||
GenesisValidatorRoot() [32]byte
|
||||
}
|
||||
@@ -38,20 +41,25 @@ type GenesisFetcher interface {
|
||||
// HeadFetcher defines a common interface for methods in blockchain service which
|
||||
// directly retrieves head related data.
|
||||
type HeadFetcher interface {
|
||||
HeadSlot() uint64
|
||||
HeadSlot() types.Slot
|
||||
HeadRoot(ctx context.Context) ([]byte, error)
|
||||
HeadBlock(ctx context.Context) (*ethpb.SignedBeaconBlock, error)
|
||||
HeadState(ctx context.Context) (*state.BeaconState, error)
|
||||
HeadValidatorsIndices(ctx context.Context, epoch uint64) ([]uint64, error)
|
||||
HeadSeed(ctx context.Context, epoch uint64) ([32]byte, error)
|
||||
HeadBlock(ctx context.Context) (block.SignedBeaconBlock, error)
|
||||
HeadState(ctx context.Context) (state.BeaconState, error)
|
||||
HeadValidatorsIndices(ctx context.Context, epoch types.Epoch) ([]types.ValidatorIndex, error)
|
||||
HeadSeed(ctx context.Context, epoch types.Epoch) ([32]byte, error)
|
||||
HeadGenesisValidatorRoot() [32]byte
|
||||
HeadETH1Data() *ethpb.Eth1Data
|
||||
HeadPublicKeyToValidatorIndex(ctx context.Context, pubKey [48]byte) (types.ValidatorIndex, bool)
|
||||
HeadValidatorIndexToPublicKey(ctx context.Context, index types.ValidatorIndex) ([48]byte, error)
|
||||
ProtoArrayStore() *protoarray.Store
|
||||
ChainHeads() ([][32]byte, []types.Slot)
|
||||
HeadSyncCommitteeFetcher
|
||||
HeadDomainFetcher
|
||||
}
|
||||
|
||||
// ForkFetcher retrieves the current fork information of the Ethereum beacon chain.
|
||||
type ForkFetcher interface {
|
||||
CurrentFork() *pb.Fork
|
||||
CurrentFork() *ethpb.Fork
|
||||
}
|
||||
|
||||
// CanonicalFetcher retrieves the current chain's canonical information.
|
||||
@@ -74,7 +82,7 @@ func (s *Service) FinalizedCheckpt() *ethpb.Checkpoint {
|
||||
return ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
}
|
||||
|
||||
return state.CopyCheckpoint(s.finalizedCheckpt)
|
||||
return ethpb.CopyCheckpoint(s.finalizedCheckpt)
|
||||
}
|
||||
|
||||
// CurrentJustifiedCheckpt returns the current justified checkpoint from head state.
|
||||
@@ -83,7 +91,7 @@ func (s *Service) CurrentJustifiedCheckpt() *ethpb.Checkpoint {
|
||||
return ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
}
|
||||
|
||||
return state.CopyCheckpoint(s.justifiedCheckpt)
|
||||
return ethpb.CopyCheckpoint(s.justifiedCheckpt)
|
||||
}
|
||||
|
||||
// PreviousJustifiedCheckpt returns the previous justified checkpoint from head state.
|
||||
@@ -92,11 +100,11 @@ func (s *Service) PreviousJustifiedCheckpt() *ethpb.Checkpoint {
|
||||
return ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
}
|
||||
|
||||
return state.CopyCheckpoint(s.prevJustifiedCheckpt)
|
||||
return ethpb.CopyCheckpoint(s.prevJustifiedCheckpt)
|
||||
}
|
||||
|
||||
// HeadSlot returns the slot of the head of the chain.
|
||||
func (s *Service) HeadSlot() uint64 {
|
||||
func (s *Service) HeadSlot() types.Slot {
|
||||
s.headLock.RLock()
|
||||
defer s.headLock.RUnlock()
|
||||
|
||||
@@ -117,15 +125,15 @@ func (s *Service) HeadRoot(ctx context.Context) ([]byte, error) {
|
||||
return r[:], nil
|
||||
}
|
||||
|
||||
b, err := s.beaconDB.HeadBlock(ctx)
|
||||
b, err := s.cfg.BeaconDB.HeadBlock(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if b == nil {
|
||||
if b == nil || b.IsNil() {
|
||||
return params.BeaconConfig().ZeroHash[:], nil
|
||||
}
|
||||
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
r, err := b.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -136,7 +144,7 @@ func (s *Service) HeadRoot(ctx context.Context) ([]byte, error) {
|
||||
// HeadBlock returns the head block of the chain.
|
||||
// If the head is nil from service struct,
|
||||
// it will attempt to get the head block from DB.
|
||||
func (s *Service) HeadBlock(ctx context.Context) (*ethpb.SignedBeaconBlock, error) {
|
||||
func (s *Service) HeadBlock(ctx context.Context) (block.SignedBeaconBlock, error) {
|
||||
s.headLock.RLock()
|
||||
defer s.headLock.RUnlock()
|
||||
|
||||
@@ -144,13 +152,13 @@ func (s *Service) HeadBlock(ctx context.Context) (*ethpb.SignedBeaconBlock, erro
|
||||
return s.headBlock(), nil
|
||||
}
|
||||
|
||||
return s.beaconDB.HeadBlock(ctx)
|
||||
return s.cfg.BeaconDB.HeadBlock(ctx)
|
||||
}
|
||||
|
||||
// HeadState returns the head state of the chain.
|
||||
// If the head is nil from service struct,
|
||||
// it will attempt to get the head state from DB.
|
||||
func (s *Service) HeadState(ctx context.Context) (*state.BeaconState, error) {
|
||||
func (s *Service) HeadState(ctx context.Context) (state.BeaconState, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.HeadState")
|
||||
defer span.End()
|
||||
s.headLock.RLock()
|
||||
@@ -163,22 +171,22 @@ func (s *Service) HeadState(ctx context.Context) (*state.BeaconState, error) {
|
||||
return s.headState(ctx), nil
|
||||
}
|
||||
|
||||
return s.stateGen.StateByRoot(ctx, s.headRoot())
|
||||
return s.cfg.StateGen.StateByRoot(ctx, s.headRoot())
|
||||
}
|
||||
|
||||
// HeadValidatorsIndices returns a list of active validator indices from the head view of a given epoch.
|
||||
func (s *Service) HeadValidatorsIndices(ctx context.Context, epoch uint64) ([]uint64, error) {
|
||||
func (s *Service) HeadValidatorsIndices(ctx context.Context, epoch types.Epoch) ([]types.ValidatorIndex, error) {
|
||||
s.headLock.RLock()
|
||||
defer s.headLock.RUnlock()
|
||||
|
||||
if !s.hasHeadState() {
|
||||
return []uint64{}, nil
|
||||
return []types.ValidatorIndex{}, nil
|
||||
}
|
||||
return helpers.ActiveValidatorIndices(s.headState(ctx), epoch)
|
||||
}
|
||||
|
||||
// HeadSeed returns the seed from the head view of a given epoch.
|
||||
func (s *Service) HeadSeed(ctx context.Context, epoch uint64) ([32]byte, error) {
|
||||
func (s *Service) HeadSeed(ctx context.Context, epoch types.Epoch) ([32]byte, error) {
|
||||
s.headLock.RLock()
|
||||
defer s.headLock.RUnlock()
|
||||
|
||||
@@ -214,7 +222,7 @@ func (s *Service) HeadETH1Data() *ethpb.Eth1Data {
|
||||
|
||||
// ProtoArrayStore returns the proto array store object.
|
||||
func (s *Service) ProtoArrayStore() *protoarray.Store {
|
||||
return s.forkChoiceStore.Store()
|
||||
return s.cfg.ForkChoiceStore.Store()
|
||||
}
|
||||
|
||||
// GenesisTime returns the genesis time of beacon chain.
|
||||
@@ -235,12 +243,12 @@ func (s *Service) GenesisValidatorRoot() [32]byte {
|
||||
}
|
||||
|
||||
// CurrentFork retrieves the latest fork information of the beacon chain.
|
||||
func (s *Service) CurrentFork() *pb.Fork {
|
||||
func (s *Service) CurrentFork() *ethpb.Fork {
|
||||
s.headLock.RLock()
|
||||
defer s.headLock.RUnlock()
|
||||
|
||||
if !s.hasHeadState() {
|
||||
return &pb.Fork{
|
||||
return ðpb.Fork{
|
||||
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
}
|
||||
@@ -251,10 +259,52 @@ func (s *Service) CurrentFork() *pb.Fork {
|
||||
// IsCanonical returns true if the input block root is part of the canonical chain.
|
||||
func (s *Service) IsCanonical(ctx context.Context, blockRoot [32]byte) (bool, error) {
|
||||
// If the block has been finalized, the block will always be part of the canonical chain.
|
||||
if s.beaconDB.IsFinalizedBlock(ctx, blockRoot) {
|
||||
if s.cfg.BeaconDB.IsFinalizedBlock(ctx, blockRoot) {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// If the block has not been finalized, check fork choice store to see if the block is canonical
|
||||
return s.forkChoiceStore.IsCanonical(blockRoot), nil
|
||||
return s.cfg.ForkChoiceStore.IsCanonical(blockRoot), nil
|
||||
}
|
||||
|
||||
// ChainHeads returns all possible chain heads (leaves of fork choice tree).
|
||||
// Heads roots and heads slots are returned.
|
||||
func (s *Service) ChainHeads() ([][32]byte, []types.Slot) {
|
||||
nodes := s.ProtoArrayStore().Nodes()
|
||||
|
||||
// Deliberate choice to not preallocate space for below.
|
||||
// Heads can't be more than 2-3 in the worst case, whereas pre-allocation would be 64 to begin with.
|
||||
headsRoots := make([][32]byte, 0)
|
||||
headsSlots := make([]types.Slot, 0)
|
||||
|
||||
nonExistentNode := ^uint64(0)
|
||||
for _, node := range nodes {
|
||||
// Possible heads have no children.
|
||||
if node.BestDescendant() == nonExistentNode && node.BestChild() == nonExistentNode {
|
||||
headsRoots = append(headsRoots, node.Root())
|
||||
headsSlots = append(headsSlots, node.Slot())
|
||||
}
|
||||
}
|
||||
|
||||
return headsRoots, headsSlots
|
||||
}
|
||||
|
||||
// HeadPublicKeyToValidatorIndex returns the validator index of the `pubkey` in current head state.
|
||||
func (s *Service) HeadPublicKeyToValidatorIndex(ctx context.Context, pubKey [48]byte) (types.ValidatorIndex, bool) {
|
||||
s.headLock.RLock()
|
||||
defer s.headLock.RUnlock()
|
||||
|
||||
return s.headState(ctx).ValidatorIndexByPubkey(pubKey)
|
||||
}
|
||||
|
||||
// HeadValidatorIndexToPublicKey returns the pubkey of the validator `index` in current head state.
|
||||
func (s *Service) HeadValidatorIndexToPublicKey(ctx context.Context, index types.ValidatorIndex) ([48]byte, error) {
|
||||
s.headLock.RLock()
|
||||
defer s.headLock.RUnlock()
|
||||
|
||||
v, err := s.headValidatorAtIndex(index)
|
||||
if err != nil {
|
||||
return [48]byte{}, err
|
||||
}
|
||||
return v.PublicKey(), nil
|
||||
}
|
||||
|
||||
@@ -4,16 +4,17 @@ import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
)
|
||||
|
||||
func TestHeadSlot_DataRace(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
s := &Service{
|
||||
beaconDB: beaconDB,
|
||||
cfg: &Config{BeaconDB: beaconDB},
|
||||
}
|
||||
go func() {
|
||||
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
|
||||
@@ -24,9 +25,8 @@ func TestHeadSlot_DataRace(t *testing.T) {
|
||||
func TestHeadRoot_DataRace(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
s := &Service{
|
||||
beaconDB: beaconDB,
|
||||
head: &head{root: [32]byte{'A'}},
|
||||
stateGen: stategen.New(beaconDB),
|
||||
cfg: &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
|
||||
head: &head{root: [32]byte{'A'}},
|
||||
}
|
||||
go func() {
|
||||
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
|
||||
@@ -38,9 +38,8 @@ func TestHeadRoot_DataRace(t *testing.T) {
|
||||
func TestHeadBlock_DataRace(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
s := &Service{
|
||||
beaconDB: beaconDB,
|
||||
head: &head{block: ðpb.SignedBeaconBlock{}},
|
||||
stateGen: stategen.New(beaconDB),
|
||||
cfg: &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
|
||||
head: &head{block: wrapper.WrappedPhase0SignedBeaconBlock(ðpb.SignedBeaconBlock{})},
|
||||
}
|
||||
go func() {
|
||||
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
|
||||
@@ -52,8 +51,7 @@ func TestHeadBlock_DataRace(t *testing.T) {
|
||||
func TestHeadState_DataRace(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
s := &Service{
|
||||
beaconDB: beaconDB,
|
||||
stateGen: stategen.New(beaconDB),
|
||||
cfg: &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
|
||||
}
|
||||
go func() {
|
||||
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
|
||||
|
||||
@@ -5,18 +5,19 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
// Ensure Service implements chain info interface.
|
||||
@@ -103,10 +104,10 @@ func TestPrevJustifiedCheckpt_GenesisRootOk(t *testing.T) {
|
||||
|
||||
func TestHeadSlot_CanRetrieve(t *testing.T) {
|
||||
c := &Service{}
|
||||
s, err := state.InitializeFromProto(&pb.BeaconState{})
|
||||
s, err := v1.InitializeFromProto(ðpb.BeaconState{})
|
||||
require.NoError(t, err)
|
||||
c.head = &head{slot: 100, state: s}
|
||||
assert.Equal(t, uint64(100), c.HeadSlot())
|
||||
assert.Equal(t, types.Slot(100), c.HeadSlot())
|
||||
}
|
||||
|
||||
func TestHeadRoot_CanRetrieve(t *testing.T) {
|
||||
@@ -119,13 +120,13 @@ func TestHeadRoot_CanRetrieve(t *testing.T) {
|
||||
|
||||
func TestHeadRoot_UseDB(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
c := &Service{beaconDB: beaconDB}
|
||||
c := &Service{cfg: &Config{BeaconDB: beaconDB}}
|
||||
c.head = &head{root: params.BeaconConfig().ZeroHash}
|
||||
b := testutil.NewBeaconBlock()
|
||||
br, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(context.Background(), b))
|
||||
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &pb.StateSummary{Root: br[:]}))
|
||||
require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(b)))
|
||||
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), ðpb.StateSummary{Root: br[:]}))
|
||||
require.NoError(t, beaconDB.SaveHeadBlockRoot(context.Background(), br))
|
||||
r, err := c.HeadRoot(context.Background())
|
||||
require.NoError(t, err)
|
||||
@@ -135,18 +136,18 @@ func TestHeadRoot_UseDB(t *testing.T) {
|
||||
func TestHeadBlock_CanRetrieve(t *testing.T) {
|
||||
b := testutil.NewBeaconBlock()
|
||||
b.Block.Slot = 1
|
||||
s, err := state.InitializeFromProto(&pb.BeaconState{})
|
||||
s, err := v1.InitializeFromProto(ðpb.BeaconState{})
|
||||
require.NoError(t, err)
|
||||
c := &Service{}
|
||||
c.head = &head{block: b, state: s}
|
||||
c.head = &head{block: wrapper.WrappedPhase0SignedBeaconBlock(b), state: s}
|
||||
|
||||
received, err := c.HeadBlock(context.Background())
require.NoError(t, err)
assert.DeepEqual(t, b, received, "Incorrect head block received")
assert.DeepEqual(t, b, received.Proto(), "Incorrect head block received")
|
||||
}
|
||||
|
||||
func TestHeadState_CanRetrieve(t *testing.T) {
|
||||
s, err := state.InitializeFromProto(&pb.BeaconState{Slot: 2, GenesisValidatorsRoot: params.BeaconConfig().ZeroHash[:]})
|
||||
s, err := v1.InitializeFromProto(ðpb.BeaconState{Slot: 2, GenesisValidatorsRoot: params.BeaconConfig().ZeroHash[:]})
|
||||
require.NoError(t, err)
|
||||
c := &Service{}
|
||||
c.head = &head{state: s}
|
||||
@@ -162,8 +163,8 @@ func TestGenesisTime_CanRetrieve(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestCurrentFork_CanRetrieve(t *testing.T) {
|
||||
f := &pb.Fork{Epoch: 999}
|
||||
s, err := state.InitializeFromProto(&pb.BeaconState{Fork: f})
|
||||
f := ðpb.Fork{Epoch: 999}
|
||||
s, err := v1.InitializeFromProto(ðpb.BeaconState{Fork: f})
|
||||
require.NoError(t, err)
|
||||
c := &Service{}
|
||||
c.head = &head{state: s}
|
||||
@@ -173,7 +174,7 @@ func TestCurrentFork_CanRetrieve(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestCurrentFork_NilHeadState(t *testing.T) {
|
||||
f := &pb.Fork{
|
||||
f := ðpb.Fork{
|
||||
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
}
|
||||
@@ -188,7 +189,7 @@ func TestGenesisValidatorRoot_CanRetrieve(t *testing.T) {
|
||||
c := &Service{}
|
||||
assert.Equal(t, [32]byte{}, c.GenesisValidatorRoot(), "Did not get correct genesis validator root")
|
||||
|
||||
s, err := state.InitializeFromProto(&pb.BeaconState{GenesisValidatorsRoot: []byte{'a'}})
|
||||
s, err := v1.InitializeFromProto(ðpb.BeaconState{GenesisValidatorsRoot: []byte{'a'}})
|
||||
require.NoError(t, err)
|
||||
c.head = &head{state: s}
|
||||
assert.Equal(t, [32]byte{'a'}, c.GenesisValidatorRoot(), "Did not get correct genesis validator root")
|
||||
@@ -202,7 +203,7 @@ func TestHeadETH1Data_Nil(t *testing.T) {
|
||||
|
||||
func TestHeadETH1Data_CanRetrieve(t *testing.T) {
|
||||
d := ðpb.Eth1Data{DepositCount: 999}
|
||||
s, err := state.InitializeFromProto(&pb.BeaconState{Eth1Data: d})
|
||||
s, err := v1.InitializeFromProto(ðpb.BeaconState{Eth1Data: d})
|
||||
require.NoError(t, err)
|
||||
c := &Service{}
|
||||
c.head = &head{state: s}
|
||||
@@ -220,7 +221,7 @@ func TestIsCanonical_Ok(t *testing.T) {
|
||||
blk.Block.Slot = 0
|
||||
root, err := blk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, blk))
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(blk)))
|
||||
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, root))
|
||||
can, err := c.IsCanonical(ctx, root)
|
||||
require.NoError(t, err)
|
||||
@@ -277,7 +278,51 @@ func TestService_HeadGenesisValidatorRoot(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestService_ProtoArrayStore(t *testing.T) {
|
||||
c := &Service{forkChoiceStore: protoarray.New(0, 0, [32]byte{})}
|
||||
c := &Service{cfg: &Config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}}
|
||||
p := c.ProtoArrayStore()
|
||||
require.Equal(t, 0, int(p.FinalizedEpoch()))
|
||||
}
|
||||
|
||||
func TestService_ChainHeads(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
c := &Service{cfg: &Config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}}
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, [32]byte{}, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{}, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{}, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 103, [32]byte{'d'}, [32]byte{}, [32]byte{}, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, [32]byte{}, 0, 0))
|
||||
|
||||
roots, slots := c.ChainHeads()
|
||||
require.DeepEqual(t, [][32]byte{{'c'}, {'d'}, {'e'}}, roots)
|
||||
require.DeepEqual(t, []types.Slot{102, 103, 104}, slots)
|
||||
}
|
||||
|
||||
func TestService_HeadPublicKeyToValidatorIndex(t *testing.T) {
|
||||
s, _ := testutil.DeterministicGenesisState(t, 10)
|
||||
c := &Service{}
|
||||
c.head = &head{state: s}
|
||||
|
||||
_, e := c.HeadPublicKeyToValidatorIndex(context.Background(), [48]byte{})
|
||||
require.Equal(t, false, e)
|
||||
|
||||
v, err := s.ValidatorAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
|
||||
i, e := c.HeadPublicKeyToValidatorIndex(context.Background(), bytesutil.ToBytes48(v.PublicKey))
|
||||
require.Equal(t, true, e)
|
||||
require.Equal(t, types.ValidatorIndex(0), i)
|
||||
}
|
||||
|
||||
func TestService_HeadValidatorIndexToPublicKey(t *testing.T) {
|
||||
s, _ := testutil.DeterministicGenesisState(t, 10)
|
||||
c := &Service{}
|
||||
c.head = &head{state: s}
|
||||
|
||||
p, err := c.HeadValidatorIndexToPublicKey(context.Background(), 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
v, err := s.ValidatorAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, bytesutil.ToBytes48(v.PublicKey), p)
|
||||
}
|
||||
|
||||
@@ -6,24 +6,29 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
|
||||
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/config/features"
|
||||
ethpbv1 "github.com/prysmaticlabs/prysm/proto/eth/v1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// This defines the current chain service's view of head.
type head struct {
slot uint64 // current head slot.
root [32]byte // current head root.
block *ethpb.SignedBeaconBlock // current head block.
state *stateTrie.BeaconState // current head state.
slot types.Slot // current head slot.
root [32]byte // current head root.
block block.SignedBeaconBlock // current head block.
state state.BeaconState // current head state.
}
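The fields above move from concrete protobuf pointers to the block.SignedBeaconBlock and state.BeaconState interfaces. A minimal sketch of how a caller builds such a value, assuming only packages already imported in this diff (ethpb, wrapper, state, block); the helper name buildHead is hypothetical and not part of the change:

	func buildHead(root [32]byte, blk *ethpb.SignedBeaconBlock, st state.BeaconState) *head {
		// Wrap the concrete phase 0 block into the version-agnostic interface,
		// mirroring how the updated tests construct head values.
		wrapped := wrapper.WrappedPhase0SignedBeaconBlock(blk)
		return &head{
			slot:  wrapped.Block().Slot(),
			root:  root,
			block: wrapped,
			state: st,
		}
	}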
|
||||
|
||||
// Determines the head from the fork choice service and saves its new data
|
||||
@@ -56,18 +61,18 @@ func (s *Service) updateHead(ctx context.Context, balances []uint64) error {
|
||||
// If the fork choice store is missing justified block info, a node should
|
||||
// re-initiate fork choice store using the latest justified info.
|
||||
// This recovers from a fatal condition and should not happen at runtime.
|
||||
if !s.forkChoiceStore.HasNode(headStartRoot) {
|
||||
jb, err := s.beaconDB.Block(ctx, headStartRoot)
|
||||
if !s.cfg.ForkChoiceStore.HasNode(headStartRoot) {
|
||||
jb, err := s.cfg.BeaconDB.Block(ctx, headStartRoot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s.forkChoiceStore = protoarray.New(j.Epoch, f.Epoch, bytesutil.ToBytes32(f.Root))
|
||||
if err := s.insertBlockToForkChoiceStore(ctx, jb.Block, headStartRoot, f, j); err != nil {
|
||||
s.cfg.ForkChoiceStore = protoarray.New(j.Epoch, f.Epoch, bytesutil.ToBytes32(f.Root))
|
||||
if err := s.insertBlockToForkChoiceStore(ctx, jb.Block(), headStartRoot, f, j); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
headRoot, err := s.forkChoiceStore.Head(ctx, j.Epoch, headStartRoot, balances, f.Epoch)
|
||||
headRoot, err := s.cfg.ForkChoiceStore.Head(ctx, j.Epoch, headStartRoot, balances, f.Epoch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -93,43 +98,57 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error {
|
||||
|
||||
// If the head state is not available, just return nil.
|
||||
// There's nothing to cache
|
||||
if !s.beaconDB.HasStateSummary(ctx, headRoot) {
|
||||
if !s.cfg.BeaconDB.HasStateSummary(ctx, headRoot) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get the new head block from DB.
|
||||
newHeadBlock, err := s.beaconDB.Block(ctx, headRoot)
|
||||
newHeadBlock, err := s.cfg.BeaconDB.Block(ctx, headRoot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if newHeadBlock == nil || newHeadBlock.Block == nil {
|
||||
if newHeadBlock == nil || newHeadBlock.IsNil() || newHeadBlock.Block().IsNil() {
|
||||
return errors.New("cannot save nil head block")
|
||||
}
|
||||
|
||||
// Get the new head state from cached state or DB.
|
||||
newHeadState, err := s.stateGen.StateByRoot(ctx, headRoot)
|
||||
newHeadState, err := s.cfg.StateGen.StateByRoot(ctx, headRoot)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not retrieve head state in DB")
|
||||
}
|
||||
if newHeadState == nil {
|
||||
if newHeadState == nil || newHeadState.IsNil() {
|
||||
return errors.New("cannot save nil head state")
|
||||
}
|
||||
|
||||
// If a chain re-org occurred, fire an event notifying the rest of the services.
|
||||
headSlot := s.HeadSlot()
|
||||
if bytesutil.ToBytes32(newHeadBlock.Block.ParentRoot) != bytesutil.ToBytes32(r) {
|
||||
newHeadSlot := newHeadBlock.Block().Slot()
|
||||
oldHeadRoot := s.headRoot()
|
||||
oldStateRoot := s.headBlock().Block().StateRoot()
|
||||
newStateRoot := newHeadBlock.Block().StateRoot()
|
||||
if bytesutil.ToBytes32(newHeadBlock.Block().ParentRoot()) != bytesutil.ToBytes32(r) {
|
||||
log.WithFields(logrus.Fields{
|
||||
"newSlot": fmt.Sprintf("%d", newHeadBlock.Block.Slot),
|
||||
"newSlot": fmt.Sprintf("%d", newHeadSlot),
|
||||
"oldSlot": fmt.Sprintf("%d", headSlot),
|
||||
}).Debug("Chain reorg occurred")
|
||||
s.stateNotifier.StateFeed().Send(&feed.Event{
|
||||
absoluteSlotDifference := slots.AbsoluteValueSlotDifference(newHeadSlot, headSlot)
|
||||
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
|
||||
Type: statefeed.Reorg,
|
||||
Data: &statefeed.ReorgData{
|
||||
NewSlot: newHeadBlock.Block.Slot,
|
||||
OldSlot: headSlot,
|
||||
Data: ðpbv1.EventChainReorg{
|
||||
Slot: newHeadSlot,
|
||||
Depth: absoluteSlotDifference,
|
||||
OldHeadBlock: oldHeadRoot[:],
|
||||
NewHeadBlock: headRoot[:],
|
||||
OldHeadState: oldStateRoot,
|
||||
NewHeadState: newStateRoot,
|
||||
Epoch: core.SlotToEpoch(newHeadSlot),
|
||||
},
|
||||
})
|
||||
|
||||
if err := s.saveOrphanedAtts(ctx, bytesutil.ToBytes32(r)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
reorgCount.Inc()
|
||||
}
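For concreteness (illustrative numbers, not taken from the diff): if the previous head sat at slot 100 and the new head block at slot 103 does not have the old head as its parent, the EventChainReorg sent above carries Slot 103, Depth 3 (the absolute slot difference), the old and new head block roots, the old and new state roots, and Epoch SlotToEpoch(103).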
|
||||
|
||||
@@ -137,17 +156,28 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error {
|
||||
s.setHead(headRoot, newHeadBlock, newHeadState)
|
||||
|
||||
// Save the new head root to DB.
|
||||
if err := s.beaconDB.SaveHeadBlockRoot(ctx, headRoot); err != nil {
|
||||
if err := s.cfg.BeaconDB.SaveHeadBlockRoot(ctx, headRoot); err != nil {
|
||||
return errors.Wrap(err, "could not save head root in DB")
|
||||
}
|
||||
|
||||
// Forward an event capturing a new chain head over a common event feed
|
||||
// done in a goroutine to avoid blocking the critical runtime main routine.
|
||||
go func() {
|
||||
if err := s.notifyNewHeadEvent(newHeadSlot, newHeadState, newStateRoot, headRoot[:]); err != nil {
|
||||
log.WithError(err).Error("Could not notify event feed of new chain head")
|
||||
}
|
||||
}()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// This gets called to update the canonical root mapping. It does not save the head block
// root in DB. With the inception of the initial-sync-cache-state flag, it uses the finalized
// checkpoint as an anchor to resume sync, so the head no longer needs to be saved on a per-slot basis.
|
||||
func (s *Service) saveHeadNoDB(ctx context.Context, b *ethpb.SignedBeaconBlock, r [32]byte, hs *stateTrie.BeaconState) error {
|
||||
func (s *Service) saveHeadNoDB(ctx context.Context, b block.SignedBeaconBlock, r [32]byte, hs state.BeaconState) error {
|
||||
if err := helpers.VerifyNilBeaconBlock(b); err != nil {
|
||||
return err
|
||||
}
|
||||
cachedHeadRoot, err := s.HeadRoot(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get head root from cache")
|
||||
@@ -156,24 +186,20 @@ func (s *Service) saveHeadNoDB(ctx context.Context, b *ethpb.SignedBeaconBlock,
|
||||
return nil
|
||||
}
|
||||
|
||||
if b == nil || b.Block == nil {
|
||||
return errors.New("cannot save nil head block")
|
||||
}
|
||||
|
||||
s.setHeadInitialSync(r, stateTrie.CopySignedBeaconBlock(b), hs)
|
||||
s.setHeadInitialSync(r, b.Copy(), hs)
|
||||
return nil
|
||||
}
|
||||
|
||||
// This sets head view object which is used to track the head slot, root, block and state.
|
||||
func (s *Service) setHead(root [32]byte, block *ethpb.SignedBeaconBlock, state *stateTrie.BeaconState) {
|
||||
func (s *Service) setHead(root [32]byte, block block.SignedBeaconBlock, state state.BeaconState) {
|
||||
s.headLock.Lock()
|
||||
defer s.headLock.Unlock()
|
||||
|
||||
// This does a full copy of the block and state.
|
||||
s.head = &head{
|
||||
slot: block.Block.Slot,
|
||||
slot: block.Block().Slot(),
|
||||
root: root,
|
||||
block: stateTrie.CopySignedBeaconBlock(block),
|
||||
block: block.Copy(),
|
||||
state: state.Copy(),
|
||||
}
|
||||
}
|
||||
@@ -181,22 +207,22 @@ func (s *Service) setHead(root [32]byte, block *ethpb.SignedBeaconBlock, state *
|
||||
// This sets head view object which is used to track the head slot, root, block and state. The method
|
||||
// assumes that state being passed into the method will not be modified by any other alternate
|
||||
// caller which holds the state's reference.
|
||||
func (s *Service) setHeadInitialSync(root [32]byte, block *ethpb.SignedBeaconBlock, state *stateTrie.BeaconState) {
|
||||
func (s *Service) setHeadInitialSync(root [32]byte, block block.SignedBeaconBlock, state state.BeaconState) {
|
||||
s.headLock.Lock()
|
||||
defer s.headLock.Unlock()
|
||||
|
||||
// This does a full copy of the block only.
|
||||
s.head = &head{
|
||||
slot: block.Block.Slot,
|
||||
slot: block.Block().Slot(),
|
||||
root: root,
|
||||
block: stateTrie.CopySignedBeaconBlock(block),
|
||||
block: block.Copy(),
|
||||
state: state,
|
||||
}
|
||||
}
|
||||
|
||||
// This returns the head slot.
|
||||
// This is a lock free version.
|
||||
func (s *Service) headSlot() uint64 {
|
||||
func (s *Service) headSlot() types.Slot {
|
||||
return s.head.slot
|
||||
}
|
||||
|
||||
@@ -214,14 +240,14 @@ func (s *Service) headRoot() [32]byte {
|
||||
// This returns the head block.
|
||||
// It does a full copy on head block for immutability.
|
||||
// This is a lock free version.
|
||||
func (s *Service) headBlock() *ethpb.SignedBeaconBlock {
|
||||
return stateTrie.CopySignedBeaconBlock(s.head.block)
|
||||
func (s *Service) headBlock() block.SignedBeaconBlock {
|
||||
return s.head.block.Copy()
|
||||
}
|
||||
|
||||
// This returns the head state.
|
||||
// It does a full copy on head state for immutability.
|
||||
// This is a lock free version.
|
||||
func (s *Service) headState(ctx context.Context) *stateTrie.BeaconState {
|
||||
func (s *Service) headState(ctx context.Context) state.BeaconState {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.headState")
|
||||
defer span.End()
|
||||
|
||||
@@ -234,12 +260,11 @@ func (s *Service) headGenesisValidatorRoot() [32]byte {
|
||||
return bytesutil.ToBytes32(s.head.state.GenesisValidatorRoot())
|
||||
}
|
||||
|
||||
// HasHeadState returns true if head state exists.
|
||||
func (s *Service) HasHeadState() bool {
|
||||
s.headLock.RLock()
|
||||
defer s.headLock.RUnlock()
|
||||
|
||||
return s.hasHeadState()
|
||||
// This returns the validator referenced by the provided index in
|
||||
// the head state.
|
||||
// This is a lock free version.
|
||||
func (s *Service) headValidatorAtIndex(index types.ValidatorIndex) (state.ReadOnlyValidator, error) {
|
||||
return s.head.state.ValidatorAtIndexReadOnly(index)
|
||||
}
|
||||
|
||||
// Returns true if head state exists.
|
||||
@@ -250,33 +275,33 @@ func (s *Service) hasHeadState() bool {
|
||||
|
||||
// This caches justified state balances to be used for fork choice.
|
||||
func (s *Service) cacheJustifiedStateBalances(ctx context.Context, justifiedRoot [32]byte) error {
|
||||
if err := s.beaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
|
||||
if err := s.cfg.BeaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.clearInitSyncBlocks()
|
||||
|
||||
var justifiedState *stateTrie.BeaconState
|
||||
var justifiedState state.BeaconState
|
||||
var err error
|
||||
if justifiedRoot == s.genesisRoot {
|
||||
justifiedState, err = s.beaconDB.GenesisState(ctx)
|
||||
justifiedState, err = s.cfg.BeaconDB.GenesisState(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
justifiedState, err = s.stateGen.StateByRoot(ctx, justifiedRoot)
|
||||
justifiedState, err = s.cfg.StateGen.StateByRoot(ctx, justifiedRoot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if justifiedState == nil {
|
||||
if justifiedState == nil || justifiedState.IsNil() {
|
||||
return errors.New("justified state can't be nil")
|
||||
}
|
||||
|
||||
epoch := helpers.CurrentEpoch(justifiedState)
|
||||
epoch := core.CurrentEpoch(justifiedState)
|
||||
|
||||
justifiedBalances := make([]uint64, justifiedState.NumValidators())
|
||||
if err := justifiedState.ReadFromEveryValidator(func(idx int, val stateTrie.ReadOnlyValidator) error {
|
||||
if err := justifiedState.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
|
||||
if helpers.IsActiveValidatorUsingTrie(val, epoch) {
|
||||
justifiedBalances[idx] = val.EffectiveBalance()
|
||||
} else {
|
||||
@@ -298,3 +323,90 @@ func (s *Service) getJustifiedBalances() []uint64 {
|
||||
defer s.justifiedBalancesLock.RUnlock()
|
||||
return s.justifiedBalances
|
||||
}
|
||||
|
||||
// Notifies a common event feed of a new chain head event. Called right after a new
|
||||
// chain head is determined, set, and saved to disk.
|
||||
func (s *Service) notifyNewHeadEvent(
|
||||
newHeadSlot types.Slot,
|
||||
newHeadState state.BeaconState,
|
||||
newHeadStateRoot,
|
||||
newHeadRoot []byte,
|
||||
) error {
|
||||
previousDutyDependentRoot := s.genesisRoot[:]
|
||||
currentDutyDependentRoot := s.genesisRoot[:]
|
||||
|
||||
var previousDutyEpoch types.Epoch
|
||||
currentDutyEpoch := core.SlotToEpoch(newHeadSlot)
|
||||
if currentDutyEpoch > 0 {
|
||||
previousDutyEpoch = currentDutyEpoch.Sub(1)
|
||||
}
|
||||
currentDutySlot, err := core.StartSlot(currentDutyEpoch)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get duty slot")
|
||||
}
|
||||
previousDutySlot, err := core.StartSlot(previousDutyEpoch)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get duty slot")
|
||||
}
|
||||
if currentDutySlot > 0 {
|
||||
currentDutyDependentRoot, err = helpers.BlockRootAtSlot(newHeadState, currentDutySlot-1)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get duty dependent root")
|
||||
}
|
||||
}
|
||||
if previousDutySlot > 0 {
|
||||
previousDutyDependentRoot, err = helpers.BlockRootAtSlot(newHeadState, previousDutySlot-1)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get duty dependent root")
|
||||
}
|
||||
}
|
||||
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
|
||||
Type: statefeed.NewHead,
|
||||
Data: ðpbv1.EventHead{
|
||||
Slot: newHeadSlot,
|
||||
Block: newHeadRoot,
|
||||
State: newHeadStateRoot,
|
||||
EpochTransition: core.IsEpochEnd(newHeadSlot),
|
||||
PreviousDutyDependentRoot: previousDutyDependentRoot,
|
||||
CurrentDutyDependentRoot: currentDutyDependentRoot,
|
||||
},
|
||||
})
|
||||
return nil
|
||||
}
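As a worked illustration (assuming mainnet's 32 slots per epoch, which the diff does not restate): for newHeadSlot = 70 the current duty epoch is 2 and the previous duty epoch is 1, so currentDutySlot = 64 and previousDutySlot = 32; the emitted EventHead then carries the block root at slot 63 as CurrentDutyDependentRoot and the block root at slot 31 as PreviousDutyDependentRoot, falling back to the genesis root only when the corresponding duty slot is 0.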
|
||||
|
||||
// This saves the attestations inside the beacon block with root `orphanedRoot` back into the
// attestation pool. It also filters out attestations that are more than one epoch old as a
// defense so invalid attestations don't flow into the attestation pool.
|
||||
func (s *Service) saveOrphanedAtts(ctx context.Context, orphanedRoot [32]byte) error {
|
||||
if !features.Get().CorrectlyInsertOrphanedAtts {
|
||||
return nil
|
||||
}
|
||||
|
||||
orphanedBlk, err := s.cfg.BeaconDB.Block(ctx, orphanedRoot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if orphanedBlk == nil || orphanedBlk.IsNil() {
|
||||
return errors.New("orphaned block can't be nil")
|
||||
}
|
||||
|
||||
for _, a := range orphanedBlk.Block().Body().Attestations() {
|
||||
// Skip the attestation if it is more than one epoch old.
|
||||
if a.Data.Slot+params.BeaconConfig().SlotsPerEpoch < s.CurrentSlot() {
|
||||
continue
|
||||
}
|
||||
if helpers.IsAggregated(a) {
|
||||
if err := s.cfg.AttPool.SaveAggregatedAttestation(a); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if err := s.cfg.AttPool.SaveUnaggregatedAttestation(a); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
saveOrphanedAttCount.Inc()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
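A quick numeric check of the filter above (illustrative, assuming 32 slots per epoch): an attestation for slot 10 is re-inserted into the pool only while the current slot is at most 42; from slot 43 onward the condition 10 + 32 < currentSlot holds and the attestation is skipped as stale.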
|
||||
|
||||
beacon-chain/blockchain/head_sync_committee_info.go (new file, 173 lines)
@@ -0,0 +1,173 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/async"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
)
|
||||
|
||||
// Initialize the state cache for sync committees.
|
||||
var syncCommitteeHeadStateCache = cache.NewSyncCommitteeHeadState()
|
||||
|
||||
// HeadSyncCommitteeFetcher is the interface that wraps the head sync committee related functions.
|
||||
// The head sync committee functions return callers sync committee indices and public keys with respect to current head state.
|
||||
type HeadSyncCommitteeFetcher interface {
|
||||
HeadSyncCommitteeIndices(ctx context.Context, index types.ValidatorIndex, slot types.Slot) ([]types.CommitteeIndex, error)
|
||||
HeadSyncCommitteePubKeys(ctx context.Context, slot types.Slot, committeeIndex types.CommitteeIndex) ([][]byte, error)
|
||||
}
|
||||
|
||||
// HeadDomainFetcher is the interface that wraps the head sync domain related functions.
|
||||
// The head sync committee domain functions return callers domain data with respect to slot and head state.
|
||||
type HeadDomainFetcher interface {
|
||||
HeadSyncCommitteeDomain(ctx context.Context, slot types.Slot) ([]byte, error)
|
||||
HeadSyncSelectionProofDomain(ctx context.Context, slot types.Slot) ([]byte, error)
|
||||
HeadSyncContributionProofDomain(ctx context.Context, slot types.Slot) ([]byte, error)
|
||||
}
|
||||
|
||||
// HeadSyncCommitteeDomain returns the head sync committee domain using current head state advanced up to `slot`.
|
||||
func (s *Service) HeadSyncCommitteeDomain(ctx context.Context, slot types.Slot) ([]byte, error) {
|
||||
return s.domainWithHeadState(ctx, slot, params.BeaconConfig().DomainSyncCommittee)
|
||||
}
|
||||
|
||||
// HeadSyncSelectionProofDomain returns the head sync committee domain using current head state advanced up to `slot`.
|
||||
func (s *Service) HeadSyncSelectionProofDomain(ctx context.Context, slot types.Slot) ([]byte, error) {
|
||||
return s.domainWithHeadState(ctx, slot, params.BeaconConfig().DomainSyncCommitteeSelectionProof)
|
||||
}
|
||||
|
||||
// HeadSyncContributionProofDomain returns the head sync committee domain using current head state advanced up to `slot`.
|
||||
func (s *Service) HeadSyncContributionProofDomain(ctx context.Context, slot types.Slot) ([]byte, error) {
|
||||
return s.domainWithHeadState(ctx, slot, params.BeaconConfig().DomainContributionAndProof)
|
||||
}
|
||||
|
||||
// HeadSyncCommitteeIndices returns the sync committee index positions using the head state. Input `slot` is taken into consideration
// because a validator's duty for `slot - 1` is used for block inclusion in `slot`. That means when a validator is at an epoch boundary
// across EPOCHS_PER_SYNC_COMMITTEE_PERIOD, the validator will be considered as using the next period's sync committee.
|
||||
//
|
||||
// Spec definition:
|
||||
// Being assigned to a sync committee for a given slot means that the validator produces and broadcasts signatures for slot - 1 for inclusion in slot.
|
||||
// This means that when assigned to an epoch sync committee signatures must be produced and broadcast for slots on range
|
||||
// [compute_start_slot_at_epoch(epoch) - 1, compute_start_slot_at_epoch(epoch) + SLOTS_PER_EPOCH - 1)
|
||||
// rather than for the range
|
||||
// [compute_start_slot_at_epoch(epoch), compute_start_slot_at_epoch(epoch) + SLOTS_PER_EPOCH)
|
||||
func (s *Service) HeadSyncCommitteeIndices(ctx context.Context, index types.ValidatorIndex, slot types.Slot) ([]types.CommitteeIndex, error) {
|
||||
nextSlotEpoch := core.SlotToEpoch(slot + 1)
|
||||
currentEpoch := core.SlotToEpoch(slot)
|
||||
|
||||
switch {
|
||||
case core.SyncCommitteePeriod(nextSlotEpoch) == core.SyncCommitteePeriod(currentEpoch):
|
||||
return s.headCurrentSyncCommitteeIndices(ctx, index, slot)
|
||||
// At sync committee period boundary, validator should sample the next epoch sync committee.
|
||||
case core.SyncCommitteePeriod(nextSlotEpoch) == core.SyncCommitteePeriod(currentEpoch)+1:
|
||||
return s.headNextSyncCommitteeIndices(ctx, index, slot)
|
||||
default:
|
||||
// Impossible condition.
|
||||
return nil, errors.New("could get calculate sync subcommittee based on the period")
|
||||
}
|
||||
}
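Worked example (assuming the mainnet values EPOCHS_PER_SYNC_COMMITTEE_PERIOD = 256 and SLOTS_PER_EPOCH = 32, which are not restated in the diff): at slot 8191, the last slot of epoch 255, currentEpoch is 255 (period 0) while nextSlotEpoch is 256 (period 1), so the next-period committee is sampled; one slot earlier, at slot 8190, both epochs fall in period 0 and the current-period committee is used.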
|
||||
|
||||
// headCurrentSyncCommitteeIndices returns the input validator `index`'s position indices in the current sync committee with respect to `slot`.
|
||||
// Head state advanced up to `slot` is used for calculation.
|
||||
func (s *Service) headCurrentSyncCommitteeIndices(ctx context.Context, index types.ValidatorIndex, slot types.Slot) ([]types.CommitteeIndex, error) {
|
||||
headState, err := s.getSyncCommitteeHeadState(ctx, slot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return helpers.CurrentPeriodSyncSubcommitteeIndices(headState, index)
|
||||
}
|
||||
|
||||
// headNextSyncCommitteeIndices returns the input validator `index`'s position indices in the next sync committee with respect to `slot`.
|
||||
// Head state advanced up to `slot` is used for calculation.
|
||||
func (s *Service) headNextSyncCommitteeIndices(ctx context.Context, index types.ValidatorIndex, slot types.Slot) ([]types.CommitteeIndex, error) {
|
||||
headState, err := s.getSyncCommitteeHeadState(ctx, slot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return helpers.NextPeriodSyncSubcommitteeIndices(headState, index)
|
||||
}
|
||||
|
||||
// HeadSyncCommitteePubKeys returns the head sync committee public keys with respect to `slot` and subcommittee index `committeeIndex`.
|
||||
// Head state advanced up to `slot` is used for calculation.
|
||||
func (s *Service) HeadSyncCommitteePubKeys(ctx context.Context, slot types.Slot, committeeIndex types.CommitteeIndex) ([][]byte, error) {
|
||||
headState, err := s.getSyncCommitteeHeadState(ctx, slot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
nextSlotEpoch := core.SlotToEpoch(headState.Slot() + 1)
|
||||
currEpoch := core.SlotToEpoch(headState.Slot())
|
||||
|
||||
var syncCommittee *ethpb.SyncCommittee
|
||||
if currEpoch == nextSlotEpoch || core.SyncCommitteePeriod(currEpoch) == core.SyncCommitteePeriod(nextSlotEpoch) {
|
||||
syncCommittee, err = headState.CurrentSyncCommittee()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
syncCommittee, err = headState.NextSyncCommittee()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return altair.SyncSubCommitteePubkeys(syncCommittee, committeeIndex)
|
||||
}
|
||||
|
||||
// returns calculated domain using input `domain` and `slot`.
|
||||
func (s *Service) domainWithHeadState(ctx context.Context, slot types.Slot, domain [4]byte) ([]byte, error) {
|
||||
headState, err := s.getSyncCommitteeHeadState(ctx, slot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return helpers.Domain(headState.Fork(), core.SlotToEpoch(headState.Slot()), domain, headState.GenesisValidatorRoot())
|
||||
}
|
||||
|
||||
// returns the head state advanced up to `slot`. It utilizes the cache `syncCommitteeHeadStateCache` by retrieving with `slot` as the key.
// On a cache miss, it processes the head state up to `slot` and fills the cache using `slot` as the key.
|
||||
func (s *Service) getSyncCommitteeHeadState(ctx context.Context, slot types.Slot) (state.BeaconState, error) {
|
||||
var headState state.BeaconState
|
||||
var err error
|
||||
mLock := async.NewMultilock(fmt.Sprintf("%s-%d", "syncHeadState", slot))
|
||||
mLock.Lock()
|
||||
defer mLock.Unlock()
|
||||
|
||||
// If a head state already exists for the requested slot, we don't need to process slots.
|
||||
cachedState, err := syncCommitteeHeadStateCache.Get(slot)
|
||||
switch {
|
||||
case err == nil:
|
||||
syncHeadStateHit.Inc()
|
||||
headState = cachedState
|
||||
return headState, nil
|
||||
case errors.Is(err, cache.ErrNotFound):
|
||||
headState, err = s.HeadState(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if headState == nil || headState.IsNil() {
|
||||
return nil, errors.New("nil state")
|
||||
}
|
||||
if slot > headState.Slot() {
|
||||
headState, err = transition.ProcessSlots(ctx, headState, slot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
syncHeadStateMiss.Inc()
|
||||
err = syncCommitteeHeadStateCache.Put(slot, headState)
|
||||
return headState, err
|
||||
default:
|
||||
// In the event we encounter another error,
// we return it.
|
||||
return nil, err
|
||||
}
|
||||
}
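A minimal usage sketch of the fetchers above (hypothetical caller, not part of the diff), assuming a configured *Service named svc and a context ctx; the helpers advance and memoize the head state per requested slot before reading the committee:

	// Fetch the sync subcommittee pubkeys for subcommittee index 0 at a future slot.
	pubkeys, err := svc.HeadSyncCommitteePubKeys(ctx, types.Slot(12345), types.CommitteeIndex(0))
	if err != nil {
		log.WithError(err).Error("Could not get head sync committee pubkeys")
	}
	_ = pubkeys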
|
||||
beacon-chain/blockchain/head_sync_committee_info_test.go (new file, 176 lines)
@@ -0,0 +1,176 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
dbtest "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
)
|
||||
|
||||
func TestService_headSyncCommitteeFetcher_Errors(t *testing.T) {
|
||||
beaconDB := dbtest.SetupDB(t)
|
||||
c := &Service{
|
||||
cfg: &Config{
|
||||
StateGen: stategen.New(beaconDB),
|
||||
},
|
||||
}
|
||||
c.head = &head{}
|
||||
_, err := c.headCurrentSyncCommitteeIndices(context.Background(), types.ValidatorIndex(0), types.Slot(0))
|
||||
require.ErrorContains(t, "nil state", err)
|
||||
|
||||
_, err = c.headNextSyncCommitteeIndices(context.Background(), types.ValidatorIndex(0), types.Slot(0))
|
||||
require.ErrorContains(t, "nil state", err)
|
||||
|
||||
_, err = c.HeadSyncCommitteePubKeys(context.Background(), types.Slot(0), types.CommitteeIndex(0))
|
||||
require.ErrorContains(t, "nil state", err)
|
||||
}
|
||||
|
||||
func TestService_HeadDomainFetcher_Errors(t *testing.T) {
|
||||
beaconDB := dbtest.SetupDB(t)
|
||||
c := &Service{
|
||||
cfg: &Config{
|
||||
StateGen: stategen.New(beaconDB),
|
||||
},
|
||||
}
|
||||
c.head = &head{}
|
||||
_, err := c.HeadSyncCommitteeDomain(context.Background(), types.Slot(0))
|
||||
require.ErrorContains(t, "nil state", err)
|
||||
|
||||
_, err = c.HeadSyncSelectionProofDomain(context.Background(), types.Slot(0))
|
||||
require.ErrorContains(t, "nil state", err)
|
||||
|
||||
_, err = c.HeadSyncSelectionProofDomain(context.Background(), types.Slot(0))
|
||||
require.ErrorContains(t, "nil state", err)
|
||||
}
|
||||
|
||||
func TestService_HeadSyncCommitteeIndices(t *testing.T) {
|
||||
s, _ := testutil.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
|
||||
c := &Service{}
|
||||
c.head = &head{state: s}
|
||||
|
||||
// Current period
|
||||
slot := 2*uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) + 1
|
||||
a, err := c.HeadSyncCommitteeIndices(context.Background(), 0, types.Slot(slot))
|
||||
require.NoError(t, err)
|
||||
|
||||
// Still the current period: slot is 2 slots before the EPOCHS_PER_SYNC_COMMITTEE_PERIOD boundary.
|
||||
slot = 3*uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) - 2
|
||||
b, err := c.HeadSyncCommitteeIndices(context.Background(), 0, types.Slot(slot))
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, a, b)
|
||||
|
||||
// Next period: slot is 1 slot before the EPOCHS_PER_SYNC_COMMITTEE_PERIOD boundary, so the next slot crosses it.
|
||||
slot = 3*uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) - 1
|
||||
b, err = c.HeadSyncCommitteeIndices(context.Background(), 0, types.Slot(slot))
|
||||
require.NoError(t, err)
|
||||
require.DeepNotEqual(t, a, b)
|
||||
}
|
||||
|
||||
func TestService_headCurrentSyncCommitteeIndices(t *testing.T) {
|
||||
s, _ := testutil.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
|
||||
c := &Service{}
|
||||
c.head = &head{state: s}
|
||||
|
||||
// Process slot up to `EpochsPerSyncCommitteePeriod` so it can `ProcessSyncCommitteeUpdates`.
|
||||
slot := uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) + 1
|
||||
indices, err := c.headCurrentSyncCommitteeIndices(context.Background(), 0, types.Slot(slot))
|
||||
require.NoError(t, err)
|
||||
|
||||
// NextSyncCommittee becomes CurrentSyncCommittee so it should be empty by default.
|
||||
require.Equal(t, 0, len(indices))
|
||||
}
|
||||
|
||||
func TestService_headNextSyncCommitteeIndices(t *testing.T) {
|
||||
s, _ := testutil.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
|
||||
c := &Service{}
|
||||
c.head = &head{state: s}
|
||||
|
||||
// Process slot up to `EpochsPerSyncCommitteePeriod` so it can `ProcessSyncCommitteeUpdates`.
|
||||
slot := uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) + 1
|
||||
indices, err := c.headNextSyncCommitteeIndices(context.Background(), 0, types.Slot(slot))
|
||||
require.NoError(t, err)
|
||||
|
||||
// NextSyncCommittee should not be empty after `ProcessSyncCommitteeUpdates`. Validator should get indices.
|
||||
require.NotEqual(t, 0, len(indices))
|
||||
}
|
||||
|
||||
func TestService_HeadSyncCommitteePubKeys(t *testing.T) {
|
||||
s, _ := testutil.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
|
||||
c := &Service{}
|
||||
c.head = &head{state: s}
|
||||
|
||||
// Process slot up to 2 * `EpochsPerSyncCommitteePeriod` so it can run `ProcessSyncCommitteeUpdates` twice.
|
||||
slot := uint64(2*params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) + 1
|
||||
pubkeys, err := c.HeadSyncCommitteePubKeys(context.Background(), types.Slot(slot), 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Any subcommittee should match the subcommittee size.
|
||||
subCommitteeSize := params.BeaconConfig().SyncCommitteeSize / params.BeaconConfig().SyncCommitteeSubnetCount
|
||||
require.Equal(t, int(subCommitteeSize), len(pubkeys))
|
||||
}
|
||||
|
||||
func TestService_HeadSyncCommitteeDomain(t *testing.T) {
|
||||
s, _ := testutil.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
|
||||
c := &Service{}
|
||||
c.head = &head{state: s}
|
||||
|
||||
wanted, err := helpers.Domain(s.Fork(), core.SlotToEpoch(s.Slot()), params.BeaconConfig().DomainSyncCommittee, s.GenesisValidatorRoot())
|
||||
require.NoError(t, err)
|
||||
|
||||
d, err := c.HeadSyncCommitteeDomain(context.Background(), 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.DeepEqual(t, wanted, d)
|
||||
}
|
||||
|
||||
func TestService_HeadSyncContributionProofDomain(t *testing.T) {
|
||||
s, _ := testutil.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
|
||||
c := &Service{}
|
||||
c.head = &head{state: s}
|
||||
|
||||
wanted, err := helpers.Domain(s.Fork(), core.SlotToEpoch(s.Slot()), params.BeaconConfig().DomainContributionAndProof, s.GenesisValidatorRoot())
|
||||
require.NoError(t, err)
|
||||
|
||||
d, err := c.HeadSyncContributionProofDomain(context.Background(), 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.DeepEqual(t, wanted, d)
|
||||
}
|
||||
|
||||
func TestService_HeadSyncSelectionProofDomain(t *testing.T) {
|
||||
s, _ := testutil.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
|
||||
c := &Service{}
|
||||
c.head = &head{state: s}
|
||||
|
||||
wanted, err := helpers.Domain(s.Fork(), core.SlotToEpoch(s.Slot()), params.BeaconConfig().DomainSyncCommitteeSelectionProof, s.GenesisValidatorRoot())
|
||||
require.NoError(t, err)
|
||||
|
||||
d, err := c.HeadSyncSelectionProofDomain(context.Background(), 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.DeepEqual(t, wanted, d)
|
||||
}
|
||||
|
||||
func TestSyncCommitteeHeadStateCache_RoundTrip(t *testing.T) {
|
||||
c := syncCommitteeHeadStateCache
|
||||
t.Cleanup(func() {
|
||||
syncCommitteeHeadStateCache = cache.NewSyncCommitteeHeadState()
|
||||
})
|
||||
beaconState, _ := testutil.DeterministicGenesisStateAltair(t, 100)
|
||||
require.NoError(t, beaconState.SetSlot(100))
|
||||
cachedState, err := c.Get(101)
|
||||
require.ErrorContains(t, cache.ErrNotFound.Error(), err)
|
||||
require.Equal(t, nil, cachedState)
|
||||
require.NoError(t, c.Put(101, beaconState))
|
||||
cachedState, err = c.Get(101)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, beaconState, cachedState)
|
||||
}
|
||||
@@ -4,10 +4,17 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/config/features"
|
||||
ethpbv1 "github.com/prysmaticlabs/prysm/proto/eth/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
@@ -22,7 +29,7 @@ func TestSaveHead_Same(t *testing.T) {
|
||||
service.head = &head{slot: 0, root: r}
|
||||
|
||||
require.NoError(t, service.saveHead(context.Background(), r))
|
||||
assert.Equal(t, uint64(0), service.headSlot(), "Head did not stay the same")
|
||||
assert.Equal(t, types.Slot(0), service.headSlot(), "Head did not stay the same")
|
||||
assert.Equal(t, r, service.headRoot(), "Head did not stay the same")
|
||||
}
|
||||
|
||||
@@ -31,31 +38,40 @@ func TestSaveHead_Different(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
service := setupBeaconChain(t, beaconDB)
|
||||
|
||||
oldRoot := [32]byte{'A'}
|
||||
service.head = &head{slot: 0, root: oldRoot}
|
||||
testutil.NewBeaconBlock()
|
||||
oldBlock := wrapper.WrappedPhase0SignedBeaconBlock(
|
||||
testutil.NewBeaconBlock(),
|
||||
)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), oldBlock))
|
||||
oldRoot, err := oldBlock.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
service.head = &head{
|
||||
slot: 0,
|
||||
root: oldRoot,
|
||||
block: oldBlock,
|
||||
}
|
||||
|
||||
newHeadSignedBlock := testutil.NewBeaconBlock()
|
||||
newHeadSignedBlock.Block.Slot = 1
|
||||
newHeadBlock := newHeadSignedBlock.Block
|
||||
|
||||
require.NoError(t, service.beaconDB.SaveBlock(context.Background(), newHeadSignedBlock))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(newHeadSignedBlock)))
|
||||
newRoot, err := newHeadBlock.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
headState := testutil.NewBeaconState()
|
||||
headState, err := testutil.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, headState.SetSlot(1))
|
||||
require.NoError(t, service.beaconDB.SaveStateSummary(context.Background(), &pb.StateSummary{Slot: 1, Root: newRoot[:]}))
|
||||
require.NoError(t, service.beaconDB.SaveState(context.Background(), headState, newRoot))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(context.Background(), ðpb.StateSummary{Slot: 1, Root: newRoot[:]}))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), headState, newRoot))
|
||||
require.NoError(t, service.saveHead(context.Background(), newRoot))
|
||||
|
||||
assert.Equal(t, uint64(1), service.HeadSlot(), "Head did not change")
|
||||
assert.Equal(t, types.Slot(1), service.HeadSlot(), "Head did not change")
|
||||
|
||||
cachedRoot, err := service.HeadRoot(context.Background())
|
||||
require.NoError(t, err)
|
||||
if !bytes.Equal(cachedRoot, newRoot[:]) {
|
||||
t.Error("Head did not change")
|
||||
}
|
||||
assert.DeepEqual(t, newHeadSignedBlock, service.headBlock(), "Head did not change")
|
||||
assert.DeepEqual(t, headState.CloneInnerState(), service.headState(ctx).CloneInnerState(), "Head did not change")
|
||||
assert.DeepEqual(t, cachedRoot, newRoot[:], "Head did not change")
|
||||
assert.DeepEqual(t, newHeadSignedBlock, service.headBlock().Proto(), "Head did not change")
|
||||
assert.DeepSSZEqual(t, headState.CloneInnerState(), service.headState(ctx).CloneInnerState(), "Head did not change")
|
||||
}
|
||||
|
||||
func TestSaveHead_Different_Reorg(t *testing.T) {
|
||||
@@ -64,8 +80,17 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
service := setupBeaconChain(t, beaconDB)
|
||||
|
||||
oldRoot := [32]byte{'A'}
|
||||
service.head = &head{slot: 0, root: oldRoot}
|
||||
oldBlock := wrapper.WrappedPhase0SignedBeaconBlock(
|
||||
testutil.NewBeaconBlock(),
|
||||
)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), oldBlock))
|
||||
oldRoot, err := oldBlock.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
service.head = &head{
|
||||
slot: 0,
|
||||
root: oldRoot,
|
||||
block: oldBlock,
|
||||
}
|
||||
|
||||
reorgChainParent := [32]byte{'B'}
|
||||
newHeadSignedBlock := testutil.NewBeaconBlock()
|
||||
@@ -73,24 +98,25 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
|
||||
newHeadSignedBlock.Block.ParentRoot = reorgChainParent[:]
|
||||
newHeadBlock := newHeadSignedBlock.Block
|
||||
|
||||
require.NoError(t, service.beaconDB.SaveBlock(context.Background(), newHeadSignedBlock))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(newHeadSignedBlock)))
|
||||
newRoot, err := newHeadBlock.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
headState := testutil.NewBeaconState()
|
||||
headState, err := testutil.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, headState.SetSlot(1))
|
||||
require.NoError(t, service.beaconDB.SaveStateSummary(context.Background(), &pb.StateSummary{Slot: 1, Root: newRoot[:]}))
|
||||
require.NoError(t, service.beaconDB.SaveState(context.Background(), headState, newRoot))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(context.Background(), ðpb.StateSummary{Slot: 1, Root: newRoot[:]}))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), headState, newRoot))
|
||||
require.NoError(t, service.saveHead(context.Background(), newRoot))
|
||||
|
||||
assert.Equal(t, uint64(1), service.HeadSlot(), "Head did not change")
|
||||
assert.Equal(t, types.Slot(1), service.HeadSlot(), "Head did not change")
|
||||
|
||||
cachedRoot, err := service.HeadRoot(context.Background())
|
||||
require.NoError(t, err)
|
||||
if !bytes.Equal(cachedRoot, newRoot[:]) {
|
||||
t.Error("Head did not change")
|
||||
}
|
||||
assert.DeepEqual(t, newHeadSignedBlock, service.headBlock(), "Head did not change")
|
||||
assert.DeepEqual(t, headState.CloneInnerState(), service.headState(ctx).CloneInnerState(), "Head did not change")
|
||||
assert.DeepEqual(t, newHeadSignedBlock, service.headBlock().Proto(), "Head did not change")
|
||||
assert.DeepSSZEqual(t, headState.CloneInnerState(), service.headState(ctx).CloneInnerState(), "Head did not change")
|
||||
require.LogsContain(t, hook, "Chain reorg occurred")
|
||||
}
|
||||
|
||||
@@ -100,8 +126,8 @@ func TestCacheJustifiedStateBalances_CanCache(t *testing.T) {
|
||||
|
||||
state, _ := testutil.DeterministicGenesisState(t, 100)
|
||||
r := [32]byte{'a'}
|
||||
require.NoError(t, service.beaconDB.SaveStateSummary(context.Background(), &pb.StateSummary{Root: r[:]}))
|
||||
require.NoError(t, service.beaconDB.SaveState(context.Background(), state, r))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(context.Background(), ðpb.StateSummary{Root: r[:]}))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), state, r))
|
||||
require.NoError(t, service.cacheJustifiedStateBalances(context.Background(), r))
|
||||
require.DeepEqual(t, service.getJustifiedBalances(), state.Balances(), "Incorrect justified balances")
|
||||
}
|
||||
@@ -111,7 +137,7 @@ func TestUpdateHead_MissingJustifiedRoot(t *testing.T) {
|
||||
service := setupBeaconChain(t, beaconDB)
|
||||
|
||||
b := testutil.NewBeaconBlock()
|
||||
require.NoError(t, service.beaconDB.SaveBlock(context.Background(), b))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(b)))
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -121,3 +147,121 @@ func TestUpdateHead_MissingJustifiedRoot(t *testing.T) {
|
||||
|
||||
require.NoError(t, service.updateHead(context.Background(), []uint64{}))
|
||||
}
|
||||
|
||||
func Test_notifyNewHeadEvent(t *testing.T) {
|
||||
t.Run("genesis_state_root", func(t *testing.T) {
|
||||
bState, _ := testutil.DeterministicGenesisState(t, 10)
|
||||
notifier := &mock.MockStateNotifier{RecordEvents: true}
|
||||
srv := &Service{
|
||||
cfg: &Config{
|
||||
StateNotifier: notifier,
|
||||
},
|
||||
genesisRoot: [32]byte{1},
|
||||
}
|
||||
newHeadStateRoot := [32]byte{2}
|
||||
newHeadRoot := [32]byte{3}
|
||||
err := srv.notifyNewHeadEvent(1, bState, newHeadStateRoot[:], newHeadRoot[:])
|
||||
require.NoError(t, err)
|
||||
events := notifier.ReceivedEvents()
|
||||
require.Equal(t, 1, len(events))
|
||||
|
||||
eventHead, ok := events[0].Data.(*ethpbv1.EventHead)
|
||||
require.Equal(t, true, ok)
|
||||
wanted := ðpbv1.EventHead{
|
||||
Slot: 1,
|
||||
Block: newHeadRoot[:],
|
||||
State: newHeadStateRoot[:],
|
||||
EpochTransition: false,
|
||||
PreviousDutyDependentRoot: srv.genesisRoot[:],
|
||||
CurrentDutyDependentRoot: srv.genesisRoot[:],
|
||||
}
|
||||
require.DeepSSZEqual(t, wanted, eventHead)
|
||||
})
|
||||
t.Run("non_genesis_values", func(t *testing.T) {
|
||||
bState, _ := testutil.DeterministicGenesisState(t, 10)
|
||||
notifier := &mock.MockStateNotifier{RecordEvents: true}
|
||||
genesisRoot := [32]byte{1}
|
||||
srv := &Service{
|
||||
cfg: &Config{
|
||||
StateNotifier: notifier,
|
||||
},
|
||||
genesisRoot: genesisRoot,
|
||||
}
|
||||
epoch1Start, err := core.StartSlot(1)
|
||||
require.NoError(t, err)
|
||||
epoch2Start, err := core.StartSlot(2)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, bState.SetSlot(epoch1Start))
|
||||
|
||||
newHeadStateRoot := [32]byte{2}
|
||||
newHeadRoot := [32]byte{3}
|
||||
err = srv.notifyNewHeadEvent(epoch2Start, bState, newHeadStateRoot[:], newHeadRoot[:])
|
||||
require.NoError(t, err)
|
||||
events := notifier.ReceivedEvents()
|
||||
require.Equal(t, 1, len(events))
|
||||
|
||||
eventHead, ok := events[0].Data.(*ethpbv1.EventHead)
|
||||
require.Equal(t, true, ok)
|
||||
wanted := ðpbv1.EventHead{
|
||||
Slot: epoch2Start,
|
||||
Block: newHeadRoot[:],
|
||||
State: newHeadStateRoot[:],
|
||||
EpochTransition: false,
|
||||
PreviousDutyDependentRoot: genesisRoot[:],
|
||||
CurrentDutyDependentRoot: make([]byte, 32),
|
||||
}
|
||||
require.DeepSSZEqual(t, wanted, eventHead)
|
||||
})
|
||||
}
|
||||
|
||||
func TestSaveOrphanedAtts(t *testing.T) {
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
CorrectlyInsertOrphanedAtts: true,
|
||||
})
|
||||
defer resetCfg()
|
||||
|
||||
genesis, keys := testutil.DeterministicGenesisState(t, 64)
|
||||
b, err := testutil.GenerateFullBlock(genesis, keys, testutil.DefaultBlockGenConfig(), 1)
|
||||
assert.NoError(t, err)
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
service := setupBeaconChain(t, beaconDB)
|
||||
service.genesisTime = time.Now()
|
||||
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b)))
|
||||
require.NoError(t, service.saveOrphanedAtts(ctx, r))
|
||||
|
||||
require.Equal(t, len(b.Block.Body.Attestations), service.cfg.AttPool.AggregatedAttestationCount())
|
||||
savedAtts := service.cfg.AttPool.AggregatedAttestations()
|
||||
atts := b.Block.Body.Attestations
|
||||
require.DeepSSZEqual(t, atts, savedAtts)
|
||||
}
|
||||
|
||||
func TestSaveOrphanedAtts_CanFilter(t *testing.T) {
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
CorrectlyInsertOrphanedAtts: true,
|
||||
})
|
||||
defer resetCfg()
|
||||
|
||||
genesis, keys := testutil.DeterministicGenesisState(t, 64)
|
||||
b, err := testutil.GenerateFullBlock(genesis, keys, testutil.DefaultBlockGenConfig(), 1)
|
||||
assert.NoError(t, err)
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
service := setupBeaconChain(t, beaconDB)
|
||||
service.genesisTime = time.Now().Add(time.Duration(-1*int64(params.BeaconConfig().SlotsPerEpoch+1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
|
||||
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b)))
|
||||
require.NoError(t, service.saveOrphanedAtts(ctx, r))
|
||||
|
||||
require.Equal(t, 0, service.cfg.AttPool.AggregatedAttestationCount())
|
||||
savedAtts := service.cfg.AttPool.AggregatedAttestations()
|
||||
atts := b.Block.Body.Attestations
|
||||
require.DeepNotSSZEqual(t, atts, savedAtts)
|
||||
}
|
||||
|
||||
@@ -39,13 +39,13 @@ func (s *Service) TreeHandler(w http.ResponseWriter, r *http.Request) {
|
||||
log.WithError(err).Error("Could not get head state")
|
||||
return
|
||||
}
|
||||
if headState == nil {
|
||||
if headState == nil || headState.IsNil() {
|
||||
if _, err := w.Write([]byte("Unavailable during initial syncing")); err != nil {
|
||||
log.WithError(err).Error("Failed to render p2p info page")
|
||||
}
|
||||
}
|
||||
|
||||
nodes := s.forkChoiceStore.Nodes()
|
||||
nodes := s.cfg.ForkChoiceStore.Nodes()
|
||||
|
||||
graph := dot.NewGraph(dot.Directed)
|
||||
graph.Attr("rankdir", "RL")
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
|
||||
@@ -21,7 +22,8 @@ func TestService_TreeHandler(t *testing.T) {
|
||||
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
headState := testutil.NewBeaconState()
|
||||
headState, err := testutil.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, headState.SetBalances([]uint64{params.BeaconConfig().GweiPerEth}))
|
||||
cfg := &Config{
|
||||
BeaconDB: beaconDB,
|
||||
@@ -34,9 +36,9 @@ func TestService_TreeHandler(t *testing.T) {
|
||||
}
|
||||
s, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.forkChoiceStore.ProcessBlock(ctx, 0, [32]byte{'a'}, [32]byte{'g'}, [32]byte{'c'}, 0, 0))
|
||||
require.NoError(t, s.forkChoiceStore.ProcessBlock(ctx, 1, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'c'}, 0, 0))
|
||||
s.setHead([32]byte{'a'}, testutil.NewBeaconBlock(), headState)
|
||||
require.NoError(t, s.cfg.ForkChoiceStore.ProcessBlock(ctx, 0, [32]byte{'a'}, [32]byte{'g'}, [32]byte{'c'}, 0, 0))
|
||||
require.NoError(t, s.cfg.ForkChoiceStore.ProcessBlock(ctx, 1, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'c'}, 0, 0))
|
||||
s.setHead([32]byte{'a'}, wrapper.WrappedPhase0SignedBeaconBlock(testutil.NewBeaconBlock()), headState)
|
||||
|
||||
rr := httptest.NewRecorder()
|
||||
handler := http.HandlerFunc(s.TreeHandler)
|
||||
|
||||
@@ -1,11 +1,11 @@
package blockchain

import (
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
)

// This saves a beacon block to the initial sync blocks cache.
func (s *Service) saveInitSyncBlock(r [32]byte, b *ethpb.SignedBeaconBlock) {
func (s *Service) saveInitSyncBlock(r [32]byte, b block.SignedBeaconBlock) {
s.initSyncBlocksLock.Lock()
defer s.initSyncBlocksLock.Unlock()
s.initSyncBlocks[r] = b
@@ -22,7 +22,7 @@ func (s *Service) hasInitSyncBlock(r [32]byte) bool {

// This retrieves a beacon block from the initial sync blocks cache using the root of
// the block.
func (s *Service) getInitSyncBlock(r [32]byte) *ethpb.SignedBeaconBlock {
func (s *Service) getInitSyncBlock(r [32]byte) block.SignedBeaconBlock {
s.initSyncBlocksLock.RLock()
defer s.initSyncBlocksLock.RUnlock()
b := s.initSyncBlocks[r]
@@ -31,11 +31,11 @@ func (s *Service) getInitSyncBlock(r [32]byte) *ethpb.SignedBeaconBlock {

// This retrieves all the beacon blocks from the initial sync blocks cache, the returned
// blocks are unordered.
func (s *Service) getInitSyncBlocks() []*ethpb.SignedBeaconBlock {
func (s *Service) getInitSyncBlocks() []block.SignedBeaconBlock {
s.initSyncBlocksLock.RLock()
defer s.initSyncBlocksLock.RUnlock()

blks := make([]*ethpb.SignedBeaconBlock, 0, len(s.initSyncBlocks))
blks := make([]block.SignedBeaconBlock, 0, len(s.initSyncBlocks))
for _, b := range s.initSyncBlocks {
blks = append(blks, b)
}
@@ -46,5 +46,5 @@ func (s *Service) getInitSyncBlocks() []*ethpb.SignedBeaconBlock {
func (s *Service) clearInitSyncBlocks() {
s.initSyncBlocksLock.Lock()
defer s.initSyncBlocksLock.Unlock()
s.initSyncBlocks = make(map[[32]byte]*ethpb.SignedBeaconBlock)
s.initSyncBlocks = make(map[[32]byte]block.SignedBeaconBlock)
}

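The hunk above swaps the concrete *ethpb.SignedBeaconBlock for the block.SignedBeaconBlock interface so the same cache can hold Phase0 and Altair blocks behind one type. A minimal, self-contained sketch of that pattern; the SignedBlock interface and blockCache type below are illustrative stand-ins, not Prysm types.

package blockcache

import "sync"

// SignedBlock is a hypothetical stand-in for an interface that both
// phase0 and Altair block wrappers could satisfy.
type SignedBlock interface {
	Slot() uint64
	IsNil() bool
}

// blockCache mirrors the init-sync cache shape: a root-keyed map guarded by an RWMutex.
type blockCache struct {
	mu     sync.RWMutex
	blocks map[[32]byte]SignedBlock
}

func newBlockCache() *blockCache {
	return &blockCache{blocks: make(map[[32]byte]SignedBlock)}
}

// save stores a block under its root, regardless of which fork version backs it.
func (c *blockCache) save(root [32]byte, b SignedBlock) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.blocks[root] = b
}

// get returns the cached block for a root, or nil if it was never saved.
func (c *blockCache) get(root [32]byte) SignedBlock {
	c.mu.RLock()
	defer c.mu.RUnlock()
	return c.blocks[root]
}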
12 beacon-chain/blockchain/init_test.go Normal file
@@ -0,0 +1,12 @@
package blockchain

import (
"github.com/prysmaticlabs/prysm/shared/params"
)

func init() {
// Override network name so that hardcoded genesis files are not loaded.
cfg := params.BeaconConfig()
cfg.ConfigName = "test"
params.OverrideBeaconConfig(cfg)
}
@@ -3,33 +3,63 @@ package blockchain
|
||||
import (
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
|
||||
"github.com/prysmaticlabs/prysm/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
prysmTime "github.com/prysmaticlabs/prysm/time"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var log = logrus.WithField("prefix", "blockchain")
|
||||
|
||||
// logs state transition related data every slot.
|
||||
func logStateTransitionData(b *ethpb.BeaconBlock) {
|
||||
log.WithFields(logrus.Fields{
|
||||
"attestations": len(b.Body.Attestations),
|
||||
"deposits": len(b.Body.Deposits),
|
||||
"attesterSlashings": len(b.Body.AttesterSlashings),
|
||||
"proposerSlashings": len(b.Body.ProposerSlashings),
|
||||
"voluntaryExits": len(b.Body.VoluntaryExits),
|
||||
}).Info("Finished applying state transition")
|
||||
func logStateTransitionData(b block.BeaconBlock) {
|
||||
log := log.WithField("slot", b.Slot())
|
||||
if len(b.Body().Attestations()) > 0 {
|
||||
log = log.WithField("attestations", len(b.Body().Attestations()))
|
||||
}
|
||||
if len(b.Body().Deposits()) > 0 {
|
||||
log = log.WithField("deposits", len(b.Body().Deposits()))
|
||||
}
|
||||
if len(b.Body().AttesterSlashings()) > 0 {
|
||||
log = log.WithField("attesterSlashings", len(b.Body().AttesterSlashings()))
|
||||
}
|
||||
if len(b.Body().ProposerSlashings()) > 0 {
|
||||
log = log.WithField("proposerSlashings", len(b.Body().ProposerSlashings()))
|
||||
}
|
||||
if len(b.Body().VoluntaryExits()) > 0 {
|
||||
log = log.WithField("voluntaryExits", len(b.Body().VoluntaryExits()))
|
||||
}
|
||||
if b.Version() == version.Altair {
|
||||
agg, err := b.Body().SyncAggregate()
|
||||
if err == nil {
|
||||
log = log.WithField("syncBitsCount", agg.SyncCommitteeBits.Count())
|
||||
}
|
||||
}
|
||||
log.Info("Finished applying state transition")
|
||||
}
|
||||
|
||||
func logBlockSyncStatus(block *ethpb.BeaconBlock, blockRoot [32]byte, finalized *ethpb.Checkpoint) {
|
||||
func logBlockSyncStatus(block block.BeaconBlock, blockRoot [32]byte, finalized *ethpb.Checkpoint, receivedTime time.Time, genesisTime uint64) error {
|
||||
startTime, err := core.SlotToTime(genesisTime, block.Slot())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": block.Slot,
|
||||
"slotInEpoch": block.Slot % params.BeaconConfig().SlotsPerEpoch,
|
||||
"slot": block.Slot(),
|
||||
"slotInEpoch": block.Slot() % params.BeaconConfig().SlotsPerEpoch,
|
||||
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
|
||||
"epoch": helpers.SlotToEpoch(block.Slot),
|
||||
"epoch": core.SlotToEpoch(block.Slot()),
|
||||
"finalizedEpoch": finalized.Epoch,
|
||||
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
|
||||
}).Info("Synced new block")
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": block.Slot,
|
||||
"sinceSlotStartTime": prysmTime.Now().Sub(startTime),
|
||||
"chainServiceProcessedTime": prysmTime.Now().Sub(receivedTime),
|
||||
}).Debug("Sync new block times")
|
||||
return nil
|
||||
}
|
||||
|
||||
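The rewritten logStateTransitionData above attaches a logrus field only when the corresponding count is non-zero, which keeps the log line short for empty blocks. A rough, self-contained sketch of that idea; the counts and field names here are illustrative, not the exact Prysm ones.

package main

import "github.com/sirupsen/logrus"

func logBlockSummary(slot uint64, attestations, deposits, exits int) {
	// Always include the slot; add the other fields only when they carry information.
	entry := logrus.WithField("slot", slot)
	if attestations > 0 {
		entry = entry.WithField("attestations", attestations)
	}
	if deposits > 0 {
		entry = entry.WithField("deposits", deposits)
	}
	if exits > 0 {
		entry = entry.WithField("voluntaryExits", exits)
	}
	entry.Info("Finished applying state transition")
}

func main() {
	logBlockSummary(42, 3, 0, 1) // logs slot, attestations and voluntaryExits only
}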
66 beacon-chain/blockchain/log_test.go Normal file
@@ -0,0 +1,66 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
|
||||
func Test_logStateTransitionData(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
b block.BeaconBlock
|
||||
want string
|
||||
}{
|
||||
{name: "empty block body",
|
||||
b: wrapper.WrappedPhase0BeaconBlock(ðpb.BeaconBlock{Body: ðpb.BeaconBlockBody{}}),
|
||||
want: "\"Finished applying state transition\" prefix=blockchain slot=0",
|
||||
},
|
||||
{name: "has attestation",
|
||||
b: wrapper.WrappedPhase0BeaconBlock(ðpb.BeaconBlock{Body: ðpb.BeaconBlockBody{Attestations: []*ethpb.Attestation{{}}}}),
|
||||
want: "\"Finished applying state transition\" attestations=1 prefix=blockchain slot=0",
|
||||
},
|
||||
{name: "has deposit",
|
||||
b: wrapper.WrappedPhase0BeaconBlock(
|
||||
ðpb.BeaconBlock{Body: ðpb.BeaconBlockBody{
|
||||
Attestations: []*ethpb.Attestation{{}},
|
||||
Deposits: []*ethpb.Deposit{{}}}}),
|
||||
want: "\"Finished applying state transition\" attestations=1 deposits=1 prefix=blockchain slot=0",
|
||||
},
|
||||
{name: "has attester slashing",
|
||||
b: wrapper.WrappedPhase0BeaconBlock(ðpb.BeaconBlock{Body: ðpb.BeaconBlockBody{
|
||||
AttesterSlashings: []*ethpb.AttesterSlashing{{}}}}),
|
||||
want: "\"Finished applying state transition\" attesterSlashings=1 prefix=blockchain slot=0",
|
||||
},
|
||||
{name: "has proposer slashing",
|
||||
b: wrapper.WrappedPhase0BeaconBlock(ðpb.BeaconBlock{Body: ðpb.BeaconBlockBody{
|
||||
ProposerSlashings: []*ethpb.ProposerSlashing{{}}}}),
|
||||
want: "\"Finished applying state transition\" prefix=blockchain proposerSlashings=1 slot=0",
|
||||
},
|
||||
{name: "has exit",
|
||||
b: wrapper.WrappedPhase0BeaconBlock(ðpb.BeaconBlock{Body: ðpb.BeaconBlockBody{
|
||||
VoluntaryExits: []*ethpb.SignedVoluntaryExit{{}}}}),
|
||||
want: "\"Finished applying state transition\" prefix=blockchain slot=0 voluntaryExits=1",
|
||||
},
|
||||
{name: "has everything",
|
||||
b: wrapper.WrappedPhase0BeaconBlock(ðpb.BeaconBlock{Body: ðpb.BeaconBlockBody{
|
||||
Attestations: []*ethpb.Attestation{{}},
|
||||
Deposits: []*ethpb.Deposit{{}},
|
||||
AttesterSlashings: []*ethpb.AttesterSlashing{{}},
|
||||
ProposerSlashings: []*ethpb.ProposerSlashing{{}},
|
||||
VoluntaryExits: []*ethpb.SignedVoluntaryExit{{}}}}),
|
||||
want: "\"Finished applying state transition\" attestations=1 attesterSlashings=1 deposits=1 prefix=blockchain proposerSlashings=1 slot=0 voluntaryExits=1",
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
hook := logTest.NewGlobal()
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
logStateTransitionData(tt.b)
|
||||
require.LogsContain(t, hook, tt.want)
|
||||
})
|
||||
}
|
||||
}
|
||||
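The new log_test.go above drives logStateTransitionData through a table of wrapped blocks and asserts on the formatted output. For reference, a minimal sketch of the underlying technique, capturing log output with the logrus test hook; the logged message here is made up, and in a real test this would run inside a testing.T function.

package main

import (
	"fmt"
	"strings"

	"github.com/sirupsen/logrus"
	"github.com/sirupsen/logrus/hooks/test"
)

func main() {
	// NewGlobal attaches a hook to the standard logger and records every entry.
	hook := test.NewGlobal()

	logrus.WithField("slot", 7).Info("Finished applying state transition")

	last := hook.LastEntry()
	fmt.Println(last.Message, last.Data["slot"])
	fmt.Println(strings.Contains(last.Message, "state transition")) // true
}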
@@ -3,11 +3,16 @@ package blockchain
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
|
||||
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
|
||||
"github.com/prysmaticlabs/prysm/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
)
|
||||
@@ -58,6 +63,10 @@ var (
|
||||
Name: "beacon_previous_justified_root",
|
||||
Help: "Previous justified root of the processed state",
|
||||
})
|
||||
activeValidatorCount = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "beacon_current_active_validators",
|
||||
Help: "Current total active validators",
|
||||
})
|
||||
validatorsCount = promauto.NewGaugeVec(prometheus.GaugeOpts{
|
||||
Name: "validator_count",
|
||||
Help: "The total number of validators",
|
||||
@@ -74,6 +83,10 @@ var (
|
||||
Name: "current_eth1_data_deposit_count",
|
||||
Help: "The current eth1 deposit count in the last processed state eth1data field.",
|
||||
})
|
||||
processedDepositsCount = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "beacon_processed_deposits_total",
|
||||
Help: "Total number of deposits processed",
|
||||
})
|
||||
stateTrieReferences = promauto.NewGaugeVec(prometheus.GaugeOpts{
|
||||
Name: "field_references",
|
||||
Help: "The number of states a particular field is shared with.",
|
||||
@@ -95,9 +108,13 @@ var (
|
||||
Help: "The total amount of ether, in gwei, that has been used in voting attestation head of previous epoch",
|
||||
})
|
||||
reorgCount = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "beacon_reorg_total",
|
||||
Name: "beacon_reorgs_total",
|
||||
Help: "Count the number of times beacon chain has a reorg",
|
||||
})
|
||||
saveOrphanedAttCount = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "saved_orphaned_att_total",
|
||||
Help: "Count the number of times an orphaned attestation is saved",
|
||||
})
|
||||
attestationInclusionDelay = promauto.NewHistogram(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "attestation_inclusion_delay_slots",
|
||||
@@ -105,10 +122,18 @@ var (
|
||||
Buckets: []float64{1, 2, 3, 4, 6, 32, 64},
|
||||
},
|
||||
)
|
||||
syncHeadStateMiss = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "sync_head_state_miss",
|
||||
Help: "The number of sync head state requests that are not present in the cache.",
|
||||
})
|
||||
syncHeadStateHit = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "sync_head_state_hit",
|
||||
Help: "The number of sync head state requests that are present in the cache.",
|
||||
})
|
||||
)
|
||||
|
||||
// reportSlotMetrics reports slot related metrics.
|
||||
func reportSlotMetrics(stateSlot, headSlot, clockSlot uint64, finalizedCheckpoint *ethpb.Checkpoint) {
|
||||
func reportSlotMetrics(stateSlot, headSlot, clockSlot types.Slot, finalizedCheckpoint *ethpb.Checkpoint) {
|
||||
clockTimeSlot.Set(float64(clockSlot))
|
||||
beaconSlot.Set(float64(stateSlot))
|
||||
beaconHeadSlot.Set(float64(headSlot))
|
||||
@@ -119,8 +144,8 @@ func reportSlotMetrics(stateSlot, headSlot, clockSlot uint64, finalizedCheckpoin
|
||||
}
|
||||
|
||||
// reportEpochMetrics reports epoch related metrics.
|
||||
func reportEpochMetrics(ctx context.Context, postState, headState *stateTrie.BeaconState) error {
|
||||
currentEpoch := postState.Slot() / params.BeaconConfig().SlotsPerEpoch
|
||||
func reportEpochMetrics(ctx context.Context, postState, headState state.BeaconState) error {
|
||||
currentEpoch := types.Epoch(postState.Slot() / params.BeaconConfig().SlotsPerEpoch)
|
||||
|
||||
// Validator instances
|
||||
pendingInstances := 0
|
||||
@@ -139,7 +164,7 @@ func reportEpochMetrics(ctx context.Context, postState, headState *stateTrie.Bea
|
||||
slashingEffectiveBalance := uint64(0)
|
||||
|
||||
for i, validator := range postState.Validators() {
|
||||
bal, err := postState.BalanceAtIndex(uint64(i))
|
||||
bal, err := postState.BalanceAtIndex(types.ValidatorIndex(i))
|
||||
if err != nil {
|
||||
log.Errorf("Could not load validator balance: %v", err)
|
||||
continue
|
||||
@@ -177,6 +202,7 @@ func reportEpochMetrics(ctx context.Context, postState, headState *stateTrie.Bea
|
||||
activeBalance += exitingBalance + slashingBalance
|
||||
activeEffectiveBalance += exitingEffectiveBalance + slashingEffectiveBalance
|
||||
|
||||
activeValidatorCount.Set(float64(activeInstances))
|
||||
validatorsCount.WithLabelValues("Pending").Set(float64(pendingInstances))
|
||||
validatorsCount.WithLabelValues("Active").Set(float64(activeInstances))
|
||||
validatorsCount.WithLabelValues("Exiting").Set(float64(exitingInstances))
|
||||
@@ -203,15 +229,33 @@ func reportEpochMetrics(ctx context.Context, postState, headState *stateTrie.Bea
|
||||
beaconFinalizedEpoch.Set(float64(postState.FinalizedCheckpointEpoch()))
|
||||
beaconFinalizedRoot.Set(float64(bytesutil.ToLowInt64(postState.FinalizedCheckpoint().Root)))
|
||||
currentEth1DataDepositCount.Set(float64(postState.Eth1Data().DepositCount))
|
||||
processedDepositsCount.Set(float64(postState.Eth1DepositIndex() + 1))
|
||||
|
||||
// Validator participation should be viewed on the canonical chain.
|
||||
v, b, err := precompute.New(ctx, headState)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, b, err = precompute.ProcessAttestations(ctx, headState, v, b)
|
||||
if err != nil {
|
||||
return err
|
||||
var b *precompute.Balance
|
||||
var v []*precompute.Validator
|
||||
var err error
|
||||
switch headState.Version() {
|
||||
case version.Phase0:
|
||||
// Validator participation should be viewed on the canonical chain.
|
||||
v, b, err = precompute.New(ctx, headState)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, b, err = precompute.ProcessAttestations(ctx, headState, v, b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
case version.Altair:
|
||||
v, b, err = altair.InitializeEpochValidators(ctx, headState)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, b, err = altair.ProcessEpochParticipation(ctx, headState, b, v)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
return errors.Errorf("invalid state type provided: %T", headState.InnerStateUnsafe())
|
||||
}
|
||||
prevEpochActiveBalances.Set(float64(b.ActivePrevEpoch))
|
||||
prevEpochSourceBalances.Set(float64(b.PrevEpochAttested))
|
||||
@@ -226,8 +270,8 @@ func reportEpochMetrics(ctx context.Context, postState, headState *stateTrie.Bea
|
||||
return nil
|
||||
}
|
||||
|
||||
func reportAttestationInclusion(blk *ethpb.BeaconBlock) {
|
||||
for _, att := range blk.Body.Attestations {
|
||||
attestationInclusionDelay.Observe(float64(blk.Slot - att.Data.Slot))
|
||||
func reportAttestationInclusion(blk block.BeaconBlock) {
|
||||
for _, att := range blk.Body().Attestations() {
|
||||
attestationInclusionDelay.Observe(float64(blk.Slot() - att.Data.Slot))
|
||||
}
|
||||
}
|
||||
|
||||
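reportEpochMetrics above now branches on the state version so Phase0 and Altair states use different participation pipelines (precompute for Phase0, the altair package for Altair). A minimal sketch of that dispatch shape only, with hypothetical version constants and stubbed balance computation standing in for the real packages.

package metrics

import "fmt"

// Hypothetical version identifiers; Prysm keeps the real ones in runtime/version.
const (
	phase0 = iota
	altair
)

type balances struct {
	activePrevEpoch   uint64
	prevEpochAttested uint64
}

// epochBalances picks a computation path based on the state's fork version and
// rejects anything it does not recognize, mirroring the switch in the hunk above.
func epochBalances(stateVersion int) (*balances, error) {
	switch stateVersion {
	case phase0:
		// Phase0 states carry pending attestations that are pre-processed first.
		return &balances{activePrevEpoch: 1, prevEpochAttested: 1}, nil
	case altair:
		// Altair states track participation flags instead of pending attestations.
		return &balances{activePrevEpoch: 1, prevEpochAttested: 1}, nil
	default:
		return nil, fmt.Errorf("invalid state version: %d", stateVersion)
	}
}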
@@ -4,23 +4,38 @@ import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
)
|
||||
|
||||
func TestReportEpochMetrics_BadHeadState(t *testing.T) {
|
||||
s := testutil.NewBeaconState()
|
||||
h := testutil.NewBeaconState()
|
||||
s, err := testutil.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
h, err := testutil.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, h.SetValidators(nil))
|
||||
err := reportEpochMetrics(context.Background(), s, h)
|
||||
err = reportEpochMetrics(context.Background(), s, h)
|
||||
require.ErrorContains(t, "failed to initialize precompute: nil validators in state", err)
|
||||
}
|
||||
|
||||
func TestReportEpochMetrics_BadAttestation(t *testing.T) {
|
||||
s := testutil.NewBeaconState()
|
||||
h := testutil.NewBeaconState()
|
||||
require.NoError(t, h.SetCurrentEpochAttestations([]*pb.PendingAttestation{{InclusionDelay: 0}}))
|
||||
err := reportEpochMetrics(context.Background(), s, h)
|
||||
s, err := testutil.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
h, err := testutil.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, h.AppendCurrentEpochAttestations(ð.PendingAttestation{InclusionDelay: 0}))
|
||||
err = reportEpochMetrics(context.Background(), s, h)
|
||||
require.ErrorContains(t, "attestation with inclusion delay of 0", err)
|
||||
}
|
||||
|
||||
func TestReportEpochMetrics_SlashedValidatorOutOfBound(t *testing.T) {
|
||||
h, _ := testutil.DeterministicGenesisState(t, 1)
|
||||
v, err := h.ValidatorAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
v.Slashed = true
|
||||
require.NoError(t, h.UpdateValidatorAtIndex(0, v))
|
||||
require.NoError(t, h.AppendCurrentEpochAttestations(ð.PendingAttestation{InclusionDelay: 1, Data: testutil.HydrateAttestationData(ð.AttestationData{})}))
|
||||
err = reportEpochMetrics(context.Background(), h, h)
|
||||
require.ErrorContains(t, "slot 0 out of bounds", err)
|
||||
}
|
||||
|
||||
@@ -2,25 +2,21 @@ package blockchain
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/attestation"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/timeutils"
|
||||
"github.com/prysmaticlabs/prysm/time"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// ErrTargetRootNotInDB returns when the target block root of an attestation cannot be found in the
|
||||
// beacon database.
|
||||
var ErrTargetRootNotInDB = errors.New("target root does not exist in db")
|
||||
|
||||
// onAttestation is called whenever an attestation is received, verifies the attestation is valid and saves
|
||||
// it to the DB. As a stateless function, this does not hold nor delay attestation based on the spec descriptions.
|
||||
// The delay is handled by the caller in `processAttestation`.
|
||||
// The delay is handled by the caller in `processAttestations`.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
// def on_attestation(store: Store, attestation: Attestation) -> None:
|
||||
@@ -40,73 +36,68 @@ var ErrTargetRootNotInDB = errors.New("target root does not exist in db")
|
||||
//
|
||||
// # Update latest messages for attesting indices
|
||||
// update_latest_messages(store, indexed_attestation.attesting_indices, attestation)
|
||||
// TODO(#6072): This code path is highly untested. Requires comprehensive tests and simpler refactoring.
|
||||
func (s *Service) onAttestation(ctx context.Context, a *ethpb.Attestation) ([]uint64, error) {
|
||||
func (s *Service) onAttestation(ctx context.Context, a *ethpb.Attestation) error {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.onAttestation")
|
||||
defer span.End()
|
||||
|
||||
if a == nil {
|
||||
return nil, errors.New("nil attestation")
|
||||
if err := helpers.ValidateNilAttestation(a); err != nil {
|
||||
return err
|
||||
}
|
||||
if a.Data == nil {
|
||||
return nil, errors.New("nil attestation.Data field")
|
||||
}
|
||||
if a.Data.Target == nil {
|
||||
return nil, errors.New("nil attestation.Data.Target field")
|
||||
if err := helpers.ValidateSlotTargetEpoch(a.Data); err != nil {
|
||||
return err
|
||||
}
|
||||
tgt := ethpb.CopyCheckpoint(a.Data.Target)
|
||||
|
||||
tgt := stateTrie.CopyCheckpoint(a.Data.Target)
|
||||
|
||||
if helpers.SlotToEpoch(a.Data.Slot) != a.Data.Target.Epoch {
|
||||
return nil, fmt.Errorf("data slot is not in the same epoch as target %d != %d", helpers.SlotToEpoch(a.Data.Slot), a.Data.Target.Epoch)
|
||||
}
|
||||
|
||||
// Verify beacon node has seen the target block before.
|
||||
if !s.hasBlock(ctx, bytesutil.ToBytes32(tgt.Root)) {
|
||||
return nil, ErrTargetRootNotInDB
|
||||
}
|
||||
// Note that target root check is ignored here because it was performed in sync's validation pipeline:
|
||||
// validate_aggregate_proof.go and validate_beacon_attestation.go
|
||||
// If missing target root were to fail in this method, it would have just failed in `getAttPreState`.
|
||||
|
||||
// Retrieve attestation's data beacon block pre state. Advance pre state to latest epoch if necessary and
|
||||
// save it to the cache.
|
||||
baseState, err := s.getAttPreState(ctx, tgt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
|
||||
genesisTime := baseState.GenesisTime()
|
||||
|
||||
// Verify attestation target is from current epoch or previous epoch.
|
||||
if err := s.verifyAttTargetEpoch(ctx, genesisTime, uint64(timeutils.Now().Unix()), tgt); err != nil {
|
||||
return nil, err
|
||||
if err := s.verifyAttTargetEpoch(ctx, genesisTime, uint64(time.Now().Unix()), tgt); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Verify attestation beacon block is known and not from the future.
|
||||
if err := s.verifyBeaconBlock(ctx, a.Data); err != nil {
|
||||
return nil, errors.Wrap(err, "could not verify attestation beacon block")
|
||||
return errors.Wrap(err, "could not verify attestation beacon block")
|
||||
}
|
||||
|
||||
// Verify LMD GHOST and FFG votes are consistent with each other.
|
||||
if err := s.verifyLMDFFGConsistent(ctx, a.Data.Target.Epoch, a.Data.Target.Root, a.Data.BeaconBlockRoot); err != nil {
|
||||
return nil, errors.Wrap(err, "could not verify attestation beacon block")
|
||||
}
|
||||
// Note that LMD GHOST and FFG consistency check is ignored because it was performed in sync's validation pipeline:
|
||||
// validate_aggregate_proof.go and validate_beacon_attestation.go
|
||||
|
||||
// Verify attestations can only affect the fork choice of subsequent slots.
|
||||
if err := helpers.VerifySlotTime(genesisTime, a.Data.Slot+1, params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil {
|
||||
return nil, err
|
||||
if err := core.VerifySlotTime(genesisTime, a.Data.Slot+1, params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Use the target state to validate attestation and calculate the committees.
|
||||
indexedAtt, err := s.verifyAttestation(ctx, baseState, a)
|
||||
// Use the target state to verify attesting indices are valid.
|
||||
committee, err := helpers.BeaconCommitteeFromState(baseState, a.Data.Slot, a.Data.CommitteeIndex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
indexedAtt, err := attestation.ConvertToIndexed(ctx, a, committee)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := attestation.IsValidAttestationIndices(ctx, indexedAtt); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if indexedAtt.AttestingIndices == nil {
|
||||
return nil, errors.New("nil attesting indices")
|
||||
}
|
||||
// Note that signature verification is ignored here because it was performed in sync's validation pipeline:
|
||||
// validate_aggregate_proof.go and validate_beacon_attestation.go
|
||||
// We assume trusted attestation in this function has verified signature.
|
||||
|
||||
// Update forkchoice store with the new attestation for updating weight.
|
||||
s.forkChoiceStore.ProcessAttestation(ctx, indexedAtt.AttestingIndices, bytesutil.ToBytes32(a.Data.BeaconBlockRoot), a.Data.Target.Epoch)
|
||||
s.cfg.ForkChoiceStore.ProcessAttestation(ctx, indexedAtt.AttestingIndices, bytesutil.ToBytes32(a.Data.BeaconBlockRoot), a.Data.Target.Epoch)
|
||||
|
||||
return indexedAtt.AttestingIndices, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
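The rewritten onAttestation above derives the committee from the target state and converts the attestation to its indexed form before updating fork choice. A self-contained sketch of that conversion, the usual algorithm of collecting the committee members whose aggregation bit is set; plain slices stand in for Prysm's bitfield and proto types.

package attestation

import "sort"

// toIndexed returns the sorted validator indices that participated in an
// aggregate attestation, given the committee for its slot and committee index.
// bits[i] corresponds to committee[i]; both are illustrative stand-ins for
// Prysm's bitlist and committee types.
func toIndexed(committee []uint64, bits []bool) []uint64 {
	indices := make([]uint64, 0, len(committee))
	for i, set := range bits {
		if set && i < len(committee) {
			indices = append(indices, committee[i])
		}
	}
	// The spec expects the attesting indices to be sorted.
	sort.Slice(indices, func(i, j int) bool { return indices[i] < indices[j] })
	return indices
}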
@@ -1,70 +1,60 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/async"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
|
||||
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/shared/attestationutil"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/mputil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
)
|
||||
|
||||
// getAttPreState retrieves the att pre state by either from the cache or the DB.
|
||||
func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (*stateTrie.BeaconState, error) {
|
||||
func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (state.BeaconState, error) {
|
||||
// Use a multilock to allow scoped holding of a mutex by a checkpoint root + epoch
|
||||
// allowing us to behave smarter in terms of how this function is used concurrently.
|
||||
epochKey := strconv.FormatUint(c.Epoch, 10 /* base 10 */)
|
||||
lock := mputil.NewMultilock(string(c.Root) + epochKey)
|
||||
epochKey := strconv.FormatUint(uint64(c.Epoch), 10 /* base 10 */)
|
||||
lock := async.NewMultilock(string(c.Root) + epochKey)
|
||||
lock.Lock()
|
||||
defer lock.Unlock()
|
||||
cachedState, err := s.checkpointStateCache.StateByCheckpoint(c)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get cached checkpoint state")
|
||||
}
|
||||
if cachedState != nil {
|
||||
if cachedState != nil && !cachedState.IsNil() {
|
||||
return cachedState, nil
|
||||
}
|
||||
|
||||
baseState, err := s.stateGen.StateByRoot(ctx, bytesutil.ToBytes32(c.Root))
|
||||
baseState, err := s.cfg.StateGen.StateByRoot(ctx, bytesutil.ToBytes32(c.Root))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get pre state for epoch %d", c.Epoch)
|
||||
}
|
||||
|
||||
epochStartSlot, err := helpers.StartSlot(c.Epoch)
|
||||
epochStartSlot, err := core.StartSlot(c.Epoch)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if epochStartSlot > baseState.Slot() {
|
||||
baseState = baseState.Copy()
|
||||
baseState, err = state.ProcessSlots(ctx, baseState, epochStartSlot)
|
||||
baseState, err = transition.ProcessSlots(ctx, baseState, epochStartSlot)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not process slots up to epoch %d", c.Epoch)
|
||||
}
|
||||
if err := s.checkpointStateCache.AddCheckpointState(c, baseState); err != nil {
|
||||
return nil, errors.Wrap(err, "could not saved checkpoint state to cache")
|
||||
}
|
||||
return baseState, nil
|
||||
}
|
||||
|
||||
// To avoid sharing the same state across checkpoint state cache and hot state cache,
|
||||
// we don't add the state to check point cache.
|
||||
has, err := s.stateGen.HasStateInCache(ctx, bytesutil.ToBytes32(c.Root))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !has {
|
||||
if err := s.checkpointStateCache.AddCheckpointState(c, baseState); err != nil {
|
||||
return nil, errors.Wrap(err, "could not saved checkpoint state to cache")
|
||||
}
|
||||
// Sharing the same state across caches is perfectly fine here, the fetching
|
||||
// of attestation prestate is by far the most accessed state fetching pattern in
|
||||
// the beacon node. An extra state instance cached isn't an issue in the bigger
|
||||
// picture.
|
||||
if err := s.checkpointStateCache.AddCheckpointState(c, baseState); err != nil {
|
||||
return nil, errors.Wrap(err, "could not save checkpoint state to cache")
|
||||
}
|
||||
return baseState, nil
|
||||
|
||||
@@ -72,9 +62,9 @@ func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (*sta
|
||||
|
||||
// verifyAttTargetEpoch validates attestation is from the current or previous epoch.
|
||||
func (s *Service) verifyAttTargetEpoch(_ context.Context, genesisTime, nowTime uint64, c *ethpb.Checkpoint) error {
|
||||
currentSlot := (nowTime - genesisTime) / params.BeaconConfig().SecondsPerSlot
|
||||
currentEpoch := helpers.SlotToEpoch(currentSlot)
|
||||
var prevEpoch uint64
|
||||
currentSlot := types.Slot((nowTime - genesisTime) / params.BeaconConfig().SecondsPerSlot)
|
||||
currentEpoch := core.SlotToEpoch(currentSlot)
|
||||
var prevEpoch types.Epoch
|
||||
// Prevents previous epoch under flow
|
||||
if currentEpoch > 1 {
|
||||
prevEpoch = currentEpoch - 1
|
||||
@@ -88,50 +78,20 @@ func (s *Service) verifyAttTargetEpoch(_ context.Context, genesisTime, nowTime u
|
||||
// verifyBeaconBlock verifies beacon head block is known and not from the future.
|
||||
func (s *Service) verifyBeaconBlock(ctx context.Context, data *ethpb.AttestationData) error {
|
||||
r := bytesutil.ToBytes32(data.BeaconBlockRoot)
|
||||
b, err := s.beaconDB.Block(ctx, r)
|
||||
b, err := s.cfg.BeaconDB.Block(ctx, r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// If the block does not exist in db, check again if block exists in initial sync block cache.
|
||||
// This could happen as the node first syncs to head.
|
||||
if b == nil && s.hasInitSyncBlock(r) {
|
||||
if (b == nil || b.IsNil()) && s.hasInitSyncBlock(r) {
|
||||
b = s.getInitSyncBlock(r)
|
||||
}
|
||||
if b == nil || b.Block == nil {
|
||||
return fmt.Errorf("beacon block %#x does not exist", bytesutil.Trunc(data.BeaconBlockRoot))
|
||||
if err := helpers.VerifyNilBeaconBlock(b); err != nil {
|
||||
return err
|
||||
}
|
||||
if b.Block.Slot > data.Slot {
|
||||
return fmt.Errorf("could not process attestation for future block, block.Slot=%d > attestation.Data.Slot=%d", b.Block.Slot, data.Slot)
|
||||
if b.Block().Slot() > data.Slot {
|
||||
return fmt.Errorf("could not process attestation for future block, block.Slot=%d > attestation.Data.Slot=%d", b.Block().Slot(), data.Slot)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// verifyLMDFFGConsistent verifies LMD GHOST and FFG votes are consistent with each other.
|
||||
func (s *Service) verifyLMDFFGConsistent(ctx context.Context, ffgEpoch uint64, ffgRoot, lmdRoot []byte) error {
|
||||
ffgSlot, err := helpers.StartSlot(ffgEpoch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r, err := s.ancestor(ctx, lmdRoot, ffgSlot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !bytes.Equal(ffgRoot, r) {
|
||||
return errors.New("FFG and LMD votes are not consistent")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// verifyAttestation validates input attestation is valid.
|
||||
func (s *Service) verifyAttestation(ctx context.Context, baseState *stateTrie.BeaconState, a *ethpb.Attestation) (*ethpb.IndexedAttestation, error) {
|
||||
committee, err := helpers.BeaconCommitteeFromState(baseState, a.Data.Slot, a.Data.CommitteeIndex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
indexedAtt := attestationutil.ConvertToIndexed(ctx, a, committee)
|
||||
if err := blocks.VerifyIndexedAttestation(ctx, baseState, indexedAtt); err != nil {
|
||||
return nil, errors.Wrap(err, "could not verify indexed attestation")
|
||||
}
|
||||
return indexedAtt, nil
|
||||
}
|
||||
|
||||
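getAttPreState above guards each checkpoint with a lock keyed by root plus epoch, so concurrent callers for the same checkpoint compute its pre-state once while different checkpoints proceed in parallel. A rough sketch of that keyed-lock-plus-cache idea using only the standard library; the string-valued "state" and the compute callback are simplified stand-ins for Prysm's state generation.

package checkpointcache

import "sync"

type checkpointCache struct {
	mu     sync.Mutex
	locks  map[string]*sync.Mutex
	states map[string]string // checkpoint key -> computed "state" (placeholder type)
}

func newCheckpointCache() *checkpointCache {
	return &checkpointCache{locks: map[string]*sync.Mutex{}, states: map[string]string{}}
}

// lockFor returns a per-key mutex, creating it on first use.
func (c *checkpointCache) lockFor(key string) *sync.Mutex {
	c.mu.Lock()
	defer c.mu.Unlock()
	if _, ok := c.locks[key]; !ok {
		c.locks[key] = &sync.Mutex{}
	}
	return c.locks[key]
}

// stateFor returns the cached value for a checkpoint key, computing it at most
// once per key while holding only that key's lock.
func (c *checkpointCache) stateFor(key string, compute func() string) string {
	l := c.lockFor(key)
	l.Lock()
	defer l.Unlock()

	c.mu.Lock()
	cached, ok := c.states[key]
	c.mu.Unlock()
	if ok {
		return cached
	}

	s := compute()
	c.mu.Lock()
	c.states[key] = s
	c.mu.Unlock()
	return s
}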
@@ -4,22 +4,23 @@ import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
"github.com/prysmaticlabs/prysm/time"
|
||||
)
|
||||
|
||||
func TestStore_OnAttestation(t *testing.T) {
|
||||
func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
@@ -31,39 +32,41 @@ func TestStore_OnAttestation(t *testing.T) {
|
||||
service, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = blockTree1(beaconDB, []byte{'g'})
|
||||
_, err = blockTree1(t, beaconDB, []byte{'g'})
|
||||
require.NoError(t, err)
|
||||
|
||||
BlkWithOutState := testutil.NewBeaconBlock()
|
||||
BlkWithOutState.Block.Slot = 0
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, BlkWithOutState))
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(BlkWithOutState)))
|
||||
BlkWithOutStateRoot, err := BlkWithOutState.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
BlkWithStateBadAtt := testutil.NewBeaconBlock()
|
||||
BlkWithStateBadAtt.Block.Slot = 1
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, BlkWithStateBadAtt))
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(BlkWithStateBadAtt)))
|
||||
BlkWithStateBadAttRoot, err := BlkWithStateBadAtt.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
s := testutil.NewBeaconState()
|
||||
s, err := testutil.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.SetSlot(100*params.BeaconConfig().SlotsPerEpoch))
|
||||
require.NoError(t, service.beaconDB.SaveState(ctx, s, BlkWithStateBadAttRoot))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, BlkWithStateBadAttRoot))
|
||||
|
||||
BlkWithValidState := testutil.NewBeaconBlock()
|
||||
BlkWithValidState.Block.Slot = 2
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, BlkWithValidState))
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(BlkWithValidState)))
|
||||
|
||||
BlkWithValidStateRoot, err := BlkWithValidState.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
s = testutil.NewBeaconState()
|
||||
err = s.SetFork(&pb.Fork{
|
||||
s, err = testutil.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
err = s.SetFork(ðpb.Fork{
|
||||
Epoch: 0,
|
||||
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.beaconDB.SaveState(ctx, s, BlkWithValidStateRoot))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, BlkWithValidStateRoot))
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
@@ -72,34 +75,29 @@ func TestStore_OnAttestation(t *testing.T) {
|
||||
}{
|
||||
{
|
||||
name: "attestation's data slot not aligned with target vote",
|
||||
a: ðpb.Attestation{Data: ðpb.AttestationData{Slot: params.BeaconConfig().SlotsPerEpoch, Target: ðpb.Checkpoint{Root: make([]byte, 32)}}},
|
||||
wantedErr: "data slot is not in the same epoch as target 1 != 0",
|
||||
},
|
||||
{
|
||||
name: "attestation's target root not in db",
|
||||
a: ðpb.Attestation{Data: ðpb.AttestationData{Target: ðpb.Checkpoint{Root: bytesutil.PadTo([]byte{'A'}, 32)}}},
|
||||
wantedErr: "target root does not exist in db",
|
||||
a: testutil.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: params.BeaconConfig().SlotsPerEpoch, Target: ðpb.Checkpoint{Root: make([]byte, 32)}}}),
|
||||
wantedErr: "slot 32 does not match target epoch 0",
|
||||
},
|
||||
{
|
||||
name: "no pre state for attestations's target block",
|
||||
a: ðpb.Attestation{Data: ðpb.AttestationData{Target: ðpb.Checkpoint{Root: BlkWithOutStateRoot[:]}}},
|
||||
a: testutil.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Target: ðpb.Checkpoint{Root: BlkWithOutStateRoot[:]}}}),
|
||||
wantedErr: "could not get pre state for epoch 0",
|
||||
},
|
||||
{
|
||||
name: "process attestation doesn't match current epoch",
|
||||
a: ðpb.Attestation{Data: ðpb.AttestationData{Slot: 100 * params.BeaconConfig().SlotsPerEpoch, Target: ðpb.Checkpoint{Epoch: 100,
|
||||
Root: BlkWithStateBadAttRoot[:]}}},
|
||||
a: testutil.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 100 * params.BeaconConfig().SlotsPerEpoch, Target: ðpb.Checkpoint{Epoch: 100,
|
||||
Root: BlkWithStateBadAttRoot[:]}}}),
|
||||
wantedErr: "target epoch 100 does not match current epoch",
|
||||
},
|
||||
{
|
||||
name: "process nil attestation",
|
||||
a: nil,
|
||||
wantedErr: "nil attestation",
|
||||
wantedErr: "attestation can't be nil",
|
||||
},
|
||||
{
|
||||
name: "process nil field (a.Data) in attestation",
|
||||
a: ðpb.Attestation{},
|
||||
wantedErr: "nil attestation.Data field",
|
||||
wantedErr: "attestation's data can't be nil",
|
||||
},
|
||||
{
|
||||
name: "process nil field (a.Target) in attestation",
|
||||
@@ -112,13 +110,13 @@ func TestStore_OnAttestation(t *testing.T) {
|
||||
AggregationBits: make([]byte, 1),
|
||||
Signature: make([]byte, 96),
|
||||
},
|
||||
wantedErr: "nil attestation.Data.Target field",
|
||||
wantedErr: "attestation's target can't be nil",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
_, err := service.onAttestation(ctx, tt.a)
|
||||
err := service.onAttestation(ctx, tt.a)
|
||||
if tt.wantedErr != "" {
|
||||
assert.ErrorContains(t, tt.wantedErr, err)
|
||||
} else {
|
||||
@@ -128,6 +126,31 @@ func TestStore_OnAttestation(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestStore_OnAttestation_Ok(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{
|
||||
BeaconDB: beaconDB,
|
||||
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
|
||||
StateGen: stategen.New(beaconDB),
|
||||
}
|
||||
service, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
genesisState, pks := testutil.DeterministicGenesisState(t, 64)
|
||||
require.NoError(t, genesisState.SetGenesisTime(uint64(time.Now().Unix())-params.BeaconConfig().SecondsPerSlot))
|
||||
require.NoError(t, service.saveGenesisData(ctx, genesisState))
|
||||
att, err := testutil.GenerateAttestations(genesisState, pks, 1, 0, false)
|
||||
require.NoError(t, err)
|
||||
tRoot := bytesutil.ToBytes32(att[0].Data.Target.Root)
|
||||
copied := genesisState.Copy()
|
||||
copied, err = transition.ProcessSlots(ctx, copied, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(ctx, 0, tRoot, tRoot, tRoot, 1, 1))
|
||||
require.NoError(t, service.onAttestation(ctx, att[0]))
|
||||
}
|
||||
|
||||
func TestStore_SaveCheckpointState(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
@@ -139,7 +162,8 @@ func TestStore_SaveCheckpointState(t *testing.T) {
|
||||
service, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
s := testutil.NewBeaconState()
|
||||
s, err := testutil.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
err = s.SetFinalizedCheckpoint(ðpb.Checkpoint{Root: bytesutil.PadTo([]byte{'A'}, 32)})
|
||||
require.NoError(t, err)
|
||||
val := ðpb.Validator{
|
||||
@@ -151,7 +175,7 @@ func TestStore_SaveCheckpointState(t *testing.T) {
|
||||
err = s.SetBalances([]uint64{0})
|
||||
require.NoError(t, err)
|
||||
r := [32]byte{'g'}
|
||||
require.NoError(t, service.beaconDB.SaveState(ctx, s, r))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, r))
|
||||
|
||||
service.justifiedCheckpt = ðpb.Checkpoint{Root: r[:]}
|
||||
service.bestJustifiedCheckpt = ðpb.Checkpoint{Root: r[:]}
|
||||
@@ -160,16 +184,16 @@ func TestStore_SaveCheckpointState(t *testing.T) {
|
||||
|
||||
r = bytesutil.ToBytes32([]byte{'A'})
|
||||
cp1 := ðpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'A'}, 32)}
|
||||
require.NoError(t, service.beaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'A'})))
|
||||
require.NoError(t, service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{Root: bytesutil.PadTo([]byte{'A'}, 32)}))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'A'})))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, ðpb.StateSummary{Root: bytesutil.PadTo([]byte{'A'}, 32)}))
|
||||
|
||||
s1, err := service.getAttPreState(ctx, cp1)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 1*params.BeaconConfig().SlotsPerEpoch, s1.Slot(), "Unexpected state slot")
|
||||
|
||||
cp2 := ðpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte{'B'}, 32)}
|
||||
require.NoError(t, service.beaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'B'})))
|
||||
require.NoError(t, service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{Root: bytesutil.PadTo([]byte{'B'}, 32)}))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'B'})))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, ðpb.StateSummary{Root: bytesutil.PadTo([]byte{'B'}, 32)}))
|
||||
s2, err := service.getAttPreState(ctx, cp2)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 2*params.BeaconConfig().SlotsPerEpoch, s2.Slot(), "Unexpected state slot")
|
||||
@@ -192,8 +216,8 @@ func TestStore_SaveCheckpointState(t *testing.T) {
|
||||
service.finalizedCheckpt = ðpb.Checkpoint{Root: r[:]}
|
||||
service.prevFinalizedCheckpt = ðpb.Checkpoint{Root: r[:]}
|
||||
cp3 := ðpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'C'}, 32)}
|
||||
require.NoError(t, service.beaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'C'})))
|
||||
require.NoError(t, service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{Root: bytesutil.PadTo([]byte{'C'}, 32)}))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'C'})))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, ðpb.StateSummary{Root: bytesutil.PadTo([]byte{'C'}, 32)}))
|
||||
s3, err := service.getAttPreState(ctx, cp3)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, s.Slot(), s3.Slot(), "Unexpected state slot")
|
||||
@@ -210,34 +234,32 @@ func TestStore_UpdateCheckpointState(t *testing.T) {
|
||||
service, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
epoch := uint64(1)
|
||||
epoch := types.Epoch(1)
|
||||
baseState, _ := testutil.DeterministicGenesisState(t, 1)
|
||||
checkpoint := ðpb.Checkpoint{Epoch: epoch, Root: bytesutil.PadTo([]byte("hi"), 32)}
|
||||
require.NoError(t, service.beaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(checkpoint.Root)))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(checkpoint.Root)))
|
||||
returned, err := service.getAttPreState(ctx, checkpoint)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, returned.Slot(), checkpoint.Epoch*params.BeaconConfig().SlotsPerEpoch, "Incorrectly returned base state")
|
||||
assert.Equal(t, params.BeaconConfig().SlotsPerEpoch.Mul(uint64(checkpoint.Epoch)), returned.Slot(), "Incorrectly returned base state")
|
||||
|
||||
cached, err := service.checkpointStateCache.StateByCheckpoint(checkpoint)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, returned.Slot(), cached.Slot(), "State should have been cached")
|
||||
|
||||
epoch = uint64(2)
|
||||
epoch = 2
|
||||
newCheckpoint := ðpb.Checkpoint{Epoch: epoch, Root: bytesutil.PadTo([]byte("bye"), 32)}
|
||||
require.NoError(t, service.beaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(newCheckpoint.Root)))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(newCheckpoint.Root)))
|
||||
returned, err = service.getAttPreState(ctx, newCheckpoint)
|
||||
require.NoError(t, err)
|
||||
s, err := helpers.StartSlot(newCheckpoint.Epoch)
|
||||
s, err := core.StartSlot(newCheckpoint.Epoch)
|
||||
require.NoError(t, err)
|
||||
baseState, err = state.ProcessSlots(ctx, baseState, s)
|
||||
baseState, err = transition.ProcessSlots(ctx, baseState, s)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, returned.Slot(), baseState.Slot(), "Incorrectly returned base state")
|
||||
|
||||
cached, err = service.checkpointStateCache.StateByCheckpoint(newCheckpoint)
|
||||
require.NoError(t, err)
|
||||
if !proto.Equal(returned.InnerStateUnsafe(), cached.InnerStateUnsafe()) {
|
||||
t.Error("Incorrectly cached base state")
|
||||
}
|
||||
require.DeepSSZEqual(t, returned.InnerStateUnsafe(), cached.InnerStateUnsafe())
|
||||
}
|
||||
|
||||
func TestAttEpoch_MatchPrevEpoch(t *testing.T) {
|
||||
@@ -248,7 +270,7 @@ func TestAttEpoch_MatchPrevEpoch(t *testing.T) {
|
||||
service, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
nowTime := params.BeaconConfig().SlotsPerEpoch * params.BeaconConfig().SecondsPerSlot
|
||||
nowTime := uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().SecondsPerSlot
|
||||
require.NoError(t, service.verifyAttTargetEpoch(ctx, 0, nowTime, ðpb.Checkpoint{Root: make([]byte, 32)}))
|
||||
}
|
||||
|
||||
@@ -260,7 +282,7 @@ func TestAttEpoch_MatchCurrentEpoch(t *testing.T) {
|
||||
service, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
nowTime := params.BeaconConfig().SlotsPerEpoch * params.BeaconConfig().SecondsPerSlot
|
||||
nowTime := uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().SecondsPerSlot
|
||||
require.NoError(t, service.verifyAttTargetEpoch(ctx, 0, nowTime, ðpb.Checkpoint{Epoch: 1}))
|
||||
}
|
||||
|
||||
@@ -272,7 +294,7 @@ func TestAttEpoch_NotMatch(t *testing.T) {
|
||||
service, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
nowTime := 2 * params.BeaconConfig().SlotsPerEpoch * params.BeaconConfig().SecondsPerSlot
|
||||
nowTime := 2 * uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().SecondsPerSlot
|
||||
err = service.verifyAttTargetEpoch(ctx, 0, nowTime, ðpb.Checkpoint{Root: make([]byte, 32)})
|
||||
assert.ErrorContains(t, "target epoch 0 does not match current epoch 2 or prev epoch 1", err)
|
||||
}
|
||||
@@ -286,7 +308,7 @@ func TestVerifyBeaconBlock_NoBlock(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
d := testutil.HydrateAttestationData(ðpb.AttestationData{})
|
||||
assert.ErrorContains(t, "beacon block 0x000000000000 does not exist", service.verifyBeaconBlock(ctx, d))
|
||||
assert.ErrorContains(t, "signed beacon block can't be nil", service.verifyBeaconBlock(ctx, d))
|
||||
}
|
||||
|
||||
func TestVerifyBeaconBlock_futureBlock(t *testing.T) {
|
||||
@@ -299,7 +321,7 @@ func TestVerifyBeaconBlock_futureBlock(t *testing.T) {
|
||||
|
||||
b := testutil.NewBeaconBlock()
|
||||
b.Block.Slot = 2
|
||||
require.NoError(t, service.beaconDB.SaveBlock(ctx, b))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b)))
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
d := ðpb.AttestationData{Slot: 1, BeaconBlockRoot: r[:]}
|
||||
@@ -317,7 +339,7 @@ func TestVerifyBeaconBlock_OK(t *testing.T) {
|
||||
|
||||
b := testutil.NewBeaconBlock()
|
||||
b.Block.Slot = 2
|
||||
require.NoError(t, service.beaconDB.SaveBlock(ctx, b))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b)))
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
d := ðpb.AttestationData{Slot: 2, BeaconBlockRoot: r[:]}
|
||||
@@ -325,54 +347,6 @@ func TestVerifyBeaconBlock_OK(t *testing.T) {
|
||||
assert.NoError(t, service.verifyBeaconBlock(ctx, d), "Did not receive the wanted error")
|
||||
}
|
||||
|
||||
func TestVerifyLMDFFGConsistent_NotOK(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
|
||||
service, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
b32 := testutil.NewBeaconBlock()
|
||||
b32.Block.Slot = 32
|
||||
require.NoError(t, service.beaconDB.SaveBlock(ctx, b32))
|
||||
r32, err := b32.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
b33 := testutil.NewBeaconBlock()
|
||||
b33.Block.Slot = 33
|
||||
b33.Block.ParentRoot = r32[:]
|
||||
require.NoError(t, service.beaconDB.SaveBlock(ctx, b33))
|
||||
r33, err := b33.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
wanted := "FFG and LMD votes are not consistent"
|
||||
assert.ErrorContains(t, wanted, service.verifyLMDFFGConsistent(context.Background(), 1, []byte{'a'}, r33[:]))
|
||||
}
|
||||
|
||||
func TestVerifyLMDFFGConsistent_OK(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
|
||||
service, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
b32 := testutil.NewBeaconBlock()
|
||||
b32.Block.Slot = 32
|
||||
require.NoError(t, service.beaconDB.SaveBlock(ctx, b32))
|
||||
r32, err := b32.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
b33 := testutil.NewBeaconBlock()
|
||||
b33.Block.Slot = 33
|
||||
b33.Block.ParentRoot = r32[:]
|
||||
require.NoError(t, service.beaconDB.SaveBlock(ctx, b33))
|
||||
r33, err := b33.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
err = service.verifyLMDFFGConsistent(context.Background(), 1, r32[:], r33[:])
|
||||
assert.NoError(t, err, "Could not verify LMD and FFG votes to be consistent")
|
||||
}
|
||||
|
||||
func TestVerifyFinalizedConsistency_InconsistentRoot(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
@@ -383,7 +357,7 @@ func TestVerifyFinalizedConsistency_InconsistentRoot(t *testing.T) {
|
||||
|
||||
b32 := testutil.NewBeaconBlock()
|
||||
b32.Block.Slot = 32
|
||||
require.NoError(t, service.beaconDB.SaveBlock(ctx, b32))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b32)))
|
||||
r32, err := b32.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -392,7 +366,7 @@ func TestVerifyFinalizedConsistency_InconsistentRoot(t *testing.T) {
|
||||
b33 := testutil.NewBeaconBlock()
|
||||
b33.Block.Slot = 33
|
||||
b33.Block.ParentRoot = r32[:]
|
||||
require.NoError(t, service.beaconDB.SaveBlock(ctx, b33))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b33)))
|
||||
r33, err := b33.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -410,7 +384,7 @@ func TestVerifyFinalizedConsistency_OK(t *testing.T) {
|
||||
|
||||
b32 := testutil.NewBeaconBlock()
|
||||
b32.Block.Slot = 32
|
||||
require.NoError(t, service.beaconDB.SaveBlock(ctx, b32))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b32)))
|
||||
r32, err := b32.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -419,7 +393,7 @@ func TestVerifyFinalizedConsistency_OK(t *testing.T) {
|
||||
b33 := testutil.NewBeaconBlock()
|
||||
b33.Block.Slot = 33
|
||||
b33.Block.ParentRoot = r32[:]
|
||||
require.NoError(t, service.beaconDB.SaveBlock(ctx, b33))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b33)))
|
||||
r33, err := b33.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -448,10 +422,10 @@ func TestVerifyFinalizedConsistency_IsCanonical(t *testing.T) {
|
||||
r33, err := b33.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, service.forkChoiceStore.ProcessBlock(ctx, b32.Block.Slot, r32, [32]byte{}, [32]byte{}, 0, 0))
|
||||
require.NoError(t, service.forkChoiceStore.ProcessBlock(ctx, b33.Block.Slot, r33, r32, [32]byte{}, 0, 0))
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(ctx, b32.Block.Slot, r32, [32]byte{}, [32]byte{}, 0, 0))
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(ctx, b33.Block.Slot, r33, r32, [32]byte{}, 0, 0))
|
||||
|
||||
_, err = service.forkChoiceStore.Head(ctx, 0, r32, []uint64{}, 0)
|
||||
_, err = service.cfg.ForkChoiceStore.Head(ctx, 0, r32, []uint64{}, 0)
|
||||
require.NoError(t, err)
|
||||
err = service.VerifyFinalizedConsistency(context.Background(), r33[:])
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -3,23 +3,34 @@ package blockchain
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
|
||||
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/attestationutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/bls"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/config/features"
|
||||
"github.com/prysmaticlabs/prysm/crypto/bls"
|
||||
ethpbv1 "github.com/prysmaticlabs/prysm/proto/eth/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/attestation"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/featureconfig"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// A custom slot deadline for processing state slots in our cache.
|
||||
const slotDeadline = 5 * time.Second
|
||||
|
||||
// A custom deadline for deposit trie insertion.
|
||||
const depositDeadline = 20 * time.Second
|
||||
|
||||
// This defines size of the upper bound for initial sync block cache.
|
||||
var initialSyncBlockCacheSize = 2 * params.BeaconConfig().SlotsPerEpoch
|
||||
var initialSyncBlockCacheSize = uint64(2 * params.BeaconConfig().SlotsPerEpoch)
|
||||
|
||||
// onBlock is called when a gossip block is received. It runs regular state transition on the block.
|
||||
// The block's signing root should be computed before calling this method to avoid redundant
|
||||
@@ -42,7 +53,8 @@ var initialSyncBlockCacheSize = 2 * params.BeaconConfig().SlotsPerEpoch
|
||||
// assert get_ancestor(store, block.parent_root, finalized_slot) == store.finalized_checkpoint.root
|
||||
//
|
||||
// # Check the block is valid and compute the post-state
|
||||
// state = state_transition(pre_state, signed_block, True)
|
||||
// state = pre_state.copy()
|
||||
// state_transition(state, signed_block, True)
|
||||
// # Add new block to the store
|
||||
// store.blocks[hash_tree_root(block)] = block
|
||||
// # Add new state for this block to the store
|
||||
@@ -71,36 +83,43 @@ var initialSyncBlockCacheSize = 2 * params.BeaconConfig().SlotsPerEpoch
|
||||
// ancestor_at_finalized_slot = get_ancestor(store, store.justified_checkpoint.root, finalized_slot)
|
||||
// if ancestor_at_finalized_slot != store.finalized_checkpoint.root:
|
||||
// store.justified_checkpoint = state.current_justified_checkpoint
|
||||
func (s *Service) onBlock(ctx context.Context, signed *ethpb.SignedBeaconBlock, blockRoot [32]byte) error {
|
||||
func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, blockRoot [32]byte) error {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.onBlock")
|
||||
defer span.End()
|
||||
|
||||
if signed == nil || signed.Block == nil {
|
||||
if signed == nil || signed.IsNil() || signed.Block().IsNil() {
|
||||
return errors.New("nil block")
|
||||
}
|
||||
b := signed.Block
|
||||
b := signed.Block()
|
||||
|
||||
preState, err := s.getBlockPreState(ctx, b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
set, postState, err := state.ExecuteStateTransitionNoVerifyAnySig(ctx, preState, signed)
|
||||
postState, err := transition.ExecuteStateTransition(ctx, preState, signed)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not execute state transition")
|
||||
}
|
||||
valid, err := set.Verify()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not batch verify signature")
|
||||
}
|
||||
if !valid {
|
||||
return errors.New("signature in block failed to verify")
|
||||
return err
|
||||
}
|
||||
|
||||
if err := s.savePostStateInfo(ctx, blockRoot, signed, postState, false /* reg sync */); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Updating next slot state cache can happen in the background. It shouldn't block the rest of the process.
|
||||
if features.Get().EnableNextSlotStateCache {
|
||||
go func() {
|
||||
// Use a custom deadline here, since this method runs asynchronously.
|
||||
// We ignore the parent method's context and instead create a new one
|
||||
// with a custom deadline, therefore using the background context instead.
|
||||
slotCtx, cancel := context.WithTimeout(context.Background(), slotDeadline)
|
||||
defer cancel()
|
||||
if err := transition.UpdateNextSlotCache(slotCtx, blockRoot[:], postState); err != nil {
|
||||
log.WithError(err).Debug("could not update next slot state cache")
|
||||
}
|
||||
}()
|
||||
}
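The goroutine above detaches the next-slot cache refresh from block processing and bounds it with its own deadline, so a slow cache update can never stall onBlock. A minimal sketch of that pattern, with updateCache standing in for transition.UpdateNextSlotCache (the names and signature here are illustrative, not Prysm's API):

package example

import (
	"context"
	"log"
	"time"
)

// updateInBackground runs updateCache on its own timeout-bound context so the
// caller's context (and the block processing path) is never blocked by it.
func updateInBackground(root []byte, deadline time.Duration,
	updateCache func(ctx context.Context, root []byte) error) {
	go func() {
		ctx, cancel := context.WithTimeout(context.Background(), deadline)
		defer cancel()
		if err := updateCache(ctx, root); err != nil {
			log.Printf("could not update next slot state cache: %v", err)
		}
	}()
}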
// Update justified check point.
|
||||
if postState.CurrentJustifiedCheckpoint().Epoch > s.justifiedCheckpt.Epoch {
|
||||
if err := s.updateJustified(ctx, postState); err != nil {
|
||||
@@ -108,42 +127,71 @@ func (s *Service) onBlock(ctx context.Context, signed *ethpb.SignedBeaconBlock,
|
||||
}
|
||||
}
|
||||
|
||||
// Update finalized check point.
|
||||
if postState.FinalizedCheckpointEpoch() > s.finalizedCheckpt.Epoch {
|
||||
if err := s.beaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
|
||||
newFinalized := postState.FinalizedCheckpointEpoch() > s.finalizedCheckpt.Epoch
|
||||
if features.Get().UpdateHeadTimely {
|
||||
if newFinalized {
|
||||
if err := s.finalizedImpliesNewJustified(ctx, postState); err != nil {
|
||||
return errors.Wrap(err, "could not save new justified")
|
||||
}
|
||||
s.prevFinalizedCheckpt = s.finalizedCheckpt
|
||||
s.finalizedCheckpt = postState.FinalizedCheckpoint()
|
||||
}
|
||||
|
||||
if err := s.updateHead(ctx, s.getJustifiedBalances()); err != nil {
|
||||
log.WithError(err).Warn("Could not update head")
|
||||
}
|
||||
|
||||
if err := s.pruneCanonicalAttsFromPool(ctx, blockRoot, signed); err != nil {
|
||||
return err
|
||||
}
|
||||
s.clearInitSyncBlocks()
|
||||
|
||||
// Send notification of the processed block to the state feed.
|
||||
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
|
||||
Type: statefeed.BlockProcessed,
|
||||
Data: &statefeed.BlockProcessedData{
|
||||
Slot: signed.Block().Slot(),
|
||||
BlockRoot: blockRoot,
|
||||
SignedBlock: signed,
|
||||
Verified: true,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// Update finalized check point.
|
||||
if newFinalized {
|
||||
if err := s.updateFinalized(ctx, postState.FinalizedCheckpoint()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fRoot := bytesutil.ToBytes32(postState.FinalizedCheckpoint().Root)
|
||||
if err := s.forkChoiceStore.Prune(ctx, fRoot); err != nil {
|
||||
if err := s.cfg.ForkChoiceStore.Prune(ctx, fRoot); err != nil {
|
||||
return errors.Wrap(err, "could not prune proto array fork choice nodes")
|
||||
}
|
||||
|
||||
if err := s.finalizedImpliesNewJustified(ctx, postState); err != nil {
|
||||
return errors.Wrap(err, "could not save new justified")
|
||||
}
|
||||
|
||||
// Update deposit cache.
|
||||
finalizedState, err := s.stateGen.StateByRoot(ctx, fRoot)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not fetch finalized state")
|
||||
}
|
||||
// We update the cache up to the last deposit index in the finalized block's state.
|
||||
// We can be confident that these deposits will be included in some block
|
||||
// because the Eth1 follow distance makes such long-range reorgs extremely unlikely.
|
||||
eth1DepositIndex := int64(finalizedState.Eth1Data().DepositCount - 1)
|
||||
s.depositCache.InsertFinalizedDeposits(ctx, eth1DepositIndex)
|
||||
if featureconfig.Get().EnablePruningDepositProofs {
|
||||
// Deposit proofs are only used during state transition and can be safely removed to save space.
|
||||
if err = s.depositCache.PruneProofs(ctx, eth1DepositIndex); err != nil {
|
||||
return errors.Wrap(err, "could not prune deposit proofs")
|
||||
if !features.Get().UpdateHeadTimely {
|
||||
if err := s.finalizedImpliesNewJustified(ctx, postState); err != nil {
|
||||
return errors.Wrap(err, "could not save new justified")
|
||||
}
|
||||
}
|
||||
go func() {
|
||||
// Send an event regarding the new finalized checkpoint over a common event feed.
|
||||
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
|
||||
Type: statefeed.FinalizedCheckpoint,
|
||||
Data: ðpbv1.EventFinalizedCheckpoint{
|
||||
Epoch: postState.FinalizedCheckpoint().Epoch,
|
||||
Block: postState.FinalizedCheckpoint().Root,
|
||||
State: signed.Block().StateRoot(),
|
||||
},
|
||||
})
|
||||
|
||||
// Use a custom deadline here, since this method runs asynchronously.
|
||||
// We ignore the parent method's context and instead create a new one
|
||||
// with a custom deadline, therefore using the background context instead.
|
||||
depCtx, cancel := context.WithTimeout(context.Background(), depositDeadline)
|
||||
defer cancel()
|
||||
if err := s.insertFinalizedDeposits(depCtx, fRoot); err != nil {
|
||||
log.WithError(err).Error("Could not insert finalized deposits.")
|
||||
}
|
||||
}()
|
||||
|
||||
}
|
||||
|
||||
defer reportAttestationInclusion(b)
|
||||
@@ -151,76 +199,7 @@ func (s *Service) onBlock(ctx context.Context, signed *ethpb.SignedBeaconBlock,
|
||||
return s.handleEpochBoundary(ctx, postState)
|
||||
}
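Throughout this diff, *ethpb.SignedBeaconBlock parameters become the block.SignedBeaconBlock interface, direct field access (signed.Block.Slot) becomes getter calls (signed.Block().Slot()), and concrete phase0 blocks are wrapped via wrapper.WrappedPhase0SignedBeaconBlock. A rough sketch of that wrapper idea, using illustrative types rather than Prysm's actual package layout:

// beaconBlock is a getter-style view over any concrete block version.
type beaconBlock interface {
	Slot() uint64
	ParentRoot() []byte
	IsNil() bool
}

// phase0Block adapts one concrete (proto-backed) representation to that interface.
type phase0Block struct {
	slot       uint64
	parentRoot []byte
}

func (b *phase0Block) Slot() uint64       { return b.slot }
func (b *phase0Block) ParentRoot() []byte { return b.parentRoot }
func (b *phase0Block) IsNil() bool        { return b == nil }

With such an interface in place, fork choice and state transition code can handle different block versions uniformly through the same getters.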
// onBlockInitialSyncStateTransition is called when an initial sync block is received.
|
||||
// It runs state transition on the block and without fork choice and post operation pool processes.
|
||||
// The block's signing root should be computed before calling this method to avoid redundant
|
||||
// computation in this method and methods it calls into.
|
||||
func (s *Service) onBlockInitialSyncStateTransition(ctx context.Context, signed *ethpb.SignedBeaconBlock, blockRoot [32]byte) error {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.onBlockInitialSyncStateTransition")
|
||||
defer span.End()
|
||||
|
||||
if signed == nil || signed.Block == nil {
|
||||
return errors.New("nil block")
|
||||
}
|
||||
|
||||
b := signed.Block
|
||||
|
||||
// Retrieve incoming block's pre state.
|
||||
if err := s.verifyBlkPreState(ctx, b); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
preState, err := s.stateGen.StateByRootInitialSync(ctx, bytesutil.ToBytes32(signed.Block.ParentRoot))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if preState == nil {
|
||||
return fmt.Errorf("nil pre state for slot %d", b.Slot)
|
||||
}
|
||||
|
||||
// Exit early if the pre state slot is higher than incoming block's slot.
|
||||
if preState.Slot() >= signed.Block.Slot {
|
||||
return nil
|
||||
}
|
||||
|
||||
postState, err := state.ExecuteStateTransition(ctx, preState, signed)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not execute state transition")
|
||||
}
|
||||
|
||||
if err := s.savePostStateInfo(ctx, blockRoot, signed, postState, true /* init sync */); err != nil {
|
||||
return err
|
||||
}
|
||||
// Save the latest block as head in cache.
|
||||
if err := s.saveHeadNoDB(ctx, signed, blockRoot, postState); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Rate limit how many blocks (2 epochs worth of blocks) a node keeps in memory.
|
||||
if uint64(len(s.getInitSyncBlocks())) > initialSyncBlockCacheSize {
|
||||
if err := s.beaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
|
||||
return err
|
||||
}
|
||||
s.clearInitSyncBlocks()
|
||||
}
|
||||
|
||||
if postState.CurrentJustifiedCheckpoint().Epoch > s.justifiedCheckpt.Epoch {
|
||||
if err := s.updateJustifiedInitSync(ctx, postState.CurrentJustifiedCheckpoint()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Update finalized check point. Prune the block cache and helper caches on every new finalized epoch.
|
||||
if postState.FinalizedCheckpointEpoch() > s.finalizedCheckpt.Epoch {
|
||||
if err := s.updateFinalized(ctx, postState.FinalizedCheckpoint()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return s.handleEpochBoundary(ctx, postState)
|
||||
}
|
||||
|
||||
func (s *Service) onBlockBatch(ctx context.Context, blks []*ethpb.SignedBeaconBlock,
|
||||
func (s *Service) onBlockBatch(ctx context.Context, blks []block.SignedBeaconBlock,
|
||||
blockRoots [][32]byte) ([]*ethpb.Checkpoint, []*ethpb.Checkpoint, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.onBlockBatch")
|
||||
defer span.End()
|
||||
@@ -228,21 +207,21 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []*ethpb.SignedBeaconBl
|
||||
if len(blks) == 0 || len(blockRoots) == 0 {
|
||||
return nil, nil, errors.New("no blocks provided")
|
||||
}
|
||||
if blks[0] == nil || blks[0].Block == nil {
|
||||
if blks[0] == nil || blks[0].IsNil() || blks[0].Block().IsNil() {
|
||||
return nil, nil, errors.New("nil block")
|
||||
}
|
||||
b := blks[0].Block
|
||||
b := blks[0].Block()
|
||||
|
||||
// Retrieve incoming block's pre state.
|
||||
if err := s.verifyBlkPreState(ctx, b); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
preState, err := s.stateGen.StateByRootInitialSync(ctx, bytesutil.ToBytes32(b.ParentRoot))
|
||||
preState, err := s.cfg.StateGen.StateByRootInitialSync(ctx, bytesutil.ToBytes32(b.ParentRoot()))
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if preState == nil {
|
||||
return nil, nil, fmt.Errorf("nil pre state for slot %d", b.Slot)
|
||||
if preState == nil || preState.IsNil() {
|
||||
return nil, nil, fmt.Errorf("nil pre state for slot %d", b.Slot())
|
||||
}
|
||||
|
||||
jCheckpoints := make([]*ethpb.Checkpoint, len(blks))
|
||||
@@ -253,14 +232,14 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []*ethpb.SignedBeaconBl
|
||||
Messages: [][32]byte{},
|
||||
}
|
||||
var set *bls.SignatureSet
|
||||
boundaries := make(map[[32]byte]*stateTrie.BeaconState)
|
||||
boundaries := make(map[[32]byte]state.BeaconState)
|
||||
for i, b := range blks {
|
||||
set, preState, err = state.ExecuteStateTransitionNoVerifyAnySig(ctx, preState, b)
|
||||
set, preState, err = transition.ExecuteStateTransitionNoVerifyAnySig(ctx, preState, b)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
// Save potential boundary states.
|
||||
if helpers.IsEpochStart(preState.Slot()) {
|
||||
if core.IsEpochStart(preState.Slot()) {
|
||||
boundaries[blockRoots[i]] = preState.Copy()
|
||||
if err := s.handleEpochBoundary(ctx, preState); err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not handle epoch boundary state")
|
||||
@@ -278,14 +257,14 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []*ethpb.SignedBeaconBl
|
||||
return nil, nil, errors.New("batch block signature verification failed")
|
||||
}
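onBlockBatch runs each state transition without verifying signatures, collects one signature set per block, and checks the whole span with a single verification pass at the end. A simplified sketch of that accumulate-then-verify idea (these types and the verifyAll callback are illustrative, not Prysm's actual bls API):

// signatureSet accumulates (signature, public key, message) triples across blocks.
type signatureSet struct {
	signatures [][]byte
	publicKeys [][]byte
	messages   [][32]byte
}

// join merges another block's set into this one so the whole span is checked at once.
func (s *signatureSet) join(other *signatureSet) {
	s.signatures = append(s.signatures, other.signatures...)
	s.publicKeys = append(s.publicKeys, other.publicKeys...)
	s.messages = append(s.messages, other.messages...)
}

// verifyBatch folds every per-block set into one and runs a single verification pass,
// which is considerably cheaper than verifying each block's signatures individually.
func verifyBatch(sets []*signatureSet, verifyAll func(*signatureSet) (bool, error)) (bool, error) {
	total := &signatureSet{}
	for _, set := range sets {
		total.join(set)
	}
	return verifyAll(total)
}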
for r, st := range boundaries {
|
||||
if err := s.stateGen.SaveState(ctx, r, st); err != nil {
|
||||
if err := s.cfg.StateGen.SaveState(ctx, r, st); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
// Also save the last post state, to be used as the pre state for the next batch.
|
||||
lastB := blks[len(blks)-1]
|
||||
lastBR := blockRoots[len(blockRoots)-1]
|
||||
if err := s.stateGen.SaveState(ctx, lastBR, preState); err != nil {
|
||||
if err := s.cfg.StateGen.SaveState(ctx, lastBR, preState); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if err := s.saveHeadNoDB(ctx, lastB, lastBR, preState); err != nil {
|
||||
@@ -296,16 +275,16 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []*ethpb.SignedBeaconBl
|
||||
|
||||
// handles a block after the block's batch has been verified, where we can save the blocks
// and their state summaries and split them off to the respective hot/cold storage.
|
||||
func (s *Service) handleBlockAfterBatchVerify(ctx context.Context, signed *ethpb.SignedBeaconBlock,
|
||||
func (s *Service) handleBlockAfterBatchVerify(ctx context.Context, signed block.SignedBeaconBlock,
|
||||
blockRoot [32]byte, fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
|
||||
b := signed.Block
|
||||
b := signed.Block()
|
||||
|
||||
s.saveInitSyncBlock(blockRoot, signed)
|
||||
if err := s.insertBlockToForkChoiceStore(ctx, b, blockRoot, fCheckpoint, jCheckpoint); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{
|
||||
Slot: signed.Block.Slot,
|
||||
if err := s.cfg.BeaconDB.SaveStateSummary(ctx, ðpb.StateSummary{
|
||||
Slot: signed.Block().Slot(),
|
||||
Root: blockRoot[:],
|
||||
}); err != nil {
|
||||
return err
|
||||
@@ -313,7 +292,7 @@ func (s *Service) handleBlockAfterBatchVerify(ctx context.Context, signed *ethpb
|
||||
|
||||
// Rate limit how many blocks (2 epochs worth of blocks) a node keeps in memory.
|
||||
if uint64(len(s.getInitSyncBlocks())) > initialSyncBlockCacheSize {
|
||||
if err := s.beaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
|
||||
if err := s.cfg.BeaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
|
||||
return err
|
||||
}
|
||||
s.clearInitSyncBlocks()
|
||||
@@ -330,18 +309,30 @@ func (s *Service) handleBlockAfterBatchVerify(ctx context.Context, signed *ethpb
|
||||
if err := s.updateFinalized(ctx, fCheckpoint); err != nil {
|
||||
return err
|
||||
}
|
||||
if features.Get().UpdateHeadTimely {
|
||||
s.prevFinalizedCheckpt = s.finalizedCheckpt
|
||||
s.finalizedCheckpt = fCheckpoint
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Epoch boundary bookkeeping such as logging epoch summaries.
|
||||
func (s *Service) handleEpochBoundary(ctx context.Context, postState *stateTrie.BeaconState) error {
|
||||
func (s *Service) handleEpochBoundary(ctx context.Context, postState state.BeaconState) error {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.handleEpochBoundary")
|
||||
defer span.End()
|
||||
|
||||
if postState.Slot()+1 == s.nextEpochBoundarySlot {
|
||||
// Update caches for the next epoch at epoch boundary slot - 1.
|
||||
if err := helpers.UpdateCommitteeCache(postState, helpers.NextEpoch(postState)); err != nil {
|
||||
if err := helpers.UpdateCommitteeCache(postState, core.NextEpoch(postState)); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := helpers.UpdateProposerIndicesInCache(postState, helpers.NextEpoch(postState)); err != nil {
|
||||
copied := postState.Copy()
|
||||
copied, err := transition.ProcessSlots(ctx, copied, copied.Slot()+1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := helpers.UpdateProposerIndicesInCache(copied); err != nil {
|
||||
return err
|
||||
}
|
||||
} else if postState.Slot() >= s.nextEpochBoundarySlot {
|
||||
@@ -349,17 +340,17 @@ func (s *Service) handleEpochBoundary(ctx context.Context, postState *stateTrie.
|
||||
return err
|
||||
}
|
||||
var err error
|
||||
s.nextEpochBoundarySlot, err = helpers.StartSlot(helpers.NextEpoch(postState))
|
||||
s.nextEpochBoundarySlot, err = core.StartSlot(core.NextEpoch(postState))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Update caches at epoch boundary slot.
|
||||
// The following updates have a short cut to return nil cheaply if already fulfilled during boundary slot - 1.
|
||||
if err := helpers.UpdateCommitteeCache(postState, helpers.CurrentEpoch(postState)); err != nil {
|
||||
if err := helpers.UpdateCommitteeCache(postState, core.CurrentEpoch(postState)); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := helpers.UpdateProposerIndicesInCache(postState, helpers.CurrentEpoch(postState)); err != nil {
|
||||
if err := helpers.UpdateProposerIndicesInCache(postState); err != nil {
|
||||
return err
|
||||
}
|
||||
}
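At nextEpochBoundarySlot - 1 the service now advances a copy of the post-state by one slot and warms the proposer-indices cache from that copy, instead of passing an explicit next epoch. A rough sketch of the idea, with placeholder types (BeaconState, processSlots, warmProposerCache are stand-ins, not Prysm's exports):

// BeaconState is a minimal stand-in for the real beacon state interface.
type BeaconState interface {
	Slot() uint64
	Copy() BeaconState
}

// primeNextEpochCaches warms next-epoch caches one slot before the boundary.
// It advances a copy so the canonical post-state is never mutated.
func primeNextEpochCaches(
	st BeaconState,
	processSlots func(BeaconState, uint64) (BeaconState, error),
	warmProposerCache func(BeaconState) error,
) error {
	copied := st.Copy()
	advanced, err := processSlots(copied, copied.Slot()+1) // cross into the next epoch
	if err != nil {
		return err
	}
	return warmProposerCache(advanced)
}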
@@ -369,33 +360,39 @@ func (s *Service) handleEpochBoundary(ctx context.Context, postState *stateTrie.
|
||||
|
||||
// This feeds the block and the block's attestations into the fork choice store. It allows the fork choice store
// to gain information on the most current chain.
|
||||
func (s *Service) insertBlockAndAttestationsToForkChoiceStore(ctx context.Context, blk *ethpb.BeaconBlock, root [32]byte,
|
||||
st *stateTrie.BeaconState) error {
|
||||
func (s *Service) insertBlockAndAttestationsToForkChoiceStore(ctx context.Context, blk block.BeaconBlock, root [32]byte,
|
||||
st state.BeaconState) error {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.insertBlockAndAttestationsToForkChoiceStore")
|
||||
defer span.End()
|
||||
|
||||
fCheckpoint := st.FinalizedCheckpoint()
|
||||
jCheckpoint := st.CurrentJustifiedCheckpoint()
|
||||
if err := s.insertBlockToForkChoiceStore(ctx, blk, root, fCheckpoint, jCheckpoint); err != nil {
|
||||
return err
|
||||
}
|
||||
// Feed in block's attestations to fork choice store.
|
||||
for _, a := range blk.Body.Attestations {
|
||||
for _, a := range blk.Body().Attestations() {
|
||||
committee, err := helpers.BeaconCommitteeFromState(st, a.Data.Slot, a.Data.CommitteeIndex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
indices := attestationutil.AttestingIndices(a.AggregationBits, committee)
|
||||
s.forkChoiceStore.ProcessAttestation(ctx, indices, bytesutil.ToBytes32(a.Data.BeaconBlockRoot), a.Data.Target.Epoch)
|
||||
indices, err := attestation.AttestingIndices(a.AggregationBits, committee)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s.cfg.ForkChoiceStore.ProcessAttestation(ctx, indices, bytesutil.ToBytes32(a.Data.BeaconBlockRoot), a.Data.Target.Epoch)
|
||||
}
|
||||
return nil
|
||||
}
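Before a block's attestations can count as fork choice votes, each attestation's aggregation bits are resolved against the committee for its slot and committee index to recover concrete validator indices. A minimal sketch of that resolution, using a plain []bool in place of the real bitlist type:

// attestingIndices returns the validator indices of committee members whose
// aggregation bit is set. committee is the ordered committee for the
// attestation's slot and committee index.
func attestingIndices(bits []bool, committee []uint64) []uint64 {
	indices := make([]uint64, 0, len(committee))
	for i, voted := range bits {
		if voted && i < len(committee) {
			indices = append(indices, committee[i])
		}
	}
	return indices
}

Each recovered index is then fed to the fork choice store as a vote for a.Data.BeaconBlockRoot at the attestation's target epoch, as the loop above shows.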
func (s *Service) insertBlockToForkChoiceStore(ctx context.Context, blk *ethpb.BeaconBlock,
|
||||
func (s *Service) insertBlockToForkChoiceStore(ctx context.Context, blk block.BeaconBlock,
|
||||
root [32]byte, fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
|
||||
if err := s.fillInForkChoiceMissingBlocks(ctx, blk, fCheckpoint, jCheckpoint); err != nil {
|
||||
return err
|
||||
}
|
||||
// Feed in block to fork choice store.
|
||||
if err := s.forkChoiceStore.ProcessBlock(ctx,
|
||||
blk.Slot, root, bytesutil.ToBytes32(blk.ParentRoot), bytesutil.ToBytes32(blk.Body.Graffiti),
|
||||
if err := s.cfg.ForkChoiceStore.ProcessBlock(ctx,
|
||||
blk.Slot(), root, bytesutil.ToBytes32(blk.ParentRoot()), bytesutil.ToBytes32(blk.Body().Graffiti()),
|
||||
jCheckpoint.Epoch,
|
||||
fCheckpoint.Epoch); err != nil {
|
||||
return errors.Wrap(err, "could not process block for proto array fork choice")
|
||||
@@ -405,19 +402,49 @@ func (s *Service) insertBlockToForkChoiceStore(ctx context.Context, blk *ethpb.B
|
||||
|
||||
// This saves post state info to DB or cache. This also saves post state info to fork choice store.
|
||||
// Post state info consists of processed block and state. Do not call this method unless the block and state are verified.
|
||||
func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b *ethpb.SignedBeaconBlock, st *stateTrie.BeaconState, initSync bool) error {
|
||||
func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b block.SignedBeaconBlock, st state.BeaconState, initSync bool) error {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.savePostStateInfo")
|
||||
defer span.End()
|
||||
if initSync {
|
||||
s.saveInitSyncBlock(r, b)
|
||||
} else if err := s.beaconDB.SaveBlock(ctx, b); err != nil {
|
||||
return errors.Wrapf(err, "could not save block from slot %d", b.Block.Slot)
|
||||
} else if err := s.cfg.BeaconDB.SaveBlock(ctx, b); err != nil {
|
||||
return errors.Wrapf(err, "could not save block from slot %d", b.Block().Slot())
|
||||
}
|
||||
if err := s.stateGen.SaveState(ctx, r, st); err != nil {
|
||||
if err := s.cfg.StateGen.SaveState(ctx, r, st); err != nil {
|
||||
return errors.Wrap(err, "could not save state")
|
||||
}
|
||||
if err := s.insertBlockAndAttestationsToForkChoiceStore(ctx, b.Block, r, st); err != nil {
|
||||
return errors.Wrapf(err, "could not insert block %d to fork choice store", b.Block.Slot)
|
||||
if err := s.insertBlockAndAttestationsToForkChoiceStore(ctx, b.Block(), r, st); err != nil {
|
||||
return errors.Wrapf(err, "could not insert block %d to fork choice store", b.Block().Slot())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// This removes the attestations from the mem pool. It will only remove the attestations if input root `r` is canonical,
|
||||
// meaning the block `b` is part of the canonical chain.
|
||||
func (s *Service) pruneCanonicalAttsFromPool(ctx context.Context, r [32]byte, b block.SignedBeaconBlock) error {
|
||||
if !features.Get().CorrectlyPruneCanonicalAtts {
|
||||
return nil
|
||||
}
|
||||
|
||||
canonical, err := s.IsCanonical(ctx, r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !canonical {
|
||||
return nil
|
||||
}
|
||||
|
||||
atts := b.Block().Body().Attestations()
|
||||
for _, att := range atts {
|
||||
if helpers.IsAggregated(att) {
|
||||
if err := s.cfg.AttPool.DeleteAggregatedAttestation(att); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if err := s.cfg.AttPool.DeleteUnaggregatedAttestation(att); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -6,25 +6,29 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/shared/attestationutil"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/config/features"
|
||||
"github.com/prysmaticlabs/prysm/monitoring/tracing"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/attestation"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/traceutil"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// CurrentSlot returns the current slot based on time.
|
||||
func (s *Service) CurrentSlot() uint64 {
|
||||
return helpers.CurrentSlot(uint64(s.genesisTime.Unix()))
|
||||
func (s *Service) CurrentSlot() types.Slot {
|
||||
return core.CurrentSlot(uint64(s.genesisTime.Unix()))
|
||||
}
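CurrentSlot is pure wall-clock arithmetic over the genesis time. A self-contained sketch of the same computation (the function name is illustrative; 12 seconds per slot is mainnet's SecondsPerSlot):

package example

import "time"

// slotsSinceGenesis converts wall-clock time into a slot number; before genesis it
// clamps to zero.
func slotsSinceGenesis(genesisUnix int64, secondsPerSlot uint64) uint64 {
	now := time.Now().Unix()
	if now < genesisUnix || secondsPerSlot == 0 {
		return 0
	}
	return uint64(now-genesisUnix) / secondsPerSlot
}

For example, 384 seconds after genesis with 12-second slots this returns slot 32, the first slot of epoch 1.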
// getBlockPreState returns the pre state of an incoming block. It uses the parent root of the block
|
||||
// to retrieve the state in DB. It verifies the pre state's validity and the incoming block
|
||||
// is in the correct time window.
|
||||
func (s *Service) getBlockPreState(ctx context.Context, b *ethpb.BeaconBlock) (*stateTrie.BeaconState, error) {
|
||||
func (s *Service) getBlockPreState(ctx context.Context, b block.BeaconBlock) (state.BeaconState, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.getBlockPreState")
|
||||
defer span.End()
|
||||
|
||||
@@ -33,16 +37,16 @@ func (s *Service) getBlockPreState(ctx context.Context, b *ethpb.BeaconBlock) (*
|
||||
return nil, err
|
||||
}
|
||||
|
||||
preState, err := s.stateGen.StateByRoot(ctx, bytesutil.ToBytes32(b.ParentRoot))
|
||||
preState, err := s.cfg.StateGen.StateByRoot(ctx, bytesutil.ToBytes32(b.ParentRoot()))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get pre state for slot %d", b.Slot)
|
||||
return nil, errors.Wrapf(err, "could not get pre state for slot %d", b.Slot())
|
||||
}
|
||||
if preState == nil {
|
||||
return nil, errors.Wrapf(err, "nil pre state for slot %d", b.Slot)
|
||||
if preState == nil || preState.IsNil() {
|
||||
return nil, errors.Wrapf(err, "nil pre state for slot %d", b.Slot())
|
||||
}
|
||||
|
||||
// Verify block slot time is not from the future.
|
||||
if err := helpers.VerifySlotTime(preState.GenesisTime(), b.Slot, params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil {
|
||||
if err := core.VerifySlotTime(preState.GenesisTime(), b.Slot(), params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -55,28 +59,28 @@ func (s *Service) getBlockPreState(ctx context.Context, b *ethpb.BeaconBlock) (*
|
||||
}
|
||||
|
||||
// verifyBlkPreState validates input block has a valid pre-state.
|
||||
func (s *Service) verifyBlkPreState(ctx context.Context, b *ethpb.BeaconBlock) error {
|
||||
func (s *Service) verifyBlkPreState(ctx context.Context, b block.BeaconBlock) error {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.verifyBlkPreState")
|
||||
defer span.End()
|
||||
|
||||
parentRoot := bytesutil.ToBytes32(b.ParentRoot)
|
||||
parentRoot := bytesutil.ToBytes32(b.ParentRoot())
|
||||
// Loosen the check to HasBlock because state summaries get saved in batches
// during initial syncing. There's no risk given a state summary object is just
// a subset of the block object.
|
||||
if !s.beaconDB.HasStateSummary(ctx, parentRoot) && !s.beaconDB.HasBlock(ctx, parentRoot) {
|
||||
if !s.cfg.BeaconDB.HasStateSummary(ctx, parentRoot) && !s.cfg.BeaconDB.HasBlock(ctx, parentRoot) {
|
||||
return errors.New("could not reconstruct parent state")
|
||||
}
|
||||
|
||||
if err := s.VerifyBlkDescendant(ctx, bytesutil.ToBytes32(b.ParentRoot)); err != nil {
|
||||
if err := s.VerifyBlkDescendant(ctx, bytesutil.ToBytes32(b.ParentRoot())); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
has, err := s.stateGen.HasState(ctx, parentRoot)
|
||||
has, err := s.cfg.StateGen.HasState(ctx, parentRoot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !has {
|
||||
if err := s.beaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
|
||||
if err := s.cfg.BeaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
|
||||
return errors.Wrap(err, "could not save initial sync blocks")
|
||||
}
|
||||
s.clearInitSyncBlocks()
|
||||
@@ -90,15 +94,15 @@ func (s *Service) VerifyBlkDescendant(ctx context.Context, root [32]byte) error
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.VerifyBlkDescendant")
|
||||
defer span.End()
|
||||
fRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(s.finalizedCheckpt.Root))
|
||||
finalizedBlkSigned, err := s.beaconDB.Block(ctx, fRoot)
|
||||
finalizedBlkSigned, err := s.cfg.BeaconDB.Block(ctx, fRoot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if finalizedBlkSigned == nil || finalizedBlkSigned.Block == nil {
|
||||
if finalizedBlkSigned == nil || finalizedBlkSigned.IsNil() || finalizedBlkSigned.Block().IsNil() {
|
||||
return errors.New("nil finalized block")
|
||||
}
|
||||
finalizedBlk := finalizedBlkSigned.Block
|
||||
bFinalizedRoot, err := s.ancestor(ctx, root[:], finalizedBlk.Slot)
|
||||
finalizedBlk := finalizedBlkSigned.Block()
|
||||
bFinalizedRoot, err := s.ancestor(ctx, root[:], finalizedBlk.Slot())
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get finalized block root")
|
||||
}
|
||||
@@ -108,9 +112,9 @@ func (s *Service) VerifyBlkDescendant(ctx context.Context, root [32]byte) error
|
||||
|
||||
if !bytes.Equal(bFinalizedRoot, fRoot[:]) {
|
||||
err := fmt.Errorf("block %#x is not a descendent of the current finalized block slot %d, %#x != %#x",
|
||||
bytesutil.Trunc(root[:]), finalizedBlk.Slot, bytesutil.Trunc(bFinalizedRoot),
|
||||
bytesutil.Trunc(root[:]), finalizedBlk.Slot(), bytesutil.Trunc(bFinalizedRoot),
|
||||
bytesutil.Trunc(fRoot[:]))
|
||||
traceutil.AnnotateError(span, err)
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
@@ -118,13 +122,13 @@ func (s *Service) VerifyBlkDescendant(ctx context.Context, root [32]byte) error
|
||||
|
||||
// verifyBlkFinalizedSlot validates that the input block's slot is not less than or equal
// to the current finalized slot.
|
||||
func (s *Service) verifyBlkFinalizedSlot(b *ethpb.BeaconBlock) error {
|
||||
finalizedSlot, err := helpers.StartSlot(s.finalizedCheckpt.Epoch)
|
||||
func (s *Service) verifyBlkFinalizedSlot(b block.BeaconBlock) error {
|
||||
finalizedSlot, err := core.StartSlot(s.finalizedCheckpt.Epoch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if finalizedSlot >= b.Slot {
|
||||
return fmt.Errorf("block is equal or earlier than finalized block, slot %d < slot %d", b.Slot, finalizedSlot)
|
||||
if finalizedSlot >= b.Slot() {
|
||||
return fmt.Errorf("block is equal or earlier than finalized block, slot %d < slot %d", b.Slot(), finalizedSlot)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -134,48 +138,51 @@ func (s *Service) verifyBlkFinalizedSlot(b *ethpb.BeaconBlock) error {
|
||||
// Otherwise, delay incorporation of new justified checkpoint until next epoch boundary.
|
||||
// See https://ethresear.ch/t/prevention-of-bouncing-attack-on-ffg/6114 for more detailed analysis and discussion.
|
||||
func (s *Service) shouldUpdateCurrentJustified(ctx context.Context, newJustifiedCheckpt *ethpb.Checkpoint) (bool, error) {
|
||||
if helpers.SlotsSinceEpochStarts(s.CurrentSlot()) < params.BeaconConfig().SafeSlotsToUpdateJustified {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.shouldUpdateCurrentJustified")
|
||||
defer span.End()
|
||||
|
||||
if core.SlotsSinceEpochStarts(s.CurrentSlot()) < params.BeaconConfig().SafeSlotsToUpdateJustified {
|
||||
return true, nil
|
||||
}
|
||||
var newJustifiedBlockSigned *ethpb.SignedBeaconBlock
|
||||
var newJustifiedBlockSigned block.SignedBeaconBlock
|
||||
justifiedRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(newJustifiedCheckpt.Root))
|
||||
var err error
|
||||
if s.hasInitSyncBlock(justifiedRoot) {
|
||||
newJustifiedBlockSigned = s.getInitSyncBlock(justifiedRoot)
|
||||
} else {
|
||||
newJustifiedBlockSigned, err = s.beaconDB.Block(ctx, justifiedRoot)
|
||||
newJustifiedBlockSigned, err = s.cfg.BeaconDB.Block(ctx, justifiedRoot)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
if newJustifiedBlockSigned == nil || newJustifiedBlockSigned.Block == nil {
|
||||
if newJustifiedBlockSigned == nil || newJustifiedBlockSigned.IsNil() || newJustifiedBlockSigned.Block().IsNil() {
|
||||
return false, errors.New("nil new justified block")
|
||||
}
|
||||
|
||||
newJustifiedBlock := newJustifiedBlockSigned.Block
|
||||
jSlot, err := helpers.StartSlot(s.justifiedCheckpt.Epoch)
|
||||
newJustifiedBlock := newJustifiedBlockSigned.Block()
|
||||
jSlot, err := core.StartSlot(s.justifiedCheckpt.Epoch)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if newJustifiedBlock.Slot <= jSlot {
|
||||
if newJustifiedBlock.Slot() <= jSlot {
|
||||
return false, nil
|
||||
}
|
||||
var justifiedBlockSigned *ethpb.SignedBeaconBlock
|
||||
var justifiedBlockSigned block.SignedBeaconBlock
|
||||
cachedJustifiedRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(s.justifiedCheckpt.Root))
|
||||
if s.hasInitSyncBlock(cachedJustifiedRoot) {
|
||||
justifiedBlockSigned = s.getInitSyncBlock(cachedJustifiedRoot)
|
||||
} else {
|
||||
justifiedBlockSigned, err = s.beaconDB.Block(ctx, cachedJustifiedRoot)
|
||||
justifiedBlockSigned, err = s.cfg.BeaconDB.Block(ctx, cachedJustifiedRoot)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
|
||||
if justifiedBlockSigned == nil || justifiedBlockSigned.Block == nil {
|
||||
if justifiedBlockSigned == nil || justifiedBlockSigned.IsNil() || justifiedBlockSigned.Block().IsNil() {
|
||||
return false, errors.New("nil justified block")
|
||||
}
|
||||
justifiedBlock := justifiedBlockSigned.Block
|
||||
b, err := s.ancestor(ctx, justifiedRoot[:], justifiedBlock.Slot)
|
||||
justifiedBlock := justifiedBlockSigned.Block()
|
||||
b, err := s.ancestor(ctx, justifiedRoot[:], justifiedBlock.Slot())
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@@ -185,7 +192,10 @@ func (s *Service) shouldUpdateCurrentJustified(ctx context.Context, newJustified
|
||||
return true, nil
|
||||
}
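The early return in shouldUpdateCurrentJustified is the bouncing-attack guard from the fork choice spec: a better justified checkpoint is adopted immediately only while the current epoch is still young (fewer than SafeSlotsToUpdateJustified slots in); otherwise it is held as bestJustifiedCheckpt until the next epoch boundary. A sketch of the guard (the phase0 spec sets the threshold to 8 slots, assumed here):

// canUpdateJustifiedNow reports whether a newly seen justified checkpoint may be
// adopted immediately rather than deferred to the next epoch boundary.
func canUpdateJustifiedNow(currentSlot, slotsPerEpoch, safeSlotsToUpdateJustified uint64) bool {
	slotsSinceEpochStart := currentSlot % slotsPerEpoch
	return slotsSinceEpochStart < safeSlotsToUpdateJustified
}

With 32 slots per epoch and a threshold of 8, slot 262 (6 slots into its epoch) allows an immediate update, while slot 270 (14 slots in) defers it.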
func (s *Service) updateJustified(ctx context.Context, state *stateTrie.BeaconState) error {
|
||||
func (s *Service) updateJustified(ctx context.Context, state state.ReadOnlyBeaconState) error {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.updateJustified")
|
||||
defer span.End()
|
||||
|
||||
cpt := state.CurrentJustifiedCheckpoint()
|
||||
if cpt.Epoch > s.bestJustifiedCheckpt.Epoch {
|
||||
s.bestJustifiedCheckpt = cpt
|
||||
@@ -203,7 +213,7 @@ func (s *Service) updateJustified(ctx context.Context, state *stateTrie.BeaconSt
|
||||
}
|
||||
}
|
||||
|
||||
return s.beaconDB.SaveJustifiedCheckpoint(ctx, cpt)
|
||||
return s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, cpt)
|
||||
}
|
||||
|
||||
// This caches input checkpoint as justified for the service struct. It rotates current justified to previous justified,
|
||||
@@ -216,26 +226,30 @@ func (s *Service) updateJustifiedInitSync(ctx context.Context, cp *ethpb.Checkpo
|
||||
return err
|
||||
}
|
||||
|
||||
return s.beaconDB.SaveJustifiedCheckpoint(ctx, cp)
|
||||
return s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, cp)
|
||||
}
|
||||
|
||||
func (s *Service) updateFinalized(ctx context.Context, cp *ethpb.Checkpoint) error {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.updateFinalized")
|
||||
defer span.End()
|
||||
|
||||
// Blocks need to be saved so that we can retrieve finalized block from
|
||||
// DB when migrating states.
|
||||
if err := s.beaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
|
||||
if err := s.cfg.BeaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
|
||||
return err
|
||||
}
|
||||
s.clearInitSyncBlocks()
|
||||
|
||||
if err := s.beaconDB.SaveFinalizedCheckpoint(ctx, cp); err != nil {
|
||||
if err := s.cfg.BeaconDB.SaveFinalizedCheckpoint(ctx, cp); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.prevFinalizedCheckpt = s.finalizedCheckpt
|
||||
s.finalizedCheckpt = cp
|
||||
if !features.Get().UpdateHeadTimely {
|
||||
s.prevFinalizedCheckpt = s.finalizedCheckpt
|
||||
s.finalizedCheckpt = cp
|
||||
}
|
||||
|
||||
fRoot := bytesutil.ToBytes32(cp.Root)
|
||||
if err := s.stateGen.MigrateToCold(ctx, fRoot); err != nil {
|
||||
if err := s.cfg.StateGen.MigrateToCold(ctx, fRoot); err != nil {
|
||||
return errors.Wrap(err, "could not migrate to cold")
|
||||
}
|
||||
|
||||
@@ -254,7 +268,7 @@ func (s *Service) updateFinalized(ctx context.Context, cp *ethpb.Checkpoint) err
|
||||
// else:
|
||||
// # root is older than queried slot, thus a skip slot. Return most recent root prior to slot
|
||||
// return root
|
||||
func (s *Service) ancestor(ctx context.Context, root []byte, slot uint64) ([]byte, error) {
|
||||
func (s *Service) ancestor(ctx context.Context, root []byte, slot types.Slot) ([]byte, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.ancestor")
|
||||
defer span.End()
|
||||
|
||||
@@ -275,18 +289,18 @@ func (s *Service) ancestor(ctx context.Context, root []byte, slot uint64) ([]byt
|
||||
}
|
||||
|
||||
// This retrieves an ancestor root using the fork choice store. The lookup loops through a flat array structure.
|
||||
func (s *Service) ancestorByForkChoiceStore(ctx context.Context, r [32]byte, slot uint64) ([]byte, error) {
|
||||
func (s *Service) ancestorByForkChoiceStore(ctx context.Context, r [32]byte, slot types.Slot) ([]byte, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.ancestorByForkChoiceStore")
|
||||
defer span.End()
|
||||
|
||||
if !s.forkChoiceStore.HasParent(r) {
|
||||
if !s.cfg.ForkChoiceStore.HasParent(r) {
|
||||
return nil, errors.New("could not find root in fork choice store")
|
||||
}
|
||||
return s.forkChoiceStore.AncestorRoot(ctx, r, slot)
|
||||
return s.cfg.ForkChoiceStore.AncestorRoot(ctx, r, slot)
|
||||
}
|
||||
|
||||
// This retrieves an ancestor root using the DB. The lookup recursively queries the DB and is slower than `ancestorByForkChoiceStore`.
|
||||
func (s *Service) ancestorByDB(ctx context.Context, r [32]byte, slot uint64) ([]byte, error) {
|
||||
func (s *Service) ancestorByDB(ctx context.Context, r [32]byte, slot types.Slot) ([]byte, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.ancestorByDB")
|
||||
defer span.End()
|
||||
|
||||
@@ -295,7 +309,7 @@ func (s *Service) ancestorByDB(ctx context.Context, r [32]byte, slot uint64) ([]
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
|
||||
signed, err := s.beaconDB.Block(ctx, r)
|
||||
signed, err := s.cfg.BeaconDB.Block(ctx, r)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get ancestor block")
|
||||
}
|
||||
@@ -304,15 +318,15 @@ func (s *Service) ancestorByDB(ctx context.Context, r [32]byte, slot uint64) ([]
|
||||
signed = s.getInitSyncBlock(r)
|
||||
}
|
||||
|
||||
if signed == nil || signed.Block == nil {
|
||||
if signed == nil || signed.IsNil() || signed.Block().IsNil() {
|
||||
return nil, errors.New("nil block")
|
||||
}
|
||||
b := signed.Block
|
||||
if b.Slot == slot || b.Slot < slot {
|
||||
b := signed.Block()
|
||||
if b.Slot() == slot || b.Slot() < slot {
|
||||
return r[:], nil
|
||||
}
|
||||
|
||||
return s.ancestorByDB(ctx, bytesutil.ToBytes32(b.ParentRoot), slot)
|
||||
return s.ancestorByDB(ctx, bytesutil.ToBytes32(b.ParentRoot()), slot)
|
||||
}
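ancestorByDB walks parent roots back until it finds a block whose slot is at or below the requested slot, which handles skip slots by returning the most recent root prior to the slot. The same walk, sketched iteratively over an in-memory map standing in for the DB:

// blockInfo is a stand-in for what ancestorByDB reads per block: parent root and slot.
type blockInfo struct {
	parent [32]byte
	slot   uint64
}

// ancestorRoot follows parent links from root until it reaches a block at or
// before the requested slot and returns that block's root; skip slots resolve to
// the most recent earlier block. The bool is false when the chain can't be walked.
func ancestorRoot(blocks map[[32]byte]blockInfo, root [32]byte, slot uint64) ([32]byte, bool) {
	for {
		b, ok := blocks[root]
		if !ok {
			return [32]byte{}, false
		}
		if b.slot <= slot {
			return root, true
		}
		root = b.parent
	}
}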
// This updates justified check point in store, if the new justified is later than stored justified or
|
||||
@@ -330,16 +344,16 @@ func (s *Service) ancestorByDB(ctx context.Context, r [32]byte, slot uint64) ([]
|
||||
// ancestor_at_finalized_slot = get_ancestor(store, store.justified_checkpoint.root, finalized_slot)
|
||||
// if ancestor_at_finalized_slot != store.finalized_checkpoint.root:
|
||||
// store.justified_checkpoint = state.current_justified_checkpoint
|
||||
func (s *Service) finalizedImpliesNewJustified(ctx context.Context, state *stateTrie.BeaconState) error {
|
||||
func (s *Service) finalizedImpliesNewJustified(ctx context.Context, state state.BeaconState) error {
|
||||
// Update justified if it's different than the one cached in the store.
|
||||
if !attestationutil.CheckPointIsEqual(s.justifiedCheckpt, state.CurrentJustifiedCheckpoint()) {
|
||||
if !attestation.CheckPointIsEqual(s.justifiedCheckpt, state.CurrentJustifiedCheckpoint()) {
|
||||
if state.CurrentJustifiedCheckpoint().Epoch > s.justifiedCheckpt.Epoch {
|
||||
s.justifiedCheckpt = state.CurrentJustifiedCheckpoint()
|
||||
return s.cacheJustifiedStateBalances(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root))
|
||||
}
|
||||
|
||||
// Update justified if store justified is not in chain with finalized check point.
|
||||
finalizedSlot, err := helpers.StartSlot(s.finalizedCheckpt.Epoch)
|
||||
finalizedSlot, err := core.StartSlot(s.finalizedCheckpt.Epoch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -360,31 +374,31 @@ func (s *Service) finalizedImpliesNewJustified(ctx context.Context, state *state
|
||||
|
||||
// This retrieves missing blocks from the DB (i.e. the blocks that couldn't be received over sync) and inserts them into the fork choice store.
// This is useful for the block tree visualizer and additional vote accounting.
|
||||
func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk *ethpb.BeaconBlock,
|
||||
func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk block.BeaconBlock,
|
||||
fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
|
||||
pendingNodes := make([]*ethpb.BeaconBlock, 0)
|
||||
pendingNodes := make([]block.BeaconBlock, 0)
|
||||
pendingRoots := make([][32]byte, 0)
|
||||
|
||||
parentRoot := bytesutil.ToBytes32(blk.ParentRoot)
|
||||
slot := blk.Slot
|
||||
parentRoot := bytesutil.ToBytes32(blk.ParentRoot())
|
||||
slot := blk.Slot()
|
||||
// Fork choice only matters from last finalized slot.
|
||||
fSlot, err := helpers.StartSlot(s.finalizedCheckpt.Epoch)
|
||||
fSlot, err := core.StartSlot(s.finalizedCheckpt.Epoch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
higherThanFinalized := slot > fSlot
|
||||
// As long as parent node is not in fork choice store, and parent node is in DB.
|
||||
for !s.forkChoiceStore.HasNode(parentRoot) && s.beaconDB.HasBlock(ctx, parentRoot) && higherThanFinalized {
|
||||
b, err := s.beaconDB.Block(ctx, parentRoot)
|
||||
for !s.cfg.ForkChoiceStore.HasNode(parentRoot) && s.cfg.BeaconDB.HasBlock(ctx, parentRoot) && higherThanFinalized {
|
||||
b, err := s.cfg.BeaconDB.Block(ctx, parentRoot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
pendingNodes = append(pendingNodes, b.Block)
|
||||
pendingNodes = append(pendingNodes, b.Block())
|
||||
copiedRoot := parentRoot
|
||||
pendingRoots = append(pendingRoots, copiedRoot)
|
||||
parentRoot = bytesutil.ToBytes32(b.Block.ParentRoot)
|
||||
slot = b.Block.Slot
|
||||
parentRoot = bytesutil.ToBytes32(b.Block().ParentRoot())
|
||||
slot = b.Block().Slot()
|
||||
higherThanFinalized = slot > fSlot
|
||||
}
|
||||
|
||||
@@ -393,8 +407,8 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk *ethpb.
|
||||
for i := len(pendingNodes) - 1; i >= 0; i-- {
|
||||
b := pendingNodes[i]
|
||||
r := pendingRoots[i]
|
||||
if err := s.forkChoiceStore.ProcessBlock(ctx,
|
||||
b.Slot, r, bytesutil.ToBytes32(b.ParentRoot), bytesutil.ToBytes32(b.Body.Graffiti),
|
||||
if err := s.cfg.ForkChoiceStore.ProcessBlock(ctx,
|
||||
b.Slot(), r, bytesutil.ToBytes32(b.ParentRoot()), bytesutil.ToBytes32(b.Body().Graffiti()),
|
||||
jCheckpoint.Epoch,
|
||||
fCheckpoint.Epoch); err != nil {
|
||||
return errors.Wrap(err, "could not process block for proto array fork choice")
|
||||
@@ -404,15 +418,37 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk *ethpb.
|
||||
return nil
|
||||
}
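fillInForkChoiceMissingBlocks collects ancestors newest-first while walking toward the finalized checkpoint, then inserts them in reverse so every node's parent is already present in the fork choice store when the node is processed. A compact sketch of that ordering, with hasNode, lookup, and insert as placeholder callbacks:

// backfillForkChoice walks parents from parentRoot down to (but not past) the finalized
// slot, collecting roots unknown to fork choice, then inserts them oldest-first so each
// parent exists in the store before its child.
func backfillForkChoice(
	parentRoot [32]byte,
	slot, finalizedSlot uint64,
	hasNode func([32]byte) bool,
	lookup func([32]byte) (parent [32]byte, parentSlot uint64, ok bool),
	insert func([32]byte) error,
) error {
	pending := make([][32]byte, 0)
	for !hasNode(parentRoot) && slot > finalizedSlot {
		p, pSlot, ok := lookup(parentRoot)
		if !ok {
			break
		}
		pending = append(pending, parentRoot)
		parentRoot, slot = p, pSlot
	}
	// Insert in reverse order, oldest ancestor first.
	for i := len(pending) - 1; i >= 0; i-- {
		if err := insert(pending[i]); err != nil {
			return err
		}
	}
	return nil
}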
// inserts finalized deposits into our finalized deposit trie.
|
||||
func (s *Service) insertFinalizedDeposits(ctx context.Context, fRoot [32]byte) error {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.insertFinalizedDeposits")
|
||||
defer span.End()
|
||||
|
||||
// Update deposit cache.
|
||||
finalizedState, err := s.cfg.StateGen.StateByRoot(ctx, fRoot)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not fetch finalized state")
|
||||
}
|
||||
// We update the cache up to the last deposit index in the finalized block's state.
|
||||
// We can be confident that these deposits will be included in some block
|
||||
// because the Eth1 follow distance makes such long-range reorgs extremely unlikely.
|
||||
eth1DepositIndex := int64(finalizedState.Eth1Data().DepositCount - 1)
|
||||
s.cfg.DepositCache.InsertFinalizedDeposits(ctx, eth1DepositIndex)
|
||||
// Deposit proofs are only used during state transition and can be safely removed to save space.
|
||||
if err = s.cfg.DepositCache.PruneProofs(ctx, eth1DepositIndex); err != nil {
|
||||
return errors.Wrap(err, "could not prune deposit proofs")
|
||||
}
|
||||
return nil
|
||||
}
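The new insertFinalizedDeposits helper derives the last finalized deposit index directly from the finalized state's Eth1Data.DepositCount and then prunes the now-unneeded Merkle proofs. A small sketch of that index math (the empty-deposit-set guard is an assumption added for illustration):

// finalizedDepositIndex returns the index of the last deposit covered by a state
// whose Eth1Data reports depositCount deposits. Proofs up to and including this
// index are only needed for state transition and can be pruned once finalized.
func finalizedDepositIndex(depositCount uint64) (int64, bool) {
	if depositCount == 0 {
		return 0, false // no deposits yet, nothing to finalize or prune
	}
	return int64(depositCount - 1), true
}

A finalized state reporting 1024 deposits therefore finalizes index 1023.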
// This deletes the input attestations from the attestation pool, so proposers don't include them in a future block.
|
||||
func (s *Service) deletePoolAtts(atts []*ethpb.Attestation) error {
|
||||
for _, att := range atts {
|
||||
if helpers.IsAggregated(att) {
|
||||
if err := s.attPool.DeleteAggregatedAttestation(att); err != nil {
|
||||
if err := s.cfg.AttPool.DeleteAggregatedAttestation(att); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if err := s.attPool.DeleteUnaggregatedAttestation(att); err != nil {
|
||||
if err := s.cfg.AttPool.DeleteUnaggregatedAttestation(att); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,27 +3,34 @@ package blockchain
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
|
||||
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/attestationutil"
|
||||
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
|
||||
"github.com/prysmaticlabs/prysm/config/features"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/attestation"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
"github.com/prysmaticlabs/prysm/shared/timeutils"
|
||||
prysmTime "github.com/prysmaticlabs/prysm/time"
|
||||
)
|
||||
|
||||
func TestStore_OnBlock(t *testing.T) {
|
||||
@@ -39,29 +46,30 @@ func TestStore_OnBlock(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
genesisStateRoot := [32]byte{}
|
||||
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
|
||||
assert.NoError(t, beaconDB.SaveBlock(ctx, genesis))
|
||||
assert.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
|
||||
validGenesisRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
st := testutil.NewBeaconState()
|
||||
require.NoError(t, service.beaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))
|
||||
roots, err := blockTree1(beaconDB, validGenesisRoot[:])
|
||||
st, err := testutil.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))
|
||||
roots, err := blockTree1(t, beaconDB, validGenesisRoot[:])
|
||||
require.NoError(t, err)
|
||||
random := testutil.NewBeaconBlock()
|
||||
random.Block.Slot = 1
|
||||
random.Block.ParentRoot = validGenesisRoot[:]
|
||||
assert.NoError(t, beaconDB.SaveBlock(ctx, random))
|
||||
assert.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(random)))
|
||||
randomParentRoot, err := random.Block.HashTreeRoot()
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{Slot: st.Slot(), Root: randomParentRoot[:]}))
|
||||
require.NoError(t, service.beaconDB.SaveState(ctx, st.Copy(), randomParentRoot))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, ðpb.StateSummary{Slot: st.Slot(), Root: randomParentRoot[:]}))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), randomParentRoot))
|
||||
randomParentRoot2 := roots[1]
|
||||
require.NoError(t, service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{Slot: st.Slot(), Root: randomParentRoot2}))
|
||||
require.NoError(t, service.beaconDB.SaveState(ctx, st.Copy(), bytesutil.ToBytes32(randomParentRoot2)))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, ðpb.StateSummary{Slot: st.Slot(), Root: randomParentRoot2}))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), bytesutil.ToBytes32(randomParentRoot2)))
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
blk *ethpb.SignedBeaconBlock
|
||||
s *stateTrie.BeaconState
|
||||
s state.BeaconState
|
||||
time uint64
|
||||
wantErrString string
|
||||
}{
|
||||
@@ -76,7 +84,7 @@ func TestStore_OnBlock(t *testing.T) {
|
||||
blk: func() *ethpb.SignedBeaconBlock {
|
||||
b := testutil.NewBeaconBlock()
|
||||
b.Block.ParentRoot = randomParentRoot2
|
||||
b.Block.Slot = params.BeaconConfig().FarFutureEpoch
|
||||
b.Block.Slot = params.BeaconConfig().FarFutureSlot
|
||||
return b
|
||||
}(),
|
||||
s: st.Copy(),
|
||||
@@ -115,7 +123,7 @@ func TestStore_OnBlock(t *testing.T) {
|
||||
|
||||
root, err := tt.blk.Block.HashTreeRoot()
|
||||
assert.NoError(t, err)
|
||||
err = service.onBlock(ctx, tt.blk, root)
|
||||
err = service.onBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(tt.blk), root)
|
||||
assert.ErrorContains(t, tt.wantErrString, err)
|
||||
})
|
||||
}
|
||||
@@ -134,40 +142,42 @@ func TestStore_OnBlockBatch(t *testing.T) {
|
||||
|
||||
genesisStateRoot := [32]byte{}
|
||||
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
|
||||
assert.NoError(t, beaconDB.SaveBlock(ctx, genesis))
|
||||
assert.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
|
||||
gRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
service.finalizedCheckpt = ðpb.Checkpoint{
|
||||
Root: gRoot[:],
|
||||
}
|
||||
service.forkChoiceStore = protoarray.New(0, 0, [32]byte{})
|
||||
service.saveInitSyncBlock(gRoot, genesis)
|
||||
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{})
|
||||
service.saveInitSyncBlock(gRoot, wrapper.WrappedPhase0SignedBeaconBlock(genesis))
|
||||
|
||||
st, keys := testutil.DeterministicGenesisState(t, 64)
|
||||
|
||||
bState := st.Copy()
|
||||
|
||||
var blks []*ethpb.SignedBeaconBlock
|
||||
var blks []block.SignedBeaconBlock
|
||||
var blkRoots [][32]byte
|
||||
var firstState *stateTrie.BeaconState
|
||||
var firstState state.BeaconState
|
||||
for i := 1; i < 10; i++ {
|
||||
b, err := testutil.GenerateFullBlock(bState, keys, testutil.DefaultBlockGenConfig(), uint64(i))
|
||||
b, err := testutil.GenerateFullBlock(bState, keys, testutil.DefaultBlockGenConfig(), types.Slot(i))
|
||||
require.NoError(t, err)
|
||||
bState, err = state.ExecuteStateTransition(ctx, bState, b)
|
||||
bState, err = transition.ExecuteStateTransition(ctx, bState, wrapper.WrappedPhase0SignedBeaconBlock(b))
|
||||
require.NoError(t, err)
|
||||
if i == 1 {
|
||||
firstState = bState.Copy()
|
||||
}
|
||||
root, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
service.saveInitSyncBlock(root, b)
|
||||
blks = append(blks, b)
|
||||
service.saveInitSyncBlock(root, wrapper.WrappedPhase0SignedBeaconBlock(b))
|
||||
blks = append(blks, wrapper.WrappedPhase0SignedBeaconBlock(b))
|
||||
blkRoots = append(blkRoots, root)
|
||||
}
|
||||
|
||||
blks[0].Block.ParentRoot = gRoot[:]
|
||||
rBlock, err := blks[0].PbPhase0Block()
|
||||
assert.NoError(t, err)
|
||||
rBlock.Block.ParentRoot = gRoot[:]
|
||||
require.NoError(t, beaconDB.SaveBlock(context.Background(), blks[0]))
|
||||
require.NoError(t, service.stateGen.SaveState(ctx, blkRoots[0], firstState))
|
||||
require.NoError(t, service.cfg.StateGen.SaveState(ctx, blkRoots[0], firstState))
|
||||
_, _, err = service.onBlockBatch(ctx, blks[1:], blkRoots[1:])
|
||||
require.NoError(t, err)
|
||||
}
|
||||
@@ -195,10 +205,10 @@ func TestRemoveStateSinceLastFinalized_EmptyStartSlot(t *testing.T) {
|
||||
newJustifiedBlk.Block.ParentRoot = bytesutil.PadTo(lastJustifiedRoot[:], 32)
|
||||
newJustifiedRoot, err := newJustifiedBlk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.beaconDB.SaveBlock(ctx, newJustifiedBlk))
require.NoError(t, service.beaconDB.SaveBlock(ctx, lastJustifiedBlk))
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(newJustifiedBlk)))
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(lastJustifiedBlk)))

diff := (params.BeaconConfig().SlotsPerEpoch - 1) * params.BeaconConfig().SecondsPerSlot
diff := params.BeaconConfig().SlotsPerEpoch.Sub(1).Mul(params.BeaconConfig().SecondsPerSlot)
service.genesisTime = time.Unix(time.Now().Unix()-int64(diff), 0)
service.justifiedCheckpt = &ethpb.Checkpoint{Root: lastJustifiedRoot[:]}
update, err = service.shouldUpdateCurrentJustified(ctx, &ethpb.Checkpoint{Root: newJustifiedRoot[:]})
@@ -223,10 +233,10 @@ func TestShouldUpdateJustified_ReturnFalse(t *testing.T) {
newJustifiedBlk.Block.ParentRoot = bytesutil.PadTo(lastJustifiedRoot[:], 32)
newJustifiedRoot, err := newJustifiedBlk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.beaconDB.SaveBlock(ctx, newJustifiedBlk))
require.NoError(t, service.beaconDB.SaveBlock(ctx, lastJustifiedBlk))
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(newJustifiedBlk)))
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(lastJustifiedBlk)))

diff := (params.BeaconConfig().SlotsPerEpoch - 1) * params.BeaconConfig().SecondsPerSlot
diff := params.BeaconConfig().SlotsPerEpoch.Sub(1).Mul(params.BeaconConfig().SecondsPerSlot)
service.genesisTime = time.Unix(time.Now().Unix()-int64(diff), 0)
service.justifiedCheckpt = &ethpb.Checkpoint{Root: lastJustifiedRoot[:]}

@@ -246,26 +256,26 @@ func TestCachedPreState_CanGetFromStateSummary(t *testing.T) {
service, err := NewService(ctx, cfg)
require.NoError(t, err)

s, err := stateTrie.InitializeFromProto(&pb.BeaconState{Slot: 1, GenesisValidatorsRoot: params.BeaconConfig().ZeroHash[:]})
s, err := v1.InitializeFromProto(&ethpb.BeaconState{Slot: 1, GenesisValidatorsRoot: params.BeaconConfig().ZeroHash[:]})
require.NoError(t, err)

genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
assert.NoError(t, beaconDB.SaveBlock(ctx, genesis))
assert.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.finalizedCheckpt = &ethpb.Checkpoint{
Root: gRoot[:],
}
service.forkChoiceStore = protoarray.New(0, 0, [32]byte{})
service.saveInitSyncBlock(gRoot, genesis)
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{})
service.saveInitSyncBlock(gRoot, wrapper.WrappedPhase0SignedBeaconBlock(genesis))

b := testutil.NewBeaconBlock()
b.Block.Slot = 1
b.Block.ParentRoot = gRoot[:]
require.NoError(t, service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{Slot: 1, Root: gRoot[:]}))
require.NoError(t, service.stateGen.SaveState(ctx, gRoot, s))
require.NoError(t, service.verifyBlkPreState(ctx, b.Block))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Slot: 1, Root: gRoot[:]}))
require.NoError(t, service.cfg.StateGen.SaveState(ctx, gRoot, s))
require.NoError(t, service.verifyBlkPreState(ctx, wrapper.WrappedPhase0BeaconBlock(b.Block)))
}

func TestCachedPreState_CanGetFromDB(t *testing.T) {
@@ -281,28 +291,28 @@ func TestCachedPreState_CanGetFromDB(t *testing.T) {

genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
assert.NoError(t, beaconDB.SaveBlock(ctx, genesis))
assert.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.finalizedCheckpt = &ethpb.Checkpoint{
Root: gRoot[:],
}
service.forkChoiceStore = protoarray.New(0, 0, [32]byte{})
service.saveInitSyncBlock(gRoot, genesis)
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{})
service.saveInitSyncBlock(gRoot, wrapper.WrappedPhase0SignedBeaconBlock(genesis))

b := testutil.NewBeaconBlock()
b.Block.Slot = 1
service.finalizedCheckpt = &ethpb.Checkpoint{Root: gRoot[:]}
err = service.verifyBlkPreState(ctx, b.Block)
err = service.verifyBlkPreState(ctx, wrapper.WrappedPhase0BeaconBlock(b.Block))
wanted := "could not reconstruct parent state"
assert.ErrorContains(t, wanted, err)

b.Block.ParentRoot = gRoot[:]
s, err := stateTrie.InitializeFromProto(&pb.BeaconState{Slot: 1})
s, err := v1.InitializeFromProto(&ethpb.BeaconState{Slot: 1})
require.NoError(t, err)
require.NoError(t, service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{Slot: 1, Root: gRoot[:]}))
require.NoError(t, service.stateGen.SaveState(ctx, gRoot, s))
require.NoError(t, service.verifyBlkPreState(ctx, b.Block))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Slot: 1, Root: gRoot[:]}))
require.NoError(t, service.cfg.StateGen.SaveState(ctx, gRoot, s))
require.NoError(t, service.verifyBlkPreState(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b).Block()))
}

func TestUpdateJustified_CouldUpdateBest(t *testing.T) {
@@ -314,16 +324,18 @@ func TestUpdateJustified_CouldUpdateBest(t *testing.T) {
require.NoError(t, err)

signedBlock := testutil.NewBeaconBlock()
require.NoError(t, beaconDB.SaveBlock(ctx, signedBlock))
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(signedBlock)))
r, err := signedBlock.Block.HashTreeRoot()
require.NoError(t, err)
service.justifiedCheckpt = &ethpb.Checkpoint{Root: []byte{'A'}}
service.bestJustifiedCheckpt = &ethpb.Checkpoint{Root: []byte{'A'}}
st := testutil.NewBeaconState()
st, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, st.Copy(), r))

// Could update
s := testutil.NewBeaconState()
s, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.SetCurrentJustifiedCheckpoint(&ethpb.Checkpoint{Epoch: 1, Root: r[:]}))
require.NoError(t, service.updateJustified(context.Background(), s))

@@ -333,7 +345,7 @@ func TestUpdateJustified_CouldUpdateBest(t *testing.T) {
service.bestJustifiedCheckpt.Epoch = 2
require.NoError(t, service.updateJustified(context.Background(), s))

assert.Equal(t, uint64(2), service.bestJustifiedCheckpt.Epoch, "Incorrect justified epoch in service")
assert.Equal(t, types.Epoch(2), service.bestJustifiedCheckpt.Epoch, "Incorrect justified epoch in service")
}

func TestFillForkChoiceMissingBlocks_CanSave(t *testing.T) {
@@ -343,18 +355,19 @@ func TestFillForkChoiceMissingBlocks_CanSave(t *testing.T) {
cfg := &Config{BeaconDB: beaconDB}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
service.forkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
service.finalizedCheckpt = &ethpb.Checkpoint{Root: make([]byte, 32)}

genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
require.NoError(t, beaconDB.SaveBlock(ctx, genesis))
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
validGenesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
st := testutil.NewBeaconState()
st, err := testutil.NewBeaconState()
require.NoError(t, err)

require.NoError(t, service.beaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))
roots, err := blockTree1(beaconDB, validGenesisRoot[:])
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))
roots, err := blockTree1(t, beaconDB, validGenesisRoot[:])
require.NoError(t, err)

beaconState, _ := testutil.DeterministicGenesisState(t, 32)
@@ -363,14 +376,14 @@ func TestFillForkChoiceMissingBlocks_CanSave(t *testing.T) {
block.Block.ParentRoot = roots[8]

err = service.fillInForkChoiceMissingBlocks(
context.Background(), block.Block, beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
require.NoError(t, err)

// 5 nodes from the block tree 1. B0 - B3 - B4 - B6 - B8
assert.Equal(t, 5, len(service.forkChoiceStore.Nodes()), "Miss match nodes")
assert.Equal(t, true, service.forkChoiceStore.HasNode(bytesutil.ToBytes32(roots[4])), "Didn't save node")
assert.Equal(t, true, service.forkChoiceStore.HasNode(bytesutil.ToBytes32(roots[6])), "Didn't save node")
assert.Equal(t, true, service.forkChoiceStore.HasNode(bytesutil.ToBytes32(roots[8])), "Didn't save node")
assert.Equal(t, 5, len(service.cfg.ForkChoiceStore.Nodes()), "Miss match nodes")
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(roots[4])), "Didn't save node")
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(roots[6])), "Didn't save node")
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(roots[8])), "Didn't save node")
}

func TestFillForkChoiceMissingBlocks_RootsMatch(t *testing.T) {
@@ -380,18 +393,19 @@ func TestFillForkChoiceMissingBlocks_RootsMatch(t *testing.T) {
cfg := &Config{BeaconDB: beaconDB}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
service.forkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
service.finalizedCheckpt = &ethpb.Checkpoint{Root: make([]byte, 32)}

genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
require.NoError(t, beaconDB.SaveBlock(ctx, genesis))
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
validGenesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
st := testutil.NewBeaconState()
st, err := testutil.NewBeaconState()
require.NoError(t, err)

require.NoError(t, service.beaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))
roots, err := blockTree1(beaconDB, validGenesisRoot[:])
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))
roots, err := blockTree1(t, beaconDB, validGenesisRoot[:])
require.NoError(t, err)

beaconState, _ := testutil.DeterministicGenesisState(t, 32)
@@ -400,16 +414,16 @@ func TestFillForkChoiceMissingBlocks_RootsMatch(t *testing.T) {
block.Block.ParentRoot = roots[8]

err = service.fillInForkChoiceMissingBlocks(
context.Background(), block.Block, beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
require.NoError(t, err)

// 5 nodes from the block tree 1. B0 - B3 - B4 - B6 - B8
assert.Equal(t, 5, len(service.forkChoiceStore.Nodes()), "Miss match nodes")
assert.Equal(t, 5, len(service.cfg.ForkChoiceStore.Nodes()), "Miss match nodes")
// Ensure all roots and their respective blocks exist.
wantedRoots := [][]byte{roots[0], roots[3], roots[4], roots[6], roots[8]}
for i, rt := range wantedRoots {
assert.Equal(t, true, service.forkChoiceStore.HasNode(bytesutil.ToBytes32(rt)), fmt.Sprintf("Didn't save node: %d", i))
assert.Equal(t, true, service.beaconDB.HasBlock(context.Background(), bytesutil.ToBytes32(rt)))
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(rt)), fmt.Sprintf("Didn't save node: %d", i))
assert.Equal(t, true, service.cfg.BeaconDB.HasBlock(context.Background(), bytesutil.ToBytes32(rt)))
}
}

@@ -420,46 +434,47 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized(t *testing.T) {
cfg := &Config{BeaconDB: beaconDB}
service, err := NewService(ctx, cfg)
require.NoError(t, err)
service.forkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
// Set finalized epoch to 1.
service.finalizedCheckpt = &ethpb.Checkpoint{Epoch: 1}

genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
assert.NoError(t, beaconDB.SaveBlock(ctx, genesis))
assert.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
validGenesisRoot, err := genesis.Block.HashTreeRoot()
assert.NoError(t, err)
st := testutil.NewBeaconState()
st, err := testutil.NewBeaconState()
require.NoError(t, err)

require.NoError(t, service.beaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))

// Define a tree branch, slot 63 <- 64 <- 65
b63 := testutil.NewBeaconBlock()
b63.Block.Slot = 63
require.NoError(t, service.beaconDB.SaveBlock(ctx, b63))
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b63)))
r63, err := b63.Block.HashTreeRoot()
require.NoError(t, err)
b64 := testutil.NewBeaconBlock()
b64.Block.Slot = 64
b64.Block.ParentRoot = r63[:]
require.NoError(t, service.beaconDB.SaveBlock(ctx, b64))
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b64)))
r64, err := b64.Block.HashTreeRoot()
require.NoError(t, err)
b65 := testutil.NewBeaconBlock()
b65.Block.Slot = 65
b65.Block.ParentRoot = r64[:]
require.NoError(t, service.beaconDB.SaveBlock(ctx, b65))
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b65)))

beaconState, _ := testutil.DeterministicGenesisState(t, 32)
err = service.fillInForkChoiceMissingBlocks(
context.Background(), b65.Block, beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(b65).Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
require.NoError(t, err)

// There should be 2 nodes, block 65 and block 64.
assert.Equal(t, 2, len(service.forkChoiceStore.Nodes()), "Miss match nodes")
assert.Equal(t, 2, len(service.cfg.ForkChoiceStore.Nodes()), "Miss match nodes")

// Block with slot 63 should be in fork choice because it's less than finalized epoch 1.
assert.Equal(t, true, service.forkChoiceStore.HasNode(r63), "Didn't save node")
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(r63), "Didn't save node")
}

// blockTree1 constructs the following tree:
@@ -467,7 +482,7 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized(t *testing.T) {
// B0 /- B5 - B7
// \- B3 - B4 - B6 - B8
// (B1, and B3 are all from the same slots)
func blockTree1(beaconDB db.Database, genesisRoot []byte) ([][]byte, error) {
func blockTree1(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][]byte, error) {
genesisRoot = bytesutil.PadTo(genesisRoot, 32)
b0 := testutil.NewBeaconBlock()
b0.Block.Slot = 0
@@ -525,13 +540,14 @@ func blockTree1(beaconDB db.Database, genesisRoot []byte) ([][]byte, error) {
if err != nil {
return nil, err
}
st := testutil.NewBeaconState()
st, err := testutil.NewBeaconState()
require.NoError(t, err)

for _, b := range []*ethpb.SignedBeaconBlock{b0, b1, b3, b4, b5, b6, b7, b8} {
beaconBlock := testutil.NewBeaconBlock()
beaconBlock.Block.Slot = b.Block.Slot
beaconBlock.Block.ParentRoot = bytesutil.PadTo(b.Block.ParentRoot, 32)
if err := beaconDB.SaveBlock(context.Background(), beaconBlock); err != nil {
if err := beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(beaconBlock)); err != nil {
return nil, err
}
if err := beaconDB.SaveState(context.Background(), st.Copy(), bytesutil.ToBytes32(beaconBlock.Block.ParentRoot)); err != nil {
@@ -551,10 +567,10 @@ func blockTree1(beaconDB db.Database, genesisRoot []byte) ([][]byte, error) {
}

func TestCurrentSlot_HandlesOverflow(t *testing.T) {
svc := Service{genesisTime: timeutils.Now().Add(1 * time.Hour)}
svc := Service{genesisTime: prysmTime.Now().Add(1 * time.Hour)}

slot := svc.CurrentSlot()
require.Equal(t, uint64(0), slot, "Unexpected slot")
require.Equal(t, types.Slot(0), slot, "Unexpected slot")
}
func TestAncestorByDB_CtxErr(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
@@ -593,7 +609,7 @@ func TestAncestor_HandleSkipSlot(t *testing.T) {
beaconBlock := testutil.NewBeaconBlock()
beaconBlock.Block.Slot = b.Block.Slot
beaconBlock.Block.ParentRoot = bytesutil.PadTo(b.Block.ParentRoot, 32)
require.NoError(t, beaconDB.SaveBlock(context.Background(), beaconBlock))
require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(beaconBlock)))
}

// Slots 100 to 200 are skip slots. Requesting root at 150 will yield root at 100. The last physical block.
@@ -638,7 +654,7 @@ func TestAncestor_CanUseForkchoice(t *testing.T) {
beaconBlock.Block.ParentRoot = bytesutil.PadTo(b.Block.ParentRoot, 32)
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.forkChoiceStore.ProcessBlock(context.Background(), b.Block.Slot, r, bytesutil.ToBytes32(b.Block.ParentRoot), [32]byte{}, 0, 0)) // Saves blocks to fork choice store.
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(context.Background(), b.Block.Slot, r, bytesutil.ToBytes32(b.Block.ParentRoot), [32]byte{}, 0, 0)) // Saves blocks to fork choice store.
}

r, err := service.ancestor(context.Background(), r200[:], 150)
@@ -675,10 +691,10 @@ func TestAncestor_CanUseDB(t *testing.T) {
beaconBlock := testutil.NewBeaconBlock()
beaconBlock.Block.Slot = b.Block.Slot
beaconBlock.Block.ParentRoot = bytesutil.PadTo(b.Block.ParentRoot, 32)
require.NoError(t, beaconDB.SaveBlock(context.Background(), beaconBlock)) // Saves blocks to DB.
require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(beaconBlock))) // Saves blocks to DB.
}

require.NoError(t, service.forkChoiceStore.ProcessBlock(context.Background(), 200, r200, r200, [32]byte{}, 0, 0))
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(context.Background(), 200, r200, r200, [32]byte{}, 0, 0))

r, err := service.ancestor(context.Background(), r200[:], 150)
require.NoError(t, err)
@@ -741,14 +757,16 @@ func TestFinalizedImpliesNewJustified(t *testing.T) {
},
}
for _, test := range tests {
beaconState := testutil.NewBeaconState()
beaconState, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(test.args.stateCheckPoint))
service, err := NewService(ctx, &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB), ForkChoiceStore: protoarray.New(0, 0, [32]byte{})})
require.NoError(t, err)
service.justifiedCheckpt = test.args.cachedCheckPoint
require.NoError(t, service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{Root: bytesutil.PadTo(test.want.Root, 32)}))
genesisState := testutil.NewBeaconState()
require.NoError(t, service.beaconDB.SaveState(ctx, genesisState, bytesutil.ToBytes32(test.want.Root)))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bytesutil.PadTo(test.want.Root, 32)}))
genesisState, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, genesisState, bytesutil.ToBytes32(test.want.Root)))

if test.args.diffFinalizedCheckPoint {
b1 := testutil.NewBeaconBlock()
@@ -765,14 +783,14 @@ func TestFinalizedImpliesNewJustified(t *testing.T) {
beaconBlock := testutil.NewBeaconBlock()
beaconBlock.Block.Slot = b.Block.Slot
beaconBlock.Block.ParentRoot = bytesutil.PadTo(b.Block.ParentRoot, 32)
require.NoError(t, service.beaconDB.SaveBlock(context.Background(), beaconBlock))
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(beaconBlock)))
}
service.finalizedCheckpt = &ethpb.Checkpoint{Root: []byte{'c'}, Epoch: 1}
service.justifiedCheckpt.Root = r100[:]
}

require.NoError(t, service.finalizedImpliesNewJustified(ctx, beaconState))
assert.Equal(t, true, attestationutil.CheckPointIsEqual(test.want, service.justifiedCheckpt), "Did not get wanted check point")
assert.Equal(t, true, attestation.CheckPointIsEqual(test.want, service.justifiedCheckpt), "Did not get wanted check point")
}
}

@@ -784,14 +802,14 @@ func TestVerifyBlkDescendant(t *testing.T) {
b.Block.Slot = 1
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, b))
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b)))

b1 := testutil.NewBeaconBlock()
b1.Block.Slot = 1
b1.Block.Body.Graffiti = bytesutil.PadTo([]byte{'a'}, 32)
r1, err := b1.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, b1))
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b1)))

type args struct {
parentRoot [32]byte
@@ -858,11 +876,11 @@ func TestUpdateJustifiedInitSync(t *testing.T) {
gBlk := testutil.NewBeaconBlock()
gRoot, err := gBlk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.beaconDB.SaveBlock(ctx, gBlk))
require.NoError(t, service.beaconDB.SaveGenesisBlockRoot(ctx, gRoot))
require.NoError(t, service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{Root: gRoot[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(gBlk)))
require.NoError(t, service.cfg.BeaconDB.SaveGenesisBlockRoot(ctx, gRoot))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: gRoot[:]}))
beaconState, _ := testutil.DeterministicGenesisState(t, 32)
require.NoError(t, service.beaconDB.SaveState(ctx, beaconState, gRoot))
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, beaconState, gRoot))
service.genesisRoot = gRoot
currentCp := &ethpb.Checkpoint{Epoch: 1}
service.justifiedCheckpt = currentCp
@@ -870,11 +888,11 @@ func TestUpdateJustifiedInitSync(t *testing.T) {

require.NoError(t, service.updateJustifiedInitSync(ctx, newCp))

assert.DeepEqual(t, currentCp, service.prevJustifiedCheckpt, "Incorrect previous justified checkpoint")
assert.DeepEqual(t, newCp, service.CurrentJustifiedCheckpt(), "Incorrect current justified checkpoint in cache")
cp, err := service.beaconDB.JustifiedCheckpoint(ctx)
assert.DeepSSZEqual(t, currentCp, service.prevJustifiedCheckpt, "Incorrect previous justified checkpoint")
assert.DeepSSZEqual(t, newCp, service.CurrentJustifiedCheckpt(), "Incorrect current justified checkpoint in cache")
cp, err := service.cfg.BeaconDB.JustifiedCheckpoint(ctx)
require.NoError(t, err)
assert.DeepEqual(t, newCp, cp, "Incorrect current justified checkpoint in db")
assert.DeepSSZEqual(t, newCp, cp, "Incorrect current justified checkpoint in db")
}

func TestHandleEpochBoundary_BadMetrics(t *testing.T) {
@@ -883,9 +901,11 @@ func TestHandleEpochBoundary_BadMetrics(t *testing.T) {
service, err := NewService(ctx, cfg)
require.NoError(t, err)

s := testutil.NewBeaconState()
s, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.SetSlot(1))
service.head = &head{}
service.head = &head{state: (*v1.BeaconState)(nil)}

require.ErrorContains(t, "failed to initialize precompute: nil inner state", service.handleEpochBoundary(ctx, s))
}

@@ -912,28 +932,118 @@ func TestOnBlock_CanFinalize(t *testing.T) {
StateGen: stategen.New(beaconDB),
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
DepositCache: depositCache,
StateNotifier: &mock.MockStateNotifier{},
}
service, err := NewService(ctx, cfg)
require.NoError(t, err)

gs, keys := testutil.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
gBlk, err := service.beaconDB.GenesisBlock(ctx)
gBlk, err := service.cfg.BeaconDB.GenesisBlock(ctx)
require.NoError(t, err)
gRoot, err := gBlk.Block.HashTreeRoot()
gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
service.finalizedCheckpt = &ethpb.Checkpoint{Root: gRoot[:]}

testState := gs.Copy()
for i := uint64(1); i <= 4*params.BeaconConfig().SlotsPerEpoch; i++ {
for i := types.Slot(1); i <= 4*params.BeaconConfig().SlotsPerEpoch; i++ {
blk, err := testutil.GenerateFullBlock(testState, keys, testutil.DefaultBlockGenConfig(), i)
require.NoError(t, err)
r, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.onBlock(ctx, blk, r))
testState, err = service.stateGen.StateByRoot(ctx, r)
require.NoError(t, service.onBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(blk), r))
testState, err = service.cfg.StateGen.StateByRoot(ctx, r)
require.NoError(t, err)
}
require.Equal(t, uint64(3), service.CurrentJustifiedCheckpt().Epoch)
require.Equal(t, uint64(2), service.FinalizedCheckpt().Epoch)
require.Equal(t, types.Epoch(3), service.CurrentJustifiedCheckpt().Epoch)
require.Equal(t, types.Epoch(2), service.FinalizedCheckpt().Epoch)
}

func TestInsertFinalizedDeposits(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
depositCache, err := depositcache.New()
require.NoError(t, err)
cfg := &Config{
BeaconDB: beaconDB,
StateGen: stategen.New(beaconDB),
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
DepositCache: depositCache,
}
service, err := NewService(ctx, cfg)
require.NoError(t, err)

gs, _ := testutil.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
gBlk, err := service.cfg.BeaconDB.GenesisBlock(ctx)
require.NoError(t, err)
gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
service.finalizedCheckpt = &ethpb.Checkpoint{Root: gRoot[:]}
gs = gs.Copy()
assert.NoError(t, gs.SetEth1Data(&ethpb.Eth1Data{DepositCount: 10}))
assert.NoError(t, service.cfg.StateGen.SaveState(ctx, [32]byte{'m', 'o', 'c', 'k'}, gs))
zeroSig := [96]byte{}
for i := uint64(0); i < uint64(4*params.BeaconConfig().SlotsPerEpoch); i++ {
root := []byte(strconv.Itoa(int(i)))
assert.NoError(t, depositCache.InsertDeposit(ctx, &ethpb.Deposit{Data: &ethpb.Deposit_Data{
PublicKey: bytesutil.FromBytes48([48]byte{}),
WithdrawalCredentials: params.BeaconConfig().ZeroHash[:],
Amount: 0,
Signature: zeroSig[:],
}, Proof: [][]byte{root}}, 100+i, int64(i), bytesutil.ToBytes32(root)))
}
assert.NoError(t, service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k'}))
fDeposits := depositCache.FinalizedDeposits(ctx)
assert.Equal(t, 9, int(fDeposits.MerkleTrieIndex), "Finalized deposits not inserted correctly")
deps := depositCache.AllDeposits(ctx, big.NewInt(109))
for _, d := range deps {
assert.DeepEqual(t, [][]byte(nil), d.Proof, "Proofs are not empty")
}
}

func TestRemoveBlockAttestationsInPool_Canonical(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{
CorrectlyPruneCanonicalAtts: true,
})
defer resetCfg()

genesis, keys := testutil.DeterministicGenesisState(t, 64)
b, err := testutil.GenerateFullBlock(genesis, keys, testutil.DefaultBlockGenConfig(), 1)
assert.NoError(t, err)
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)

ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: r[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveGenesisBlockRoot(ctx, r))

atts := b.Block.Body.Attestations
require.NoError(t, service.cfg.AttPool.SaveAggregatedAttestations(atts))
require.NoError(t, service.pruneCanonicalAttsFromPool(ctx, r, wrapper.WrappedPhase0SignedBeaconBlock(b)))
require.Equal(t, 0, service.cfg.AttPool.AggregatedAttestationCount())
}

func TestRemoveBlockAttestationsInPool_NonCanonical(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{
CorrectlyPruneCanonicalAtts: true,
})
defer resetCfg()

genesis, keys := testutil.DeterministicGenesisState(t, 64)
b, err := testutil.GenerateFullBlock(genesis, keys, testutil.DefaultBlockGenConfig(), 1)
assert.NoError(t, err)
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)

ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)

atts := b.Block.Body.Attestations
require.NoError(t, service.cfg.AttPool.SaveAggregatedAttestations(atts))
require.NoError(t, service.pruneCanonicalAttsFromPool(ctx, r, wrapper.WrappedPhase0SignedBeaconBlock(b)))
require.Equal(t, 1, service.cfg.AttPool.AggregatedAttestationCount())
}

@@ -7,13 +7,14 @@ import (
"time"

"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/core"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/slotutil"
"github.com/prysmaticlabs/prysm/time/slots"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
@@ -21,7 +22,7 @@ import (
// AttestationReceiver interface defines the methods of chain service receive and processing new attestations.
type AttestationReceiver interface {
ReceiveAttestationNoPubsub(ctx context.Context, att *ethpb.Attestation) error
AttestationPreState(ctx context.Context, att *ethpb.Attestation) (*state.BeaconState, error)
AttestationPreState(ctx context.Context, att *ethpb.Attestation) (state.BeaconState, error)
VerifyLmdFfgConsistency(ctx context.Context, att *ethpb.Attestation) error
VerifyFinalizedConsistency(ctx context.Context, root []byte) error
}
@@ -35,26 +36,20 @@ func (s *Service) ReceiveAttestationNoPubsub(ctx context.Context, att *ethpb.Att
ctx, span := trace.StartSpan(ctx, "beacon-chain.blockchain.ReceiveAttestationNoPubsub")
defer span.End()

_, err := s.onAttestation(ctx, att)
if err != nil {
if err := s.onAttestation(ctx, att); err != nil {
return errors.Wrap(err, "could not process attestation")
}

if err := s.updateHead(ctx, s.getJustifiedBalances()); err != nil {
log.Warnf("Resolving fork due to new attestation: %v", err)
return nil
}

return nil
}

// AttestationPreState returns the pre state of attestation.
func (s *Service) AttestationPreState(ctx context.Context, att *ethpb.Attestation) (*state.BeaconState, error) {
ss, err := helpers.StartSlot(att.Data.Target.Epoch)
func (s *Service) AttestationPreState(ctx context.Context, att *ethpb.Attestation) (state.BeaconState, error) {
ss, err := core.StartSlot(att.Data.Target.Epoch)
if err != nil {
return nil, err
}
if err := helpers.ValidateSlotClock(ss, uint64(s.genesisTime.Unix())); err != nil {
if err := core.ValidateSlotClock(ss, uint64(s.genesisTime.Unix())); err != nil {
return nil, err
}
return s.getAttPreState(ctx, att.Data.Target)
@@ -62,7 +57,18 @@ func (s *Service) AttestationPreState(ctx context.Context, att *ethpb.Attestatio

// VerifyLmdFfgConsistency verifies that attestation's LMD and FFG votes are consistency to each other.
func (s *Service) VerifyLmdFfgConsistency(ctx context.Context, a *ethpb.Attestation) error {
return s.verifyLMDFFGConsistent(ctx, a.Data.Target.Epoch, a.Data.Target.Root, a.Data.BeaconBlockRoot)
targetSlot, err := core.StartSlot(a.Data.Target.Epoch)
if err != nil {
return err
}
r, err := s.ancestor(ctx, a.Data.BeaconBlockRoot, targetSlot)
if err != nil {
return err
}
if !bytes.Equal(a.Data.Target.Root, r) {
return errors.New("FFG and LMD votes are not consistent")
}
return nil
}

// VerifyFinalizedConsistency verifies input root is consistent with finalized store.
@@ -71,12 +77,12 @@ func (s *Service) VerifyLmdFfgConsistency(ctx context.Context, a *ethpb.Attestat
func (s *Service) VerifyFinalizedConsistency(ctx context.Context, root []byte) error {
// A canonical root implies the root to has an ancestor that aligns with finalized check point.
// In this case, we could exit early to save on additional computation.
if s.forkChoiceStore.IsCanonical(bytesutil.ToBytes32(root)) {
if s.cfg.ForkChoiceStore.IsCanonical(bytesutil.ToBytes32(root)) {
return nil
}

f := s.FinalizedCheckpt()
ss, err := helpers.StartSlot(f.Epoch)
ss, err := core.StartSlot(f.Epoch)
if err != nil {
return err
}
@@ -91,11 +97,11 @@ func (s *Service) VerifyFinalizedConsistency(ctx context.Context, root []byte) e
return nil
}

// This processes attestations from the attestation pool to account for validator votes and fork choice.
func (s *Service) processAttestation(subscribedToStateEvents chan struct{}) {
// This routine processes fork choice attestations from the pool to account for validator votes and fork choice.
func (s *Service) processAttestationsRoutine(subscribedToStateEvents chan<- struct{}) {
// Wait for state to be initialized.
stateChannel := make(chan *feed.Event, 1)
stateSub := s.stateNotifier.StateFeed().Subscribe(stateChannel)
stateSub := s.cfg.StateNotifier.StateFeed().Subscribe(stateChannel)
subscribedToStateEvents <- struct{}{}
<-stateChannel
stateSub.Unsubscribe()
@@ -108,47 +114,59 @@ func (s *Service) processAttestation(subscribedToStateEvents chan struct{}) {
log.Warn("Genesis time received, now available to process attestations")
}

st := slotutil.GetSlotTicker(s.genesisTime, params.BeaconConfig().SecondsPerSlot)
st := slots.NewSlotTicker(s.genesisTime, params.BeaconConfig().SecondsPerSlot)
for {
select {
case <-s.ctx.Done():
return
case <-st.C():
ctx := s.ctx
atts := s.attPool.ForkchoiceAttestations()
for _, a := range atts {
// Based on the spec, don't process the attestation until the subsequent slot.
// This delays consideration in the fork choice until their slot is in the past.
// https://github.com/ethereum/eth2.0-specs/blob/dev/specs/phase0/fork-choice.md#validate_on_attestation
nextSlot := a.Data.Slot + 1
if err := helpers.VerifySlotTime(uint64(s.genesisTime.Unix()), nextSlot, params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil {
continue
}

hasState := s.beaconDB.HasStateSummary(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot))
hasBlock := s.hasBlock(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot))
if !(hasState && hasBlock) {
continue
}

if err := s.attPool.DeleteForkchoiceAttestation(a); err != nil {
log.WithError(err).Error("Could not delete fork choice attestation in pool")
}

if !helpers.VerifyCheckpointEpoch(a.Data.Target, s.genesisTime) {
continue
}

if err := s.ReceiveAttestationNoPubsub(ctx, a); err != nil {
log.WithFields(logrus.Fields{
"slot": a.Data.Slot,
"committeeIndex": a.Data.CommitteeIndex,
"beaconBlockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(a.Data.BeaconBlockRoot)),
"targetRoot": fmt.Sprintf("%#x", bytesutil.Trunc(a.Data.Target.Root)),
"aggregationCount": a.AggregationBits.Count(),
}).WithError(err).Warn("Could not receive attestation in chain service")
}
// Continue when there's no fork choice attestation, there's nothing to process and update head.
// This covers the condition when the node is still initial syncing to the head of the chain.
if s.cfg.AttPool.ForkchoiceAttestationCount() == 0 {
continue
}
s.processAttestations(s.ctx)
if err := s.updateHead(s.ctx, s.getJustifiedBalances()); err != nil {
log.Warnf("Resolving fork due to new attestation: %v", err)
}
}
}
}

// This processes fork choice attestations from the pool to account for validator votes and fork choice.
func (s *Service) processAttestations(ctx context.Context) {
atts := s.cfg.AttPool.ForkchoiceAttestations()
for _, a := range atts {
// Based on the spec, don't process the attestation until the subsequent slot.
// This delays consideration in the fork choice until their slot is in the past.
// https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/fork-choice.md#validate_on_attestation
nextSlot := a.Data.Slot + 1
if err := core.VerifySlotTime(uint64(s.genesisTime.Unix()), nextSlot, params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil {
continue
}

hasState := s.cfg.BeaconDB.HasStateSummary(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot))
hasBlock := s.hasBlock(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot))
if !(hasState && hasBlock) {
continue
}

if err := s.cfg.AttPool.DeleteForkchoiceAttestation(a); err != nil {
log.WithError(err).Error("Could not delete fork choice attestation in pool")
}

if !helpers.VerifyCheckpointEpoch(a.Data.Target, s.genesisTime) {
continue
}

if err := s.ReceiveAttestationNoPubsub(ctx, a); err != nil {
log.WithFields(logrus.Fields{
"slot": a.Data.Slot,
"committeeIndex": a.Data.CommitteeIndex,
"beaconBlockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(a.Data.BeaconBlockRoot)),
"targetRoot": fmt.Sprintf("%#x", bytesutil.Trunc(a.Data.Target.Root)),
"aggregationCount": a.AggregationBits.Count(),
}).WithError(err).Warn("Could not process attestation for fork choice")
}
}
}

@@ -5,11 +5,22 @@ import (
"testing"
"time"

ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
prysmTime "github.com/prysmaticlabs/prysm/time"
logTest "github.com/sirupsen/logrus/hooks/test"
)

func TestAttestationCheckPtState_FarFutureSlot(t *testing.T) {
@@ -19,7 +30,94 @@ func TestAttestationCheckPtState_FarFutureSlot(t *testing.T) {
chainService := setupBeaconChain(t, beaconDB)
chainService.genesisTime = time.Now()

e := helpers.MaxSlotBuffer/params.BeaconConfig().SlotsPerEpoch + 1
e := types.Epoch(core.MaxSlotBuffer/uint64(params.BeaconConfig().SlotsPerEpoch) + 1)
_, err := chainService.AttestationPreState(context.Background(), &ethpb.Attestation{Data: &ethpb.AttestationData{Target: &ethpb.Checkpoint{Epoch: e}}})
require.ErrorContains(t, "exceeds max allowed value relative to the local clock", err)
}

func TestVerifyLMDFFGConsistent_NotOK(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)

cfg := &Config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx, cfg)
require.NoError(t, err)

b32 := testutil.NewBeaconBlock()
b32.Block.Slot = 32
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b32)))
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)
b33 := testutil.NewBeaconBlock()
b33.Block.Slot = 33
b33.Block.ParentRoot = r32[:]
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b33)))
r33, err := b33.Block.HashTreeRoot()
require.NoError(t, err)

wanted := "FFG and LMD votes are not consistent"
a := testutil.NewAttestation()
a.Data.Target.Epoch = 1
a.Data.Target.Root = []byte{'a'}
a.Data.BeaconBlockRoot = r33[:]
require.ErrorContains(t, wanted, service.VerifyLmdFfgConsistency(context.Background(), a))
}

func TestVerifyLMDFFGConsistent_OK(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)

cfg := &Config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := NewService(ctx, cfg)
require.NoError(t, err)

b32 := testutil.NewBeaconBlock()
b32.Block.Slot = 32
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b32)))
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)
b33 := testutil.NewBeaconBlock()
b33.Block.Slot = 33
b33.Block.ParentRoot = r32[:]
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b33)))
r33, err := b33.Block.HashTreeRoot()
require.NoError(t, err)

a := testutil.NewAttestation()
a.Data.Target.Epoch = 1
a.Data.Target.Root = r32[:]
a.Data.BeaconBlockRoot = r33[:]
err = service.VerifyLmdFfgConsistency(context.Background(), a)
require.NoError(t, err, "Could not verify LMD and FFG votes to be consistent")
}

func TestProcessAttestations_Ok(t *testing.T) {
hook := logTest.NewGlobal()
ctx := context.Background()
beaconDB := testDB.SetupDB(t)

cfg := &Config{
BeaconDB: beaconDB,
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
StateGen: stategen.New(beaconDB),
AttPool: attestations.NewPool(),
}
service, err := NewService(ctx, cfg)
service.genesisTime = prysmTime.Now().Add(-1 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
require.NoError(t, err)
genesisState, pks := testutil.DeterministicGenesisState(t, 64)
require.NoError(t, genesisState.SetGenesisTime(uint64(prysmTime.Now().Unix())-params.BeaconConfig().SecondsPerSlot))
require.NoError(t, service.saveGenesisData(ctx, genesisState))
atts, err := testutil.GenerateAttestations(genesisState, pks, 1, 0, false)
require.NoError(t, err)
tRoot := bytesutil.ToBytes32(atts[0].Data.Target.Root)
copied := genesisState.Copy()
copied, err = transition.ProcessSlots(ctx, copied, 1)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(ctx, 0, tRoot, tRoot, tRoot, 1, 1))
require.NoError(t, service.cfg.AttPool.SaveForkchoiceAttestations(atts))
service.processAttestations(ctx)
require.Equal(t, 0, len(service.cfg.AttPool.ForkchoiceAttestations()))
require.LogsDoNotContain(t, hook, "Could not process attestation for fork choice")
}

@@ -4,24 +4,24 @@ import (
|
||||
"context"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/shared/traceutil"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/prysmaticlabs/prysm/config/features"
|
||||
"github.com/prysmaticlabs/prysm/monitoring/tracing"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
|
||||
"github.com/prysmaticlabs/prysm/time"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// This defines how many epochs since finality the run time will begin to save hot state on to the DB.
|
||||
var epochsSinceFinalitySaveHotStateDB = 100
|
||||
var epochsSinceFinalitySaveHotStateDB = types.Epoch(100)
|
||||
|
||||
// BlockReceiver interface defines the methods of chain service receive and processing new blocks.
|
||||
type BlockReceiver interface {
|
||||
ReceiveBlock(ctx context.Context, block *ethpb.SignedBeaconBlock, blockRoot [32]byte) error
|
||||
ReceiveBlockInitialSync(ctx context.Context, block *ethpb.SignedBeaconBlock, blockRoot [32]byte) error
|
||||
ReceiveBlockBatch(ctx context.Context, blocks []*ethpb.SignedBeaconBlock, blkRoots [][32]byte) error
|
||||
ReceiveBlock(ctx context.Context, block block.SignedBeaconBlock, blockRoot [32]byte) error
|
||||
ReceiveBlockBatch(ctx context.Context, blocks []block.SignedBeaconBlock, blkRoots [][32]byte) error
|
||||
HasInitSyncBlock(root [32]byte) bool
|
||||
}
|
||||
|
||||
@@ -30,36 +30,43 @@ type BlockReceiver interface {
|
||||
// 1. Validate block, apply state transition and update check points
|
||||
// 2. Apply fork choice to the processed block
|
||||
// 3. Save latest head info
|
||||
func (s *Service) ReceiveBlock(ctx context.Context, block *ethpb.SignedBeaconBlock, blockRoot [32]byte) error {
|
||||
func (s *Service) ReceiveBlock(ctx context.Context, block block.SignedBeaconBlock, blockRoot [32]byte) error {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveBlock")
|
||||
defer span.End()
|
||||
blockCopy := stateTrie.CopySignedBeaconBlock(block)
|
||||
receivedTime := time.Now()
|
||||
blockCopy := block.Copy()
|
||||
|
||||
// Apply state transition on the new block.
|
||||
if err := s.onBlock(ctx, blockCopy, blockRoot); err != nil {
|
||||
err := errors.Wrap(err, "could not process block")
|
||||
traceutil.AnnotateError(span, err)
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
|
||||
// Update and save head block after fork choice.
|
||||
if err := s.updateHead(ctx, s.getJustifiedBalances()); err != nil {
|
||||
log.WithError(err).Warn("Could not update head")
|
||||
if !features.Get().UpdateHeadTimely {
|
||||
if err := s.updateHead(ctx, s.getJustifiedBalances()); err != nil {
|
||||
log.WithError(err).Warn("Could not update head")
|
||||
}
|
||||
|
||||
if err := s.pruneCanonicalAttsFromPool(ctx, blockRoot, block); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Send notification of the processed block to the state feed.
|
||||
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
|
||||
Type: statefeed.BlockProcessed,
|
||||
Data: &statefeed.BlockProcessedData{
|
||||
Slot: blockCopy.Block().Slot(),
|
||||
BlockRoot: blockRoot,
|
||||
SignedBlock: blockCopy,
|
||||
Verified: true,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// Send notification of the processed block to the state feed.
|
||||
s.stateNotifier.StateFeed().Send(&feed.Event{
|
||||
Type: statefeed.BlockProcessed,
|
||||
Data: &statefeed.BlockProcessedData{
|
||||
Slot: blockCopy.Block.Slot,
|
||||
BlockRoot: blockRoot,
|
||||
SignedBlock: blockCopy,
|
||||
Verified: true,
|
||||
},
|
||||
})
|
||||
|
||||
// Handle post block operations such as attestations and exits.
|
||||
if err := s.handlePostBlockOperations(blockCopy.Block); err != nil {
|
||||
if err := s.handlePostBlockOperations(blockCopy.Block()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -69,51 +76,14 @@ func (s *Service) ReceiveBlock(ctx context.Context, block *ethpb.SignedBeaconBlo
|
||||
}
|
||||
|
||||
// Reports on block and fork choice metrics.
|
||||
reportSlotMetrics(blockCopy.Block.Slot, s.HeadSlot(), s.CurrentSlot(), s.finalizedCheckpt)
|
||||
reportSlotMetrics(blockCopy.Block().Slot(), s.HeadSlot(), s.CurrentSlot(), s.finalizedCheckpt)
|
||||
|
||||
// Log block sync status.
|
||||
logBlockSyncStatus(blockCopy.Block, blockRoot, s.finalizedCheckpt)
|
||||
|
||||
// Log state transition data.
|
||||
logStateTransitionData(blockCopy.Block)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReceiveBlockInitialSync processes the input block for the purpose of initial syncing.
|
||||
// This method should only be used on blocks during initial syncing phase.
|
||||
func (s *Service) ReceiveBlockInitialSync(ctx context.Context, block *ethpb.SignedBeaconBlock, blockRoot [32]byte) error {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveBlockNoVerify")
|
||||
defer span.End()
|
||||
blockCopy := stateTrie.CopySignedBeaconBlock(block)
|
||||
|
||||
// Apply state transition on the new block.
|
||||
if err := s.onBlockInitialSyncStateTransition(ctx, blockCopy, blockRoot); err != nil {
|
||||
err := errors.Wrap(err, "could not process block")
|
||||
traceutil.AnnotateError(span, err)
|
||||
if err := logBlockSyncStatus(blockCopy.Block(), blockRoot, s.finalizedCheckpt, receivedTime, uint64(s.genesisTime.Unix())); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Send notification of the processed block to the state feed.
|
||||
s.stateNotifier.StateFeed().Send(&feed.Event{
|
||||
Type: statefeed.BlockProcessed,
|
||||
Data: &statefeed.BlockProcessedData{
|
||||
Slot: blockCopy.Block.Slot,
|
||||
BlockRoot: blockRoot,
|
||||
SignedBlock: blockCopy,
|
||||
Verified: true,
|
||||
},
|
||||
})
|
||||
|
||||
// Reports on blockCopy and fork choice metrics.
|
||||
reportSlotMetrics(blockCopy.Block.Slot, s.HeadSlot(), s.CurrentSlot(), s.finalizedCheckpt)
|
||||
|
||||
// Log state transition data.
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": blockCopy.Block.Slot,
|
||||
"attestations": len(blockCopy.Block.Body.Attestations),
|
||||
"deposits": len(blockCopy.Block.Body.Deposits),
|
||||
}).Debug("Finished applying state transition")
|
||||
logStateTransitionData(blockCopy.Block())
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -121,7 +91,7 @@ func (s *Service) ReceiveBlockInitialSync(ctx context.Context, block *ethpb.Sign
|
||||
// ReceiveBlockBatch processes the whole block batch at once, assuming the block batch is linear ,transitioning
|
||||
// the state, performing batch verification of all collected signatures and then performing the appropriate
|
||||
// actions for a block post-transition.
|
||||
func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []*ethpb.SignedBeaconBlock, blkRoots [][32]byte) error {
|
||||
func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []block.SignedBeaconBlock, blkRoots [][32]byte) error {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveBlockBatch")
|
||||
defer span.End()
|
||||
|
||||
@@ -129,21 +99,21 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []*ethpb.SignedB
|
||||
fCheckpoints, jCheckpoints, err := s.onBlockBatch(ctx, blocks, blkRoots)
|
||||
if err != nil {
|
||||
err := errors.Wrap(err, "could not process block in batch")
|
||||
traceutil.AnnotateError(span, err)
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
|
||||
for i, b := range blocks {
|
||||
blockCopy := stateTrie.CopySignedBeaconBlock(b)
|
||||
blockCopy := b.Copy()
|
||||
if err = s.handleBlockAfterBatchVerify(ctx, blockCopy, blkRoots[i], fCheckpoints[i], jCheckpoints[i]); err != nil {
|
||||
traceutil.AnnotateError(span, err)
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
// Send notification of the processed block to the state feed.
|
||||
s.stateNotifier.StateFeed().Send(&feed.Event{
|
||||
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
|
||||
Type: statefeed.BlockProcessed,
|
||||
Data: &statefeed.BlockProcessedData{
|
||||
Slot: blockCopy.Block.Slot,
|
||||
Slot: blockCopy.Block().Slot(),
|
||||
BlockRoot: blkRoots[i],
|
||||
SignedBlock: blockCopy,
|
||||
Verified: true,
|
||||
@@ -151,7 +121,7 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []*ethpb.SignedB
|
||||
})
|
||||
|
||||
// Reports on blockCopy and fork choice metrics.
|
||||
reportSlotMetrics(blockCopy.Block.Slot, s.HeadSlot(), s.CurrentSlot(), s.finalizedCheckpt)
|
||||
reportSlotMetrics(blockCopy.Block().Slot(), s.HeadSlot(), s.CurrentSlot(), s.finalizedCheckpt)
|
||||
}
|
||||
|
||||
if err := s.VerifyWeakSubjectivityRoot(s.ctx); err != nil {
|
||||
@@ -169,25 +139,25 @@ func (s *Service) HasInitSyncBlock(root [32]byte) bool {
|
||||
return s.hasInitSyncBlock(root)
|
||||
}
|
||||
|
||||
func (s *Service) handlePostBlockOperations(b *ethpb.BeaconBlock) error {
|
||||
func (s *Service) handlePostBlockOperations(b block.BeaconBlock) error {
|
||||
// Delete the processed block attestations from attestation pool.
|
||||
if err := s.deletePoolAtts(b.Body.Attestations); err != nil {
|
||||
if err := s.deletePoolAtts(b.Body().Attestations()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Add block attestations to the fork choice pool to compute head.
|
||||
if err := s.attPool.SaveBlockAttestations(b.Body.Attestations); err != nil {
|
||||
if err := s.cfg.AttPool.SaveBlockAttestations(b.Body().Attestations()); err != nil {
|
||||
log.Errorf("Could not save block attestations for fork choice: %v", err)
|
||||
return nil
|
||||
}
|
||||
// Mark block exits as seen so we don't include same ones in future blocks.
|
||||
for _, e := range b.Body.VoluntaryExits {
|
||||
s.exitPool.MarkIncluded(e)
|
||||
for _, e := range b.Body().VoluntaryExits() {
|
||||
s.cfg.ExitPool.MarkIncluded(e)
|
||||
}
|
||||
|
||||
// Mark attester slashings as seen so we don't include same ones in future blocks.
|
||||
for _, as := range b.Body.AttesterSlashings {
|
||||
s.slashingPool.MarkIncludedAttesterSlashing(as)
|
||||
for _, as := range b.Body().AttesterSlashings() {
|
||||
s.cfg.SlashingPool.MarkIncludedAttesterSlashing(as)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -195,17 +165,17 @@ func (s *Service) handlePostBlockOperations(b *ethpb.BeaconBlock) error {
|
||||
// This checks whether it's time to start saving hot state to DB.
|
||||
// It's time when there's `epochsSinceFinalitySaveHotStateDB` epochs of non-finality.
|
||||
func (s *Service) checkSaveHotStateDB(ctx context.Context) error {
|
||||
currentEpoch := helpers.SlotToEpoch(s.CurrentSlot())
|
||||
currentEpoch := core.SlotToEpoch(s.CurrentSlot())
|
||||
// Prevent `sinceFinality` going underflow.
|
||||
var sinceFinality uint64
|
||||
var sinceFinality types.Epoch
|
||||
if currentEpoch > s.finalizedCheckpt.Epoch {
|
||||
sinceFinality = currentEpoch - s.finalizedCheckpt.Epoch
|
||||
}
|
||||
|
||||
if sinceFinality >= uint64(epochsSinceFinalitySaveHotStateDB) {
|
||||
s.stateGen.EnableSaveHotStateToDB(ctx)
|
||||
if sinceFinality >= epochsSinceFinalitySaveHotStateDB {
|
||||
s.cfg.StateGen.EnableSaveHotStateToDB(ctx)
|
||||
return nil
|
||||
}
|
||||
|
||||
return s.stateGen.DisableSaveHotStateToDB(ctx)
|
||||
return s.cfg.StateGen.DisableSaveHotStateToDB(ctx)
|
||||
}
|
||||
|
||||
@@ -6,13 +6,18 @@ import (
"testing"
"time"

ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
types "github.com/prysmaticlabs/eth2-types"
blockchainTesting "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/config/features"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
@@ -25,7 +30,7 @@ func TestService_ReceiveBlock(t *testing.T) {
ctx := context.Background()

genesis, keys := testutil.DeterministicGenesisState(t, 64)
genFullBlock := func(t *testing.T, conf *testutil.BlockGenConfig, slot uint64) *ethpb.SignedBeaconBlock {
genFullBlock := func(t *testing.T, conf *testutil.BlockGenConfig, slot types.Slot) *ethpb.SignedBeaconBlock {
blk, err := testutil.GenerateFullBlock(genesis, keys, conf, slot)
assert.NoError(t, err)
return blk
@@ -52,7 +57,7 @@ func TestService_ReceiveBlock(t *testing.T) {
if hs := s.head.state.Slot(); hs != 2 {
t.Errorf("Unexpected state slot. Got %d but wanted %d", hs, 2)
}
if bs := s.head.block.Block.Slot; bs != 2 {
if bs := s.head.block.Block().Slot(); bs != 2 {
t.Errorf("Unexpected head block slot. Got %d but wanted %d", bs, 2)
}
},
@@ -72,7 +77,7 @@ func TestService_ReceiveBlock(t *testing.T) {
),
},
check: func(t *testing.T, s *Service) {
if baCount := len(s.attPool.BlockAttestations()); baCount != 2 {
if baCount := len(s.cfg.AttPool.BlockAttestations()); baCount != 2 {
t.Errorf("Did not get the correct number of block attestations saved to the pool. "+
"Got %d but wanted %d", baCount, 2)
}
@@ -92,7 +97,7 @@ func TestService_ReceiveBlock(t *testing.T) {
),
},
check: func(t *testing.T, s *Service) {
pending := s.exitPool.PendingExits(genesis, 1, true /* no limit */)
pending := s.cfg.ExitPool.PendingExits(genesis, 1, true /* no limit */)
if len(pending) != 0 {
t.Errorf(
"Did not mark the correct number of exits. Got %d pending but wanted %d",
@@ -108,7 +113,7 @@ func TestService_ReceiveBlock(t *testing.T) {
block: genFullBlock(t, testutil.DefaultBlockGenConfig(), 1 /*slot*/),
},
check: func(t *testing.T, s *Service) {
if recvd := len(s.stateNotifier.(*blockchainTesting.MockStateNotifier).ReceivedEvents()); recvd < 1 {
if recvd := len(s.cfg.StateNotifier.(*blockchainTesting.MockStateNotifier).ReceivedEvents()); recvd < 1 {
t.Errorf("Received %d state notifications, expected at least 1", recvd)
}
},
@@ -136,14 +141,14 @@ func TestService_ReceiveBlock(t *testing.T) {
s, err := NewService(ctx, cfg)
require.NoError(t, err)
require.NoError(t, s.saveGenesisData(ctx, genesis))
gBlk, err := s.beaconDB.GenesisBlock(ctx)
gBlk, err := s.cfg.BeaconDB.GenesisBlock(ctx)
require.NoError(t, err)
gRoot, err := gBlk.Block.HashTreeRoot()
gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
s.finalizedCheckpt = &ethpb.Checkpoint{Root: gRoot[:]}
root, err := tt.args.block.Block.HashTreeRoot()
require.NoError(t, err)
err = s.ReceiveBlock(ctx, tt.args.block, root)
err = s.ReceiveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(tt.args.block), root)
if tt.wantedErr != "" {
assert.ErrorContains(t, tt.wantedErr, err)
} else {
@@ -177,9 +182,9 @@ func TestService_ReceiveBlockUpdateHead(t *testing.T) {
s, err := NewService(ctx, cfg)
require.NoError(t, err)
require.NoError(t, s.saveGenesisData(ctx, genesis))
gBlk, err := s.beaconDB.GenesisBlock(ctx)
gBlk, err := s.cfg.BeaconDB.GenesisBlock(ctx)
require.NoError(t, err)
gRoot, err := gBlk.Block.HashTreeRoot()
gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
s.finalizedCheckpt = &ethpb.Checkpoint{Root: gRoot[:]}
root, err := b.Block.HashTreeRoot()
@@ -187,103 +192,22 @@ func TestService_ReceiveBlockUpdateHead(t *testing.T) {
wg := sync.WaitGroup{}
wg.Add(1)
go func() {
require.NoError(t, s.ReceiveBlock(ctx, b, root))
require.NoError(t, s.ReceiveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b), root))
wg.Done()
}()
wg.Wait()
if recvd := len(s.stateNotifier.(*blockchainTesting.MockStateNotifier).ReceivedEvents()); recvd < 1 {
if recvd := len(s.cfg.StateNotifier.(*blockchainTesting.MockStateNotifier).ReceivedEvents()); recvd < 1 {
t.Errorf("Received %d state notifications, expected at least 1", recvd)
}
// Verify fork choice has processed the block. (Genesis block and the new block)
assert.Equal(t, 2, len(s.forkChoiceStore.Nodes()))
}

func TestService_ReceiveBlockInitialSync(t *testing.T) {
ctx := context.Background()

genesis, keys := testutil.DeterministicGenesisState(t, 64)
genFullBlock := func(t *testing.T, conf *testutil.BlockGenConfig, slot uint64) *ethpb.SignedBeaconBlock {
blk, err := testutil.GenerateFullBlock(genesis, keys, conf, slot)
assert.NoError(t, err)
return blk
}

type args struct {
block *ethpb.SignedBeaconBlock
}
tests := []struct {
name string
args args
wantedErr string
check func(*testing.T, *Service)
}{
{
name: "applies block with state transition",
args: args{
block: genFullBlock(t, testutil.DefaultBlockGenConfig(), 2 /*slot*/),
},
check: func(t *testing.T, s *Service) {
assert.Equal(t, uint64(2), s.head.state.Slot(), "Incorrect head state slot")
assert.Equal(t, uint64(2), s.head.block.Block.Slot, "Incorrect head block slot")
},
},
{
name: "notifies block processed on state feed",
args: args{
block: genFullBlock(t, testutil.DefaultBlockGenConfig(), 1 /*slot*/),
},
check: func(t *testing.T, s *Service) {
if recvd := len(s.stateNotifier.(*blockchainTesting.MockStateNotifier).ReceivedEvents()); recvd < 1 {
t.Errorf("Received %d state notifications, expected at least 1", recvd)
}
},
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
beaconDB := testDB.SetupDB(t)
genesisBlockRoot := bytesutil.ToBytes32(nil)

cfg := &Config{
BeaconDB: beaconDB,
ForkChoiceStore: protoarray.New(
0, // justifiedEpoch
0, // finalizedEpoch
genesisBlockRoot,
),
StateNotifier: &blockchainTesting.MockStateNotifier{RecordEvents: true},
StateGen: stategen.New(beaconDB),
}
s, err := NewService(ctx, cfg)
require.NoError(t, err)
err = s.saveGenesisData(ctx, genesis)
require.NoError(t, err)
gBlk, err := s.beaconDB.GenesisBlock(ctx)
require.NoError(t, err)

gRoot, err := gBlk.Block.HashTreeRoot()
require.NoError(t, err)
s.finalizedCheckpt = &ethpb.Checkpoint{Root: gRoot[:]}
root, err := tt.args.block.Block.HashTreeRoot()
require.NoError(t, err)

err = s.ReceiveBlockInitialSync(ctx, tt.args.block, root)
if tt.wantedErr != "" {
assert.ErrorContains(t, tt.wantedErr, err)
} else {
assert.NoError(t, err)
tt.check(t, s)
}
})
}
assert.Equal(t, 2, len(s.cfg.ForkChoiceStore.Nodes()))
}

func TestService_ReceiveBlockBatch(t *testing.T) {
ctx := context.Background()

genesis, keys := testutil.DeterministicGenesisState(t, 64)
genFullBlock := func(t *testing.T, conf *testutil.BlockGenConfig, slot uint64) *ethpb.SignedBeaconBlock {
genFullBlock := func(t *testing.T, conf *testutil.BlockGenConfig, slot types.Slot) *ethpb.SignedBeaconBlock {
blk, err := testutil.GenerateFullBlock(genesis, keys, conf, slot)
assert.NoError(t, err)
return blk
@@ -304,8 +228,8 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
block: genFullBlock(t, testutil.DefaultBlockGenConfig(), 2 /*slot*/),
},
check: func(t *testing.T, s *Service) {
assert.Equal(t, uint64(2), s.head.state.Slot(), "Incorrect head state slot")
assert.Equal(t, uint64(2), s.head.block.Block.Slot, "Incorrect head block slot")
assert.Equal(t, types.Slot(2), s.head.state.Slot(), "Incorrect head state slot")
assert.Equal(t, types.Slot(2), s.head.block.Block().Slot(), "Incorrect head block slot")
},
},
{
@@ -314,7 +238,7 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
block: genFullBlock(t, testutil.DefaultBlockGenConfig(), 1 /*slot*/),
},
check: func(t *testing.T, s *Service) {
if recvd := len(s.stateNotifier.(*blockchainTesting.MockStateNotifier).ReceivedEvents()); recvd < 1 {
if recvd := len(s.cfg.StateNotifier.(*blockchainTesting.MockStateNotifier).ReceivedEvents()); recvd < 1 {
t.Errorf("Received %d state notifications, expected at least 1", recvd)
}
},
@@ -340,15 +264,15 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
require.NoError(t, err)
err = s.saveGenesisData(ctx, genesis)
require.NoError(t, err)
gBlk, err := s.beaconDB.GenesisBlock(ctx)
gBlk, err := s.cfg.BeaconDB.GenesisBlock(ctx)
require.NoError(t, err)

gRoot, err := gBlk.Block.HashTreeRoot()
gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
s.finalizedCheckpt = &ethpb.Checkpoint{Root: gRoot[:]}
root, err := tt.args.block.Block.HashTreeRoot()
require.NoError(t, err)
blks := []*ethpb.SignedBeaconBlock{tt.args.block}
blks := []block.SignedBeaconBlock{wrapper.WrappedPhase0SignedBeaconBlock(tt.args.block)}
roots := [][32]byte{root}
err = s.ReceiveBlockBatch(ctx, blks, roots)
if tt.wantedErr != "" {
@@ -361,6 +285,62 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
}
}
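// Illustrative sketch (not part of the repository diff above): how the tests now hand blocks
// to ReceiveBlockBatch after the interface migration. A phase0 *ethpb.SignedBeaconBlock is
// wrapped into the block.SignedBeaconBlock interface and fields are read through accessors.
// Identifiers are taken from the imports shown in this diff; the surrounding service `s`,
// context `ctx`, and testing.T `t` are assumed.
//
//	signed := testutil.NewBeaconBlock()                        // *ethpb.SignedBeaconBlock (phase0 proto)
//	wrapped := wrapper.WrappedPhase0SignedBeaconBlock(signed)  // satisfies block.SignedBeaconBlock
//	root, err := wrapped.Block().HashTreeRoot()                // accessor methods replace direct field reads
//	require.NoError(t, err)
//	blks := []block.SignedBeaconBlock{wrapped}
//	require.NoError(t, s.ReceiveBlockBatch(ctx, blks, [][32]byte{root}))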
func TestService_ReceiveBlockBatch_UpdateFinalizedCheckpoint(t *testing.T) {
// Must enable head timely feature flag to test this.
resetCfg := features.InitWithReset(&features.Flags{
UpdateHeadTimely: true,
})
defer resetCfg()

ctx := context.Background()
genesis, keys := testutil.DeterministicGenesisState(t, 64)

// Generate 5 epochs worth of blocks.
var blks []block.SignedBeaconBlock
var roots [][32]byte
copied := genesis.Copy()
for i := types.Slot(1); i < params.BeaconConfig().SlotsPerEpoch*5; i++ {
b, err := testutil.GenerateFullBlock(copied, keys, testutil.DefaultBlockGenConfig(), i)
assert.NoError(t, err)
copied, err = transition.ExecuteStateTransition(context.Background(), copied, wrapper.WrappedPhase0SignedBeaconBlock(b))
assert.NoError(t, err)
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
blks = append(blks, wrapper.WrappedPhase0SignedBeaconBlock(b))
roots = append(roots, r)
}

beaconDB := testDB.SetupDB(t)
genesisBlockRoot, err := genesis.HashTreeRoot(ctx)
require.NoError(t, err)
cfg := &Config{
BeaconDB: beaconDB,
ForkChoiceStore: protoarray.New(
0, // justifiedEpoch
0, // finalizedEpoch
genesisBlockRoot,
),
StateNotifier: &blockchainTesting.MockStateNotifier{RecordEvents: false},
StateGen: stategen.New(beaconDB),
}
s, err := NewService(ctx, cfg)
require.NoError(t, err)
err = s.saveGenesisData(ctx, genesis)
require.NoError(t, err)
gBlk, err := s.cfg.BeaconDB.GenesisBlock(ctx)
require.NoError(t, err)

gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
s.finalizedCheckpt = &ethpb.Checkpoint{Root: gRoot[:]}

// Process 5 epochs worth of blocks.
require.NoError(t, s.ReceiveBlockBatch(ctx, blks, roots))

// Finalized epoch must be updated.
require.Equal(t, types.Epoch(2), s.finalizedCheckpt.Epoch)
}

func TestService_HasInitSyncBlock(t *testing.T) {
s, err := NewService(context.Background(), &Config{StateNotifier: &blockchainTesting.MockStateNotifier{}})
require.NoError(t, err)
@@ -368,7 +348,7 @@ func TestService_HasInitSyncBlock(t *testing.T) {
if s.HasInitSyncBlock(r) {
t.Error("Should not have block")
}
s.saveInitSyncBlock(r, testutil.NewBeaconBlock())
s.saveInitSyncBlock(r, wrapper.WrappedPhase0SignedBeaconBlock(testutil.NewBeaconBlock()))
if !s.HasInitSyncBlock(r) {
t.Error("Should have block")
}
@@ -379,7 +359,7 @@ func TestCheckSaveHotStateDB_Enabling(t *testing.T) {
hook := logTest.NewGlobal()
s, err := NewService(context.Background(), &Config{StateGen: stategen.New(beaconDB)})
require.NoError(t, err)
st := params.BeaconConfig().SlotsPerEpoch * uint64(epochsSinceFinalitySaveHotStateDB)
st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
s.finalizedCheckpt = &ethpb.Checkpoint{}


@@ -1,6 +1,5 @@
// Package blockchain defines the life-cycle of the blockchain at the core of
// eth2, including processing of new blocks and attestations using casper
// proof of stake.
// Ethereum, including processing of new blocks and attestations using proof of stake.
package blockchain

import (
@@ -11,16 +10,15 @@ import (
"time"

"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/beacon-chain/db"
"github.com/prysmaticlabs/prysm/beacon-chain/flags"
f "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
@@ -28,12 +26,14 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/slotutil"
"github.com/prysmaticlabs/prysm/time/slots"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
@@ -45,58 +45,44 @@ const headSyncMinEpochsAfterCheckpoint = 128
// Service represents a service that handles the internal
// logic of managing the full PoS beacon chain.
type Service struct {
cfg *Config
ctx context.Context
cancel context.CancelFunc
beaconDB db.HeadAccessDatabase
depositCache *depositcache.DepositCache
chainStartFetcher powchain.ChainStartFetcher
attPool attestations.Pool
slashingPool *slashings.Pool
exitPool *voluntaryexits.Pool
genesisTime time.Time
p2p p2p.Broadcaster
maxRoutines int
head *head
headLock sync.RWMutex
stateNotifier statefeed.Notifier
genesisRoot [32]byte
forkChoiceStore f.ForkChoicer
justifiedCheckpt *ethpb.Checkpoint
prevJustifiedCheckpt *ethpb.Checkpoint
bestJustifiedCheckpt *ethpb.Checkpoint
finalizedCheckpt *ethpb.Checkpoint
prevFinalizedCheckpt *ethpb.Checkpoint
nextEpochBoundarySlot uint64
nextEpochBoundarySlot types.Slot
boundaryRoots [][32]byte
checkpointStateCache *cache.CheckpointStateCache
stateGen *stategen.State
opsService *attestations.Service
initSyncBlocks map[[32]byte]*ethpb.SignedBeaconBlock
initSyncBlocks map[[32]byte]block.SignedBeaconBlock
initSyncBlocksLock sync.RWMutex
justifiedBalances []uint64
justifiedBalancesLock sync.RWMutex
wsEpoch uint64
wsRoot []byte
wsVerified bool
}

// Config options for the service.
type Config struct {
BeaconBlockBuf int
ChainStartFetcher powchain.ChainStartFetcher
BeaconDB db.HeadAccessDatabase
DepositCache *depositcache.DepositCache
AttPool attestations.Pool
ExitPool *voluntaryexits.Pool
SlashingPool *slashings.Pool
P2p p2p.Broadcaster
MaxRoutines int
StateNotifier statefeed.Notifier
ForkChoiceStore f.ForkChoicer
OpsService *attestations.Service
StateGen *stategen.State
WspBlockRoot []byte
WspEpoch uint64
BeaconBlockBuf int
ChainStartFetcher powchain.ChainStartFetcher
BeaconDB db.HeadAccessDatabase
DepositCache *depositcache.DepositCache
AttPool attestations.Pool
ExitPool voluntaryexits.PoolManager
SlashingPool slashings.PoolManager
P2p p2p.Broadcaster
MaxRoutines int
StateNotifier statefeed.Notifier
ForkChoiceStore f.ForkChoicer
AttService *attestations.Service
StateGen *stategen.State
WeakSubjectivityCheckpt *ethpb.Checkpoint
}
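// Illustrative sketch (not part of the repository diff above): after this refactor the service
// keeps a single cfg field, so dependencies are supplied through Config and read back as
// s.cfg.<Field>. Field names come from the Config struct shown above; the surrounding test
// setup (beaconDB, ctx) is assumed.
//
//	cfg := &Config{
//		BeaconDB:        beaconDB,
//		StateGen:        stategen.New(beaconDB),
//		ForkChoiceStore: protoarray.New(0, 0, params.BeaconConfig().ZeroHash),
//		StateNotifier:   &blockchainTesting.MockStateNotifier{},
//	}
//	s, err := NewService(ctx, cfg)
//	require.NoError(t, err)
//	// Internal reads now go through the config, e.g. s.cfg.BeaconDB rather than s.beaconDB.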
// NewService instantiates a new block service instance that will
@@ -104,26 +90,13 @@ type Config struct {
func NewService(ctx context.Context, cfg *Config) (*Service, error) {
ctx, cancel := context.WithCancel(ctx)
return &Service{
cfg: cfg,
ctx: ctx,
cancel: cancel,
beaconDB: cfg.BeaconDB,
depositCache: cfg.DepositCache,
chainStartFetcher: cfg.ChainStartFetcher,
attPool: cfg.AttPool,
exitPool: cfg.ExitPool,
slashingPool: cfg.SlashingPool,
p2p: cfg.P2p,
maxRoutines: cfg.MaxRoutines,
stateNotifier: cfg.StateNotifier,
forkChoiceStore: cfg.ForkChoiceStore,
boundaryRoots: [][32]byte{},
checkpointStateCache: cache.NewCheckpointStateCache(),
opsService: cfg.OpsService,
stateGen: cfg.StateGen,
initSyncBlocks: make(map[[32]byte]*ethpb.SignedBeaconBlock),
initSyncBlocks: make(map[[32]byte]block.SignedBeaconBlock),
justifiedBalances: make([]uint64, 0),
wsEpoch: cfg.WspEpoch,
wsRoot: cfg.WspBlockRoot,
}, nil
}

@@ -132,7 +105,7 @@ func (s *Service) Start() {
// For running initial sync with state cache, in an event of restart, we use
// last finalized check point as start point to sync instead of head
// state. This is because we no longer save state every slot during sync.
cp, err := s.beaconDB.FinalizedCheckpoint(s.ctx)
cp, err := s.cfg.BeaconDB.FinalizedCheckpoint(s.ctx)
if err != nil {
log.Fatalf("Could not fetch finalized cp: %v", err)
}
@@ -142,18 +115,18 @@ func (s *Service) Start() {
// the finalized root is defined as zero hashes instead of genesis root hash.
// We want to use genesis root to retrieve for state.
if r == params.BeaconConfig().ZeroHash {
genesisBlock, err := s.beaconDB.GenesisBlock(s.ctx)
genesisBlock, err := s.cfg.BeaconDB.GenesisBlock(s.ctx)
if err != nil {
log.Fatalf("Could not fetch finalized cp: %v", err)
}
if genesisBlock != nil {
r, err = genesisBlock.Block.HashTreeRoot()
if genesisBlock != nil && !genesisBlock.IsNil() {
r, err = genesisBlock.Block().HashTreeRoot()
if err != nil {
log.Fatalf("Could not tree hash genesis block: %v", err)
}
}
}
beaconState, err := s.stateGen.StateByRoot(s.ctx, r)
beaconState, err := s.cfg.StateGen.StateByRoot(s.ctx, r)
if err != nil {
log.Fatalf("Could not fetch beacon state by root: %v", err)
}
@@ -162,16 +135,16 @@ func (s *Service) Start() {
attestationProcessorSubscribed := make(chan struct{}, 1)

// If the chain has already been initialized, simply start the block processing routine.
if beaconState != nil {
if beaconState != nil && !beaconState.IsNil() {
log.Info("Blockchain data already exists in DB, initializing...")
s.genesisTime = time.Unix(int64(beaconState.GenesisTime()), 0)
s.opsService.SetGenesisTime(beaconState.GenesisTime())
s.cfg.AttService.SetGenesisTime(beaconState.GenesisTime())
if err := s.initializeChainInfo(s.ctx); err != nil {
log.Fatalf("Could not set up chain info: %v", err)
}

// We start a counter to genesis, if needed.
gState, err := s.beaconDB.GenesisState(s.ctx)
gState, err := s.cfg.BeaconDB.GenesisState(s.ctx)
if err != nil {
log.Fatalf("Could not retrieve genesis state: %v", err)
}
@@ -179,37 +152,37 @@ func (s *Service) Start() {
if err != nil {
log.Fatalf("Could not hash tree root genesis state: %v", err)
}
go slotutil.CountdownToGenesis(s.ctx, s.genesisTime, uint64(gState.NumValidators()), gRoot)
go slots.CountdownToGenesis(s.ctx, s.genesisTime, uint64(gState.NumValidators()), gRoot)

justifiedCheckpoint, err := s.beaconDB.JustifiedCheckpoint(s.ctx)
justifiedCheckpoint, err := s.cfg.BeaconDB.JustifiedCheckpoint(s.ctx)
if err != nil {
log.Fatalf("Could not get justified checkpoint: %v", err)
}
finalizedCheckpoint, err := s.beaconDB.FinalizedCheckpoint(s.ctx)
finalizedCheckpoint, err := s.cfg.BeaconDB.FinalizedCheckpoint(s.ctx)
if err != nil {
log.Fatalf("Could not get finalized checkpoint: %v", err)
}

// Resume fork choice.
s.justifiedCheckpt = stateTrie.CopyCheckpoint(justifiedCheckpoint)
s.justifiedCheckpt = ethpb.CopyCheckpoint(justifiedCheckpoint)
if err := s.cacheJustifiedStateBalances(s.ctx, s.ensureRootNotZeros(bytesutil.ToBytes32(s.justifiedCheckpt.Root))); err != nil {
log.Fatalf("Could not cache justified state balances: %v", err)
}
s.prevJustifiedCheckpt = stateTrie.CopyCheckpoint(justifiedCheckpoint)
s.bestJustifiedCheckpt = stateTrie.CopyCheckpoint(justifiedCheckpoint)
s.finalizedCheckpt = stateTrie.CopyCheckpoint(finalizedCheckpoint)
s.prevFinalizedCheckpt = stateTrie.CopyCheckpoint(finalizedCheckpoint)
s.prevJustifiedCheckpt = ethpb.CopyCheckpoint(justifiedCheckpoint)
s.bestJustifiedCheckpt = ethpb.CopyCheckpoint(justifiedCheckpoint)
s.finalizedCheckpt = ethpb.CopyCheckpoint(finalizedCheckpoint)
s.prevFinalizedCheckpt = ethpb.CopyCheckpoint(finalizedCheckpoint)
s.resumeForkChoice(justifiedCheckpoint, finalizedCheckpoint)

ss, err := helpers.StartSlot(s.finalizedCheckpt.Epoch)
ss, err := core.StartSlot(s.finalizedCheckpt.Epoch)
if err != nil {
log.Fatalf("Could not get start slot of finalized epoch: %v", err)
}
h := s.headBlock().Block
if h.Slot > ss {
h := s.headBlock().Block()
if h.Slot() > ss {
log.WithFields(logrus.Fields{
"startSlot": ss,
"endSlot": h.Slot,
"endSlot": h.Slot(),
}).Info("Loading blocks to fork choice store, this may take a while.")
if err := s.fillInForkChoiceMissingBlocks(s.ctx, h, s.finalizedCheckpt, s.justifiedCheckpt); err != nil {
log.Fatalf("Could not fill in fork choice store missing blocks: %v", err)
@@ -221,7 +194,7 @@ func (s *Service) Start() {
log.Fatalf("Could not verify weak subjectivity checkpoint: %v", err)
}

s.stateNotifier.StateFeed().Send(&feed.Event{
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.Initialized,
Data: &statefeed.InitializedData{
StartTime: s.genesisTime,
@@ -230,13 +203,13 @@ func (s *Service) Start() {
})
} else {
log.Info("Waiting to reach the validator deposit threshold to start the beacon chain...")
if s.chainStartFetcher == nil {
if s.cfg.ChainStartFetcher == nil {
log.Fatal("Not configured web3Service for POW chain")
return // return need for TestStartUninitializedChainWithoutConfigPOWChain.
}
go func() {
stateChannel := make(chan *feed.Event, 1)
stateSub := s.stateNotifier.StateFeed().Subscribe(stateChannel)
stateSub := s.cfg.StateNotifier.StateFeed().Subscribe(stateChannel)
defer stateSub.Unsubscribe()
<-attestationProcessorSubscribed
for {
@@ -263,14 +236,14 @@ func (s *Service) Start() {
}()
}

go s.processAttestation(attestationProcessorSubscribed)
go s.processAttestationsRoutine(attestationProcessorSubscribed)
}

// processChainStartTime initializes a series of deposits from the ChainStart deposits in the eth1
// deposit contract, initializes the beacon chain's state, and kicks off the beacon chain.
func (s *Service) processChainStartTime(ctx context.Context, genesisTime time.Time) {
preGenesisState := s.chainStartFetcher.PreGenesisState()
initializedState, err := s.initializeBeaconChain(ctx, genesisTime, preGenesisState, s.chainStartFetcher.ChainStartEth1Data())
preGenesisState := s.cfg.ChainStartFetcher.PreGenesisState()
initializedState, err := s.initializeBeaconChain(ctx, genesisTime, preGenesisState, s.cfg.ChainStartFetcher.ChainStartEth1Data())
if err != nil {
log.Fatalf("Could not initialize beacon chain: %v", err)
}
@@ -279,11 +252,11 @@ func (s *Service) processChainStartTime(ctx context.Context, genesisTime time.Ti
if err != nil {
log.Fatalf("Could not hash tree root genesis state: %v", err)
}
go slotutil.CountdownToGenesis(ctx, genesisTime, uint64(initializedState.NumValidators()), gRoot)
go slots.CountdownToGenesis(ctx, genesisTime, uint64(initializedState.NumValidators()), gRoot)

// We send out a state initialized event to the rest of the services
// running in the beacon node.
s.stateNotifier.StateFeed().Send(&feed.Event{
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.Initialized,
Data: &statefeed.InitializedData{
StartTime: genesisTime,
@@ -298,14 +271,14 @@ func (s *Service) processChainStartTime(ctx context.Context, genesisTime time.Ti
func (s *Service) initializeBeaconChain(
ctx context.Context,
genesisTime time.Time,
preGenesisState *stateTrie.BeaconState,
eth1data *ethpb.Eth1Data) (*stateTrie.BeaconState, error) {
preGenesisState state.BeaconState,
eth1data *ethpb.Eth1Data) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "beacon-chain.Service.initializeBeaconChain")
defer span.End()
s.genesisTime = genesisTime
unixTime := uint64(genesisTime.Unix())

genesisState, err := state.OptimizedGenesisBeaconState(unixTime, preGenesisState, eth1data)
genesisState, err := transition.OptimizedGenesisBeaconState(unixTime, preGenesisState, eth1data)
if err != nil {
return nil, errors.Wrap(err, "could not initialize genesis state")
}
@@ -317,17 +290,17 @@ func (s *Service) initializeBeaconChain(
log.Info("Initialized beacon chain genesis state")

// Clear out all pre-genesis data now that the state is initialized.
s.chainStartFetcher.ClearPreGenesisData()
s.cfg.ChainStartFetcher.ClearPreGenesisData()

// Update committee shuffled indices for genesis epoch.
if err := helpers.UpdateCommitteeCache(genesisState, 0 /* genesis epoch */); err != nil {
return nil, err
}
if err := helpers.UpdateProposerIndicesInCache(genesisState, 0 /* genesis epoch */); err != nil {
if err := helpers.UpdateProposerIndicesInCache(genesisState); err != nil {
return nil, err
}

s.opsService.SetGenesisTime(genesisState.GenesisTime())
s.cfg.AttService.SetGenesisTime(genesisState.GenesisTime())

return genesisState, nil
}
@@ -336,14 +309,14 @@ func (s *Service) initializeBeaconChain(
func (s *Service) Stop() error {
defer s.cancel()

if s.stateGen != nil && s.head != nil && s.head.state != nil {
if err := s.stateGen.ForceCheckpoint(s.ctx, s.head.state.FinalizedCheckpoint().Root); err != nil {
if s.cfg.StateGen != nil && s.head != nil && s.head.state != nil {
if err := s.cfg.StateGen.ForceCheckpoint(s.ctx, s.head.state.FinalizedCheckpoint().Root); err != nil {
return err
}
}

// Save initial sync cached blocks to the DB before stop.
return s.beaconDB.SaveBlocks(s.ctx, s.getInitSyncBlocks())
return s.cfg.BeaconDB.SaveBlocks(s.ctx, s.getInitSyncBlocks())
}
// Status always returns nil unless there is an error condition that causes
@@ -352,61 +325,43 @@ func (s *Service) Status() error {
if s.genesisRoot == params.BeaconConfig().ZeroHash {
return errors.New("genesis state has not been created")
}
if runtime.NumGoroutine() > s.maxRoutines {
if runtime.NumGoroutine() > s.cfg.MaxRoutines {
return fmt.Errorf("too many goroutines %d", runtime.NumGoroutine())
}
return nil
}

// This gets called when beacon chain is first initialized to save genesis data (state, block, and more) in db.
func (s *Service) saveGenesisData(ctx context.Context, genesisState *stateTrie.BeaconState) error {
stateRoot, err := genesisState.HashTreeRoot(ctx)
if err != nil {
return err
func (s *Service) saveGenesisData(ctx context.Context, genesisState state.BeaconState) error {
if err := s.cfg.BeaconDB.SaveGenesisData(ctx, genesisState); err != nil {
return errors.Wrap(err, "could not save genesis data")
}
genesisBlk := blocks.NewGenesisBlock(stateRoot[:])
genesisBlkRoot, err := genesisBlk.Block.HashTreeRoot()
genesisBlk, err := s.cfg.BeaconDB.GenesisBlock(ctx)
if err != nil || genesisBlk == nil || genesisBlk.IsNil() {
return fmt.Errorf("could not load genesis block: %v", err)
}
genesisBlkRoot, err := genesisBlk.Block().HashTreeRoot()
if err != nil {
return errors.Wrap(err, "could not get genesis block root")
}

s.genesisRoot = genesisBlkRoot

if err := s.beaconDB.SaveBlock(ctx, genesisBlk); err != nil {
return errors.Wrap(err, "could not save genesis block")
}
if err := s.beaconDB.SaveState(ctx, genesisState, genesisBlkRoot); err != nil {
return errors.Wrap(err, "could not save genesis state")
}
if err := s.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{
Slot: 0,
Root: genesisBlkRoot[:],
}); err != nil {
return err
}

s.stateGen.SaveFinalizedState(0, genesisBlkRoot, genesisState)

if err := s.beaconDB.SaveHeadBlockRoot(ctx, genesisBlkRoot); err != nil {
return errors.Wrap(err, "could not save head block root")
}
if err := s.beaconDB.SaveGenesisBlockRoot(ctx, genesisBlkRoot); err != nil {
return errors.Wrap(err, "could not save genesis block root")
}
s.cfg.StateGen.SaveFinalizedState(0 /*slot*/, genesisBlkRoot, genesisState)

// Finalized checkpoint at genesis is a zero hash.
genesisCheckpoint := genesisState.FinalizedCheckpoint()

s.justifiedCheckpt = stateTrie.CopyCheckpoint(genesisCheckpoint)
s.justifiedCheckpt = ethpb.CopyCheckpoint(genesisCheckpoint)
if err := s.cacheJustifiedStateBalances(ctx, genesisBlkRoot); err != nil {
return err
}
s.prevJustifiedCheckpt = stateTrie.CopyCheckpoint(genesisCheckpoint)
s.bestJustifiedCheckpt = stateTrie.CopyCheckpoint(genesisCheckpoint)
s.finalizedCheckpt = stateTrie.CopyCheckpoint(genesisCheckpoint)
s.prevFinalizedCheckpt = stateTrie.CopyCheckpoint(genesisCheckpoint)
s.prevJustifiedCheckpt = ethpb.CopyCheckpoint(genesisCheckpoint)
s.bestJustifiedCheckpt = ethpb.CopyCheckpoint(genesisCheckpoint)
s.finalizedCheckpt = ethpb.CopyCheckpoint(genesisCheckpoint)
s.prevFinalizedCheckpt = ethpb.CopyCheckpoint(genesisCheckpoint)

if err := s.forkChoiceStore.ProcessBlock(ctx,
genesisBlk.Block.Slot,
if err := s.cfg.ForkChoiceStore.ProcessBlock(ctx,
genesisBlk.Block().Slot(),
genesisBlkRoot,
params.BeaconConfig().ZeroHash,
[32]byte{},
@@ -416,26 +371,25 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState *stateTrie.B
}

s.setHead(genesisBlkRoot, genesisBlk, genesisState)

return nil
}

// This gets called to initialize chain info variables using the finalized checkpoint stored in DB
func (s *Service) initializeChainInfo(ctx context.Context) error {
genesisBlock, err := s.beaconDB.GenesisBlock(ctx)
genesisBlock, err := s.cfg.BeaconDB.GenesisBlock(ctx)
if err != nil {
return errors.Wrap(err, "could not get genesis block from db")
}
if genesisBlock == nil {
if genesisBlock == nil || genesisBlock.IsNil() {
return errors.New("no genesis block in db")
}
genesisBlkRoot, err := genesisBlock.Block.HashTreeRoot()
genesisBlkRoot, err := genesisBlock.Block().HashTreeRoot()
if err != nil {
return errors.Wrap(err, "could not get signing root of genesis block")
}
s.genesisRoot = genesisBlkRoot

finalized, err := s.beaconDB.FinalizedCheckpoint(ctx)
finalized, err := s.cfg.BeaconDB.FinalizedCheckpoint(ctx)
if err != nil {
return errors.Wrap(err, "could not get finalized checkpoint from db")
}
@@ -445,37 +399,37 @@ func (s *Service) initializeChainInfo(ctx context.Context) error {
return errors.New("no finalized epoch in the database")
}
finalizedRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(finalized.Root))
var finalizedState *stateTrie.BeaconState
var finalizedState state.BeaconState

finalizedState, err = s.stateGen.Resume(ctx)
finalizedState, err = s.cfg.StateGen.Resume(ctx)
if err != nil {
return errors.Wrap(err, "could not get finalized state from db")
}

if flags.Get().HeadSync {
headBlock, err := s.beaconDB.HeadBlock(ctx)
headBlock, err := s.cfg.BeaconDB.HeadBlock(ctx)
if err != nil {
return errors.Wrap(err, "could not retrieve head block")
}
headEpoch := helpers.SlotToEpoch(headBlock.Block.Slot)
var epochsSinceFinality uint64
headEpoch := core.SlotToEpoch(headBlock.Block().Slot())
var epochsSinceFinality types.Epoch
if headEpoch > finalized.Epoch {
epochsSinceFinality = headEpoch - finalized.Epoch
}
// Head sync when node is far enough beyond known finalized epoch,
// this becomes really useful during long period of non-finality.
if epochsSinceFinality >= headSyncMinEpochsAfterCheckpoint {
headRoot, err := headBlock.Block.HashTreeRoot()
headRoot, err := headBlock.Block().HashTreeRoot()
if err != nil {
return errors.Wrap(err, "could not hash head block")
}
finalizedState, err := s.stateGen.Resume(ctx)
finalizedState, err := s.cfg.StateGen.Resume(ctx)
if err != nil {
return errors.Wrap(err, "could not get finalized state from db")
}
log.Infof("Regenerating state from the last checkpoint at slot %d to current head slot of %d."+
"This process may take a while, please wait.", finalizedState.Slot(), headBlock.Block.Slot)
headState, err := s.stateGen.StateByRoot(ctx, headRoot)
"This process may take a while, please wait.", finalizedState.Slot(), headBlock.Block().Slot())
headState, err := s.cfg.StateGen.StateByRoot(ctx, headRoot)
if err != nil {
return errors.Wrap(err, "could not retrieve head state")
}
@@ -488,12 +442,12 @@ func (s *Service) initializeChainInfo(ctx context.Context) error {
}
}

finalizedBlock, err := s.beaconDB.Block(ctx, finalizedRoot)
finalizedBlock, err := s.cfg.BeaconDB.Block(ctx, finalizedRoot)
if err != nil {
return errors.Wrap(err, "could not get finalized block from db")
}

if finalizedState == nil || finalizedBlock == nil {
if finalizedState == nil || finalizedState.IsNil() || finalizedBlock == nil || finalizedBlock.IsNil() {
return errors.New("finalized state and block can't be nil")
}
s.setHead(finalizedRoot, finalizedBlock, finalizedState)
@@ -505,7 +459,7 @@ func (s *Service) initializeChainInfo(ctx context.Context) error {
// information to fork choice service to initializes fork choice store.
func (s *Service) resumeForkChoice(justifiedCheckpoint, finalizedCheckpoint *ethpb.Checkpoint) {
store := protoarray.New(justifiedCheckpoint.Epoch, finalizedCheckpoint.Epoch, bytesutil.ToBytes32(finalizedCheckpoint.Root))
s.forkChoiceStore = store
s.cfg.ForkChoiceStore = store
}

// This returns true if block has been processed before. Two ways to verify the block has been processed:
@@ -513,9 +467,9 @@ func (s *Service) resumeForkChoice(justifiedCheckpoint, finalizedCheckpoint *eth
// 2.) Check DB.
// Checking 1.) is ten times faster than checking 2.)
func (s *Service) hasBlock(ctx context.Context, root [32]byte) bool {
if s.forkChoiceStore.HasNode(root) {
if s.cfg.ForkChoiceStore.HasNode(root) {
return true
}

return s.beaconDB.HasBlock(ctx, root)
return s.cfg.BeaconDB.HasBlock(ctx, root)
}
@@ -18,7 +18,7 @@ func init() {
func TestChainService_SaveHead_DataRace(t *testing.T) {
beaconDB := testDB.SetupDB(t)
s := &Service{
beaconDB: beaconDB,
cfg: &Config{BeaconDB: beaconDB},
}
go func() {
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))

@@ -8,30 +8,33 @@ import (
"time"

"github.com/ethereum/go-ethereum/common"
"github.com/gogo/protobuf/proto"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/async/event"
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
"github.com/prysmaticlabs/prysm/beacon-chain/core"
b "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/beacon-chain/db"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
beaconstate "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
protodb "github.com/prysmaticlabs/prysm/proto/beacon/db"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
"github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/event"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
logTest "github.com/sirupsen/logrus/hooks/test"
"google.golang.org/protobuf/proto"
)

type mockBeaconNode struct {
@@ -60,6 +63,11 @@ func (mb *mockBroadcaster) BroadcastAttestation(_ context.Context, _ uint64, _ *
return nil
}

func (mb *mockBroadcaster) BroadcastSyncCommitteeMessage(_ context.Context, _ uint64, _ *ethpb.SyncCommitteeMessage) error {
mb.broadcastCalled = true
return nil
}

var _ p2p.Broadcaster = (*mockBroadcaster)(nil)

func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
@@ -68,30 +76,32 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
var web3Service *powchain.Service
var err error
bState, _ := testutil.DeterministicGenesisState(t, 10)
err = beaconDB.SavePowchainData(ctx, &protodb.ETH1ChainData{
BeaconState: bState.InnerStateUnsafe(),
Trie: &protodb.SparseMerkleTrie{},
CurrentEth1Data: &protodb.LatestETH1Data{
pbState, err := v1.ProtobufBeaconState(bState.InnerStateUnsafe())
require.NoError(t, err)
err = beaconDB.SavePowchainData(ctx, &ethpb.ETH1ChainData{
BeaconState: pbState,
Trie: &ethpb.SparseMerkleTrie{},
CurrentEth1Data: &ethpb.LatestETH1Data{
BlockHash: make([]byte, 32),
},
ChainstartData: &protodb.ChainStartData{
ChainstartData: &ethpb.ChainStartData{
Eth1Data: &ethpb.Eth1Data{
DepositRoot: make([]byte, 32),
DepositCount: 0,
BlockHash: make([]byte, 32),
},
},
DepositContainers: []*protodb.DepositContainer{},
DepositContainers: []*ethpb.DepositContainer{},
})
require.NoError(t, err)
web3Service, err = powchain.NewService(ctx, &powchain.Web3ServiceConfig{
BeaconDB: beaconDB,
HTTPEndpoints: []string{endpoint},
HttpEndpoints: []string{endpoint},
DepositContract: common.Address{},
})
require.NoError(t, err, "Unable to set up web3 service")

opsService, err := attestations.NewService(ctx, &attestations.Config{Pool: attestations.NewPool()})
attService, err := attestations.NewService(ctx, &attestations.Config{Pool: attestations.NewPool()})
require.NoError(t, err)

depositCache, err := depositcache.New()
@@ -107,7 +117,7 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
AttPool: attestations.NewPool(),
StateGen: stategen.New(beaconDB),
ForkChoiceStore: protoarray.New(0, 0, params.BeaconConfig().ZeroHash),
OpsService: opsService,
AttService: attService,
}

// Safe a state in stategen to purposes of testing a service stop / shutdown.
@@ -130,8 +140,9 @@ func TestChainStartStop_Initialized(t *testing.T) {
genesisBlk := testutil.NewBeaconBlock()
blkRoot, err := genesisBlk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, genesisBlk))
s := testutil.NewBeaconState()
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesisBlk)))
s, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.SetSlot(1))
require.NoError(t, beaconDB.SaveState(ctx, s, blkRoot))
require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, blkRoot))
@@ -159,8 +170,9 @@ func TestChainStartStop_GenesisZeroHashes(t *testing.T) {
genesisBlk := testutil.NewBeaconBlock()
blkRoot, err := genesisBlk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, genesisBlk))
s := testutil.NewBeaconState()
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesisBlk)))
s, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, s, blkRoot))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, blkRoot))
require.NoError(t, beaconDB.SaveJustifiedCheckpoint(ctx, &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}))
@@ -190,7 +202,7 @@ func TestChainService_InitializeBeaconChain(t *testing.T) {
trie, _, err := testutil.DepositTrieFromDeposits(deposits)
require.NoError(t, err)
hashTreeRoot := trie.HashTreeRoot()
genState, err := state.EmptyGenesisState()
genState, err := transition.EmptyGenesisState()
require.NoError(t, err)
err = genState.SetEth1Data(&ethpb.Eth1Data{
DepositRoot: hashTreeRoot[:],
@@ -227,8 +239,9 @@ func TestChainService_CorrectGenesisRoots(t *testing.T) {
genesisBlk := testutil.NewBeaconBlock()
blkRoot, err := genesisBlk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, genesisBlk))
s := testutil.NewBeaconState()
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesisBlk)))
s, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.SetSlot(0))
require.NoError(t, beaconDB.SaveState(ctx, s, blkRoot))
require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, blkRoot))
@@ -253,29 +266,30 @@ func TestChainService_InitializeChainInfo(t *testing.T) {
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
require.NoError(t, beaconDB.SaveBlock(ctx, genesis))
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))

finalizedSlot := params.BeaconConfig().SlotsPerEpoch*2 + 1
headBlock := testutil.NewBeaconBlock()
headBlock.Block.Slot = finalizedSlot
headBlock.Block.ParentRoot = bytesutil.PadTo(genesisRoot[:], 32)
headState := testutil.NewBeaconState()
headState, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, headState.SetSlot(finalizedSlot))
require.NoError(t, headState.SetGenesisValidatorRoot(params.BeaconConfig().ZeroHash[:]))
headRoot, err := headBlock.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
require.NoError(t, beaconDB.SaveBlock(ctx, headBlock))
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Epoch: helpers.SlotToEpoch(finalizedSlot), Root: headRoot[:]}))
c := &Service{beaconDB: beaconDB, stateGen: stategen.New(beaconDB)}
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(headBlock)))
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Epoch: core.SlotToEpoch(finalizedSlot), Root: headRoot[:]}))
c := &Service{cfg: &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)}}
require.NoError(t, c.initializeChainInfo(ctx))
headBlk, err := c.HeadBlock(ctx)
require.NoError(t, err)
assert.DeepEqual(t, headBlock, headBlk, "Head block incorrect")
assert.DeepEqual(t, headBlock, headBlk.Proto(), "Head block incorrect")
s, err := c.HeadState(ctx)
require.NoError(t, err)
assert.DeepEqual(t, headState.InnerStateUnsafe(), s.InnerStateUnsafe(), "Head state incorrect")
assert.DeepSSZEqual(t, headState.InnerStateUnsafe(), s.InnerStateUnsafe(), "Head state incorrect")
assert.Equal(t, c.HeadSlot(), headBlock.Block.Slot, "Head slot incorrect")
r, err := c.HeadRoot(context.Background())
require.NoError(t, err)
@@ -293,27 +307,28 @@ func TestChainService_InitializeChainInfo_SetHeadAtGenesis(t *testing.T) {
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
require.NoError(t, beaconDB.SaveBlock(ctx, genesis))
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))

finalizedSlot := params.BeaconConfig().SlotsPerEpoch*2 + 1
headBlock := testutil.NewBeaconBlock()
headBlock.Block.Slot = finalizedSlot
headBlock.Block.ParentRoot = bytesutil.PadTo(genesisRoot[:], 32)
headState := testutil.NewBeaconState()
headState, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, headState.SetSlot(finalizedSlot))
require.NoError(t, headState.SetGenesisValidatorRoot(params.BeaconConfig().ZeroHash[:]))
headRoot, err := headBlock.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
require.NoError(t, beaconDB.SaveBlock(ctx, headBlock))
c := &Service{beaconDB: beaconDB, stateGen: stategen.New(beaconDB)}
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(headBlock)))
c := &Service{cfg: &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)}}
require.NoError(t, c.initializeChainInfo(ctx))
s, err := c.HeadState(ctx)
require.NoError(t, err)
assert.DeepEqual(t, headState.InnerStateUnsafe(), s.InnerStateUnsafe(), "Head state incorrect")
assert.DeepSSZEqual(t, headState.InnerStateUnsafe(), s.InnerStateUnsafe(), "Head state incorrect")
assert.Equal(t, genesisRoot, c.genesisRoot, "Genesis block root incorrect")
assert.DeepEqual(t, genesis, c.head.block)
assert.DeepEqual(t, genesis, c.head.block.Proto())
}

func TestChainService_InitializeChainInfo_HeadSync(t *testing.T) {
@@ -334,14 +349,14 @@ func TestChainService_InitializeChainInfo_HeadSync(t *testing.T) {
genesisRoot, err := genesisBlock.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
require.NoError(t, beaconDB.SaveBlock(ctx, genesisBlock))
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesisBlock)))

finalizedBlock := testutil.NewBeaconBlock()
finalizedBlock.Block.Slot = finalizedSlot
finalizedBlock.Block.ParentRoot = genesisRoot[:]
finalizedRoot, err := finalizedBlock.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, finalizedBlock))
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(finalizedBlock)))

// Set head slot close to the finalization point, no head sync is triggered.
headBlock := testutil.NewBeaconBlock()
@@ -349,9 +364,10 @@ func TestChainService_InitializeChainInfo_HeadSync(t *testing.T) {
headBlock.Block.ParentRoot = finalizedRoot[:]
headRoot, err := headBlock.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, headBlock))
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(headBlock)))

headState := testutil.NewBeaconState()
headState, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, headState.SetSlot(headBlock.Block.Slot))
require.NoError(t, headState.SetGenesisValidatorRoot(params.BeaconConfig().ZeroHash[:]))
require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
@@ -359,19 +375,19 @@ func TestChainService_InitializeChainInfo_HeadSync(t *testing.T) {
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, headRoot))
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{
Epoch: helpers.SlotToEpoch(finalizedBlock.Block.Slot),
Epoch: core.SlotToEpoch(finalizedBlock.Block.Slot),
Root: finalizedRoot[:],
}))

c := &Service{beaconDB: beaconDB, stateGen: stategen.New(beaconDB)}
c := &Service{cfg: &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)}}

require.NoError(t, c.initializeChainInfo(ctx))
s, err := c.HeadState(ctx)
require.NoError(t, err)
assert.DeepEqual(t, headState.InnerStateUnsafe(), s.InnerStateUnsafe(), "Head state incorrect")
assert.DeepSSZEqual(t, headState.InnerStateUnsafe(), s.InnerStateUnsafe(), "Head state incorrect")
assert.Equal(t, genesisRoot, c.genesisRoot, "Genesis block root incorrect")
// Since head sync is not triggered, chain is initialized to the last finalization checkpoint.
assert.DeepEqual(t, finalizedBlock, c.head.block)
assert.DeepEqual(t, finalizedBlock, c.head.block.Proto())
assert.LogsContain(t, hook, "resetting head from the checkpoint ('--head-sync' flag is ignored)")
assert.LogsDoNotContain(t, hook, "Regenerating state from the last checkpoint at slot")

@@ -381,7 +397,7 @@ func TestChainService_InitializeChainInfo_HeadSync(t *testing.T) {
headBlock.Block.ParentRoot = finalizedRoot[:]
headRoot, err = headBlock.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, headBlock))
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(headBlock)))
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, headRoot))

@@ -389,10 +405,10 @@ func TestChainService_InitializeChainInfo_HeadSync(t *testing.T) {
require.NoError(t, c.initializeChainInfo(ctx))
s, err = c.HeadState(ctx)
require.NoError(t, err)
assert.DeepEqual(t, headState.InnerStateUnsafe(), s.InnerStateUnsafe(), "Head state incorrect")
assert.DeepSSZEqual(t, headState.InnerStateUnsafe(), s.InnerStateUnsafe(), "Head state incorrect")
assert.Equal(t, genesisRoot, c.genesisRoot, "Genesis block root incorrect")
// Head slot is far beyond the latest finalized checkpoint, head sync is triggered.
assert.DeepEqual(t, headBlock, c.head.block)
assert.DeepEqual(t, headBlock, c.head.block.Proto())
assert.LogsContain(t, hook, "Regenerating state from the last checkpoint at slot 225")
assert.LogsDoNotContain(t, hook, "resetting head from the checkpoint ('--head-sync' flag is ignored)")
}
@@ -401,18 +417,18 @@ func TestChainService_SaveHeadNoDB(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
s := &Service{
beaconDB: beaconDB,
stateGen: stategen.New(beaconDB),
cfg: &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
}
blk := testutil.NewBeaconBlock()
blk.Block.Slot = 1
r, err := blk.HashTreeRoot()
require.NoError(t, err)
newState := testutil.NewBeaconState()
require.NoError(t, s.stateGen.SaveState(ctx, r, newState))
require.NoError(t, s.saveHeadNoDB(ctx, blk, r, newState))
newState, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.cfg.StateGen.SaveState(ctx, r, newState))
require.NoError(t, s.saveHeadNoDB(ctx, wrapper.WrappedPhase0SignedBeaconBlock(blk), r, newState))

newB, err := s.beaconDB.HeadBlock(ctx)
newB, err := s.cfg.BeaconDB.HeadBlock(ctx)
require.NoError(t, err)
if reflect.DeepEqual(newB, blk) {
t.Error("head block should not be equal")
@@ -423,15 +439,15 @@ func TestHasBlock_ForkChoiceAndDB(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
s := &Service{
forkChoiceStore: protoarray.New(0, 0, [32]byte{}),
cfg: &Config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{}), BeaconDB: beaconDB},
finalizedCheckpt: &ethpb.Checkpoint{Root: make([]byte, 32)},
beaconDB: beaconDB,
}
block := testutil.NewBeaconBlock()
r, err := block.Block.HashTreeRoot()
require.NoError(t, err)
beaconState := testutil.NewBeaconState()
require.NoError(t, s.insertBlockAndAttestationsToForkChoiceStore(ctx, block.Block, r, beaconState))
beaconState, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), r, beaconState))

assert.Equal(t, false, s.hasBlock(ctx, [32]byte{}), "Should not have block")
assert.Equal(t, true, s.hasBlock(ctx, r), "Should have block")
@@ -441,34 +457,47 @@ func TestServiceStop_SaveCachedBlocks(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
beaconDB := testDB.SetupDB(t)
s := &Service{
cfg: &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
ctx: ctx,
cancel: cancel,
beaconDB: beaconDB,
initSyncBlocks: make(map[[32]byte]*ethpb.SignedBeaconBlock),
stateGen: stategen.New(beaconDB),
initSyncBlocks: make(map[[32]byte]block.SignedBeaconBlock),
}
b := testutil.NewBeaconBlock()
r, err := b.Block.HashTreeRoot()
block := testutil.NewBeaconBlock()
r, err := block.Block.HashTreeRoot()
require.NoError(t, err)
s.saveInitSyncBlock(r, b)
s.saveInitSyncBlock(r, wrapper.WrappedPhase0SignedBeaconBlock(block))
require.NoError(t, s.Stop())
require.Equal(t, true, s.beaconDB.HasBlock(ctx, r))
require.Equal(t, true, s.cfg.BeaconDB.HasBlock(ctx, r))
}

func TestProcessChainStartTime_ReceivedFeed(t *testing.T) {
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
stateChannel := make(chan *feed.Event, 1)
stateSub := service.cfg.StateNotifier.StateFeed().Subscribe(stateChannel)
defer stateSub.Unsubscribe()
service.processChainStartTime(context.Background(), time.Now())

stateEvent := <-stateChannel
require.Equal(t, int(stateEvent.Type), statefeed.Initialized)
_, ok := stateEvent.Data.(*statefeed.InitializedData)
require.Equal(t, true, ok)
}

func BenchmarkHasBlockDB(b *testing.B) {
beaconDB := testDB.SetupDB(b)
ctx := context.Background()
s := &Service{
beaconDB: beaconDB,
cfg: &Config{BeaconDB: beaconDB},
}
block := testutil.NewBeaconBlock()
require.NoError(b, s.beaconDB.SaveBlock(ctx, block))
require.NoError(b, s.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(block)))
|
||||
r, err := block.Block.HashTreeRoot()
|
||||
require.NoError(b, err)
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
require.Equal(b, true, s.beaconDB.HasBlock(ctx, r), "Block is not in DB")
|
||||
require.Equal(b, true, s.cfg.BeaconDB.HasBlock(ctx, r), "Block is not in DB")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -476,20 +505,19 @@ func BenchmarkHasBlockForkChoiceStore(b *testing.B) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(b)
|
||||
s := &Service{
|
||||
forkChoiceStore: protoarray.New(0, 0, [32]byte{}),
|
||||
cfg: &Config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{}), BeaconDB: beaconDB},
|
||||
finalizedCheckpt: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
beaconDB: beaconDB,
|
||||
}
|
||||
block := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Body: ðpb.BeaconBlockBody{}}}
|
||||
r, err := block.Block.HashTreeRoot()
|
||||
require.NoError(b, err)
|
||||
bs := &pb.BeaconState{FinalizedCheckpoint: ðpb.Checkpoint{Root: make([]byte, 32)}, CurrentJustifiedCheckpoint: ðpb.Checkpoint{Root: make([]byte, 32)}}
|
||||
beaconState, err := beaconstate.InitializeFromProto(bs)
|
||||
bs := ðpb.BeaconState{FinalizedCheckpoint: ðpb.Checkpoint{Root: make([]byte, 32)}, CurrentJustifiedCheckpoint: ðpb.Checkpoint{Root: make([]byte, 32)}}
|
||||
beaconState, err := v1.InitializeFromProto(bs)
|
||||
require.NoError(b, err)
|
||||
require.NoError(b, s.insertBlockAndAttestationsToForkChoiceStore(ctx, block.Block, r, beaconState))
|
||||
require.NoError(b, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), r, beaconState))
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
require.Equal(b, true, s.forkChoiceStore.HasNode(r), "Block is not in fork choice store")
|
||||
require.Equal(b, true, s.cfg.ForkChoiceStore.HasNode(r), "Block is not in fork choice store")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -7,9 +7,10 @@ go_library(
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing",
|
||||
visibility = [
|
||||
"//beacon-chain:__subpackages__",
|
||||
"//fuzz:__pkg__",
|
||||
"//testing/fuzz:__pkg__",
|
||||
],
|
||||
deps = [
|
||||
"//async/event:go_default_library",
|
||||
"//beacon-chain/core/epoch/precompute:go_default_library",
|
||||
"//beacon-chain/core/feed:go_default_library",
|
||||
"//beacon-chain/core/feed/block:go_default_library",
|
||||
@@ -19,11 +20,13 @@ go_library(
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/forkchoice/protoarray:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//proto/beacon/p2p/v1:go_default_library",
|
||||
"//shared/event:go_default_library",
|
||||
"//beacon-chain/state/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/block:go_default_library",
|
||||
"//shared/bytesutil:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
|
||||
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -9,7 +9,8 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/async/event"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
|
||||
blockfeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/block"
|
||||
@@ -18,27 +19,29 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
|
||||
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/event"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// ChainService defines the mock interface for testing
|
||||
type ChainService struct {
|
||||
State *stateTrie.BeaconState
|
||||
State state.BeaconState
|
||||
Root []byte
|
||||
Block *ethpb.SignedBeaconBlock
|
||||
Block block.SignedBeaconBlock
|
||||
FinalizedCheckPoint *ethpb.Checkpoint
|
||||
CurrentJustifiedCheckPoint *ethpb.Checkpoint
|
||||
PreviousJustifiedCheckPoint *ethpb.Checkpoint
|
||||
BlocksReceived []*ethpb.SignedBeaconBlock
|
||||
BlocksReceived []block.SignedBeaconBlock
|
||||
Balance *precompute.Balance
|
||||
Genesis time.Time
|
||||
ValidatorsRoot [32]byte
|
||||
CanonicalRoots map[[32]byte]bool
|
||||
Fork *pb.Fork
|
||||
Fork *ethpb.Fork
|
||||
ETH1Data *ethpb.Eth1Data
|
||||
DB db.Database
|
||||
stateNotifier statefeed.Notifier
|
||||
@@ -47,23 +50,29 @@ type ChainService struct {
|
||||
ValidAttestation bool
|
||||
ForkChoiceStore *protoarray.Store
|
||||
VerifyBlkDescendantErr error
|
||||
Slot *uint64 // Pointer because 0 is a useful value, so checking against it can be incorrect.
|
||||
Slot *types.Slot // Pointer because 0 is a useful value, so checking against it can be incorrect.
|
||||
SyncCommitteeIndices []types.CommitteeIndex
|
||||
SyncCommitteeDomain []byte
|
||||
SyncSelectionProofDomain []byte
|
||||
SyncContributionProofDomain []byte
|
||||
PublicKey [48]byte
|
||||
SyncCommitteePubkeys [][]byte
|
||||
}
|
||||
|
||||
// StateNotifier mocks the same method in the chain service.
|
||||
func (ms *ChainService) StateNotifier() statefeed.Notifier {
|
||||
if ms.stateNotifier == nil {
|
||||
ms.stateNotifier = &MockStateNotifier{}
|
||||
func (s *ChainService) StateNotifier() statefeed.Notifier {
|
||||
if s.stateNotifier == nil {
|
||||
s.stateNotifier = &MockStateNotifier{}
|
||||
}
|
||||
return ms.stateNotifier
|
||||
return s.stateNotifier
|
||||
}
|
||||
|
||||
// BlockNotifier mocks the same method in the chain service.
|
||||
func (ms *ChainService) BlockNotifier() blockfeed.Notifier {
|
||||
if ms.blockNotifier == nil {
|
||||
ms.blockNotifier = &MockBlockNotifier{}
|
||||
func (s *ChainService) BlockNotifier() blockfeed.Notifier {
|
||||
if s.blockNotifier == nil {
|
||||
s.blockNotifier = &MockBlockNotifier{}
|
||||
}
|
||||
return ms.blockNotifier
|
||||
return s.blockNotifier
|
||||
}
|
||||
|
||||
// MockBlockNotifier mocks the block notifier.
|
||||
@@ -72,11 +81,11 @@ type MockBlockNotifier struct {
|
||||
}
|
||||
|
||||
// BlockFeed returns a block feed.
|
||||
func (msn *MockBlockNotifier) BlockFeed() *event.Feed {
|
||||
if msn.feed == nil {
|
||||
msn.feed = new(event.Feed)
|
||||
func (mbn *MockBlockNotifier) BlockFeed() *event.Feed {
|
||||
if mbn.feed == nil {
|
||||
mbn.feed = new(event.Feed)
|
||||
}
|
||||
return msn.feed
|
||||
return mbn.feed
|
||||
}
|
||||
|
||||
// MockStateNotifier mocks the state notifier.
|
||||
@@ -125,11 +134,11 @@ func (msn *MockStateNotifier) StateFeed() *event.Feed {
|
||||
}
|
||||
|
||||
// OperationNotifier mocks the same method in the chain service.
|
||||
func (ms *ChainService) OperationNotifier() opfeed.Notifier {
|
||||
if ms.opNotifier == nil {
|
||||
ms.opNotifier = &MockOperationNotifier{}
|
||||
func (s *ChainService) OperationNotifier() opfeed.Notifier {
|
||||
if s.opNotifier == nil {
|
||||
s.opNotifier = &MockOperationNotifier{}
|
||||
}
|
||||
return ms.opNotifier
|
||||
return s.opNotifier
|
||||
}
|
||||
|
||||
// MockOperationNotifier mocks the operation notifier.
|
||||
@@ -146,227 +155,227 @@ func (mon *MockOperationNotifier) OperationFeed() *event.Feed {
|
||||
}
|
||||
|
||||
// ReceiveBlockInitialSync mocks ReceiveBlockInitialSync method in chain service.
|
||||
func (ms *ChainService) ReceiveBlockInitialSync(ctx context.Context, block *ethpb.SignedBeaconBlock, _ [32]byte) error {
|
||||
if ms.State == nil {
|
||||
ms.State = &stateTrie.BeaconState{}
|
||||
func (s *ChainService) ReceiveBlockInitialSync(ctx context.Context, block block.SignedBeaconBlock, _ [32]byte) error {
|
||||
if s.State == nil {
|
||||
s.State = &v1.BeaconState{}
|
||||
}
|
||||
if !bytes.Equal(ms.Root, block.Block.ParentRoot) {
|
||||
return errors.Errorf("wanted %#x but got %#x", ms.Root, block.Block.ParentRoot)
|
||||
if !bytes.Equal(s.Root, block.Block().ParentRoot()) {
|
||||
return errors.Errorf("wanted %#x but got %#x", s.Root, block.Block().ParentRoot())
|
||||
}
|
||||
if err := ms.State.SetSlot(block.Block.Slot); err != nil {
|
||||
if err := s.State.SetSlot(block.Block().Slot()); err != nil {
|
||||
return err
|
||||
}
|
||||
ms.BlocksReceived = append(ms.BlocksReceived, block)
|
||||
signingRoot, err := block.Block.HashTreeRoot()
|
||||
s.BlocksReceived = append(s.BlocksReceived, block)
|
||||
signingRoot, err := block.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if ms.DB != nil {
|
||||
if err := ms.DB.SaveBlock(ctx, block); err != nil {
|
||||
if s.DB != nil {
|
||||
if err := s.DB.SaveBlock(ctx, block); err != nil {
|
||||
return err
|
||||
}
|
||||
logrus.Infof("Saved block with root: %#x at slot %d", signingRoot, block.Block.Slot)
|
||||
logrus.Infof("Saved block with root: %#x at slot %d", signingRoot, block.Block().Slot())
|
||||
}
|
||||
ms.Root = signingRoot[:]
|
||||
ms.Block = block
|
||||
s.Root = signingRoot[:]
|
||||
s.Block = block
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReceiveBlockBatch processes blocks in batches from initial-sync.
|
||||
func (ms *ChainService) ReceiveBlockBatch(ctx context.Context, blks []*ethpb.SignedBeaconBlock, _ [][32]byte) error {
|
||||
if ms.State == nil {
|
||||
ms.State = &stateTrie.BeaconState{}
|
||||
func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []block.SignedBeaconBlock, _ [][32]byte) error {
|
||||
if s.State == nil {
|
||||
s.State = &v1.BeaconState{}
|
||||
}
|
||||
for _, block := range blks {
|
||||
if !bytes.Equal(ms.Root, block.Block.ParentRoot) {
|
||||
return errors.Errorf("wanted %#x but got %#x", ms.Root, block.Block.ParentRoot)
|
||||
if !bytes.Equal(s.Root, block.Block().ParentRoot()) {
|
||||
return errors.Errorf("wanted %#x but got %#x", s.Root, block.Block().ParentRoot())
|
||||
}
|
||||
if err := ms.State.SetSlot(block.Block.Slot); err != nil {
|
||||
if err := s.State.SetSlot(block.Block().Slot()); err != nil {
|
||||
return err
|
||||
}
|
||||
ms.BlocksReceived = append(ms.BlocksReceived, block)
|
||||
signingRoot, err := block.Block.HashTreeRoot()
|
||||
s.BlocksReceived = append(s.BlocksReceived, block)
|
||||
signingRoot, err := block.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if ms.DB != nil {
|
||||
if err := ms.DB.SaveBlock(ctx, block); err != nil {
|
||||
if s.DB != nil {
|
||||
if err := s.DB.SaveBlock(ctx, block); err != nil {
|
||||
return err
|
||||
}
|
||||
logrus.Infof("Saved block with root: %#x at slot %d", signingRoot, block.Block.Slot)
|
||||
logrus.Infof("Saved block with root: %#x at slot %d", signingRoot, block.Block().Slot())
|
||||
}
|
||||
ms.Root = signingRoot[:]
|
||||
ms.Block = block
|
||||
s.Root = signingRoot[:]
|
||||
s.Block = block
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReceiveBlock mocks ReceiveBlock method in chain service.
|
||||
func (ms *ChainService) ReceiveBlock(ctx context.Context, block *ethpb.SignedBeaconBlock, _ [32]byte) error {
|
||||
if ms.State == nil {
|
||||
ms.State = &stateTrie.BeaconState{}
|
||||
func (s *ChainService) ReceiveBlock(ctx context.Context, block block.SignedBeaconBlock, _ [32]byte) error {
|
||||
if s.State == nil {
|
||||
s.State = &v1.BeaconState{}
|
||||
}
|
||||
if !bytes.Equal(ms.Root, block.Block.ParentRoot) {
|
||||
return errors.Errorf("wanted %#x but got %#x", ms.Root, block.Block.ParentRoot)
|
||||
if !bytes.Equal(s.Root, block.Block().ParentRoot()) {
|
||||
return errors.Errorf("wanted %#x but got %#x", s.Root, block.Block().ParentRoot())
|
||||
}
|
||||
if err := ms.State.SetSlot(block.Block.Slot); err != nil {
|
||||
if err := s.State.SetSlot(block.Block().Slot()); err != nil {
|
||||
return err
|
||||
}
|
||||
ms.BlocksReceived = append(ms.BlocksReceived, block)
|
||||
signingRoot, err := block.Block.HashTreeRoot()
|
||||
s.BlocksReceived = append(s.BlocksReceived, block)
|
||||
signingRoot, err := block.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if ms.DB != nil {
|
||||
if err := ms.DB.SaveBlock(ctx, block); err != nil {
|
||||
if s.DB != nil {
|
||||
if err := s.DB.SaveBlock(ctx, block); err != nil {
|
||||
return err
|
||||
}
|
||||
logrus.Infof("Saved block with root: %#x at slot %d", signingRoot, block.Block.Slot)
|
||||
logrus.Infof("Saved block with root: %#x at slot %d", signingRoot, block.Block().Slot())
|
||||
}
|
||||
ms.Root = signingRoot[:]
|
||||
ms.Block = block
|
||||
s.Root = signingRoot[:]
|
||||
s.Block = block
|
||||
return nil
|
||||
}
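Every mock method now accepts the block.SignedBeaconBlock interface, and callers wrap concrete phase0 protos with wrapper.WrappedPhase0SignedBeaconBlock (from proto/prysm/v1alpha1/wrapper) before passing them in. The toy example below illustrates the wrapping pattern with stand-in types; it is a sketch, not the real prysm API.

// Stand-in types; the real interface and wrapper live in
// proto/prysm/v1alpha1/block and proto/prysm/v1alpha1/wrapper.
package main

import "fmt"

type phase0SignedBlock struct {
	Slot       uint64
	ParentRoot []byte
}

// SignedBeaconBlock is the read-only view the mock methods now accept.
type SignedBeaconBlock interface {
	Slot() uint64
	ParentRoot() []byte
}

type wrappedPhase0 struct{ b *phase0SignedBlock }

func (w wrappedPhase0) Slot() uint64       { return w.b.Slot }
func (w wrappedPhase0) ParentRoot() []byte { return w.b.ParentRoot }

// wrapPhase0 mirrors the role of wrapper.WrappedPhase0SignedBeaconBlock.
func wrapPhase0(b *phase0SignedBlock) SignedBeaconBlock { return wrappedPhase0{b: b} }

func main() {
	blk := wrapPhase0(&phase0SignedBlock{Slot: 32, ParentRoot: make([]byte, 32)})
	// Consumers use accessor methods instead of reaching into proto fields.
	fmt.Println(blk.Slot(), len(blk.ParentRoot()))
}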
|
||||
|
||||
// HeadSlot mocks HeadSlot method in chain service.
|
||||
func (ms *ChainService) HeadSlot() uint64 {
|
||||
if ms.State == nil {
|
||||
func (s *ChainService) HeadSlot() types.Slot {
|
||||
if s.State == nil {
|
||||
return 0
|
||||
}
|
||||
return ms.State.Slot()
|
||||
return s.State.Slot()
|
||||
}
|
||||
|
||||
// HeadRoot mocks HeadRoot method in chain service.
|
||||
func (ms *ChainService) HeadRoot(_ context.Context) ([]byte, error) {
|
||||
if len(ms.Root) > 0 {
|
||||
return ms.Root, nil
|
||||
func (s *ChainService) HeadRoot(_ context.Context) ([]byte, error) {
|
||||
if len(s.Root) > 0 {
|
||||
return s.Root, nil
|
||||
}
|
||||
return make([]byte, 32), nil
|
||||
}
|
||||
|
||||
// HeadBlock mocks HeadBlock method in chain service.
|
||||
func (ms *ChainService) HeadBlock(context.Context) (*ethpb.SignedBeaconBlock, error) {
|
||||
return ms.Block, nil
|
||||
func (s *ChainService) HeadBlock(context.Context) (block.SignedBeaconBlock, error) {
|
||||
return s.Block, nil
|
||||
}
|
||||
|
||||
// HeadState mocks HeadState method in chain service.
|
||||
func (ms *ChainService) HeadState(context.Context) (*stateTrie.BeaconState, error) {
|
||||
return ms.State, nil
|
||||
func (s *ChainService) HeadState(context.Context) (state.BeaconState, error) {
|
||||
return s.State, nil
|
||||
}
|
||||
|
||||
// CurrentFork mocks HeadState method in chain service.
|
||||
func (ms *ChainService) CurrentFork() *pb.Fork {
|
||||
return ms.Fork
|
||||
func (s *ChainService) CurrentFork() *ethpb.Fork {
|
||||
return s.Fork
|
||||
}
|
||||
|
||||
// FinalizedCheckpt mocks FinalizedCheckpt method in chain service.
|
||||
func (ms *ChainService) FinalizedCheckpt() *ethpb.Checkpoint {
|
||||
return ms.FinalizedCheckPoint
|
||||
func (s *ChainService) FinalizedCheckpt() *ethpb.Checkpoint {
|
||||
return s.FinalizedCheckPoint
|
||||
}
|
||||
|
||||
// CurrentJustifiedCheckpt mocks CurrentJustifiedCheckpt method in chain service.
|
||||
func (ms *ChainService) CurrentJustifiedCheckpt() *ethpb.Checkpoint {
|
||||
return ms.CurrentJustifiedCheckPoint
|
||||
func (s *ChainService) CurrentJustifiedCheckpt() *ethpb.Checkpoint {
|
||||
return s.CurrentJustifiedCheckPoint
|
||||
}
|
||||
|
||||
// PreviousJustifiedCheckpt mocks PreviousJustifiedCheckpt method in chain service.
|
||||
func (ms *ChainService) PreviousJustifiedCheckpt() *ethpb.Checkpoint {
|
||||
return ms.PreviousJustifiedCheckPoint
|
||||
func (s *ChainService) PreviousJustifiedCheckpt() *ethpb.Checkpoint {
|
||||
return s.PreviousJustifiedCheckPoint
|
||||
}
|
||||
|
||||
// ReceiveAttestation mocks ReceiveAttestation method in chain service.
|
||||
func (ms *ChainService) ReceiveAttestation(_ context.Context, _ *ethpb.Attestation) error {
|
||||
func (s *ChainService) ReceiveAttestation(_ context.Context, _ *ethpb.Attestation) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReceiveAttestationNoPubsub mocks ReceiveAttestationNoPubsub method in chain service.
|
||||
func (ms *ChainService) ReceiveAttestationNoPubsub(context.Context, *ethpb.Attestation) error {
|
||||
func (s *ChainService) ReceiveAttestationNoPubsub(context.Context, *ethpb.Attestation) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// AttestationPreState mocks AttestationPreState method in chain service.
|
||||
func (ms *ChainService) AttestationPreState(_ context.Context, _ *ethpb.Attestation) (*stateTrie.BeaconState, error) {
|
||||
return ms.State, nil
|
||||
func (s *ChainService) AttestationPreState(_ context.Context, _ *ethpb.Attestation) (state.BeaconState, error) {
|
||||
return s.State, nil
|
||||
}
|
||||
|
||||
// HeadValidatorsIndices mocks the same method in the chain service.
|
||||
func (ms *ChainService) HeadValidatorsIndices(_ context.Context, epoch uint64) ([]uint64, error) {
|
||||
if ms.State == nil {
|
||||
return []uint64{}, nil
|
||||
func (s *ChainService) HeadValidatorsIndices(_ context.Context, epoch types.Epoch) ([]types.ValidatorIndex, error) {
|
||||
if s.State == nil {
|
||||
return []types.ValidatorIndex{}, nil
|
||||
}
|
||||
return helpers.ActiveValidatorIndices(ms.State, epoch)
|
||||
return helpers.ActiveValidatorIndices(s.State, epoch)
|
||||
}
|
||||
|
||||
// HeadSeed mocks the same method in the chain service.
|
||||
func (ms *ChainService) HeadSeed(_ context.Context, epoch uint64) ([32]byte, error) {
|
||||
return helpers.Seed(ms.State, epoch, params.BeaconConfig().DomainBeaconAttester)
|
||||
func (s *ChainService) HeadSeed(_ context.Context, epoch types.Epoch) ([32]byte, error) {
|
||||
return helpers.Seed(s.State, epoch, params.BeaconConfig().DomainBeaconAttester)
|
||||
}
|
||||
|
||||
// HeadETH1Data provides the current ETH1Data of the head state.
|
||||
func (ms *ChainService) HeadETH1Data() *ethpb.Eth1Data {
|
||||
return ms.ETH1Data
|
||||
func (s *ChainService) HeadETH1Data() *ethpb.Eth1Data {
|
||||
return s.ETH1Data
|
||||
}
|
||||
|
||||
// ProtoArrayStore mocks the same method in the chain service.
|
||||
func (ms *ChainService) ProtoArrayStore() *protoarray.Store {
|
||||
return ms.ForkChoiceStore
|
||||
func (s *ChainService) ProtoArrayStore() *protoarray.Store {
|
||||
return s.ForkChoiceStore
|
||||
}
|
||||
|
||||
// GenesisTime mocks the same method in the chain service.
|
||||
func (ms *ChainService) GenesisTime() time.Time {
|
||||
return ms.Genesis
|
||||
func (s *ChainService) GenesisTime() time.Time {
|
||||
return s.Genesis
|
||||
}
|
||||
|
||||
// GenesisValidatorRoot mocks the same method in the chain service.
|
||||
func (ms *ChainService) GenesisValidatorRoot() [32]byte {
|
||||
return ms.ValidatorsRoot
|
||||
func (s *ChainService) GenesisValidatorRoot() [32]byte {
|
||||
return s.ValidatorsRoot
|
||||
}
|
||||
|
||||
// CurrentSlot mocks the same method in the chain service.
|
||||
func (ms *ChainService) CurrentSlot() uint64 {
|
||||
if ms.Slot != nil {
|
||||
return *ms.Slot
|
||||
func (s *ChainService) CurrentSlot() types.Slot {
|
||||
if s.Slot != nil {
|
||||
return *s.Slot
|
||||
}
|
||||
return uint64(time.Now().Unix()-ms.Genesis.Unix()) / params.BeaconConfig().SecondsPerSlot
|
||||
return types.Slot(uint64(time.Now().Unix()-s.Genesis.Unix()) / params.BeaconConfig().SecondsPerSlot)
|
||||
}
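The mock's CurrentSlot converts wall-clock time into a slot by dividing the seconds elapsed since genesis by SecondsPerSlot. A standalone sketch of the same arithmetic, with the mainnet 12-second slot time assumed as a constant:

package main

import (
	"fmt"
	"time"
)

// secondsPerSlot is assumed to be the mainnet value of 12 seconds.
const secondsPerSlot = 12

// currentSlot mirrors the mock: elapsed seconds since genesis divided by the slot time.
func currentSlot(genesis, now time.Time) uint64 {
	if now.Before(genesis) {
		return 0
	}
	return uint64(now.Unix()-genesis.Unix()) / secondsPerSlot
}

func main() {
	genesis := time.Now().Add(-25 * time.Second)
	fmt.Println(currentSlot(genesis, time.Now())) // prints 2
}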
|
||||
|
||||
// Participation mocks the same method in the chain service.
|
||||
func (ms *ChainService) Participation(_ uint64) *precompute.Balance {
|
||||
return ms.Balance
|
||||
func (s *ChainService) Participation(_ uint64) *precompute.Balance {
|
||||
return s.Balance
|
||||
}
|
||||
|
||||
// IsValidAttestation always returns true.
|
||||
func (ms *ChainService) IsValidAttestation(_ context.Context, _ *ethpb.Attestation) bool {
|
||||
return ms.ValidAttestation
|
||||
func (s *ChainService) IsValidAttestation(_ context.Context, _ *ethpb.Attestation) bool {
|
||||
return s.ValidAttestation
|
||||
}
|
||||
|
||||
// IsCanonical determines whether a block with the provided root is part of
|
||||
// the canonical chain.
|
||||
func (ms *ChainService) IsCanonical(_ context.Context, r [32]byte) (bool, error) {
|
||||
if ms.CanonicalRoots != nil {
|
||||
_, ok := ms.CanonicalRoots[r]
|
||||
func (s *ChainService) IsCanonical(_ context.Context, r [32]byte) (bool, error) {
|
||||
if s.CanonicalRoots != nil {
|
||||
_, ok := s.CanonicalRoots[r]
|
||||
return ok, nil
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// HasInitSyncBlock mocks the same method in the chain service.
|
||||
func (ms *ChainService) HasInitSyncBlock(_ [32]byte) bool {
|
||||
func (s *ChainService) HasInitSyncBlock(_ [32]byte) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// HeadGenesisValidatorRoot mocks HeadGenesisValidatorRoot method in chain service.
|
||||
func (ms *ChainService) HeadGenesisValidatorRoot() [32]byte {
|
||||
func (s *ChainService) HeadGenesisValidatorRoot() [32]byte {
|
||||
return [32]byte{}
|
||||
}
|
||||
|
||||
// VerifyBlkDescendant mocks VerifyBlkDescendant and returns the configured VerifyBlkDescendantErr.
|
||||
func (ms *ChainService) VerifyBlkDescendant(_ context.Context, _ [32]byte) error {
|
||||
return ms.VerifyBlkDescendantErr
|
||||
func (s *ChainService) VerifyBlkDescendant(_ context.Context, _ [32]byte) error {
|
||||
return s.VerifyBlkDescendantErr
|
||||
}
|
||||
|
||||
// VerifyLmdFfgConsistency mocks VerifyLmdFfgConsistency and errors when the LMD and FFG roots do not match.
|
||||
func (ms *ChainService) VerifyLmdFfgConsistency(_ context.Context, a *ethpb.Attestation) error {
|
||||
func (s *ChainService) VerifyLmdFfgConsistency(_ context.Context, a *ethpb.Attestation) error {
|
||||
if !bytes.Equal(a.Data.BeaconBlockRoot, a.Data.Target.Root) {
|
||||
return errors.New("LMD and FFG miss matched")
|
||||
}
|
||||
@@ -374,9 +383,53 @@ func (ms *ChainService) VerifyLmdFfgConsistency(_ context.Context, a *ethpb.Atte
|
||||
}
|
||||
|
||||
// VerifyFinalizedConsistency mocks VerifyFinalizedConsistency and errors when the root does not match the finalized checkpoint root.
|
||||
func (ms *ChainService) VerifyFinalizedConsistency(_ context.Context, r []byte) error {
|
||||
if !bytes.Equal(r, ms.FinalizedCheckPoint.Root) {
|
||||
func (s *ChainService) VerifyFinalizedConsistency(_ context.Context, r []byte) error {
|
||||
if !bytes.Equal(r, s.FinalizedCheckPoint.Root) {
|
||||
return errors.New("Root and finalized store are not consistent")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ChainHeads mocks ChainHeads and always returns two hard-coded roots at slots 0 and 1.
|
||||
func (s *ChainService) ChainHeads() ([][32]byte, []types.Slot) {
|
||||
return [][32]byte{
|
||||
bytesutil.ToBytes32(bytesutil.PadTo([]byte("foo"), 32)),
|
||||
bytesutil.ToBytes32(bytesutil.PadTo([]byte("bar"), 32)),
|
||||
},
|
||||
[]types.Slot{0, 1}
|
||||
}
|
||||
|
||||
// HeadPublicKeyToValidatorIndex mocks HeadPublicKeyToValidatorIndex and always returns 0 and true.
|
||||
func (s *ChainService) HeadPublicKeyToValidatorIndex(ctx context.Context, pubKey [48]byte) (types.ValidatorIndex, bool) {
|
||||
return 0, true
|
||||
}
|
||||
|
||||
// HeadValidatorIndexToPublicKey mocks HeadValidatorIndexToPublicKey and always returns the configured public key and nil.
|
||||
func (s *ChainService) HeadValidatorIndexToPublicKey(ctx context.Context, index types.ValidatorIndex) ([48]byte, error) {
|
||||
return s.PublicKey, nil
|
||||
}
|
||||
|
||||
// HeadSyncCommitteeIndices mocks HeadSyncCommitteeIndices and always returns the configured `SyncCommitteeIndices`.
|
||||
func (s *ChainService) HeadSyncCommitteeIndices(ctx context.Context, index types.ValidatorIndex, slot types.Slot) ([]types.CommitteeIndex, error) {
|
||||
return s.SyncCommitteeIndices, nil
|
||||
}
|
||||
|
||||
// HeadSyncCommitteePubKeys mocks HeadSyncCommitteePubKeys and always returns the configured pubkeys and nil.
|
||||
func (s *ChainService) HeadSyncCommitteePubKeys(ctx context.Context, slot types.Slot, committeeIndex types.CommitteeIndex) ([][]byte, error) {
|
||||
return s.SyncCommitteePubkeys, nil
|
||||
}
|
||||
|
||||
// HeadSyncCommitteeDomain mocks HeadSyncCommitteeDomain and always returns the configured domain and nil.
|
||||
func (s *ChainService) HeadSyncCommitteeDomain(ctx context.Context, slot types.Slot) ([]byte, error) {
|
||||
return s.SyncCommitteeDomain, nil
|
||||
}
|
||||
|
||||
// HeadSyncSelectionProofDomain mocks HeadSyncSelectionProofDomain and always returns the configured domain and nil.
|
||||
func (s *ChainService) HeadSyncSelectionProofDomain(ctx context.Context, slot types.Slot) ([]byte, error) {
|
||||
return s.SyncSelectionProofDomain, nil
|
||||
}
|
||||
|
||||
// HeadSyncContributionProofDomain mocks HeadSyncContributionProofDomain and always returns the configured domain and nil.
|
||||
func (s *ChainService) HeadSyncContributionProofDomain(ctx context.Context, slot types.Slot) ([]byte, error) {
|
||||
return s.SyncContributionProofDomain, nil
|
||||
}
|
||||
|
||||
@@ -4,17 +4,17 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db/filters"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
)
|
||||
|
||||
// VerifyWeakSubjectivityRoot verifies the weak subjectivity root in the service struct.
|
||||
// Reference design: https://github.com/ethereum/eth2.0-specs/blob/master/specs/phase0/weak-subjectivity.md#weak-subjectivity-sync-procedure
|
||||
// Reference design: https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/weak-subjectivity.md#weak-subjectivity-sync-procedure
|
||||
func (s *Service) VerifyWeakSubjectivityRoot(ctx context.Context) error {
|
||||
// TODO(7342): Remove the following to fully use weak subjectivity in production.
|
||||
if len(s.wsRoot) == 0 || s.wsEpoch == 0 {
|
||||
if s.cfg.WeakSubjectivityCheckpt == nil || len(s.cfg.WeakSubjectivityCheckpt.Root) == 0 || s.cfg.WeakSubjectivityCheckpt.Epoch == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -23,28 +23,28 @@ func (s *Service) VerifyWeakSubjectivityRoot(ctx context.Context) error {
|
||||
if s.wsVerified {
|
||||
return nil
|
||||
}
|
||||
if s.wsEpoch > s.finalizedCheckpt.Epoch {
|
||||
if s.cfg.WeakSubjectivityCheckpt.Epoch > s.finalizedCheckpt.Epoch {
|
||||
return nil
|
||||
}
|
||||
|
||||
r := bytesutil.ToBytes32(s.wsRoot)
|
||||
log.Infof("Performing weak subjectivity check for root %#x in epoch %d", r, s.wsEpoch)
|
||||
r := bytesutil.ToBytes32(s.cfg.WeakSubjectivityCheckpt.Root)
|
||||
log.Infof("Performing weak subjectivity check for root %#x in epoch %d", r, s.cfg.WeakSubjectivityCheckpt.Epoch)
|
||||
// Save initial sync cached blocks to DB.
|
||||
if err := s.beaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
|
||||
if err := s.cfg.BeaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
|
||||
return err
|
||||
}
|
||||
// A node should have the weak subjectivity block in the DB.
|
||||
if !s.beaconDB.HasBlock(ctx, r) {
|
||||
if !s.cfg.BeaconDB.HasBlock(ctx, r) {
|
||||
return fmt.Errorf("node does not have root in DB: %#x", r)
|
||||
}
|
||||
|
||||
startSlot, err := helpers.StartSlot(s.wsEpoch)
|
||||
startSlot, err := core.StartSlot(s.cfg.WeakSubjectivityCheckpt.Epoch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// A node should have the weak subjectivity block corresponding to the correct epoch in the DB.
|
||||
filter := filters.NewFilter().SetStartSlot(startSlot).SetEndSlot(startSlot + params.BeaconConfig().SlotsPerEpoch)
|
||||
roots, err := s.beaconDB.BlockRoots(ctx, filter)
|
||||
roots, err := s.cfg.BeaconDB.BlockRoots(ctx, filter)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -56,5 +56,5 @@ func (s *Service) VerifyWeakSubjectivityRoot(ctx context.Context) error {
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("node does not have root in db corresponding to epoch: %#x %d", r, s.wsEpoch)
|
||||
return fmt.Errorf("node does not have root in db corresponding to epoch: %#x %d", r, s.cfg.WeakSubjectivityCheckpt.Epoch)
|
||||
}
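Condensing the hunks above: the weak subjectivity check only runs once finalization reaches the configured checkpoint epoch, and then requires the checkpoint root to be present in the DB within that epoch's slot range. A simplified, self-contained sketch follows, using plain uint64 epochs and an in-memory stand-in for the beacon DB rather than the real prysm types.

package main

import (
	"bytes"
	"fmt"
)

const slotsPerEpoch = 32

type checkpoint struct {
	Root  []byte
	Epoch uint64
}

// memDB is a stand-in for the beacon DB, used only by this sketch.
type memDB struct {
	blocksBySlot map[uint64][32]byte
}

func (d *memDB) hasBlock(r [32]byte) bool {
	for _, root := range d.blocksBySlot {
		if root == r {
			return true
		}
	}
	return false
}

// verifyWSRoot skips the check until finalization reaches the checkpoint epoch,
// then requires the checkpoint root to exist in the DB inside that epoch's slot range.
func verifyWSRoot(d *memDB, ws checkpoint, finalizedEpoch uint64) error {
	if len(ws.Root) == 0 || ws.Epoch == 0 {
		return nil // no checkpoint configured
	}
	if ws.Epoch > finalizedEpoch {
		return nil // not finalized that far yet, verify later
	}
	var r [32]byte
	copy(r[:], ws.Root)
	if !d.hasBlock(r) {
		return fmt.Errorf("node does not have root in DB: %#x", r)
	}
	start := ws.Epoch * slotsPerEpoch
	for slot := start; slot <= start+slotsPerEpoch; slot++ {
		if root, ok := d.blocksBySlot[slot]; ok && bytes.Equal(root[:], r[:]) {
			return nil
		}
	}
	return fmt.Errorf("root %#x not found in epoch %d", r, ws.Epoch)
}

func main() {
	root := [32]byte{'a'}
	d := &memDB{blocksBySlot: map[uint64][32]byte{32: root}}
	fmt.Println(verifyWSRoot(d, checkpoint{Root: root[:], Epoch: 1}, 3)) // <nil>
}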
|
||||
|
||||
@@ -4,8 +4,11 @@ import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
)
|
||||
@@ -15,15 +18,14 @@ func TestService_VerifyWeakSubjectivityRoot(t *testing.T) {
|
||||
|
||||
b := testutil.NewBeaconBlock()
|
||||
b.Block.Slot = 32
|
||||
require.NoError(t, beaconDB.SaveBlock(context.Background(), b))
|
||||
require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(b)))
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
tests := []struct {
|
||||
wsVerified bool
|
||||
wantErr bool
|
||||
wsRoot [32]byte
|
||||
wsEpoch uint64
|
||||
finalizedEpoch uint64
|
||||
checkpt *ethpb.Checkpoint
|
||||
finalizedEpoch types.Epoch
|
||||
errString string
|
||||
name string
|
||||
}{
|
||||
@@ -33,37 +35,34 @@ func TestService_VerifyWeakSubjectivityRoot(t *testing.T) {
|
||||
},
|
||||
{
|
||||
name: "already verified",
|
||||
wsEpoch: 2,
|
||||
checkpt: ðpb.Checkpoint{Epoch: 2},
|
||||
finalizedEpoch: 2,
|
||||
wsVerified: true,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "not yet to verify, ws epoch higher than finalized epoch",
|
||||
wsEpoch: 2,
|
||||
checkpt: ðpb.Checkpoint{Epoch: 2},
|
||||
finalizedEpoch: 1,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "can't find the block in DB",
|
||||
wsEpoch: 1,
|
||||
wsRoot: [32]byte{'a'},
|
||||
checkpt: ðpb.Checkpoint{Root: bytesutil.PadTo([]byte{'a'}, 32), Epoch: 1},
|
||||
finalizedEpoch: 3,
|
||||
wantErr: true,
|
||||
errString: "node does not have root in DB",
|
||||
},
|
||||
{
|
||||
name: "can't find the block corresponds to ws epoch in DB",
|
||||
wsEpoch: 2,
|
||||
wsRoot: r, // Root belongs in epoch 1.
|
||||
checkpt: ðpb.Checkpoint{Root: r[:], Epoch: 2}, // Root belongs in epoch 1.
|
||||
finalizedEpoch: 3,
|
||||
wantErr: true,
|
||||
errString: "node does not have root in db corresponding to epoch",
|
||||
},
|
||||
{
|
||||
name: "can verify and pass",
|
||||
wsEpoch: 1,
|
||||
wsRoot: r,
|
||||
checkpt: ðpb.Checkpoint{Root: r[:], Epoch: 1},
|
||||
finalizedEpoch: 3,
|
||||
wantErr: false,
|
||||
},
|
||||
@@ -71,9 +70,7 @@ func TestService_VerifyWeakSubjectivityRoot(t *testing.T) {
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
s := &Service{
|
||||
beaconDB: beaconDB,
|
||||
wsRoot: tt.wsRoot[:],
|
||||
wsEpoch: tt.wsEpoch,
|
||||
cfg: &Config{BeaconDB: beaconDB, WeakSubjectivityCheckpt: tt.checkpt},
|
||||
wsVerified: tt.wsVerified,
|
||||
finalizedCheckpt: ðpb.Checkpoint{Epoch: tt.finalizedEpoch},
|
||||
}
|
||||
|
||||
beacon-chain/cache/BUILD.bazel (vendored, 57 changed lines)
@@ -1,8 +1,7 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library")
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_test")
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
# gazelle:ignore committee_disabled.go
|
||||
# gazelle:ignore proposer_indices_disabled.go
|
||||
# gazelle:exclude committee_disabled.go
|
||||
# gazelle:exclude proposer_indices_disabled.go
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
@@ -11,37 +10,50 @@ go_library(
|
||||
"committees.go",
|
||||
"common.go",
|
||||
"doc.go",
|
||||
"error.go",
|
||||
"proposer_indices_type.go",
|
||||
"skip_slot_cache.go",
|
||||
"subnet_ids.go",
|
||||
"proposer_indices_type.go",
|
||||
"sync_committee_head_state.go",
|
||||
"sync_subnet_ids.go",
|
||||
] + select({
|
||||
"//fuzz:fuzzing_enabled": [
|
||||
"//testing/fuzz:fuzzing_enabled": [
|
||||
"active_balance_disabled.go",
|
||||
"committee_disabled.go",
|
||||
"proposer_indices_disabled.go"
|
||||
"proposer_indices_disabled.go",
|
||||
"sync_committee_disabled.go",
|
||||
],
|
||||
"//conditions:default": [
|
||||
"active_balance.go",
|
||||
"committee.go",
|
||||
"proposer_indices.go",
|
||||
"sync_committee.go",
|
||||
],
|
||||
}),
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/cache",
|
||||
visibility = [
|
||||
"//beacon-chain:__subpackages__",
|
||||
"//fuzz:__pkg__",
|
||||
"//testing/fuzz:__pkg__",
|
||||
"//tools:__subpackages__",
|
||||
],
|
||||
deps = [
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//proto/beacon/p2p/v1:go_default_library",
|
||||
"//shared/featureconfig:go_default_library",
|
||||
"//shared/hashutil:go_default_library",
|
||||
"//beacon-chain/state/v2:go_default_library",
|
||||
"//cache/lru:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//container/slice:go_default_library",
|
||||
"//crypto/hash:go_default_library",
|
||||
"//crypto/rand:go_default_library",
|
||||
"//math:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//shared/bytesutil:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
"//shared/sliceutil:go_default_library",
|
||||
"@com_github_hashicorp_golang_lru//:go_default_library",
|
||||
"@com_github_patrickmn_go_cache//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
|
||||
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
|
||||
"@io_k8s_client_go//tools/cache:go_default_library",
|
||||
"@io_opencensus_go//trace:go_default_library",
|
||||
],
|
||||
@@ -51,26 +63,33 @@ go_test(
|
||||
name = "go_default_test",
|
||||
size = "small",
|
||||
srcs = [
|
||||
"active_balance_test.go",
|
||||
"attestation_data_test.go",
|
||||
"cache_test.go",
|
||||
"checkpoint_state_test.go",
|
||||
"committee_fuzz_test.go",
|
||||
"committee_test.go",
|
||||
"cache_test.go",
|
||||
"proposer_indices_test.go",
|
||||
"skip_slot_cache_test.go",
|
||||
"subnet_ids_test.go",
|
||||
"proposer_indices_test.go"
|
||||
"sync_committee_head_state_test.go",
|
||||
"sync_committee_test.go",
|
||||
"sync_subnet_ids_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//proto/beacon/p2p/v1:go_default_library",
|
||||
"//beacon-chain/state/v1:go_default_library",
|
||||
"//beacon-chain/state/v2:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//shared/bytesutil:go_default_library",
|
||||
"//shared/featureconfig:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
"//shared/testutil:go_default_library",
|
||||
"//shared/testutil/assert:go_default_library",
|
||||
"//shared/testutil/require:go_default_library",
|
||||
"@com_github_gogo_protobuf//proto:go_default_library",
|
||||
"@com_github_google_gofuzz//:go_default_library",
|
||||
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
|
||||
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
|
||||
"@org_golang_google_protobuf//proto:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
beacon-chain/cache/active_balance.go (vendored, Normal file, 119 lines)
@@ -0,0 +1,119 @@
|
||||
// +build !libfuzzer
|
||||
|
||||
package cache
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"sync"
|
||||
|
||||
lru "github.com/hashicorp/golang-lru"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
ethTypes "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
lruwrpr "github.com/prysmaticlabs/prysm/cache/lru"
|
||||
"github.com/prysmaticlabs/prysm/config/features"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
)
|
||||
|
||||
var (
|
||||
// maxBalanceCacheSize defines the max number of active balances the cache can hold.
|
||||
maxBalanceCacheSize = uint64(4)
|
||||
|
||||
// BalanceCacheMiss tracks the number of balance requests that aren't present in the cache.
|
||||
balanceCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "total_effective_balance_cache_miss",
|
||||
Help: "The number of get requests that aren't present in the cache.",
|
||||
})
|
||||
// BalanceCacheHit tracks the number of balance requests that are in the cache.
|
||||
balanceCacheHit = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "total_effective_balance_cache_hit",
|
||||
Help: "The number of get requests that are present in the cache.",
|
||||
})
|
||||
)
|
||||
|
||||
// BalanceCache is a struct with 1 LRU cache for looking up balance by epoch.
|
||||
type BalanceCache struct {
|
||||
cache *lru.Cache
|
||||
lock sync.RWMutex
|
||||
}
|
||||
|
||||
// NewEffectiveBalanceCache creates a new effective balance cache for storing/accessing total balance by epoch.
|
||||
func NewEffectiveBalanceCache() *BalanceCache {
|
||||
return &BalanceCache{
|
||||
cache: lruwrpr.New(int(maxBalanceCacheSize)),
|
||||
}
|
||||
}
|
||||
|
||||
// AddTotalEffectiveBalance adds a new total effective balance entry for current balance for state `st` into the cache.
|
||||
func (c *BalanceCache) AddTotalEffectiveBalance(st state.ReadOnlyBeaconState, balance uint64) error {
|
||||
if !features.Get().EnableActiveBalanceCache {
|
||||
return nil
|
||||
}
|
||||
key, err := balanceCacheKey(st)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
_ = c.cache.Add(key, balance)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get returns the current epoch's effective balance for state `st` in cache.
|
||||
func (c *BalanceCache) Get(st state.ReadOnlyBeaconState) (uint64, error) {
|
||||
if !features.Get().EnableActiveBalanceCache {
|
||||
return 0, ErrNotFound
|
||||
}
|
||||
key, err := balanceCacheKey(st)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
|
||||
value, exists := c.cache.Get(key)
|
||||
if !exists {
|
||||
balanceCacheMiss.Inc()
|
||||
return 0, ErrNotFound
|
||||
}
|
||||
balanceCacheHit.Inc()
|
||||
return value.(uint64), nil
|
||||
}
|
||||
|
||||
// Given input state `st`, balance key is constructed as:
|
||||
// (block_root in `st` at epoch_start_slot - 1) + current_epoch + validator_count
|
||||
func balanceCacheKey(st state.ReadOnlyBeaconState) (string, error) {
|
||||
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
|
||||
currentEpoch := st.Slot().DivSlot(slotsPerEpoch)
|
||||
epochStartSlot, err := slotsPerEpoch.SafeMul(uint64(currentEpoch))
|
||||
if err != nil {
|
||||
// impossible condition due to early division
|
||||
return "", errors.Errorf("start slot calculation overflows: %v", err)
|
||||
}
|
||||
prevSlot := ethTypes.Slot(0)
|
||||
if epochStartSlot > 1 {
|
||||
prevSlot = epochStartSlot - 1
|
||||
}
|
||||
r, err := st.BlockRootAtIndex(uint64(prevSlot % params.BeaconConfig().SlotsPerHistoricalRoot))
|
||||
if err != nil {
|
||||
// impossible condition because index is always constrained within state
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Mix in current epoch
|
||||
b := make([]byte, 8)
|
||||
binary.LittleEndian.PutUint64(b, uint64(currentEpoch))
|
||||
key := append(r, b...)
|
||||
|
||||
// Mix in validator count
|
||||
b = make([]byte, 8)
|
||||
binary.LittleEndian.PutUint64(b, uint64(st.NumValidators()))
|
||||
key = append(key, b...)
|
||||
|
||||
return string(key), nil
|
||||
}
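The comment above describes the cache key as the previous epoch-boundary block root, followed by the little-endian current epoch and validator count. A standalone toy version of that layout, using plain uint64 values instead of the eth2-types aliases:

package main

import (
	"encoding/binary"
	"fmt"
)

// balanceKey reproduces the layout described above: previous epoch-boundary
// block root, then little-endian current epoch, then little-endian validator count.
func balanceKey(prevBlockRoot []byte, currentEpoch, validatorCount uint64) string {
	key := append([]byte{}, prevBlockRoot...)

	b := make([]byte, 8)
	binary.LittleEndian.PutUint64(b, currentEpoch)
	key = append(key, b...)

	b = make([]byte, 8)
	binary.LittleEndian.PutUint64(b, validatorCount)
	key = append(key, b...)

	return string(key)
}

func main() {
	root := make([]byte, 32)
	fmt.Println(len(balanceKey(root, 5, 100))) // 48 bytes: 32 + 8 + 8
}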
|
||||
beacon-chain/cache/active_balance_disabled.go (vendored, Normal file, 31 lines)
@@ -0,0 +1,31 @@
|
||||
// +build libfuzzer
|
||||
|
||||
package cache
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
lru "github.com/hashicorp/golang-lru"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
)
|
||||
|
||||
// FakeBalanceCache is a fake struct with 1 LRU cache for looking up balance by epoch.
|
||||
type FakeBalanceCache struct {
|
||||
cache *lru.Cache
|
||||
lock sync.RWMutex
|
||||
}
|
||||
|
||||
// NewEffectiveBalanceCache creates a new effective balance cache for storing/accessing total balance by epoch.
|
||||
func NewEffectiveBalanceCache() *FakeBalanceCache {
|
||||
return &FakeBalanceCache{}
|
||||
}
|
||||
|
||||
// AddTotalEffectiveBalance adds a new total effective balance entry for current balance for state `st` into the cache.
|
||||
func (c *FakeBalanceCache) AddTotalEffectiveBalance(st state.ReadOnlyBeaconState, balance uint64) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get returns the current epoch's effective balance for state `st` in cache.
|
||||
func (c *FakeBalanceCache) Get(st state.ReadOnlyBeaconState) (uint64, error) {
|
||||
return 0, nil
|
||||
}
|
||||
beacon-chain/cache/active_balance_test.go (vendored, Normal file, 81 lines)
@@ -0,0 +1,81 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"math"
|
||||
"testing"
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
state "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
|
||||
"github.com/prysmaticlabs/prysm/config/features"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
)
|
||||
|
||||
func TestBalanceCache_AddGetBalance(t *testing.T) {
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
EnableActiveBalanceCache: true,
|
||||
})
|
||||
defer resetCfg()
|
||||
|
||||
blockRoots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot)
|
||||
for i := 0; i < len(blockRoots); i++ {
|
||||
b := make([]byte, 8)
|
||||
binary.LittleEndian.PutUint64(b, uint64(i))
|
||||
blockRoots[i] = b
|
||||
}
|
||||
raw := ðpb.BeaconState{
|
||||
BlockRoots: blockRoots,
|
||||
}
|
||||
st, err := state.InitializeFromProto(raw)
|
||||
require.NoError(t, err)
|
||||
|
||||
cache := NewEffectiveBalanceCache()
|
||||
_, err = cache.Get(st)
|
||||
require.ErrorContains(t, ErrNotFound.Error(), err)
|
||||
|
||||
b := uint64(100)
|
||||
require.NoError(t, cache.AddTotalEffectiveBalance(st, b))
|
||||
cachedB, err := cache.Get(st)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, b, cachedB)
|
||||
|
||||
require.NoError(t, st.SetSlot(1000))
|
||||
_, err = cache.Get(st)
|
||||
require.ErrorContains(t, ErrNotFound.Error(), err)
|
||||
|
||||
b = uint64(200)
|
||||
require.NoError(t, cache.AddTotalEffectiveBalance(st, b))
|
||||
cachedB, err = cache.Get(st)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, b, cachedB)
|
||||
|
||||
require.NoError(t, st.SetSlot(1000+params.BeaconConfig().SlotsPerHistoricalRoot))
|
||||
_, err = cache.Get(st)
|
||||
require.ErrorContains(t, ErrNotFound.Error(), err)
|
||||
|
||||
b = uint64(300)
|
||||
require.NoError(t, cache.AddTotalEffectiveBalance(st, b))
|
||||
cachedB, err = cache.Get(st)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, b, cachedB)
|
||||
}
|
||||
|
||||
func TestBalanceCache_BalanceKey(t *testing.T) {
|
||||
blockRoots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot)
|
||||
for i := 0; i < len(blockRoots); i++ {
|
||||
b := make([]byte, 8)
|
||||
binary.LittleEndian.PutUint64(b, uint64(i))
|
||||
blockRoots[i] = b
|
||||
}
|
||||
raw := ðpb.BeaconState{
|
||||
BlockRoots: blockRoots,
|
||||
}
|
||||
st, err := state.InitializeFromProto(raw)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st.SetSlot(types.Slot(math.MaxUint64)))
|
||||
|
||||
_, err = balanceCacheKey(st)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
beacon-chain/cache/attestation_data.go (vendored, 5 changed lines)
@@ -10,8 +10,7 @@ import (
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
@@ -98,7 +97,7 @@ func (c *AttestationCache) Get(ctx context.Context, req *ethpb.AttestationDataRe
|
||||
|
||||
if exists && item != nil && item.(*attestationReqResWrapper).res != nil {
|
||||
attestationCacheHit.Inc()
|
||||
return state.CopyAttestationData(item.(*attestationReqResWrapper).res), nil
|
||||
return ethpb.CopyAttestationData(item.(*attestationReqResWrapper).res), nil
|
||||
}
|
||||
attestationCacheMiss.Inc()
|
||||
return nil, nil
|
||||
|
||||
beacon-chain/cache/attestation_data_test.go (vendored, 4 changed lines)
@@ -4,10 +4,10 @@ import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
func TestAttestationCache_RoundTrip(t *testing.T) {
|
||||
|
||||
beacon-chain/cache/cache_test.go (vendored, 4 changed lines)
@@ -2,12 +2,8 @@ package cache
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/shared/featureconfig"
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
resetCfg := featureconfig.InitWithReset(&featureconfig.Flags{EnableEth1DataVoteCache: true})
|
||||
defer resetCfg()
|
||||
m.Run()
|
||||
}
|
||||
|
||||
beacon-chain/cache/checkpoint_state.go (vendored, 23 changed lines)
@@ -6,9 +6,10 @@ import (
|
||||
lru "github.com/hashicorp/golang-lru"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/shared/hashutil"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
lruwrpr "github.com/prysmaticlabs/prysm/cache/lru"
|
||||
"github.com/prysmaticlabs/prysm/crypto/hash"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -36,21 +37,17 @@ type CheckpointStateCache struct {
|
||||
|
||||
// NewCheckpointStateCache creates a new checkpoint state cache for storing/accessing processed state.
|
||||
func NewCheckpointStateCache() *CheckpointStateCache {
|
||||
cache, err := lru.New(maxCheckpointStateSize)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return &CheckpointStateCache{
|
||||
cache: cache,
|
||||
cache: lruwrpr.New(maxCheckpointStateSize),
|
||||
}
|
||||
}
|
||||
|
||||
// StateByCheckpoint fetches state by checkpoint. Returns true with a
|
||||
// reference to the CheckpointState info, if exists. Otherwise returns false, nil.
|
||||
func (c *CheckpointStateCache) StateByCheckpoint(cp *ethpb.Checkpoint) (*stateTrie.BeaconState, error) {
|
||||
func (c *CheckpointStateCache) StateByCheckpoint(cp *ethpb.Checkpoint) (state.BeaconState, error) {
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
h, err := hashutil.HashProto(cp)
|
||||
h, err := hash.HashProto(cp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -60,7 +57,7 @@ func (c *CheckpointStateCache) StateByCheckpoint(cp *ethpb.Checkpoint) (*stateTr
|
||||
if exists && item != nil {
|
||||
checkpointStateHit.Inc()
|
||||
// Copy here is unnecessary since the return will only be used to verify attestation signature.
|
||||
return item.(*stateTrie.BeaconState), nil
|
||||
return item.(state.BeaconState), nil
|
||||
}
|
||||
|
||||
checkpointStateMiss.Inc()
|
||||
@@ -69,10 +66,10 @@ func (c *CheckpointStateCache) StateByCheckpoint(cp *ethpb.Checkpoint) (*stateTr
|
||||
|
||||
// AddCheckpointState adds CheckpointState object to the cache. This method also trims the least
|
||||
// recently added CheckpointState object if the cache size has reached the max cache size limit.
|
||||
func (c *CheckpointStateCache) AddCheckpointState(cp *ethpb.Checkpoint, s *stateTrie.BeaconState) error {
|
||||
func (c *CheckpointStateCache) AddCheckpointState(cp *ethpb.Checkpoint, s state.ReadOnlyBeaconState) error {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
h, err := hashutil.HashProto(cp)
|
||||
h, err := hash.HashProto(cp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
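For reference, the add/lookup flow of the checkpoint state cache reduces to a keyed map guarded by an RWMutex. The sketch below uses a plain map keyed by the checkpoint value and a string stand-in for the beacon state, whereas the real cache hashes the checkpoint proto for the key and enforces an LRU size cap.

package main

import (
	"fmt"
	"sync"
)

type checkpoint struct {
	Epoch uint64
	Root  [32]byte
}

// checkpointCache is a stand-in for CheckpointStateCache; the value type is a
// string here instead of a beacon state, and there is no LRU eviction.
type checkpointCache struct {
	mu      sync.RWMutex
	entries map[checkpoint]string
}

func newCheckpointCache() *checkpointCache {
	return &checkpointCache{entries: make(map[checkpoint]string)}
}

// Add stores the state for a checkpoint (the real cache also evicts the
// least recently added entry once the size cap is reached).
func (c *checkpointCache) Add(cp checkpoint, st string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.entries[cp] = st
}

// Get returns the cached state and whether it was present.
func (c *checkpointCache) Get(cp checkpoint) (string, bool) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	st, ok := c.entries[cp]
	return st, ok
}

func main() {
	c := newCheckpointCache()
	cp := checkpoint{Epoch: 1}
	c.Add(cp, "state-at-epoch-1")
	st, ok := c.Get(cp)
	fmt.Println(st, ok) // state-at-epoch-1 true
}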
beacon-chain/cache/checkpoint_state_test.go (vendored, 39 changed lines)
@@ -3,65 +3,70 @@ package cache
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
-    stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
-    pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
+    types "github.com/prysmaticlabs/eth2-types"
+    "github.com/prysmaticlabs/prysm/beacon-chain/state"
+    v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
+    ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
     "github.com/prysmaticlabs/prysm/shared/bytesutil"
     "github.com/prysmaticlabs/prysm/shared/params"
     "github.com/prysmaticlabs/prysm/shared/testutil/assert"
     "github.com/prysmaticlabs/prysm/shared/testutil/require"
     "google.golang.org/protobuf/proto"
 )

 func TestCheckpointStateCache_StateByCheckpoint(t *testing.T) {
     cache := NewCheckpointStateCache()

     cp1 := &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'A'}, 32)}
-    st, err := stateTrie.InitializeFromProto(&pb.BeaconState{
+    st, err := v1.InitializeFromProto(&ethpb.BeaconState{
         GenesisValidatorsRoot: params.BeaconConfig().ZeroHash[:],
         Slot:                  64,
     })
     require.NoError(t, err)

-    state, err := cache.StateByCheckpoint(cp1)
+    s, err := cache.StateByCheckpoint(cp1)
     require.NoError(t, err)
-    assert.Equal(t, (*stateTrie.BeaconState)(nil), state, "Expected state not to exist in empty cache")
+    assert.Equal(t, state.BeaconState(nil), s, "Expected state not to exist in empty cache")

     require.NoError(t, cache.AddCheckpointState(cp1, st))

-    state, err = cache.StateByCheckpoint(cp1)
+    s, err = cache.StateByCheckpoint(cp1)
     require.NoError(t, err)

-    if !proto.Equal(state.InnerStateUnsafe(), st.InnerStateUnsafe()) {
+    pbState1, err := v1.ProtobufBeaconState(s.InnerStateUnsafe())
+    require.NoError(t, err)
+    pbstate, err := v1.ProtobufBeaconState(st.InnerStateUnsafe())
+    require.NoError(t, err)
+    if !proto.Equal(pbState1, pbstate) {
         t.Error("incorrectly cached state")
     }

     cp2 := &ethpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte{'B'}, 32)}
-    st2, err := stateTrie.InitializeFromProto(&pb.BeaconState{
+    st2, err := v1.InitializeFromProto(&ethpb.BeaconState{
         Slot: 128,
     })
     require.NoError(t, err)
     require.NoError(t, cache.AddCheckpointState(cp2, st2))

-    state, err = cache.StateByCheckpoint(cp2)
+    s, err = cache.StateByCheckpoint(cp2)
     require.NoError(t, err)
-    assert.DeepEqual(t, st2.CloneInnerState(), state.CloneInnerState(), "incorrectly cached state")
+    assert.DeepEqual(t, st2.CloneInnerState(), s.CloneInnerState(), "incorrectly cached state")

-    state, err = cache.StateByCheckpoint(cp1)
+    s, err = cache.StateByCheckpoint(cp1)
     require.NoError(t, err)
-    assert.DeepEqual(t, st.CloneInnerState(), state.CloneInnerState(), "incorrectly cached state")
+    assert.DeepEqual(t, st.CloneInnerState(), s.CloneInnerState(), "incorrectly cached state")
 }

 func TestCheckpointStateCache_MaxSize(t *testing.T) {
     c := NewCheckpointStateCache()
-    st, err := stateTrie.InitializeFromProto(&pb.BeaconState{
+    st, err := v1.InitializeFromProto(&ethpb.BeaconState{
         Slot: 0,
     })
     require.NoError(t, err)

     for i := uint64(0); i < uint64(maxCheckpointStateSize+100); i++ {
-        require.NoError(t, st.SetSlot(i))
-        require.NoError(t, c.AddCheckpointState(&ethpb.Checkpoint{Epoch: i, Root: make([]byte, 32)}, st))
+        require.NoError(t, st.SetSlot(types.Slot(i)))
+        require.NoError(t, c.AddCheckpointState(&ethpb.Checkpoint{Epoch: types.Epoch(i), Root: make([]byte, 32)}, st))
     }

     assert.Equal(t, maxCheckpointStateSize, len(c.cache.Keys()))
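Note on the checkpoint-state test changes above: the old code built states with stateTrie.InitializeFromProto and compared the wrapped BeaconState objects directly with proto.Equal, while the new code goes through the versioned v1 state package and converts each side back to its protobuf form before comparing. A minimal sketch of that round-trip, written only against identifiers visible in this diff (it would live inside the same cache package; the helper name and literal values are illustrative, not part of the commit):

package cache

import (
    "testing"

    v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
    ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/shared/testutil/require"
    "google.golang.org/protobuf/proto"
)

// checkpointRoundTripSketch caches a v1 state under a checkpoint and verifies
// the cached copy matches the original by comparing protobuf representations.
func checkpointRoundTripSketch(t *testing.T) {
    c := NewCheckpointStateCache()

    st, err := v1.InitializeFromProto(&ethpb.BeaconState{Slot: 64})
    require.NoError(t, err)

    cp := &ethpb.Checkpoint{Epoch: 1, Root: make([]byte, 32)}
    require.NoError(t, c.AddCheckpointState(cp, st))

    got, err := c.StateByCheckpoint(cp)
    require.NoError(t, err)

    // Compare protobuf forms instead of the wrapped state objects.
    gotPb, err := v1.ProtobufBeaconState(got.InnerStateUnsafe())
    require.NoError(t, err)
    wantPb, err := v1.ProtobufBeaconState(st.InnerStateUnsafe())
    require.NoError(t, err)
    if !proto.Equal(gotPb, wantPb) {
        t.Error("cached state does not match the original")
    }
}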
58  beacon-chain/cache/committee.go  (vendored)
@@ -6,11 +6,14 @@ import (
     "errors"
     "sync"

+    lru "github.com/hashicorp/golang-lru"
     "github.com/prometheus/client_golang/prometheus"
     "github.com/prometheus/client_golang/prometheus/promauto"
+    types "github.com/prysmaticlabs/eth2-types"
+    lruwrpr "github.com/prysmaticlabs/prysm/cache/lru"
+    "github.com/prysmaticlabs/prysm/container/slice"
+    "github.com/prysmaticlabs/prysm/math"
     "github.com/prysmaticlabs/prysm/shared/params"
-    "github.com/prysmaticlabs/prysm/shared/sliceutil"
-    "k8s.io/client-go/tools/cache"
 )

 var (
@@ -32,7 +35,7 @@ var (

 // CommitteeCache is a struct with 1 queue for looking up shuffled indices list by seed.
 type CommitteeCache struct {
-    CommitteeCache *cache.FIFO
+    CommitteeCache *lru.Cache
     lock           sync.RWMutex
 }

@@ -42,28 +45,23 @@ func committeeKeyFn(obj interface{}) (string, error) {
     if !ok {
         return "", ErrNotCommittee
     }

     return key(info.Seed), nil
 }

 // NewCommitteesCache creates a new committee cache for storing/accessing shuffled indices of a committee.
 func NewCommitteesCache() *CommitteeCache {
     return &CommitteeCache{
-        CommitteeCache: cache.NewFIFO(committeeKeyFn),
+        CommitteeCache: lruwrpr.New(int(maxCommitteesCacheSize)),
     }
 }

 // Committee fetches the shuffled indices by slot and committee index. Every list of indices
 // represent one committee. Returns true if the list exists with slot and committee index. Otherwise returns false, nil.
-func (c *CommitteeCache) Committee(slot uint64, seed [32]byte, index uint64) ([]uint64, error) {
+func (c *CommitteeCache) Committee(slot types.Slot, seed [32]byte, index types.CommitteeIndex) ([]types.ValidatorIndex, error) {
     c.lock.RLock()
     defer c.lock.RUnlock()

-    obj, exists, err := c.CommitteeCache.GetByKey(key(seed))
-    if err != nil {
-        return nil, err
-    }
-
+    obj, exists := c.CommitteeCache.Get(key(seed))
     if exists {
         CommitteeCacheHit.Inc()
     } else {
@@ -77,11 +75,14 @@ func (c *CommitteeCache) Committee(slot uint64, seed [32]byte, index uint64) ([]
     }

     committeeCountPerSlot := uint64(1)
-    if item.CommitteeCount/params.BeaconConfig().SlotsPerEpoch > 1 {
-        committeeCountPerSlot = item.CommitteeCount / params.BeaconConfig().SlotsPerEpoch
+    if item.CommitteeCount/uint64(params.BeaconConfig().SlotsPerEpoch) > 1 {
+        committeeCountPerSlot = item.CommitteeCount / uint64(params.BeaconConfig().SlotsPerEpoch)
     }

-    indexOffSet := index + (slot%params.BeaconConfig().SlotsPerEpoch)*committeeCountPerSlot
+    indexOffSet, err := math.Add64(uint64(index), uint64(slot.ModSlot(params.BeaconConfig().SlotsPerEpoch).Mul(committeeCountPerSlot)))
+    if err != nil {
+        return nil, err
+    }
     start, end := startEndIndices(item, indexOffSet)

     if end > uint64(len(item.ShuffledIndices)) || end < start {
@@ -96,22 +97,19 @@ func (c *CommitteeCache) Committee(slot uint64, seed [32]byte, index uint64) ([]
 func (c *CommitteeCache) AddCommitteeShuffledList(committees *Committees) error {
     c.lock.Lock()
     defer c.lock.Unlock()

-    if err := c.CommitteeCache.AddIfNotPresent(committees); err != nil {
+    key, err := committeeKeyFn(committees)
+    if err != nil {
         return err
     }
-    trim(c.CommitteeCache, maxCommitteesCacheSize)
+    _ = c.CommitteeCache.Add(key, committees)
     return nil
 }

 // ActiveIndices returns the active indices of a given seed stored in cache.
-func (c *CommitteeCache) ActiveIndices(seed [32]byte) ([]uint64, error) {
+func (c *CommitteeCache) ActiveIndices(seed [32]byte) ([]types.ValidatorIndex, error) {
     c.lock.RLock()
     defer c.lock.RUnlock()
-    obj, exists, err := c.CommitteeCache.GetByKey(key(seed))
-    if err != nil {
-        return nil, err
-    }
+    obj, exists := c.CommitteeCache.Get(key(seed))

     if exists {
         CommitteeCacheHit.Inc()
@@ -132,11 +130,7 @@ func (c *CommitteeCache) ActiveIndices(seed [32]byte) ([]uint64, error) {
 func (c *CommitteeCache) ActiveIndicesCount(seed [32]byte) (int, error) {
     c.lock.RLock()
     defer c.lock.RUnlock()
-    obj, exists, err := c.CommitteeCache.GetByKey(key(seed))
-    if err != nil {
-        return 0, err
-    }
-
+    obj, exists := c.CommitteeCache.Get(key(seed))
     if exists {
         CommitteeCacheHit.Inc()
     } else {
@@ -154,21 +148,21 @@ func (c *CommitteeCache) ActiveIndicesCount(seed [32]byte) (int, error) {

 // HasEntry returns true if the committee cache has a value.
 func (c *CommitteeCache) HasEntry(seed string) bool {
-    _, ok, err := c.CommitteeCache.GetByKey(seed)
-    return err == nil && ok
+    _, ok := c.CommitteeCache.Get(seed)
+    return ok
 }

 func startEndIndices(c *Committees, index uint64) (uint64, uint64) {
     validatorCount := uint64(len(c.ShuffledIndices))
-    start := sliceutil.SplitOffset(validatorCount, c.CommitteeCount, index)
-    end := sliceutil.SplitOffset(validatorCount, c.CommitteeCount, index+1)
+    start := slice.SplitOffset(validatorCount, c.CommitteeCount, index)
+    end := slice.SplitOffset(validatorCount, c.CommitteeCount, index+1)
     return start, end
 }

 // Using seed as source for key to handle reorgs in the same epoch.
 // The seed is derived from state's array of randao mixes and epoch value
 // hashed together. This avoids collisions on different validator set. Spec definition:
-// https://github.com/ethereum/eth2.0-specs/blob/v0.9.3/specs/core/0_beacon-chain.md#get_seed
+// https://github.com/ethereum/consensus-specs/blob/v0.9.3/specs/core/0_beacon-chain.md#get_seed
 func key(seed [32]byte) string {
     return string(seed[:])
 }
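The substantive change in committee.go is the cache backend: the k8s.io FIFO store (keyed through committeeKeyFn and trimmed by hand) is replaced with an LRU from hashicorp/golang-lru, created through prysm's lruwrpr wrapper, so the oldest entry is evicted automatically once maxCommitteesCacheSize is reached. At the same time the exported methods switch from raw uint64s to the eth2-types wrappers (Slot, CommitteeIndex, ValidatorIndex). A caller-side sketch of the new typed lookup, using only identifiers that appear in this diff (the concrete committee layout is made up for illustration):

package cache

import (
    types "github.com/prysmaticlabs/eth2-types"
    "github.com/prysmaticlabs/prysm/shared/bytesutil"
)

// committeeLookupSketch shows the typed Committee lookup after this change:
// slot, committee index and the returned validator indices are distinct types,
// so mixing them up no longer compiles.
func committeeLookupSketch() ([]types.ValidatorIndex, error) {
    c := NewCommitteesCache()
    seed := bytesutil.ToBytes32([]byte("example seed"))

    // Hypothetical layout: a single committee holding four validators.
    if err := c.AddCommitteeShuffledList(&Committees{
        Seed:            seed,
        CommitteeCount:  1,
        ShuffledIndices: []types.ValidatorIndex{10, 11, 12, 13},
        SortedIndices:   []types.ValidatorIndex{10, 11, 12, 13},
    }); err != nil {
        return nil, err
    }

    // Committee 0 of slot 0; the LRU entry is found via key(seed).
    return c.Committee(types.Slot(0), seed, types.CommitteeIndex(0))
}

On a cache miss Committee returns nil, nil rather than an error, which is why the tests further down check for a nil slice before an entry has been added.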
15  beacon-chain/cache/committee_disabled.go  (vendored)
@@ -3,6 +3,8 @@
 // This file is used in fuzzer builds to bypass global committee caches.
 package cache

+import types "github.com/prysmaticlabs/eth2-types"
+
 // FakeCommitteeCache is a struct with 1 queue for looking up shuffled indices list by seed.
 type FakeCommitteeCache struct {
 }
@@ -14,7 +16,7 @@ func NewCommitteesCache() *FakeCommitteeCache {

 // Committee fetches the shuffled indices by slot and committee index. Every list of indices
 // represent one committee. Returns true if the list exists with slot and committee index. Otherwise returns false, nil.
-func (c *FakeCommitteeCache) Committee(slot uint64, seed [32]byte, index uint64) ([]uint64, error) {
+func (c *FakeCommitteeCache) Committee(slot types.Slot, seed [32]byte, index types.CommitteeIndex) ([]types.ValidatorIndex, error) {
     return nil, nil
 }

@@ -25,12 +27,12 @@ func (c *FakeCommitteeCache) AddCommitteeShuffledList(committees *Committees) er
 }

 // AddProposerIndicesList updates the committee shuffled list with proposer indices.
-func (c *FakeCommitteeCache) AddProposerIndicesList(seed [32]byte, indices []uint64) error {
+func (c *FakeCommitteeCache) AddProposerIndicesList(seed [32]byte, indices []types.ValidatorIndex) error {
     return nil
 }

 // ActiveIndices returns the active indices of a given seed stored in cache.
-func (c *FakeCommitteeCache) ActiveIndices(seed [32]byte) ([]uint64, error) {
+func (c *FakeCommitteeCache) ActiveIndices(seed [32]byte) ([]types.ValidatorIndex, error) {
     return nil, nil
 }

@@ -39,8 +41,13 @@ func (c *FakeCommitteeCache) ActiveIndicesCount(seed [32]byte) (int, error) {
     return 0, nil
 }

+// ActiveBalance returns the active balance of a given seed stored in cache.
+func (c *FakeCommitteeCache) ActiveBalance(seed [32]byte) (uint64, error) {
+    return 0, nil
+}
+
 // ProposerIndices returns the proposer indices of a given seed.
-func (c *FakeCommitteeCache) ProposerIndices(seed [32]byte) ([]uint64, error) {
+func (c *FakeCommitteeCache) ProposerIndices(seed [32]byte) ([]types.ValidatorIndex, error) {
     return nil, nil
 }
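committee_disabled.go has to mirror every exported method of the real cache so that fuzzer builds can swap it in behind the same NewCommitteesCache constructor; that is why the uint64 to eth2-types signature changes land here in lockstep, together with a new no-op ActiveBalance. The interface below is not declared anywhere in prysm; it is a hypothetical sketch of the method set the stub exposes after this change, written only to make the coupling explicit:

package cache

import types "github.com/prysmaticlabs/eth2-types"

// committeeProvider is a hypothetical interface (not part of the diff) listing
// the methods the fuzzer stub exposes after this change.
type committeeProvider interface {
    Committee(slot types.Slot, seed [32]byte, index types.CommitteeIndex) ([]types.ValidatorIndex, error)
    AddCommitteeShuffledList(committees *Committees) error
    AddProposerIndicesList(seed [32]byte, indices []types.ValidatorIndex) error
    ActiveIndices(seed [32]byte) ([]types.ValidatorIndex, error)
    ActiveIndicesCount(seed [32]byte) (int, error)
    ActiveBalance(seed [32]byte) (uint64, error)
    ProposerIndices(seed [32]byte) ([]types.ValidatorIndex, error)
}

// Compile-time check that the stub satisfies the sketched interface.
var _ committeeProvider = (*FakeCommitteeCache)(nil)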
4  beacon-chain/cache/committee_fuzz_test.go  (vendored)
@@ -32,7 +32,7 @@ func TestCommitteeCache_FuzzCommitteesByEpoch(t *testing.T) {
         require.NoError(t, err)
     }

-    assert.Equal(t, maxCommitteesCacheSize, uint64(len(cache.CommitteeCache.ListKeys())), "Incorrect key size")
+    assert.Equal(t, maxCommitteesCacheSize, uint64(len(cache.CommitteeCache.Keys())), "Incorrect key size")
 }

 func TestCommitteeCache_FuzzActiveIndices(t *testing.T) {
@@ -49,5 +49,5 @@ func TestCommitteeCache_FuzzActiveIndices(t *testing.T) {
         assert.DeepEqual(t, c.SortedIndices, indices)
     }

-    assert.Equal(t, maxCommitteesCacheSize, uint64(len(cache.CommitteeCache.ListKeys())), "Incorrect key size")
+    assert.Equal(t, maxCommitteesCacheSize, uint64(len(cache.CommitteeCache.Keys())), "Incorrect key size")
 }
31  beacon-chain/cache/committee_test.go  (vendored)
@@ -6,6 +6,7 @@ import (
     "strconv"
     "testing"

+    types "github.com/prysmaticlabs/eth2-types"
     "github.com/prysmaticlabs/prysm/shared/bytesutil"
     "github.com/prysmaticlabs/prysm/shared/params"
     "github.com/prysmaticlabs/prysm/shared/testutil/assert"
@@ -16,7 +17,7 @@ func TestCommitteeKeyFn_OK(t *testing.T) {
     item := &Committees{
         CommitteeCount:  1,
         Seed:            [32]byte{'A'},
-        ShuffledIndices: []uint64{1, 2, 3, 4, 5},
+        ShuffledIndices: []types.ValidatorIndex{1, 2, 3, 4, 5},
     }

     k, err := committeeKeyFn(item)
@@ -33,13 +34,13 @@ func TestCommitteeCache_CommitteesByEpoch(t *testing.T) {
     cache := NewCommitteesCache()

     item := &Committees{
-        ShuffledIndices: []uint64{1, 2, 3, 4, 5, 6},
+        ShuffledIndices: []types.ValidatorIndex{1, 2, 3, 4, 5, 6},
         Seed:            [32]byte{'A'},
         CommitteeCount:  3,
     }

     slot := params.BeaconConfig().SlotsPerEpoch
-    committeeIndex := uint64(1)
+    committeeIndex := types.CommitteeIndex(1)
     indices, err := cache.Committee(slot, item.Seed, committeeIndex)
     require.NoError(t, err)
     if indices != nil {
@@ -47,18 +48,18 @@ func TestCommitteeCache_CommitteesByEpoch(t *testing.T) {
     }
     require.NoError(t, cache.AddCommitteeShuffledList(item))

-    wantedIndex := uint64(0)
+    wantedIndex := types.CommitteeIndex(0)
     indices, err = cache.Committee(slot, item.Seed, wantedIndex)
     require.NoError(t, err)

-    start, end := startEndIndices(item, wantedIndex)
+    start, end := startEndIndices(item, uint64(wantedIndex))
     assert.DeepEqual(t, item.ShuffledIndices[start:end], indices)
 }

 func TestCommitteeCache_ActiveIndices(t *testing.T) {
     cache := NewCommitteesCache()

-    item := &Committees{Seed: [32]byte{'A'}, SortedIndices: []uint64{1, 2, 3, 4, 5, 6}}
+    item := &Committees{Seed: [32]byte{'A'}, SortedIndices: []types.ValidatorIndex{1, 2, 3, 4, 5, 6}}
     indices, err := cache.ActiveIndices(item.Seed)
     require.NoError(t, err)
     if indices != nil {
@@ -75,7 +76,7 @@ func TestCommitteeCache_ActiveIndices(t *testing.T) {
 func TestCommitteeCache_ActiveCount(t *testing.T) {
     cache := NewCommitteesCache()

-    item := &Committees{Seed: [32]byte{'A'}, SortedIndices: []uint64{1, 2, 3, 4, 5, 6}}
+    item := &Committees{Seed: [32]byte{'A'}, SortedIndices: []types.ValidatorIndex{1, 2, 3, 4, 5, 6}}
     count, err := cache.ActiveIndicesCount(item.Seed)
     require.NoError(t, err)
     assert.Equal(t, 0, count, "Expected active count not to exist in empty cache")
@@ -99,11 +100,11 @@ func TestCommitteeCache_CanRotate(t *testing.T) {
         require.NoError(t, cache.AddCommitteeShuffledList(item))
     }

-    k := cache.CommitteeCache.ListKeys()
+    k := cache.CommitteeCache.Keys()
     assert.Equal(t, maxCommitteesCacheSize, uint64(len(k)))

     sort.Slice(k, func(i, j int) bool {
-        return k[i] < k[j]
+        return k[i].(string) < k[j].(string)
     })
     wanted := end - int(maxCommitteesCacheSize)
     s := bytesutil.ToBytes32([]byte(strconv.Itoa(wanted)))
@@ -116,13 +117,15 @@ func TestCommitteeCache_CanRotate(t *testing.T) {
 func TestCommitteeCacheOutOfRange(t *testing.T) {
     cache := NewCommitteesCache()
     seed := bytesutil.ToBytes32([]byte("foo"))
-    err := cache.CommitteeCache.Add(&Committees{
+    comms := &Committees{
         CommitteeCount:  1,
         Seed:            seed,
-        ShuffledIndices: []uint64{0},
-        SortedIndices:   []uint64{},
-    })
-    require.NoError(t, err)
+        ShuffledIndices: []types.ValidatorIndex{0},
+        SortedIndices:   []types.ValidatorIndex{},
+    }
+    key, err := committeeKeyFn(comms)
+    assert.NoError(t, err)
+    _ = cache.CommitteeCache.Add(key, comms)

     _, err = cache.Committee(0, seed, math.MaxUint64) // Overflow!
     require.NotNil(t, err, "Did not fail as expected")
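TestCommitteeCacheOutOfRange above feeds math.MaxUint64 as the committee index and expects the lookup to fail rather than panic: with the rewrite in committee.go the offset now goes through math.Add64, which reports overflow, and the resulting window is rejected when it falls outside ShuffledIndices. As a companion, here is a small sketch of how that window is carved out of the shuffled list, assuming slice.SplitOffset keeps the usual (listSize*index)/chunks semantics; the helper name and the worked numbers are illustrative, not part of the diff:

package cache

import "github.com/prysmaticlabs/prysm/container/slice"

// committeeWindowSketch mirrors startEndIndices plus the bounds check that
// Committee performs before slicing ShuffledIndices.
func committeeWindowSketch(validatorCount, committeeCount, index uint64) (start, end uint64, ok bool) {
    start = slice.SplitOffset(validatorCount, committeeCount, index)
    end = slice.SplitOffset(validatorCount, committeeCount, index+1)
    ok = end <= validatorCount && end >= start
    return start, end, ok
}

// With 6 shuffled indices split into 3 committees (the layout used in
// TestCommitteeCache_CommitteesByEpoch), committee 1 would cover [2, 4):
//   committeeWindowSketch(6, 3, 1) == (2, 4, true)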
Some files were not shown because too many files have changed in this diff.