mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-09 21:38:05 -05:00)

Compare commits: v1.2.0...interopFix (813 commits)
.bazelignore (normal file, 0 changed lines)

.bazelrc (13 changed lines)
@@ -39,16 +39,15 @@ build --define kafka_enabled=false
test --define kafka_enabled=false
run --define kafka_enabled=false

# Enable blst by default, we use a config so that our fuzzer stops complaining.
build --config=blst_enabled
test --config=blst_enabled
run --config=blst_enabled
build --define blst_disabled=false
test --define blst_disabled=false
run --define blst_disabled=false

build:kafka_enabled --define kafka_enabled=true
build:kafka_enabled --define gotags=kafka_enabled

build:blst_enabled --define blst_enabled=true
build:blst_enabled --define gotags=blst_enabled
build:blst_disabled --define blst_disabled=true
build:blst_disabled --define gotags=blst_disabled

# Release flags
build:release --compilation_mode=opt
@@ -84,7 +83,7 @@ build:fuzz --linkopt -Wl,--no-as-needed
build:fuzz --define=gc_goopts=-d=libfuzzer,checkptr
build:fuzz --run_under=//tools:fuzz_wrapper
build:fuzz --compilation_mode=opt
build:fuzz --define=blst_enabled=false
build:fuzz --define=blst_disabled=true

test:fuzz --local_test_jobs="HOST_CPUS*.5"
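The gotags defines above correspond to Go build tags, which is how both Bazel (via `--define gotags=...`) and plain `go build -tags=...` can agree on which BLS-related sources get compiled. A minimal sketch of a source file gated on such a tag; the file layout and package name are hypothetical, not taken from the Prysm tree:

```go
// +build blst_enabled

// Package blstbackend is only compiled when the blst_enabled build tag is set,
// e.g. via `--define gotags=blst_enabled` in Bazel or `go build -tags=blst_enabled`.
// (Illustrative sketch; Prysm's actual tag-gated files live elsewhere.)
package blstbackend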
@@ -2,7 +2,7 @@
# across machines, developers, and workspaces.
#
# This config is loaded from https://github.com/bazelbuild/bazel-toolchains/blob/master/bazelrc/latest.bazelrc
build:remote-cache --remote_timeout=3600
#build:remote-cache --remote_timeout=3600
#build:remote-cache --spawn_strategy=standalone
#build:remote-cache --strategy=Javac=standalone
#build:remote-cache --strategy=Closure=standalone
@@ -11,11 +11,18 @@ build:remote-cache --remote_timeout=3600
# Prysm specific remote-cache properties.
#build:remote-cache --disk_cache=
build:remote-cache --remote_download_minimal
build:remote-cache --remote_cache=grpc://bazel-remote-cache:9092
build:remote-cache --experimental_remote_downloader=grpc://bazel-remote-cache:9092
build:remote-cache --remote_local_fallback
# Enforce stricter environment rules, which eliminates some non-hermetic
# behavior and therefore improves both the remote cache hit rate and the
# correctness and repeatability of the build.
build:remote-cache --incompatible_strict_action_env=true

# Import workspace options.
import %workspace%/.bazelrc

startup --host_jvm_args=-Xmx1000m --host_jvm_args=-Xms1000m
startup --host_jvm_args=-Xmx2g --host_jvm_args=-Xms2g
query --repository_cache=/tmp/repositorycache
query --experimental_repository_cache_hardlinks
build --repository_cache=/tmp/repositorycache
@@ -30,4 +30,4 @@ comment:

ignore:
- "**/*.pb.go"
- "**/*_mock.go"
- "**/testing/**/*"
@@ -1,11 +1,17 @@
version = 1

exclude_patterns = [
  "tools/analyzers/**",
  "validator/keymanager/remote/keymanager_test.go",
  "validator/web/site_data.go"
]

[[analyzers]]
name = "go"
enabled = true

[analyzers.meta]
import_paths = ["github.com/prysmaticlabs/prysm"]
[analyzers.meta]
import_paths = ["github.com/prysmaticlabs/prysm"]

[[analyzers]]
name = "test-coverage"
@@ -14,3 +20,7 @@ enabled = true
[[analyzers]]
name = "shell"
enabled = true

[[analyzers]]
name = "secrets"
enabled = true
.github/workflows/go.yml (vendored, 8 changed lines)
@@ -24,6 +24,12 @@ jobs:
        uses: ./.github/actions/gofmt
        with:
          path: ./

      - name: GoImports checker
        id: goimports
        uses: Jerome1337/goimports-action@v1.0.2
        with:
          goimports-path: ./

  build:
    name: Build
@@ -44,7 +50,7 @@ jobs:

      - name: Build
        # Use blst tag to allow go and bazel builds for blst.
        run: go build -v --tags=blst_enabled ./...
        run: go build -v ./...

      # Tests run via Bazel for now...
      # - name: Test
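The added GoImports step is intended to fail the workflow when a file's import block is not arranged the way goimports would write it, with the standard library grouped ahead of external modules. A small hedged illustration of that grouping; the package, function, and file contents are hypothetical:

```go
package example

import (
	"context"
	"fmt"

	"github.com/sirupsen/logrus"
)

// Hello logs and returns a greeting; it exists only so the imports above are used.
func Hello(ctx context.Context, name string) string {
	_ = ctx
	logrus.WithField("name", name).Info("greeting")
	return fmt.Sprintf("hello, %s", name)
}
```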
BUILD.bazel (29 changed lines)
@@ -14,6 +14,7 @@ exports_files([

# gazelle:prefix github.com/prysmaticlabs/prysm
# gazelle:map_kind go_library go_library @prysm//tools/go:def.bzl
# gazelle:map_kind go_test go_test @prysm//tools/go:def.bzl
# gazelle:map_kind go_repository go_repository @prysm//tools/go:def.bzl
gazelle(
    name = "gazelle",
@@ -23,7 +24,16 @@ gazelle(
# Protobuf compiler (non-gRPC)
alias(
    name = "proto_compiler",
    actual = "@io_bazel_rules_go//proto:gogofast_proto",
    actual = "@io_bazel_rules_go//proto:go_proto",
    visibility = [
        "//proto:__subpackages__",
    ],
)

# Cast protobuf compiler (non-gRPC)
alias(
    name = "cast_proto_compiler",
    actual = "@com_github_prysmaticlabs_protoc_gen_go_cast//:go_cast",
    visibility = [
        "//proto:__subpackages__",
    ],
@@ -32,21 +42,21 @@ alias(
# Protobuf compiler (gRPC)
alias(
    name = "grpc_proto_compiler",
    actual = "@io_bazel_rules_go//proto:gogofast_grpc",
    actual = "@io_bazel_rules_go//proto:go_grpc",
    visibility = ["//visibility:public"],
)

# Protobuf gRPC compiler without gogoproto. Required for gRPC gateway.
# Cast protobuf compiler (gRPC)
alias(
    name = "grpc_nogogo_proto_compiler",
    actual = "@io_bazel_rules_go//proto:go_grpc",
    name = "cast_grpc_proto_compiler",
    actual = "@com_github_prysmaticlabs_protoc_gen_go_cast//:go_cast_grpc",
    visibility = ["//visibility:public"],
)

# Protobuf gRPC gateway compiler
alias(
    name = "grpc_gateway_proto_compiler",
    actual = "@com_github_grpc_ecosystem_grpc_gateway//protoc-gen-grpc-gateway:go_gen_grpc_gateway",
    actual = "@com_github_grpc_ecosystem_grpc_gateway_v2//protoc-gen-grpc-gateway:go_gen_grpc_gateway",
    visibility = ["//visibility:public"],
)

@@ -111,6 +121,7 @@ nogo(
        "//tools/analyzers/shadowpredecl:go_tool_library",
        "//tools/analyzers/nop:go_tool_library",
        "//tools/analyzers/slicedirect:go_tool_library",
        "//tools/analyzers/interfacechecker:go_tool_library",
        "//tools/analyzers/ineffassign:go_tool_library",
        "//tools/analyzers/properpermissions:go_tool_library",
    ] + select({
@@ -147,3 +158,9 @@ string_setting(
        "libfuzzer",
    ],
)

sh_binary(
    name = "prysm_sh",
    srcs = ["prysm.sh"],
    visibility = ["//visibility:public"],
)
@@ -175,7 +175,7 @@ We consider two types of contributions to our repo and categorize them as follow

### Part-Time Contributors

Anyone can become a part-time contributor and help out on implementing eth2. The responsibilities of a part-time contributor include:
Anyone can become a part-time contributor and help out on implementing Ethereum consensus. The responsibilities of a part-time contributor include:

- Engaging in Gitter conversations, asking the questions on how to begin contributing to the project
- Opening up github issues to express interest in code to implement
@@ -1,6 +1,6 @@
# Prysm Client Interoperability Guide

This README details how to setup Prysm for interop testing for usage with other Ethereum 2.0 clients.
This README details how to setup Prysm for interop testing for usage with other Ethereum consensus clients.

## Installation & Setup
@@ -1,11 +1,11 @@
# Prysm: An Ethereum 2.0 Implementation Written in Go
# Prysm: An Ethereum Consensus Implementation Written in Go

[](https://buildkite.com/prysmatic-labs/prysm)
[](https://goreportcard.com/report/github.com/prysmaticlabs/prysm)
[](https://github.com/ethereum/eth2.0-specs/tree/v1.0.0)
[](https://discord.gg/CTYGPUJ)

This is the core repository for Prysm, a [Golang](https://golang.org/) implementation of the [Ethereum 2.0](https://ethereum.org/en/eth2/) specification, developed by [Prysmatic Labs](https://prysmaticlabs.com). See the [Changelog](https://github.com/prysmaticlabs/prysm/releases) for details of the latest releases and upcoming breaking changes.
This is the core repository for Prysm, a [Golang](https://golang.org/) implementation of the [Ethereum Consensus](https://ethereum.org/en/eth2/) specification, developed by [Prysmatic Labs](https://prysmaticlabs.com). See the [Changelog](https://github.com/prysmaticlabs/prysm/releases) for details of the latest releases and upcoming breaking changes.

### Getting Started
@@ -5,7 +5,7 @@ Effective as of Oct 14, 2020
By downloading, accessing or using the Prysm implementation (“Prysm”), you (referenced herein as “you” or the “user”) certify that you have read and agreed to the terms and conditions below (the “Terms”) which form a binding contract between you and Prysmatic Labs (referenced herein as “we” or “us”). If you do not agree to the Terms, do not download or use Prysm.

### About Prysm
Prysm is a client implementation for Ethereum 2.0 Phase 0 protocol for a proof-of-stake blockchain. To participate in the network, a user must send ETH from the Eth1.0 chain into a validator deposit contract, which will queue in the user as a validator in the system. Validators participate in proposing and voting on blocks in the protocol, and the network applies rewards/penalties based on their behavior. A detailed set of installation and usage instructions as well as breakdowns of each individual component are available in the official documentation portal, however, we do not warrant the accuracy, completeness or usefulness of this documentation. Any reliance you place on such information is strictly at your own risk.
Prysm is a client implementation for Ethereum consensus protocol for a proof-of-stake blockchain. To participate in the network, a user must send ETH from the Eth1.0 chain into a validator deposit contract, which will queue in the user as a validator in the system. Validators participate in proposing and voting on blocks in the protocol, and the network applies rewards/penalties based on their behavior. A detailed set of installation and usage instructions as well as breakdowns of each individual component are available in the official documentation portal, however, we do not warrant the accuracy, completeness or usefulness of this documentation. Any reliance you place on such information is strictly at your own risk.

### Licensing Terms
Prysm is a fully open-source software program licensed pursuant to the GNU General Public License v3.0.
@@ -13,7 +13,7 @@ Prysm is a fully open-source software program licensed pursuant to the GNU Gener
The Prysmatic Labs name, the term “Prysm” and all related names, logos, product and service names, designs and slogans are trademarks of Prysmatic Labs or its affiliates and/or licensors. You must not use such marks without our prior written permission.

### Risks of Operating Prysm
The use of Prysm and acting as a validator on the Ethereum 2.0 network can lead to loss of money. Ethereum is still an experimental system and ETH remains a risky investment. You alone are responsible for your actions on Prysm including the security of your ETH and meeting any applicable minimum system requirements.
The use of Prysm and acting as a validator on the Ethereum network can lead to loss of money. Ethereum is still an experimental system and ETH remains a risky investment. You alone are responsible for your actions on Prysm including the security of your ETH and meeting any applicable minimum system requirements.

Use of Prysm and the ability to receive rewards or penalties may be affected at any time by mistakes made by the user or other users, software problems such as bugs, errors, incorrectly constructed transactions, unsafe cryptographic libraries or malware affecting the network, technical failures in the hardware of a user, security problems experienced by a user and/or actions or inactions of third parties and/or events experienced by third parties, among other risks. We cannot and do not guarantee that any user of Prysm will make money, that the Prysm network will operate in accordance with the documentation or that transactions will be effective or secure.
WORKSPACE (181 changed lines)
@@ -15,9 +15,9 @@ http_archive(

http_archive(
    name = "com_grail_bazel_toolchain",
    sha256 = "b924b102adc0c3368d38a19bd971cb4fa75362a27bc363d0084b90ca6877d3f0",
    strip_prefix = "bazel-toolchain-0.5.7",
    urls = ["https://github.com/grailbio/bazel-toolchain/archive/0.5.7.tar.gz"],
    sha256 = "040b9d00b8a03e8a28e38159ad0f2d0e0de625d93f453a9f226971a8c47e757b",
    strip_prefix = "bazel-toolchain-5f82830f9d6a1941c3eb29683c1864ccf2862454",
    urls = ["https://github.com/grailbio/bazel-toolchain/archive/5f82830f9d6a1941c3eb29683c1864ccf2862454.tar.gz"],
)

load("@com_grail_bazel_toolchain//toolchain:deps.bzl", "bazel_toolchain_dependencies")
@@ -60,10 +60,10 @@ bazel_skylib_workspace()

http_archive(
    name = "bazel_gazelle",
    sha256 = "1f4fc1d91826ec436ae04833430626f4cc02c20bb0a813c0c2f3c4c421307b1d",
    strip_prefix = "bazel-gazelle-e368a11b76e92932122d824970dc0ce5feb9c349",
    sha256 = "62ca106be173579c0a167deb23358fdfe71ffa1e4cfdddf5582af26520f1c66f",
    urls = [
        "https://github.com/bazelbuild/bazel-gazelle/archive/e368a11b76e92932122d824970dc0ce5feb9c349.tar.gz",
        "https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.23.0/bazel-gazelle-v0.23.0.tar.gz",
        "https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.23.0/bazel-gazelle-v0.23.0.tar.gz",
    ],
)

@@ -76,9 +76,9 @@ http_archive(

http_archive(
    name = "io_bazel_rules_docker",
    sha256 = "1286175a94c0b1335efe1d75d22ea06e89742557d3fac2a0366f242a6eac6f5a",
    strip_prefix = "rules_docker-ba4310833230294fa69b7d6ea1787ac684631a7d",
    urls = ["https://github.com/bazelbuild/rules_docker/archive/ba4310833230294fa69b7d6ea1787ac684631a7d.tar.gz"],
    sha256 = "59d5b42ac315e7eadffa944e86e90c2990110a1c8075f1cd145f487e999d22b3",
    strip_prefix = "rules_docker-0.17.0",
    urls = ["https://github.com/bazelbuild/rules_docker/releases/download/v0.17.0/rules_docker-v0.17.0.tar.gz"],
)

http_archive(
@@ -88,11 +88,13 @@ http_archive(
        # Required until https://github.com/bazelbuild/rules_go/pull/2450 merges otherwise nilness
        # nogo check fails for certain third_party dependencies.
        "//third_party:io_bazel_rules_go.patch",
        # Expose internals of go_test for custom build transitions.
        "//third_party:io_bazel_rules_go_test.patch",
    ],
    sha256 = "dbf5a9ef855684f84cac2e7ae7886c5a001d4f66ae23f6904da0faaaef0d61fc",
    sha256 = "7c10271940c6bce577d51a075ae77728964db285dac0a46614a7934dc34303e6",
    urls = [
        "https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.24.11/rules_go-v0.24.11.tar.gz",
        "https://github.com/bazelbuild/rules_go/releases/download/v0.24.11/rules_go-v0.24.11.tar.gz",
        "https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.26.0/rules_go-v0.26.0.tar.gz",
        "https://github.com/bazelbuild/rules_go/releases/download/v0.26.0/rules_go-v0.26.0.tar.gz",
    ],
)

@@ -156,40 +158,10 @@ load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_depe
go_rules_dependencies()

go_register_toolchains(
    go_version = "1.15.7",
    go_version = "1.16.4",
    nogo = "@//:nogo",
)

load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies")

gazelle_dependencies()

load("@io_bazel_rules_go//extras:embed_data_deps.bzl", "go_embed_data_dependencies")

go_embed_data_dependencies()

load("@com_github_atlassian_bazel_tools//gometalinter:deps.bzl", "gometalinter_dependencies")

gometalinter_dependencies()

load(
    "@io_bazel_rules_docker//go:image.bzl",
    _go_image_repos = "repositories",
)

# Golang images
# This is using gcr.io/distroless/base
_go_image_repos()

# CC images
# This is using gcr.io/distroless/base
load(
    "@io_bazel_rules_docker//cc:image.bzl",
    _cc_image_repos = "repositories",
)

_cc_image_repos()

http_archive(
    name = "prysm_testnet_site",
    build_file_content = """
@@ -225,20 +197,22 @@ filegroup(
    url = "https://github.com/eth2-clients/slashing-protection-interchange-tests/archive/b8413ca42dc92308019d0d4db52c87e9e125c4e9.tar.gz",
)

eth2_spec_version = "v1.1.0-beta.1"

http_archive(
    name = "eth2_spec_tests_general",
    build_file_content = """
filegroup(
    name = "test_data",
    srcs = glob([
        "**/*.ssz",
        "**/*.ssz_snappy",
        "**/*.yaml",
    ]),
    visibility = ["//visibility:public"],
)
""",
    sha256 = "ef5396e4b13995da9776eeb5ae346a2de90970c28da3c4f0dcaa4ab9f0ad1f93",
    url = "https://github.com/ethereum/eth2.0-spec-tests/releases/download/v1.0.0/general.tar.gz",
    sha256 = "e9b4cc60a3e676c6b4a9348424e44cff1ebada603ffb31b0df600dbd70e7fbf6",
    url = "https://github.com/ethereum/eth2.0-spec-tests/releases/download/%s/general.tar.gz" % eth2_spec_version,
)

http_archive(
@@ -247,14 +221,14 @@ http_archive(
filegroup(
    name = "test_data",
    srcs = glob([
        "**/*.ssz",
        "**/*.ssz_snappy",
        "**/*.yaml",
    ]),
    visibility = ["//visibility:public"],
)
""",
    sha256 = "170551b441e7d54b73248372ad9ce8cb6c148810b5f1364637117a63f4f1c085",
    url = "https://github.com/ethereum/eth2.0-spec-tests/releases/download/v1.0.0/minimal.tar.gz",
    sha256 = "cf82dc729ffe7b924f852e57d1973e1a6377c5b52acc903c953277fa9b4e6de8",
    url = "https://github.com/ethereum/eth2.0-spec-tests/releases/download/%s/minimal.tar.gz" % eth2_spec_version,
)

http_archive(
@@ -263,38 +237,46 @@ http_archive(
filegroup(
    name = "test_data",
    srcs = glob([
        "**/*.ssz",
        "**/*.ssz_snappy",
        "**/*.yaml",
    ]),
    visibility = ["//visibility:public"],
)
""",
    sha256 = "b541a9979b4703fa5ee5d2182b0b5313c38efc54ae7eaec2eef793230a52ec83",
    url = "https://github.com/ethereum/eth2.0-spec-tests/releases/download/v1.0.0/mainnet.tar.gz",
    sha256 = "6c6792375b81858037014e282d28a64b0cf12e12daf16054265c85403b8b329f",
    url = "https://github.com/ethereum/eth2.0-spec-tests/releases/download/%s/mainnet.tar.gz" % eth2_spec_version,
)

http_archive(
    name = "eth2_spec",
    build_file_content = """
filegroup(
    name = "spec_data",
    srcs = glob([
        "**/*.yaml",
    ]),
    visibility = ["//visibility:public"],
)
""",
    sha256 = "16094dad1bab4e8ab3adb60c10e311cd1e294cd7bbf5a89505f24bebd3d0e513",
    strip_prefix = "eth2.0-specs-" + eth2_spec_version[1:],
    url = "https://github.com/ethereum/eth2.0-specs/archive/refs/tags/%s.tar.gz" % eth2_spec_version,
)

http_archive(
    name = "com_github_bazelbuild_buildtools",
    sha256 = "b5d7dbc6832f11b6468328a376de05959a1a9e4e9f5622499d3bab509c26b46a",
    strip_prefix = "buildtools-bf564b4925ab5876a3f64d8b90fab7f769013d42",
    url = "https://github.com/bazelbuild/buildtools/archive/bf564b4925ab5876a3f64d8b90fab7f769013d42.zip",
    sha256 = "7a182df18df1debabd9e36ae07c8edfa1378b8424a04561b674d933b965372b3",
    strip_prefix = "buildtools-f2aed9ee205d62d45c55cfabbfd26342f8526862",
    url = "https://github.com/bazelbuild/buildtools/archive/f2aed9ee205d62d45c55cfabbfd26342f8526862.zip",
)

load("@com_github_bazelbuild_buildtools//buildifier:deps.bzl", "buildifier_dependencies")

buildifier_dependencies()

git_repository(
    name = "com_google_protobuf",
    commit = "fde7cf7358ec7cd69e8db9be4f1fa6a5c431386a",  # v3.13.0
    commit = "436bd7880e458532901c58f4d9d1ea23fa7edd52",
    remote = "https://github.com/protocolbuffers/protobuf",
    shallow_since = "1597443653 -0700",
    shallow_since = "1617835118 -0700",
)

load("@com_google_protobuf//:protobuf_deps.bzl", "protobuf_deps")

protobuf_deps()

# Group the sources of the library so that CMake rule have access to it
all_content = """filegroup(name = "all", srcs = glob(["**"]), visibility = ["//visibility:public"])"""

@@ -331,33 +313,6 @@ http_archive(

# External dependencies

http_archive(
    name = "sszgen",  # Hack because we don't want to build this binary with libfuzzer, but need it to build.
    build_file_content = """
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_binary")

go_library(
    name = "go_default_library",
    srcs = [
        "sszgen/main.go",
        "sszgen/marshal.go",
        "sszgen/size.go",
        "sszgen/unmarshal.go",
    ],
    importpath = "github.com/ferranbt/fastssz/sszgen",
    visibility = ["//visibility:private"],
)

go_binary(
    name = "sszgen",
    embed = [":go_default_library"],
    visibility = ["//visibility:public"],
)
""",
    strip_prefix = "fastssz-06015a5d84f9e4eefe2c21377ca678fa8f1a1b09",
    urls = ["https://github.com/ferranbt/fastssz/archive/06015a5d84f9e4eefe2c21377ca678fa8f1a1b09.tar.gz"],
)

http_archive(
    name = "prysm_web_ui",
    build_file_content = """
@@ -367,9 +322,9 @@ filegroup(
    visibility = ["//visibility:public"],
)
""",
    sha256 = "edb80f3a695d84f6000f0e05abf7a4bbf207c03abb91219780ec97e7d6ad21c8",
    sha256 = "54ce527b83d092da01127f2e3816f4d5cfbab69354caba8537f1ea55889b6d7c",
    urls = [
        "https://github.com/prysmaticlabs/prysm-web-ui/releases/download/v1.0.0-beta.3/prysm-web-ui.tar.gz",
        "https://github.com/prysmaticlabs/prysm-web-ui/releases/download/v1.0.0-beta.4/prysm-web-ui.tar.gz",
    ],
)

@@ -382,8 +337,46 @@ load("@prysm//third_party/herumi:herumi.bzl", "bls_dependencies")

bls_dependencies()

load(
    "@io_bazel_rules_docker//go:image.bzl",
    _go_image_repos = "repositories",
)

# Golang images
# This is using gcr.io/distroless/base
_go_image_repos()

# CC images
# This is using gcr.io/distroless/base
load(
    "@io_bazel_rules_docker//cc:image.bzl",
    _cc_image_repos = "repositories",
)

_cc_image_repos()

load("@com_github_ethereum_go_ethereum//:deps.bzl", "geth_dependencies")

geth_dependencies()

load("@io_bazel_rules_go//extras:embed_data_deps.bzl", "go_embed_data_dependencies")

go_embed_data_dependencies()

load("@com_github_atlassian_bazel_tools//gometalinter:deps.bzl", "gometalinter_dependencies")

gometalinter_dependencies()

load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies")

gazelle_dependencies()

load("@com_github_bazelbuild_buildtools//buildifier:deps.bzl", "buildifier_dependencies")

buildifier_dependencies()

load("@com_google_protobuf//:protobuf_deps.bzl", "protobuf_deps")

protobuf_deps()

# Do NOT add new go dependencies here! Refer to DEPENDENCIES.md!
bazel.sh (4 changed lines)
@@ -6,6 +6,6 @@

env -i \
  PATH=/usr/bin:/bin \
  HOME=$HOME \
  GOOGLE_APPLICATION_CREDENTIALS=$GOOGLE_APPLICATION_CREDENTIALS \
  HOME="$HOME" \
  GOOGLE_APPLICATION_CREDENTIALS="$GOOGLE_APPLICATION_CREDENTIALS" \
  bazel "$@"
@@ -1,138 +1,19 @@
load("@prysm//tools/go:def.bzl", "go_library")
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_test")
load("@io_bazel_rules_docker//go:image.bzl", "go_image")
load("@io_bazel_rules_docker//container:container.bzl", "container_bundle", "container_image")
load("//tools:go_image.bzl", "go_image_alpine", "go_image_debug")
load("@io_bazel_rules_docker//contrib:push-all.bzl", "docker_push")
load("//tools:target_migration.bzl", "moved_targets")

go_library(
    name = "go_default_library",
    srcs = [
        "log.go",
        "main.go",
        "usage.go",
    ],
    importpath = "github.com/prysmaticlabs/prysm/beacon-chain",
    visibility = ["//beacon-chain:__subpackages__"],
    deps = [
        "//beacon-chain/db:go_default_library",
        "//beacon-chain/flags:go_default_library",
        "//beacon-chain/node:go_default_library",
        "//shared/cmd:go_default_library",
        "//shared/debug:go_default_library",
        "//shared/featureconfig:go_default_library",
        "//shared/fileutil:go_default_library",
        "//shared/journald:go_default_library",
        "//shared/logutil:go_default_library",
        "//shared/maxprocs:go_default_library",
        "//shared/tos:go_default_library",
        "//shared/version:go_default_library",
        "@com_github_ethereum_go_ethereum//log:go_default_library",
        "@com_github_ipfs_go_log_v2//:go_default_library",
        "@com_github_joonix_log//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@com_github_urfave_cli_v2//:go_default_library",
        "@com_github_x_cray_logrus_prefixed_formatter//:go_default_library",
    ],
)

go_image(
    name = "image",
    base = select({
        "//tools:base_image_alpine": "//tools:alpine_cc_image",
        "//tools:base_image_cc": "//tools:cc_image",
        "//conditions:default": "//tools:cc_image",
    }),
    binary = ":beacon-chain",
    tags = ["manual"],
    visibility = ["//visibility:private"],
)

container_image(
    name = "image_with_creation_time",
    base = "image",
    stamp = True,
)

container_bundle(
    name = "image_bundle",
    images = {
        "gcr.io/prysmaticlabs/prysm/beacon-chain:latest": ":image_with_creation_time",
        "gcr.io/prysmaticlabs/prysm/beacon-chain:{DOCKER_TAG}": ":image_with_creation_time",
        "index.docker.io/prysmaticlabs/prysm-beacon-chain:latest": ":image_with_creation_time",
        "index.docker.io/prysmaticlabs/prysm-beacon-chain:{DOCKER_TAG}": ":image_with_creation_time",
    },
    tags = ["manual"],
)

go_image_debug(
    name = "image_debug",
    image = ":image",
    tags = ["manual"],
)

container_bundle(
    name = "image_bundle_debug",
    images = {
        "gcr.io/prysmaticlabs/prysm/beacon-chain:latest-debug": ":image_debug",
        "gcr.io/prysmaticlabs/prysm/beacon-chain:{DOCKER_TAG}-debug": ":image_debug",
        "index.docker.io/prysmaticlabs/prysm-beacon-chain:latest-debug": ":image_debug",
        "index.docker.io/prysmaticlabs/prysm-beacon-chain:{DOCKER_TAG}-debug": ":image_debug",
    },
    tags = ["manual"],
)

go_image_alpine(
    name = "image_alpine",
    image = ":image",
    tags = ["manual"],
)

container_bundle(
    name = "image_bundle_alpine",
    images = {
        "gcr.io/prysmaticlabs/prysm/beacon-chain:latest-alpine": ":image_alpine",
        "gcr.io/prysmaticlabs/prysm/beacon-chain:{DOCKER_TAG}-alpine": ":image_alpine",
        "index.docker.io/prysmaticlabs/prysm-beacon-chain:latest-alpine": ":image_alpine",
        "index.docker.io/prysmaticlabs/prysm-beacon-chain:{DOCKER_TAG}-alpine": ":image_alpine",
    },
    tags = ["manual"],
)

docker_push(
    name = "push_images",
    bundle = ":image_bundle",
    tags = ["manual"],
)

docker_push(
    name = "push_images_debug",
    bundle = ":image_bundle_debug",
    tags = ["manual"],
)

docker_push(
    name = "push_images_alpine",
    bundle = ":image_bundle_alpine",
    tags = ["manual"],
)

go_binary(
    name = "beacon-chain",
    embed = [":go_default_library"],
    visibility = [
        "//beacon-chain:__subpackages__",
        "//endtoend:__pkg__",
    ],
)

go_test(
    name = "go_default_test",
    size = "small",
    srcs = ["usage_test.go"],
    embed = [":go_default_library"],
    deps = [
        "//shared/featureconfig:go_default_library",
        "@com_github_urfave_cli_v2//:go_default_library",
moved_targets(
    [
        ":push_images_debug",
        ":push_images_alpine",
        ":push_images",
        ":image_bundle_debug",
        ":image_debug",
        ":image_bundle_alpine",
        ":image_bundle",
        ":image_with_creation_time",
        ":image_alpine",
        ":image",
        ":go_default_test",
        ":beacon-chain",
    ],
    "//cmd/beacon-chain",
)
@@ -1,6 +1,6 @@
# Prysmatic Labs Beacon Chain Implementation

This is the main project folder for the beacon chain implementation of eth2 written in Go by [Prysmatic Labs](https://prysmaticlabs.com).
This is the main project folder for the beacon chain implementation of Ethereum written in Go by [Prysmatic Labs](https://prysmaticlabs.com).

You can also read our main [README](https://github.com/prysmaticlabs/prysm/blob/master/README.md) and join our active chat room on Discord.
@@ -1,11 +1,11 @@
load("@prysm//tools/go:def.bzl", "go_library")
load("@io_bazel_rules_go//go:def.bzl", "go_test")
load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = [
        "chain_info.go",
        "head.go",
        "head_sync_committee_info.go",
        "info.go",
        "init_sync_process_block.go",
        "log.go",
@@ -27,7 +27,7 @@ go_library(
    deps = [
        "//beacon-chain/cache:go_default_library",
        "//beacon-chain/cache/depositcache:go_default_library",
        "//beacon-chain/core/blocks:go_default_library",
        "//beacon-chain/core/altair:go_default_library",
        "//beacon-chain/core/epoch/precompute:go_default_library",
        "//beacon-chain/core/feed:go_default_library",
        "//beacon-chain/core/feed/state:go_default_library",
@@ -35,7 +35,6 @@ go_library(
        "//beacon-chain/core/state:go_default_library",
        "//beacon-chain/db:go_default_library",
        "//beacon-chain/db/filters:go_default_library",
        "//beacon-chain/flags:go_default_library",
        "//beacon-chain/forkchoice:go_default_library",
        "//beacon-chain/forkchoice/protoarray:go_default_library",
        "//beacon-chain/operations/attestations:go_default_library",
@@ -45,21 +44,28 @@ go_library(
        "//beacon-chain/powchain:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//beacon-chain/state/stategen:go_default_library",
        "//proto/beacon/p2p/v1:go_default_library",
        "//beacon-chain/state/v2:go_default_library",
        "//cmd/beacon-chain/flags:go_default_library",
        "//proto/eth/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//proto/prysm/v1alpha1/block:go_default_library",
        "//shared/attestationutil:go_default_library",
        "//shared/bls:go_default_library",
        "//shared/bytesutil:go_default_library",
        "//shared/copyutil:go_default_library",
        "//shared/featureconfig:go_default_library",
        "//shared/mputil:go_default_library",
        "//shared/params:go_default_library",
        "//shared/slotutil:go_default_library",
        "//shared/timeutils:go_default_library",
        "//shared/traceutil:go_default_library",
        "//shared/version:go_default_library",
        "@com_github_emicklei_dot//:go_default_library",
        "@com_github_hashicorp_golang_lru//:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_prometheus_client_golang//prometheus:go_default_library",
        "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
        "@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
        "@com_github_prysmaticlabs_eth2_types//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@io_opencensus_go//trace:go_default_library",
    ],
@@ -82,6 +88,7 @@ go_test(
        "checktags_test.go",
        "head_test.go",
        "info_test.go",
        "init_test.go",
        "metrics_test.go",
        "process_attestation_test.go",
        "process_block_test.go",
@@ -103,8 +110,9 @@ go_test(
        "//beacon-chain/p2p:go_default_library",
        "//beacon-chain/powchain:go_default_library",
        "//beacon-chain/state/stateutil:go_default_library",
        "//proto/beacon/db:go_default_library",
        "//proto/beacon/p2p/v1:go_default_library",
        "//beacon-chain/state/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//proto/prysm/v1alpha1/wrapper:go_default_library",
        "//shared/bytesutil:go_default_library",
        "//shared/event:go_default_library",
        "//shared/params:go_default_library",
@@ -114,11 +122,10 @@ go_test(
        "@com_github_ethereum_go_ethereum//:go_default_library",
        "@com_github_ethereum_go_ethereum//common:go_default_library",
        "@com_github_ethereum_go_ethereum//core/types:go_default_library",
        "@com_github_gogo_protobuf//proto:go_default_library",
        "@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@com_github_sirupsen_logrus//hooks/test:go_default_library",
        "@in_gopkg_d4l3k_messagediff_v1//:go_default_library",
        "@org_golang_google_protobuf//proto:go_default_library",
        "@org_golang_x_net//context:go_default_library",
    ],
)
@@ -128,6 +135,7 @@ go_test(
    srcs = [
        "chain_info_norace_test.go",
        "checktags_test.go",
        "init_test.go",
        "receive_block_test.go",
        "service_norace_test.go",
    ],
@@ -152,7 +160,8 @@ go_test(
        "//beacon-chain/db/testing:go_default_library",
        "//beacon-chain/p2p:go_default_library",
        "//beacon-chain/powchain:go_default_library",
        "//proto/beacon/p2p/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//proto/prysm/v1alpha1/wrapper:go_default_library",
        "//shared/bytesutil:go_default_library",
        "//shared/event:go_default_library",
        "//shared/params:go_default_library",
@@ -162,10 +171,9 @@ go_test(
        "@com_github_ethereum_go_ethereum//:go_default_library",
        "@com_github_ethereum_go_ethereum//common:go_default_library",
        "@com_github_ethereum_go_ethereum//core/types:go_default_library",
        "@com_github_gogo_protobuf//proto:go_default_library",
        "@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@com_github_sirupsen_logrus//hooks/test:go_default_library",
        "@org_golang_google_protobuf//proto:go_default_library",
        "@org_golang_x_net//context:go_default_library",
    ],
)
@@ -4,15 +4,16 @@ import (
    "context"
    "time"

    ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
    "go.opencensus.io/trace"

    types "github.com/prysmaticlabs/eth2-types"
    "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
    "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
    "github.com/prysmaticlabs/prysm/beacon-chain/state"
    pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
    ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
    "github.com/prysmaticlabs/prysm/shared/bytesutil"
    "github.com/prysmaticlabs/prysm/shared/copyutil"
    "github.com/prysmaticlabs/prysm/shared/params"
    "go.opencensus.io/trace"
)

// ChainInfoFetcher defines a common interface for methods in blockchain service which
@@ -22,15 +23,18 @@ type ChainInfoFetcher interface {
    FinalizationFetcher
    GenesisFetcher
    CanonicalFetcher
    ForkFetcher
    TimeFetcher
    HeadDomainFetcher
}

// TimeFetcher retrieves the Eth2 data that's related to time.
// TimeFetcher retrieves the Ethereum consensus data that's related to time.
type TimeFetcher interface {
    GenesisTime() time.Time
    CurrentSlot() uint64
    CurrentSlot() types.Slot
}

// GenesisFetcher retrieves the eth2 data related to its genesis.
// GenesisFetcher retrieves the Ethereum consensus data related to its genesis.
type GenesisFetcher interface {
    GenesisValidatorRoot() [32]byte
}
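These interface changes swap raw uint64 slots for the typed types.Slot from github.com/prysmaticlabs/eth2-types. A minimal sketch of a TimeFetcher implementation against the new signature; the clock struct and its fields are hypothetical, not Prysm's actual service:

```go
package clock

import (
	"time"

	types "github.com/prysmaticlabs/eth2-types"
)

// fixedClock satisfies the TimeFetcher shape shown in the diff above.
type fixedClock struct {
	genesis        time.Time
	secondsPerSlot uint64
}

// GenesisTime returns the configured genesis time.
func (c fixedClock) GenesisTime() time.Time { return c.genesis }

// CurrentSlot derives the slot from wall-clock time elapsed since genesis.
func (c fixedClock) CurrentSlot() types.Slot {
	if time.Now().Before(c.genesis) {
		return 0
	}
	elapsed := uint64(time.Since(c.genesis).Seconds())
	return types.Slot(elapsed / c.secondsPerSlot)
}
```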
@@ -38,20 +42,25 @@ type GenesisFetcher interface {
// HeadFetcher defines a common interface for methods in blockchain service which
// directly retrieves head related data.
type HeadFetcher interface {
    HeadSlot() uint64
    HeadSlot() types.Slot
    HeadRoot(ctx context.Context) ([]byte, error)
    HeadBlock(ctx context.Context) (*ethpb.SignedBeaconBlock, error)
    HeadState(ctx context.Context) (*state.BeaconState, error)
    HeadValidatorsIndices(ctx context.Context, epoch uint64) ([]uint64, error)
    HeadSeed(ctx context.Context, epoch uint64) ([32]byte, error)
    HeadBlock(ctx context.Context) (block.SignedBeaconBlock, error)
    HeadState(ctx context.Context) (state.BeaconState, error)
    HeadValidatorsIndices(ctx context.Context, epoch types.Epoch) ([]types.ValidatorIndex, error)
    HeadSeed(ctx context.Context, epoch types.Epoch) ([32]byte, error)
    HeadGenesisValidatorRoot() [32]byte
    HeadETH1Data() *ethpb.Eth1Data
    HeadPublicKeyToValidatorIndex(ctx context.Context, pubKey [48]byte) (types.ValidatorIndex, bool)
    HeadValidatorIndexToPublicKey(ctx context.Context, index types.ValidatorIndex) ([48]byte, error)
    ProtoArrayStore() *protoarray.Store
    ChainHeads() ([][32]byte, []types.Slot)
    HeadSyncCommitteeFetcher
    HeadDomainFetcher
}

// ForkFetcher retrieves the current fork information of the Ethereum beacon chain.
type ForkFetcher interface {
    CurrentFork() *pb.Fork
    CurrentFork() *ethpb.Fork
}

// CanonicalFetcher retrieves the current chain's canonical information.
@@ -74,7 +83,7 @@ func (s *Service) FinalizedCheckpt() *ethpb.Checkpoint {
        return &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
    }

    return state.CopyCheckpoint(s.finalizedCheckpt)
    return copyutil.CopyCheckpoint(s.finalizedCheckpt)
}

// CurrentJustifiedCheckpt returns the current justified checkpoint from head state.
@@ -83,7 +92,7 @@ func (s *Service) CurrentJustifiedCheckpt() *ethpb.Checkpoint {
        return &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
    }

    return state.CopyCheckpoint(s.justifiedCheckpt)
    return copyutil.CopyCheckpoint(s.justifiedCheckpt)
}

// PreviousJustifiedCheckpt returns the previous justified checkpoint from head state.
@@ -92,11 +101,11 @@ func (s *Service) PreviousJustifiedCheckpt() *ethpb.Checkpoint {
        return &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
    }

    return state.CopyCheckpoint(s.prevJustifiedCheckpt)
    return copyutil.CopyCheckpoint(s.prevJustifiedCheckpt)
}

// HeadSlot returns the slot of the head of the chain.
func (s *Service) HeadSlot() uint64 {
func (s *Service) HeadSlot() types.Slot {
    s.headLock.RLock()
    defer s.headLock.RUnlock()
@@ -117,15 +126,15 @@ func (s *Service) HeadRoot(ctx context.Context) ([]byte, error) {
|
||||
return r[:], nil
|
||||
}
|
||||
|
||||
b, err := s.beaconDB.HeadBlock(ctx)
|
||||
b, err := s.cfg.BeaconDB.HeadBlock(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if b == nil {
|
||||
if b == nil || b.IsNil() {
|
||||
return params.BeaconConfig().ZeroHash[:], nil
|
||||
}
|
||||
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
r, err := b.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -136,7 +145,7 @@ func (s *Service) HeadRoot(ctx context.Context) ([]byte, error) {
|
||||
// HeadBlock returns the head block of the chain.
|
||||
// If the head is nil from service struct,
|
||||
// it will attempt to get the head block from DB.
|
||||
func (s *Service) HeadBlock(ctx context.Context) (*ethpb.SignedBeaconBlock, error) {
|
||||
func (s *Service) HeadBlock(ctx context.Context) (block.SignedBeaconBlock, error) {
|
||||
s.headLock.RLock()
|
||||
defer s.headLock.RUnlock()
|
||||
|
||||
@@ -144,13 +153,13 @@ func (s *Service) HeadBlock(ctx context.Context) (*ethpb.SignedBeaconBlock, erro
|
||||
return s.headBlock(), nil
|
||||
}
|
||||
|
||||
return s.beaconDB.HeadBlock(ctx)
|
||||
return s.cfg.BeaconDB.HeadBlock(ctx)
|
||||
}
|
||||
|
||||
// HeadState returns the head state of the chain.
|
||||
// If the head is nil from service struct,
|
||||
// it will attempt to get the head state from DB.
|
||||
func (s *Service) HeadState(ctx context.Context) (*state.BeaconState, error) {
|
||||
func (s *Service) HeadState(ctx context.Context) (state.BeaconState, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.HeadState")
|
||||
defer span.End()
|
||||
s.headLock.RLock()
|
||||
@@ -163,22 +172,22 @@ func (s *Service) HeadState(ctx context.Context) (*state.BeaconState, error) {
|
||||
return s.headState(ctx), nil
|
||||
}
|
||||
|
||||
return s.stateGen.StateByRoot(ctx, s.headRoot())
|
||||
return s.cfg.StateGen.StateByRoot(ctx, s.headRoot())
|
||||
}
|
||||
|
||||
// HeadValidatorsIndices returns a list of active validator indices from the head view of a given epoch.
|
||||
func (s *Service) HeadValidatorsIndices(ctx context.Context, epoch uint64) ([]uint64, error) {
|
||||
func (s *Service) HeadValidatorsIndices(ctx context.Context, epoch types.Epoch) ([]types.ValidatorIndex, error) {
|
||||
s.headLock.RLock()
|
||||
defer s.headLock.RUnlock()
|
||||
|
||||
if !s.hasHeadState() {
|
||||
return []uint64{}, nil
|
||||
return []types.ValidatorIndex{}, nil
|
||||
}
|
||||
return helpers.ActiveValidatorIndices(s.headState(ctx), epoch)
|
||||
}
|
||||
|
||||
// HeadSeed returns the seed from the head view of a given epoch.
|
||||
func (s *Service) HeadSeed(ctx context.Context, epoch uint64) ([32]byte, error) {
|
||||
func (s *Service) HeadSeed(ctx context.Context, epoch types.Epoch) ([32]byte, error) {
|
||||
s.headLock.RLock()
|
||||
defer s.headLock.RUnlock()
|
||||
|
||||
@@ -214,7 +223,7 @@ func (s *Service) HeadETH1Data() *ethpb.Eth1Data {
|
||||
|
||||
// ProtoArrayStore returns the proto array store object.
|
||||
func (s *Service) ProtoArrayStore() *protoarray.Store {
|
||||
return s.forkChoiceStore.Store()
|
||||
return s.cfg.ForkChoiceStore.Store()
|
||||
}
|
||||
|
||||
// GenesisTime returns the genesis time of beacon chain.
|
||||
@@ -235,12 +244,12 @@ func (s *Service) GenesisValidatorRoot() [32]byte {
|
||||
}
|
||||
|
||||
// CurrentFork retrieves the latest fork information of the beacon chain.
|
||||
func (s *Service) CurrentFork() *pb.Fork {
|
||||
func (s *Service) CurrentFork() *ethpb.Fork {
|
||||
s.headLock.RLock()
|
||||
defer s.headLock.RUnlock()
|
||||
|
||||
if !s.hasHeadState() {
|
||||
return &pb.Fork{
|
||||
return ðpb.Fork{
|
||||
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
}
|
||||
@@ -251,10 +260,52 @@ func (s *Service) CurrentFork() *pb.Fork {
|
||||
// IsCanonical returns true if the input block root is part of the canonical chain.
|
||||
func (s *Service) IsCanonical(ctx context.Context, blockRoot [32]byte) (bool, error) {
|
||||
// If the block has been finalized, the block will always be part of the canonical chain.
|
||||
if s.beaconDB.IsFinalizedBlock(ctx, blockRoot) {
|
||||
if s.cfg.BeaconDB.IsFinalizedBlock(ctx, blockRoot) {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// If the block has not been finalized, check fork choice store to see if the block is canonical
|
||||
return s.forkChoiceStore.IsCanonical(blockRoot), nil
|
||||
return s.cfg.ForkChoiceStore.IsCanonical(blockRoot), nil
|
||||
}
|
||||
|
||||
// ChainHeads returns all possible chain heads (leaves of fork choice tree).
|
||||
// Heads roots and heads slots are returned.
|
||||
func (s *Service) ChainHeads() ([][32]byte, []types.Slot) {
|
||||
nodes := s.ProtoArrayStore().Nodes()
|
||||
|
||||
// Deliberate choice not to preallocate space for the slices below.
|
||||
// Heads can't be more than 2-3 in the worst case, whereas preallocation would start at 64.
|
||||
headsRoots := make([][32]byte, 0)
|
||||
headsSlots := make([]types.Slot, 0)
|
||||
|
||||
nonExistentNode := ^uint64(0)
|
||||
for _, node := range nodes {
|
||||
// Possible heads have no children.
|
||||
if node.BestDescendant() == nonExistentNode && node.BestChild() == nonExistentNode {
|
||||
headsRoots = append(headsRoots, node.Root())
|
||||
headsSlots = append(headsSlots, node.Slot())
|
||||
}
|
||||
}
|
||||
|
||||
return headsRoots, headsSlots
|
||||
}
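ChainHeads treats any fork-choice node that has neither a best child nor a best descendant as a leaf, using ^uint64(0) as the "non-existent" sentinel. A minimal, self-contained sketch of that check (the node type below is a hypothetical stand-in for protoarray's node, not its real API):

package main

import "fmt"

// node is a hypothetical stand-in holding only the fields needed to show leaf detection.
type node struct {
	root                      [32]byte
	slot                      uint64
	bestChild, bestDescendant uint64
}

func main() {
	const nonExistentNode = ^uint64(0) // same sentinel the service compares against
	nodes := []node{
		{root: [32]byte{'a'}, slot: 100, bestChild: 1, bestDescendant: 2},
		{root: [32]byte{'b'}, slot: 101, bestChild: 2, bestDescendant: 2},
		{root: [32]byte{'c'}, slot: 102, bestChild: nonExistentNode, bestDescendant: nonExistentNode},
	}
	for _, n := range nodes {
		// A node with no best child and no best descendant has no children: it is a chain head.
		if n.bestChild == nonExistentNode && n.bestDescendant == nonExistentNode {
			fmt.Printf("head %q at slot %d\n", n.root[0], n.slot)
		}
	}
}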
|
||||
|
||||
// HeadPublicKeyToValidatorIndex returns the validator index of the `pubkey` in current head state.
|
||||
func (s *Service) HeadPublicKeyToValidatorIndex(ctx context.Context, pubKey [48]byte) (types.ValidatorIndex, bool) {
|
||||
s.headLock.RLock()
|
||||
defer s.headLock.RUnlock()
|
||||
|
||||
return s.headState(ctx).ValidatorIndexByPubkey(pubKey)
|
||||
}
|
||||
|
||||
// HeadValidatorIndexToPublicKey returns the pubkey of the validator `index` in current head state.
|
||||
func (s *Service) HeadValidatorIndexToPublicKey(ctx context.Context, index types.ValidatorIndex) ([48]byte, error) {
|
||||
s.headLock.RLock()
|
||||
defer s.headLock.RUnlock()
|
||||
|
||||
v, err := s.headState(ctx).ValidatorAtIndexReadOnly(index)
|
||||
if err != nil {
|
||||
return [48]byte{}, err
|
||||
}
|
||||
return v.PublicKey(), nil
|
||||
}
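The signatures above move from raw uint64 values to the typed Slot, Epoch and ValidatorIndex aliases from github.com/prysmaticlabs/eth2-types. A minimal sketch of what the typed values look like at a call site (the numbers are arbitrary; Epoch.Sub is the same helper used further down in this diff):

package main

import (
	"fmt"

	types "github.com/prysmaticlabs/eth2-types"
)

func main() {
	s := types.Slot(100)         // was a plain uint64 before this change
	e := types.Epoch(3)          // ditto for epochs
	i := types.ValidatorIndex(7) // ditto for validator indices
	// The aliases are still uint64 underneath, so converting back is an explicit cast.
	fmt.Println(uint64(s), e.Sub(1), uint64(i)) // 100 2 7
}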
|
||||
|
||||
@@ -4,16 +4,17 @@ import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
)
|
||||
|
||||
func TestHeadSlot_DataRace(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
s := &Service{
|
||||
beaconDB: beaconDB,
|
||||
cfg: &Config{BeaconDB: beaconDB},
|
||||
}
|
||||
go func() {
|
||||
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
|
||||
@@ -24,9 +25,8 @@ func TestHeadSlot_DataRace(t *testing.T) {
|
||||
func TestHeadRoot_DataRace(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
s := &Service{
|
||||
beaconDB: beaconDB,
|
||||
head: &head{root: [32]byte{'A'}},
|
||||
stateGen: stategen.New(beaconDB),
|
||||
cfg: &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
|
||||
head: &head{root: [32]byte{'A'}},
|
||||
}
|
||||
go func() {
|
||||
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
|
||||
@@ -38,9 +38,8 @@ func TestHeadRoot_DataRace(t *testing.T) {
|
||||
func TestHeadBlock_DataRace(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
s := &Service{
|
||||
beaconDB: beaconDB,
|
||||
head: &head{block: ðpb.SignedBeaconBlock{}},
|
||||
stateGen: stategen.New(beaconDB),
|
||||
cfg: &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
|
||||
head: &head{block: wrapper.WrappedPhase0SignedBeaconBlock(ðpb.SignedBeaconBlock{})},
|
||||
}
|
||||
go func() {
|
||||
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
|
||||
@@ -52,8 +51,7 @@ func TestHeadBlock_DataRace(t *testing.T) {
|
||||
func TestHeadState_DataRace(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
s := &Service{
|
||||
beaconDB: beaconDB,
|
||||
stateGen: stategen.New(beaconDB),
|
||||
cfg: &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
|
||||
}
|
||||
go func() {
|
||||
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
|
||||
|
||||
@@ -5,18 +5,19 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
// Ensure Service implements chain info interface.
|
||||
@@ -103,10 +104,10 @@ func TestPrevJustifiedCheckpt_GenesisRootOk(t *testing.T) {
|
||||
|
||||
func TestHeadSlot_CanRetrieve(t *testing.T) {
|
||||
c := &Service{}
|
||||
s, err := state.InitializeFromProto(&pb.BeaconState{})
|
||||
s, err := v1.InitializeFromProto(ðpb.BeaconState{})
|
||||
require.NoError(t, err)
|
||||
c.head = &head{slot: 100, state: s}
|
||||
assert.Equal(t, uint64(100), c.HeadSlot())
|
||||
assert.Equal(t, types.Slot(100), c.HeadSlot())
|
||||
}
|
||||
|
||||
func TestHeadRoot_CanRetrieve(t *testing.T) {
|
||||
@@ -119,13 +120,13 @@ func TestHeadRoot_CanRetrieve(t *testing.T) {
|
||||
|
||||
func TestHeadRoot_UseDB(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
c := &Service{beaconDB: beaconDB}
|
||||
c := &Service{cfg: &Config{BeaconDB: beaconDB}}
|
||||
c.head = &head{root: params.BeaconConfig().ZeroHash}
|
||||
b := testutil.NewBeaconBlock()
|
||||
br, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(context.Background(), b))
|
||||
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &pb.StateSummary{Root: br[:]}))
|
||||
require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(b)))
|
||||
require.NoError(t, beaconDB.SaveStateSummary(context.Background(), ðpb.StateSummary{Root: br[:]}))
|
||||
require.NoError(t, beaconDB.SaveHeadBlockRoot(context.Background(), br))
|
||||
r, err := c.HeadRoot(context.Background())
|
||||
require.NoError(t, err)
|
||||
@@ -135,18 +136,18 @@ func TestHeadRoot_UseDB(t *testing.T) {
|
||||
func TestHeadBlock_CanRetrieve(t *testing.T) {
|
||||
b := testutil.NewBeaconBlock()
|
||||
b.Block.Slot = 1
|
||||
s, err := state.InitializeFromProto(&pb.BeaconState{})
|
||||
s, err := v1.InitializeFromProto(ðpb.BeaconState{})
|
||||
require.NoError(t, err)
|
||||
c := &Service{}
|
||||
c.head = &head{block: b, state: s}
|
||||
c.head = &head{block: wrapper.WrappedPhase0SignedBeaconBlock(b), state: s}
|
||||
|
||||
received, err := c.HeadBlock(context.Background())
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, b, received, "Incorrect head block received")
|
||||
assert.DeepEqual(t, b, received.Proto(), "Incorrect head block received")
|
||||
}
|
||||
|
||||
func TestHeadState_CanRetrieve(t *testing.T) {
|
||||
s, err := state.InitializeFromProto(&pb.BeaconState{Slot: 2, GenesisValidatorsRoot: params.BeaconConfig().ZeroHash[:]})
|
||||
s, err := v1.InitializeFromProto(ðpb.BeaconState{Slot: 2, GenesisValidatorsRoot: params.BeaconConfig().ZeroHash[:]})
|
||||
require.NoError(t, err)
|
||||
c := &Service{}
|
||||
c.head = &head{state: s}
|
||||
@@ -162,8 +163,8 @@ func TestGenesisTime_CanRetrieve(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestCurrentFork_CanRetrieve(t *testing.T) {
|
||||
f := &pb.Fork{Epoch: 999}
|
||||
s, err := state.InitializeFromProto(&pb.BeaconState{Fork: f})
|
||||
f := ðpb.Fork{Epoch: 999}
|
||||
s, err := v1.InitializeFromProto(ðpb.BeaconState{Fork: f})
|
||||
require.NoError(t, err)
|
||||
c := &Service{}
|
||||
c.head = &head{state: s}
|
||||
@@ -173,7 +174,7 @@ func TestCurrentFork_CanRetrieve(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestCurrentFork_NilHeadState(t *testing.T) {
|
||||
f := &pb.Fork{
|
||||
f := ðpb.Fork{
|
||||
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
}
|
||||
@@ -188,7 +189,7 @@ func TestGenesisValidatorRoot_CanRetrieve(t *testing.T) {
|
||||
c := &Service{}
|
||||
assert.Equal(t, [32]byte{}, c.GenesisValidatorRoot(), "Did not get correct genesis validator root")
|
||||
|
||||
s, err := state.InitializeFromProto(&pb.BeaconState{GenesisValidatorsRoot: []byte{'a'}})
|
||||
s, err := v1.InitializeFromProto(ðpb.BeaconState{GenesisValidatorsRoot: []byte{'a'}})
|
||||
require.NoError(t, err)
|
||||
c.head = &head{state: s}
|
||||
assert.Equal(t, [32]byte{'a'}, c.GenesisValidatorRoot(), "Did not get correct genesis validator root")
|
||||
@@ -202,7 +203,7 @@ func TestHeadETH1Data_Nil(t *testing.T) {
|
||||
|
||||
func TestHeadETH1Data_CanRetrieve(t *testing.T) {
|
||||
d := ðpb.Eth1Data{DepositCount: 999}
|
||||
s, err := state.InitializeFromProto(&pb.BeaconState{Eth1Data: d})
|
||||
s, err := v1.InitializeFromProto(ðpb.BeaconState{Eth1Data: d})
|
||||
require.NoError(t, err)
|
||||
c := &Service{}
|
||||
c.head = &head{state: s}
|
||||
@@ -220,7 +221,7 @@ func TestIsCanonical_Ok(t *testing.T) {
|
||||
blk.Block.Slot = 0
|
||||
root, err := blk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, blk))
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(blk)))
|
||||
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, root))
|
||||
can, err := c.IsCanonical(ctx, root)
|
||||
require.NoError(t, err)
|
||||
@@ -277,7 +278,51 @@ func TestService_HeadGenesisValidatorRoot(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestService_ProtoArrayStore(t *testing.T) {
|
||||
c := &Service{forkChoiceStore: protoarray.New(0, 0, [32]byte{})}
|
||||
c := &Service{cfg: &Config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}}
|
||||
p := c.ProtoArrayStore()
|
||||
require.Equal(t, 0, int(p.FinalizedEpoch()))
|
||||
}
|
||||
|
||||
func TestService_ChainHeads(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
c := &Service{cfg: &Config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}}
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 100, [32]byte{'a'}, [32]byte{}, [32]byte{}, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{}, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{}, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 103, [32]byte{'d'}, [32]byte{}, [32]byte{}, 0, 0))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.ProcessBlock(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, [32]byte{}, 0, 0))
|
||||
|
||||
roots, slots := c.ChainHeads()
|
||||
require.DeepEqual(t, [][32]byte{{'c'}, {'d'}, {'e'}}, roots)
|
||||
require.DeepEqual(t, []types.Slot{102, 103, 104}, slots)
|
||||
}
|
||||
|
||||
func TestService_HeadPublicKeyToValidatorIndex(t *testing.T) {
|
||||
s, _ := testutil.DeterministicGenesisState(t, 10)
|
||||
c := &Service{}
|
||||
c.head = &head{state: s}
|
||||
|
||||
_, e := c.HeadPublicKeyToValidatorIndex(context.Background(), [48]byte{})
|
||||
require.Equal(t, false, e)
|
||||
|
||||
v, err := s.ValidatorAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
|
||||
i, e := c.HeadPublicKeyToValidatorIndex(context.Background(), bytesutil.ToBytes48(v.PublicKey))
|
||||
require.Equal(t, true, e)
|
||||
require.Equal(t, types.ValidatorIndex(0), i)
|
||||
}
|
||||
|
||||
func TestService_HeadValidatorIndexToPublicKey(t *testing.T) {
|
||||
s, _ := testutil.DeterministicGenesisState(t, 10)
|
||||
c := &Service{}
|
||||
c.head = &head{state: s}
|
||||
|
||||
p, err := c.HeadValidatorIndexToPublicKey(context.Background(), 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
v, err := s.ValidatorAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, bytesutil.ToBytes48(v.PublicKey), p)
|
||||
}
|
||||
|
||||
@@ -6,24 +6,27 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
|
||||
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
ethpbv1 "github.com/prysmaticlabs/prysm/proto/eth/v1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/slotutil"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// This defines the current chain service's view of head.
|
||||
type head struct {
|
||||
slot uint64 // current head slot.
|
||||
root [32]byte // current head root.
|
||||
block *ethpb.SignedBeaconBlock // current head block.
|
||||
state *stateTrie.BeaconState // current head state.
|
||||
slot types.Slot // current head slot.
|
||||
root [32]byte // current head root.
|
||||
block block.SignedBeaconBlock // current head block.
|
||||
state state.BeaconState // current head state.
|
||||
}
|
||||
|
||||
// Determines the head from the fork choice service and saves its new data
|
||||
@@ -56,18 +59,18 @@ func (s *Service) updateHead(ctx context.Context, balances []uint64) error {
|
||||
// If the fork choice store is missing justified block info, a node should
|
||||
// re-initiate fork choice store using the latest justified info.
|
||||
// This recovers from a fatal condition and should not happen at runtime.
|
||||
if !s.forkChoiceStore.HasNode(headStartRoot) {
|
||||
jb, err := s.beaconDB.Block(ctx, headStartRoot)
|
||||
if !s.cfg.ForkChoiceStore.HasNode(headStartRoot) {
|
||||
jb, err := s.cfg.BeaconDB.Block(ctx, headStartRoot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s.forkChoiceStore = protoarray.New(j.Epoch, f.Epoch, bytesutil.ToBytes32(f.Root))
|
||||
if err := s.insertBlockToForkChoiceStore(ctx, jb.Block, headStartRoot, f, j); err != nil {
|
||||
s.cfg.ForkChoiceStore = protoarray.New(j.Epoch, f.Epoch, bytesutil.ToBytes32(f.Root))
|
||||
if err := s.insertBlockToForkChoiceStore(ctx, jb.Block(), headStartRoot, f, j); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
headRoot, err := s.forkChoiceStore.Head(ctx, j.Epoch, headStartRoot, balances, f.Epoch)
|
||||
headRoot, err := s.cfg.ForkChoiceStore.Head(ctx, j.Epoch, headStartRoot, balances, f.Epoch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -93,40 +96,50 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error {
|
||||
|
||||
// If the head state is not available, just return nil.
|
||||
// There's nothing to cache
|
||||
if !s.beaconDB.HasStateSummary(ctx, headRoot) {
|
||||
if !s.cfg.BeaconDB.HasStateSummary(ctx, headRoot) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get the new head block from DB.
|
||||
newHeadBlock, err := s.beaconDB.Block(ctx, headRoot)
|
||||
newHeadBlock, err := s.cfg.BeaconDB.Block(ctx, headRoot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if newHeadBlock == nil || newHeadBlock.Block == nil {
|
||||
if newHeadBlock == nil || newHeadBlock.IsNil() || newHeadBlock.Block().IsNil() {
|
||||
return errors.New("cannot save nil head block")
|
||||
}
|
||||
|
||||
// Get the new head state from cached state or DB.
|
||||
newHeadState, err := s.stateGen.StateByRoot(ctx, headRoot)
|
||||
newHeadState, err := s.cfg.StateGen.StateByRoot(ctx, headRoot)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not retrieve head state in DB")
|
||||
}
|
||||
if newHeadState == nil {
|
||||
if newHeadState == nil || newHeadState.IsNil() {
|
||||
return errors.New("cannot save nil head state")
|
||||
}
|
||||
|
||||
// A chain re-org occurred, so we fire an event notifying the rest of the services.
|
||||
headSlot := s.HeadSlot()
|
||||
if bytesutil.ToBytes32(newHeadBlock.Block.ParentRoot) != bytesutil.ToBytes32(r) {
|
||||
newHeadSlot := newHeadBlock.Block().Slot()
|
||||
oldHeadRoot := s.headRoot()
|
||||
oldStateRoot := s.headBlock().Block().StateRoot()
|
||||
newStateRoot := newHeadBlock.Block().StateRoot()
|
||||
if bytesutil.ToBytes32(newHeadBlock.Block().ParentRoot()) != bytesutil.ToBytes32(r) {
|
||||
log.WithFields(logrus.Fields{
|
||||
"newSlot": fmt.Sprintf("%d", newHeadBlock.Block.Slot),
|
||||
"newSlot": fmt.Sprintf("%d", newHeadSlot),
|
||||
"oldSlot": fmt.Sprintf("%d", headSlot),
|
||||
}).Debug("Chain reorg occurred")
|
||||
s.stateNotifier.StateFeed().Send(&feed.Event{
|
||||
absoluteSlotDifference := slotutil.AbsoluteValueSlotDifference(newHeadSlot, headSlot)
|
||||
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
|
||||
Type: statefeed.Reorg,
|
||||
Data: &statefeed.ReorgData{
|
||||
NewSlot: newHeadBlock.Block.Slot,
|
||||
OldSlot: headSlot,
|
||||
Data: ðpbv1.EventChainReorg{
|
||||
Slot: newHeadSlot,
|
||||
Depth: absoluteSlotDifference,
|
||||
OldHeadBlock: oldHeadRoot[:],
|
||||
NewHeadBlock: headRoot[:],
|
||||
OldHeadState: oldStateRoot,
|
||||
NewHeadState: newStateRoot,
|
||||
Epoch: helpers.SlotToEpoch(newHeadSlot),
|
||||
},
|
||||
})
|
||||
|
||||
@@ -137,17 +150,28 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error {
|
||||
s.setHead(headRoot, newHeadBlock, newHeadState)
|
||||
|
||||
// Save the new head root to DB.
|
||||
if err := s.beaconDB.SaveHeadBlockRoot(ctx, headRoot); err != nil {
|
||||
if err := s.cfg.BeaconDB.SaveHeadBlockRoot(ctx, headRoot); err != nil {
|
||||
return errors.Wrap(err, "could not save head root in DB")
|
||||
}
|
||||
|
||||
// Forward an event capturing a new chain head over a common event feed
|
||||
// done in a goroutine to avoid blocking the critical runtime main routine.
|
||||
go func() {
|
||||
if err := s.notifyNewHeadEvent(newHeadSlot, newHeadState, newStateRoot, headRoot[:]); err != nil {
|
||||
log.WithError(err).Error("Could not notify event feed of new chain head")
|
||||
}
|
||||
}()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// This gets called to update the canonical root mapping. It does not save the head block
// root in DB. With the inception of the initial-sync-cache-state flag, it uses finalized
// checkpoints as anchors to resume sync, so the head no longer needs to be saved on a per-slot basis.
|
||||
func (s *Service) saveHeadNoDB(ctx context.Context, b *ethpb.SignedBeaconBlock, r [32]byte, hs *stateTrie.BeaconState) error {
|
||||
func (s *Service) saveHeadNoDB(ctx context.Context, b block.SignedBeaconBlock, r [32]byte, hs state.BeaconState) error {
|
||||
if err := helpers.VerifyNilBeaconBlock(b); err != nil {
|
||||
return err
|
||||
}
|
||||
cachedHeadRoot, err := s.HeadRoot(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get head root from cache")
|
||||
@@ -156,24 +180,20 @@ func (s *Service) saveHeadNoDB(ctx context.Context, b *ethpb.SignedBeaconBlock,
|
||||
return nil
|
||||
}
|
||||
|
||||
if b == nil || b.Block == nil {
|
||||
return errors.New("cannot save nil head block")
|
||||
}
|
||||
|
||||
s.setHeadInitialSync(r, stateTrie.CopySignedBeaconBlock(b), hs)
|
||||
s.setHeadInitialSync(r, b.Copy(), hs)
|
||||
return nil
|
||||
}
|
||||
|
||||
// This sets the head view object, which is used to track the head slot, root, block and state.
|
||||
func (s *Service) setHead(root [32]byte, block *ethpb.SignedBeaconBlock, state *stateTrie.BeaconState) {
|
||||
func (s *Service) setHead(root [32]byte, block block.SignedBeaconBlock, state state.BeaconState) {
|
||||
s.headLock.Lock()
|
||||
defer s.headLock.Unlock()
|
||||
|
||||
// This does a full copy of the block and state.
|
||||
s.head = &head{
|
||||
slot: block.Block.Slot,
|
||||
slot: block.Block().Slot(),
|
||||
root: root,
|
||||
block: stateTrie.CopySignedBeaconBlock(block),
|
||||
block: block.Copy(),
|
||||
state: state.Copy(),
|
||||
}
|
||||
}
|
||||
@@ -181,22 +201,22 @@ func (s *Service) setHead(root [32]byte, block *ethpb.SignedBeaconBlock, state *
|
||||
// This sets the head view object, which is used to track the head slot, root, block and state. The method
// assumes that the state being passed in will not be modified by any other
// caller that holds a reference to it.
|
||||
func (s *Service) setHeadInitialSync(root [32]byte, block *ethpb.SignedBeaconBlock, state *stateTrie.BeaconState) {
|
||||
func (s *Service) setHeadInitialSync(root [32]byte, block block.SignedBeaconBlock, state state.BeaconState) {
|
||||
s.headLock.Lock()
|
||||
defer s.headLock.Unlock()
|
||||
|
||||
// This does a full copy of the block only.
|
||||
s.head = &head{
|
||||
slot: block.Block.Slot,
|
||||
slot: block.Block().Slot(),
|
||||
root: root,
|
||||
block: stateTrie.CopySignedBeaconBlock(block),
|
||||
block: block.Copy(),
|
||||
state: state,
|
||||
}
|
||||
}
|
||||
|
||||
// This returns the head slot.
|
||||
// This is a lock free version.
|
||||
func (s *Service) headSlot() uint64 {
|
||||
func (s *Service) headSlot() types.Slot {
|
||||
return s.head.slot
|
||||
}
|
||||
|
||||
@@ -214,14 +234,14 @@ func (s *Service) headRoot() [32]byte {
|
||||
// This returns the head block.
|
||||
// It does a full copy on head block for immutability.
|
||||
// This is a lock free version.
|
||||
func (s *Service) headBlock() *ethpb.SignedBeaconBlock {
|
||||
return stateTrie.CopySignedBeaconBlock(s.head.block)
|
||||
func (s *Service) headBlock() block.SignedBeaconBlock {
|
||||
return s.head.block.Copy()
|
||||
}
|
||||
|
||||
// This returns the head state.
|
||||
// It does a full copy on head state for immutability.
|
||||
// This is a lock free version.
|
||||
func (s *Service) headState(ctx context.Context) *stateTrie.BeaconState {
|
||||
func (s *Service) headState(ctx context.Context) state.BeaconState {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.headState")
|
||||
defer span.End()
|
||||
|
||||
@@ -242,33 +262,33 @@ func (s *Service) hasHeadState() bool {
|
||||
|
||||
// This caches justified state balances to be used for fork choice.
|
||||
func (s *Service) cacheJustifiedStateBalances(ctx context.Context, justifiedRoot [32]byte) error {
|
||||
if err := s.beaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
|
||||
if err := s.cfg.BeaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.clearInitSyncBlocks()
|
||||
|
||||
var justifiedState *stateTrie.BeaconState
|
||||
var justifiedState state.BeaconState
|
||||
var err error
|
||||
if justifiedRoot == s.genesisRoot {
|
||||
justifiedState, err = s.beaconDB.GenesisState(ctx)
|
||||
justifiedState, err = s.cfg.BeaconDB.GenesisState(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
justifiedState, err = s.stateGen.StateByRoot(ctx, justifiedRoot)
|
||||
justifiedState, err = s.cfg.StateGen.StateByRoot(ctx, justifiedRoot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if justifiedState == nil {
|
||||
if justifiedState == nil || justifiedState.IsNil() {
|
||||
return errors.New("justified state can't be nil")
|
||||
}
|
||||
|
||||
epoch := helpers.CurrentEpoch(justifiedState)
|
||||
|
||||
justifiedBalances := make([]uint64, justifiedState.NumValidators())
|
||||
if err := justifiedState.ReadFromEveryValidator(func(idx int, val stateTrie.ReadOnlyValidator) error {
|
||||
if err := justifiedState.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
|
||||
if helpers.IsActiveValidatorUsingTrie(val, epoch) {
|
||||
justifiedBalances[idx] = val.EffectiveBalance()
|
||||
} else {
|
||||
@@ -290,3 +310,53 @@ func (s *Service) getJustifiedBalances() []uint64 {
|
||||
defer s.justifiedBalancesLock.RUnlock()
|
||||
return s.justifiedBalances
|
||||
}
|
||||
|
||||
// Notifies a common event feed of a new chain head event. Called right after a new
|
||||
// chain head is determined, set, and saved to disk.
|
||||
func (s *Service) notifyNewHeadEvent(
|
||||
newHeadSlot types.Slot,
|
||||
newHeadState state.BeaconState,
|
||||
newHeadStateRoot,
|
||||
newHeadRoot []byte,
|
||||
) error {
|
||||
previousDutyDependentRoot := s.genesisRoot[:]
|
||||
currentDutyDependentRoot := s.genesisRoot[:]
|
||||
|
||||
var previousDutyEpoch types.Epoch
|
||||
currentDutyEpoch := helpers.SlotToEpoch(newHeadSlot)
|
||||
if currentDutyEpoch > 0 {
|
||||
previousDutyEpoch = currentDutyEpoch.Sub(1)
|
||||
}
|
||||
currentDutySlot, err := helpers.StartSlot(currentDutyEpoch)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get duty slot")
|
||||
}
|
||||
previousDutySlot, err := helpers.StartSlot(previousDutyEpoch)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get duty slot")
|
||||
}
|
||||
if currentDutySlot > 0 {
|
||||
currentDutyDependentRoot, err = helpers.BlockRootAtSlot(newHeadState, currentDutySlot-1)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get duty dependent root")
|
||||
}
|
||||
}
|
||||
if previousDutySlot > 0 {
|
||||
previousDutyDependentRoot, err = helpers.BlockRootAtSlot(newHeadState, previousDutySlot-1)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get duty dependent root")
|
||||
}
|
||||
}
|
||||
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
|
||||
Type: statefeed.NewHead,
|
||||
Data: ðpbv1.EventHead{
|
||||
Slot: newHeadSlot,
|
||||
Block: newHeadRoot,
|
||||
State: newHeadStateRoot,
|
||||
EpochTransition: helpers.IsEpochEnd(newHeadSlot),
|
||||
PreviousDutyDependentRoot: previousDutyDependentRoot,
|
||||
CurrentDutyDependentRoot: currentDutyDependentRoot,
|
||||
},
|
||||
})
|
||||
return nil
|
||||
}
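The dependent roots above are taken at the slot just before an epoch's start slot, with the genesis root as the fallback when that start slot is zero. A minimal sketch of the slot arithmetic, assuming mainnet's 32 slots per epoch (the constant and helper name are illustrative only, not part of the codebase):

package main

import "fmt"

const slotsPerEpoch = 32 // mainnet value, assumed here purely for illustration

// dependentRootSlot returns the slot whose block root anchors duties for an epoch:
// the slot immediately before the epoch's start slot, or 0 at genesis, where the
// caller falls back to the genesis validator root instead.
func dependentRootSlot(epoch uint64) uint64 {
	start := epoch * slotsPerEpoch
	if start == 0 {
		return 0
	}
	return start - 1
}

func main() {
	fmt.Println(dependentRootSlot(0), dependentRootSlot(1), dependentRootSlot(5)) // 0 31 159
}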
|
||||
|
||||
beacon-chain/blockchain/head_sync_committee_info.go (new file, 188 lines)
@@ -0,0 +1,188 @@
package blockchain
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
|
||||
lru "github.com/hashicorp/golang-lru"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
core "github.com/prysmaticlabs/prysm/beacon-chain/core/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
stateAltair "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
)
|
||||
|
||||
// HeadSyncCommitteeFetcher is the interface that wraps the head sync committee related functions.
|
||||
// The head sync committee functions return a caller's sync committee indices and public keys with respect to the current head state.
|
||||
type HeadSyncCommitteeFetcher interface {
|
||||
HeadCurrentSyncCommitteeIndices(ctx context.Context, index types.ValidatorIndex, slot types.Slot) ([]types.CommitteeIndex, error)
|
||||
HeadNextSyncCommitteeIndices(ctx context.Context, index types.ValidatorIndex, slot types.Slot) ([]types.CommitteeIndex, error)
|
||||
HeadSyncCommitteePubKeys(ctx context.Context, slot types.Slot, committeeIndex types.CommitteeIndex) ([][]byte, error)
|
||||
}
|
||||
|
||||
// HeadDomainFetcher is the interface that wraps the head sync domain related functions.
|
||||
// The head sync committee domain functions return a caller's domain data with respect to the slot and head state.
|
||||
type HeadDomainFetcher interface {
|
||||
HeadSyncCommitteeDomain(ctx context.Context, slot types.Slot) ([]byte, error)
|
||||
HeadSyncSelectionProofDomain(ctx context.Context, slot types.Slot) ([]byte, error)
|
||||
HeadSyncContributionProofDomain(ctx context.Context, slot types.Slot) ([]byte, error)
|
||||
}
|
||||
|
||||
// HeadSyncCommitteeDomain returns the head sync committee domain using current head state advanced up to `slot`.
|
||||
func (s *Service) HeadSyncCommitteeDomain(ctx context.Context, slot types.Slot) ([]byte, error) {
|
||||
s.headLock.RLock()
|
||||
defer s.headLock.RUnlock()
|
||||
|
||||
return s.domainWithHeadState(ctx, slot, params.BeaconConfig().DomainSyncCommittee)
|
||||
}
|
||||
|
||||
// HeadSyncSelectionProofDomain returns the head sync committee domain using current head state advanced up to `slot`.
|
||||
func (s *Service) HeadSyncSelectionProofDomain(ctx context.Context, slot types.Slot) ([]byte, error) {
|
||||
s.headLock.RLock()
|
||||
defer s.headLock.RUnlock()
|
||||
|
||||
return s.domainWithHeadState(ctx, slot, params.BeaconConfig().DomainSyncCommitteeSelectionProof)
|
||||
}
|
||||
|
||||
// HeadSyncContributionProofDomain returns the head sync committee domain using current head state advanced up to `slot`.
|
||||
func (s *Service) HeadSyncContributionProofDomain(ctx context.Context, slot types.Slot) ([]byte, error) {
|
||||
s.headLock.RLock()
|
||||
defer s.headLock.RUnlock()
|
||||
|
||||
return s.domainWithHeadState(ctx, slot, params.BeaconConfig().DomainContributionAndProof)
|
||||
}
|
||||
|
||||
// HeadCurrentSyncCommitteeIndices returns the input validator `index`'s position indices in the current sync committee with respect to `slot`.
|
||||
// Head state advanced up to `slot` is used for calculation.
|
||||
func (s *Service) HeadCurrentSyncCommitteeIndices(ctx context.Context, index types.ValidatorIndex, slot types.Slot) ([]types.CommitteeIndex, error) {
|
||||
s.headLock.RLock()
|
||||
defer s.headLock.RUnlock()
|
||||
|
||||
headState, err := s.getSyncCommitteeHeadState(ctx, slot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return helpers.CurrentPeriodSyncSubcommitteeIndices(headState, index)
|
||||
}
|
||||
|
||||
// HeadNextSyncCommitteeIndices returns the input validator `index`'s position indices in the next sync committee with respect to `slot`.
|
||||
// Head state advanced up to `slot` is used for calculation.
|
||||
func (s *Service) HeadNextSyncCommitteeIndices(ctx context.Context, index types.ValidatorIndex, slot types.Slot) ([]types.CommitteeIndex, error) {
|
||||
s.headLock.RLock()
|
||||
defer s.headLock.RUnlock()
|
||||
|
||||
headState, err := s.getSyncCommitteeHeadState(ctx, slot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return helpers.NextPeriodSyncSubcommitteeIndices(headState, index)
|
||||
}
|
||||
|
||||
// HeadSyncCommitteePubKeys returns the head sync committee public keys with respect to `slot` and subcommittee index `committeeIndex`.
|
||||
// Head state advanced up to `slot` is used for calculation.
|
||||
func (s *Service) HeadSyncCommitteePubKeys(ctx context.Context, slot types.Slot, committeeIndex types.CommitteeIndex) ([][]byte, error) {
|
||||
s.headLock.RLock()
|
||||
defer s.headLock.RUnlock()
|
||||
|
||||
headState, err := s.getSyncCommitteeHeadState(ctx, slot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
nextSlotEpoch := helpers.SlotToEpoch(headState.Slot() + 1)
|
||||
currEpoch := helpers.SlotToEpoch(headState.Slot())
|
||||
|
||||
var syncCommittee *ethpb.SyncCommittee
|
||||
if helpers.SyncCommitteePeriod(currEpoch) == helpers.SyncCommitteePeriod(nextSlotEpoch) {
|
||||
syncCommittee, err = headState.CurrentSyncCommittee()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
syncCommittee, err = headState.NextSyncCommittee()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return altair.SyncSubCommitteePubkeys(syncCommittee, committeeIndex)
|
||||
}
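The branch above hinges on whether the next slot's epoch still falls in the same sync committee period as the current epoch. A minimal sketch of that comparison, assuming mainnet's 256 epochs per sync committee period (the helper below is illustrative, not the helpers package function):

package main

import "fmt"

const epochsPerSyncCommitteePeriod = 256 // mainnet value, assumed for illustration

func syncCommitteePeriod(epoch uint64) uint64 {
	return epoch / epochsPerSyncCommitteePeriod
}

func main() {
	// Same period at the next slot's epoch: the current sync committee applies.
	fmt.Println(syncCommitteePeriod(254) == syncCommitteePeriod(255)) // true
	// Crossing a period boundary: the next sync committee applies instead.
	fmt.Println(syncCommitteePeriod(255) == syncCommitteePeriod(256)) // false
}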
|
||||
|
||||
// returns the domain calculated from the input `domain` and `slot`.
|
||||
func (s *Service) domainWithHeadState(ctx context.Context, slot types.Slot, domain [4]byte) ([]byte, error) {
|
||||
headState, err := s.getSyncCommitteeHeadState(ctx, slot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return helpers.Domain(headState.Fork(), helpers.SlotToEpoch(headState.Slot()), domain, headState.GenesisValidatorRoot())
|
||||
}
|
||||
|
||||
// returns the head state advanced up to `slot`. It consults the `syncCommitteeHeadState` cache using `slot` as the key.
// On a cache miss, it processes the head state up to `slot` and fills the cache with `slot` as the key.
|
||||
func (s *Service) getSyncCommitteeHeadState(ctx context.Context, slot types.Slot) (state.BeaconState, error) {
|
||||
var headState state.BeaconState
|
||||
var err error
|
||||
|
||||
// If a head state already exists for the requested slot, we don't need to process slots.
|
||||
cachedState := syncCommitteeHeadStateCache.get(slot)
|
||||
if cachedState != nil && !cachedState.IsNil() {
|
||||
syncHeadStateHit.Inc()
|
||||
headState = cachedState
|
||||
} else {
|
||||
headState, err = s.HeadState(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if slot > headState.Slot() {
|
||||
headState, err = core.ProcessSlots(ctx, headState, slot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
syncHeadStateMiss.Inc()
|
||||
syncCommitteeHeadStateCache.add(slot, headState)
|
||||
}
|
||||
|
||||
return headState, nil
|
||||
}
|
||||
|
||||
var syncCommitteeHeadStateCache = newSyncCommitteeHeadState()
|
||||
|
||||
// syncCommitteeHeadState caches the latest head state requested by a sync committee participant.
|
||||
type syncCommitteeHeadState struct {
|
||||
cache *lru.Cache
|
||||
lock sync.RWMutex
|
||||
}
|
||||
|
||||
// newSyncCommitteeHeadState initializes the lru cache for `syncCommitteeHeadState` with size of 1.
|
||||
func newSyncCommitteeHeadState() *syncCommitteeHeadState {
|
||||
c, err := lru.New(1) // only need size of 1 to avoid redundant state copy, HTR, and process slots.
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return &syncCommitteeHeadState{cache: c}
|
||||
}
|
||||
|
||||
// add `slot` as key and `state` as value onto the lru cache.
|
||||
func (c *syncCommitteeHeadState) add(slot types.Slot, state state.BeaconState) {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
c.cache.Add(slot, state)
|
||||
}
|
||||
|
||||
// get `state` using `slot` as key. Return nil if nothing is found.
|
||||
func (c *syncCommitteeHeadState) get(slot types.Slot) state.BeaconState {
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
val, exists := c.cache.Get(slot)
|
||||
if !exists {
|
||||
return nil
|
||||
}
|
||||
if val == nil {
|
||||
return nil
|
||||
}
|
||||
return val.(*stateAltair.BeaconState)
|
||||
}
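The cache above is a size-1 LRU keyed by slot, so each newly requested slot simply evicts the previously cached head state. A minimal sketch of the same pattern with hashicorp/golang-lru, using placeholder strings where the service stores beacon states:

package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	c, err := lru.New(1) // size of 1, mirroring newSyncCommitteeHeadState
	if err != nil {
		panic(err)
	}
	c.Add(uint64(100), "state advanced to slot 100")
	c.Add(uint64(101), "state advanced to slot 101") // evicts the slot-100 entry
	if _, ok := c.Get(uint64(100)); !ok {
		fmt.Println("slot 100 evicted")
	}
	if v, ok := c.Get(uint64(101)); ok {
		fmt.Println("cached:", v)
	}
}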
|
||||
beacon-chain/blockchain/head_sync_committee_info_test.go (new file, 108 lines)
@@ -0,0 +1,108 @@
package blockchain
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
)
|
||||
|
||||
func TestService_HeadCurrentSyncCommitteeIndices(t *testing.T) {
|
||||
s, _ := testutil.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
|
||||
c := &Service{}
|
||||
c.head = &head{state: s}
|
||||
|
||||
// Process slots up to `EpochsPerSyncCommitteePeriod` so that `ProcessSyncCommitteeUpdates` can run.
|
||||
slot := uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) + 1
|
||||
indices, err := c.HeadCurrentSyncCommitteeIndices(context.Background(), 0, types.Slot(slot))
|
||||
require.NoError(t, err)
|
||||
|
||||
// NextSyncCommittee becomes CurrentSyncCommittee so it should be empty by default.
|
||||
require.Equal(t, 0, len(indices))
|
||||
}
|
||||
|
||||
func TestService_HeadNextSyncCommitteeIndices(t *testing.T) {
|
||||
s, _ := testutil.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
|
||||
c := &Service{}
|
||||
c.head = &head{state: s}
|
||||
|
||||
// Process slots up to `EpochsPerSyncCommitteePeriod` so that `ProcessSyncCommitteeUpdates` can run.
|
||||
slot := uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) + 1
|
||||
indices, err := c.HeadNextSyncCommitteeIndices(context.Background(), 0, types.Slot(slot))
|
||||
require.NoError(t, err)
|
||||
|
||||
// NextSyncCommittee should not be empty after `ProcessSyncCommitteeUpdates`, so the validator should get indices.
|
||||
require.NotEqual(t, 0, len(indices))
|
||||
}
|
||||
|
||||
func TestService_HeadSyncCommitteePubKeys(t *testing.T) {
|
||||
s, _ := testutil.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
|
||||
c := &Service{}
|
||||
c.head = &head{state: s}
|
||||
|
||||
// Process slots up to 2 * `EpochsPerSyncCommitteePeriod` so that `ProcessSyncCommitteeUpdates` runs twice.
|
||||
slot := uint64(2*params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) + 1
|
||||
pubkeys, err := c.HeadSyncCommitteePubKeys(context.Background(), types.Slot(slot), 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Any subcommittee should match the subcommittee size.
|
||||
subCommitteeSize := params.BeaconConfig().SyncCommitteeSize / params.BeaconConfig().SyncCommitteeSubnetCount
|
||||
require.Equal(t, int(subCommitteeSize), len(pubkeys))
|
||||
}
|
||||
|
||||
func TestService_HeadSyncCommitteeDomain(t *testing.T) {
|
||||
s, _ := testutil.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
|
||||
c := &Service{}
|
||||
c.head = &head{state: s}
|
||||
|
||||
wanted, err := helpers.Domain(s.Fork(), helpers.SlotToEpoch(s.Slot()), params.BeaconConfig().DomainSyncCommittee, s.GenesisValidatorRoot())
|
||||
require.NoError(t, err)
|
||||
|
||||
d, err := c.HeadSyncCommitteeDomain(context.Background(), 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.DeepEqual(t, wanted, d)
|
||||
}
|
||||
|
||||
func TestService_HeadSyncContributionProofDomain(t *testing.T) {
|
||||
s, _ := testutil.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
|
||||
c := &Service{}
|
||||
c.head = &head{state: s}
|
||||
|
||||
wanted, err := helpers.Domain(s.Fork(), helpers.SlotToEpoch(s.Slot()), params.BeaconConfig().DomainContributionAndProof, s.GenesisValidatorRoot())
|
||||
require.NoError(t, err)
|
||||
|
||||
d, err := c.HeadSyncContributionProofDomain(context.Background(), 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.DeepEqual(t, wanted, d)
|
||||
}
|
||||
|
||||
func TestService_HeadSyncSelectionProofDomain(t *testing.T) {
|
||||
s, _ := testutil.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
|
||||
c := &Service{}
|
||||
c.head = &head{state: s}
|
||||
|
||||
wanted, err := helpers.Domain(s.Fork(), helpers.SlotToEpoch(s.Slot()), params.BeaconConfig().DomainSyncCommitteeSelectionProof, s.GenesisValidatorRoot())
|
||||
require.NoError(t, err)
|
||||
|
||||
d, err := c.HeadSyncSelectionProofDomain(context.Background(), 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.DeepEqual(t, wanted, d)
|
||||
}
|
||||
|
||||
func TestSyncCommitteeHeadStateCache_RoundTrip(t *testing.T) {
|
||||
c := newSyncCommitteeHeadState()
|
||||
beaconState, _ := testutil.DeterministicGenesisStateAltair(t, 100)
|
||||
require.NoError(t, beaconState.SetSlot(100))
|
||||
cachedState := c.get(101)
|
||||
require.Equal(t, nil, cachedState)
|
||||
c.add(101, beaconState)
|
||||
cachedState = c.get(101)
|
||||
require.DeepEqual(t, beaconState, cachedState)
|
||||
}
|
||||
@@ -5,9 +5,13 @@ import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
ethpbv1 "github.com/prysmaticlabs/prysm/proto/eth/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
@@ -22,7 +26,7 @@ func TestSaveHead_Same(t *testing.T) {
|
||||
service.head = &head{slot: 0, root: r}
|
||||
|
||||
require.NoError(t, service.saveHead(context.Background(), r))
|
||||
assert.Equal(t, uint64(0), service.headSlot(), "Head did not stay the same")
|
||||
assert.Equal(t, types.Slot(0), service.headSlot(), "Head did not stay the same")
|
||||
assert.Equal(t, r, service.headRoot(), "Head did not stay the same")
|
||||
}
|
||||
|
||||
@@ -32,28 +36,40 @@ func TestSaveHead_Different(t *testing.T) {
|
||||
service := setupBeaconChain(t, beaconDB)
|
||||
|
||||
oldRoot := [32]byte{'A'}
|
||||
service.head = &head{slot: 0, root: oldRoot}
|
||||
service.head = &head{
|
||||
slot: 0,
|
||||
root: oldRoot,
|
||||
block: wrapper.WrappedPhase0SignedBeaconBlock(
|
||||
ðpb.SignedBeaconBlock{
|
||||
Block: ðpb.BeaconBlock{
|
||||
Slot: 0,
|
||||
StateRoot: make([]byte, 32),
|
||||
},
|
||||
},
|
||||
),
|
||||
}
|
||||
|
||||
newHeadSignedBlock := testutil.NewBeaconBlock()
|
||||
newHeadSignedBlock.Block.Slot = 1
|
||||
newHeadBlock := newHeadSignedBlock.Block
|
||||
|
||||
require.NoError(t, service.beaconDB.SaveBlock(context.Background(), newHeadSignedBlock))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(newHeadSignedBlock)))
|
||||
newRoot, err := newHeadBlock.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
headState := testutil.NewBeaconState()
|
||||
headState, err := testutil.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, headState.SetSlot(1))
|
||||
require.NoError(t, service.beaconDB.SaveStateSummary(context.Background(), &pb.StateSummary{Slot: 1, Root: newRoot[:]}))
|
||||
require.NoError(t, service.beaconDB.SaveState(context.Background(), headState, newRoot))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(context.Background(), ðpb.StateSummary{Slot: 1, Root: newRoot[:]}))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), headState, newRoot))
|
||||
require.NoError(t, service.saveHead(context.Background(), newRoot))
|
||||
|
||||
assert.Equal(t, uint64(1), service.HeadSlot(), "Head did not change")
|
||||
assert.Equal(t, types.Slot(1), service.HeadSlot(), "Head did not change")
|
||||
|
||||
cachedRoot, err := service.HeadRoot(context.Background())
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, cachedRoot, newRoot[:], "Head did not change")
|
||||
assert.DeepEqual(t, newHeadSignedBlock, service.headBlock(), "Head did not change")
|
||||
assert.DeepEqual(t, headState.CloneInnerState(), service.headState(ctx).CloneInnerState(), "Head did not change")
|
||||
assert.DeepEqual(t, newHeadSignedBlock, service.headBlock().Proto(), "Head did not change")
|
||||
assert.DeepSSZEqual(t, headState.CloneInnerState(), service.headState(ctx).CloneInnerState(), "Head did not change")
|
||||
}
|
||||
|
||||
func TestSaveHead_Different_Reorg(t *testing.T) {
|
||||
@@ -63,7 +79,18 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
|
||||
service := setupBeaconChain(t, beaconDB)
|
||||
|
||||
oldRoot := [32]byte{'A'}
|
||||
service.head = &head{slot: 0, root: oldRoot}
|
||||
service.head = &head{
|
||||
slot: 0,
|
||||
root: oldRoot,
|
||||
block: wrapper.WrappedPhase0SignedBeaconBlock(
|
||||
ðpb.SignedBeaconBlock{
|
||||
Block: ðpb.BeaconBlock{
|
||||
Slot: 0,
|
||||
StateRoot: make([]byte, 32),
|
||||
},
|
||||
},
|
||||
),
|
||||
}
|
||||
|
||||
reorgChainParent := [32]byte{'B'}
|
||||
newHeadSignedBlock := testutil.NewBeaconBlock()
|
||||
@@ -71,24 +98,25 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
|
||||
newHeadSignedBlock.Block.ParentRoot = reorgChainParent[:]
|
||||
newHeadBlock := newHeadSignedBlock.Block
|
||||
|
||||
require.NoError(t, service.beaconDB.SaveBlock(context.Background(), newHeadSignedBlock))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(newHeadSignedBlock)))
|
||||
newRoot, err := newHeadBlock.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
headState := testutil.NewBeaconState()
|
||||
headState, err := testutil.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, headState.SetSlot(1))
|
||||
require.NoError(t, service.beaconDB.SaveStateSummary(context.Background(), &pb.StateSummary{Slot: 1, Root: newRoot[:]}))
|
||||
require.NoError(t, service.beaconDB.SaveState(context.Background(), headState, newRoot))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(context.Background(), ðpb.StateSummary{Slot: 1, Root: newRoot[:]}))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), headState, newRoot))
|
||||
require.NoError(t, service.saveHead(context.Background(), newRoot))
|
||||
|
||||
assert.Equal(t, uint64(1), service.HeadSlot(), "Head did not change")
|
||||
assert.Equal(t, types.Slot(1), service.HeadSlot(), "Head did not change")
|
||||
|
||||
cachedRoot, err := service.HeadRoot(context.Background())
|
||||
require.NoError(t, err)
|
||||
if !bytes.Equal(cachedRoot, newRoot[:]) {
|
||||
t.Error("Head did not change")
|
||||
}
|
||||
assert.DeepEqual(t, newHeadSignedBlock, service.headBlock(), "Head did not change")
|
||||
assert.DeepEqual(t, headState.CloneInnerState(), service.headState(ctx).CloneInnerState(), "Head did not change")
|
||||
assert.DeepEqual(t, newHeadSignedBlock, service.headBlock().Proto(), "Head did not change")
|
||||
assert.DeepSSZEqual(t, headState.CloneInnerState(), service.headState(ctx).CloneInnerState(), "Head did not change")
|
||||
require.LogsContain(t, hook, "Chain reorg occurred")
|
||||
}
|
||||
|
||||
@@ -98,8 +126,8 @@ func TestCacheJustifiedStateBalances_CanCache(t *testing.T) {
|
||||
|
||||
state, _ := testutil.DeterministicGenesisState(t, 100)
|
||||
r := [32]byte{'a'}
|
||||
require.NoError(t, service.beaconDB.SaveStateSummary(context.Background(), &pb.StateSummary{Root: r[:]}))
|
||||
require.NoError(t, service.beaconDB.SaveState(context.Background(), state, r))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(context.Background(), ðpb.StateSummary{Root: r[:]}))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), state, r))
|
||||
require.NoError(t, service.cacheJustifiedStateBalances(context.Background(), r))
|
||||
require.DeepEqual(t, service.getJustifiedBalances(), state.Balances(), "Incorrect justified balances")
|
||||
}
|
||||
@@ -109,7 +137,7 @@ func TestUpdateHead_MissingJustifiedRoot(t *testing.T) {
|
||||
service := setupBeaconChain(t, beaconDB)
|
||||
|
||||
b := testutil.NewBeaconBlock()
|
||||
require.NoError(t, service.beaconDB.SaveBlock(context.Background(), b))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(b)))
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -119,3 +147,69 @@ func TestUpdateHead_MissingJustifiedRoot(t *testing.T) {
|
||||
|
||||
require.NoError(t, service.updateHead(context.Background(), []uint64{}))
|
||||
}
|
||||
|
||||
func Test_notifyNewHeadEvent(t *testing.T) {
|
||||
t.Run("genesis_state_root", func(t *testing.T) {
|
||||
bState, _ := testutil.DeterministicGenesisState(t, 10)
|
||||
notifier := &mock.MockStateNotifier{RecordEvents: true}
|
||||
srv := &Service{
|
||||
cfg: &Config{
|
||||
StateNotifier: notifier,
|
||||
},
|
||||
genesisRoot: [32]byte{1},
|
||||
}
|
||||
newHeadStateRoot := [32]byte{2}
|
||||
newHeadRoot := [32]byte{3}
|
||||
err := srv.notifyNewHeadEvent(1, bState, newHeadStateRoot[:], newHeadRoot[:])
|
||||
require.NoError(t, err)
|
||||
events := notifier.ReceivedEvents()
|
||||
require.Equal(t, 1, len(events))
|
||||
|
||||
eventHead, ok := events[0].Data.(*ethpbv1.EventHead)
|
||||
require.Equal(t, true, ok)
|
||||
wanted := ðpbv1.EventHead{
|
||||
Slot: 1,
|
||||
Block: newHeadRoot[:],
|
||||
State: newHeadStateRoot[:],
|
||||
EpochTransition: false,
|
||||
PreviousDutyDependentRoot: srv.genesisRoot[:],
|
||||
CurrentDutyDependentRoot: srv.genesisRoot[:],
|
||||
}
|
||||
require.DeepSSZEqual(t, wanted, eventHead)
|
||||
})
|
||||
t.Run("non_genesis_values", func(t *testing.T) {
|
||||
bState, _ := testutil.DeterministicGenesisState(t, 10)
|
||||
notifier := &mock.MockStateNotifier{RecordEvents: true}
|
||||
genesisRoot := [32]byte{1}
|
||||
srv := &Service{
|
||||
cfg: &Config{
|
||||
StateNotifier: notifier,
|
||||
},
|
||||
genesisRoot: genesisRoot,
|
||||
}
|
||||
epoch1Start, err := helpers.StartSlot(1)
|
||||
require.NoError(t, err)
|
||||
epoch2Start, err := helpers.StartSlot(1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, bState.SetSlot(epoch1Start))
|
||||
|
||||
newHeadStateRoot := [32]byte{2}
|
||||
newHeadRoot := [32]byte{3}
|
||||
err = srv.notifyNewHeadEvent(epoch2Start, bState, newHeadStateRoot[:], newHeadRoot[:])
|
||||
require.NoError(t, err)
|
||||
events := notifier.ReceivedEvents()
|
||||
require.Equal(t, 1, len(events))
|
||||
|
||||
eventHead, ok := events[0].Data.(*ethpbv1.EventHead)
|
||||
require.Equal(t, true, ok)
|
||||
wanted := ðpbv1.EventHead{
|
||||
Slot: epoch2Start,
|
||||
Block: newHeadRoot[:],
|
||||
State: newHeadStateRoot[:],
|
||||
EpochTransition: false,
|
||||
PreviousDutyDependentRoot: genesisRoot[:],
|
||||
CurrentDutyDependentRoot: make([]byte, 32),
|
||||
}
|
||||
require.DeepSSZEqual(t, wanted, eventHead)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -39,13 +39,13 @@ func (s *Service) TreeHandler(w http.ResponseWriter, r *http.Request) {
|
||||
log.WithError(err).Error("Could not get head state")
|
||||
return
|
||||
}
|
||||
if headState == nil {
|
||||
if headState == nil || headState.IsNil() {
|
||||
if _, err := w.Write([]byte("Unavailable during initial syncing")); err != nil {
|
||||
log.WithError(err).Error("Failed to render p2p info page")
|
||||
}
|
||||
}
|
||||
|
||||
nodes := s.forkChoiceStore.Nodes()
|
||||
nodes := s.cfg.ForkChoiceStore.Nodes()
|
||||
|
||||
graph := dot.NewGraph(dot.Directed)
|
||||
graph.Attr("rankdir", "RL")
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
@@ -21,7 +22,8 @@ func TestService_TreeHandler(t *testing.T) {

ctx := context.Background()
beaconDB := testDB.SetupDB(t)
headState := testutil.NewBeaconState()
headState, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, headState.SetBalances([]uint64{params.BeaconConfig().GweiPerEth}))
cfg := &Config{
BeaconDB: beaconDB,
@@ -32,11 +34,11 @@ func TestService_TreeHandler(t *testing.T) {
),
StateGen: stategen.New(beaconDB),
}
s, err := New(ctx, cfg)
s, err := NewService(ctx, cfg)
require.NoError(t, err)
require.NoError(t, s.forkChoiceStore.ProcessBlock(ctx, 0, [32]byte{'a'}, [32]byte{'g'}, [32]byte{'c'}, 0, 0))
require.NoError(t, s.forkChoiceStore.ProcessBlock(ctx, 1, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'c'}, 0, 0))
s.setHead([32]byte{'a'}, testutil.NewBeaconBlock(), headState)
require.NoError(t, s.cfg.ForkChoiceStore.ProcessBlock(ctx, 0, [32]byte{'a'}, [32]byte{'g'}, [32]byte{'c'}, 0, 0))
require.NoError(t, s.cfg.ForkChoiceStore.ProcessBlock(ctx, 1, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'c'}, 0, 0))
s.setHead([32]byte{'a'}, wrapper.WrappedPhase0SignedBeaconBlock(testutil.NewBeaconBlock()), headState)

rr := httptest.NewRecorder()
handler := http.HandlerFunc(s.TreeHandler)

@@ -1,11 +1,11 @@
package blockchain

import (
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
)

// This saves a beacon block to the initial sync blocks cache.
func (s *Service) saveInitSyncBlock(r [32]byte, b *ethpb.SignedBeaconBlock) {
func (s *Service) saveInitSyncBlock(r [32]byte, b block.SignedBeaconBlock) {
s.initSyncBlocksLock.Lock()
defer s.initSyncBlocksLock.Unlock()
s.initSyncBlocks[r] = b
@@ -22,7 +22,7 @@ func (s *Service) hasInitSyncBlock(r [32]byte) bool {

// This retrieves a beacon block from the initial sync blocks cache using the root of
// the block.
func (s *Service) getInitSyncBlock(r [32]byte) *ethpb.SignedBeaconBlock {
func (s *Service) getInitSyncBlock(r [32]byte) block.SignedBeaconBlock {
s.initSyncBlocksLock.RLock()
defer s.initSyncBlocksLock.RUnlock()
b := s.initSyncBlocks[r]
@@ -31,11 +31,11 @@ func (s *Service) getInitSyncBlock(r [32]byte) *ethpb.SignedBeaconBlock {

// This retrieves all the beacon blocks from the initial sync blocks cache, the returned
// blocks are unordered.
func (s *Service) getInitSyncBlocks() []*ethpb.SignedBeaconBlock {
func (s *Service) getInitSyncBlocks() []block.SignedBeaconBlock {
s.initSyncBlocksLock.RLock()
defer s.initSyncBlocksLock.RUnlock()

blks := make([]*ethpb.SignedBeaconBlock, 0, len(s.initSyncBlocks))
blks := make([]block.SignedBeaconBlock, 0, len(s.initSyncBlocks))
for _, b := range s.initSyncBlocks {
blks = append(blks, b)
}
@@ -46,5 +46,5 @@ func (s *Service) getInitSyncBlocks() []*ethpb.SignedBeaconBlock {
func (s *Service) clearInitSyncBlocks() {
s.initSyncBlocksLock.Lock()
defer s.initSyncBlocksLock.Unlock()
s.initSyncBlocks = make(map[[32]byte]*ethpb.SignedBeaconBlock)
s.initSyncBlocks = make(map[[32]byte]block.SignedBeaconBlock)
}

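The hunk above only changes the stored type; the locking pattern is untouched. As a standalone illustration of that pattern, a root-keyed cache guarded by a sync.RWMutex can be sketched with nothing but the standard library (the empty-interface value type stands in for block.SignedBeaconBlock):

package main

import "sync"

// blockCache is a stripped-down version of the init-sync cache pattern above:
// writes take the exclusive lock, reads take the shared lock, and Clear swaps
// in a fresh map so old entries can be collected in one step.
type blockCache struct {
    mu     sync.RWMutex
    blocks map[[32]byte]interface{}
}

func newBlockCache() *blockCache {
    return &blockCache{blocks: make(map[[32]byte]interface{})}
}

func (c *blockCache) Save(root [32]byte, b interface{}) {
    c.mu.Lock()
    defer c.mu.Unlock()
    c.blocks[root] = b
}

func (c *blockCache) Get(root [32]byte) (interface{}, bool) {
    c.mu.RLock()
    defer c.mu.RUnlock()
    b, ok := c.blocks[root]
    return b, ok
}

func (c *blockCache) Clear() {
    c.mu.Lock()
    defer c.mu.Unlock()
    c.blocks = make(map[[32]byte]interface{})
}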
12  beacon-chain/blockchain/init_test.go  (new file)
@@ -0,0 +1,12 @@
package blockchain

import (
"github.com/prysmaticlabs/prysm/shared/params"
)

func init() {
// Override network name so that hardcoded genesis files are not loaded.
cfg := params.BeaconConfig()
cfg.ConfigName = "test"
params.OverrideBeaconConfig(cfg)
}

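The init override above applies to the whole test package. When a single test needs a temporary tweak, the same two params calls can be wrapped with a restore step, along these lines (a sketch that assumes no other test mutates the config concurrently; SecondsPerSlot is just an example knob):

package blockchain

import (
    "testing"

    "github.com/prysmaticlabs/prysm/shared/params"
)

// overrideConfigForTest mutates the active beacon config for one test and
// restores the previous values when the test finishes.
func overrideConfigForTest(t *testing.T) {
    prev := *params.BeaconConfig() // copy of the current config values
    cfg := params.BeaconConfig()
    cfg.SecondsPerSlot = 1 // example knob; any scalar field works the same way
    params.OverrideBeaconConfig(cfg)
    t.Cleanup(func() { params.OverrideBeaconConfig(&prev) })
}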
@@ -3,33 +3,63 @@ package blockchain
|
||||
import (
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/timeutils"
|
||||
"github.com/prysmaticlabs/prysm/shared/version"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var log = logrus.WithField("prefix", "blockchain")
|
||||
|
||||
// logs state transition related data every slot.
|
||||
func logStateTransitionData(b *ethpb.BeaconBlock) {
|
||||
log.WithFields(logrus.Fields{
|
||||
"attestations": len(b.Body.Attestations),
|
||||
"deposits": len(b.Body.Deposits),
|
||||
"attesterSlashings": len(b.Body.AttesterSlashings),
|
||||
"proposerSlashings": len(b.Body.ProposerSlashings),
|
||||
"voluntaryExits": len(b.Body.VoluntaryExits),
|
||||
}).Info("Finished applying state transition")
|
||||
func logStateTransitionData(b block.BeaconBlock) {
|
||||
log := log.WithField("slot", b.Slot)
|
||||
if len(b.Body().Attestations()) > 0 {
|
||||
log = log.WithField("attestations", len(b.Body().Attestations()))
|
||||
}
|
||||
if len(b.Body().Deposits()) > 0 {
|
||||
log = log.WithField("deposits", len(b.Body().Deposits()))
|
||||
}
|
||||
if len(b.Body().AttesterSlashings()) > 0 {
|
||||
log = log.WithField("attesterSlashings", len(b.Body().AttesterSlashings()))
|
||||
}
|
||||
if len(b.Body().ProposerSlashings()) > 0 {
|
||||
log = log.WithField("proposerSlashings", len(b.Body().ProposerSlashings()))
|
||||
}
|
||||
if len(b.Body().VoluntaryExits()) > 0 {
|
||||
log = log.WithField("voluntaryExits", len(b.Body().VoluntaryExits()))
|
||||
}
|
||||
if b.Version() == version.Altair {
|
||||
agg, err := b.Body().SyncAggregate()
|
||||
if err == nil {
|
||||
log = log.WithField("syncBitsCount", agg.SyncCommitteeBits.Count())
|
||||
}
|
||||
}
|
||||
log.Info("Finished applying state transition")
|
||||
}
|
||||
|
||||
func logBlockSyncStatus(block *ethpb.BeaconBlock, blockRoot [32]byte, finalized *ethpb.Checkpoint) {
|
||||
func logBlockSyncStatus(block block.BeaconBlock, blockRoot [32]byte, finalized *ethpb.Checkpoint, receivedTime time.Time, genesisTime uint64) error {
|
||||
startTime, err := helpers.SlotToTime(genesisTime, block.Slot())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": block.Slot,
|
||||
"slotInEpoch": block.Slot % params.BeaconConfig().SlotsPerEpoch,
|
||||
"slot": block.Slot(),
|
||||
"slotInEpoch": block.Slot() % params.BeaconConfig().SlotsPerEpoch,
|
||||
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
|
||||
"epoch": helpers.SlotToEpoch(block.Slot),
|
||||
"epoch": helpers.SlotToEpoch(block.Slot()),
|
||||
"finalizedEpoch": finalized.Epoch,
|
||||
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
|
||||
}).Info("Synced new block")
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": block.Slot,
|
||||
"sinceSlotStartTime": timeutils.Now().Sub(startTime),
|
||||
"chainServiceProcessedTime": timeutils.Now().Sub(receivedTime),
|
||||
}).Debug("Sync new block times")
|
||||
return nil
|
||||
}
|
||||
|
||||
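The timing fields added to logBlockSyncStatus follow from one relationship: a slot starts at the genesis time plus slot × SecondsPerSlot. A stdlib-only sketch of that arithmetic (the real code delegates to helpers.SlotToTime and timeutils.Now; 12-second slots are just an example value):

package main

import (
    "fmt"
    "time"
)

// slotStartTime returns the wall-clock time at which a slot begins, given the
// chain's genesis unix time and the configured seconds per slot.
func slotStartTime(genesisUnix, slot, secondsPerSlot uint64) time.Time {
    return time.Unix(int64(genesisUnix+slot*secondsPerSlot), 0)
}

func main() {
    genesis := uint64(time.Now().Add(-10 * time.Minute).Unix())
    start := slotStartTime(genesis, 50, 12)
    // sinceSlotStartTime as logged above is simply "now minus slot start".
    fmt.Println("sinceSlotStartTime:", time.Since(start))
}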
66  beacon-chain/blockchain/log_test.go  (new file)
@@ -0,0 +1,66 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
|
||||
func Test_logStateTransitionData(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
b block.BeaconBlock
|
||||
want string
|
||||
}{
|
||||
{name: "empty block body",
|
||||
b: wrapper.WrappedPhase0BeaconBlock(ðpb.BeaconBlock{Body: ðpb.BeaconBlockBody{}}),
|
||||
want: "\"Finished applying state transition\" prefix=blockchain slot=0",
|
||||
},
|
||||
{name: "has attestation",
|
||||
b: wrapper.WrappedPhase0BeaconBlock(ðpb.BeaconBlock{Body: ðpb.BeaconBlockBody{Attestations: []*ethpb.Attestation{{}}}}),
|
||||
want: "\"Finished applying state transition\" attestations=1 prefix=blockchain slot=0",
|
||||
},
|
||||
{name: "has deposit",
|
||||
b: wrapper.WrappedPhase0BeaconBlock(
|
||||
ðpb.BeaconBlock{Body: ðpb.BeaconBlockBody{
|
||||
Attestations: []*ethpb.Attestation{{}},
|
||||
Deposits: []*ethpb.Deposit{{}}}}),
|
||||
want: "\"Finished applying state transition\" attestations=1 deposits=1 prefix=blockchain slot=0",
|
||||
},
|
||||
{name: "has attester slashing",
|
||||
b: wrapper.WrappedPhase0BeaconBlock(ðpb.BeaconBlock{Body: ðpb.BeaconBlockBody{
|
||||
AttesterSlashings: []*ethpb.AttesterSlashing{{}}}}),
|
||||
want: "\"Finished applying state transition\" attesterSlashings=1 prefix=blockchain slot=0",
|
||||
},
|
||||
{name: "has proposer slashing",
|
||||
b: wrapper.WrappedPhase0BeaconBlock(ðpb.BeaconBlock{Body: ðpb.BeaconBlockBody{
|
||||
ProposerSlashings: []*ethpb.ProposerSlashing{{}}}}),
|
||||
want: "\"Finished applying state transition\" prefix=blockchain proposerSlashings=1 slot=0",
|
||||
},
|
||||
{name: "has exit",
|
||||
b: wrapper.WrappedPhase0BeaconBlock(ðpb.BeaconBlock{Body: ðpb.BeaconBlockBody{
|
||||
VoluntaryExits: []*ethpb.SignedVoluntaryExit{{}}}}),
|
||||
want: "\"Finished applying state transition\" prefix=blockchain slot=0 voluntaryExits=1",
|
||||
},
|
||||
{name: "has everything",
|
||||
b: wrapper.WrappedPhase0BeaconBlock(ðpb.BeaconBlock{Body: ðpb.BeaconBlockBody{
|
||||
Attestations: []*ethpb.Attestation{{}},
|
||||
Deposits: []*ethpb.Deposit{{}},
|
||||
AttesterSlashings: []*ethpb.AttesterSlashing{{}},
|
||||
ProposerSlashings: []*ethpb.ProposerSlashing{{}},
|
||||
VoluntaryExits: []*ethpb.SignedVoluntaryExit{{}}}}),
|
||||
want: "\"Finished applying state transition\" attestations=1 attesterSlashings=1 deposits=1 prefix=blockchain proposerSlashings=1 slot=0 voluntaryExits=1",
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
hook := logTest.NewGlobal()
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
logStateTransitionData(tt.b)
|
||||
require.LogsContain(t, hook, tt.want)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -3,13 +3,18 @@ package blockchain
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
|
||||
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/version"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -105,10 +110,18 @@ var (
|
||||
Buckets: []float64{1, 2, 3, 4, 6, 32, 64},
|
||||
},
|
||||
)
|
||||
syncHeadStateMiss = promauto.NewCounter(prometheus.CounterOpts{
Name: "sync_head_state_miss",
Help: "The number of sync head state requests that are not present in the cache.",
})
syncHeadStateHit = promauto.NewCounter(prometheus.CounterOpts{
Name: "sync_head_state_hit",
Help: "The number of sync head state requests that are present in the cache.",
})
|
||||
)
|
||||
|
||||
// reportSlotMetrics reports slot related metrics.
|
||||
func reportSlotMetrics(stateSlot, headSlot, clockSlot uint64, finalizedCheckpoint *ethpb.Checkpoint) {
|
||||
func reportSlotMetrics(stateSlot, headSlot, clockSlot types.Slot, finalizedCheckpoint *ethpb.Checkpoint) {
|
||||
clockTimeSlot.Set(float64(clockSlot))
|
||||
beaconSlot.Set(float64(stateSlot))
|
||||
beaconHeadSlot.Set(float64(headSlot))
|
||||
@@ -119,8 +132,8 @@ func reportSlotMetrics(stateSlot, headSlot, clockSlot uint64, finalizedCheckpoin
|
||||
}
|
||||
|
||||
// reportEpochMetrics reports epoch related metrics.
|
||||
func reportEpochMetrics(ctx context.Context, postState, headState *stateTrie.BeaconState) error {
|
||||
currentEpoch := postState.Slot() / params.BeaconConfig().SlotsPerEpoch
|
||||
func reportEpochMetrics(ctx context.Context, postState, headState state.BeaconState) error {
|
||||
currentEpoch := types.Epoch(postState.Slot() / params.BeaconConfig().SlotsPerEpoch)
|
||||
|
||||
// Validator instances
|
||||
pendingInstances := 0
|
||||
@@ -139,7 +152,7 @@ func reportEpochMetrics(ctx context.Context, postState, headState *stateTrie.Bea
|
||||
slashingEffectiveBalance := uint64(0)
|
||||
|
||||
for i, validator := range postState.Validators() {
|
||||
bal, err := postState.BalanceAtIndex(uint64(i))
|
||||
bal, err := postState.BalanceAtIndex(types.ValidatorIndex(i))
|
||||
if err != nil {
|
||||
log.Errorf("Could not load validator balance: %v", err)
|
||||
continue
|
||||
@@ -204,14 +217,31 @@ func reportEpochMetrics(ctx context.Context, postState, headState *stateTrie.Bea
|
||||
beaconFinalizedRoot.Set(float64(bytesutil.ToLowInt64(postState.FinalizedCheckpoint().Root)))
|
||||
currentEth1DataDepositCount.Set(float64(postState.Eth1Data().DepositCount))
|
||||
|
||||
// Validator participation should be viewed on the canonical chain.
|
||||
v, b, err := precompute.New(ctx, headState)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, b, err = precompute.ProcessAttestations(ctx, headState, v, b)
|
||||
if err != nil {
|
||||
return err
|
||||
var b *precompute.Balance
|
||||
var v []*precompute.Validator
|
||||
var err error
|
||||
switch headState.Version() {
|
||||
case version.Phase0:
|
||||
// Validator participation should be viewed on the canonical chain.
|
||||
v, b, err = precompute.New(ctx, headState)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, b, err = precompute.ProcessAttestations(ctx, headState, v, b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
case version.Altair:
|
||||
v, b, err = altair.InitializeEpochValidators(ctx, headState)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, b, err = altair.ProcessEpochParticipation(ctx, headState, b, v)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
return errors.Errorf("invalid state type provided: %T", headState.InnerStateUnsafe())
|
||||
}
|
||||
prevEpochActiveBalances.Set(float64(b.ActivePrevEpoch))
|
||||
prevEpochSourceBalances.Set(float64(b.PrevEpochAttested))
|
||||
@@ -226,8 +256,8 @@ func reportEpochMetrics(ctx context.Context, postState, headState *stateTrie.Bea
|
||||
return nil
|
||||
}
|
||||
|
||||
func reportAttestationInclusion(blk *ethpb.BeaconBlock) {
|
||||
for _, att := range blk.Body.Attestations {
|
||||
attestationInclusionDelay.Observe(float64(blk.Slot - att.Data.Slot))
|
||||
func reportAttestationInclusion(blk block.BeaconBlock) {
|
||||
for _, att := range blk.Body().Attestations() {
|
||||
attestationInclusionDelay.Observe(float64(blk.Slot() - att.Data.Slot))
|
||||
}
|
||||
}
|
||||
|
||||
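The gauges fed from the precompute balances make it straightforward to derive participation rates off-line. A rough sketch of that arithmetic, with a local struct mirroring the handful of Balance fields exported above and made-up numbers:

package main

import "fmt"

// balances mirrors the precompute balance totals exported as gauges above;
// the figures in main are invented for the example.
type balances struct {
    ActivePrevEpoch   uint64 // total effective balance active in the previous epoch
    PrevEpochAttested uint64 // effective balance that attested in the previous epoch
}

// participation returns attested/active as a fraction in [0, 1].
func participation(attested, active uint64) float64 {
    if active == 0 {
        return 0
    }
    return float64(attested) / float64(active)
}

func main() {
    b := balances{ActivePrevEpoch: 3_200_000_000_000, PrevEpochAttested: 3_050_000_000_000}
    fmt.Printf("previous-epoch participation: %.3f\n", participation(b.PrevEpochAttested, b.ActivePrevEpoch))
}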
@@ -4,25 +4,28 @@ import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
eth "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
)
|
||||
|
||||
func TestReportEpochMetrics_BadHeadState(t *testing.T) {
|
||||
s := testutil.NewBeaconState()
|
||||
h := testutil.NewBeaconState()
|
||||
s, err := testutil.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
h, err := testutil.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, h.SetValidators(nil))
|
||||
err := reportEpochMetrics(context.Background(), s, h)
|
||||
err = reportEpochMetrics(context.Background(), s, h)
|
||||
require.ErrorContains(t, "failed to initialize precompute: nil validators in state", err)
|
||||
}
|
||||
|
||||
func TestReportEpochMetrics_BadAttestation(t *testing.T) {
|
||||
s := testutil.NewBeaconState()
|
||||
h := testutil.NewBeaconState()
|
||||
require.NoError(t, h.SetCurrentEpochAttestations([]*pb.PendingAttestation{{InclusionDelay: 0}}))
|
||||
err := reportEpochMetrics(context.Background(), s, h)
|
||||
s, err := testutil.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
h, err := testutil.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, h.AppendCurrentEpochAttestations(ð.PendingAttestation{InclusionDelay: 0}))
|
||||
err = reportEpochMetrics(context.Background(), s, h)
|
||||
require.ErrorContains(t, "attestation with inclusion delay of 0", err)
|
||||
}
|
||||
|
||||
@@ -32,7 +35,7 @@ func TestReportEpochMetrics_SlashedValidatorOutOfBound(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
v.Slashed = true
|
||||
require.NoError(t, h.UpdateValidatorAtIndex(0, v))
|
||||
require.NoError(t, h.SetCurrentEpochAttestations([]*pb.PendingAttestation{{InclusionDelay: 1, Data: testutil.HydrateAttestationData(ð.AttestationData{})}}))
|
||||
require.NoError(t, h.AppendCurrentEpochAttestations(ð.PendingAttestation{InclusionDelay: 1, Data: testutil.HydrateAttestationData(ð.AttestationData{})}))
|
||||
err = reportEpochMetrics(context.Background(), h, h)
|
||||
require.ErrorContains(t, "slot 0 out of bounds", err)
|
||||
}
|
||||
|
||||
@@ -4,20 +4,16 @@ import (
|
||||
"context"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/attestationutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/copyutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/timeutils"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// ErrTargetRootNotInDB returns when the target block root of an attestation cannot be found in the
|
||||
// beacon database.
|
||||
var ErrTargetRootNotInDB = errors.New("target root does not exist in db")
|
||||
|
||||
// onAttestation is called whenever an attestation is received, verifies the attestation is valid and saves
|
||||
// it to the DB. As a stateless function, this does not hold nor delay attestation based on the spec descriptions.
|
||||
// The delay is handled by the caller in `processAttestations`.
|
||||
@@ -50,7 +46,7 @@ func (s *Service) onAttestation(ctx context.Context, a *ethpb.Attestation) error
|
||||
if err := helpers.ValidateSlotTargetEpoch(a.Data); err != nil {
|
||||
return err
|
||||
}
|
||||
tgt := stateTrie.CopyCheckpoint(a.Data.Target)
|
||||
tgt := copyutil.CopyCheckpoint(a.Data.Target)
|
||||
|
||||
// Note that target root check is ignored here because it was performed in sync's validation pipeline:
|
||||
// validate_aggregate_proof.go and validate_beacon_attestation.go
|
||||
@@ -101,7 +97,7 @@ func (s *Service) onAttestation(ctx context.Context, a *ethpb.Attestation) error
|
||||
// We assume trusted attestation in this function has verified signature.
|
||||
|
||||
// Update forkchoice store with the new attestation for updating weight.
|
||||
s.forkChoiceStore.ProcessAttestation(ctx, indexedAtt.AttestingIndices, bytesutil.ToBytes32(a.Data.BeaconBlockRoot), a.Data.Target.Epoch)
|
||||
s.cfg.ForkChoiceStore.ProcessAttestation(ctx, indexedAtt.AttestingIndices, bytesutil.ToBytes32(a.Data.BeaconBlockRoot), a.Data.Target.Epoch)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
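The move to copyutil.CopyCheckpoint keeps the defensive copy of the attestation's target before it is used, so later mutation of the attestation cannot alias cached data. In spirit the copy is just a byte-slice clone, roughly:

package main

import "fmt"

// checkpoint is a stand-in for the protobuf Checkpoint message (epoch + 32-byte root).
type checkpoint struct {
    Epoch uint64
    Root  []byte
}

// copyCheckpoint returns a deep copy; cloning Root matters because slices share
// their backing array, which is exactly the aliasing the defensive copy avoids.
func copyCheckpoint(c *checkpoint) *checkpoint {
    if c == nil {
        return nil
    }
    root := make([]byte, len(c.Root))
    copy(root, c.Root)
    return &checkpoint{Epoch: c.Epoch, Root: root}
}

func main() {
    orig := &checkpoint{Epoch: 3, Root: make([]byte, 32)}
    cp := copyCheckpoint(orig)
    orig.Root[0] = 0xff
    fmt.Println(cp.Root[0] == 0) // true: the copy is unaffected
}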
@@ -6,20 +6,22 @@ import (
|
||||
"strconv"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
|
||||
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
core "github.com/prysmaticlabs/prysm/beacon-chain/core/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/featureconfig"
|
||||
"github.com/prysmaticlabs/prysm/shared/mputil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
)
|
||||
|
||||
// getAttPreState retrieves the att pre state by either from the cache or the DB.
|
||||
func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (*stateTrie.BeaconState, error) {
|
||||
func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (state.BeaconState, error) {
|
||||
// Use a multilock to allow scoped holding of a mutex by a checkpoint root + epoch
|
||||
// allowing us to behave smarter in terms of how this function is used concurrently.
|
||||
epochKey := strconv.FormatUint(c.Epoch, 10 /* base 10 */)
|
||||
epochKey := strconv.FormatUint(uint64(c.Epoch), 10 /* base 10 */)
|
||||
lock := mputil.NewMultilock(string(c.Root) + epochKey)
|
||||
lock.Lock()
|
||||
defer lock.Unlock()
|
||||
@@ -27,11 +29,11 @@ func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (*sta
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get cached checkpoint state")
|
||||
}
|
||||
if cachedState != nil {
|
||||
if cachedState != nil && !cachedState.IsNil() {
|
||||
return cachedState, nil
|
||||
}
|
||||
|
||||
baseState, err := s.stateGen.StateByRoot(ctx, bytesutil.ToBytes32(c.Root))
|
||||
baseState, err := s.cfg.StateGen.StateByRoot(ctx, bytesutil.ToBytes32(c.Root))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get pre state for epoch %d", c.Epoch)
|
||||
}
|
||||
@@ -41,26 +43,24 @@ func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (*sta
|
||||
return nil, err
|
||||
}
|
||||
if epochStartSlot > baseState.Slot() {
|
||||
baseState, err = state.ProcessSlots(ctx, baseState, epochStartSlot)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not process slots up to epoch %d", c.Epoch)
|
||||
if featureconfig.Get().EnableNextSlotStateCache {
|
||||
baseState, err = core.ProcessSlotsUsingNextSlotCache(ctx, baseState, c.Root, epochStartSlot)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not process slots up to epoch %d", c.Epoch)
|
||||
}
|
||||
} else {
|
||||
baseState, err = core.ProcessSlots(ctx, baseState, epochStartSlot)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not process slots up to epoch %d", c.Epoch)
|
||||
}
|
||||
}
|
||||
if err := s.checkpointStateCache.AddCheckpointState(c, baseState); err != nil {
|
||||
return nil, errors.Wrap(err, "could not saved checkpoint state to cache")
|
||||
}
|
||||
return baseState, nil
|
||||
}
|
||||
|
||||
// To avoid sharing the same state across checkpoint state cache and hot state cache,
|
||||
// we don't add the state to check point cache.
|
||||
has, err := s.stateGen.HasStateInCache(ctx, bytesutil.ToBytes32(c.Root))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !has {
|
||||
if err := s.checkpointStateCache.AddCheckpointState(c, baseState); err != nil {
|
||||
return nil, errors.Wrap(err, "could not saved checkpoint state to cache")
|
||||
}
|
||||
// Sharing the same state across caches is perfectly fine here, the fetching
|
||||
// of attestation prestate is by far the most accessed state fetching pattern in
|
||||
// the beacon node. An extra state instance cached isn't an issue in the bigger
|
||||
// picture.
|
||||
if err := s.checkpointStateCache.AddCheckpointState(c, baseState); err != nil {
|
||||
return nil, errors.Wrap(err, "could not save checkpoint state to cache")
|
||||
}
|
||||
return baseState, nil
|
||||
|
||||
@@ -68,9 +68,9 @@ func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (*sta
|
||||
|
||||
// verifyAttTargetEpoch validates attestation is from the current or previous epoch.
|
||||
func (s *Service) verifyAttTargetEpoch(_ context.Context, genesisTime, nowTime uint64, c *ethpb.Checkpoint) error {
|
||||
currentSlot := (nowTime - genesisTime) / params.BeaconConfig().SecondsPerSlot
|
||||
currentSlot := types.Slot((nowTime - genesisTime) / params.BeaconConfig().SecondsPerSlot)
|
||||
currentEpoch := helpers.SlotToEpoch(currentSlot)
|
||||
var prevEpoch uint64
|
||||
var prevEpoch types.Epoch
|
||||
// Prevents previous epoch under flow
|
||||
if currentEpoch > 1 {
|
||||
prevEpoch = currentEpoch - 1
|
||||
@@ -84,20 +84,20 @@ func (s *Service) verifyAttTargetEpoch(_ context.Context, genesisTime, nowTime u
|
||||
// verifyBeaconBlock verifies beacon head block is known and not from the future.
|
||||
func (s *Service) verifyBeaconBlock(ctx context.Context, data *ethpb.AttestationData) error {
|
||||
r := bytesutil.ToBytes32(data.BeaconBlockRoot)
|
||||
b, err := s.beaconDB.Block(ctx, r)
|
||||
b, err := s.cfg.BeaconDB.Block(ctx, r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// If the block does not exist in db, check again if block exists in initial sync block cache.
|
||||
// This could happen as the node first syncs to head.
|
||||
if b == nil && s.hasInitSyncBlock(r) {
|
||||
if (b == nil || b.IsNil()) && s.hasInitSyncBlock(r) {
|
||||
b = s.getInitSyncBlock(r)
|
||||
}
|
||||
if b == nil || b.Block == nil {
|
||||
return fmt.Errorf("beacon block %#x does not exist", bytesutil.Trunc(data.BeaconBlockRoot))
|
||||
if err := helpers.VerifyNilBeaconBlock(b); err != nil {
|
||||
return err
|
||||
}
|
||||
if b.Block.Slot > data.Slot {
|
||||
return fmt.Errorf("could not process attestation for future block, block.Slot=%d > attestation.Data.Slot=%d", b.Block.Slot, data.Slot)
|
||||
if b.Block().Slot() > data.Slot {
|
||||
return fmt.Errorf("could not process attestation for future block, block.Slot=%d > attestation.Data.Slot=%d", b.Block().Slot(), data.Slot)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
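getAttPreState scopes its mutex to a checkpoint (root plus epoch) through mputil.NewMultilock, so distinct checkpoints can be built concurrently while work on the same checkpoint is serialized. A minimal keyed-lock sketch of the same idea, using only the standard library (the real helper also releases unused entries), might be:

package main

import "sync"

// keyedLocker hands out one mutex per key.
type keyedLocker struct {
    mu    sync.Mutex
    locks map[string]*sync.Mutex
}

func newKeyedLocker() *keyedLocker {
    return &keyedLocker{locks: make(map[string]*sync.Mutex)}
}

func (k *keyedLocker) lockFor(key string) *sync.Mutex {
    k.mu.Lock()
    defer k.mu.Unlock()
    l, ok := k.locks[key]
    if !ok {
        l = &sync.Mutex{}
        k.locks[key] = l
    }
    return l
}

func main() {
    locker := newKeyedLocker()
    key := string(make([]byte, 32)) + "1" // checkpoint root + epoch, as in getAttPreState
    l := locker.lockFor(key)
    l.Lock()
    // ... fetch or build the checkpoint state ...
    l.Unlock()
}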
@@ -4,14 +4,14 @@ import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
@@ -29,42 +29,44 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
|
||||
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
|
||||
StateGen: stategen.New(beaconDB),
|
||||
}
|
||||
service, err := New(ctx, cfg)
|
||||
service, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = blockTree1(beaconDB, []byte{'g'})
|
||||
_, err = blockTree1(t, beaconDB, []byte{'g'})
|
||||
require.NoError(t, err)
|
||||
|
||||
BlkWithOutState := testutil.NewBeaconBlock()
|
||||
BlkWithOutState.Block.Slot = 0
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, BlkWithOutState))
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(BlkWithOutState)))
|
||||
BlkWithOutStateRoot, err := BlkWithOutState.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
BlkWithStateBadAtt := testutil.NewBeaconBlock()
|
||||
BlkWithStateBadAtt.Block.Slot = 1
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, BlkWithStateBadAtt))
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(BlkWithStateBadAtt)))
|
||||
BlkWithStateBadAttRoot, err := BlkWithStateBadAtt.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
s := testutil.NewBeaconState()
|
||||
s, err := testutil.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.SetSlot(100*params.BeaconConfig().SlotsPerEpoch))
|
||||
require.NoError(t, service.beaconDB.SaveState(ctx, s, BlkWithStateBadAttRoot))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, BlkWithStateBadAttRoot))
|
||||
|
||||
BlkWithValidState := testutil.NewBeaconBlock()
|
||||
BlkWithValidState.Block.Slot = 2
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, BlkWithValidState))
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(BlkWithValidState)))
|
||||
|
||||
BlkWithValidStateRoot, err := BlkWithValidState.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
s = testutil.NewBeaconState()
|
||||
err = s.SetFork(&pb.Fork{
|
||||
s, err = testutil.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
err = s.SetFork(ðpb.Fork{
|
||||
Epoch: 0,
|
||||
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.beaconDB.SaveState(ctx, s, BlkWithValidStateRoot))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, BlkWithValidStateRoot))
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
@@ -133,7 +135,7 @@ func TestStore_OnAttestation_Ok(t *testing.T) {
|
||||
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
|
||||
StateGen: stategen.New(beaconDB),
|
||||
}
|
||||
service, err := New(ctx, cfg)
|
||||
service, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
genesisState, pks := testutil.DeterministicGenesisState(t, 64)
|
||||
require.NoError(t, genesisState.SetGenesisTime(uint64(timeutils.Now().Unix())-params.BeaconConfig().SecondsPerSlot))
|
||||
@@ -144,8 +146,8 @@ func TestStore_OnAttestation_Ok(t *testing.T) {
|
||||
copied := genesisState.Copy()
|
||||
copied, err = state.ProcessSlots(ctx, copied, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.beaconDB.SaveState(ctx, copied, tRoot))
|
||||
require.NoError(t, service.forkChoiceStore.ProcessBlock(ctx, 0, tRoot, tRoot, tRoot, 1, 1))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(ctx, 0, tRoot, tRoot, tRoot, 1, 1))
|
||||
require.NoError(t, service.onAttestation(ctx, att[0]))
|
||||
}
|
||||
|
||||
@@ -157,10 +159,11 @@ func TestStore_SaveCheckpointState(t *testing.T) {
|
||||
BeaconDB: beaconDB,
|
||||
StateGen: stategen.New(beaconDB),
|
||||
}
|
||||
service, err := New(ctx, cfg)
|
||||
service, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
s := testutil.NewBeaconState()
|
||||
s, err := testutil.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
err = s.SetFinalizedCheckpoint(ðpb.Checkpoint{Root: bytesutil.PadTo([]byte{'A'}, 32)})
|
||||
require.NoError(t, err)
|
||||
val := ðpb.Validator{
|
||||
@@ -172,7 +175,7 @@ func TestStore_SaveCheckpointState(t *testing.T) {
|
||||
err = s.SetBalances([]uint64{0})
|
||||
require.NoError(t, err)
|
||||
r := [32]byte{'g'}
|
||||
require.NoError(t, service.beaconDB.SaveState(ctx, s, r))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, r))
|
||||
|
||||
service.justifiedCheckpt = ðpb.Checkpoint{Root: r[:]}
|
||||
service.bestJustifiedCheckpt = ðpb.Checkpoint{Root: r[:]}
|
||||
@@ -181,16 +184,16 @@ func TestStore_SaveCheckpointState(t *testing.T) {
|
||||
|
||||
r = bytesutil.ToBytes32([]byte{'A'})
|
||||
cp1 := ðpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'A'}, 32)}
|
||||
require.NoError(t, service.beaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'A'})))
|
||||
require.NoError(t, service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{Root: bytesutil.PadTo([]byte{'A'}, 32)}))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'A'})))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, ðpb.StateSummary{Root: bytesutil.PadTo([]byte{'A'}, 32)}))
|
||||
|
||||
s1, err := service.getAttPreState(ctx, cp1)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 1*params.BeaconConfig().SlotsPerEpoch, s1.Slot(), "Unexpected state slot")
|
||||
|
||||
cp2 := ðpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte{'B'}, 32)}
|
||||
require.NoError(t, service.beaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'B'})))
|
||||
require.NoError(t, service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{Root: bytesutil.PadTo([]byte{'B'}, 32)}))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'B'})))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, ðpb.StateSummary{Root: bytesutil.PadTo([]byte{'B'}, 32)}))
|
||||
s2, err := service.getAttPreState(ctx, cp2)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 2*params.BeaconConfig().SlotsPerEpoch, s2.Slot(), "Unexpected state slot")
|
||||
@@ -213,8 +216,8 @@ func TestStore_SaveCheckpointState(t *testing.T) {
|
||||
service.finalizedCheckpt = ðpb.Checkpoint{Root: r[:]}
|
||||
service.prevFinalizedCheckpt = ðpb.Checkpoint{Root: r[:]}
|
||||
cp3 := ðpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'C'}, 32)}
|
||||
require.NoError(t, service.beaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'C'})))
|
||||
require.NoError(t, service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{Root: bytesutil.PadTo([]byte{'C'}, 32)}))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'C'})))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, ðpb.StateSummary{Root: bytesutil.PadTo([]byte{'C'}, 32)}))
|
||||
s3, err := service.getAttPreState(ctx, cp3)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, s.Slot(), s3.Slot(), "Unexpected state slot")
|
||||
@@ -228,24 +231,24 @@ func TestStore_UpdateCheckpointState(t *testing.T) {
|
||||
BeaconDB: beaconDB,
|
||||
StateGen: stategen.New(beaconDB),
|
||||
}
|
||||
service, err := New(ctx, cfg)
|
||||
service, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
epoch := uint64(1)
|
||||
epoch := types.Epoch(1)
|
||||
baseState, _ := testutil.DeterministicGenesisState(t, 1)
|
||||
checkpoint := ðpb.Checkpoint{Epoch: epoch, Root: bytesutil.PadTo([]byte("hi"), 32)}
|
||||
require.NoError(t, service.beaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(checkpoint.Root)))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(checkpoint.Root)))
|
||||
returned, err := service.getAttPreState(ctx, checkpoint)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, returned.Slot(), checkpoint.Epoch*params.BeaconConfig().SlotsPerEpoch, "Incorrectly returned base state")
|
||||
assert.Equal(t, params.BeaconConfig().SlotsPerEpoch.Mul(uint64(checkpoint.Epoch)), returned.Slot(), "Incorrectly returned base state")
|
||||
|
||||
cached, err := service.checkpointStateCache.StateByCheckpoint(checkpoint)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, returned.Slot(), cached.Slot(), "State should have been cached")
|
||||
|
||||
epoch = uint64(2)
|
||||
epoch = 2
|
||||
newCheckpoint := ðpb.Checkpoint{Epoch: epoch, Root: bytesutil.PadTo([]byte("bye"), 32)}
|
||||
require.NoError(t, service.beaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(newCheckpoint.Root)))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(newCheckpoint.Root)))
|
||||
returned, err = service.getAttPreState(ctx, newCheckpoint)
|
||||
require.NoError(t, err)
|
||||
s, err := helpers.StartSlot(newCheckpoint.Epoch)
|
||||
@@ -256,9 +259,7 @@ func TestStore_UpdateCheckpointState(t *testing.T) {
|
||||
|
||||
cached, err = service.checkpointStateCache.StateByCheckpoint(newCheckpoint)
|
||||
require.NoError(t, err)
|
||||
if !proto.Equal(returned.InnerStateUnsafe(), cached.InnerStateUnsafe()) {
|
||||
t.Error("Incorrectly cached base state")
|
||||
}
|
||||
require.DeepSSZEqual(t, returned.InnerStateUnsafe(), cached.InnerStateUnsafe())
|
||||
}
|
||||
|
||||
func TestAttEpoch_MatchPrevEpoch(t *testing.T) {
|
||||
@@ -266,10 +267,10 @@ func TestAttEpoch_MatchPrevEpoch(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{BeaconDB: beaconDB}
|
||||
service, err := New(ctx, cfg)
|
||||
service, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
nowTime := params.BeaconConfig().SlotsPerEpoch * params.BeaconConfig().SecondsPerSlot
|
||||
nowTime := uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().SecondsPerSlot
|
||||
require.NoError(t, service.verifyAttTargetEpoch(ctx, 0, nowTime, ðpb.Checkpoint{Root: make([]byte, 32)}))
|
||||
}
|
||||
|
||||
@@ -278,10 +279,10 @@ func TestAttEpoch_MatchCurrentEpoch(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{BeaconDB: beaconDB}
|
||||
service, err := New(ctx, cfg)
|
||||
service, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
nowTime := params.BeaconConfig().SlotsPerEpoch * params.BeaconConfig().SecondsPerSlot
|
||||
nowTime := uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().SecondsPerSlot
|
||||
require.NoError(t, service.verifyAttTargetEpoch(ctx, 0, nowTime, ðpb.Checkpoint{Epoch: 1}))
|
||||
}
|
||||
|
||||
@@ -290,10 +291,10 @@ func TestAttEpoch_NotMatch(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{BeaconDB: beaconDB}
|
||||
service, err := New(ctx, cfg)
|
||||
service, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
nowTime := 2 * params.BeaconConfig().SlotsPerEpoch * params.BeaconConfig().SecondsPerSlot
|
||||
nowTime := 2 * uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().SecondsPerSlot
|
||||
err = service.verifyAttTargetEpoch(ctx, 0, nowTime, ðpb.Checkpoint{Root: make([]byte, 32)})
|
||||
assert.ErrorContains(t, "target epoch 0 does not match current epoch 2 or prev epoch 1", err)
|
||||
}
|
||||
@@ -303,11 +304,11 @@ func TestVerifyBeaconBlock_NoBlock(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{BeaconDB: beaconDB}
|
||||
service, err := New(ctx, cfg)
|
||||
service, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
d := testutil.HydrateAttestationData(ðpb.AttestationData{})
|
||||
assert.ErrorContains(t, "beacon block 0x000000000000 does not exist", service.verifyBeaconBlock(ctx, d))
|
||||
assert.ErrorContains(t, "signed beacon block can't be nil", service.verifyBeaconBlock(ctx, d))
|
||||
}
|
||||
|
||||
func TestVerifyBeaconBlock_futureBlock(t *testing.T) {
|
||||
@@ -315,12 +316,12 @@ func TestVerifyBeaconBlock_futureBlock(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{BeaconDB: beaconDB}
|
||||
service, err := New(ctx, cfg)
|
||||
service, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
b := testutil.NewBeaconBlock()
|
||||
b.Block.Slot = 2
|
||||
require.NoError(t, service.beaconDB.SaveBlock(ctx, b))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b)))
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
d := ðpb.AttestationData{Slot: 1, BeaconBlockRoot: r[:]}
|
||||
@@ -333,12 +334,12 @@ func TestVerifyBeaconBlock_OK(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{BeaconDB: beaconDB}
|
||||
service, err := New(ctx, cfg)
|
||||
service, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
b := testutil.NewBeaconBlock()
|
||||
b.Block.Slot = 2
|
||||
require.NoError(t, service.beaconDB.SaveBlock(ctx, b))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b)))
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
d := ðpb.AttestationData{Slot: 2, BeaconBlockRoot: r[:]}
|
||||
@@ -351,12 +352,12 @@ func TestVerifyFinalizedConsistency_InconsistentRoot(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
|
||||
service, err := New(ctx, cfg)
|
||||
service, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
b32 := testutil.NewBeaconBlock()
|
||||
b32.Block.Slot = 32
|
||||
require.NoError(t, service.beaconDB.SaveBlock(ctx, b32))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b32)))
|
||||
r32, err := b32.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -365,7 +366,7 @@ func TestVerifyFinalizedConsistency_InconsistentRoot(t *testing.T) {
|
||||
b33 := testutil.NewBeaconBlock()
|
||||
b33.Block.Slot = 33
|
||||
b33.Block.ParentRoot = r32[:]
|
||||
require.NoError(t, service.beaconDB.SaveBlock(ctx, b33))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b33)))
|
||||
r33, err := b33.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -378,12 +379,12 @@ func TestVerifyFinalizedConsistency_OK(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
|
||||
service, err := New(ctx, cfg)
|
||||
service, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
b32 := testutil.NewBeaconBlock()
|
||||
b32.Block.Slot = 32
|
||||
require.NoError(t, service.beaconDB.SaveBlock(ctx, b32))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b32)))
|
||||
r32, err := b32.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -392,7 +393,7 @@ func TestVerifyFinalizedConsistency_OK(t *testing.T) {
|
||||
b33 := testutil.NewBeaconBlock()
|
||||
b33.Block.Slot = 33
|
||||
b33.Block.ParentRoot = r32[:]
|
||||
require.NoError(t, service.beaconDB.SaveBlock(ctx, b33))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b33)))
|
||||
r33, err := b33.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -405,7 +406,7 @@ func TestVerifyFinalizedConsistency_IsCanonical(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
|
||||
service, err := New(ctx, cfg)
|
||||
service, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
b32 := testutil.NewBeaconBlock()
|
||||
@@ -421,10 +422,10 @@ func TestVerifyFinalizedConsistency_IsCanonical(t *testing.T) {
|
||||
r33, err := b33.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, service.forkChoiceStore.ProcessBlock(ctx, b32.Block.Slot, r32, [32]byte{}, [32]byte{}, 0, 0))
|
||||
require.NoError(t, service.forkChoiceStore.ProcessBlock(ctx, b33.Block.Slot, r33, r32, [32]byte{}, 0, 0))
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(ctx, b32.Block.Slot, r32, [32]byte{}, [32]byte{}, 0, 0))
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(ctx, b33.Block.Slot, r33, r32, [32]byte{}, 0, 0))
|
||||
|
||||
_, err = service.forkChoiceStore.Head(ctx, 0, r32, []uint64{}, 0)
|
||||
_, err = service.cfg.ForkChoiceStore.Head(ctx, 0, r32, []uint64{}, 0)
|
||||
require.NoError(t, err)
|
||||
err = service.VerifyFinalizedConsistency(context.Background(), r33[:])
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -6,11 +6,14 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
|
||||
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
core "github.com/prysmaticlabs/prysm/beacon-chain/core/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
ethpbv1 "github.com/prysmaticlabs/prysm/proto/eth/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
|
||||
"github.com/prysmaticlabs/prysm/shared/attestationutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/bls"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
@@ -22,8 +25,11 @@ import (
|
||||
// A custom slot deadline for processing state slots in our cache.
|
||||
const slotDeadline = 5 * time.Second
|
||||
|
||||
// A custom deadline for deposit trie insertion.
|
||||
const depositDeadline = 20 * time.Second
|
||||
|
||||
// This defines size of the upper bound for initial sync block cache.
|
||||
var initialSyncBlockCacheSize = 2 * params.BeaconConfig().SlotsPerEpoch
|
||||
var initialSyncBlockCacheSize = uint64(2 * params.BeaconConfig().SlotsPerEpoch)
|
||||
|
||||
// onBlock is called when a gossip block is received. It runs regular state transition on the block.
|
||||
// The block's signing root should be computed before calling this method to avoid redundant
|
||||
@@ -46,7 +52,8 @@ var initialSyncBlockCacheSize = 2 * params.BeaconConfig().SlotsPerEpoch
|
||||
// assert get_ancestor(store, block.parent_root, finalized_slot) == store.finalized_checkpoint.root
|
||||
//
|
||||
// # Check the block is valid and compute the post-state
|
||||
// state = state_transition(pre_state, signed_block, True)
|
||||
// state = pre_state.copy()
|
||||
// state_transition(state, signed_block, True)
|
||||
// # Add new block to the store
|
||||
// store.blocks[hash_tree_root(block)] = block
|
||||
// # Add new state for this block to the store
|
||||
@@ -75,30 +82,23 @@ var initialSyncBlockCacheSize = 2 * params.BeaconConfig().SlotsPerEpoch
|
||||
// ancestor_at_finalized_slot = get_ancestor(store, store.justified_checkpoint.root, finalized_slot)
|
||||
// if ancestor_at_finalized_slot != store.finalized_checkpoint.root:
|
||||
// store.justified_checkpoint = state.current_justified_checkpoint
|
||||
func (s *Service) onBlock(ctx context.Context, signed *ethpb.SignedBeaconBlock, blockRoot [32]byte) error {
|
||||
func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, blockRoot [32]byte) error {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.onBlock")
|
||||
defer span.End()
|
||||
|
||||
if signed == nil || signed.Block == nil {
|
||||
if signed == nil || signed.IsNil() || signed.Block().IsNil() {
|
||||
return errors.New("nil block")
|
||||
}
|
||||
b := signed.Block
|
||||
b := signed.Block()
|
||||
|
||||
preState, err := s.getBlockPreState(ctx, b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
set, postState, err := state.ExecuteStateTransitionNoVerifyAnySig(ctx, preState, signed)
|
||||
postState, err := core.ExecuteStateTransition(ctx, preState, signed)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not execute state transition")
|
||||
}
|
||||
valid, err := set.Verify()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not batch verify signature")
|
||||
}
|
||||
if !valid {
|
||||
return errors.New("signature in block failed to verify")
|
||||
return err
|
||||
}
|
||||
|
||||
if err := s.savePostStateInfo(ctx, blockRoot, signed, postState, false /* reg sync */); err != nil {
|
||||
@@ -113,7 +113,7 @@ func (s *Service) onBlock(ctx context.Context, signed *ethpb.SignedBeaconBlock,
|
||||
// with a custom deadline, therefore using the background context instead.
|
||||
slotCtx, cancel := context.WithTimeout(context.Background(), slotDeadline)
|
||||
defer cancel()
|
||||
if err := state.UpdateNextSlotCache(slotCtx, blockRoot[:], postState); err != nil {
|
||||
if err := core.UpdateNextSlotCache(slotCtx, blockRoot[:], postState); err != nil {
|
||||
log.WithError(err).Debug("could not update next slot state cache")
|
||||
}
|
||||
}()
|
||||
@@ -126,42 +126,67 @@ func (s *Service) onBlock(ctx context.Context, signed *ethpb.SignedBeaconBlock,
|
||||
}
|
||||
}
|
||||
|
||||
// Update finalized check point.
|
||||
if postState.FinalizedCheckpointEpoch() > s.finalizedCheckpt.Epoch {
|
||||
if err := s.beaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
|
||||
return err
|
||||
newFinalized := postState.FinalizedCheckpointEpoch() > s.finalizedCheckpt.Epoch
|
||||
if featureconfig.Get().UpdateHeadTimely {
|
||||
if newFinalized {
|
||||
if err := s.finalizedImpliesNewJustified(ctx, postState); err != nil {
|
||||
return errors.Wrap(err, "could not save new justified")
|
||||
}
|
||||
s.prevFinalizedCheckpt = s.finalizedCheckpt
|
||||
s.finalizedCheckpt = postState.FinalizedCheckpoint()
|
||||
}
|
||||
s.clearInitSyncBlocks()
|
||||
|
||||
if err := s.updateHead(ctx, s.getJustifiedBalances()); err != nil {
|
||||
log.WithError(err).Warn("Could not update head")
|
||||
}
|
||||
|
||||
// Send notification of the processed block to the state feed.
|
||||
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
|
||||
Type: statefeed.BlockProcessed,
|
||||
Data: &statefeed.BlockProcessedData{
|
||||
Slot: signed.Block().Slot(),
|
||||
BlockRoot: blockRoot,
|
||||
SignedBlock: signed,
|
||||
Verified: true,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// Update finalized check point.
|
||||
if newFinalized {
|
||||
if err := s.updateFinalized(ctx, postState.FinalizedCheckpoint()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fRoot := bytesutil.ToBytes32(postState.FinalizedCheckpoint().Root)
|
||||
if err := s.forkChoiceStore.Prune(ctx, fRoot); err != nil {
|
||||
if err := s.cfg.ForkChoiceStore.Prune(ctx, fRoot); err != nil {
|
||||
return errors.Wrap(err, "could not prune proto array fork choice nodes")
|
||||
}
|
||||
|
||||
if err := s.finalizedImpliesNewJustified(ctx, postState); err != nil {
|
||||
return errors.Wrap(err, "could not save new justified")
|
||||
}
|
||||
|
||||
// Update deposit cache.
|
||||
finalizedState, err := s.stateGen.StateByRoot(ctx, fRoot)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not fetch finalized state")
|
||||
}
|
||||
// We update the cache up to the last deposit index in the finalized block's state.
|
||||
// We can be confident that these deposits will be included in some block
|
||||
// because the Eth1 follow distance makes such long-range reorgs extremely unlikely.
|
||||
eth1DepositIndex := int64(finalizedState.Eth1Data().DepositCount - 1)
|
||||
s.depositCache.InsertFinalizedDeposits(ctx, eth1DepositIndex)
|
||||
if featureconfig.Get().EnablePruningDepositProofs {
|
||||
// Deposit proofs are only used during state transition and can be safely removed to save space.
|
||||
if err = s.depositCache.PruneProofs(ctx, eth1DepositIndex); err != nil {
|
||||
return errors.Wrap(err, "could not prune deposit proofs")
|
||||
if !featureconfig.Get().UpdateHeadTimely {
|
||||
if err := s.finalizedImpliesNewJustified(ctx, postState); err != nil {
|
||||
return errors.Wrap(err, "could not save new justified")
|
||||
}
|
||||
}
|
||||
go func() {
|
||||
// Send an event regarding the new finalized checkpoint over a common event feed.
|
||||
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
|
||||
Type: statefeed.FinalizedCheckpoint,
|
||||
Data: ðpbv1.EventFinalizedCheckpoint{
|
||||
Epoch: postState.FinalizedCheckpoint().Epoch,
|
||||
Block: postState.FinalizedCheckpoint().Root,
|
||||
State: signed.Block().StateRoot(),
|
||||
},
|
||||
})
|
||||
|
||||
// Use a custom deadline here, since this method runs asynchronously.
|
||||
// We ignore the parent method's context and instead create a new one
|
||||
// with a custom deadline, therefore using the background context instead.
|
||||
depCtx, cancel := context.WithTimeout(context.Background(), depositDeadline)
|
||||
defer cancel()
|
||||
if err := s.insertFinalizedDeposits(depCtx, fRoot); err != nil {
|
||||
log.WithError(err).Error("Could not insert finalized deposits.")
|
||||
}
|
||||
}()
|
||||
|
||||
}
|
||||
|
||||
defer reportAttestationInclusion(b)
|
||||
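Both the next-slot-cache update and the finalized-deposit insertion above deliberately drop the caller's context and run against a background context with their own deadline, so block processing can return without cancelling them. A small sketch of that pattern, with an illustrative worker standing in for insertFinalizedDeposits:

package main

import (
    "context"
    "fmt"
    "time"
)

const depositDeadline = 20 * time.Second // mirrors the constant introduced above

// runDetached starts work on a background context with its own deadline instead
// of inheriting the soon-to-be-cancelled request context.
func runDetached(work func(ctx context.Context) error) {
    go func() {
        ctx, cancel := context.WithTimeout(context.Background(), depositDeadline)
        defer cancel()
        if err := work(ctx); err != nil {
            fmt.Println("async work failed:", err)
        }
    }()
}

func main() {
    runDetached(func(ctx context.Context) error {
        select {
        case <-time.After(100 * time.Millisecond): // stand-in for the deposit insertion
            return nil
        case <-ctx.Done():
            return ctx.Err()
        }
    })
    time.Sleep(200 * time.Millisecond) // give the sketch's goroutine time to finish
}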
@@ -169,76 +194,7 @@ func (s *Service) onBlock(ctx context.Context, signed *ethpb.SignedBeaconBlock,
|
||||
return s.handleEpochBoundary(ctx, postState)
|
||||
}
|
||||
|
||||
// onBlockInitialSyncStateTransition is called when an initial sync block is received.
|
||||
// It runs state transition on the block and without fork choice and post operation pool processes.
|
||||
// The block's signing root should be computed before calling this method to avoid redundant
|
||||
// computation in this method and methods it calls into.
|
||||
func (s *Service) onBlockInitialSyncStateTransition(ctx context.Context, signed *ethpb.SignedBeaconBlock, blockRoot [32]byte) error {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.onBlockInitialSyncStateTransition")
|
||||
defer span.End()
|
||||
|
||||
if signed == nil || signed.Block == nil {
|
||||
return errors.New("nil block")
|
||||
}
|
||||
|
||||
b := signed.Block
|
||||
|
||||
// Retrieve incoming block's pre state.
|
||||
if err := s.verifyBlkPreState(ctx, b); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
preState, err := s.stateGen.StateByRootInitialSync(ctx, bytesutil.ToBytes32(signed.Block.ParentRoot))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if preState == nil {
|
||||
return fmt.Errorf("nil pre state for slot %d", b.Slot)
|
||||
}
|
||||
|
||||
// Exit early if the pre state slot is higher than incoming block's slot.
|
||||
if preState.Slot() >= signed.Block.Slot {
|
||||
return nil
|
||||
}
|
||||
|
||||
postState, err := state.ExecuteStateTransition(ctx, preState, signed)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not execute state transition")
|
||||
}
|
||||
|
||||
if err := s.savePostStateInfo(ctx, blockRoot, signed, postState, true /* init sync */); err != nil {
|
||||
return err
|
||||
}
|
||||
// Save the latest block as head in cache.
|
||||
if err := s.saveHeadNoDB(ctx, signed, blockRoot, postState); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Rate limit how many blocks (2 epochs worth of blocks) a node keeps in the memory.
|
||||
if uint64(len(s.getInitSyncBlocks())) > initialSyncBlockCacheSize {
|
||||
if err := s.beaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
|
||||
return err
|
||||
}
|
||||
s.clearInitSyncBlocks()
|
||||
}
|
||||
|
||||
if postState.CurrentJustifiedCheckpoint().Epoch > s.justifiedCheckpt.Epoch {
|
||||
if err := s.updateJustifiedInitSync(ctx, postState.CurrentJustifiedCheckpoint()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Update finalized check point. Prune the block cache and helper caches on every new finalized epoch.
|
||||
if postState.FinalizedCheckpointEpoch() > s.finalizedCheckpt.Epoch {
|
||||
if err := s.updateFinalized(ctx, postState.FinalizedCheckpoint()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return s.handleEpochBoundary(ctx, postState)
|
||||
}
|
||||
|
||||
func (s *Service) onBlockBatch(ctx context.Context, blks []*ethpb.SignedBeaconBlock,
|
||||
func (s *Service) onBlockBatch(ctx context.Context, blks []block.SignedBeaconBlock,
|
||||
blockRoots [][32]byte) ([]*ethpb.Checkpoint, []*ethpb.Checkpoint, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.onBlockBatch")
|
||||
defer span.End()
|
||||
@@ -246,21 +202,21 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []*ethpb.SignedBeaconBl
if len(blks) == 0 || len(blockRoots) == 0 {
return nil, nil, errors.New("no blocks provided")
}
if blks[0] == nil || blks[0].Block == nil {
if blks[0] == nil || blks[0].IsNil() || blks[0].Block().IsNil() {
return nil, nil, errors.New("nil block")
}
b := blks[0].Block
b := blks[0].Block()

// Retrieve incoming block's pre state.
if err := s.verifyBlkPreState(ctx, b); err != nil {
return nil, nil, err
}
preState, err := s.stateGen.StateByRootInitialSync(ctx, bytesutil.ToBytes32(b.ParentRoot))
preState, err := s.cfg.StateGen.StateByRootInitialSync(ctx, bytesutil.ToBytes32(b.ParentRoot()))
if err != nil {
return nil, nil, err
}
if preState == nil {
return nil, nil, fmt.Errorf("nil pre state for slot %d", b.Slot)
if preState == nil || preState.IsNil() {
return nil, nil, fmt.Errorf("nil pre state for slot %d", b.Slot())
}

jCheckpoints := make([]*ethpb.Checkpoint, len(blks))
@@ -271,9 +227,9 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []*ethpb.SignedBeaconBl
Messages: [][32]byte{},
}
var set *bls.SignatureSet
boundaries := make(map[[32]byte]*stateTrie.BeaconState)
boundaries := make(map[[32]byte]state.BeaconState)
for i, b := range blks {
set, preState, err = state.ExecuteStateTransitionNoVerifyAnySig(ctx, preState, b)
set, preState, err = core.ExecuteStateTransitionNoVerifyAnySig(ctx, preState, b)
if err != nil {
return nil, nil, err
}
@@ -296,14 +252,14 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []*ethpb.SignedBeaconBl
return nil, nil, errors.New("batch block signature verification failed")
}
for r, st := range boundaries {
if err := s.stateGen.SaveState(ctx, r, st); err != nil {
if err := s.cfg.StateGen.SaveState(ctx, r, st); err != nil {
return nil, nil, err
}
}
// Also saves the last post state which to be used as pre state for the next batch.
lastB := blks[len(blks)-1]
lastBR := blockRoots[len(blockRoots)-1]
if err := s.stateGen.SaveState(ctx, lastBR, preState); err != nil {
if err := s.cfg.StateGen.SaveState(ctx, lastBR, preState); err != nil {
return nil, nil, err
}
if err := s.saveHeadNoDB(ctx, lastB, lastBR, preState); err != nil {
@@ -314,16 +270,16 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []*ethpb.SignedBeaconBl

// handles a block after the block's batch has been verified, where we can save blocks
// their state summaries and split them off to relative hot/cold storage.
func (s *Service) handleBlockAfterBatchVerify(ctx context.Context, signed *ethpb.SignedBeaconBlock,
func (s *Service) handleBlockAfterBatchVerify(ctx context.Context, signed block.SignedBeaconBlock,
blockRoot [32]byte, fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
b := signed.Block
b := signed.Block()

s.saveInitSyncBlock(blockRoot, signed)
if err := s.insertBlockToForkChoiceStore(ctx, b, blockRoot, fCheckpoint, jCheckpoint); err != nil {
return err
}
if err := s.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{
Slot: signed.Block.Slot,
if err := s.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{
Slot: signed.Block().Slot(),
Root: blockRoot[:],
}); err != nil {
return err
@@ -331,7 +287,7 @@ func (s *Service) handleBlockAfterBatchVerify(ctx context.Context, signed *ethpb

// Rate limit how many blocks (2 epochs worth of blocks) a node keeps in the memory.
if uint64(len(s.getInitSyncBlocks())) > initialSyncBlockCacheSize {
if err := s.beaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
if err := s.cfg.BeaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
return err
}
s.clearInitSyncBlocks()
@@ -348,18 +304,27 @@ func (s *Service) handleBlockAfterBatchVerify(ctx context.Context, signed *ethpb
if err := s.updateFinalized(ctx, fCheckpoint); err != nil {
return err
}
if featureconfig.Get().UpdateHeadTimely {
s.prevFinalizedCheckpt = s.finalizedCheckpt
s.finalizedCheckpt = fCheckpoint
}
}
return nil
}

// Epoch boundary bookkeeping such as logging epoch summaries.
func (s *Service) handleEpochBoundary(ctx context.Context, postState *stateTrie.BeaconState) error {
func (s *Service) handleEpochBoundary(ctx context.Context, postState state.BeaconState) error {
if postState.Slot()+1 == s.nextEpochBoundarySlot {
// Update caches for the next epoch at epoch boundary slot - 1.
if err := helpers.UpdateCommitteeCache(postState, helpers.NextEpoch(postState)); err != nil {
return err
}
if err := helpers.UpdateProposerIndicesInCache(postState, helpers.NextEpoch(postState)); err != nil {
copied := postState.Copy()
copied, err := core.ProcessSlots(ctx, copied, copied.Slot()+1)
if err != nil {
return err
}
if err := helpers.UpdateProposerIndicesInCache(copied); err != nil {
return err
}
} else if postState.Slot() >= s.nextEpochBoundarySlot {
@@ -377,7 +342,7 @@ func (s *Service) handleEpochBoundary(ctx context.Context, postState *stateTrie.
if err := helpers.UpdateCommitteeCache(postState, helpers.CurrentEpoch(postState)); err != nil {
return err
}
if err := helpers.UpdateProposerIndicesInCache(postState, helpers.CurrentEpoch(postState)); err != nil {
if err := helpers.UpdateProposerIndicesInCache(postState); err != nil {
return err
}
}
@@ -387,15 +352,15 @@ func (s *Service) handleEpochBoundary(ctx context.Context, postState *stateTrie.

// This feeds in the block and block's attestations to fork choice store. It's allows fork choice store
// to gain information on the most current chain.
func (s *Service) insertBlockAndAttestationsToForkChoiceStore(ctx context.Context, blk *ethpb.BeaconBlock, root [32]byte,
st *stateTrie.BeaconState) error {
func (s *Service) insertBlockAndAttestationsToForkChoiceStore(ctx context.Context, blk block.BeaconBlock, root [32]byte,
st state.BeaconState) error {
fCheckpoint := st.FinalizedCheckpoint()
jCheckpoint := st.CurrentJustifiedCheckpoint()
if err := s.insertBlockToForkChoiceStore(ctx, blk, root, fCheckpoint, jCheckpoint); err != nil {
return err
}
// Feed in block's attestations to fork choice store.
for _, a := range blk.Body.Attestations {
for _, a := range blk.Body().Attestations() {
committee, err := helpers.BeaconCommitteeFromState(st, a.Data.Slot, a.Data.CommitteeIndex)
if err != nil {
return err
@@ -404,19 +369,19 @@ func (s *Service) insertBlockAndAttestationsToForkChoiceStore(ctx context.Contex
if err != nil {
return err
}
s.forkChoiceStore.ProcessAttestation(ctx, indices, bytesutil.ToBytes32(a.Data.BeaconBlockRoot), a.Data.Target.Epoch)
s.cfg.ForkChoiceStore.ProcessAttestation(ctx, indices, bytesutil.ToBytes32(a.Data.BeaconBlockRoot), a.Data.Target.Epoch)
}
return nil
}

func (s *Service) insertBlockToForkChoiceStore(ctx context.Context, blk *ethpb.BeaconBlock,
func (s *Service) insertBlockToForkChoiceStore(ctx context.Context, blk block.BeaconBlock,
root [32]byte, fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
if err := s.fillInForkChoiceMissingBlocks(ctx, blk, fCheckpoint, jCheckpoint); err != nil {
return err
}
// Feed in block to fork choice store.
if err := s.forkChoiceStore.ProcessBlock(ctx,
blk.Slot, root, bytesutil.ToBytes32(blk.ParentRoot), bytesutil.ToBytes32(blk.Body.Graffiti),
if err := s.cfg.ForkChoiceStore.ProcessBlock(ctx,
blk.Slot(), root, bytesutil.ToBytes32(blk.ParentRoot()), bytesutil.ToBytes32(blk.Body().Graffiti()),
jCheckpoint.Epoch,
fCheckpoint.Epoch); err != nil {
return errors.Wrap(err, "could not process block for proto array fork choice")
@@ -426,19 +391,19 @@ func (s *Service) insertBlockToForkChoiceStore(ctx context.Context, blk *ethpb.B

// This saves post state info to DB or cache. This also saves post state info to fork choice store.
// Post state info consists of processed block and state. Do not call this method unless the block and state are verified.
func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b *ethpb.SignedBeaconBlock, st *stateTrie.BeaconState, initSync bool) error {
func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b block.SignedBeaconBlock, st state.BeaconState, initSync bool) error {
ctx, span := trace.StartSpan(ctx, "blockChain.savePostStateInfo")
defer span.End()
if initSync {
s.saveInitSyncBlock(r, b)
} else if err := s.beaconDB.SaveBlock(ctx, b); err != nil {
return errors.Wrapf(err, "could not save block from slot %d", b.Block.Slot)
} else if err := s.cfg.BeaconDB.SaveBlock(ctx, b); err != nil {
return errors.Wrapf(err, "could not save block from slot %d", b.Block().Slot())
}
if err := s.stateGen.SaveState(ctx, r, st); err != nil {
if err := s.cfg.StateGen.SaveState(ctx, r, st); err != nil {
return errors.Wrap(err, "could not save state")
}
if err := s.insertBlockAndAttestationsToForkChoiceStore(ctx, b.Block, r, st); err != nil {
return errors.Wrapf(err, "could not insert block %d to fork choice store", b.Block.Slot)
if err := s.insertBlockAndAttestationsToForkChoiceStore(ctx, b.Block(), r, st); err != nil {
return errors.Wrapf(err, "could not insert block %d to fork choice store", b.Block().Slot())
}
return nil
}

@@ -6,25 +6,28 @@ import (
"fmt"

"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/shared/attestationutil"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/traceutil"
"go.opencensus.io/trace"
)

// CurrentSlot returns the current slot based on time.
func (s *Service) CurrentSlot() uint64 {
func (s *Service) CurrentSlot() types.Slot {
return helpers.CurrentSlot(uint64(s.genesisTime.Unix()))
}

// getBlockPreState returns the pre state of an incoming block. It uses the parent root of the block
// to retrieve the state in DB. It verifies the pre state's validity and the incoming block
// is in the correct time window.
func (s *Service) getBlockPreState(ctx context.Context, b *ethpb.BeaconBlock) (*stateTrie.BeaconState, error) {
func (s *Service) getBlockPreState(ctx context.Context, b block.BeaconBlock) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.getBlockPreState")
defer span.End()

@@ -33,16 +36,16 @@ func (s *Service) getBlockPreState(ctx context.Context, b *ethpb.BeaconBlock) (*
return nil, err
}

preState, err := s.stateGen.StateByRoot(ctx, bytesutil.ToBytes32(b.ParentRoot))
preState, err := s.cfg.StateGen.StateByRoot(ctx, bytesutil.ToBytes32(b.ParentRoot()))
if err != nil {
return nil, errors.Wrapf(err, "could not get pre state for slot %d", b.Slot)
return nil, errors.Wrapf(err, "could not get pre state for slot %d", b.Slot())
}
if preState == nil {
return nil, errors.Wrapf(err, "nil pre state for slot %d", b.Slot)
if preState == nil || preState.IsNil() {
return nil, errors.Wrapf(err, "nil pre state for slot %d", b.Slot())
}

// Verify block slot time is not from the future.
if err := helpers.VerifySlotTime(preState.GenesisTime(), b.Slot, params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil {
if err := helpers.VerifySlotTime(preState.GenesisTime(), b.Slot(), params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil {
return nil, err
}

@@ -55,28 +58,28 @@ func (s *Service) getBlockPreState(ctx context.Context, b *ethpb.BeaconBlock) (*
}

// verifyBlkPreState validates input block has a valid pre-state.
func (s *Service) verifyBlkPreState(ctx context.Context, b *ethpb.BeaconBlock) error {
func (s *Service) verifyBlkPreState(ctx context.Context, b block.BeaconBlock) error {
ctx, span := trace.StartSpan(ctx, "blockChain.verifyBlkPreState")
defer span.End()

parentRoot := bytesutil.ToBytes32(b.ParentRoot)
parentRoot := bytesutil.ToBytes32(b.ParentRoot())
// Loosen the check to HasBlock because state summary gets saved in batches
// during initial syncing. There's no risk given a state summary object is just a
// a subset of the block object.
if !s.beaconDB.HasStateSummary(ctx, parentRoot) && !s.beaconDB.HasBlock(ctx, parentRoot) {
if !s.cfg.BeaconDB.HasStateSummary(ctx, parentRoot) && !s.cfg.BeaconDB.HasBlock(ctx, parentRoot) {
return errors.New("could not reconstruct parent state")
}

if err := s.VerifyBlkDescendant(ctx, bytesutil.ToBytes32(b.ParentRoot)); err != nil {
if err := s.VerifyBlkDescendant(ctx, bytesutil.ToBytes32(b.ParentRoot())); err != nil {
return err
}

has, err := s.stateGen.HasState(ctx, parentRoot)
has, err := s.cfg.StateGen.HasState(ctx, parentRoot)
if err != nil {
return err
}
if !has {
if err := s.beaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
if err := s.cfg.BeaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
return errors.Wrap(err, "could not save initial sync blocks")
}
s.clearInitSyncBlocks()
@@ -90,15 +93,15 @@ func (s *Service) VerifyBlkDescendant(ctx context.Context, root [32]byte) error
ctx, span := trace.StartSpan(ctx, "blockChain.VerifyBlkDescendant")
defer span.End()
fRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(s.finalizedCheckpt.Root))
finalizedBlkSigned, err := s.beaconDB.Block(ctx, fRoot)
finalizedBlkSigned, err := s.cfg.BeaconDB.Block(ctx, fRoot)
if err != nil {
return err
}
if finalizedBlkSigned == nil || finalizedBlkSigned.Block == nil {
if finalizedBlkSigned == nil || finalizedBlkSigned.IsNil() || finalizedBlkSigned.Block().IsNil() {
return errors.New("nil finalized block")
}
finalizedBlk := finalizedBlkSigned.Block
bFinalizedRoot, err := s.ancestor(ctx, root[:], finalizedBlk.Slot)
finalizedBlk := finalizedBlkSigned.Block()
bFinalizedRoot, err := s.ancestor(ctx, root[:], finalizedBlk.Slot())
if err != nil {
return errors.Wrap(err, "could not get finalized block root")
}
@@ -108,7 +111,7 @@ func (s *Service) VerifyBlkDescendant(ctx context.Context, root [32]byte) error

if !bytes.Equal(bFinalizedRoot, fRoot[:]) {
err := fmt.Errorf("block %#x is not a descendent of the current finalized block slot %d, %#x != %#x",
bytesutil.Trunc(root[:]), finalizedBlk.Slot, bytesutil.Trunc(bFinalizedRoot),
bytesutil.Trunc(root[:]), finalizedBlk.Slot(), bytesutil.Trunc(bFinalizedRoot),
bytesutil.Trunc(fRoot[:]))
traceutil.AnnotateError(span, err)
return err
@@ -118,13 +121,13 @@ func (s *Service) VerifyBlkDescendant(ctx context.Context, root [32]byte) error

// verifyBlkFinalizedSlot validates input block is not less than or equal
// to current finalized slot.
func (s *Service) verifyBlkFinalizedSlot(b *ethpb.BeaconBlock) error {
func (s *Service) verifyBlkFinalizedSlot(b block.BeaconBlock) error {
finalizedSlot, err := helpers.StartSlot(s.finalizedCheckpt.Epoch)
if err != nil {
return err
}
if finalizedSlot >= b.Slot {
return fmt.Errorf("block is equal or earlier than finalized block, slot %d < slot %d", b.Slot, finalizedSlot)
if finalizedSlot >= b.Slot() {
return fmt.Errorf("block is equal or earlier than finalized block, slot %d < slot %d", b.Slot(), finalizedSlot)
}
return nil
}
@@ -137,45 +140,45 @@ func (s *Service) shouldUpdateCurrentJustified(ctx context.Context, newJustified
if helpers.SlotsSinceEpochStarts(s.CurrentSlot()) < params.BeaconConfig().SafeSlotsToUpdateJustified {
return true, nil
}
var newJustifiedBlockSigned *ethpb.SignedBeaconBlock
var newJustifiedBlockSigned block.SignedBeaconBlock
justifiedRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(newJustifiedCheckpt.Root))
var err error
if s.hasInitSyncBlock(justifiedRoot) {
newJustifiedBlockSigned = s.getInitSyncBlock(justifiedRoot)
} else {
newJustifiedBlockSigned, err = s.beaconDB.Block(ctx, justifiedRoot)
newJustifiedBlockSigned, err = s.cfg.BeaconDB.Block(ctx, justifiedRoot)
if err != nil {
return false, err
}
}
if newJustifiedBlockSigned == nil || newJustifiedBlockSigned.Block == nil {
if newJustifiedBlockSigned == nil || newJustifiedBlockSigned.IsNil() || newJustifiedBlockSigned.Block().IsNil() {
return false, errors.New("nil new justified block")
}

newJustifiedBlock := newJustifiedBlockSigned.Block
newJustifiedBlock := newJustifiedBlockSigned.Block()
jSlot, err := helpers.StartSlot(s.justifiedCheckpt.Epoch)
if err != nil {
return false, err
}
if newJustifiedBlock.Slot <= jSlot {
if newJustifiedBlock.Slot() <= jSlot {
return false, nil
}
var justifiedBlockSigned *ethpb.SignedBeaconBlock
var justifiedBlockSigned block.SignedBeaconBlock
cachedJustifiedRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(s.justifiedCheckpt.Root))
if s.hasInitSyncBlock(cachedJustifiedRoot) {
justifiedBlockSigned = s.getInitSyncBlock(cachedJustifiedRoot)
} else {
justifiedBlockSigned, err = s.beaconDB.Block(ctx, cachedJustifiedRoot)
justifiedBlockSigned, err = s.cfg.BeaconDB.Block(ctx, cachedJustifiedRoot)
if err != nil {
return false, err
}
}

if justifiedBlockSigned == nil || justifiedBlockSigned.Block == nil {
if justifiedBlockSigned == nil || justifiedBlockSigned.IsNil() || justifiedBlockSigned.Block().IsNil() {
return false, errors.New("nil justified block")
}
justifiedBlock := justifiedBlockSigned.Block
b, err := s.ancestor(ctx, justifiedRoot[:], justifiedBlock.Slot)
justifiedBlock := justifiedBlockSigned.Block()
b, err := s.ancestor(ctx, justifiedRoot[:], justifiedBlock.Slot())
if err != nil {
return false, err
}
@@ -185,7 +188,7 @@ func (s *Service) shouldUpdateCurrentJustified(ctx context.Context, newJustified
return true, nil
}

func (s *Service) updateJustified(ctx context.Context, state *stateTrie.BeaconState) error {
func (s *Service) updateJustified(ctx context.Context, state state.ReadOnlyBeaconState) error {
cpt := state.CurrentJustifiedCheckpoint()
if cpt.Epoch > s.bestJustifiedCheckpt.Epoch {
s.bestJustifiedCheckpt = cpt
@@ -203,7 +206,7 @@ func (s *Service) updateJustified(ctx context.Context, state *stateTrie.BeaconSt
}
}

return s.beaconDB.SaveJustifiedCheckpoint(ctx, cpt)
return s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, cpt)
}

// This caches input checkpoint as justified for the service struct. It rotates current justified to previous justified,
@@ -216,26 +219,27 @@ func (s *Service) updateJustifiedInitSync(ctx context.Context, cp *ethpb.Checkpo
return err
}

return s.beaconDB.SaveJustifiedCheckpoint(ctx, cp)
return s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, cp)
}

func (s *Service) updateFinalized(ctx context.Context, cp *ethpb.Checkpoint) error {
// Blocks need to be saved so that we can retrieve finalized block from
// DB when migrating states.
if err := s.beaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
if err := s.cfg.BeaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
return err
}
s.clearInitSyncBlocks()

if err := s.beaconDB.SaveFinalizedCheckpoint(ctx, cp); err != nil {
if err := s.cfg.BeaconDB.SaveFinalizedCheckpoint(ctx, cp); err != nil {
return err
}

s.prevFinalizedCheckpt = s.finalizedCheckpt
s.finalizedCheckpt = cp
if !featureconfig.Get().UpdateHeadTimely {
s.prevFinalizedCheckpt = s.finalizedCheckpt
s.finalizedCheckpt = cp
}

fRoot := bytesutil.ToBytes32(cp.Root)
if err := s.stateGen.MigrateToCold(ctx, fRoot); err != nil {
if err := s.cfg.StateGen.MigrateToCold(ctx, fRoot); err != nil {
return errors.Wrap(err, "could not migrate to cold")
}

@@ -254,7 +258,7 @@ func (s *Service) updateFinalized(ctx context.Context, cp *ethpb.Checkpoint) err
// else:
//    # root is older than queried slot, thus a skip slot. Return most recent root prior to slot
//    return root
func (s *Service) ancestor(ctx context.Context, root []byte, slot uint64) ([]byte, error) {
func (s *Service) ancestor(ctx context.Context, root []byte, slot types.Slot) ([]byte, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.ancestor")
defer span.End()

@@ -275,18 +279,18 @@ func (s *Service) ancestor(ctx context.Context, root []byte, slot uint64) ([]byt
}

// This retrieves an ancestor root using fork choice store. The look up is looping through the a flat array structure.
func (s *Service) ancestorByForkChoiceStore(ctx context.Context, r [32]byte, slot uint64) ([]byte, error) {
func (s *Service) ancestorByForkChoiceStore(ctx context.Context, r [32]byte, slot types.Slot) ([]byte, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.ancestorByForkChoiceStore")
defer span.End()

if !s.forkChoiceStore.HasParent(r) {
if !s.cfg.ForkChoiceStore.HasParent(r) {
return nil, errors.New("could not find root in fork choice store")
}
return s.forkChoiceStore.AncestorRoot(ctx, r, slot)
return s.cfg.ForkChoiceStore.AncestorRoot(ctx, r, slot)
}

// This retrieves an ancestor root using DB. The look up is recursively looking up DB. Slower than `ancestorByForkChoiceStore`.
func (s *Service) ancestorByDB(ctx context.Context, r [32]byte, slot uint64) ([]byte, error) {
func (s *Service) ancestorByDB(ctx context.Context, r [32]byte, slot types.Slot) ([]byte, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.ancestorByDB")
defer span.End()

@@ -295,7 +299,7 @@ func (s *Service) ancestorByDB(ctx context.Context, r [32]byte, slot uint64) ([]
return nil, ctx.Err()
}

signed, err := s.beaconDB.Block(ctx, r)
signed, err := s.cfg.BeaconDB.Block(ctx, r)
if err != nil {
return nil, errors.Wrap(err, "could not get ancestor block")
}
@@ -304,15 +308,15 @@ func (s *Service) ancestorByDB(ctx context.Context, r [32]byte, slot uint64) ([]
signed = s.getInitSyncBlock(r)
}

if signed == nil || signed.Block == nil {
if signed == nil || signed.IsNil() || signed.Block().IsNil() {
return nil, errors.New("nil block")
}
b := signed.Block
if b.Slot == slot || b.Slot < slot {
b := signed.Block()
if b.Slot() == slot || b.Slot() < slot {
return r[:], nil
}

return s.ancestorByDB(ctx, bytesutil.ToBytes32(b.ParentRoot), slot)
return s.ancestorByDB(ctx, bytesutil.ToBytes32(b.ParentRoot()), slot)
}

// This updates justified check point in store, if the new justified is later than stored justified or
@@ -330,7 +334,7 @@ func (s *Service) ancestorByDB(ctx context.Context, r [32]byte, slot uint64) ([]
//        ancestor_at_finalized_slot = get_ancestor(store, store.justified_checkpoint.root, finalized_slot)
//        if ancestor_at_finalized_slot != store.finalized_checkpoint.root:
//            store.justified_checkpoint = state.current_justified_checkpoint
func (s *Service) finalizedImpliesNewJustified(ctx context.Context, state *stateTrie.BeaconState) error {
func (s *Service) finalizedImpliesNewJustified(ctx context.Context, state state.BeaconState) error {
// Update justified if it's different than the one cached in the store.
if !attestationutil.CheckPointIsEqual(s.justifiedCheckpt, state.CurrentJustifiedCheckpoint()) {
if state.CurrentJustifiedCheckpoint().Epoch > s.justifiedCheckpt.Epoch {
@@ -360,13 +364,13 @@ func (s *Service) finalizedImpliesNewJustified(ctx context.Context, state *state

// This retrieves missing blocks from DB (ie. the blocks that couldn't be received over sync) and inserts them to fork choice store.
// This is useful for block tree visualizer and additional vote accounting.
func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk *ethpb.BeaconBlock,
func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk block.BeaconBlock,
fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
pendingNodes := make([]*ethpb.BeaconBlock, 0)
pendingNodes := make([]block.BeaconBlock, 0)
pendingRoots := make([][32]byte, 0)

parentRoot := bytesutil.ToBytes32(blk.ParentRoot)
slot := blk.Slot
parentRoot := bytesutil.ToBytes32(blk.ParentRoot())
slot := blk.Slot()
// Fork choice only matters from last finalized slot.
fSlot, err := helpers.StartSlot(s.finalizedCheckpt.Epoch)
if err != nil {
@@ -374,17 +378,17 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk *ethpb.
}
higherThanFinalized := slot > fSlot
// As long as parent node is not in fork choice store, and parent node is in DB.
for !s.forkChoiceStore.HasNode(parentRoot) && s.beaconDB.HasBlock(ctx, parentRoot) && higherThanFinalized {
b, err := s.beaconDB.Block(ctx, parentRoot)
for !s.cfg.ForkChoiceStore.HasNode(parentRoot) && s.cfg.BeaconDB.HasBlock(ctx, parentRoot) && higherThanFinalized {
b, err := s.cfg.BeaconDB.Block(ctx, parentRoot)
if err != nil {
return err
}

pendingNodes = append(pendingNodes, b.Block)
pendingNodes = append(pendingNodes, b.Block())
copiedRoot := parentRoot
pendingRoots = append(pendingRoots, copiedRoot)
parentRoot = bytesutil.ToBytes32(b.Block.ParentRoot)
slot = b.Block.Slot
parentRoot = bytesutil.ToBytes32(b.Block().ParentRoot())
slot = b.Block().Slot()
higherThanFinalized = slot > fSlot
}

@@ -393,8 +397,8 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk *ethpb.
for i := len(pendingNodes) - 1; i >= 0; i-- {
b := pendingNodes[i]
r := pendingRoots[i]
if err := s.forkChoiceStore.ProcessBlock(ctx,
b.Slot, r, bytesutil.ToBytes32(b.ParentRoot), bytesutil.ToBytes32(b.Body.Graffiti),
if err := s.cfg.ForkChoiceStore.ProcessBlock(ctx,
b.Slot(), r, bytesutil.ToBytes32(b.ParentRoot()), bytesutil.ToBytes32(b.Body().Graffiti()),
jCheckpoint.Epoch,
fCheckpoint.Epoch); err != nil {
return errors.Wrap(err, "could not process block for proto array fork choice")
@@ -404,15 +408,37 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk *ethpb.
return nil
}

// inserts finalized deposits into our finalized deposit trie.
func (s *Service) insertFinalizedDeposits(ctx context.Context, fRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.insertFinalizedDeposits")
defer span.End()

// Update deposit cache.
finalizedState, err := s.cfg.StateGen.StateByRoot(ctx, fRoot)
if err != nil {
return errors.Wrap(err, "could not fetch finalized state")
}
// We update the cache up to the last deposit index in the finalized block's state.
// We can be confident that these deposits will be included in some block
// because the Eth1 follow distance makes such long-range reorgs extremely unlikely.
eth1DepositIndex := int64(finalizedState.Eth1Data().DepositCount - 1)
s.cfg.DepositCache.InsertFinalizedDeposits(ctx, eth1DepositIndex)
// Deposit proofs are only used during state transition and can be safely removed to save space.
if err = s.cfg.DepositCache.PruneProofs(ctx, eth1DepositIndex); err != nil {
return errors.Wrap(err, "could not prune deposit proofs")
}
return nil
}

// The deletes input attestations from the attestation pool, so proposers don't include them in a block for the future.
func (s *Service) deletePoolAtts(atts []*ethpb.Attestation) error {
for _, att := range atts {
if helpers.IsAggregated(att) {
if err := s.attPool.DeleteAggregatedAttestation(att); err != nil {
if err := s.cfg.AttPool.DeleteAggregatedAttestation(att); err != nil {
return err
}
} else {
if err := s.attPool.DeleteUnaggregatedAttestation(att); err != nil {
if err := s.cfg.AttPool.DeleteUnaggregatedAttestation(att); err != nil {
return err
}
}

@@ -3,20 +3,26 @@ package blockchain
import (
"context"
"fmt"
"math/big"
"strconv"
"testing"
"time"

"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
types "github.com/prysmaticlabs/eth2-types"
mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
core "github.com/prysmaticlabs/prysm/beacon-chain/core/state"
"github.com/prysmaticlabs/prysm/beacon-chain/db"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/shared/attestationutil"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
@@ -35,33 +41,34 @@ func TestStore_OnBlock(t *testing.T) {
StateGen: stategen.New(beaconDB),
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
}
service, err := New(ctx, cfg)
service, err := NewService(ctx, cfg)
require.NoError(t, err)
genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
assert.NoError(t, beaconDB.SaveBlock(ctx, genesis))
assert.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
validGenesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
st := testutil.NewBeaconState()
require.NoError(t, service.beaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))
roots, err := blockTree1(beaconDB, validGenesisRoot[:])
st, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))
roots, err := blockTree1(t, beaconDB, validGenesisRoot[:])
require.NoError(t, err)
random := testutil.NewBeaconBlock()
random.Block.Slot = 1
random.Block.ParentRoot = validGenesisRoot[:]
assert.NoError(t, beaconDB.SaveBlock(ctx, random))
assert.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(random)))
randomParentRoot, err := random.Block.HashTreeRoot()
assert.NoError(t, err)
require.NoError(t, service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{Slot: st.Slot(), Root: randomParentRoot[:]}))
require.NoError(t, service.beaconDB.SaveState(ctx, st.Copy(), randomParentRoot))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Slot: st.Slot(), Root: randomParentRoot[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), randomParentRoot))
randomParentRoot2 := roots[1]
require.NoError(t, service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{Slot: st.Slot(), Root: randomParentRoot2}))
require.NoError(t, service.beaconDB.SaveState(ctx, st.Copy(), bytesutil.ToBytes32(randomParentRoot2)))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Slot: st.Slot(), Root: randomParentRoot2}))
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), bytesutil.ToBytes32(randomParentRoot2)))

tests := []struct {
name string
blk *ethpb.SignedBeaconBlock
s *stateTrie.BeaconState
s state.BeaconState
time uint64
wantErrString string
}{
@@ -76,7 +83,7 @@ func TestStore_OnBlock(t *testing.T) {
blk: func() *ethpb.SignedBeaconBlock {
b := testutil.NewBeaconBlock()
b.Block.ParentRoot = randomParentRoot2
b.Block.Slot = params.BeaconConfig().FarFutureEpoch
b.Block.Slot = params.BeaconConfig().FarFutureSlot
return b
}(),
s: st.Copy(),
@@ -115,7 +122,7 @@ func TestStore_OnBlock(t *testing.T) {

root, err := tt.blk.Block.HashTreeRoot()
assert.NoError(t, err)
err = service.onBlock(ctx, tt.blk, root)
err = service.onBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(tt.blk), root)
assert.ErrorContains(t, tt.wantErrString, err)
})
}
@@ -129,45 +136,47 @@ func TestStore_OnBlockBatch(t *testing.T) {
BeaconDB: beaconDB,
StateGen: stategen.New(beaconDB),
}
service, err := New(ctx, cfg)
service, err := NewService(ctx, cfg)
require.NoError(t, err)

genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
assert.NoError(t, beaconDB.SaveBlock(ctx, genesis))
assert.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.finalizedCheckpt = &ethpb.Checkpoint{
Root: gRoot[:],
}
service.forkChoiceStore = protoarray.New(0, 0, [32]byte{})
service.saveInitSyncBlock(gRoot, genesis)
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{})
service.saveInitSyncBlock(gRoot, wrapper.WrappedPhase0SignedBeaconBlock(genesis))

st, keys := testutil.DeterministicGenesisState(t, 64)

bState := st.Copy()

var blks []*ethpb.SignedBeaconBlock
var blks []block.SignedBeaconBlock
var blkRoots [][32]byte
var firstState *stateTrie.BeaconState
var firstState state.BeaconState
for i := 1; i < 10; i++ {
b, err := testutil.GenerateFullBlock(bState, keys, testutil.DefaultBlockGenConfig(), uint64(i))
b, err := testutil.GenerateFullBlock(bState, keys, testutil.DefaultBlockGenConfig(), types.Slot(i))
require.NoError(t, err)
bState, err = state.ExecuteStateTransition(ctx, bState, b)
bState, err = core.ExecuteStateTransition(ctx, bState, wrapper.WrappedPhase0SignedBeaconBlock(b))
require.NoError(t, err)
if i == 1 {
firstState = bState.Copy()
}
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
service.saveInitSyncBlock(root, b)
blks = append(blks, b)
service.saveInitSyncBlock(root, wrapper.WrappedPhase0SignedBeaconBlock(b))
blks = append(blks, wrapper.WrappedPhase0SignedBeaconBlock(b))
blkRoots = append(blkRoots, root)
}

blks[0].Block.ParentRoot = gRoot[:]
rBlock, err := blks[0].PbPhase0Block()
assert.NoError(t, err)
rBlock.Block.ParentRoot = gRoot[:]
require.NoError(t, beaconDB.SaveBlock(context.Background(), blks[0]))
require.NoError(t, service.stateGen.SaveState(ctx, blkRoots[0], firstState))
require.NoError(t, service.cfg.StateGen.SaveState(ctx, blkRoots[0], firstState))
_, _, err = service.onBlockBatch(ctx, blks[1:], blkRoots[1:])
require.NoError(t, err)
}
@@ -179,7 +188,7 @@ func TestRemoveStateSinceLastFinalized_EmptyStartSlot(t *testing.T) {
defer params.UseMainnetConfig()

cfg := &Config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := New(ctx, cfg)
service, err := NewService(ctx, cfg)
require.NoError(t, err)
service.genesisTime = time.Now()

@@ -195,10 +204,10 @@ func TestRemoveStateSinceLastFinalized_EmptyStartSlot(t *testing.T) {
newJustifiedBlk.Block.ParentRoot = bytesutil.PadTo(lastJustifiedRoot[:], 32)
newJustifiedRoot, err := newJustifiedBlk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.beaconDB.SaveBlock(ctx, newJustifiedBlk))
require.NoError(t, service.beaconDB.SaveBlock(ctx, lastJustifiedBlk))
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(newJustifiedBlk)))
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(lastJustifiedBlk)))

diff := (params.BeaconConfig().SlotsPerEpoch - 1) * params.BeaconConfig().SecondsPerSlot
diff := params.BeaconConfig().SlotsPerEpoch.Sub(1).Mul(params.BeaconConfig().SecondsPerSlot)
service.genesisTime = time.Unix(time.Now().Unix()-int64(diff), 0)
service.justifiedCheckpt = &ethpb.Checkpoint{Root: lastJustifiedRoot[:]}
update, err = service.shouldUpdateCurrentJustified(ctx, &ethpb.Checkpoint{Root: newJustifiedRoot[:]})
@@ -213,7 +222,7 @@ func TestShouldUpdateJustified_ReturnFalse(t *testing.T) {
defer params.UseMainnetConfig()

cfg := &Config{BeaconDB: beaconDB}
service, err := New(ctx, cfg)
service, err := NewService(ctx, cfg)
require.NoError(t, err)
lastJustifiedBlk := testutil.NewBeaconBlock()
lastJustifiedBlk.Block.ParentRoot = bytesutil.PadTo([]byte{'G'}, 32)
@@ -223,10 +232,10 @@ func TestShouldUpdateJustified_ReturnFalse(t *testing.T) {
newJustifiedBlk.Block.ParentRoot = bytesutil.PadTo(lastJustifiedRoot[:], 32)
newJustifiedRoot, err := newJustifiedBlk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.beaconDB.SaveBlock(ctx, newJustifiedBlk))
require.NoError(t, service.beaconDB.SaveBlock(ctx, lastJustifiedBlk))
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(newJustifiedBlk)))
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(lastJustifiedBlk)))

diff := (params.BeaconConfig().SlotsPerEpoch - 1) * params.BeaconConfig().SecondsPerSlot
diff := params.BeaconConfig().SlotsPerEpoch.Sub(1).Mul(params.BeaconConfig().SecondsPerSlot)
service.genesisTime = time.Unix(time.Now().Unix()-int64(diff), 0)
service.justifiedCheckpt = &ethpb.Checkpoint{Root: lastJustifiedRoot[:]}

@@ -243,29 +252,29 @@ func TestCachedPreState_CanGetFromStateSummary(t *testing.T) {
BeaconDB: beaconDB,
StateGen: stategen.New(beaconDB),
}
service, err := New(ctx, cfg)
service, err := NewService(ctx, cfg)
require.NoError(t, err)

s, err := stateTrie.InitializeFromProto(&pb.BeaconState{Slot: 1, GenesisValidatorsRoot: params.BeaconConfig().ZeroHash[:]})
s, err := v1.InitializeFromProto(&ethpb.BeaconState{Slot: 1, GenesisValidatorsRoot: params.BeaconConfig().ZeroHash[:]})
require.NoError(t, err)

genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
assert.NoError(t, beaconDB.SaveBlock(ctx, genesis))
assert.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.finalizedCheckpt = &ethpb.Checkpoint{
Root: gRoot[:],
}
service.forkChoiceStore = protoarray.New(0, 0, [32]byte{})
service.saveInitSyncBlock(gRoot, genesis)
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{})
service.saveInitSyncBlock(gRoot, wrapper.WrappedPhase0SignedBeaconBlock(genesis))

b := testutil.NewBeaconBlock()
b.Block.Slot = 1
b.Block.ParentRoot = gRoot[:]
require.NoError(t, service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{Slot: 1, Root: gRoot[:]}))
require.NoError(t, service.stateGen.SaveState(ctx, gRoot, s))
require.NoError(t, service.verifyBlkPreState(ctx, b.Block))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Slot: 1, Root: gRoot[:]}))
require.NoError(t, service.cfg.StateGen.SaveState(ctx, gRoot, s))
require.NoError(t, service.verifyBlkPreState(ctx, wrapper.WrappedPhase0BeaconBlock(b.Block)))
}

func TestCachedPreState_CanGetFromDB(t *testing.T) {
@@ -276,33 +285,33 @@ func TestCachedPreState_CanGetFromDB(t *testing.T) {
BeaconDB: beaconDB,
StateGen: stategen.New(beaconDB),
}
service, err := New(ctx, cfg)
service, err := NewService(ctx, cfg)
require.NoError(t, err)

genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
assert.NoError(t, beaconDB.SaveBlock(ctx, genesis))
assert.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
service.finalizedCheckpt = &ethpb.Checkpoint{
Root: gRoot[:],
}
service.forkChoiceStore = protoarray.New(0, 0, [32]byte{})
service.saveInitSyncBlock(gRoot, genesis)
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{})
service.saveInitSyncBlock(gRoot, wrapper.WrappedPhase0SignedBeaconBlock(genesis))

b := testutil.NewBeaconBlock()
b.Block.Slot = 1
service.finalizedCheckpt = &ethpb.Checkpoint{Root: gRoot[:]}
err = service.verifyBlkPreState(ctx, b.Block)
err = service.verifyBlkPreState(ctx, wrapper.WrappedPhase0BeaconBlock(b.Block))
wanted := "could not reconstruct parent state"
assert.ErrorContains(t, wanted, err)

b.Block.ParentRoot = gRoot[:]
s, err := stateTrie.InitializeFromProto(&pb.BeaconState{Slot: 1})
s, err := v1.InitializeFromProto(&ethpb.BeaconState{Slot: 1})
require.NoError(t, err)
require.NoError(t, service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{Slot: 1, Root: gRoot[:]}))
require.NoError(t, service.stateGen.SaveState(ctx, gRoot, s))
require.NoError(t, service.verifyBlkPreState(ctx, b.Block))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Slot: 1, Root: gRoot[:]}))
require.NoError(t, service.cfg.StateGen.SaveState(ctx, gRoot, s))
require.NoError(t, service.verifyBlkPreState(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b).Block()))
}

func TestUpdateJustified_CouldUpdateBest(t *testing.T) {
@@ -310,20 +319,22 @@ func TestUpdateJustified_CouldUpdateBest(t *testing.T) {
beaconDB := testDB.SetupDB(t)

cfg := &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)}
service, err := New(ctx, cfg)
service, err := NewService(ctx, cfg)
require.NoError(t, err)

signedBlock := testutil.NewBeaconBlock()
require.NoError(t, beaconDB.SaveBlock(ctx, signedBlock))
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(signedBlock)))
r, err := signedBlock.Block.HashTreeRoot()
require.NoError(t, err)
service.justifiedCheckpt = &ethpb.Checkpoint{Root: []byte{'A'}}
service.bestJustifiedCheckpt = &ethpb.Checkpoint{Root: []byte{'A'}}
st := testutil.NewBeaconState()
st, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, st.Copy(), r))

// Could update
s := testutil.NewBeaconState()
s, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.SetCurrentJustifiedCheckpoint(&ethpb.Checkpoint{Epoch: 1, Root: r[:]}))
require.NoError(t, service.updateJustified(context.Background(), s))

@@ -333,7 +344,7 @@ func TestUpdateJustified_CouldUpdateBest(t *testing.T) {
service.bestJustifiedCheckpt.Epoch = 2
require.NoError(t, service.updateJustified(context.Background(), s))

assert.Equal(t, uint64(2), service.bestJustifiedCheckpt.Epoch, "Incorrect justified epoch in service")
assert.Equal(t, types.Epoch(2), service.bestJustifiedCheckpt.Epoch, "Incorrect justified epoch in service")
}

func TestFillForkChoiceMissingBlocks_CanSave(t *testing.T) {
@@ -341,20 +352,21 @@ func TestFillForkChoiceMissingBlocks_CanSave(t *testing.T) {
beaconDB := testDB.SetupDB(t)

cfg := &Config{BeaconDB: beaconDB}
service, err := New(ctx, cfg)
service, err := NewService(ctx, cfg)
require.NoError(t, err)
service.forkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
service.finalizedCheckpt = &ethpb.Checkpoint{Root: make([]byte, 32)}

genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
require.NoError(t, beaconDB.SaveBlock(ctx, genesis))
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
validGenesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
st := testutil.NewBeaconState()
st, err := testutil.NewBeaconState()
require.NoError(t, err)

require.NoError(t, service.beaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))
roots, err := blockTree1(beaconDB, validGenesisRoot[:])
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))
roots, err := blockTree1(t, beaconDB, validGenesisRoot[:])
require.NoError(t, err)

beaconState, _ := testutil.DeterministicGenesisState(t, 32)
@@ -363,14 +375,14 @@ func TestFillForkChoiceMissingBlocks_CanSave(t *testing.T) {
block.Block.ParentRoot = roots[8]

err = service.fillInForkChoiceMissingBlocks(
context.Background(), block.Block, beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
require.NoError(t, err)

// 5 nodes from the block tree 1. B0 - B3 - B4 - B6 - B8
assert.Equal(t, 5, len(service.forkChoiceStore.Nodes()), "Miss match nodes")
assert.Equal(t, true, service.forkChoiceStore.HasNode(bytesutil.ToBytes32(roots[4])), "Didn't save node")
assert.Equal(t, true, service.forkChoiceStore.HasNode(bytesutil.ToBytes32(roots[6])), "Didn't save node")
assert.Equal(t, true, service.forkChoiceStore.HasNode(bytesutil.ToBytes32(roots[8])), "Didn't save node")
assert.Equal(t, 5, len(service.cfg.ForkChoiceStore.Nodes()), "Miss match nodes")
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(roots[4])), "Didn't save node")
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(roots[6])), "Didn't save node")
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(roots[8])), "Didn't save node")
}

func TestFillForkChoiceMissingBlocks_RootsMatch(t *testing.T) {
@@ -378,20 +390,21 @@ func TestFillForkChoiceMissingBlocks_RootsMatch(t *testing.T) {
beaconDB := testDB.SetupDB(t)

cfg := &Config{BeaconDB: beaconDB}
service, err := New(ctx, cfg)
service, err := NewService(ctx, cfg)
require.NoError(t, err)
service.forkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
service.finalizedCheckpt = &ethpb.Checkpoint{Root: make([]byte, 32)}

genesisStateRoot := [32]byte{}
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
require.NoError(t, beaconDB.SaveBlock(ctx, genesis))
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
validGenesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
st := testutil.NewBeaconState()
st, err := testutil.NewBeaconState()
require.NoError(t, err)

require.NoError(t, service.beaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))
roots, err := blockTree1(beaconDB, validGenesisRoot[:])
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))
roots, err := blockTree1(t, beaconDB, validGenesisRoot[:])
require.NoError(t, err)

beaconState, _ := testutil.DeterministicGenesisState(t, 32)
@@ -400,16 +413,16 @@ func TestFillForkChoiceMissingBlocks_RootsMatch(t *testing.T) {
block.Block.ParentRoot = roots[8]

err = service.fillInForkChoiceMissingBlocks(
context.Background(), block.Block, beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
require.NoError(t, err)

// 5 nodes from the block tree 1. B0 - B3 - B4 - B6 - B8
assert.Equal(t, 5, len(service.forkChoiceStore.Nodes()), "Miss match nodes")
assert.Equal(t, 5, len(service.cfg.ForkChoiceStore.Nodes()), "Miss match nodes")
// Ensure all roots and their respective blocks exist.
wantedRoots := [][]byte{roots[0], roots[3], roots[4], roots[6], roots[8]}
for i, rt := range wantedRoots {
assert.Equal(t, true, service.forkChoiceStore.HasNode(bytesutil.ToBytes32(rt)), fmt.Sprintf("Didn't save node: %d", i))
assert.Equal(t, true, service.beaconDB.HasBlock(context.Background(), bytesutil.ToBytes32(rt)))
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(bytesutil.ToBytes32(rt)), fmt.Sprintf("Didn't save node: %d", i))
assert.Equal(t, true, service.cfg.BeaconDB.HasBlock(context.Background(), bytesutil.ToBytes32(rt)))
}
}

@@ -418,48 +431,49 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
cfg := &Config{BeaconDB: beaconDB}
|
||||
service, err := New(ctx, cfg)
|
||||
service, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
service.forkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
|
||||
service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'A'})
|
||||
// Set finalized epoch to 1.
|
||||
service.finalizedCheckpt = ðpb.Checkpoint{Epoch: 1}
|
||||
|
||||
genesisStateRoot := [32]byte{}
|
||||
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
|
||||
assert.NoError(t, beaconDB.SaveBlock(ctx, genesis))
|
||||
assert.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
|
||||
validGenesisRoot, err := genesis.Block.HashTreeRoot()
|
||||
assert.NoError(t, err)
|
||||
st := testutil.NewBeaconState()
|
||||
st, err := testutil.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, service.beaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st.Copy(), validGenesisRoot))
|
||||
|
||||
// Define a tree branch, slot 63 <- 64 <- 65
|
||||
b63 := testutil.NewBeaconBlock()
|
||||
b63.Block.Slot = 63
|
||||
require.NoError(t, service.beaconDB.SaveBlock(ctx, b63))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b63)))
|
||||
r63, err := b63.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
b64 := testutil.NewBeaconBlock()
|
||||
b64.Block.Slot = 64
|
||||
b64.Block.ParentRoot = r63[:]
|
||||
require.NoError(t, service.beaconDB.SaveBlock(ctx, b64))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b64)))
|
||||
r64, err := b64.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
b65 := testutil.NewBeaconBlock()
|
||||
b65.Block.Slot = 65
|
||||
b65.Block.ParentRoot = r64[:]
|
||||
require.NoError(t, service.beaconDB.SaveBlock(ctx, b65))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b65)))
|
||||
|
||||
beaconState, _ := testutil.DeterministicGenesisState(t, 32)
|
||||
err = service.fillInForkChoiceMissingBlocks(
|
||||
context.Background(), b65.Block, beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
|
||||
context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(b65).Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
|
||||
require.NoError(t, err)
|
||||
|
||||
// There should be 2 nodes, block 65 and block 64.
|
||||
assert.Equal(t, 2, len(service.forkChoiceStore.Nodes()), "Miss match nodes")
|
||||
assert.Equal(t, 2, len(service.cfg.ForkChoiceStore.Nodes()), "Miss match nodes")
|
||||
|
||||
// Block with slot 63 should be in fork choice because it's less than finalized epoch 1.
|
||||
assert.Equal(t, true, service.forkChoiceStore.HasNode(r63), "Didn't save node")
|
||||
assert.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(r63), "Didn't save node")
|
||||
}
|
||||
|
||||
// blockTree1 constructs the following tree:
|
||||
@@ -467,7 +481,7 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized(t *testing.T) {
// B0 /- B5 - B7
// \- B3 - B4 - B6 - B8
// (B1, and B3 are all from the same slots)
func blockTree1(beaconDB db.Database, genesisRoot []byte) ([][]byte, error) {
func blockTree1(t *testing.T, beaconDB db.Database, genesisRoot []byte) ([][]byte, error) {
genesisRoot = bytesutil.PadTo(genesisRoot, 32)
b0 := testutil.NewBeaconBlock()
b0.Block.Slot = 0
@@ -525,13 +539,14 @@ func blockTree1(beaconDB db.Database, genesisRoot []byte) ([][]byte, error) {
if err != nil {
return nil, err
}
st := testutil.NewBeaconState()
st, err := testutil.NewBeaconState()
require.NoError(t, err)

for _, b := range []*ethpb.SignedBeaconBlock{b0, b1, b3, b4, b5, b6, b7, b8} {
beaconBlock := testutil.NewBeaconBlock()
beaconBlock.Block.Slot = b.Block.Slot
beaconBlock.Block.ParentRoot = bytesutil.PadTo(b.Block.ParentRoot, 32)
if err := beaconDB.SaveBlock(context.Background(), beaconBlock); err != nil {
if err := beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(beaconBlock)); err != nil {
return nil, err
}
if err := beaconDB.SaveState(context.Background(), st.Copy(), bytesutil.ToBytes32(beaconBlock.Block.ParentRoot)); err != nil {
@@ -554,11 +569,11 @@ func TestCurrentSlot_HandlesOverflow(t *testing.T) {
svc := Service{genesisTime: timeutils.Now().Add(1 * time.Hour)}

slot := svc.CurrentSlot()
require.Equal(t, uint64(0), slot, "Unexpected slot")
require.Equal(t, types.Slot(0), slot, "Unexpected slot")
}
func TestAncestorByDB_CtxErr(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
service, err := New(ctx, &Config{})
service, err := NewService(ctx, &Config{})
require.NoError(t, err)

cancel()
@@ -571,7 +586,7 @@ func TestAncestor_HandleSkipSlot(t *testing.T) {
beaconDB := testDB.SetupDB(t)

cfg := &Config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := New(ctx, cfg)
service, err := NewService(ctx, cfg)
require.NoError(t, err)

b1 := testutil.NewBeaconBlock()
@@ -593,7 +608,7 @@ func TestAncestor_HandleSkipSlot(t *testing.T) {
beaconBlock := testutil.NewBeaconBlock()
beaconBlock.Block.Slot = b.Block.Slot
beaconBlock.Block.ParentRoot = bytesutil.PadTo(b.Block.ParentRoot, 32)
require.NoError(t, beaconDB.SaveBlock(context.Background(), beaconBlock))
require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(beaconBlock)))
}

// Slots 100 to 200 are skip slots. Requesting root at 150 will yield root at 100. The last physical block.
@@ -614,7 +629,7 @@ func TestAncestor_HandleSkipSlot(t *testing.T) {
func TestAncestor_CanUseForkchoice(t *testing.T) {
ctx := context.Background()
cfg := &Config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := New(ctx, cfg)
service, err := NewService(ctx, cfg)
require.NoError(t, err)

b1 := testutil.NewBeaconBlock()
@@ -638,7 +653,7 @@ func TestAncestor_CanUseForkchoice(t *testing.T) {
beaconBlock.Block.ParentRoot = bytesutil.PadTo(b.Block.ParentRoot, 32)
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.forkChoiceStore.ProcessBlock(context.Background(), b.Block.Slot, r, bytesutil.ToBytes32(b.Block.ParentRoot), [32]byte{}, 0, 0)) // Saves blocks to fork choice store.
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(context.Background(), b.Block.Slot, r, bytesutil.ToBytes32(b.Block.ParentRoot), [32]byte{}, 0, 0)) // Saves blocks to fork choice store.
}

r, err := service.ancestor(context.Background(), r200[:], 150)
@@ -653,7 +668,7 @@ func TestAncestor_CanUseDB(t *testing.T) {
beaconDB := testDB.SetupDB(t)

cfg := &Config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := New(ctx, cfg)
service, err := NewService(ctx, cfg)
require.NoError(t, err)

b1 := testutil.NewBeaconBlock()
@@ -675,10 +690,10 @@ func TestAncestor_CanUseDB(t *testing.T) {
beaconBlock := testutil.NewBeaconBlock()
beaconBlock.Block.Slot = b.Block.Slot
beaconBlock.Block.ParentRoot = bytesutil.PadTo(b.Block.ParentRoot, 32)
require.NoError(t, beaconDB.SaveBlock(context.Background(), beaconBlock)) // Saves blocks to DB.
require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(beaconBlock))) // Saves blocks to DB.
}

require.NoError(t, service.forkChoiceStore.ProcessBlock(context.Background(), 200, r200, r200, [32]byte{}, 0, 0))
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(context.Background(), 200, r200, r200, [32]byte{}, 0, 0))

r, err := service.ancestor(context.Background(), r200[:], 150)
require.NoError(t, err)
@@ -690,7 +705,7 @@ func TestAncestor_CanUseDB(t *testing.T) {
func TestEnsureRootNotZeroHashes(t *testing.T) {
ctx := context.Background()
cfg := &Config{}
service, err := New(ctx, cfg)
service, err := NewService(ctx, cfg)
require.NoError(t, err)
service.genesisRoot = [32]byte{'a'}

@@ -741,14 +756,16 @@ func TestFinalizedImpliesNewJustified(t *testing.T) {
},
}
for _, test := range tests {
beaconState := testutil.NewBeaconState()
beaconState, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(test.args.stateCheckPoint))
service, err := New(ctx, &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB), ForkChoiceStore: protoarray.New(0, 0, [32]byte{})})
service, err := NewService(ctx, &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB), ForkChoiceStore: protoarray.New(0, 0, [32]byte{})})
require.NoError(t, err)
service.justifiedCheckpt = test.args.cachedCheckPoint
require.NoError(t, service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{Root: bytesutil.PadTo(test.want.Root, 32)}))
genesisState := testutil.NewBeaconState()
require.NoError(t, service.beaconDB.SaveState(ctx, genesisState, bytesutil.ToBytes32(test.want.Root)))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bytesutil.PadTo(test.want.Root, 32)}))
genesisState, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, genesisState, bytesutil.ToBytes32(test.want.Root)))

if test.args.diffFinalizedCheckPoint {
b1 := testutil.NewBeaconBlock()
@@ -765,7 +782,7 @@ func TestFinalizedImpliesNewJustified(t *testing.T) {
beaconBlock := testutil.NewBeaconBlock()
beaconBlock.Block.Slot = b.Block.Slot
beaconBlock.Block.ParentRoot = bytesutil.PadTo(b.Block.ParentRoot, 32)
require.NoError(t, service.beaconDB.SaveBlock(context.Background(), beaconBlock))
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(beaconBlock)))
}
service.finalizedCheckpt = &ethpb.Checkpoint{Root: []byte{'c'}, Epoch: 1}
service.justifiedCheckpt.Root = r100[:]
@@ -784,14 +801,14 @@ func TestVerifyBlkDescendant(t *testing.T) {
b.Block.Slot = 1
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, b))
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b)))

b1 := testutil.NewBeaconBlock()
b1.Block.Slot = 1
b1.Block.Body.Graffiti = bytesutil.PadTo([]byte{'a'}, 32)
r1, err := b1.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, b1))
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b1)))

type args struct {
parentRoot [32]byte
@@ -834,7 +851,7 @@ func TestVerifyBlkDescendant(t *testing.T) {
},
}
for _, tt := range tests {
service, err := New(ctx, &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB), ForkChoiceStore: protoarray.New(0, 0, [32]byte{})})
service, err := NewService(ctx, &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB), ForkChoiceStore: protoarray.New(0, 0, [32]byte{})})
require.NoError(t, err)
service.finalizedCheckpt = &ethpb.Checkpoint{
Root: tt.args.finalizedRoot[:],
@@ -852,17 +869,17 @@ func TestUpdateJustifiedInitSync(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
cfg := &Config{BeaconDB: beaconDB}
service, err := New(ctx, cfg)
service, err := NewService(ctx, cfg)
require.NoError(t, err)

gBlk := testutil.NewBeaconBlock()
gRoot, err := gBlk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.beaconDB.SaveBlock(ctx, gBlk))
require.NoError(t, service.beaconDB.SaveGenesisBlockRoot(ctx, gRoot))
require.NoError(t, service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{Root: gRoot[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(gBlk)))
require.NoError(t, service.cfg.BeaconDB.SaveGenesisBlockRoot(ctx, gRoot))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: gRoot[:]}))
beaconState, _ := testutil.DeterministicGenesisState(t, 32)
require.NoError(t, service.beaconDB.SaveState(ctx, beaconState, gRoot))
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, beaconState, gRoot))
service.genesisRoot = gRoot
currentCp := &ethpb.Checkpoint{Epoch: 1}
service.justifiedCheckpt = currentCp
@@ -870,29 +887,31 @@ func TestUpdateJustifiedInitSync(t *testing.T) {

require.NoError(t, service.updateJustifiedInitSync(ctx, newCp))

assert.DeepEqual(t, currentCp, service.prevJustifiedCheckpt, "Incorrect previous justified checkpoint")
assert.DeepEqual(t, newCp, service.CurrentJustifiedCheckpt(), "Incorrect current justified checkpoint in cache")
cp, err := service.beaconDB.JustifiedCheckpoint(ctx)
assert.DeepSSZEqual(t, currentCp, service.prevJustifiedCheckpt, "Incorrect previous justified checkpoint")
assert.DeepSSZEqual(t, newCp, service.CurrentJustifiedCheckpt(), "Incorrect current justified checkpoint in cache")
cp, err := service.cfg.BeaconDB.JustifiedCheckpoint(ctx)
require.NoError(t, err)
assert.DeepEqual(t, newCp, cp, "Incorrect current justified checkpoint in db")
assert.DeepSSZEqual(t, newCp, cp, "Incorrect current justified checkpoint in db")
}

func TestHandleEpochBoundary_BadMetrics(t *testing.T) {
ctx := context.Background()
cfg := &Config{}
service, err := New(ctx, cfg)
service, err := NewService(ctx, cfg)
require.NoError(t, err)

s := testutil.NewBeaconState()
s, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.SetSlot(1))
service.head = &head{}
service.head = &head{state: (*v1.BeaconState)(nil)}

require.ErrorContains(t, "failed to initialize precompute: nil inner state", service.handleEpochBoundary(ctx, s))
}

func TestHandleEpochBoundary_UpdateFirstSlot(t *testing.T) {
ctx := context.Background()
cfg := &Config{}
service, err := New(ctx, cfg)
service, err := NewService(ctx, cfg)
require.NoError(t, err)

s, _ := testutil.DeterministicGenesisState(t, 1024)
@@ -912,28 +931,72 @@ func TestOnBlock_CanFinalize(t *testing.T) {
StateGen: stategen.New(beaconDB),
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
DepositCache: depositCache,
StateNotifier: &mock.MockStateNotifier{},
}
service, err := New(ctx, cfg)
service, err := NewService(ctx, cfg)
require.NoError(t, err)

gs, keys := testutil.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
gBlk, err := service.beaconDB.GenesisBlock(ctx)
gBlk, err := service.cfg.BeaconDB.GenesisBlock(ctx)
require.NoError(t, err)
gRoot, err := gBlk.Block.HashTreeRoot()
gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
service.finalizedCheckpt = &ethpb.Checkpoint{Root: gRoot[:]}

testState := gs.Copy()
for i := uint64(1); i <= 4*params.BeaconConfig().SlotsPerEpoch; i++ {
for i := types.Slot(1); i <= 4*params.BeaconConfig().SlotsPerEpoch; i++ {
blk, err := testutil.GenerateFullBlock(testState, keys, testutil.DefaultBlockGenConfig(), i)
require.NoError(t, err)
r, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.onBlock(ctx, blk, r))
testState, err = service.stateGen.StateByRoot(ctx, r)
require.NoError(t, service.onBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(blk), r))
testState, err = service.cfg.StateGen.StateByRoot(ctx, r)
require.NoError(t, err)
}
require.Equal(t, uint64(3), service.CurrentJustifiedCheckpt().Epoch)
require.Equal(t, uint64(2), service.FinalizedCheckpt().Epoch)
require.Equal(t, types.Epoch(3), service.CurrentJustifiedCheckpt().Epoch)
require.Equal(t, types.Epoch(2), service.FinalizedCheckpt().Epoch)
}

func TestInsertFinalizedDeposits(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
depositCache, err := depositcache.New()
require.NoError(t, err)
cfg := &Config{
BeaconDB: beaconDB,
StateGen: stategen.New(beaconDB),
ForkChoiceStore: protoarray.New(0, 0, [32]byte{}),
DepositCache: depositCache,
}
service, err := NewService(ctx, cfg)
require.NoError(t, err)

gs, _ := testutil.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
gBlk, err := service.cfg.BeaconDB.GenesisBlock(ctx)
require.NoError(t, err)
gRoot, err := gBlk.Block().HashTreeRoot()
require.NoError(t, err)
service.finalizedCheckpt = &ethpb.Checkpoint{Root: gRoot[:]}
gs = gs.Copy()
assert.NoError(t, gs.SetEth1Data(&ethpb.Eth1Data{DepositCount: 10}))
assert.NoError(t, service.cfg.StateGen.SaveState(ctx, [32]byte{'m', 'o', 'c', 'k'}, gs))
zeroSig := [96]byte{}
for i := uint64(0); i < uint64(4*params.BeaconConfig().SlotsPerEpoch); i++ {
root := []byte(strconv.Itoa(int(i)))
assert.NoError(t, depositCache.InsertDeposit(ctx, &ethpb.Deposit{Data: &ethpb.Deposit_Data{
PublicKey: bytesutil.FromBytes48([48]byte{}),
WithdrawalCredentials: params.BeaconConfig().ZeroHash[:],
Amount: 0,
Signature: zeroSig[:],
}, Proof: [][]byte{root}}, 100+i, int64(i), bytesutil.ToBytes32(root)))
}
assert.NoError(t, service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k'}))
fDeposits := depositCache.FinalizedDeposits(ctx)
assert.Equal(t, 9, int(fDeposits.MerkleTrieIndex), "Finalized deposits not inserted correctly")
deps := depositCache.AllDeposits(ctx, big.NewInt(109))
for _, d := range deps {
assert.DeepEqual(t, [][]byte(nil), d.Proof, "Proofs are not empty")
}
}
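The new TestInsertFinalizedDeposits above checks the bookkeeping that once a block is finalized, deposits up to the state's eth1 deposit count are finalized too: the cache records the highest finalized Merkle trie index and drops the inclusion proofs of those deposits, since they never need to be re-proven. A minimal standalone sketch of that idea, with invented types standing in for Prysm's depositcache:

```go
package main

import "fmt"

// deposit is a stand-in for a cached deposit with its Merkle proof.
type deposit struct {
	index int64
	proof [][]byte
}

// depositCache tracks deposits plus the highest finalized trie index.
type depositCache struct {
	deposits           []deposit
	finalizedTrieIndex int64
}

// insertFinalizedDeposits marks everything below eth1DepositCount as finalized
// and prunes the now-unneeded proofs, mirroring what the test asserts.
func (c *depositCache) insertFinalizedDeposits(eth1DepositCount int64) {
	c.finalizedTrieIndex = eth1DepositCount - 1
	for i := range c.deposits {
		if c.deposits[i].index <= c.finalizedTrieIndex {
			c.deposits[i].proof = nil
		}
	}
}

func main() {
	c := &depositCache{}
	for i := int64(0); i < 10; i++ {
		c.deposits = append(c.deposits, deposit{index: i, proof: [][]byte{{byte(i)}}})
	}
	c.insertFinalizedDeposits(10) // the state reports a deposit count of 10
	fmt.Println("finalized trie index:", c.finalizedTrieIndex) // 9, as in the test
	fmt.Println("first proof pruned:", c.deposits[0].proof == nil)
}
```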
@@ -7,10 +7,10 @@ import (
"time"

"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/slotutil"
@@ -21,7 +21,7 @@ import (
// AttestationReceiver interface defines the methods of chain service receive and processing new attestations.
type AttestationReceiver interface {
ReceiveAttestationNoPubsub(ctx context.Context, att *ethpb.Attestation) error
AttestationPreState(ctx context.Context, att *ethpb.Attestation) (*state.BeaconState, error)
AttestationPreState(ctx context.Context, att *ethpb.Attestation) (state.BeaconState, error)
VerifyLmdFfgConsistency(ctx context.Context, att *ethpb.Attestation) error
VerifyFinalizedConsistency(ctx context.Context, root []byte) error
}
|
||||
return errors.Wrap(err, "could not process attestation")
|
||||
}
|
||||
|
||||
if err := s.updateHead(ctx, s.getJustifiedBalances()); err != nil {
|
||||
log.Warnf("Resolving fork due to new attestation: %v", err)
|
||||
return nil
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// AttestationPreState returns the pre state of attestation.
|
||||
func (s *Service) AttestationPreState(ctx context.Context, att *ethpb.Attestation) (*state.BeaconState, error) {
|
||||
func (s *Service) AttestationPreState(ctx context.Context, att *ethpb.Attestation) (state.BeaconState, error) {
|
||||
ss, err := helpers.StartSlot(att.Data.Target.Epoch)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -81,7 +76,7 @@ func (s *Service) VerifyLmdFfgConsistency(ctx context.Context, a *ethpb.Attestat
|
||||
func (s *Service) VerifyFinalizedConsistency(ctx context.Context, root []byte) error {
|
||||
// A canonical root implies the root to has an ancestor that aligns with finalized check point.
|
||||
// In this case, we could exit early to save on additional computation.
|
||||
if s.forkChoiceStore.IsCanonical(bytesutil.ToBytes32(root)) {
|
||||
if s.cfg.ForkChoiceStore.IsCanonical(bytesutil.ToBytes32(root)) {
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -102,10 +97,10 @@ func (s *Service) VerifyFinalizedConsistency(ctx context.Context, root []byte) e
|
||||
}
|
||||
|
||||
// This routine processes fork choice attestations from the pool to account for validator votes and fork choice.
|
||||
func (s *Service) processAttestationsRoutine(subscribedToStateEvents chan struct{}) {
|
||||
func (s *Service) processAttestationsRoutine(subscribedToStateEvents chan<- struct{}) {
|
||||
// Wait for state to be initialized.
|
||||
stateChannel := make(chan *feed.Event, 1)
|
||||
stateSub := s.stateNotifier.StateFeed().Subscribe(stateChannel)
|
||||
stateSub := s.cfg.StateNotifier.StateFeed().Subscribe(stateChannel)
|
||||
subscribedToStateEvents <- struct{}{}
|
||||
<-stateChannel
|
||||
stateSub.Unsubscribe()
|
||||
@@ -124,14 +119,22 @@ func (s *Service) processAttestationsRoutine(subscribedToStateEvents chan struct
|
||||
case <-s.ctx.Done():
|
||||
return
|
||||
case <-st.C():
|
||||
// Continue when there's no fork choice attestation, there's nothing to process and update head.
|
||||
// This covers the condition when the node is still initial syncing to the head of the chain.
|
||||
if s.cfg.AttPool.ForkchoiceAttestationCount() == 0 {
|
||||
continue
|
||||
}
|
||||
s.processAttestations(s.ctx)
|
||||
if err := s.updateHead(s.ctx, s.getJustifiedBalances()); err != nil {
|
||||
log.Warnf("Resolving fork due to new attestation: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// This processes fork choice attestations from the pool to account for validator votes and fork choice.
|
||||
func (s *Service) processAttestations(ctx context.Context) {
|
||||
atts := s.attPool.ForkchoiceAttestations()
|
||||
atts := s.cfg.AttPool.ForkchoiceAttestations()
|
||||
for _, a := range atts {
|
||||
// Based on the spec, don't process the attestation until the subsequent slot.
|
||||
// This delays consideration in the fork choice until their slot is in the past.
|
||||
@@ -141,13 +144,13 @@ func (s *Service) processAttestations(ctx context.Context) {
|
||||
continue
|
||||
}
|
||||
|
||||
hasState := s.beaconDB.HasStateSummary(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot))
|
||||
hasState := s.cfg.BeaconDB.HasStateSummary(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot))
|
||||
hasBlock := s.hasBlock(ctx, bytesutil.ToBytes32(a.Data.BeaconBlockRoot))
|
||||
if !(hasState && hasBlock) {
|
||||
continue
|
||||
}
|
||||
|
||||
if err := s.attPool.DeleteForkchoiceAttestation(a); err != nil {
|
||||
if err := s.cfg.AttPool.DeleteForkchoiceAttestation(a); err != nil {
|
||||
log.WithError(err).Error("Could not delete fork choice attestation in pool")
|
||||
}
|
||||
|
||||
|
||||
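The reworked routine above moves the head update out of ReceiveAttestationNoPubsub and into the per-slot loop, and it now checks ForkchoiceAttestationCount() before doing any work, so an idle or still-syncing node skips the expensive path entirely. A minimal standalone sketch of the same "readiness handshake, then per-slot tick" shape, with invented names standing in for Prysm's state notifier and attestation pool:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// processAttestationsLoop waits until ready is signalled, then does work on
// every tick, skipping ticks when the pool is empty (e.g. during initial sync).
func processAttestationsLoop(ctx context.Context, ready <-chan struct{}, pending func() int, process func()) {
	<-ready // wait for the "state initialized" signal before ticking

	ticker := time.NewTicker(100 * time.Millisecond) // stands in for the per-slot ticker
	defer ticker.Stop()

	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			if pending() == 0 {
				continue // nothing to process, nothing to update
			}
			process()
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	ready := make(chan struct{})
	count := 3
	go processAttestationsLoop(ctx, ready, func() int { return count }, func() {
		fmt.Println("processing fork choice attestations, then updating head")
		count = 0
	})

	close(ready) // simulate the state-initialized event
	<-ctx.Done()
}
```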
@@ -5,13 +5,15 @@ import (
"testing"
"time"

ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
@@ -27,7 +29,7 @@ func TestAttestationCheckPtState_FarFutureSlot(t *testing.T) {
chainService := setupBeaconChain(t, beaconDB)
chainService.genesisTime = time.Now()

e := helpers.MaxSlotBuffer/params.BeaconConfig().SlotsPerEpoch + 1
e := types.Epoch(helpers.MaxSlotBuffer/uint64(params.BeaconConfig().SlotsPerEpoch) + 1)
_, err := chainService.AttestationPreState(context.Background(), &ethpb.Attestation{Data: &ethpb.AttestationData{Target: &ethpb.Checkpoint{Epoch: e}}})
require.ErrorContains(t, "exceeds max allowed value relative to the local clock", err)
}
@@ -37,18 +39,18 @@ func TestVerifyLMDFFGConsistent_NotOK(t *testing.T) {
beaconDB := testDB.SetupDB(t)

cfg := &Config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := New(ctx, cfg)
service, err := NewService(ctx, cfg)
require.NoError(t, err)

b32 := testutil.NewBeaconBlock()
b32.Block.Slot = 32
require.NoError(t, service.beaconDB.SaveBlock(ctx, b32))
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b32)))
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)
b33 := testutil.NewBeaconBlock()
b33.Block.Slot = 33
b33.Block.ParentRoot = r32[:]
require.NoError(t, service.beaconDB.SaveBlock(ctx, b33))
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b33)))
r33, err := b33.Block.HashTreeRoot()
require.NoError(t, err)

@@ -65,18 +67,18 @@ func TestVerifyLMDFFGConsistent_OK(t *testing.T) {
beaconDB := testDB.SetupDB(t)

cfg := &Config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})}
service, err := New(ctx, cfg)
service, err := NewService(ctx, cfg)
require.NoError(t, err)

b32 := testutil.NewBeaconBlock()
b32.Block.Slot = 32
require.NoError(t, service.beaconDB.SaveBlock(ctx, b32))
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b32)))
r32, err := b32.Block.HashTreeRoot()
require.NoError(t, err)
b33 := testutil.NewBeaconBlock()
b33.Block.Slot = 33
b33.Block.ParentRoot = r32[:]
require.NoError(t, service.beaconDB.SaveBlock(ctx, b33))
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b33)))
r33, err := b33.Block.HashTreeRoot()
require.NoError(t, err)

@@ -99,7 +101,7 @@ func TestProcessAttestations_Ok(t *testing.T) {
StateGen: stategen.New(beaconDB),
AttPool: attestations.NewPool(),
}
service, err := New(ctx, cfg)
service, err := NewService(ctx, cfg)
service.genesisTime = timeutils.Now().Add(-1 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
require.NoError(t, err)
genesisState, pks := testutil.DeterministicGenesisState(t, 64)
@@ -111,10 +113,10 @@ func TestProcessAttestations_Ok(t *testing.T) {
copied := genesisState.Copy()
copied, err = state.ProcessSlots(ctx, copied, 1)
require.NoError(t, err)
require.NoError(t, service.beaconDB.SaveState(ctx, copied, tRoot))
require.NoError(t, service.forkChoiceStore.ProcessBlock(ctx, 0, tRoot, tRoot, tRoot, 1, 1))
require.NoError(t, service.attPool.SaveForkchoiceAttestations(atts))
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
require.NoError(t, service.cfg.ForkChoiceStore.ProcessBlock(ctx, 0, tRoot, tRoot, tRoot, 1, 1))
require.NoError(t, service.cfg.AttPool.SaveForkchoiceAttestations(atts))
service.processAttestations(ctx)
require.Equal(t, 0, len(service.attPool.ForkchoiceAttestations()))
require.Equal(t, 0, len(service.cfg.AttPool.ForkchoiceAttestations()))
require.LogsDoNotContain(t, hook, "Could not process attestation for fork choice")
}

@@ -4,24 +4,24 @@ import (
"context"

"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/timeutils"
"github.com/prysmaticlabs/prysm/shared/traceutil"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)

// This defines how many epochs since finality the run time will begin to save hot state on to the DB.
var epochsSinceFinalitySaveHotStateDB = 100
var epochsSinceFinalitySaveHotStateDB = types.Epoch(100)

// BlockReceiver interface defines the methods of chain service receive and processing new blocks.
type BlockReceiver interface {
ReceiveBlock(ctx context.Context, block *ethpb.SignedBeaconBlock, blockRoot [32]byte) error
ReceiveBlockInitialSync(ctx context.Context, block *ethpb.SignedBeaconBlock, blockRoot [32]byte) error
ReceiveBlockBatch(ctx context.Context, blocks []*ethpb.SignedBeaconBlock, blkRoots [][32]byte) error
ReceiveBlock(ctx context.Context, block block.SignedBeaconBlock, blockRoot [32]byte) error
ReceiveBlockBatch(ctx context.Context, blocks []block.SignedBeaconBlock, blkRoots [][32]byte) error
HasInitSyncBlock(root [32]byte) bool
}

@@ -30,10 +30,11 @@ type BlockReceiver interface {
// 1. Validate block, apply state transition and update check points
// 2. Apply fork choice to the processed block
// 3. Save latest head info
func (s *Service) ReceiveBlock(ctx context.Context, block *ethpb.SignedBeaconBlock, blockRoot [32]byte) error {
func (s *Service) ReceiveBlock(ctx context.Context, block block.SignedBeaconBlock, blockRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveBlock")
defer span.End()
blockCopy := stateTrie.CopySignedBeaconBlock(block)
receivedTime := timeutils.Now()
blockCopy := block.Copy()

// Apply state transition on the new block.
if err := s.onBlock(ctx, blockCopy, blockRoot); err != nil {
@@ -43,23 +44,24 @@ func (s *Service) ReceiveBlock(ctx context.Context, block *ethpb.SignedBeaconBlo
}

// Update and save head block after fork choice.
if err := s.updateHead(ctx, s.getJustifiedBalances()); err != nil {
log.WithError(err).Warn("Could not update head")
if !featureconfig.Get().UpdateHeadTimely {
if err := s.updateHead(ctx, s.getJustifiedBalances()); err != nil {
log.WithError(err).Warn("Could not update head")
}
// Send notification of the processed block to the state feed.
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.BlockProcessed,
Data: &statefeed.BlockProcessedData{
Slot: blockCopy.Block().Slot(),
BlockRoot: blockRoot,
SignedBlock: blockCopy,
Verified: true,
},
})
}

// Send notification of the processed block to the state feed.
s.stateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.BlockProcessed,
Data: &statefeed.BlockProcessedData{
Slot: blockCopy.Block.Slot,
BlockRoot: blockRoot,
SignedBlock: blockCopy,
Verified: true,
},
})

// Handle post block operations such as attestations and exits.
if err := s.handlePostBlockOperations(blockCopy.Block); err != nil {
if err := s.handlePostBlockOperations(blockCopy.Block()); err != nil {
return err
}

@@ -69,51 +71,14 @@ func (s *Service) ReceiveBlock(ctx context.Context, block *ethpb.SignedBeaconBlo
}

// Reports on block and fork choice metrics.
reportSlotMetrics(blockCopy.Block.Slot, s.HeadSlot(), s.CurrentSlot(), s.finalizedCheckpt)
reportSlotMetrics(blockCopy.Block().Slot(), s.HeadSlot(), s.CurrentSlot(), s.finalizedCheckpt)

// Log block sync status.
logBlockSyncStatus(blockCopy.Block, blockRoot, s.finalizedCheckpt)

// Log state transition data.
logStateTransitionData(blockCopy.Block)

return nil
}

// ReceiveBlockInitialSync processes the input block for the purpose of initial syncing.
// This method should only be used on blocks during initial syncing phase.
func (s *Service) ReceiveBlockInitialSync(ctx context.Context, block *ethpb.SignedBeaconBlock, blockRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveBlockNoVerify")
defer span.End()
blockCopy := stateTrie.CopySignedBeaconBlock(block)

// Apply state transition on the new block.
if err := s.onBlockInitialSyncStateTransition(ctx, blockCopy, blockRoot); err != nil {
err := errors.Wrap(err, "could not process block")
traceutil.AnnotateError(span, err)
if err := logBlockSyncStatus(blockCopy.Block(), blockRoot, s.finalizedCheckpt, receivedTime, uint64(s.genesisTime.Unix())); err != nil {
return err
}

// Send notification of the processed block to the state feed.
s.stateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.BlockProcessed,
Data: &statefeed.BlockProcessedData{
Slot: blockCopy.Block.Slot,
BlockRoot: blockRoot,
SignedBlock: blockCopy,
Verified: true,
},
})

// Reports on blockCopy and fork choice metrics.
reportSlotMetrics(blockCopy.Block.Slot, s.HeadSlot(), s.CurrentSlot(), s.finalizedCheckpt)

// Log state transition data.
log.WithFields(logrus.Fields{
"slot": blockCopy.Block.Slot,
"attestations": len(blockCopy.Block.Body.Attestations),
"deposits": len(blockCopy.Block.Body.Deposits),
}).Debug("Finished applying state transition")
logStateTransitionData(blockCopy.Block())

return nil
}
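In the reworked ReceiveBlock above, the legacy head update and the BlockProcessed notification only run when the UpdateHeadTimely feature flag is off; with the flag on, those steps are handled earlier in the block pipeline. A minimal standalone sketch of this kind of flag-gated fallback path, with invented names in place of Prysm's featureconfig and state feed:

```go
package main

import "fmt"

// flags is a stand-in for a process-wide feature flag set.
type flags struct {
	UpdateHeadTimely bool
}

var active = flags{UpdateHeadTimely: false}

func updateHead() error { fmt.Println("updating head after fork choice"); return nil }

func notifyBlockProcessed(slot uint64) { fmt.Printf("block processed at slot %d\n", slot) }

// receiveBlock runs the legacy head update plus notification only when the new
// timely-update path is disabled, mirroring the gating in the diff above.
func receiveBlock(slot uint64) error {
	if !active.UpdateHeadTimely {
		if err := updateHead(); err != nil {
			fmt.Println("could not update head:", err)
		}
		notifyBlockProcessed(slot)
	}
	return nil
}

func main() {
	_ = receiveBlock(2) // legacy path: updates head and notifies here
	active.UpdateHeadTimely = true
	_ = receiveBlock(3) // timely path: handled elsewhere, nothing to do here
}
```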
@@ -121,7 +86,7 @@ func (s *Service) ReceiveBlockInitialSync(ctx context.Context, block *ethpb.Sign
// ReceiveBlockBatch processes the whole block batch at once, assuming the block batch is linear ,transitioning
// the state, performing batch verification of all collected signatures and then performing the appropriate
// actions for a block post-transition.
func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []*ethpb.SignedBeaconBlock, blkRoots [][32]byte) error {
func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []block.SignedBeaconBlock, blkRoots [][32]byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveBlockBatch")
defer span.End()

@@ -134,16 +99,16 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []*ethpb.SignedB
}

for i, b := range blocks {
blockCopy := stateTrie.CopySignedBeaconBlock(b)
blockCopy := b.Copy()
if err = s.handleBlockAfterBatchVerify(ctx, blockCopy, blkRoots[i], fCheckpoints[i], jCheckpoints[i]); err != nil {
traceutil.AnnotateError(span, err)
return err
}
// Send notification of the processed block to the state feed.
s.stateNotifier.StateFeed().Send(&feed.Event{
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.BlockProcessed,
Data: &statefeed.BlockProcessedData{
Slot: blockCopy.Block.Slot,
Slot: blockCopy.Block().Slot(),
BlockRoot: blkRoots[i],
SignedBlock: blockCopy,
Verified: true,
@@ -151,7 +116,7 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []*ethpb.SignedB
})

// Reports on blockCopy and fork choice metrics.
reportSlotMetrics(blockCopy.Block.Slot, s.HeadSlot(), s.CurrentSlot(), s.finalizedCheckpt)
reportSlotMetrics(blockCopy.Block().Slot(), s.HeadSlot(), s.CurrentSlot(), s.finalizedCheckpt)
}

if err := s.VerifyWeakSubjectivityRoot(s.ctx); err != nil {
@@ -169,25 +134,25 @@ func (s *Service) HasInitSyncBlock(root [32]byte) bool {
return s.hasInitSyncBlock(root)
}

func (s *Service) handlePostBlockOperations(b *ethpb.BeaconBlock) error {
func (s *Service) handlePostBlockOperations(b block.BeaconBlock) error {
// Delete the processed block attestations from attestation pool.
if err := s.deletePoolAtts(b.Body.Attestations); err != nil {
if err := s.deletePoolAtts(b.Body().Attestations()); err != nil {
return err
}

// Add block attestations to the fork choice pool to compute head.
if err := s.attPool.SaveBlockAttestations(b.Body.Attestations); err != nil {
if err := s.cfg.AttPool.SaveBlockAttestations(b.Body().Attestations()); err != nil {
log.Errorf("Could not save block attestations for fork choice: %v", err)
return nil
}
// Mark block exits as seen so we don't include same ones in future blocks.
for _, e := range b.Body.VoluntaryExits {
s.exitPool.MarkIncluded(e)
for _, e := range b.Body().VoluntaryExits() {
s.cfg.ExitPool.MarkIncluded(e)
}

// Mark attester slashings as seen so we don't include same ones in future blocks.
for _, as := range b.Body.AttesterSlashings {
s.slashingPool.MarkIncludedAttesterSlashing(as)
for _, as := range b.Body().AttesterSlashings() {
s.cfg.SlashingPool.MarkIncludedAttesterSlashing(as)
}
return nil
}
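As the doc comment above says, ReceiveBlockBatch verifies the collected signatures for a linear run of blocks in one pass and only then performs the per-block post-transition work (state feed event, metrics). A minimal standalone sketch of that batch-then-per-item shape, with invented names rather than Prysm's real types:

```go
package main

import (
	"errors"
	"fmt"
)

type signedBlock struct {
	slot  uint64
	valid bool
}

// verifyBatch stands in for the one-shot batch verification of all signatures.
func verifyBatch(blocks []signedBlock) error {
	for _, b := range blocks {
		if !b.valid {
			return errors.New("batch signature verification failed")
		}
	}
	return nil
}

// receiveBlockBatch verifies the whole batch once, then performs the
// per-block post-transition actions, mirroring the structure in the diff.
func receiveBlockBatch(blocks []signedBlock) error {
	if err := verifyBatch(blocks); err != nil {
		return err
	}
	for _, b := range blocks {
		fmt.Printf("notifying state feed: block processed at slot %d\n", b.slot)
	}
	return nil
}

func main() {
	batch := []signedBlock{{slot: 1, valid: true}, {slot: 2, valid: true}}
	if err := receiveBlockBatch(batch); err != nil {
		fmt.Println("error:", err)
	}
}
```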
@@ -197,15 +162,15 @@ func (s *Service) handlePostBlockOperations(b *ethpb.BeaconBlock) error {
func (s *Service) checkSaveHotStateDB(ctx context.Context) error {
currentEpoch := helpers.SlotToEpoch(s.CurrentSlot())
// Prevent `sinceFinality` going underflow.
var sinceFinality uint64
var sinceFinality types.Epoch
if currentEpoch > s.finalizedCheckpt.Epoch {
sinceFinality = currentEpoch - s.finalizedCheckpt.Epoch
}

if sinceFinality >= uint64(epochsSinceFinalitySaveHotStateDB) {
s.stateGen.EnableSaveHotStateToDB(ctx)
if sinceFinality >= epochsSinceFinalitySaveHotStateDB {
s.cfg.StateGen.EnableSaveHotStateToDB(ctx)
return nil
}

return s.stateGen.DisableSaveHotStateToDB(ctx)
return s.cfg.StateGen.DisableSaveHotStateToDB(ctx)
}
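checkSaveHotStateDB guards against unsigned underflow by only subtracting when the current epoch is ahead of the finalized epoch, and flips hot-state saving on once the gap reaches the threshold (100 epochs in this diff). A minimal standalone sketch of that arithmetic with a plain uint64-backed epoch type and a hypothetical constant name:

```go
package main

import "fmt"

type epoch uint64

const epochsSinceFinalitySaveHotState epoch = 100 // mirrors the threshold in the diff

// shouldSaveHotState computes the underflow-safe distance from finality and
// reports whether hot states should start being persisted to the DB.
func shouldSaveHotState(current, finalized epoch) bool {
	var sinceFinality epoch
	if current > finalized { // prevent uint underflow when finality is ahead
		sinceFinality = current - finalized
	}
	return sinceFinality >= epochsSinceFinalitySaveHotState
}

func main() {
	fmt.Println(shouldSaveHotState(50, 49))   // false: finality is recent
	fmt.Println(shouldSaveHotState(250, 100)) // true: finality is 150 epochs behind
	fmt.Println(shouldSaveHotState(10, 20))   // false, and no underflow
}
```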
@@ -6,14 +6,19 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
blockchainTesting "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/voluntaryexits"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/featureconfig"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
|
||||
@@ -25,7 +30,7 @@ func TestService_ReceiveBlock(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
genesis, keys := testutil.DeterministicGenesisState(t, 64)
|
||||
genFullBlock := func(t *testing.T, conf *testutil.BlockGenConfig, slot uint64) *ethpb.SignedBeaconBlock {
|
||||
genFullBlock := func(t *testing.T, conf *testutil.BlockGenConfig, slot types.Slot) *ethpb.SignedBeaconBlock {
|
||||
blk, err := testutil.GenerateFullBlock(genesis, keys, conf, slot)
|
||||
assert.NoError(t, err)
|
||||
return blk
|
||||
@@ -52,7 +57,7 @@ func TestService_ReceiveBlock(t *testing.T) {
|
||||
if hs := s.head.state.Slot(); hs != 2 {
|
||||
t.Errorf("Unexpected state slot. Got %d but wanted %d", hs, 2)
|
||||
}
|
||||
if bs := s.head.block.Block.Slot; bs != 2 {
|
||||
if bs := s.head.block.Block().Slot(); bs != 2 {
|
||||
t.Errorf("Unexpected head block slot. Got %d but wanted %d", bs, 2)
|
||||
}
|
||||
},
|
||||
@@ -72,7 +77,7 @@ func TestService_ReceiveBlock(t *testing.T) {
|
||||
),
|
||||
},
|
||||
check: func(t *testing.T, s *Service) {
|
||||
if baCount := len(s.attPool.BlockAttestations()); baCount != 2 {
|
||||
if baCount := len(s.cfg.AttPool.BlockAttestations()); baCount != 2 {
|
||||
t.Errorf("Did not get the correct number of block attestations saved to the pool. "+
|
||||
"Got %d but wanted %d", baCount, 2)
|
||||
}
|
||||
@@ -92,7 +97,7 @@ func TestService_ReceiveBlock(t *testing.T) {
|
||||
),
|
||||
},
|
||||
check: func(t *testing.T, s *Service) {
|
||||
pending := s.exitPool.PendingExits(genesis, 1, true /* no limit */)
|
||||
pending := s.cfg.ExitPool.PendingExits(genesis, 1, true /* no limit */)
|
||||
if len(pending) != 0 {
|
||||
t.Errorf(
|
||||
"Did not mark the correct number of exits. Got %d pending but wanted %d",
|
||||
@@ -108,7 +113,7 @@ func TestService_ReceiveBlock(t *testing.T) {
|
||||
block: genFullBlock(t, testutil.DefaultBlockGenConfig(), 1 /*slot*/),
|
||||
},
|
||||
check: func(t *testing.T, s *Service) {
|
||||
if recvd := len(s.stateNotifier.(*blockchainTesting.MockStateNotifier).ReceivedEvents()); recvd < 1 {
|
||||
if recvd := len(s.cfg.StateNotifier.(*blockchainTesting.MockStateNotifier).ReceivedEvents()); recvd < 1 {
|
||||
t.Errorf("Received %d state notifications, expected at least 1", recvd)
|
||||
}
|
||||
},
|
||||
@@ -133,17 +138,17 @@ func TestService_ReceiveBlock(t *testing.T) {
|
||||
StateNotifier: &blockchainTesting.MockStateNotifier{RecordEvents: true},
|
||||
StateGen: stategen.New(beaconDB),
|
||||
}
|
||||
s, err := New(ctx, cfg)
|
||||
s, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.saveGenesisData(ctx, genesis))
|
||||
gBlk, err := s.beaconDB.GenesisBlock(ctx)
|
||||
gBlk, err := s.cfg.BeaconDB.GenesisBlock(ctx)
|
||||
require.NoError(t, err)
|
||||
gRoot, err := gBlk.Block.HashTreeRoot()
|
||||
gRoot, err := gBlk.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
s.finalizedCheckpt = ðpb.Checkpoint{Root: gRoot[:]}
|
||||
root, err := tt.args.block.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
err = s.ReceiveBlock(ctx, tt.args.block, root)
|
||||
err = s.ReceiveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(tt.args.block), root)
|
||||
if tt.wantedErr != "" {
|
||||
assert.ErrorContains(t, tt.wantedErr, err)
|
||||
} else {
|
||||
@@ -174,12 +179,12 @@ func TestService_ReceiveBlockUpdateHead(t *testing.T) {
|
||||
StateNotifier: &blockchainTesting.MockStateNotifier{RecordEvents: true},
|
||||
StateGen: stategen.New(beaconDB),
|
||||
}
|
||||
s, err := New(ctx, cfg)
|
||||
s, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.saveGenesisData(ctx, genesis))
|
||||
gBlk, err := s.beaconDB.GenesisBlock(ctx)
|
||||
gBlk, err := s.cfg.BeaconDB.GenesisBlock(ctx)
|
||||
require.NoError(t, err)
|
||||
gRoot, err := gBlk.Block.HashTreeRoot()
|
||||
gRoot, err := gBlk.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
s.finalizedCheckpt = ðpb.Checkpoint{Root: gRoot[:]}
|
||||
root, err := b.Block.HashTreeRoot()
|
||||
@@ -187,103 +192,22 @@ func TestService_ReceiveBlockUpdateHead(t *testing.T) {
|
||||
wg := sync.WaitGroup{}
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
require.NoError(t, s.ReceiveBlock(ctx, b, root))
|
||||
require.NoError(t, s.ReceiveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b), root))
|
||||
wg.Done()
|
||||
}()
|
||||
wg.Wait()
|
||||
if recvd := len(s.stateNotifier.(*blockchainTesting.MockStateNotifier).ReceivedEvents()); recvd < 1 {
|
||||
if recvd := len(s.cfg.StateNotifier.(*blockchainTesting.MockStateNotifier).ReceivedEvents()); recvd < 1 {
|
||||
t.Errorf("Received %d state notifications, expected at least 1", recvd)
|
||||
}
|
||||
// Verify fork choice has processed the block. (Genesis block and the new block)
|
||||
assert.Equal(t, 2, len(s.forkChoiceStore.Nodes()))
|
||||
}
|
||||
|
||||
func TestService_ReceiveBlockInitialSync(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
genesis, keys := testutil.DeterministicGenesisState(t, 64)
|
||||
genFullBlock := func(t *testing.T, conf *testutil.BlockGenConfig, slot uint64) *ethpb.SignedBeaconBlock {
|
||||
blk, err := testutil.GenerateFullBlock(genesis, keys, conf, slot)
|
||||
assert.NoError(t, err)
|
||||
return blk
|
||||
}
|
||||
|
||||
type args struct {
|
||||
block *ethpb.SignedBeaconBlock
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
wantedErr string
|
||||
check func(*testing.T, *Service)
|
||||
}{
|
||||
{
|
||||
name: "applies block with state transition",
|
||||
args: args{
|
||||
block: genFullBlock(t, testutil.DefaultBlockGenConfig(), 2 /*slot*/),
|
||||
},
|
||||
check: func(t *testing.T, s *Service) {
|
||||
assert.Equal(t, uint64(2), s.head.state.Slot(), "Incorrect head state slot")
|
||||
assert.Equal(t, uint64(2), s.head.block.Block.Slot, "Incorrect head block slot")
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "notifies block processed on state feed",
|
||||
args: args{
|
||||
block: genFullBlock(t, testutil.DefaultBlockGenConfig(), 1 /*slot*/),
|
||||
},
|
||||
check: func(t *testing.T, s *Service) {
|
||||
if recvd := len(s.stateNotifier.(*blockchainTesting.MockStateNotifier).ReceivedEvents()); recvd < 1 {
|
||||
t.Errorf("Received %d state notifications, expected at least 1", recvd)
|
||||
}
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
genesisBlockRoot := bytesutil.ToBytes32(nil)
|
||||
|
||||
cfg := &Config{
|
||||
BeaconDB: beaconDB,
|
||||
ForkChoiceStore: protoarray.New(
|
||||
0, // justifiedEpoch
|
||||
0, // finalizedEpoch
|
||||
genesisBlockRoot,
|
||||
),
|
||||
StateNotifier: &blockchainTesting.MockStateNotifier{RecordEvents: true},
|
||||
StateGen: stategen.New(beaconDB),
|
||||
}
|
||||
s, err := New(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
err = s.saveGenesisData(ctx, genesis)
|
||||
require.NoError(t, err)
|
||||
gBlk, err := s.beaconDB.GenesisBlock(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
gRoot, err := gBlk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
s.finalizedCheckpt = ðpb.Checkpoint{Root: gRoot[:]}
|
||||
root, err := tt.args.block.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
err = s.ReceiveBlockInitialSync(ctx, tt.args.block, root)
|
||||
if tt.wantedErr != "" {
|
||||
assert.ErrorContains(t, tt.wantedErr, err)
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
tt.check(t, s)
|
||||
}
|
||||
})
|
||||
}
|
||||
assert.Equal(t, 2, len(s.cfg.ForkChoiceStore.Nodes()))
|
||||
}
|
||||
|
||||
func TestService_ReceiveBlockBatch(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
genesis, keys := testutil.DeterministicGenesisState(t, 64)
|
||||
genFullBlock := func(t *testing.T, conf *testutil.BlockGenConfig, slot uint64) *ethpb.SignedBeaconBlock {
|
||||
genFullBlock := func(t *testing.T, conf *testutil.BlockGenConfig, slot types.Slot) *ethpb.SignedBeaconBlock {
|
||||
blk, err := testutil.GenerateFullBlock(genesis, keys, conf, slot)
|
||||
assert.NoError(t, err)
|
||||
return blk
|
||||
@@ -304,8 +228,8 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
|
||||
block: genFullBlock(t, testutil.DefaultBlockGenConfig(), 2 /*slot*/),
|
||||
},
|
||||
check: func(t *testing.T, s *Service) {
|
||||
assert.Equal(t, uint64(2), s.head.state.Slot(), "Incorrect head state slot")
|
||||
assert.Equal(t, uint64(2), s.head.block.Block.Slot, "Incorrect head block slot")
|
||||
assert.Equal(t, types.Slot(2), s.head.state.Slot(), "Incorrect head state slot")
|
||||
assert.Equal(t, types.Slot(2), s.head.block.Block().Slot(), "Incorrect head block slot")
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -314,7 +238,7 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
|
||||
block: genFullBlock(t, testutil.DefaultBlockGenConfig(), 1 /*slot*/),
|
||||
},
|
||||
check: func(t *testing.T, s *Service) {
|
||||
if recvd := len(s.stateNotifier.(*blockchainTesting.MockStateNotifier).ReceivedEvents()); recvd < 1 {
|
||||
if recvd := len(s.cfg.StateNotifier.(*blockchainTesting.MockStateNotifier).ReceivedEvents()); recvd < 1 {
|
||||
t.Errorf("Received %d state notifications, expected at least 1", recvd)
|
||||
}
|
||||
},
|
||||
@@ -336,19 +260,19 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
|
||||
StateNotifier: &blockchainTesting.MockStateNotifier{RecordEvents: true},
|
||||
StateGen: stategen.New(beaconDB),
|
||||
}
|
||||
s, err := New(ctx, cfg)
|
||||
s, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
err = s.saveGenesisData(ctx, genesis)
|
||||
require.NoError(t, err)
|
||||
gBlk, err := s.beaconDB.GenesisBlock(ctx)
|
||||
gBlk, err := s.cfg.BeaconDB.GenesisBlock(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
gRoot, err := gBlk.Block.HashTreeRoot()
|
||||
gRoot, err := gBlk.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
s.finalizedCheckpt = ðpb.Checkpoint{Root: gRoot[:]}
|
||||
root, err := tt.args.block.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
blks := []*ethpb.SignedBeaconBlock{tt.args.block}
|
||||
blks := []block.SignedBeaconBlock{wrapper.WrappedPhase0SignedBeaconBlock(tt.args.block)}
|
||||
roots := [][32]byte{root}
|
||||
err = s.ReceiveBlockBatch(ctx, blks, roots)
|
||||
if tt.wantedErr != "" {
|
||||
@@ -361,14 +285,70 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestService_ReceiveBlockBatch_UpdateFinalizedCheckpoint(t *testing.T) {
|
||||
// Must enable head timely feature flag to test this.
|
||||
resetCfg := featureconfig.InitWithReset(&featureconfig.Flags{
|
||||
UpdateHeadTimely: true,
|
||||
})
|
||||
defer resetCfg()
|
||||
|
||||
ctx := context.Background()
|
||||
genesis, keys := testutil.DeterministicGenesisState(t, 64)
|
||||
|
||||
// Generate 5 epochs worth of blocks.
|
||||
var blks []block.SignedBeaconBlock
|
||||
var roots [][32]byte
|
||||
copied := genesis.Copy()
|
||||
for i := types.Slot(1); i < params.BeaconConfig().SlotsPerEpoch*5; i++ {
|
||||
b, err := testutil.GenerateFullBlock(copied, keys, testutil.DefaultBlockGenConfig(), i)
|
||||
assert.NoError(t, err)
|
||||
copied, err = state.ExecuteStateTransition(context.Background(), copied, wrapper.WrappedPhase0SignedBeaconBlock(b))
|
||||
assert.NoError(t, err)
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
blks = append(blks, wrapper.WrappedPhase0SignedBeaconBlock(b))
|
||||
roots = append(roots, r)
|
||||
}
|
||||
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
genesisBlockRoot, err := genesis.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
cfg := &Config{
|
||||
BeaconDB: beaconDB,
|
||||
ForkChoiceStore: protoarray.New(
|
||||
0, // justifiedEpoch
|
||||
0, // finalizedEpoch
|
||||
genesisBlockRoot,
|
||||
),
|
||||
StateNotifier: &blockchainTesting.MockStateNotifier{RecordEvents: false},
|
||||
StateGen: stategen.New(beaconDB),
|
||||
}
|
||||
s, err := NewService(ctx, cfg)
|
||||
require.NoError(t, err)
|
||||
err = s.saveGenesisData(ctx, genesis)
|
||||
require.NoError(t, err)
|
||||
gBlk, err := s.cfg.BeaconDB.GenesisBlock(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
gRoot, err := gBlk.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
s.finalizedCheckpt = ðpb.Checkpoint{Root: gRoot[:]}
|
||||
|
||||
// Process 5 epochs worth of blocks.
|
||||
require.NoError(t, s.ReceiveBlockBatch(ctx, blks, roots))
|
||||
|
||||
// Finalized epoch must be updated.
|
||||
require.Equal(t, types.Epoch(2), s.finalizedCheckpt.Epoch)
|
||||
}
|
||||
|
||||
func TestService_HasInitSyncBlock(t *testing.T) {
|
||||
s, err := New(context.Background(), &Config{StateNotifier: &blockchainTesting.MockStateNotifier{}})
|
||||
s, err := NewService(context.Background(), &Config{StateNotifier: &blockchainTesting.MockStateNotifier{}})
|
||||
require.NoError(t, err)
|
||||
r := [32]byte{'a'}
|
||||
if s.HasInitSyncBlock(r) {
|
||||
t.Error("Should not have block")
|
||||
}
|
||||
s.saveInitSyncBlock(r, testutil.NewBeaconBlock())
|
||||
s.saveInitSyncBlock(r, wrapper.WrappedPhase0SignedBeaconBlock(testutil.NewBeaconBlock()))
|
||||
if !s.HasInitSyncBlock(r) {
|
||||
t.Error("Should have block")
|
||||
}
|
||||
@@ -377,9 +357,9 @@ func TestService_HasInitSyncBlock(t *testing.T) {
|
||||
func TestCheckSaveHotStateDB_Enabling(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
hook := logTest.NewGlobal()
|
||||
s, err := New(context.Background(), &Config{StateGen: stategen.New(beaconDB)})
|
||||
s, err := NewService(context.Background(), &Config{StateGen: stategen.New(beaconDB)})
|
||||
require.NoError(t, err)
|
||||
st := params.BeaconConfig().SlotsPerEpoch * uint64(epochsSinceFinalitySaveHotStateDB)
|
||||
st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
|
||||
s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
|
||||
s.finalizedCheckpt = ðpb.Checkpoint{}
|
||||
|
||||
@@ -390,7 +370,7 @@ func TestCheckSaveHotStateDB_Enabling(t *testing.T) {
|
||||
func TestCheckSaveHotStateDB_Disabling(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
hook := logTest.NewGlobal()
|
||||
s, err := New(context.Background(), &Config{StateGen: stategen.New(beaconDB)})
|
||||
s, err := NewService(context.Background(), &Config{StateGen: stategen.New(beaconDB)})
|
||||
require.NoError(t, err)
|
||||
s.finalizedCheckpt = ðpb.Checkpoint{}
|
||||
require.NoError(t, s.checkSaveHotStateDB(context.Background()))
|
||||
@@ -403,7 +383,7 @@ func TestCheckSaveHotStateDB_Disabling(t *testing.T) {
|
||||
func TestCheckSaveHotStateDB_Overflow(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
hook := logTest.NewGlobal()
|
||||
s, err := New(context.Background(), &Config{StateGen: stategen.New(beaconDB)})
|
||||
s, err := NewService(context.Background(), &Config{StateGen: stategen.New(beaconDB)})
|
||||
require.NoError(t, err)
|
||||
s.finalizedCheckpt = ðpb.Checkpoint{Epoch: 10000000}
|
||||
s.genesisTime = time.Now()
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
// Package blockchain defines the life-cycle of the blockchain at the core of
|
||||
// eth2, including processing of new blocks and attestations using casper
|
||||
// proof of stake.
|
||||
// Ethereum, including processing of new blocks and attestations using proof of stake.
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
@@ -11,16 +10,14 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
|
||||
core "github.com/prysmaticlabs/prysm/beacon-chain/core/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/flags"
|
||||
f "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
|
||||
@@ -28,10 +25,13 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/voluntaryexits"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
|
||||
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/copyutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/slotutil"
|
||||
"github.com/sirupsen/logrus"
|
||||
@@ -45,85 +45,58 @@ const headSyncMinEpochsAfterCheckpoint = 128
|
||||
// Service represents a service that handles the internal
// logic of managing the full PoS beacon chain.
type Service struct {
cfg *Config
ctx context.Context
cancel context.CancelFunc
beaconDB db.HeadAccessDatabase
depositCache *depositcache.DepositCache
chainStartFetcher powchain.ChainStartFetcher
attPool attestations.Pool
slashingPool *slashings.Pool
exitPool *voluntaryexits.Pool
genesisTime time.Time
p2p p2p.Broadcaster
maxRoutines int
head *head
headLock sync.RWMutex
stateNotifier statefeed.Notifier
genesisRoot [32]byte
forkChoiceStore f.ForkChoicer
justifiedCheckpt *ethpb.Checkpoint
prevJustifiedCheckpt *ethpb.Checkpoint
bestJustifiedCheckpt *ethpb.Checkpoint
finalizedCheckpt *ethpb.Checkpoint
prevFinalizedCheckpt *ethpb.Checkpoint
nextEpochBoundarySlot uint64
nextEpochBoundarySlot types.Slot
boundaryRoots [][32]byte
checkpointStateCache *cache.CheckpointStateCache
stateGen *stategen.State
opsService *attestations.Service
initSyncBlocks map[[32]byte]*ethpb.SignedBeaconBlock
initSyncBlocks map[[32]byte]block.SignedBeaconBlock
initSyncBlocksLock sync.RWMutex
justifiedBalances []uint64
justifiedBalancesLock sync.RWMutex
wsEpoch uint64
wsRoot []byte
wsVerified bool
}

// Config options for the service.
type Config struct {
BeaconBlockBuf int
ChainStartFetcher powchain.ChainStartFetcher
BeaconDB db.HeadAccessDatabase
DepositCache *depositcache.DepositCache
AttPool attestations.Pool
ExitPool *voluntaryexits.Pool
SlashingPool *slashings.Pool
P2p p2p.Broadcaster
MaxRoutines int
StateNotifier statefeed.Notifier
ForkChoiceStore f.ForkChoicer
OpsService *attestations.Service
StateGen *stategen.State
WspBlockRoot []byte
WspEpoch uint64
BeaconBlockBuf int
ChainStartFetcher powchain.ChainStartFetcher
BeaconDB db.HeadAccessDatabase
DepositCache *depositcache.DepositCache
AttPool attestations.Pool
ExitPool voluntaryexits.PoolManager
SlashingPool slashings.PoolManager
P2p p2p.Broadcaster
MaxRoutines int
StateNotifier statefeed.Notifier
ForkChoiceStore f.ForkChoicer
AttService *attestations.Service
StateGen *stategen.State
WeakSubjectivityCheckpt *ethpb.Checkpoint
}

// New instantiates a new block service instance that will
// NewService instantiates a new block service instance that will
// be registered into a running beacon node.
func New(ctx context.Context, cfg *Config) (*Service, error) {
func NewService(ctx context.Context, cfg *Config) (*Service, error) {
ctx, cancel := context.WithCancel(ctx)
return &Service{
cfg: cfg,
ctx: ctx,
cancel: cancel,
beaconDB: cfg.BeaconDB,
depositCache: cfg.DepositCache,
chainStartFetcher: cfg.ChainStartFetcher,
attPool: cfg.AttPool,
exitPool: cfg.ExitPool,
slashingPool: cfg.SlashingPool,
p2p: cfg.P2p,
maxRoutines: cfg.MaxRoutines,
stateNotifier: cfg.StateNotifier,
forkChoiceStore: cfg.ForkChoiceStore,
boundaryRoots: [][32]byte{},
checkpointStateCache: cache.NewCheckpointStateCache(),
opsService: cfg.OpsService,
stateGen: cfg.StateGen,
initSyncBlocks: make(map[[32]byte]*ethpb.SignedBeaconBlock),
initSyncBlocks: make(map[[32]byte]block.SignedBeaconBlock),
justifiedBalances: make([]uint64, 0),
wsEpoch: cfg.WspEpoch,
wsRoot: cfg.WspBlockRoot,
}, nil
}

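The hunks above fold the service's individual collaborators into a single cfg *Config and rename the constructor from New to NewService. A minimal sketch of wiring the refactored service, modeled on the setupBeaconChain test later in this diff; it assumes it runs inside the blockchain package with ctx and a db.HeadAccessDatabase named beaconDB already in scope, and it only uses constructors that appear elsewhere in these hunks:

// Sketch only: mirrors the post-refactor Config/NewService usage shown in this diff.
attService, err := attestations.NewService(ctx, &attestations.Config{Pool: attestations.NewPool()})
if err != nil {
	log.Fatalf("Could not create attestation service: %v", err)
}
cfg := &Config{
	BeaconDB:        beaconDB, // assumed db.HeadAccessDatabase in scope
	StateGen:        stategen.New(beaconDB),
	ForkChoiceStore: protoarray.New(0, 0, params.BeaconConfig().ZeroHash),
	AttPool:         attestations.NewPool(),
	AttService:      attService,
}
chainService, err := NewService(ctx, cfg)
if err != nil {
	log.Fatalf("Unable to set up chain service: %v", err)
}
chainService.Start()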
@@ -132,7 +105,7 @@ func (s *Service) Start() {
// For running initial sync with state cache, in an event of restart, we use
// last finalized check point as start point to sync instead of head
// state. This is because we no longer save state every slot during sync.
cp, err := s.beaconDB.FinalizedCheckpoint(s.ctx)
cp, err := s.cfg.BeaconDB.FinalizedCheckpoint(s.ctx)
if err != nil {
log.Fatalf("Could not fetch finalized cp: %v", err)
}
@@ -142,18 +115,18 @@ func (s *Service) Start() {
// the finalized root is defined as zero hashes instead of genesis root hash.
// We want to use genesis root to retrieve for state.
if r == params.BeaconConfig().ZeroHash {
genesisBlock, err := s.beaconDB.GenesisBlock(s.ctx)
genesisBlock, err := s.cfg.BeaconDB.GenesisBlock(s.ctx)
if err != nil {
log.Fatalf("Could not fetch finalized cp: %v", err)
}
if genesisBlock != nil {
r, err = genesisBlock.Block.HashTreeRoot()
if genesisBlock != nil && !genesisBlock.IsNil() {
r, err = genesisBlock.Block().HashTreeRoot()
if err != nil {
log.Fatalf("Could not tree hash genesis block: %v", err)
}
}
}
beaconState, err := s.stateGen.StateByRoot(s.ctx, r)
beaconState, err := s.cfg.StateGen.StateByRoot(s.ctx, r)
if err != nil {
log.Fatalf("Could not fetch beacon state by root: %v", err)
}
@@ -162,16 +135,16 @@ func (s *Service) Start() {
attestationProcessorSubscribed := make(chan struct{}, 1)

// If the chain has already been initialized, simply start the block processing routine.
if beaconState != nil {
if beaconState != nil && !beaconState.IsNil() {
log.Info("Blockchain data already exists in DB, initializing...")
s.genesisTime = time.Unix(int64(beaconState.GenesisTime()), 0)
s.opsService.SetGenesisTime(beaconState.GenesisTime())
s.cfg.AttService.SetGenesisTime(beaconState.GenesisTime())
if err := s.initializeChainInfo(s.ctx); err != nil {
log.Fatalf("Could not set up chain info: %v", err)
}

// We start a counter to genesis, if needed.
gState, err := s.beaconDB.GenesisState(s.ctx)
gState, err := s.cfg.BeaconDB.GenesisState(s.ctx)
if err != nil {
log.Fatalf("Could not retrieve genesis state: %v", err)
}
@@ -181,35 +154,35 @@ func (s *Service) Start() {
}
go slotutil.CountdownToGenesis(s.ctx, s.genesisTime, uint64(gState.NumValidators()), gRoot)

justifiedCheckpoint, err := s.beaconDB.JustifiedCheckpoint(s.ctx)
justifiedCheckpoint, err := s.cfg.BeaconDB.JustifiedCheckpoint(s.ctx)
if err != nil {
log.Fatalf("Could not get justified checkpoint: %v", err)
}
finalizedCheckpoint, err := s.beaconDB.FinalizedCheckpoint(s.ctx)
finalizedCheckpoint, err := s.cfg.BeaconDB.FinalizedCheckpoint(s.ctx)
if err != nil {
log.Fatalf("Could not get finalized checkpoint: %v", err)
}

// Resume fork choice.
s.justifiedCheckpt = stateTrie.CopyCheckpoint(justifiedCheckpoint)
s.justifiedCheckpt = copyutil.CopyCheckpoint(justifiedCheckpoint)
if err := s.cacheJustifiedStateBalances(s.ctx, s.ensureRootNotZeros(bytesutil.ToBytes32(s.justifiedCheckpt.Root))); err != nil {
log.Fatalf("Could not cache justified state balances: %v", err)
}
s.prevJustifiedCheckpt = stateTrie.CopyCheckpoint(justifiedCheckpoint)
s.bestJustifiedCheckpt = stateTrie.CopyCheckpoint(justifiedCheckpoint)
s.finalizedCheckpt = stateTrie.CopyCheckpoint(finalizedCheckpoint)
s.prevFinalizedCheckpt = stateTrie.CopyCheckpoint(finalizedCheckpoint)
s.prevJustifiedCheckpt = copyutil.CopyCheckpoint(justifiedCheckpoint)
s.bestJustifiedCheckpt = copyutil.CopyCheckpoint(justifiedCheckpoint)
s.finalizedCheckpt = copyutil.CopyCheckpoint(finalizedCheckpoint)
s.prevFinalizedCheckpt = copyutil.CopyCheckpoint(finalizedCheckpoint)
s.resumeForkChoice(justifiedCheckpoint, finalizedCheckpoint)

ss, err := helpers.StartSlot(s.finalizedCheckpt.Epoch)
if err != nil {
log.Fatalf("Could not get start slot of finalized epoch: %v", err)
}
h := s.headBlock().Block
if h.Slot > ss {
h := s.headBlock().Block()
if h.Slot() > ss {
log.WithFields(logrus.Fields{
"startSlot": ss,
"endSlot": h.Slot,
"endSlot": h.Slot(),
}).Info("Loading blocks to fork choice store, this may take a while.")
if err := s.fillInForkChoiceMissingBlocks(s.ctx, h, s.finalizedCheckpt, s.justifiedCheckpt); err != nil {
log.Fatalf("Could not fill in fork choice store missing blocks: %v", err)
@@ -221,7 +194,7 @@ func (s *Service) Start() {
log.Fatalf("Could not verify weak subjectivity checkpoint: %v", err)
}

s.stateNotifier.StateFeed().Send(&feed.Event{
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.Initialized,
Data: &statefeed.InitializedData{
StartTime: s.genesisTime,
@@ -230,13 +203,13 @@ func (s *Service) Start() {
})
} else {
log.Info("Waiting to reach the validator deposit threshold to start the beacon chain...")
if s.chainStartFetcher == nil {
if s.cfg.ChainStartFetcher == nil {
log.Fatal("Not configured web3Service for POW chain")
return // return need for TestStartUninitializedChainWithoutConfigPOWChain.
}
go func() {
stateChannel := make(chan *feed.Event, 1)
stateSub := s.stateNotifier.StateFeed().Subscribe(stateChannel)
stateSub := s.cfg.StateNotifier.StateFeed().Subscribe(stateChannel)
defer stateSub.Unsubscribe()
<-attestationProcessorSubscribed
for {
@@ -269,8 +242,8 @@ func (s *Service) Start() {
// processChainStartTime initializes a series of deposits from the ChainStart deposits in the eth1
// deposit contract, initializes the beacon chain's state, and kicks off the beacon chain.
func (s *Service) processChainStartTime(ctx context.Context, genesisTime time.Time) {
preGenesisState := s.chainStartFetcher.PreGenesisState()
initializedState, err := s.initializeBeaconChain(ctx, genesisTime, preGenesisState, s.chainStartFetcher.ChainStartEth1Data())
preGenesisState := s.cfg.ChainStartFetcher.PreGenesisState()
initializedState, err := s.initializeBeaconChain(ctx, genesisTime, preGenesisState, s.cfg.ChainStartFetcher.ChainStartEth1Data())
if err != nil {
log.Fatalf("Could not initialize beacon chain: %v", err)
}
@@ -283,7 +256,7 @@ func (s *Service) processChainStartTime(ctx context.Context, genesisTime time.Ti

// We send out a state initialized event to the rest of the services
// running in the beacon node.
s.stateNotifier.StateFeed().Send(&feed.Event{
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.Initialized,
Data: &statefeed.InitializedData{
StartTime: genesisTime,
@@ -298,14 +271,14 @@ func (s *Service) processChainStartTime(ctx context.Context, genesisTime time.Ti
func (s *Service) initializeBeaconChain(
ctx context.Context,
genesisTime time.Time,
preGenesisState *stateTrie.BeaconState,
eth1data *ethpb.Eth1Data) (*stateTrie.BeaconState, error) {
preGenesisState state.BeaconState,
eth1data *ethpb.Eth1Data) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "beacon-chain.Service.initializeBeaconChain")
defer span.End()
s.genesisTime = genesisTime
unixTime := uint64(genesisTime.Unix())

genesisState, err := state.OptimizedGenesisBeaconState(unixTime, preGenesisState, eth1data)
genesisState, err := core.OptimizedGenesisBeaconState(unixTime, preGenesisState, eth1data)
if err != nil {
return nil, errors.Wrap(err, "could not initialize genesis state")
}
@@ -317,17 +290,17 @@ func (s *Service) initializeBeaconChain(
log.Info("Initialized beacon chain genesis state")

// Clear out all pre-genesis data now that the state is initialized.
s.chainStartFetcher.ClearPreGenesisData()
s.cfg.ChainStartFetcher.ClearPreGenesisData()

// Update committee shuffled indices for genesis epoch.
if err := helpers.UpdateCommitteeCache(genesisState, 0 /* genesis epoch */); err != nil {
return nil, err
}
if err := helpers.UpdateProposerIndicesInCache(genesisState, 0 /* genesis epoch */); err != nil {
if err := helpers.UpdateProposerIndicesInCache(genesisState); err != nil {
return nil, err
}

s.opsService.SetGenesisTime(genesisState.GenesisTime())
s.cfg.AttService.SetGenesisTime(genesisState.GenesisTime())

return genesisState, nil
}
@@ -336,14 +309,14 @@ func (s *Service) initializeBeaconChain(
func (s *Service) Stop() error {
defer s.cancel()

if s.stateGen != nil && s.head != nil && s.head.state != nil {
if err := s.stateGen.ForceCheckpoint(s.ctx, s.head.state.FinalizedCheckpoint().Root); err != nil {
if s.cfg.StateGen != nil && s.head != nil && s.head.state != nil {
if err := s.cfg.StateGen.ForceCheckpoint(s.ctx, s.head.state.FinalizedCheckpoint().Root); err != nil {
return err
}
}

// Save initial sync cached blocks to the DB before stop.
return s.beaconDB.SaveBlocks(s.ctx, s.getInitSyncBlocks())
return s.cfg.BeaconDB.SaveBlocks(s.ctx, s.getInitSyncBlocks())
}

// Status always returns nil unless there is an error condition that causes
@@ -352,61 +325,43 @@ func (s *Service) Status() error {
if s.genesisRoot == params.BeaconConfig().ZeroHash {
return errors.New("genesis state has not been created")
}
if runtime.NumGoroutine() > s.maxRoutines {
if runtime.NumGoroutine() > s.cfg.MaxRoutines {
return fmt.Errorf("too many goroutines %d", runtime.NumGoroutine())
}
return nil
}

// This gets called when beacon chain is first initialized to save genesis data (state, block, and more) in db.
func (s *Service) saveGenesisData(ctx context.Context, genesisState *stateTrie.BeaconState) error {
stateRoot, err := genesisState.HashTreeRoot(ctx)
if err != nil {
return err
func (s *Service) saveGenesisData(ctx context.Context, genesisState state.BeaconState) error {
if err := s.cfg.BeaconDB.SaveGenesisData(ctx, genesisState); err != nil {
return errors.Wrap(err, "could not save genesis data")
}
genesisBlk := blocks.NewGenesisBlock(stateRoot[:])
genesisBlkRoot, err := genesisBlk.Block.HashTreeRoot()
genesisBlk, err := s.cfg.BeaconDB.GenesisBlock(ctx)
if err != nil || genesisBlk == nil || genesisBlk.IsNil() {
return fmt.Errorf("could not load genesis block: %v", err)
}
genesisBlkRoot, err := genesisBlk.Block().HashTreeRoot()
if err != nil {
return errors.Wrap(err, "could not get genesis block root")
}

s.genesisRoot = genesisBlkRoot

if err := s.beaconDB.SaveBlock(ctx, genesisBlk); err != nil {
return errors.Wrap(err, "could not save genesis block")
}
if err := s.beaconDB.SaveState(ctx, genesisState, genesisBlkRoot); err != nil {
return errors.Wrap(err, "could not save genesis state")
}
if err := s.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{
Slot: 0,
Root: genesisBlkRoot[:],
}); err != nil {
return err
}

s.stateGen.SaveFinalizedState(0, genesisBlkRoot, genesisState)

if err := s.beaconDB.SaveHeadBlockRoot(ctx, genesisBlkRoot); err != nil {
return errors.Wrap(err, "could not save head block root")
}
if err := s.beaconDB.SaveGenesisBlockRoot(ctx, genesisBlkRoot); err != nil {
return errors.Wrap(err, "could not save genesis block root")
}
s.cfg.StateGen.SaveFinalizedState(0 /*slot*/, genesisBlkRoot, genesisState)

// Finalized checkpoint at genesis is a zero hash.
genesisCheckpoint := genesisState.FinalizedCheckpoint()

s.justifiedCheckpt = stateTrie.CopyCheckpoint(genesisCheckpoint)
s.justifiedCheckpt = copyutil.CopyCheckpoint(genesisCheckpoint)
if err := s.cacheJustifiedStateBalances(ctx, genesisBlkRoot); err != nil {
return err
}
s.prevJustifiedCheckpt = stateTrie.CopyCheckpoint(genesisCheckpoint)
s.bestJustifiedCheckpt = stateTrie.CopyCheckpoint(genesisCheckpoint)
s.finalizedCheckpt = stateTrie.CopyCheckpoint(genesisCheckpoint)
s.prevFinalizedCheckpt = stateTrie.CopyCheckpoint(genesisCheckpoint)
s.prevJustifiedCheckpt = copyutil.CopyCheckpoint(genesisCheckpoint)
s.bestJustifiedCheckpt = copyutil.CopyCheckpoint(genesisCheckpoint)
s.finalizedCheckpt = copyutil.CopyCheckpoint(genesisCheckpoint)
s.prevFinalizedCheckpt = copyutil.CopyCheckpoint(genesisCheckpoint)

if err := s.forkChoiceStore.ProcessBlock(ctx,
genesisBlk.Block.Slot,
if err := s.cfg.ForkChoiceStore.ProcessBlock(ctx,
genesisBlk.Block().Slot(),
genesisBlkRoot,
params.BeaconConfig().ZeroHash,
[32]byte{},
@@ -416,26 +371,25 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState *stateTrie.B
}

s.setHead(genesisBlkRoot, genesisBlk, genesisState)

return nil
}

// This gets called to initialize chain info variables using the finalized checkpoint stored in DB
func (s *Service) initializeChainInfo(ctx context.Context) error {
genesisBlock, err := s.beaconDB.GenesisBlock(ctx)
genesisBlock, err := s.cfg.BeaconDB.GenesisBlock(ctx)
if err != nil {
return errors.Wrap(err, "could not get genesis block from db")
}
if genesisBlock == nil {
if genesisBlock == nil || genesisBlock.IsNil() {
return errors.New("no genesis block in db")
}
genesisBlkRoot, err := genesisBlock.Block.HashTreeRoot()
genesisBlkRoot, err := genesisBlock.Block().HashTreeRoot()
if err != nil {
return errors.Wrap(err, "could not get signing root of genesis block")
}
s.genesisRoot = genesisBlkRoot

finalized, err := s.beaconDB.FinalizedCheckpoint(ctx)
finalized, err := s.cfg.BeaconDB.FinalizedCheckpoint(ctx)
if err != nil {
return errors.Wrap(err, "could not get finalized checkpoint from db")
}
@@ -445,37 +399,37 @@ func (s *Service) initializeChainInfo(ctx context.Context) error {
return errors.New("no finalized epoch in the database")
}
finalizedRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(finalized.Root))
var finalizedState *stateTrie.BeaconState
var finalizedState state.BeaconState

finalizedState, err = s.stateGen.Resume(ctx)
finalizedState, err = s.cfg.StateGen.Resume(ctx)
if err != nil {
return errors.Wrap(err, "could not get finalized state from db")
}

if flags.Get().HeadSync {
headBlock, err := s.beaconDB.HeadBlock(ctx)
headBlock, err := s.cfg.BeaconDB.HeadBlock(ctx)
if err != nil {
return errors.Wrap(err, "could not retrieve head block")
}
headEpoch := helpers.SlotToEpoch(headBlock.Block.Slot)
var epochsSinceFinality uint64
headEpoch := helpers.SlotToEpoch(headBlock.Block().Slot())
var epochsSinceFinality types.Epoch
if headEpoch > finalized.Epoch {
epochsSinceFinality = headEpoch - finalized.Epoch
}
// Head sync when node is far enough beyond known finalized epoch,
// this becomes really useful during long period of non-finality.
if epochsSinceFinality >= headSyncMinEpochsAfterCheckpoint {
headRoot, err := headBlock.Block.HashTreeRoot()
headRoot, err := headBlock.Block().HashTreeRoot()
if err != nil {
return errors.Wrap(err, "could not hash head block")
}
finalizedState, err := s.stateGen.Resume(ctx)
finalizedState, err := s.cfg.StateGen.Resume(ctx)
if err != nil {
return errors.Wrap(err, "could not get finalized state from db")
}
log.Infof("Regenerating state from the last checkpoint at slot %d to current head slot of %d."+
"This process may take a while, please wait.", finalizedState.Slot(), headBlock.Block.Slot)
headState, err := s.stateGen.StateByRoot(ctx, headRoot)
"This process may take a while, please wait.", finalizedState.Slot(), headBlock.Block().Slot())
headState, err := s.cfg.StateGen.StateByRoot(ctx, headRoot)
if err != nil {
return errors.Wrap(err, "could not retrieve head state")
}
@@ -488,12 +442,12 @@ func (s *Service) initializeChainInfo(ctx context.Context) error {
}
}

finalizedBlock, err := s.beaconDB.Block(ctx, finalizedRoot)
finalizedBlock, err := s.cfg.BeaconDB.Block(ctx, finalizedRoot)
if err != nil {
return errors.Wrap(err, "could not get finalized block from db")
}

if finalizedState == nil || finalizedBlock == nil {
if finalizedState == nil || finalizedState.IsNil() || finalizedBlock == nil || finalizedBlock.IsNil() {
return errors.New("finalized state and block can't be nil")
}
s.setHead(finalizedRoot, finalizedBlock, finalizedState)
@@ -505,7 +459,7 @@ func (s *Service) initializeChainInfo(ctx context.Context) error {
// information to fork choice service to initializes fork choice store.
func (s *Service) resumeForkChoice(justifiedCheckpoint, finalizedCheckpoint *ethpb.Checkpoint) {
store := protoarray.New(justifiedCheckpoint.Epoch, finalizedCheckpoint.Epoch, bytesutil.ToBytes32(finalizedCheckpoint.Root))
s.forkChoiceStore = store
s.cfg.ForkChoiceStore = store
}

// This returns true if block has been processed before. Two ways to verify the block has been processed:
@@ -513,9 +467,9 @@ func (s *Service) resumeForkChoice(justifiedCheckpoint, finalizedCheckpoint *eth
// 2.) Check DB.
// Checking 1.) is ten times faster than checking 2.)
func (s *Service) hasBlock(ctx context.Context, root [32]byte) bool {
if s.forkChoiceStore.HasNode(root) {
if s.cfg.ForkChoiceStore.HasNode(root) {
return true
}

return s.beaconDB.HasBlock(ctx, root)
return s.cfg.BeaconDB.HasBlock(ctx, root)
}

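The hasBlock hunk above keeps the same two-step lookup and only changes where the fork choice store and the database are read from (s.cfg). A generic, self-contained sketch of that check-the-in-memory-index-first pattern; the interface and package names here are placeholders for illustration, not Prysm APIs:

package blockcheck

import "context"

// nodeIndex stands in for the in-memory fork choice store.
type nodeIndex interface {
	HasNode(root [32]byte) bool
}

// blockStore stands in for the on-disk block database.
type blockStore interface {
	HasBlock(ctx context.Context, root [32]byte) bool
}

// hasBlock consults the cheap in-memory index first and only falls back to
// the database when the root is not found there.
func hasBlock(ctx context.Context, idx nodeIndex, db blockStore, root [32]byte) bool {
	if idx.HasNode(root) {
		return true
	}
	return db.HasBlock(ctx, root)
}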
@@ -18,7 +18,7 @@ func init() {
func TestChainService_SaveHead_DataRace(t *testing.T) {
beaconDB := testDB.SetupDB(t)
s := &Service{
beaconDB: beaconDB,
cfg: &Config{BeaconDB: beaconDB},
}
go func() {
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))

@@ -8,8 +8,6 @@ import (
"time"

"github.com/ethereum/go-ethereum/common"
"github.com/gogo/protobuf/proto"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
b "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
@@ -18,15 +16,16 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
"github.com/prysmaticlabs/prysm/beacon-chain/db"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
beaconstate "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
protodb "github.com/prysmaticlabs/prysm/proto/beacon/db"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
"github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/event"
"github.com/prysmaticlabs/prysm/shared/params"
@@ -34,6 +33,7 @@ import (
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
logTest "github.com/sirupsen/logrus/hooks/test"
"google.golang.org/protobuf/proto"
)

type mockBeaconNode struct {
@@ -62,6 +62,11 @@ func (mb *mockBroadcaster) BroadcastAttestation(_ context.Context, _ uint64, _ *
return nil
}

func (mb *mockBroadcaster) BroadcastSyncCommitteeMessage(_ context.Context, _ uint64, _ *ethpb.SyncCommitteeMessage) error {
mb.broadcastCalled = true
return nil
}

var _ p2p.Broadcaster = (*mockBroadcaster)(nil)

func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
@@ -70,30 +75,32 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
var web3Service *powchain.Service
var err error
bState, _ := testutil.DeterministicGenesisState(t, 10)
err = beaconDB.SavePowchainData(ctx, &protodb.ETH1ChainData{
BeaconState: bState.InnerStateUnsafe(),
Trie: &protodb.SparseMerkleTrie{},
CurrentEth1Data: &protodb.LatestETH1Data{
pbState, err := v1.ProtobufBeaconState(bState.InnerStateUnsafe())
require.NoError(t, err)
err = beaconDB.SavePowchainData(ctx, &ethpb.ETH1ChainData{
BeaconState: pbState,
Trie: &ethpb.SparseMerkleTrie{},
CurrentEth1Data: &ethpb.LatestETH1Data{
BlockHash: make([]byte, 32),
},
ChainstartData: &protodb.ChainStartData{
ChainstartData: &ethpb.ChainStartData{
Eth1Data: &ethpb.Eth1Data{
DepositRoot: make([]byte, 32),
DepositCount: 0,
BlockHash: make([]byte, 32),
},
},
DepositContainers: []*protodb.DepositContainer{},
DepositContainers: []*ethpb.DepositContainer{},
})
require.NoError(t, err)
web3Service, err = powchain.New(ctx, &powchain.Web3ServiceConfig{
web3Service, err = powchain.NewService(ctx, &powchain.Web3ServiceConfig{
BeaconDB: beaconDB,
HTTPEndpoints: []string{endpoint},
HttpEndpoints: []string{endpoint},
DepositContract: common.Address{},
})
require.NoError(t, err, "Unable to set up web3 service")

opsService, err := attestations.New(ctx, &attestations.Config{Pool: attestations.NewPool()})
attService, err := attestations.NewService(ctx, &attestations.Config{Pool: attestations.NewPool()})
require.NoError(t, err)

depositCache, err := depositcache.New()
@@ -109,13 +116,13 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
AttPool: attestations.NewPool(),
StateGen: stategen.New(beaconDB),
ForkChoiceStore: protoarray.New(0, 0, params.BeaconConfig().ZeroHash),
OpsService: opsService,
AttService: attService,
}

// Safe a state in stategen to purposes of testing a service stop / shutdown.
require.NoError(t, cfg.StateGen.SaveState(ctx, bytesutil.ToBytes32(bState.FinalizedCheckpoint().Root), bState))

chainService, err := New(ctx, cfg)
chainService, err := NewService(ctx, cfg)
require.NoError(t, err, "Unable to setup chain service")
chainService.genesisTime = time.Unix(1, 0) // non-zero time

@@ -132,8 +139,9 @@ func TestChainStartStop_Initialized(t *testing.T) {
genesisBlk := testutil.NewBeaconBlock()
blkRoot, err := genesisBlk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, genesisBlk))
s := testutil.NewBeaconState()
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesisBlk)))
s, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.SetSlot(1))
require.NoError(t, beaconDB.SaveState(ctx, s, blkRoot))
require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, blkRoot))
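From here on, the test hunks pass raw *ethpb.SignedBeaconBlock values through wrapper.WrappedPhase0SignedBeaconBlock before handing them to the database or the chain service, since those APIs now take the block.SignedBeaconBlock interface. A short sketch of that conversion as it is used in these hunks (test harness and error branches trimmed; it assumes the same testutil, wrapper and require packages imported above):

blk := testutil.NewBeaconBlock()                       // *ethpb.SignedBeaconBlock
wrapped := wrapper.WrappedPhase0SignedBeaconBlock(blk) // satisfies block.SignedBeaconBlock
root, err := wrapped.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wrapped))
require.Equal(t, true, beaconDB.HasBlock(ctx, root))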
@@ -161,8 +169,9 @@ func TestChainStartStop_GenesisZeroHashes(t *testing.T) {
genesisBlk := testutil.NewBeaconBlock()
blkRoot, err := genesisBlk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, genesisBlk))
s := testutil.NewBeaconState()
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesisBlk)))
s, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, s, blkRoot))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, blkRoot))
require.NoError(t, beaconDB.SaveJustifiedCheckpoint(ctx, &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}))
@@ -229,8 +238,9 @@ func TestChainService_CorrectGenesisRoots(t *testing.T) {
genesisBlk := testutil.NewBeaconBlock()
blkRoot, err := genesisBlk.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, genesisBlk))
s := testutil.NewBeaconState()
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesisBlk)))
s, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.SetSlot(0))
require.NoError(t, beaconDB.SaveState(ctx, s, blkRoot))
require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, blkRoot))
@@ -255,29 +265,30 @@ func TestChainService_InitializeChainInfo(t *testing.T) {
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
require.NoError(t, beaconDB.SaveBlock(ctx, genesis))
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))

finalizedSlot := params.BeaconConfig().SlotsPerEpoch*2 + 1
headBlock := testutil.NewBeaconBlock()
headBlock.Block.Slot = finalizedSlot
headBlock.Block.ParentRoot = bytesutil.PadTo(genesisRoot[:], 32)
headState := testutil.NewBeaconState()
headState, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, headState.SetSlot(finalizedSlot))
require.NoError(t, headState.SetGenesisValidatorRoot(params.BeaconConfig().ZeroHash[:]))
headRoot, err := headBlock.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
require.NoError(t, beaconDB.SaveBlock(ctx, headBlock))
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(headBlock)))
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Epoch: helpers.SlotToEpoch(finalizedSlot), Root: headRoot[:]}))
c := &Service{beaconDB: beaconDB, stateGen: stategen.New(beaconDB)}
c := &Service{cfg: &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)}}
require.NoError(t, c.initializeChainInfo(ctx))
headBlk, err := c.HeadBlock(ctx)
require.NoError(t, err)
assert.DeepEqual(t, headBlock, headBlk, "Head block incorrect")
assert.DeepEqual(t, headBlock, headBlk.Proto(), "Head block incorrect")
s, err := c.HeadState(ctx)
require.NoError(t, err)
assert.DeepEqual(t, headState.InnerStateUnsafe(), s.InnerStateUnsafe(), "Head state incorrect")
assert.DeepSSZEqual(t, headState.InnerStateUnsafe(), s.InnerStateUnsafe(), "Head state incorrect")
assert.Equal(t, c.HeadSlot(), headBlock.Block.Slot, "Head slot incorrect")
r, err := c.HeadRoot(context.Background())
require.NoError(t, err)
@@ -295,27 +306,28 @@ func TestChainService_InitializeChainInfo_SetHeadAtGenesis(t *testing.T) {
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
require.NoError(t, beaconDB.SaveBlock(ctx, genesis))
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis)))

finalizedSlot := params.BeaconConfig().SlotsPerEpoch*2 + 1
headBlock := testutil.NewBeaconBlock()
headBlock.Block.Slot = finalizedSlot
headBlock.Block.ParentRoot = bytesutil.PadTo(genesisRoot[:], 32)
headState := testutil.NewBeaconState()
headState, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, headState.SetSlot(finalizedSlot))
require.NoError(t, headState.SetGenesisValidatorRoot(params.BeaconConfig().ZeroHash[:]))
headRoot, err := headBlock.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
require.NoError(t, beaconDB.SaveBlock(ctx, headBlock))
c := &Service{beaconDB: beaconDB, stateGen: stategen.New(beaconDB)}
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(headBlock)))
c := &Service{cfg: &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)}}
require.NoError(t, c.initializeChainInfo(ctx))
s, err := c.HeadState(ctx)
require.NoError(t, err)
assert.DeepEqual(t, headState.InnerStateUnsafe(), s.InnerStateUnsafe(), "Head state incorrect")
assert.DeepSSZEqual(t, headState.InnerStateUnsafe(), s.InnerStateUnsafe(), "Head state incorrect")
assert.Equal(t, genesisRoot, c.genesisRoot, "Genesis block root incorrect")
assert.DeepEqual(t, genesis, c.head.block)
assert.DeepEqual(t, genesis, c.head.block.Proto())
}

func TestChainService_InitializeChainInfo_HeadSync(t *testing.T) {
@@ -336,14 +348,14 @@ func TestChainService_InitializeChainInfo_HeadSync(t *testing.T) {
genesisRoot, err := genesisBlock.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
require.NoError(t, beaconDB.SaveBlock(ctx, genesisBlock))
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesisBlock)))

finalizedBlock := testutil.NewBeaconBlock()
finalizedBlock.Block.Slot = finalizedSlot
finalizedBlock.Block.ParentRoot = genesisRoot[:]
finalizedRoot, err := finalizedBlock.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, finalizedBlock))
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(finalizedBlock)))

// Set head slot close to the finalization point, no head sync is triggered.
headBlock := testutil.NewBeaconBlock()
@@ -351,9 +363,10 @@ func TestChainService_InitializeChainInfo_HeadSync(t *testing.T) {
headBlock.Block.ParentRoot = finalizedRoot[:]
headRoot, err := headBlock.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, headBlock))
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(headBlock)))

headState := testutil.NewBeaconState()
headState, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, headState.SetSlot(headBlock.Block.Slot))
require.NoError(t, headState.SetGenesisValidatorRoot(params.BeaconConfig().ZeroHash[:]))
require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
@@ -365,15 +378,15 @@ func TestChainService_InitializeChainInfo_HeadSync(t *testing.T) {
Root: finalizedRoot[:],
}))

c := &Service{beaconDB: beaconDB, stateGen: stategen.New(beaconDB)}
c := &Service{cfg: &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)}}

require.NoError(t, c.initializeChainInfo(ctx))
s, err := c.HeadState(ctx)
require.NoError(t, err)
assert.DeepEqual(t, headState.InnerStateUnsafe(), s.InnerStateUnsafe(), "Head state incorrect")
assert.DeepSSZEqual(t, headState.InnerStateUnsafe(), s.InnerStateUnsafe(), "Head state incorrect")
assert.Equal(t, genesisRoot, c.genesisRoot, "Genesis block root incorrect")
// Since head sync is not triggered, chain is initialized to the last finalization checkpoint.
assert.DeepEqual(t, finalizedBlock, c.head.block)
assert.DeepEqual(t, finalizedBlock, c.head.block.Proto())
assert.LogsContain(t, hook, "resetting head from the checkpoint ('--head-sync' flag is ignored)")
assert.LogsDoNotContain(t, hook, "Regenerating state from the last checkpoint at slot")

@@ -383,7 +396,7 @@ func TestChainService_InitializeChainInfo_HeadSync(t *testing.T) {
headBlock.Block.ParentRoot = finalizedRoot[:]
headRoot, err = headBlock.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, headBlock))
require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(headBlock)))
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, headRoot))

@@ -391,10 +404,10 @@ func TestChainService_InitializeChainInfo_HeadSync(t *testing.T) {
require.NoError(t, c.initializeChainInfo(ctx))
s, err = c.HeadState(ctx)
require.NoError(t, err)
assert.DeepEqual(t, headState.InnerStateUnsafe(), s.InnerStateUnsafe(), "Head state incorrect")
assert.DeepSSZEqual(t, headState.InnerStateUnsafe(), s.InnerStateUnsafe(), "Head state incorrect")
assert.Equal(t, genesisRoot, c.genesisRoot, "Genesis block root incorrect")
// Head slot is far beyond the latest finalized checkpoint, head sync is triggered.
assert.DeepEqual(t, headBlock, c.head.block)
assert.DeepEqual(t, headBlock, c.head.block.Proto())
assert.LogsContain(t, hook, "Regenerating state from the last checkpoint at slot 225")
assert.LogsDoNotContain(t, hook, "resetting head from the checkpoint ('--head-sync' flag is ignored)")
}
@@ -403,18 +416,18 @@ func TestChainService_SaveHeadNoDB(t *testing.T) {
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
s := &Service{
beaconDB: beaconDB,
stateGen: stategen.New(beaconDB),
cfg: &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
}
blk := testutil.NewBeaconBlock()
blk.Block.Slot = 1
r, err := blk.HashTreeRoot()
require.NoError(t, err)
newState := testutil.NewBeaconState()
require.NoError(t, s.stateGen.SaveState(ctx, r, newState))
require.NoError(t, s.saveHeadNoDB(ctx, blk, r, newState))
newState, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.cfg.StateGen.SaveState(ctx, r, newState))
require.NoError(t, s.saveHeadNoDB(ctx, wrapper.WrappedPhase0SignedBeaconBlock(blk), r, newState))

newB, err := s.beaconDB.HeadBlock(ctx)
newB, err := s.cfg.BeaconDB.HeadBlock(ctx)
require.NoError(t, err)
if reflect.DeepEqual(newB, blk) {
t.Error("head block should not be equal")
@@ -425,15 +438,15 @@ func TestHasBlock_ForkChoiceAndDB(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
s := &Service{
forkChoiceStore: protoarray.New(0, 0, [32]byte{}),
cfg: &Config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{}), BeaconDB: beaconDB},
finalizedCheckpt: &ethpb.Checkpoint{Root: make([]byte, 32)},
beaconDB: beaconDB,
}
block := testutil.NewBeaconBlock()
r, err := block.Block.HashTreeRoot()
require.NoError(t, err)
beaconState := testutil.NewBeaconState()
require.NoError(t, s.insertBlockAndAttestationsToForkChoiceStore(ctx, block.Block, r, beaconState))
beaconState, err := testutil.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), r, beaconState))

assert.Equal(t, false, s.hasBlock(ctx, [32]byte{}), "Should not have block")
assert.Equal(t, true, s.hasBlock(ctx, r), "Should have block")
@@ -443,30 +456,29 @@ func TestServiceStop_SaveCachedBlocks(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
beaconDB := testDB.SetupDB(t)
s := &Service{
cfg: &Config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
ctx: ctx,
cancel: cancel,
beaconDB: beaconDB,
initSyncBlocks: make(map[[32]byte]*ethpb.SignedBeaconBlock),
stateGen: stategen.New(beaconDB),
initSyncBlocks: make(map[[32]byte]block.SignedBeaconBlock),
}
b := testutil.NewBeaconBlock()
r, err := b.Block.HashTreeRoot()
block := testutil.NewBeaconBlock()
r, err := block.Block.HashTreeRoot()
require.NoError(t, err)
s.saveInitSyncBlock(r, b)
s.saveInitSyncBlock(r, wrapper.WrappedPhase0SignedBeaconBlock(block))
require.NoError(t, s.Stop())
require.Equal(t, true, s.beaconDB.HasBlock(ctx, r))
require.Equal(t, true, s.cfg.BeaconDB.HasBlock(ctx, r))
}

func TestProcessChainStartTime_ReceivedFeed(t *testing.T) {
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
stateChannel := make(chan *feed.Event, 1)
stateSub := service.stateNotifier.StateFeed().Subscribe(stateChannel)
stateSub := service.cfg.StateNotifier.StateFeed().Subscribe(stateChannel)
defer stateSub.Unsubscribe()
service.processChainStartTime(context.Background(), time.Now())

stateEvent := <-stateChannel
require.Equal(t, int(stateEvent.Type), int(statefeed.Initialized))
require.Equal(t, int(stateEvent.Type), statefeed.Initialized)
_, ok := stateEvent.Data.(*statefeed.InitializedData)
require.Equal(t, true, ok)
}
@@ -475,16 +487,16 @@ func BenchmarkHasBlockDB(b *testing.B) {
beaconDB := testDB.SetupDB(b)
ctx := context.Background()
s := &Service{
beaconDB: beaconDB,
cfg: &Config{BeaconDB: beaconDB},
}
block := testutil.NewBeaconBlock()
require.NoError(b, s.beaconDB.SaveBlock(ctx, block))
require.NoError(b, s.cfg.BeaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(block)))
r, err := block.Block.HashTreeRoot()
require.NoError(b, err)

b.ResetTimer()
for i := 0; i < b.N; i++ {
require.Equal(b, true, s.beaconDB.HasBlock(ctx, r), "Block is not in DB")
require.Equal(b, true, s.cfg.BeaconDB.HasBlock(ctx, r), "Block is not in DB")
}
}

@@ -492,20 +504,19 @@ func BenchmarkHasBlockForkChoiceStore(b *testing.B) {
ctx := context.Background()
beaconDB := testDB.SetupDB(b)
s := &Service{
forkChoiceStore: protoarray.New(0, 0, [32]byte{}),
cfg: &Config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{}), BeaconDB: beaconDB},
finalizedCheckpt: &ethpb.Checkpoint{Root: make([]byte, 32)},
beaconDB: beaconDB,
}
block := &ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}}
r, err := block.Block.HashTreeRoot()
require.NoError(b, err)
bs := &pb.BeaconState{FinalizedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}, CurrentJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}}
beaconState, err := beaconstate.InitializeFromProto(bs)
bs := &ethpb.BeaconState{FinalizedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}, CurrentJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)}}
beaconState, err := v1.InitializeFromProto(bs)
require.NoError(b, err)
require.NoError(b, s.insertBlockAndAttestationsToForkChoiceStore(ctx, block.Block, r, beaconState))
require.NoError(b, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wrapper.WrappedPhase0SignedBeaconBlock(block).Block(), r, beaconState))

b.ResetTimer()
for i := 0; i < b.N; i++ {
require.Equal(b, true, s.forkChoiceStore.HasNode(r), "Block is not in fork choice store")
require.Equal(b, true, s.cfg.ForkChoiceStore.HasNode(r), "Block is not in fork choice store")
}
}

@@ -19,11 +19,14 @@ go_library(
"//beacon-chain/db:go_default_library",
"//beacon-chain/forkchoice/protoarray:go_default_library",
"//beacon-chain/state:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//beacon-chain/state/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/block:go_default_library",
"//shared/bytesutil:go_default_library",
"//shared/event:go_default_library",
"//shared/params:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)

@@ -9,7 +9,7 @@ import (
"time"

"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
blockfeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/block"
@@ -18,8 +18,11 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/db"
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/event"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/sirupsen/logrus"
@@ -27,18 +30,18 @@ import (

// ChainService defines the mock interface for testing
type ChainService struct {
State *stateTrie.BeaconState
State state.BeaconState
Root []byte
Block *ethpb.SignedBeaconBlock
Block block.SignedBeaconBlock
FinalizedCheckPoint *ethpb.Checkpoint
CurrentJustifiedCheckPoint *ethpb.Checkpoint
PreviousJustifiedCheckPoint *ethpb.Checkpoint
BlocksReceived []*ethpb.SignedBeaconBlock
BlocksReceived []block.SignedBeaconBlock
Balance *precompute.Balance
Genesis time.Time
ValidatorsRoot [32]byte
CanonicalRoots map[[32]byte]bool
Fork *pb.Fork
Fork *ethpb.Fork
ETH1Data *ethpb.Eth1Data
DB db.Database
stateNotifier statefeed.Notifier
@@ -47,7 +50,14 @@ type ChainService struct {
ValidAttestation bool
ForkChoiceStore *protoarray.Store
VerifyBlkDescendantErr error
Slot *uint64 // Pointer because 0 is a useful value, so checking against it can be incorrect.
Slot *types.Slot // Pointer because 0 is a useful value, so checking against it can be incorrect.
CurrentSyncCommitteeIndices []types.CommitteeIndex
NextSyncCommitteeIndices []types.CommitteeIndex
SyncCommitteeDomain []byte
SyncSelectionProofDomain []byte
SyncContributionProofDomain []byte
PublicKey [48]byte
SyncCommitteePubkeys [][]byte
}

// StateNotifier mocks the same method in the chain service.
@@ -146,18 +156,18 @@ func (mon *MockOperationNotifier) OperationFeed() *event.Feed {
|
||||
}
|
||||
|
||||
// ReceiveBlockInitialSync mocks ReceiveBlockInitialSync method in chain service.
|
||||
func (s *ChainService) ReceiveBlockInitialSync(ctx context.Context, block *ethpb.SignedBeaconBlock, _ [32]byte) error {
|
||||
func (s *ChainService) ReceiveBlockInitialSync(ctx context.Context, block block.SignedBeaconBlock, _ [32]byte) error {
|
||||
if s.State == nil {
|
||||
s.State = &stateTrie.BeaconState{}
|
||||
s.State = &v1.BeaconState{}
|
||||
}
|
||||
if !bytes.Equal(s.Root, block.Block.ParentRoot) {
|
||||
return errors.Errorf("wanted %#x but got %#x", s.Root, block.Block.ParentRoot)
|
||||
if !bytes.Equal(s.Root, block.Block().ParentRoot()) {
|
||||
return errors.Errorf("wanted %#x but got %#x", s.Root, block.Block().ParentRoot())
|
||||
}
|
||||
if err := s.State.SetSlot(block.Block.Slot); err != nil {
|
||||
if err := s.State.SetSlot(block.Block().Slot()); err != nil {
|
||||
return err
|
||||
}
|
||||
s.BlocksReceived = append(s.BlocksReceived, block)
|
||||
signingRoot, err := block.Block.HashTreeRoot()
|
||||
signingRoot, err := block.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -165,7 +175,7 @@ func (s *ChainService) ReceiveBlockInitialSync(ctx context.Context, block *ethpb
|
||||
if err := s.DB.SaveBlock(ctx, block); err != nil {
|
||||
return err
|
||||
}
|
||||
logrus.Infof("Saved block with root: %#x at slot %d", signingRoot, block.Block.Slot)
|
||||
logrus.Infof("Saved block with root: %#x at slot %d", signingRoot, block.Block().Slot())
|
||||
}
|
||||
s.Root = signingRoot[:]
|
||||
s.Block = block
|
||||
@@ -173,19 +183,19 @@ func (s *ChainService) ReceiveBlockInitialSync(ctx context.Context, block *ethpb
|
||||
}
|
||||
|
||||
// ReceiveBlockBatch processes blocks in batches from initial-sync.
|
||||
func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []*ethpb.SignedBeaconBlock, _ [][32]byte) error {
|
||||
func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []block.SignedBeaconBlock, _ [][32]byte) error {
|
||||
if s.State == nil {
|
||||
s.State = &stateTrie.BeaconState{}
|
||||
s.State = &v1.BeaconState{}
|
||||
}
|
||||
for _, block := range blks {
|
||||
if !bytes.Equal(s.Root, block.Block.ParentRoot) {
|
||||
return errors.Errorf("wanted %#x but got %#x", s.Root, block.Block.ParentRoot)
|
||||
if !bytes.Equal(s.Root, block.Block().ParentRoot()) {
|
||||
return errors.Errorf("wanted %#x but got %#x", s.Root, block.Block().ParentRoot())
|
||||
}
|
||||
if err := s.State.SetSlot(block.Block.Slot); err != nil {
|
||||
if err := s.State.SetSlot(block.Block().Slot()); err != nil {
|
||||
return err
|
||||
}
|
||||
s.BlocksReceived = append(s.BlocksReceived, block)
|
||||
signingRoot, err := block.Block.HashTreeRoot()
|
||||
signingRoot, err := block.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -193,7 +203,7 @@ func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []*ethpb.Sign
|
||||
if err := s.DB.SaveBlock(ctx, block); err != nil {
|
||||
return err
|
||||
}
|
||||
logrus.Infof("Saved block with root: %#x at slot %d", signingRoot, block.Block.Slot)
|
||||
logrus.Infof("Saved block with root: %#x at slot %d", signingRoot, block.Block().Slot())
|
||||
}
|
||||
s.Root = signingRoot[:]
|
||||
s.Block = block
|
||||
@@ -202,18 +212,18 @@ func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []*ethpb.Sign
|
||||
}
|
||||
|
||||
// ReceiveBlock mocks ReceiveBlock method in chain service.
|
||||
func (s *ChainService) ReceiveBlock(ctx context.Context, block *ethpb.SignedBeaconBlock, _ [32]byte) error {
|
||||
func (s *ChainService) ReceiveBlock(ctx context.Context, block block.SignedBeaconBlock, _ [32]byte) error {
|
||||
if s.State == nil {
|
||||
s.State = &stateTrie.BeaconState{}
|
||||
s.State = &v1.BeaconState{}
|
||||
}
|
||||
if !bytes.Equal(s.Root, block.Block.ParentRoot) {
|
||||
return errors.Errorf("wanted %#x but got %#x", s.Root, block.Block.ParentRoot)
|
||||
if !bytes.Equal(s.Root, block.Block().ParentRoot()) {
|
||||
return errors.Errorf("wanted %#x but got %#x", s.Root, block.Block().ParentRoot())
|
||||
}
|
||||
if err := s.State.SetSlot(block.Block.Slot); err != nil {
|
||||
if err := s.State.SetSlot(block.Block().Slot()); err != nil {
|
||||
return err
|
||||
}
|
||||
s.BlocksReceived = append(s.BlocksReceived, block)
|
||||
signingRoot, err := block.Block.HashTreeRoot()
|
||||
signingRoot, err := block.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -221,7 +231,7 @@ func (s *ChainService) ReceiveBlock(ctx context.Context, block *ethpb.SignedBeac
|
||||
if err := s.DB.SaveBlock(ctx, block); err != nil {
|
||||
return err
|
||||
}
|
||||
logrus.Infof("Saved block with root: %#x at slot %d", signingRoot, block.Block.Slot)
|
||||
logrus.Infof("Saved block with root: %#x at slot %d", signingRoot, block.Block().Slot())
|
||||
}
|
||||
s.Root = signingRoot[:]
|
||||
s.Block = block
|
||||
@@ -229,7 +239,7 @@ func (s *ChainService) ReceiveBlock(ctx context.Context, block *ethpb.SignedBeac
|
||||
}
|
||||
|
||||
// HeadSlot mocks HeadSlot method in chain service.
|
||||
func (s *ChainService) HeadSlot() uint64 {
|
||||
func (s *ChainService) HeadSlot() types.Slot {
|
||||
if s.State == nil {
|
||||
return 0
|
||||
}
|
||||
@@ -245,17 +255,17 @@ func (s *ChainService) HeadRoot(_ context.Context) ([]byte, error) {
}

// HeadBlock mocks HeadBlock method in chain service.
func (s *ChainService) HeadBlock(context.Context) (*ethpb.SignedBeaconBlock, error) {
func (s *ChainService) HeadBlock(context.Context) (block.SignedBeaconBlock, error) {
return s.Block, nil
}

// HeadState mocks HeadState method in chain service.
func (s *ChainService) HeadState(context.Context) (*stateTrie.BeaconState, error) {
func (s *ChainService) HeadState(context.Context) (state.BeaconState, error) {
return s.State, nil
}

// CurrentFork mocks HeadState method in chain service.
func (s *ChainService) CurrentFork() *pb.Fork {
func (s *ChainService) CurrentFork() *ethpb.Fork {
return s.Fork
}

@@ -285,20 +295,20 @@ func (s *ChainService) ReceiveAttestationNoPubsub(context.Context, *ethpb.Attest
}

// AttestationPreState mocks AttestationPreState method in chain service.
func (s *ChainService) AttestationPreState(_ context.Context, _ *ethpb.Attestation) (*stateTrie.BeaconState, error) {
func (s *ChainService) AttestationPreState(_ context.Context, _ *ethpb.Attestation) (state.BeaconState, error) {
return s.State, nil
}

// HeadValidatorsIndices mocks the same method in the chain service.
func (s *ChainService) HeadValidatorsIndices(_ context.Context, epoch uint64) ([]uint64, error) {
func (s *ChainService) HeadValidatorsIndices(_ context.Context, epoch types.Epoch) ([]types.ValidatorIndex, error) {
if s.State == nil {
return []uint64{}, nil
return []types.ValidatorIndex{}, nil
}
return helpers.ActiveValidatorIndices(s.State, epoch)
}

// HeadSeed mocks the same method in the chain service.
func (s *ChainService) HeadSeed(_ context.Context, epoch uint64) ([32]byte, error) {
func (s *ChainService) HeadSeed(_ context.Context, epoch types.Epoch) ([32]byte, error) {
return helpers.Seed(s.State, epoch, params.BeaconConfig().DomainBeaconAttester)
}

@@ -323,11 +333,11 @@ func (s *ChainService) GenesisValidatorRoot() [32]byte {
}

// CurrentSlot mocks the same method in the chain service.
func (s *ChainService) CurrentSlot() uint64 {
func (s *ChainService) CurrentSlot() types.Slot {
if s.Slot != nil {
return *s.Slot
}
return uint64(time.Now().Unix()-s.Genesis.Unix()) / params.BeaconConfig().SecondsPerSlot
return types.Slot(uint64(time.Now().Unix()-s.Genesis.Unix()) / params.BeaconConfig().SecondsPerSlot)
}

// Participation mocks the same method in the chain service.

@@ -380,3 +390,52 @@ func (s *ChainService) VerifyFinalizedConsistency(_ context.Context, r []byte) e
}
return nil
}

// ChainHeads mocks ChainHeads and always return nil.
func (s *ChainService) ChainHeads() ([][32]byte, []types.Slot) {
return [][32]byte{
bytesutil.ToBytes32(bytesutil.PadTo([]byte("foo"), 32)),
bytesutil.ToBytes32(bytesutil.PadTo([]byte("bar"), 32)),
},
[]types.Slot{0, 1}
}

// HeadPublicKeyToValidatorIndex mocks HeadPublicKeyToValidatorIndex and always return 0 and true.
func (s *ChainService) HeadPublicKeyToValidatorIndex(ctx context.Context, pubKey [48]byte) (types.ValidatorIndex, bool) {
return 0, true
}

// HeadValidatorIndexToPublicKey mocks HeadValidatorIndexToPublicKey and always return empty and nil.
func (s *ChainService) HeadValidatorIndexToPublicKey(ctx context.Context, index types.ValidatorIndex) ([48]byte, error) {
return s.PublicKey, nil
}

// HeadCurrentSyncCommitteeIndices mocks HeadCurrentSyncCommitteeIndices and always return `CurrentSyncCommitteeIndices`.
func (s *ChainService) HeadCurrentSyncCommitteeIndices(ctx context.Context, index types.ValidatorIndex, slot types.Slot) ([]types.CommitteeIndex, error) {
return s.CurrentSyncCommitteeIndices, nil
}

// HeadNextSyncCommitteeIndices mocks HeadNextSyncCommitteeIndices and always return `HeadNextSyncCommitteeIndices`.
func (s *ChainService) HeadNextSyncCommitteeIndices(ctx context.Context, index types.ValidatorIndex, slot types.Slot) ([]types.CommitteeIndex, error) {
return s.NextSyncCommitteeIndices, nil
}

// HeadSyncCommitteePubKeys mocks HeadSyncCommitteePubKeys and always return empty nil.
func (s *ChainService) HeadSyncCommitteePubKeys(ctx context.Context, slot types.Slot, committeeIndex types.CommitteeIndex) ([][]byte, error) {
return s.SyncCommitteePubkeys, nil
}

// HeadSyncCommitteeDomain mocks HeadSyncCommitteeDomain and always return empty nil.
func (s *ChainService) HeadSyncCommitteeDomain(ctx context.Context, slot types.Slot) ([]byte, error) {
return s.SyncCommitteeDomain, nil
}

// HeadSyncSelectionProofDomain mocks HeadSyncSelectionProofDomain and always return empty nil.
func (s *ChainService) HeadSyncSelectionProofDomain(ctx context.Context, slot types.Slot) ([]byte, error) {
return s.SyncSelectionProofDomain, nil
}

// HeadSyncContributionProofDomain mocks HeadSyncContributionProofDomain and always return empty nil.
func (s *ChainService) HeadSyncContributionProofDomain(ctx context.Context, slot types.Slot) ([]byte, error) {
return s.SyncContributionProofDomain, nil
}
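The mock above is what sync and RPC tests inject in place of the real blockchain service. A minimal sketch of how a test might wire it up, assuming the package alias `mock` for the blockchain testing package and using only the exported fields visible in the diff (`State`, `Fork`); the surrounding helpers are illustrative, not taken from this changeset:

// Hypothetical test wiring; field names come from the mock diff above,
// the import alias and fixtures are assumptions.
chainSvc := &mock.ChainService{
	State: st,            // a pre-built state.BeaconState fixture
	Fork:  &ethpb.Fork{}, // returned verbatim by CurrentFork()
}
head, err := chainSvc.HeadState(context.Background())
require.NoError(t, err)
require.Equal(t, st, head) // the mock simply echoes the injected fixture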
@@ -14,7 +14,7 @@ import (
// Reference design: https://github.com/ethereum/eth2.0-specs/blob/master/specs/phase0/weak-subjectivity.md#weak-subjectivity-sync-procedure
func (s *Service) VerifyWeakSubjectivityRoot(ctx context.Context) error {
// TODO(7342): Remove the following to fully use weak subjectivity in production.
if len(s.wsRoot) == 0 || s.wsEpoch == 0 {
if s.cfg.WeakSubjectivityCheckpt == nil || len(s.cfg.WeakSubjectivityCheckpt.Root) == 0 || s.cfg.WeakSubjectivityCheckpt.Epoch == 0 {
return nil
}

@@ -23,28 +23,28 @@ func (s *Service) VerifyWeakSubjectivityRoot(ctx context.Context) error {
if s.wsVerified {
return nil
}
if s.wsEpoch > s.finalizedCheckpt.Epoch {
if s.cfg.WeakSubjectivityCheckpt.Epoch > s.finalizedCheckpt.Epoch {
return nil
}

r := bytesutil.ToBytes32(s.wsRoot)
log.Infof("Performing weak subjectivity check for root %#x in epoch %d", r, s.wsEpoch)
r := bytesutil.ToBytes32(s.cfg.WeakSubjectivityCheckpt.Root)
log.Infof("Performing weak subjectivity check for root %#x in epoch %d", r, s.cfg.WeakSubjectivityCheckpt.Epoch)
// Save initial sync cached blocks to DB.
if err := s.beaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
if err := s.cfg.BeaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
return err
}
// A node should have the weak subjectivity block in the DB.
if !s.beaconDB.HasBlock(ctx, r) {
if !s.cfg.BeaconDB.HasBlock(ctx, r) {
return fmt.Errorf("node does not have root in DB: %#x", r)
}

startSlot, err := helpers.StartSlot(s.wsEpoch)
startSlot, err := helpers.StartSlot(s.cfg.WeakSubjectivityCheckpt.Epoch)
if err != nil {
return err
}
// A node should have the weak subjectivity block corresponds to the correct epoch in the DB.
filter := filters.NewFilter().SetStartSlot(startSlot).SetEndSlot(startSlot + params.BeaconConfig().SlotsPerEpoch)
roots, err := s.beaconDB.BlockRoots(ctx, filter)
roots, err := s.cfg.BeaconDB.BlockRoots(ctx, filter)
if err != nil {
return err
}

@@ -56,5 +56,5 @@ func (s *Service) VerifyWeakSubjectivityRoot(ctx context.Context) error {
}
}

return fmt.Errorf("node does not have root in db corresponding to epoch: %#x %d", r, s.wsEpoch)
return fmt.Errorf("node does not have root in db corresponding to epoch: %#x %d", r, s.cfg.WeakSubjectivityCheckpt.Epoch)
}

@@ -4,8 +4,11 @@ import (
"context"
"testing"

ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
types "github.com/prysmaticlabs/eth2-types"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/testutil"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
)

@@ -15,15 +18,14 @@ func TestService_VerifyWeakSubjectivityRoot(t *testing.T) {

b := testutil.NewBeaconBlock()
b.Block.Slot = 32
require.NoError(t, beaconDB.SaveBlock(context.Background(), b))
require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(b)))
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
tests := []struct {
wsVerified bool
wantErr bool
wsRoot [32]byte
wsEpoch uint64
finalizedEpoch uint64
checkpt *ethpb.Checkpoint
finalizedEpoch types.Epoch
errString string
name string
}{

@@ -33,37 +35,34 @@ func TestService_VerifyWeakSubjectivityRoot(t *testing.T) {
},
{
name: "already verified",
wsEpoch: 2,
checkpt: &ethpb.Checkpoint{Epoch: 2},
finalizedEpoch: 2,
wsVerified: true,
wantErr: false,
},
{
name: "not yet to verify, ws epoch higher than finalized epoch",
wsEpoch: 2,
checkpt: &ethpb.Checkpoint{Epoch: 2},
finalizedEpoch: 1,
wantErr: false,
},
{
name: "can't find the block in DB",
wsEpoch: 1,
wsRoot: [32]byte{'a'},
checkpt: &ethpb.Checkpoint{Root: bytesutil.PadTo([]byte{'a'}, 32), Epoch: 1},
finalizedEpoch: 3,
wantErr: true,
errString: "node does not have root in DB",
},
{
name: "can't find the block corresponds to ws epoch in DB",
wsEpoch: 2,
wsRoot: r, // Root belongs in epoch 1.
checkpt: &ethpb.Checkpoint{Root: r[:], Epoch: 2}, // Root belongs in epoch 1.
finalizedEpoch: 3,
wantErr: true,
errString: "node does not have root in db corresponding to epoch",
},
{
name: "can verify and pass",
wsEpoch: 1,
wsRoot: r,
checkpt: &ethpb.Checkpoint{Root: r[:], Epoch: 1},
finalizedEpoch: 3,
wantErr: false,
},

@@ -71,9 +70,7 @@ func TestService_VerifyWeakSubjectivityRoot(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s := &Service{
beaconDB: beaconDB,
wsRoot: tt.wsRoot[:],
wsEpoch: tt.wsEpoch,
cfg: &Config{BeaconDB: beaconDB, WeakSubjectivityCheckpt: tt.checkpt},
wsVerified: tt.wsVerified,
finalizedCheckpt: &ethpb.Checkpoint{Epoch: tt.finalizedEpoch},
}
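As the test above shows, the weak subjectivity inputs now travel through the service config as a single *ethpb.Checkpoint instead of the separate wsRoot/wsEpoch fields. A hedged sketch of constructing that config (only the Config fields and Checkpoint shape come from the diff; the surrounding wiring is illustrative):

// Minimal sketch, assuming a beaconDB handle and ctx already exist.
wsCheckpt := &ethpb.Checkpoint{
	Root:  bytesutil.PadTo([]byte{'a'}, 32), // 32-byte anchor block root
	Epoch: 1,                                // epoch the root must belong to
}
svc := &Service{
	cfg: &Config{BeaconDB: beaconDB, WeakSubjectivityCheckpt: wsCheckpt},
}
// Per the function above, the check is a no-op until the finalized epoch
// reaches the checkpoint epoch, and errors if the root is missing from the DB.
err := svc.VerifyWeakSubjectivityRoot(ctx)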
38
beacon-chain/cache/BUILD.bazel
vendored

@@ -1,8 +1,7 @@
load("@prysm//tools/go:def.bzl", "go_library")
load("@io_bazel_rules_go//go:def.bzl", "go_test")
load("@prysm//tools/go:def.bzl", "go_library", "go_test")

# gazelle:ignore committee_disabled.go
# gazelle:ignore proposer_indices_disabled.go
# gazelle:exclude committee_disabled.go
# gazelle:exclude proposer_indices_disabled.go
go_library(
name = "go_default_library",
srcs = [

@@ -11,17 +10,21 @@ go_library(
"committees.go",
"common.go",
"doc.go",
"error.go",
"proposer_indices_type.go",
"skip_slot_cache.go",
"subnet_ids.go",
"proposer_indices_type.go",
"sync_subnet_ids.go",
] + select({
"//fuzz:fuzzing_enabled": [
"committee_disabled.go",
"proposer_indices_disabled.go"
"proposer_indices_disabled.go",
"sync_committee_disabled.go",
],
"//conditions:default": [
"committee.go",
"proposer_indices.go",
"sync_committee.go",
],
}),
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/cache",

@@ -32,16 +35,18 @@ go_library(
],
deps = [
"//beacon-chain/state:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//shared/featureconfig:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//shared/bytesutil:go_default_library",
"//shared/copyutil:go_default_library",
"//shared/hashutil:go_default_library",
"//shared/params:go_default_library",
"//shared/rand:go_default_library",
"//shared/sliceutil:go_default_library",
"@com_github_hashicorp_golang_lru//:go_default_library",
"@com_github_patrickmn_go_cache//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
"@io_k8s_client_go//tools/cache:go_default_library",
"@io_opencensus_go//trace:go_default_library",
],

@@ -52,25 +57,28 @@ go_test(
size = "small",
srcs = [
"attestation_data_test.go",
"cache_test.go",
"checkpoint_state_test.go",
"committee_fuzz_test.go",
"committee_test.go",
"cache_test.go",
"proposer_indices_test.go",
"skip_slot_cache_test.go",
"subnet_ids_test.go",
"proposer_indices_test.go"
"sync_committee_test.go",
"sync_subnet_ids_test.go",
],
embed = [":go_default_library"],
deps = [
"//beacon-chain/state:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//beacon-chain/state/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//shared/bytesutil:go_default_library",
"//shared/featureconfig:go_default_library",
"//shared/params:go_default_library",
"//shared/testutil:go_default_library",
"//shared/testutil/assert:go_default_library",
"//shared/testutil/require:go_default_library",
"@com_github_gogo_protobuf//proto:go_default_library",
"@com_github_google_gofuzz//:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],
)
6
beacon-chain/cache/attestation_data.go
vendored

@@ -10,8 +10,8 @@ import (

"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/copyutil"
"k8s.io/client-go/tools/cache"
)

@@ -98,7 +98,7 @@ func (c *AttestationCache) Get(ctx context.Context, req *ethpb.AttestationDataRe

if exists && item != nil && item.(*attestationReqResWrapper).res != nil {
attestationCacheHit.Inc()
return state.CopyAttestationData(item.(*attestationReqResWrapper).res), nil
return copyutil.CopyAttestationData(item.(*attestationReqResWrapper).res), nil
}
attestationCacheMiss.Inc()
return nil, nil

4
beacon-chain/cache/attestation_data_test.go
vendored

@@ -4,10 +4,10 @@ import (
"context"
"testing"

"github.com/gogo/protobuf/proto"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"google.golang.org/protobuf/proto"
)

func TestAttestationCache_RoundTrip(t *testing.T) {

4
beacon-chain/cache/cache_test.go
vendored

@@ -2,12 +2,8 @@ package cache

import (
"testing"

"github.com/prysmaticlabs/prysm/shared/featureconfig"
)

func TestMain(m *testing.M) {
resetCfg := featureconfig.InitWithReset(&featureconfig.Flags{EnableEth1DataVoteCache: true})
defer resetCfg()
m.Run()
}

10
beacon-chain/cache/checkpoint_state.go
vendored

@@ -6,8 +6,8 @@ import (
lru "github.com/hashicorp/golang-lru"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/hashutil"
)

@@ -47,7 +47,7 @@ func NewCheckpointStateCache() *CheckpointStateCache {

// StateByCheckpoint fetches state by checkpoint. Returns true with a
// reference to the CheckpointState info, if exists. Otherwise returns false, nil.
func (c *CheckpointStateCache) StateByCheckpoint(cp *ethpb.Checkpoint) (*stateTrie.BeaconState, error) {
func (c *CheckpointStateCache) StateByCheckpoint(cp *ethpb.Checkpoint) (state.BeaconState, error) {
c.lock.RLock()
defer c.lock.RUnlock()
h, err := hashutil.HashProto(cp)

@@ -60,7 +60,7 @@ func (c *CheckpointStateCache) StateByCheckpoint(cp *ethpb.Checkpoint) (*stateTr
if exists && item != nil {
checkpointStateHit.Inc()
// Copy here is unnecessary since the return will only be used to verify attestation signature.
return item.(*stateTrie.BeaconState), nil
return item.(state.BeaconState), nil
}

checkpointStateMiss.Inc()

@@ -69,7 +69,7 @@ func (c *CheckpointStateCache) StateByCheckpoint(cp *ethpb.Checkpoint) (*stateTr

// AddCheckpointState adds CheckpointState object to the cache. This method also trims the least
// recently added CheckpointState object if the cache size has ready the max cache size limit.
func (c *CheckpointStateCache) AddCheckpointState(cp *ethpb.Checkpoint, s *stateTrie.BeaconState) error {
func (c *CheckpointStateCache) AddCheckpointState(cp *ethpb.Checkpoint, s state.ReadOnlyBeaconState) error {
c.lock.Lock()
defer c.lock.Unlock()
h, err := hashutil.HashProto(cp)
39
beacon-chain/cache/checkpoint_state_test.go
vendored

@@ -3,65 +3,70 @@ package cache
import (
"testing"

"github.com/gogo/protobuf/proto"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
"google.golang.org/protobuf/proto"
)

func TestCheckpointStateCache_StateByCheckpoint(t *testing.T) {
cache := NewCheckpointStateCache()

cp1 := &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'A'}, 32)}
st, err := stateTrie.InitializeFromProto(&pb.BeaconState{
st, err := v1.InitializeFromProto(&ethpb.BeaconState{
GenesisValidatorsRoot: params.BeaconConfig().ZeroHash[:],
Slot: 64,
})
require.NoError(t, err)

state, err := cache.StateByCheckpoint(cp1)
s, err := cache.StateByCheckpoint(cp1)
require.NoError(t, err)
assert.Equal(t, (*stateTrie.BeaconState)(nil), state, "Expected state not to exist in empty cache")
assert.Equal(t, state.BeaconState(nil), s, "Expected state not to exist in empty cache")

require.NoError(t, cache.AddCheckpointState(cp1, st))

state, err = cache.StateByCheckpoint(cp1)
s, err = cache.StateByCheckpoint(cp1)
require.NoError(t, err)

if !proto.Equal(state.InnerStateUnsafe(), st.InnerStateUnsafe()) {
pbState1, err := v1.ProtobufBeaconState(s.InnerStateUnsafe())
require.NoError(t, err)
pbstate, err := v1.ProtobufBeaconState(st.InnerStateUnsafe())
require.NoError(t, err)
if !proto.Equal(pbState1, pbstate) {
t.Error("incorrectly cached state")
}

cp2 := &ethpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte{'B'}, 32)}
st2, err := stateTrie.InitializeFromProto(&pb.BeaconState{
st2, err := v1.InitializeFromProto(&ethpb.BeaconState{
Slot: 128,
})
require.NoError(t, err)
require.NoError(t, cache.AddCheckpointState(cp2, st2))

state, err = cache.StateByCheckpoint(cp2)
s, err = cache.StateByCheckpoint(cp2)
require.NoError(t, err)
assert.DeepEqual(t, st2.CloneInnerState(), state.CloneInnerState(), "incorrectly cached state")
assert.DeepEqual(t, st2.CloneInnerState(), s.CloneInnerState(), "incorrectly cached state")

state, err = cache.StateByCheckpoint(cp1)
s, err = cache.StateByCheckpoint(cp1)
require.NoError(t, err)
assert.DeepEqual(t, st.CloneInnerState(), state.CloneInnerState(), "incorrectly cached state")
assert.DeepEqual(t, st.CloneInnerState(), s.CloneInnerState(), "incorrectly cached state")
}

func TestCheckpointStateCache_MaxSize(t *testing.T) {
c := NewCheckpointStateCache()
st, err := stateTrie.InitializeFromProto(&pb.BeaconState{
st, err := v1.InitializeFromProto(&ethpb.BeaconState{
Slot: 0,
})
require.NoError(t, err)

for i := uint64(0); i < uint64(maxCheckpointStateSize+100); i++ {
require.NoError(t, st.SetSlot(i))
require.NoError(t, c.AddCheckpointState(&ethpb.Checkpoint{Epoch: i, Root: make([]byte, 32)}, st))
require.NoError(t, st.SetSlot(types.Slot(i)))
require.NoError(t, c.AddCheckpointState(&ethpb.Checkpoint{Epoch: types.Epoch(i), Root: make([]byte, 32)}, st))
}

assert.Equal(t, maxCheckpointStateSize, len(c.cache.Keys()))
80
beacon-chain/cache/committee.go
vendored

@@ -6,11 +6,12 @@ import (
"errors"
"sync"

lru "github.com/hashicorp/golang-lru"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/sliceutil"
"k8s.io/client-go/tools/cache"
)

var (

@@ -32,7 +33,7 @@ var (

// CommitteeCache is a struct with 1 queue for looking up shuffled indices list by seed.
type CommitteeCache struct {
CommitteeCache *cache.FIFO
CommitteeCache *lru.Cache
lock sync.RWMutex
}

@@ -42,28 +43,29 @@ func committeeKeyFn(obj interface{}) (string, error) {
if !ok {
return "", ErrNotCommittee
}

return key(info.Seed), nil
}

// NewCommitteesCache creates a new committee cache for storing/accessing shuffled indices of a committee.
func NewCommitteesCache() *CommitteeCache {
cCache, err := lru.New(int(maxCommitteesCacheSize))
// An error is only returned if the size of the cache is
// <= 0.
if err != nil {
panic(err)
}
return &CommitteeCache{
CommitteeCache: cache.NewFIFO(committeeKeyFn),
CommitteeCache: cCache,
}
}

// Committee fetches the shuffled indices by slot and committee index. Every list of indices
// represent one committee. Returns true if the list exists with slot and committee index. Otherwise returns false, nil.
func (c *CommitteeCache) Committee(slot uint64, seed [32]byte, index uint64) ([]uint64, error) {
func (c *CommitteeCache) Committee(slot types.Slot, seed [32]byte, index types.CommitteeIndex) ([]types.ValidatorIndex, error) {
c.lock.RLock()
defer c.lock.RUnlock()

obj, exists, err := c.CommitteeCache.GetByKey(key(seed))
if err != nil {
return nil, err
}

obj, exists := c.CommitteeCache.Get(key(seed))
if exists {
CommitteeCacheHit.Inc()
} else {

@@ -77,11 +79,11 @@ func (c *CommitteeCache) Committee(slot uint64, seed [32]byte, index uint64) ([]
}

committeeCountPerSlot := uint64(1)
if item.CommitteeCount/params.BeaconConfig().SlotsPerEpoch > 1 {
committeeCountPerSlot = item.CommitteeCount / params.BeaconConfig().SlotsPerEpoch
if item.CommitteeCount/uint64(params.BeaconConfig().SlotsPerEpoch) > 1 {
committeeCountPerSlot = item.CommitteeCount / uint64(params.BeaconConfig().SlotsPerEpoch)
}

indexOffSet := index + (slot%params.BeaconConfig().SlotsPerEpoch)*committeeCountPerSlot
indexOffSet := uint64(index) + uint64(slot.ModSlot(params.BeaconConfig().SlotsPerEpoch).Mul(committeeCountPerSlot))
start, end := startEndIndices(item, indexOffSet)

if end > uint64(len(item.ShuffledIndices)) || end < start {

@@ -96,22 +98,19 @@ func (c *CommitteeCache) Committee(slot uint64, seed [32]byte, index uint64) ([]
func (c *CommitteeCache) AddCommitteeShuffledList(committees *Committees) error {
c.lock.Lock()
defer c.lock.Unlock()

if err := c.CommitteeCache.AddIfNotPresent(committees); err != nil {
key, err := committeeKeyFn(committees)
if err != nil {
return err
}
trim(c.CommitteeCache, maxCommitteesCacheSize)
_ = c.CommitteeCache.Add(key, committees)
return nil
}

// ActiveIndices returns the active indices of a given seed stored in cache.
func (c *CommitteeCache) ActiveIndices(seed [32]byte) ([]uint64, error) {
func (c *CommitteeCache) ActiveIndices(seed [32]byte) ([]types.ValidatorIndex, error) {
c.lock.RLock()
defer c.lock.RUnlock()
obj, exists, err := c.CommitteeCache.GetByKey(key(seed))
if err != nil {
return nil, err
}
obj, exists := c.CommitteeCache.Get(key(seed))

if exists {
CommitteeCacheHit.Inc()

@@ -128,15 +127,40 @@ func (c *CommitteeCache) ActiveIndices(seed [32]byte) ([]uint64, error) {
return item.SortedIndices, nil
}

// ActiveBalance returns the total active balance of a given seed stored in cache.
func (c *CommitteeCache) ActiveBalance(seed [32]byte) (uint64, error) {
c.lock.RLock()
defer c.lock.RUnlock()
obj, exists := c.CommitteeCache.Get(key(seed))

if exists {
CommitteeCacheHit.Inc()
} else {
CommitteeCacheMiss.Inc()
return 0, ErrNonCommitteeKey
}

item, ok := obj.(*Committees)
if !ok {
return 0, ErrNotCommittee
}
if item == nil {
return 0, errors.New("item is nil")
}

// Return `ErrNonCommitteeKey` if active balance field doesnt exist in item.
if !item.ActiveBalance.Exist {
return 0, ErrNonCommitteeKey
}

return item.ActiveBalance.Total, nil
}

// ActiveIndicesCount returns the active indices count of a given seed stored in cache.
func (c *CommitteeCache) ActiveIndicesCount(seed [32]byte) (int, error) {
c.lock.RLock()
defer c.lock.RUnlock()
obj, exists, err := c.CommitteeCache.GetByKey(key(seed))
if err != nil {
return 0, err
}

obj, exists := c.CommitteeCache.Get(key(seed))
if exists {
CommitteeCacheHit.Inc()
} else {

@@ -154,8 +178,8 @@ func (c *CommitteeCache) ActiveIndicesCount(seed [32]byte) (int, error) {

// HasEntry returns true if the committee cache has a value.
func (c *CommitteeCache) HasEntry(seed string) bool {
_, ok, err := c.CommitteeCache.GetByKey(seed)
return err == nil && ok
_, ok := c.CommitteeCache.Get(seed)
return ok
}

func startEndIndices(c *Committees, index uint64) (uint64, uint64) {
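The hunks above swap the k8s cache.FIFO for hashicorp/golang-lru, which changes the access pattern from GetByKey/AddIfNotPresent plus manual trimming to caller-computed keys with built-in eviction. A standalone sketch of that library pattern, with illustrative sizes and keys rather than Prysm's values:

package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	// lru.New only errors when the requested size is <= 0.
	c, err := lru.New(4)
	if err != nil {
		panic(err)
	}
	// Keys are computed by the caller (committeeKeyFn in the diff) and passed explicitly.
	c.Add("seed-1", []uint64{1, 2, 3})

	// Get returns (interface{}, bool); a miss is signalled by the bool, not an error.
	if obj, ok := c.Get("seed-1"); ok {
		fmt.Println(obj.([]uint64))
	}
	// Keys() replaces the FIFO's ListKeys(); entries beyond the size are evicted automatically.
	fmt.Println(len(c.Keys()))
}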
15
beacon-chain/cache/committee_disabled.go
vendored

@@ -3,6 +3,8 @@
// This file is used in fuzzer builds to bypass global committee caches.
package cache

import types "github.com/prysmaticlabs/eth2-types"

// FakeCommitteeCache is a struct with 1 queue for looking up shuffled indices list by seed.
type FakeCommitteeCache struct {
}

@@ -14,7 +16,7 @@ func NewCommitteesCache() *FakeCommitteeCache {

// Committee fetches the shuffled indices by slot and committee index. Every list of indices
// represent one committee. Returns true if the list exists with slot and committee index. Otherwise returns false, nil.
func (c *FakeCommitteeCache) Committee(slot uint64, seed [32]byte, index uint64) ([]uint64, error) {
func (c *FakeCommitteeCache) Committee(slot types.Slot, seed [32]byte, index types.CommitteeIndex) ([]types.ValidatorIndex, error) {
return nil, nil
}

@@ -25,12 +27,12 @@ func (c *FakeCommitteeCache) AddCommitteeShuffledList(committees *Committees) er
}

// AddProposerIndicesList updates the committee shuffled list with proposer indices.
func (c *FakeCommitteeCache) AddProposerIndicesList(seed [32]byte, indices []uint64) error {
func (c *FakeCommitteeCache) AddProposerIndicesList(seed [32]byte, indices []types.ValidatorIndex) error {
return nil
}

// ActiveIndices returns the active indices of a given seed stored in cache.
func (c *FakeCommitteeCache) ActiveIndices(seed [32]byte) ([]uint64, error) {
func (c *FakeCommitteeCache) ActiveIndices(seed [32]byte) ([]types.ValidatorIndex, error) {
return nil, nil
}

@@ -39,8 +41,13 @@ func (c *FakeCommitteeCache) ActiveIndicesCount(seed [32]byte) (int, error) {
return 0, nil
}

// ActiveBalance returns the active balance of a given seed stored in cache.
func (c *FakeCommitteeCache) ActiveBalance(seed [32]byte) (uint64, error) {
return 0, nil
}

// ProposerIndices returns the proposer indices of a given seed.
func (c *FakeCommitteeCache) ProposerIndices(seed [32]byte) ([]uint64, error) {
func (c *FakeCommitteeCache) ProposerIndices(seed [32]byte) ([]types.ValidatorIndex, error) {
return nil, nil
}

4
beacon-chain/cache/committee_fuzz_test.go
vendored

@@ -32,7 +32,7 @@ func TestCommitteeCache_FuzzCommitteesByEpoch(t *testing.T) {
require.NoError(t, err)
}

assert.Equal(t, maxCommitteesCacheSize, uint64(len(cache.CommitteeCache.ListKeys())), "Incorrect key size")
assert.Equal(t, maxCommitteesCacheSize, uint64(len(cache.CommitteeCache.Keys())), "Incorrect key size")
}

func TestCommitteeCache_FuzzActiveIndices(t *testing.T) {

@@ -49,5 +49,5 @@ func TestCommitteeCache_FuzzActiveIndices(t *testing.T) {
assert.DeepEqual(t, c.SortedIndices, indices)
}

assert.Equal(t, maxCommitteesCacheSize, uint64(len(cache.CommitteeCache.ListKeys())), "Incorrect key size")
assert.Equal(t, maxCommitteesCacheSize, uint64(len(cache.CommitteeCache.Keys())), "Incorrect key size")
}
49
beacon-chain/cache/committee_test.go
vendored

@@ -6,6 +6,7 @@ import (
"strconv"
"testing"

types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"

@@ -16,7 +17,7 @@ func TestCommitteeKeyFn_OK(t *testing.T) {
item := &Committees{
CommitteeCount: 1,
Seed: [32]byte{'A'},
ShuffledIndices: []uint64{1, 2, 3, 4, 5},
ShuffledIndices: []types.ValidatorIndex{1, 2, 3, 4, 5},
}

k, err := committeeKeyFn(item)

@@ -33,13 +34,13 @@ func TestCommitteeCache_CommitteesByEpoch(t *testing.T) {
cache := NewCommitteesCache()

item := &Committees{
ShuffledIndices: []uint64{1, 2, 3, 4, 5, 6},
ShuffledIndices: []types.ValidatorIndex{1, 2, 3, 4, 5, 6},
Seed: [32]byte{'A'},
CommitteeCount: 3,
}

slot := params.BeaconConfig().SlotsPerEpoch
committeeIndex := uint64(1)
committeeIndex := types.CommitteeIndex(1)
indices, err := cache.Committee(slot, item.Seed, committeeIndex)
require.NoError(t, err)
if indices != nil {

@@ -47,18 +48,18 @@ func TestCommitteeCache_CommitteesByEpoch(t *testing.T) {
}
require.NoError(t, cache.AddCommitteeShuffledList(item))

wantedIndex := uint64(0)
wantedIndex := types.CommitteeIndex(0)
indices, err = cache.Committee(slot, item.Seed, wantedIndex)
require.NoError(t, err)

start, end := startEndIndices(item, wantedIndex)
start, end := startEndIndices(item, uint64(wantedIndex))
assert.DeepEqual(t, item.ShuffledIndices[start:end], indices)
}

func TestCommitteeCache_ActiveIndices(t *testing.T) {
cache := NewCommitteesCache()

item := &Committees{Seed: [32]byte{'A'}, SortedIndices: []uint64{1, 2, 3, 4, 5, 6}}
item := &Committees{Seed: [32]byte{'A'}, SortedIndices: []types.ValidatorIndex{1, 2, 3, 4, 5, 6}}
indices, err := cache.ActiveIndices(item.Seed)
require.NoError(t, err)
if indices != nil {

@@ -75,7 +76,7 @@ func TestCommitteeCache_ActiveIndices(t *testing.T) {
func TestCommitteeCache_ActiveCount(t *testing.T) {
cache := NewCommitteesCache()

item := &Committees{Seed: [32]byte{'A'}, SortedIndices: []uint64{1, 2, 3, 4, 5, 6}}
item := &Committees{Seed: [32]byte{'A'}, SortedIndices: []types.ValidatorIndex{1, 2, 3, 4, 5, 6}}
count, err := cache.ActiveIndicesCount(item.Seed)
require.NoError(t, err)
assert.Equal(t, 0, count, "Expected active count not to exist in empty cache")

@@ -87,6 +88,24 @@ func TestCommitteeCache_ActiveCount(t *testing.T) {
assert.Equal(t, len(item.SortedIndices), count)
}

func TestCommitteeCache_ActiveBalance(t *testing.T) {
cache := NewCommitteesCache()

balances := &Balance{
Exist: true,
Total: uint64(10000),
}
item := &Committees{Seed: [32]byte{'A'}, ActiveBalance: balances}
_, err := cache.ActiveBalance(item.Seed)
require.Equal(t, ErrNonCommitteeKey, err)

require.NoError(t, cache.AddCommitteeShuffledList(item))

got, err := cache.ActiveBalance(item.Seed)
require.NoError(t, err)
assert.Equal(t, balances.Total, got)
}

func TestCommitteeCache_CanRotate(t *testing.T) {
cache := NewCommitteesCache()

@@ -99,11 +118,11 @@ func TestCommitteeCache_CanRotate(t *testing.T) {
require.NoError(t, cache.AddCommitteeShuffledList(item))
}

k := cache.CommitteeCache.ListKeys()
k := cache.CommitteeCache.Keys()
assert.Equal(t, maxCommitteesCacheSize, uint64(len(k)))

sort.Slice(k, func(i, j int) bool {
return k[i] < k[j]
return k[i].(string) < k[j].(string)
})
wanted := end - int(maxCommitteesCacheSize)
s := bytesutil.ToBytes32([]byte(strconv.Itoa(wanted)))

@@ -116,13 +135,15 @@ func TestCommitteeCache_CanRotate(t *testing.T) {
func TestCommitteeCacheOutOfRange(t *testing.T) {
cache := NewCommitteesCache()
seed := bytesutil.ToBytes32([]byte("foo"))
err := cache.CommitteeCache.Add(&Committees{
comms := &Committees{
CommitteeCount: 1,
Seed: seed,
ShuffledIndices: []uint64{0},
SortedIndices: []uint64{},
})
require.NoError(t, err)
ShuffledIndices: []types.ValidatorIndex{0},
SortedIndices: []types.ValidatorIndex{},
}
key, err := committeeKeyFn(comms)
assert.NoError(t, err)
_ = cache.CommitteeCache.Add(key, comms)

_, err = cache.Committee(0, seed, math.MaxUint64) // Overflow!
require.NotNil(t, err, "Did not fail as expected")
22
beacon-chain/cache/committees.go
vendored

@@ -1,15 +1,31 @@
package cache

import "errors"
import (
"errors"

types "github.com/prysmaticlabs/eth2-types"
)

// ErrNotCommittee will be returned when a cache object is not a pointer to
// a Committee struct.
var ErrNotCommittee = errors.New("object is not a committee struct")

// ErrNonCommitteeKey will be returned when the committee key does not exist in cache.
var ErrNonCommitteeKey = errors.New("committee key does not exist")

// Committees defines the shuffled committees seed.
type Committees struct {
CommitteeCount uint64
Seed [32]byte
ShuffledIndices []uint64
SortedIndices []uint64
ShuffledIndices []types.ValidatorIndex
SortedIndices []types.ValidatorIndex
ActiveBalance *Balance
}

// Balance tracks active balance.
// Given default uint64 is 0, `Exist` is used to distinguish whether
// this field has been filed.
type Balance struct {
Exist bool
Total uint64
}
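The Balance wrapper exists because a bare uint64 cannot distinguish "balance not cached" from "balance is genuinely zero"; ActiveBalance above returns ErrNonCommitteeKey whenever Exist is false. A tiny illustration of the zero-value ambiguity the flag avoids (the variable names here are illustrative only):

// Without the flag, both "missing" and "actually zero" collapse to 0.
var total uint64 // zero value: cached as zero, or never set at all?

// With the wrapper, the two cases stay distinguishable.
missing := &Balance{}                      // Exist == false: treat as a cache miss
realZero := &Balance{Exist: true, Total: 0} // Exist == true: a real zero balance
_, _, _ = total, missing, realZero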
2
beacon-chain/cache/common.go
vendored

@@ -8,7 +8,7 @@ import (
var (
// maxCacheSize is 4x of the epoch length for additional cache padding.
// Requests should be only accessing committees within defined epoch length.
maxCacheSize = 4 * params.BeaconConfig().SlotsPerEpoch
maxCacheSize = uint64(4 * params.BeaconConfig().SlotsPerEpoch)
)

// trim the FIFO queue to the maxSize.
12
beacon-chain/cache/depositcache/BUILD.bazel
vendored

@@ -1,5 +1,4 @@
load("@prysm//tools/go:def.bzl", "go_library")
load("@io_bazel_rules_go//go:def.bzl", "go_test")
load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
name = "go_default_library",

@@ -11,14 +10,14 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache",
visibility = ["//beacon-chain:__subpackages__"],
deps = [
"//proto/beacon/db:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//shared/bytesutil:go_default_library",
"//shared/hashutil:go_default_library",
"//shared/params:go_default_library",
"//shared/trieutil:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
],

@@ -32,14 +31,13 @@ go_test(
],
embed = [":go_default_library"],
deps = [
"//proto/beacon/db:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//shared/bytesutil:go_default_library",
"//shared/params:go_default_library",
"//shared/testutil/assert:go_default_library",
"//shared/testutil/require:go_default_library",
"//shared/trieutil:go_default_library",
"@com_github_gogo_protobuf//proto:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],
)

@@ -12,10 +12,11 @@ import (
"sort"
"sync"

"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
dbpb "github.com/prysmaticlabs/prysm/proto/beacon/db"
dbpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/trieutil"

@@ -74,7 +75,7 @@ func New() (*DepositCache, error) {

// InsertDeposit into the database. If deposit or block number are nil
// then this method does nothing.
func (dc *DepositCache) InsertDeposit(ctx context.Context, d *ethpb.Deposit, blockNum uint64, index int64, depositRoot [32]byte) {
func (dc *DepositCache) InsertDeposit(ctx context.Context, d *ethpb.Deposit, blockNum uint64, index int64, depositRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "DepositsCache.InsertDeposit")
defer span.End()
if d == nil {

@@ -84,15 +85,22 @@ func (dc *DepositCache) InsertDeposit(ctx context.Context, d *ethpb.Deposit, blo
"index": index,
"deposit root": hex.EncodeToString(depositRoot[:]),
}).Warn("Ignoring nil deposit insertion")
return
return errors.New("nil deposit inserted into the cache")
}
dc.depositsLock.Lock()
defer dc.depositsLock.Unlock()

if int(index) != len(dc.deposits) {
return errors.Errorf("wanted deposit with index %d to be inserted but received %d", len(dc.deposits), index)
}
// Keep the slice sorted on insertion in order to avoid costly sorting on retrieval.
heightIdx := sort.Search(len(dc.deposits), func(i int) bool { return dc.deposits[i].Index >= index })
newDeposits := append([]*dbpb.DepositContainer{{Deposit: d, Eth1BlockHeight: blockNum, DepositRoot: depositRoot[:], Index: index}}, dc.deposits[heightIdx:]...)
newDeposits := append(
[]*dbpb.DepositContainer{{Deposit: d, Eth1BlockHeight: blockNum, DepositRoot: depositRoot[:], Index: index}},
dc.deposits[heightIdx:]...)
dc.deposits = append(dc.deposits[:heightIdx], newDeposits...)
historicalDepositsCount.Inc()
return nil
}

// InsertDepositContainers inserts a set of deposit containers into our deposit cache.

@@ -165,7 +173,7 @@ func (dc *DepositCache) AllDeposits(ctx context.Context, untilBlk *big.Int) []*e
return deposits
}

// DepositsNumberAndRootAtHeight returns number of deposits made prior to blockheight and the
// DepositsNumberAndRootAtHeight returns number of deposits made up to blockheight and the
// root that corresponds to the latest deposit at that blockheight.
func (dc *DepositCache) DepositsNumberAndRootAtHeight(ctx context.Context, blockHeight *big.Int) (uint64, [32]byte) {
ctx, span := trace.StartSpan(ctx, "DepositsCache.DepositsNumberAndRootAtHeight")
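With this change InsertDeposit reports failures instead of only logging a warning, and it now rejects out-of-order indices: the next accepted index must equal the current length of the cached slice. A hedged usage sketch of the new contract (the surrounding variables are assumed, not from the diff):

// Sketch of the error-returning contract introduced above.
dc, err := depositcache.New()
if err != nil {
	return err
}
// Indices must arrive densely: len(dc.deposits) is the only index accepted next.
if err := dc.InsertDeposit(ctx, deposit, blockNum, 0, depositRoot); err != nil {
	return fmt.Errorf("deposit rejected: %w", err) // nil deposit or index gap
}
// Inserting index 2 while only index 0 is present would now fail with
// "wanted deposit with index 1 to be inserted but received 2".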
@@ -7,8 +7,8 @@ import (
"math/big"
"testing"

ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
dbpb "github.com/prysmaticlabs/prysm/proto/beacon/db"
dbpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"

@@ -26,7 +26,7 @@ func TestInsertDeposit_LogsOnNilDepositInsertion(t *testing.T) {
dc, err := New()
require.NoError(t, err)

dc.InsertDeposit(context.Background(), nil, 1, 0, [32]byte{})
assert.ErrorContains(t, "nil deposit inserted into the cache", dc.InsertDeposit(context.Background(), nil, 1, 0, [32]byte{}))

require.Equal(t, 0, len(dc.deposits), "Number of deposits changed")
assert.Equal(t, nilDepositErr, hook.LastEntry().Message)

@@ -37,37 +37,52 @@ func TestInsertDeposit_MaintainsSortedOrderByIndex(t *testing.T) {
require.NoError(t, err)

insertions := []struct {
blkNum uint64
deposit *ethpb.Deposit
index int64
blkNum uint64
deposit *ethpb.Deposit
index int64
expectedErr string
}{
{
blkNum: 0,
deposit: &ethpb.Deposit{},
index: 0,
blkNum: 0,
deposit: &ethpb.Deposit{},
index: 0,
expectedErr: "",
},
{
blkNum: 0,
deposit: &ethpb.Deposit{},
index: 3,
blkNum: 0,
deposit: &ethpb.Deposit{},
index: 3,
expectedErr: "wanted deposit with index 1 to be inserted but received 3",
},
{
blkNum: 0,
deposit: &ethpb.Deposit{},
index: 1,
blkNum: 0,
deposit: &ethpb.Deposit{},
index: 1,
expectedErr: "",
},
{
blkNum: 0,
deposit: &ethpb.Deposit{},
index: 4,
blkNum: 0,
deposit: &ethpb.Deposit{},
index: 4,
expectedErr: "wanted deposit with index 2 to be inserted but received 4",
},
{
blkNum: 0,
deposit: &ethpb.Deposit{},
index: 2,
expectedErr: "",
},
}

for _, ins := range insertions {
dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{})
if ins.expectedErr != "" {
assert.ErrorContains(t, ins.expectedErr, dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{}))
} else {
assert.NoError(t, dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{}))
}
}

expectedIndices := []int64{0, 1, 3, 4}
expectedIndices := []int64{0, 1, 2}
for i, ei := range expectedIndices {
assert.Equal(t, ei, dc.deposits[i].Index,
fmt.Sprintf("dc.deposits[%d].Index = %d, wanted %d", i, dc.deposits[i].Index, ei))

@@ -154,121 +169,148 @@ func TestAllDeposits_FiltersDepositUpToAndIncludingBlockNumber(t *testing.T) {
assert.Equal(t, 5, len(d))
}

func TestDepositsNumberAndRootAtHeight_ReturnsAppropriateCountAndRoot(t *testing.T) {
dc, err := New()
require.NoError(t, err)
func TestDepositsNumberAndRootAtHeight(t *testing.T) {
wantedRoot := bytesutil.PadTo([]byte("root"), 32)
t.Run("requesting_last_item_works", func(t *testing.T) {
dc, err := New()
require.NoError(t, err)
dc.deposits = []*dbpb.DepositContainer{
{
Eth1BlockHeight: 10,
Index: 0,
Deposit: &ethpb.Deposit{},
},
{
Eth1BlockHeight: 10,
Index: 1,
Deposit: &ethpb.Deposit{},
},
{
Eth1BlockHeight: 11,
Index: 2,
Deposit: &ethpb.Deposit{},
},
{
Eth1BlockHeight: 13,
Index: 3,
Deposit: &ethpb.Deposit{},
DepositRoot: wantedRoot,
},
}
n, root := dc.DepositsNumberAndRootAtHeight(context.Background(), big.NewInt(13))
assert.Equal(t, 4, int(n))
require.DeepEqual(t, wantedRoot, root[:])
})
t.Run("only_one_item", func(t *testing.T) {
dc, err := New()
require.NoError(t, err)

dc.deposits = []*dbpb.DepositContainer{
{
Eth1BlockHeight: 10,
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: make([]byte, 48),
WithdrawalCredentials: make([]byte, 32),
Signature: make([]byte, 96),
},
dc.deposits = []*dbpb.DepositContainer{
{
Eth1BlockHeight: 10,
Index: 0,
Deposit: &ethpb.Deposit{},
DepositRoot: wantedRoot,
},
},
{
Eth1BlockHeight: 10,
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: make([]byte, 48),
WithdrawalCredentials: make([]byte, 32),
Signature: make([]byte, 96),
},
},
},
{
Eth1BlockHeight: 10,
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: make([]byte, 48),
WithdrawalCredentials: make([]byte, 32),
Signature: make([]byte, 96),
},
},
},
{
Eth1BlockHeight: 10,
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: make([]byte, 48),
WithdrawalCredentials: make([]byte, 32),
Signature: make([]byte, 96),
},
},
},
{
Eth1BlockHeight: 11,
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: make([]byte, 48),
WithdrawalCredentials: make([]byte, 32),
Signature: make([]byte, 96),
},
},
DepositRoot: bytesutil.PadTo([]byte("root"), 32),
},
{
Eth1BlockHeight: 12,
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: make([]byte, 48),
WithdrawalCredentials: make([]byte, 32),
Signature: make([]byte, 96),
},
},
},
{
Eth1BlockHeight: 12,
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: make([]byte, 48),
WithdrawalCredentials: make([]byte, 32),
Signature: make([]byte, 96),
},
},
},
}
}
n, root := dc.DepositsNumberAndRootAtHeight(context.Background(), big.NewInt(10))
assert.Equal(t, 1, int(n))
require.DeepEqual(t, wantedRoot, root[:])
})
t.Run("none_at_height_some_below", func(t *testing.T) {
dc, err := New()
require.NoError(t, err)

n, root := dc.DepositsNumberAndRootAtHeight(context.Background(), big.NewInt(11))
assert.Equal(t, 5, int(n))
assert.Equal(t, bytesutil.ToBytes32([]byte("root")), root)
}

func TestDepositsNumberAndRootAtHeight_ReturnsEmptyTrieIfBlockHeightLessThanOldestDeposit(t *testing.T) {
dc, err := New()
require.NoError(t, err)

dc.deposits = []*dbpb.DepositContainer{
{
Eth1BlockHeight: 10,
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: make([]byte, 48),
WithdrawalCredentials: make([]byte, 32),
Signature: make([]byte, 96),
},
dc.deposits = []*dbpb.DepositContainer{
{
Eth1BlockHeight: 8,
Index: 0,
Deposit: &ethpb.Deposit{},
},
DepositRoot: bytesutil.PadTo([]byte("root"), 32),
},
{
Eth1BlockHeight: 11,
Deposit: &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: make([]byte, 48),
WithdrawalCredentials: make([]byte, 32),
Signature: make([]byte, 96),
},
{
Eth1BlockHeight: 9,
Index: 1,
Deposit: &ethpb.Deposit{},
DepositRoot: wantedRoot,
},
DepositRoot: bytesutil.PadTo([]byte("root"), 32),
},
}
{
Eth1BlockHeight: 11,
Index: 2,
Deposit: &ethpb.Deposit{},
},
}
n, root := dc.DepositsNumberAndRootAtHeight(context.Background(), big.NewInt(10))
assert.Equal(t, 2, int(n))
require.DeepEqual(t, wantedRoot, root[:])
})
t.Run("none_at_height_none_below", func(t *testing.T) {
dc, err := New()
require.NoError(t, err)

n, root := dc.DepositsNumberAndRootAtHeight(context.Background(), big.NewInt(2))
assert.Equal(t, 0, int(n))
assert.Equal(t, [32]byte{}, root)
dc.deposits = []*dbpb.DepositContainer{
{
Eth1BlockHeight: 8,
Index: 0,
Deposit: &ethpb.Deposit{},
DepositRoot: wantedRoot,
},
}
n, root := dc.DepositsNumberAndRootAtHeight(context.Background(), big.NewInt(7))
assert.Equal(t, 0, int(n))
require.DeepEqual(t, params.BeaconConfig().ZeroHash, root)
})
t.Run("none_at_height_one_below", func(t *testing.T) {
dc, err := New()
require.NoError(t, err)

dc.deposits = []*dbpb.DepositContainer{
{
Eth1BlockHeight: 8,
Index: 0,
Deposit: &ethpb.Deposit{},
DepositRoot: wantedRoot,
},
}
n, root := dc.DepositsNumberAndRootAtHeight(context.Background(), big.NewInt(10))
assert.Equal(t, 1, int(n))
require.DeepEqual(t, wantedRoot, root[:])
})
t.Run("some_greater_some_lower", func(t *testing.T) {
dc, err := New()
require.NoError(t, err)

dc.deposits = []*dbpb.DepositContainer{
{
Eth1BlockHeight: 8,
Index: 0,
Deposit: &ethpb.Deposit{},
},
{
Eth1BlockHeight: 8,
Index: 1,
Deposit: &ethpb.Deposit{},
},
{
Eth1BlockHeight: 9,
Index: 2,
Deposit: &ethpb.Deposit{},
DepositRoot: wantedRoot,
},
{
Eth1BlockHeight: 10,
Index: 3,
Deposit: &ethpb.Deposit{},
},
{
Eth1BlockHeight: 10,
Index: 4,
Deposit: &ethpb.Deposit{},
},
}
n, root := dc.DepositsNumberAndRootAtHeight(context.Background(), big.NewInt(9))
assert.Equal(t, 3, int(n))
require.DeepEqual(t, wantedRoot, root[:])
})
}

func TestDepositByPubkey_ReturnsFirstMatchingDeposit(t *testing.T) {

@@ -606,7 +648,7 @@ func TestPruneProofs_Ok(t *testing.T) {
}

for _, ins := range deposits {
dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{})
assert.NoError(t, dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{}))
}

require.NoError(t, dc.PruneProofs(context.Background(), 1))

@@ -649,7 +691,7 @@ func TestPruneProofs_SomeAlreadyPruned(t *testing.T) {
}

for _, ins := range deposits {
dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{})
assert.NoError(t, dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{}))
}

require.NoError(t, dc.PruneProofs(context.Background(), 2))

@@ -689,7 +731,7 @@ func TestPruneProofs_PruneAllWhenDepositIndexTooBig(t *testing.T) {
}

for _, ins := range deposits {
dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{})
assert.NoError(t, dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{}))
}

require.NoError(t, dc.PruneProofs(context.Background(), 99))

@@ -732,7 +774,7 @@ func TestPruneProofs_CorrectlyHandleLastIndex(t *testing.T) {
}

for _, ins := range deposits {
dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{})
assert.NoError(t, dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{}))
}

require.NoError(t, dc.PruneProofs(context.Background(), 4))
@@ -7,8 +7,8 @@ import (
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
dbpb "github.com/prysmaticlabs/prysm/proto/beacon/db"
|
||||
dbpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/hashutil"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
|
||||
@@ -5,11 +5,11 @@ import (
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
dbpb "github.com/prysmaticlabs/prysm/proto/beacon/db"
|
||||
dbpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
var _ PendingDepositsFetcher = (*DepositCache)(nil)
|
||||
@@ -90,7 +90,7 @@ func TestPendingDeposits_OK(t *testing.T) {
|
||||
{Proof: [][]byte{[]byte("A")}},
|
||||
{Proof: [][]byte{[]byte("B")}},
|
||||
}
|
||||
assert.DeepEqual(t, expected, deposits)
|
||||
assert.DeepSSZEqual(t, expected, deposits)
|
||||
|
||||
all := dc.PendingDeposits(context.Background(), nil)
|
||||
assert.Equal(t, len(dc.pendingDeposits), len(all), "PendingDeposits(ctx, nil) did not return all deposits")
|
||||
|
||||
2
beacon-chain/cache/doc.go
vendored
2
beacon-chain/cache/doc.go
vendored
@@ -1,5 +1,5 @@
|
||||
// Package cache includes all important caches for the runtime
|
||||
// of an eth2 beacon node, ensuring the node does not spend
|
||||
// of an Ethereum Beacon Node, ensuring the node does not spend
|
||||
// resources computing duplicate operations such as committee
|
||||
// calculations for validators during the same epoch, etc.
|
||||
package cache
|
||||
|
||||
9
beacon-chain/cache/error.go
vendored
Normal file
9
beacon-chain/cache/error.go
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
package cache
|
||||
|
||||
import "errors"
|
||||
|
||||
// Sync committee cache related errors
|
||||
|
||||
// ErrNonExistingSyncCommitteeKey when sync committee key (root) does not exist in cache.
|
||||
var ErrNonExistingSyncCommitteeKey = errors.New("does not exist sync committee key")
|
||||
var errNotSyncCommitteeIndexPosition = errors.New("not syncCommitteeIndexPosition struct")
|
||||
beacon-chain/cache/proposer_indices.go (vendored, 3 changes)
@@ -7,6 +7,7 @@ import (

"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
types "github.com/prysmaticlabs/eth2-types"
"k8s.io/client-go/tools/cache"
)

@@ -75,7 +76,7 @@ func (c *ProposerIndicesCache) HasProposerIndices(r [32]byte) (bool, error) {
}

// ProposerIndices returns the proposer indices of a block root seed.
func (c *ProposerIndicesCache) ProposerIndices(r [32]byte) ([]uint64, error) {
func (c *ProposerIndicesCache) ProposerIndices(r [32]byte) ([]types.ValidatorIndex, error) {
c.lock.RLock()
defer c.lock.RUnlock()
obj, exists, err := c.ProposerIndicesCache.GetByKey(key(r))

@@ -3,6 +3,8 @@
// This file is used in fuzzer builds to bypass proposer indices caches.
package cache

import types "github.com/prysmaticlabs/eth2-types"

// FakeProposerIndicesCache is a struct with 1 queue for looking up proposer indices by root.
type FakeProposerIndicesCache struct {
}
@@ -19,7 +21,7 @@ func (c *FakeProposerIndicesCache) AddProposerIndices(p *ProposerIndices) error
}

// ProposerIndices returns the proposer indices of a block root seed.
func (c *FakeProposerIndicesCache) ProposerIndices(r [32]byte) ([]uint64, error) {
func (c *FakeProposerIndicesCache) ProposerIndices(r [32]byte) ([]types.ValidatorIndex, error) {
return nil, nil
}

beacon-chain/cache/proposer_indices_test.go (vendored, 5 changes)
@@ -4,6 +4,7 @@ import (
"strconv"
"testing"

types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
@@ -12,7 +13,7 @@ import (
func TestProposerKeyFn_OK(t *testing.T) {
item := &ProposerIndices{
BlockRoot: [32]byte{'A'},
ProposerIndices: []uint64{1, 2, 3, 4, 5},
ProposerIndices: []types.ValidatorIndex{1, 2, 3, 4, 5},
}

k, err := proposerIndicesKeyFn(item)
@@ -48,7 +49,7 @@ func TestProposerCache_AddProposerIndicesList(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, true, has)

item := &ProposerIndices{BlockRoot: [32]byte{'B'}, ProposerIndices: []uint64{1, 2, 3, 4, 5, 6}}
item := &ProposerIndices{BlockRoot: [32]byte{'B'}, ProposerIndices: []types.ValidatorIndex{1, 2, 3, 4, 5, 6}}
require.NoError(t, cache.AddProposerIndices(item))

received, err = cache.ProposerIndices(item.BlockRoot)

beacon-chain/cache/proposer_indices_type.go (vendored, 8 changes)
@@ -1,6 +1,10 @@
package cache

import "errors"
import (
"errors"

types "github.com/prysmaticlabs/eth2-types"
)

// ErrNotProposerIndices will be returned when a cache object is not a pointer to
// a ProposerIndices struct.
@@ -9,5 +13,5 @@ var ErrNotProposerIndices = errors.New("object is not a proposer indices struct"
// ProposerIndices defines the cached struct for proposer indices.
type ProposerIndices struct {
BlockRoot [32]byte
ProposerIndices []uint64
ProposerIndices []types.ValidatorIndex
}

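With this change the cached schedule is expressed in the strongly typed types.ValidatorIndex rather than raw uint64. A short sketch of populating the cache with the new type follows; it assumes only the exported AddProposerIndices method visible in this diff and is not a definitive usage of the package.

package example

import (
	types "github.com/prysmaticlabs/eth2-types"
	"github.com/prysmaticlabs/prysm/beacon-chain/cache"
)

// cacheProposerSchedule stores a block root's proposer schedule using the typed indices.
func cacheProposerSchedule(c *cache.ProposerIndicesCache, root [32]byte, indices []types.ValidatorIndex) error {
	return c.AddProposerIndices(&cache.ProposerIndices{
		BlockRoot:       root,
		ProposerIndices: indices,
	})
}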
beacon-chain/cache/skip_slot_cache.go (vendored, 8 changes)
@@ -9,7 +9,7 @@ import (
lru "github.com/hashicorp/golang-lru"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"go.opencensus.io/trace"
)

@@ -57,7 +57,7 @@ func (c *SkipSlotCache) Disable() {

// Get waits for any in progress calculation to complete before returning a
// cached response, if any.
func (c *SkipSlotCache) Get(ctx context.Context, r [32]byte) (*stateTrie.BeaconState, error) {
func (c *SkipSlotCache) Get(ctx context.Context, r [32]byte) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "skipSlotCache.Get")
defer span.End()
if c.disabled {
@@ -97,7 +97,7 @@ func (c *SkipSlotCache) Get(ctx context.Context, r [32]byte) (*stateTrie.BeaconS
if exists && item != nil {
skipSlotCacheHit.Inc()
span.AddAttributes(trace.BoolAttribute("hit", true))
return item.(*stateTrie.BeaconState).Copy(), nil
return item.(state.BeaconState).Copy(), nil
}
skipSlotCacheMiss.Inc()
span.AddAttributes(trace.BoolAttribute("hit", false))
@@ -136,7 +136,7 @@ func (c *SkipSlotCache) MarkNotInProgress(r [32]byte) error {
}

// Put the response in the cache.
func (c *SkipSlotCache) Put(_ context.Context, r [32]byte, state *stateTrie.BeaconState) error {
func (c *SkipSlotCache) Put(_ context.Context, r [32]byte, state state.BeaconState) error {
if c.disabled {
return nil
}

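Get and Put now operate on the state.BeaconState interface instead of the concrete *stateTrie.BeaconState. A rough sketch of the intended miss-then-fill flow around MarkInProgress/MarkNotInProgress is below; computeState is a placeholder for the real skip-slot processing, and error handling is deliberately minimal.

package example

import (
	"context"

	"github.com/prysmaticlabs/prysm/beacon-chain/cache"
	"github.com/prysmaticlabs/prysm/beacon-chain/state"
)

// computeState is a placeholder for the real (expensive) skip-slot state computation.
func computeState(ctx context.Context) (state.BeaconState, error) { return nil, nil }

// cachedState returns a cached advanced state for root r, computing and storing it on a miss.
// Other callers block inside Get until MarkNotInProgress is called, so duplicate work is avoided.
func cachedState(ctx context.Context, c *cache.SkipSlotCache, r [32]byte) (state.BeaconState, error) {
	if st, err := c.Get(ctx, r); st != nil || err != nil {
		return st, err
	}
	if err := c.MarkInProgress(r); err != nil {
		return nil, err
	}
	defer func() {
		// Ignoring the error keeps the sketch short; production code should handle it.
		_ = c.MarkNotInProgress(r)
	}()
	st, err := computeState(ctx)
	if err != nil {
		return nil, err
	}
	if err := c.Put(ctx, r, st); err != nil {
		return nil, err
	}
	return st, nil
}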
beacon-chain/cache/skip_slot_cache_test.go (vendored, 15 changes)
@@ -5,8 +5,9 @@ import (
"testing"

"github.com/prysmaticlabs/prysm/beacon-chain/cache"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
)
@@ -16,21 +17,21 @@ func TestSkipSlotCache_RoundTrip(t *testing.T) {
c := cache.NewSkipSlotCache()

r := [32]byte{'a'}
state, err := c.Get(ctx, r)
s, err := c.Get(ctx, r)
require.NoError(t, err)
assert.Equal(t, (*stateTrie.BeaconState)(nil), state, "Empty cache returned an object")
assert.Equal(t, state.BeaconState(nil), s, "Empty cache returned an object")

require.NoError(t, c.MarkInProgress(r))

state, err = stateTrie.InitializeFromProto(&pb.BeaconState{
s, err = v1.InitializeFromProto(&ethpb.BeaconState{
Slot: 10,
})
require.NoError(t, err)

require.NoError(t, c.Put(ctx, r, state))
require.NoError(t, c.Put(ctx, r, s))
require.NoError(t, c.MarkNotInProgress(r))

res, err := c.Get(ctx, r)
require.NoError(t, err)
assert.DeepEqual(t, res.CloneInnerState(), state.CloneInnerState(), "Expected equal protos to return from cache")
assert.DeepEqual(t, res.CloneInnerState(), s.CloneInnerState(), "Expected equal protos to return from cache")
}

beacon-chain/cache/subnet_ids.go (vendored, 32 changes)
@@ -6,6 +6,7 @@ import (

lru "github.com/hashicorp/golang-lru"
"github.com/patrickmn/go-cache"
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/sliceutil"
)
@@ -25,7 +26,7 @@ var SubnetIDs = newSubnetIDs()
func newSubnetIDs() *subnetIDs {
// Given a node can calculate committee assignments of current epoch and next epoch.
// Max size is set to 2 epoch length.
cacheSize := int(params.BeaconConfig().MaxCommitteesPerSlot * params.BeaconConfig().SlotsPerEpoch * 2)
cacheSize := int(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().MaxCommitteesPerSlot * 2))
attesterCache, err := lru.New(cacheSize)
if err != nil {
panic(err)
@@ -34,14 +35,14 @@ func newSubnetIDs() *subnetIDs {
if err != nil {
panic(err)
}
epochDuration := time.Duration(params.BeaconConfig().SlotsPerEpoch * params.BeaconConfig().SecondsPerSlot)
epochDuration := time.Duration(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
subLength := epochDuration * time.Duration(params.BeaconConfig().EpochsPerRandomSubnetSubscription)
persistentCache := cache.New(subLength*time.Second, epochDuration*time.Second)
return &subnetIDs{attester: attesterCache, aggregator: aggregatorCache, persistentSubnets: persistentCache}
}

// AddAttesterSubnetID adds the subnet index for subscribing subnet for the attester of a given slot.
func (s *subnetIDs) AddAttesterSubnetID(slot, subnetID uint64) {
func (s *subnetIDs) AddAttesterSubnetID(slot types.Slot, subnetID uint64) {
s.attesterLock.Lock()
defer s.attesterLock.Unlock()

@@ -54,7 +55,7 @@ func (s *subnetIDs) AddAttesterSubnetID(slot, subnetID uint64) {
}

// GetAttesterSubnetIDs gets the subnet IDs for subscribed subnets for attesters of the slot.
func (s *subnetIDs) GetAttesterSubnetIDs(slot uint64) []uint64 {
func (s *subnetIDs) GetAttesterSubnetIDs(slot types.Slot) []uint64 {
s.attesterLock.RLock()
defer s.attesterLock.RUnlock()

@@ -69,7 +70,7 @@ func (s *subnetIDs) GetAttesterSubnetIDs(slot uint64) []uint64 {
}

// AddAggregatorSubnetID adds the subnet ID for subscribing subnet for the aggregator of a given slot.
func (s *subnetIDs) AddAggregatorSubnetID(slot, subnetID uint64) {
func (s *subnetIDs) AddAggregatorSubnetID(slot types.Slot, subnetID uint64) {
s.aggregatorLock.Lock()
defer s.aggregatorLock.Unlock()

@@ -82,7 +83,7 @@ func (s *subnetIDs) AddAggregatorSubnetID(slot, subnetID uint64) {
}

// GetAggregatorSubnetIDs gets the subnet IDs for subscribing subnet for aggregator of the slot.
func (s *subnetIDs) GetAggregatorSubnetIDs(slot uint64) []uint64 {
func (s *subnetIDs) GetAggregatorSubnetIDs(slot types.Slot) []uint64 {
s.aggregatorLock.RLock()
defer s.aggregatorLock.RUnlock()

@@ -132,3 +133,22 @@ func (s *subnetIDs) AddPersistentCommittee(pubkey []byte, comIndex []uint64, dur

s.persistentSubnets.Set(string(pubkey), comIndex, duration)
}

// EmptyAllCaches empties out all the related caches and flushes any stored
// entries on them. This should only ever be used for testing, in normal
// production, handling of the relevant subnets for each role is done
// separately.
func (s *subnetIDs) EmptyAllCaches() {
// Clear the caches.
s.attesterLock.Lock()
s.attester.Purge()
s.attesterLock.Unlock()

s.aggregatorLock.Lock()
s.aggregator.Purge()
s.aggregatorLock.Unlock()

s.subnetsLock.Lock()
s.persistentSubnets.Flush()
s.subnetsLock.Unlock()
}

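The slot arguments above are now the typed types.Slot, and the sizing arithmetic uses the new Slot.Mul helper; under mainnet parameters (SlotsPerEpoch = 32, MaxCommitteesPerSlot = 64) the attester LRU is sized for 32 * 64 * 2 = 4096 entries, two epochs' worth of committees. A tiny sketch of the typed round trip, using only the exported methods shown in this diff, follows.

package example

import (
	types "github.com/prysmaticlabs/eth2-types"
	"github.com/prysmaticlabs/prysm/beacon-chain/cache"
)

// trackAttesterSubnet records a subnet subscription for a slot and reads it back.
func trackAttesterSubnet(slot types.Slot, subnet uint64) []uint64 {
	cache.SubnetIDs.AddAttesterSubnetID(slot, subnet)
	return cache.SubnetIDs.GetAttesterSubnetIDs(slot)
}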
beacon-chain/cache/subnet_ids_test.go (vendored, 3 changes)
@@ -3,13 +3,14 @@ package cache
import (
"testing"

types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
)

func TestSubnetIDsCache_RoundTrip(t *testing.T) {
c := newSubnetIDs()
slot := uint64(100)
slot := types.Slot(100)
committeeIDs := c.GetAggregatorSubnetIDs(slot)
assert.Equal(t, 0, len(committeeIDs), "Empty cache returned an object")

187
beacon-chain/cache/sync_committee.go
vendored
Normal file
187
beacon-chain/cache/sync_committee.go
vendored
Normal file
@@ -0,0 +1,187 @@
|
||||
// +build !libfuzzer
|
||||
|
||||
package cache
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
var (
|
||||
maxSyncCommitteeSize = uint64(3) // Allows 3 forks to happen around `EPOCHS_PER_SYNC_COMMITTEE_PERIOD` boundary.
|
||||
|
||||
// SyncCommitteeCacheMiss tracks the number of committee requests that aren't present in the cache.
|
||||
SyncCommitteeCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "sync_committee_index_cache_miss",
|
||||
Help: "The number of committee requests that aren't present in the sync committee index cache.",
|
||||
})
|
||||
// SyncCommitteeCacheHit tracks the number of committee requests that are in the cache.
|
||||
SyncCommitteeCacheHit = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "sync_committee_index_cache_hit",
|
||||
Help: "The number of committee requests that are present in the sync committee index cache.",
|
||||
})
|
||||
)
|
||||
|
||||
// SyncCommitteeCache utilizes a FIFO cache to sufficiently cache validator position within sync committee.
|
||||
// It is thread safe with concurrent read write.
|
||||
type SyncCommitteeCache struct {
|
||||
cache *cache.FIFO
|
||||
lock sync.RWMutex
|
||||
}
|
||||
|
||||
// Index position of all validators in sync committee where `currentSyncCommitteeRoot` is the
|
||||
// key and `vIndexToPositionMap` is value. Inside `vIndexToPositionMap`, validator positions
|
||||
// are cached where key is the validator index and the value is the `positionInCommittee` struct.
|
||||
type syncCommitteeIndexPosition struct {
|
||||
currentSyncCommitteeRoot [32]byte
|
||||
vIndexToPositionMap map[types.ValidatorIndex]*positionInCommittee
|
||||
}
|
||||
|
||||
// Index position of individual validator of current period and next period sync committee.
|
||||
type positionInCommittee struct {
|
||||
currentPeriod []types.CommitteeIndex
|
||||
nextPeriod []types.CommitteeIndex
|
||||
}
|
||||
|
||||
// NewSyncCommittee initializes and returns a new SyncCommitteeCache.
|
||||
func NewSyncCommittee() *SyncCommitteeCache {
|
||||
return &SyncCommitteeCache{
|
||||
cache: cache.NewFIFO(keyFn),
|
||||
}
|
||||
}
|
||||
|
||||
// CurrentPeriodIndexPosition returns current period index position of a validator index with respect with
|
||||
// sync committee. If the input validator index has no assignment, an empty list will be returned.
|
||||
// If the input root does not exist in cache, ErrNonExistingSyncCommitteeKey is returned.
|
||||
// Then performing manual checking of state for index position in state is recommended.
|
||||
func (s *SyncCommitteeCache) CurrentPeriodIndexPosition(root [32]byte, valIdx types.ValidatorIndex) ([]types.CommitteeIndex, error) {
|
||||
s.lock.RLock()
|
||||
defer s.lock.RUnlock()
|
||||
|
||||
pos, err := s.idxPositionInCommittee(root, valIdx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if pos == nil {
|
||||
return []types.CommitteeIndex{}, nil
|
||||
}
|
||||
|
||||
return pos.currentPeriod, nil
|
||||
}
|
||||
|
||||
// NextPeriodIndexPosition returns next period index position of a validator index in respect with sync committee.
|
||||
// If the input validator index has no assignment, an empty list will be returned.
|
||||
// If the input root does not exist in cache, ErrNonExistingSyncCommitteeKey is returned.
|
||||
// Then performing manual checking of state for index position in state is recommended.
|
||||
func (s *SyncCommitteeCache) NextPeriodIndexPosition(root [32]byte, valIdx types.ValidatorIndex) ([]types.CommitteeIndex, error) {
|
||||
s.lock.RLock()
|
||||
defer s.lock.RUnlock()
|
||||
|
||||
pos, err := s.idxPositionInCommittee(root, valIdx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if pos == nil {
|
||||
return []types.CommitteeIndex{}, nil
|
||||
}
|
||||
return pos.nextPeriod, nil
|
||||
}
|
||||
|
||||
// Helper function for `CurrentPeriodIndexPosition` and `NextPeriodIndexPosition` to return a mapping
|
||||
// of validator index to its index(s) position in the sync committee.
|
||||
func (s *SyncCommitteeCache) idxPositionInCommittee(
|
||||
root [32]byte, valIdx types.ValidatorIndex,
|
||||
) (*positionInCommittee, error) {
|
||||
obj, exists, err := s.cache.GetByKey(key(root))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !exists {
|
||||
SyncCommitteeCacheMiss.Inc()
|
||||
return nil, ErrNonExistingSyncCommitteeKey
|
||||
}
|
||||
item, ok := obj.(*syncCommitteeIndexPosition)
|
||||
if !ok {
|
||||
return nil, errNotSyncCommitteeIndexPosition
|
||||
}
|
||||
idxInCommittee, ok := item.vIndexToPositionMap[valIdx]
|
||||
if !ok {
|
||||
SyncCommitteeCacheMiss.Inc()
|
||||
return nil, nil
|
||||
}
|
||||
SyncCommitteeCacheHit.Inc()
|
||||
return idxInCommittee, nil
|
||||
}
|
||||
|
||||
// UpdatePositionsInCommittee updates caching of validators position in sync committee in respect to
|
||||
// current epoch and next epoch. This should be called when `current_sync_committee` and `next_sync_committee`
|
||||
// change and that happens every `EPOCHS_PER_SYNC_COMMITTEE_PERIOD`.
|
||||
func (s *SyncCommitteeCache) UpdatePositionsInCommittee(syncCommitteeBoundaryRoot [32]byte, st state.BeaconStateAltair) error {
|
||||
csc, err := st.CurrentSyncCommittee()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
positionsMap := make(map[types.ValidatorIndex]*positionInCommittee)
|
||||
for i, pubkey := range csc.Pubkeys {
|
||||
p := bytesutil.ToBytes48(pubkey)
|
||||
validatorIndex, ok := st.ValidatorIndexByPubkey(p)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
if _, ok := positionsMap[validatorIndex]; !ok {
|
||||
m := &positionInCommittee{currentPeriod: []types.CommitteeIndex{types.CommitteeIndex(i)}, nextPeriod: []types.CommitteeIndex{}}
|
||||
positionsMap[validatorIndex] = m
|
||||
} else {
|
||||
positionsMap[validatorIndex].currentPeriod = append(positionsMap[validatorIndex].currentPeriod, types.CommitteeIndex(i))
|
||||
}
|
||||
}
|
||||
|
||||
nsc, err := st.NextSyncCommittee()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for i, pubkey := range nsc.Pubkeys {
|
||||
p := bytesutil.ToBytes48(pubkey)
|
||||
validatorIndex, ok := st.ValidatorIndexByPubkey(p)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
if _, ok := positionsMap[validatorIndex]; !ok {
|
||||
m := &positionInCommittee{nextPeriod: []types.CommitteeIndex{types.CommitteeIndex(i)}, currentPeriod: []types.CommitteeIndex{}}
|
||||
positionsMap[validatorIndex] = m
|
||||
} else {
|
||||
positionsMap[validatorIndex].nextPeriod = append(positionsMap[validatorIndex].nextPeriod, types.CommitteeIndex(i))
|
||||
}
|
||||
}
|
||||
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
|
||||
if err := s.cache.Add(&syncCommitteeIndexPosition{
|
||||
currentSyncCommitteeRoot: syncCommitteeBoundaryRoot,
|
||||
vIndexToPositionMap: positionsMap,
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
trim(s.cache, maxSyncCommitteeSize)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Given the `syncCommitteeIndexPosition` object, this returns the key of the object.
|
||||
// The key is the `currentSyncCommitteeRoot` within the field.
|
||||
// Error gets returned if input does not comply with `currentSyncCommitteeRoot` object.
|
||||
func keyFn(obj interface{}) (string, error) {
|
||||
info, ok := obj.(*syncCommitteeIndexPosition)
|
||||
if !ok {
|
||||
return "", errNotSyncCommitteeIndexPosition
|
||||
}
|
||||
|
||||
return string(info.currentSyncCommitteeRoot[:]), nil
|
||||
}
|
||||
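The file above adds the non-fuzzer SyncCommitteeCache. A compact sketch of how a caller would refresh it at a sync committee period boundary and then query one validator's positions is shown here; it relies only on the exported methods from this diff, and the state value and boundary root are assumed to come from the caller.

package example

import (
	types "github.com/prysmaticlabs/eth2-types"
	"github.com/prysmaticlabs/prysm/beacon-chain/cache"
	"github.com/prysmaticlabs/prysm/beacon-chain/state"
)

// refreshAndLookup repopulates the cache at a sync committee period boundary and then
// reads back one validator's positions in the current committee.
func refreshAndLookup(st state.BeaconStateAltair, boundaryRoot [32]byte, valIdx types.ValidatorIndex) ([]types.CommitteeIndex, error) {
	c := cache.NewSyncCommittee()
	if err := c.UpdatePositionsInCommittee(boundaryRoot, st); err != nil {
		return nil, err
	}
	// An empty slice means the validator is not in the committee; ErrNonExistingSyncCommitteeKey
	// means the root was never cached and the state should be consulted directly.
	return c.CurrentPeriodIndexPosition(boundaryRoot, valIdx)
}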
beacon-chain/cache/sync_committee_disabled.go (vendored, new file, 32 lines)
@@ -0,0 +1,32 @@
// +build libfuzzer

package cache

import (
types "github.com/prysmaticlabs/eth2-types"
"github.com/prysmaticlabs/prysm/beacon-chain/state"
)

// FakeSyncCommitteeCache is a fake `SyncCommitteeCache` to satisfy fuzzing.
type FakeSyncCommitteeCache struct {
}

// NewSyncCommittee initializes and returns a new SyncCommitteeCache.
func NewSyncCommittee() *FakeSyncCommitteeCache {
return &FakeSyncCommitteeCache{}
}

// CurrentEpochIndexPosition -- fake.
func (s *FakeSyncCommitteeCache) CurrentPeriodIndexPosition(root [32]byte, valIdx types.ValidatorIndex) ([]types.CommitteeIndex, error) {
return nil, nil
}

// NextEpochIndexPosition -- fake.
func (s *FakeSyncCommitteeCache) NextPeriodIndexPosition(root [32]byte, valIdx types.ValidatorIndex) ([]types.CommitteeIndex, error) {
return nil, nil
}

// UpdatePositionsInCommittee -- fake.
func (s *FakeSyncCommitteeCache) UpdatePositionsInCommittee(syncCommitteeBoundaryRoot [32]byte, state state.BeaconStateAltair) error {
return nil
}
222
beacon-chain/cache/sync_committee_test.go
vendored
Normal file
222
beacon-chain/cache/sync_committee_test.go
vendored
Normal file
@@ -0,0 +1,222 @@
|
||||
package cache_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
)
|
||||
|
||||
func TestSyncCommitteeCache_CanUpdateAndRetrieve(t *testing.T) {
|
||||
numValidators := 101
|
||||
deterministicState, _ := testutil.DeterministicGenesisStateAltair(t, uint64(numValidators))
|
||||
pubKeys := make([][]byte, deterministicState.NumValidators())
|
||||
for i, val := range deterministicState.Validators() {
|
||||
pubKeys[i] = val.PublicKey
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
currentSyncCommittee *ethpb.SyncCommittee
|
||||
nextSyncCommittee *ethpb.SyncCommittee
|
||||
currentSyncMap map[types.ValidatorIndex][]types.CommitteeIndex
|
||||
nextSyncMap map[types.ValidatorIndex][]types.CommitteeIndex
|
||||
}{
|
||||
{
|
||||
name: "only current epoch",
|
||||
currentSyncCommittee: convertToCommittee([][]byte{
|
||||
pubKeys[1], pubKeys[2], pubKeys[3], pubKeys[2], pubKeys[2],
|
||||
}),
|
||||
nextSyncCommittee: convertToCommittee([][]byte{}),
|
||||
currentSyncMap: map[types.ValidatorIndex][]types.CommitteeIndex{
|
||||
1: {0},
|
||||
2: {1, 3, 4},
|
||||
3: {2},
|
||||
},
|
||||
nextSyncMap: map[types.ValidatorIndex][]types.CommitteeIndex{
|
||||
1: {},
|
||||
2: {},
|
||||
3: {},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "only next epoch",
|
||||
currentSyncCommittee: convertToCommittee([][]byte{}),
|
||||
nextSyncCommittee: convertToCommittee([][]byte{
|
||||
pubKeys[1], pubKeys[2], pubKeys[3], pubKeys[2], pubKeys[2],
|
||||
}),
|
||||
currentSyncMap: map[types.ValidatorIndex][]types.CommitteeIndex{
|
||||
1: {},
|
||||
2: {},
|
||||
3: {},
|
||||
},
|
||||
nextSyncMap: map[types.ValidatorIndex][]types.CommitteeIndex{
|
||||
1: {0},
|
||||
2: {1, 3, 4},
|
||||
3: {2},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "some current epoch and some next epoch",
|
||||
currentSyncCommittee: convertToCommittee([][]byte{
|
||||
pubKeys[1],
|
||||
pubKeys[2],
|
||||
pubKeys[3],
|
||||
pubKeys[2],
|
||||
pubKeys[2],
|
||||
}),
|
||||
nextSyncCommittee: convertToCommittee([][]byte{
|
||||
pubKeys[7],
|
||||
pubKeys[6],
|
||||
pubKeys[5],
|
||||
pubKeys[4],
|
||||
pubKeys[7],
|
||||
}),
|
||||
currentSyncMap: map[types.ValidatorIndex][]types.CommitteeIndex{
|
||||
1: {0},
|
||||
2: {1, 3, 4},
|
||||
3: {2},
|
||||
},
|
||||
nextSyncMap: map[types.ValidatorIndex][]types.CommitteeIndex{
|
||||
7: {0, 4},
|
||||
6: {1},
|
||||
5: {2},
|
||||
4: {3},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "some current epoch and some next epoch duplicated across",
|
||||
currentSyncCommittee: convertToCommittee([][]byte{
|
||||
pubKeys[1],
|
||||
pubKeys[2],
|
||||
pubKeys[3],
|
||||
pubKeys[2],
|
||||
pubKeys[2],
|
||||
}),
|
||||
nextSyncCommittee: convertToCommittee([][]byte{
|
||||
pubKeys[2],
|
||||
pubKeys[1],
|
||||
pubKeys[3],
|
||||
pubKeys[2],
|
||||
pubKeys[1],
|
||||
}),
|
||||
currentSyncMap: map[types.ValidatorIndex][]types.CommitteeIndex{
|
||||
1: {0},
|
||||
2: {1, 3, 4},
|
||||
3: {2},
|
||||
},
|
||||
nextSyncMap: map[types.ValidatorIndex][]types.CommitteeIndex{
|
||||
1: {1, 4},
|
||||
2: {0, 3},
|
||||
3: {2},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "all duplicated",
|
||||
currentSyncCommittee: convertToCommittee([][]byte{
|
||||
pubKeys[100],
|
||||
pubKeys[100],
|
||||
pubKeys[100],
|
||||
pubKeys[100],
|
||||
}),
|
||||
nextSyncCommittee: convertToCommittee([][]byte{
|
||||
pubKeys[100],
|
||||
pubKeys[100],
|
||||
pubKeys[100],
|
||||
pubKeys[100],
|
||||
}),
|
||||
currentSyncMap: map[types.ValidatorIndex][]types.CommitteeIndex{
|
||||
100: {0, 1, 2, 3},
|
||||
},
|
||||
nextSyncMap: map[types.ValidatorIndex][]types.CommitteeIndex{
|
||||
100: {0, 1, 2, 3},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "unknown keys",
|
||||
currentSyncCommittee: convertToCommittee([][]byte{
|
||||
pubKeys[100],
|
||||
pubKeys[100],
|
||||
pubKeys[100],
|
||||
pubKeys[100],
|
||||
}),
|
||||
nextSyncCommittee: convertToCommittee([][]byte{
|
||||
pubKeys[100],
|
||||
pubKeys[100],
|
||||
pubKeys[100],
|
||||
pubKeys[100],
|
||||
}),
|
||||
currentSyncMap: map[types.ValidatorIndex][]types.CommitteeIndex{
|
||||
1: {},
|
||||
},
|
||||
nextSyncMap: map[types.ValidatorIndex][]types.CommitteeIndex{
|
||||
1: {},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
s, _ := testutil.DeterministicGenesisStateAltair(t, uint64(numValidators))
|
||||
require.NoError(t, s.SetCurrentSyncCommittee(tt.currentSyncCommittee))
|
||||
require.NoError(t, s.SetNextSyncCommittee(tt.nextSyncCommittee))
|
||||
cache := cache.NewSyncCommittee()
|
||||
r := [32]byte{'a'}
|
||||
require.NoError(t, cache.UpdatePositionsInCommittee(r, s))
|
||||
for key, indices := range tt.currentSyncMap {
|
||||
pos, err := cache.CurrentPeriodIndexPosition(r, key)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, indices, pos)
|
||||
}
|
||||
for key, indices := range tt.nextSyncMap {
|
||||
pos, err := cache.NextPeriodIndexPosition(r, key)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, indices, pos)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestSyncCommitteeCache_RootDoesNotExist(t *testing.T) {
|
||||
c := cache.NewSyncCommittee()
|
||||
_, err := c.CurrentPeriodIndexPosition([32]byte{}, 0)
|
||||
require.Equal(t, cache.ErrNonExistingSyncCommitteeKey, err)
|
||||
}
|
||||
|
||||
func TestSyncCommitteeCache_CanRotate(t *testing.T) {
|
||||
c := cache.NewSyncCommittee()
|
||||
s, _ := testutil.DeterministicGenesisStateAltair(t, 64)
|
||||
require.NoError(t, s.SetCurrentSyncCommittee(convertToCommittee([][]byte{{1}})))
|
||||
require.NoError(t, c.UpdatePositionsInCommittee([32]byte{'a'}, s))
|
||||
require.NoError(t, s.SetCurrentSyncCommittee(convertToCommittee([][]byte{{2}})))
|
||||
require.NoError(t, c.UpdatePositionsInCommittee([32]byte{'b'}, s))
|
||||
require.NoError(t, s.SetCurrentSyncCommittee(convertToCommittee([][]byte{{3}})))
|
||||
require.NoError(t, c.UpdatePositionsInCommittee([32]byte{'c'}, s))
|
||||
require.NoError(t, s.SetCurrentSyncCommittee(convertToCommittee([][]byte{{4}})))
|
||||
require.NoError(t, c.UpdatePositionsInCommittee([32]byte{'d'}, s))
|
||||
|
||||
_, err := c.CurrentPeriodIndexPosition([32]byte{'a'}, 0)
|
||||
require.Equal(t, cache.ErrNonExistingSyncCommitteeKey, err)
|
||||
|
||||
_, err = c.CurrentPeriodIndexPosition([32]byte{'c'}, 0)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func convertToCommittee(inputKeys [][]byte) *ethpb.SyncCommittee {
|
||||
var pubKeys [][]byte
|
||||
for i := uint64(0); i < params.BeaconConfig().SyncCommitteeSize; i++ {
|
||||
if i < uint64(len(inputKeys)) {
|
||||
pubKeys = append(pubKeys, bytesutil.PadTo(inputKeys[i], params.BeaconConfig().BLSPubkeyLength))
|
||||
} else {
|
||||
pubKeys = append(pubKeys, bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength))
|
||||
}
|
||||
}
|
||||
|
||||
return ðpb.SyncCommittee{
|
||||
Pubkeys: pubKeys,
|
||||
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
|
||||
}
|
||||
}
|
||||
120
beacon-chain/cache/sync_subnet_ids.go
vendored
Normal file
120
beacon-chain/cache/sync_subnet_ids.go
vendored
Normal file
@@ -0,0 +1,120 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/patrickmn/go-cache"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/rand"
|
||||
"github.com/prysmaticlabs/prysm/shared/sliceutil"
|
||||
)
|
||||
|
||||
type syncSubnetIDs struct {
|
||||
sCommittee *cache.Cache
|
||||
sCommiteeLock sync.RWMutex
|
||||
}
|
||||
|
||||
// SyncSubnetIDs for sync committee participant.
|
||||
var SyncSubnetIDs = newSyncSubnetIDs()
|
||||
|
||||
func newSyncSubnetIDs() *syncSubnetIDs {
|
||||
epochDuration := time.Duration(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
|
||||
subLength := epochDuration * time.Duration(params.BeaconConfig().EpochsPerSyncCommitteePeriod)
|
||||
persistentCache := cache.New(subLength*time.Second, epochDuration*time.Second)
|
||||
return &syncSubnetIDs{sCommittee: persistentCache}
|
||||
}
|
||||
|
||||
// GetSyncCommitteeSubnets retrieves the sync committee subnet and expiration time of that validator's
|
||||
// subscription.
|
||||
func (s *syncSubnetIDs) GetSyncCommitteeSubnets(pubkey []byte, epoch types.Epoch) ([]uint64, types.Epoch, bool, time.Time) {
|
||||
s.sCommiteeLock.RLock()
|
||||
defer s.sCommiteeLock.RUnlock()
|
||||
|
||||
id, duration, ok := s.sCommittee.GetWithExpiration(keyBuilder(pubkey, epoch))
|
||||
if !ok {
|
||||
return []uint64{}, 0, ok, time.Time{}
|
||||
}
|
||||
// Retrieve the slot from the cache.
|
||||
idxs, ok := id.([]uint64)
|
||||
if !ok {
|
||||
return []uint64{}, 0, ok, time.Time{}
|
||||
}
|
||||
// If no committees are saved, we return
|
||||
// nothing.
|
||||
if len(idxs) <= 1 {
|
||||
return []uint64{}, 0, ok, time.Time{}
|
||||
}
|
||||
return idxs[1:], types.Epoch(idxs[0]), ok, duration
|
||||
}
|
||||
|
||||
// GetAllSubnets retrieves all the non-expired subscribed subnets of all the validators
|
||||
// in the cache. This method also takes the epoch as an argument to only retrieve
|
||||
// assingments for epochs that have happened.
|
||||
func (s *syncSubnetIDs) GetAllSubnets(currEpoch types.Epoch) []uint64 {
|
||||
s.sCommiteeLock.RLock()
|
||||
defer s.sCommiteeLock.RUnlock()
|
||||
|
||||
itemsMap := s.sCommittee.Items()
|
||||
var committees []uint64
|
||||
|
||||
for _, v := range itemsMap {
|
||||
if v.Expired() {
|
||||
continue
|
||||
}
|
||||
idxs, ok := v.Object.([]uint64)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
if len(idxs) <= 1 {
|
||||
continue
|
||||
}
|
||||
// Check if the subnet is valid in the current epoch.
|
||||
if types.Epoch(idxs[0]) > currEpoch {
|
||||
continue
|
||||
}
|
||||
// Ignore the first index as that represents the
|
||||
// epoch of the validator's assignments.
|
||||
committees = append(committees, idxs[1:]...)
|
||||
}
|
||||
return sliceutil.SetUint64(committees)
|
||||
}
|
||||
|
||||
// AddSyncCommitteeSubnets adds the relevant committee for that particular validator along with its
|
||||
// expiration period. An Epoch argument here denotes the epoch from which the sync committee subnets
|
||||
// will be active.
|
||||
func (s *syncSubnetIDs) AddSyncCommitteeSubnets(pubkey []byte, epoch types.Epoch, comIndex []uint64, duration time.Duration) {
|
||||
s.sCommiteeLock.Lock()
|
||||
defer s.sCommiteeLock.Unlock()
|
||||
subComCount := params.BeaconConfig().SyncCommitteeSubnetCount
|
||||
// To join a sync committee subnet, select a random number of epochs before the end of the
|
||||
// current sync committee period between 1 and SYNC_COMMITTEE_SUBNET_COUNT, inclusive.
|
||||
diff := (rand.NewGenerator().Uint64() % subComCount) + 1
|
||||
joinEpoch, err := epoch.SafeSub(diff)
|
||||
if err != nil {
|
||||
// If we overflow here, we simply set the value to join
|
||||
// at 0.
|
||||
joinEpoch = 0
|
||||
}
|
||||
// Append the epoch of the subnet into the first index here.
|
||||
s.sCommittee.Set(keyBuilder(pubkey, epoch), append([]uint64{uint64(joinEpoch)}, comIndex...), duration)
|
||||
}
|
||||
|
||||
// EmptyAllCaches empties out all the related caches and flushes any stored
|
||||
// entries on them. This should only ever be used for testing, in normal
|
||||
// production, handling of the relevant subnets for each role is done
|
||||
// separately.
|
||||
func (s *syncSubnetIDs) EmptyAllCaches() {
|
||||
// Clear the cache.
|
||||
|
||||
s.sCommiteeLock.Lock()
|
||||
s.sCommittee.Flush()
|
||||
s.sCommiteeLock.Unlock()
|
||||
}
|
||||
|
||||
func keyBuilder(pubkey []byte, epoch types.Epoch) string {
|
||||
epochBytes := bytesutil.Bytes8(uint64(epoch))
|
||||
return string(append(pubkey, epochBytes...))
|
||||
}
|
||||
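AddSyncCommitteeSubnets packs the randomized join epoch into element 0 of the stored slice, and GetSyncCommitteeSubnets unpacks it again, which is why the getter returns the subnets and the epoch separately. A minimal round-trip sketch using only the exported surface from this file follows; the pubkey, epoch, subnets and TTL are assumed caller inputs.

package example

import (
	"time"

	types "github.com/prysmaticlabs/eth2-types"
	"github.com/prysmaticlabs/prysm/beacon-chain/cache"
)

// subscribeAndRead stores a validator's sync subnet assignment and reads it back.
// The cache internally prepends the (randomized) join epoch as element 0 of the stored
// slice, so the getter returns the subnet IDs and the join epoch as separate values.
func subscribeAndRead(pubkey []byte, epoch types.Epoch, subnets []uint64, ttl time.Duration) ([]uint64, types.Epoch, bool) {
	cache.SyncSubnetIDs.AddSyncCommitteeSubnets(pubkey, epoch, subnets, ttl)
	got, joinEpoch, ok, _ := cache.SyncSubnetIDs.GetSyncCommitteeSubnets(pubkey, epoch)
	return got, joinEpoch, ok
}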
57
beacon-chain/cache/sync_subnet_ids_test.go
vendored
Normal file
57
beacon-chain/cache/sync_subnet_ids_test.go
vendored
Normal file
@@ -0,0 +1,57 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
)
|
||||
|
||||
func TestSyncSubnetIDsCache_Roundtrip(t *testing.T) {
|
||||
c := newSyncSubnetIDs()
|
||||
|
||||
for i := 0; i < 20; i++ {
|
||||
pubkey := [48]byte{byte(i)}
|
||||
c.AddSyncCommitteeSubnets(pubkey[:], 100, []uint64{uint64(i)}, 0)
|
||||
}
|
||||
|
||||
for i := uint64(0); i < 20; i++ {
|
||||
pubkey := [48]byte{byte(i)}
|
||||
|
||||
idxs, _, ok, _ := c.GetSyncCommitteeSubnets(pubkey[:], 100)
|
||||
if !ok {
|
||||
t.Errorf("Couldn't find entry in cache for pubkey %#x", pubkey)
|
||||
continue
|
||||
}
|
||||
require.Equal(t, i, idxs[0])
|
||||
}
|
||||
coms := c.GetAllSubnets(100)
|
||||
assert.Equal(t, 20, len(coms))
|
||||
}
|
||||
|
||||
func TestSyncSubnetIDsCache_ValidateCurrentEpoch(t *testing.T) {
|
||||
c := newSyncSubnetIDs()
|
||||
|
||||
for i := 0; i < 20; i++ {
|
||||
pubkey := [48]byte{byte(i)}
|
||||
c.AddSyncCommitteeSubnets(pubkey[:], 100, []uint64{uint64(i)}, 0)
|
||||
}
|
||||
|
||||
coms := c.GetAllSubnets(50)
|
||||
assert.Equal(t, 0, len(coms))
|
||||
|
||||
for i := uint64(0); i < 20; i++ {
|
||||
pubkey := [48]byte{byte(i)}
|
||||
|
||||
_, jEpoch, ok, _ := c.GetSyncCommitteeSubnets(pubkey[:], 100)
|
||||
if !ok {
|
||||
t.Errorf("Couldn't find entry in cache for pubkey %#x", pubkey)
|
||||
continue
|
||||
}
|
||||
require.Equal(t, true, uint64(jEpoch) >= 100-params.BeaconConfig().SyncCommitteeSubnetCount)
|
||||
}
|
||||
|
||||
coms = c.GetAllSubnets(99)
|
||||
assert.Equal(t, 20, len(coms))
|
||||
}
|
||||
85
beacon-chain/core/altair/BUILD.bazel
Normal file
85
beacon-chain/core/altair/BUILD.bazel
Normal file
@@ -0,0 +1,85 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"attestation.go",
|
||||
"block.go",
|
||||
"deposit.go",
|
||||
"epoch_precompute.go",
|
||||
"epoch_spec.go",
|
||||
"fork_transition.go",
|
||||
"reward.go",
|
||||
"sync_committee.go",
|
||||
"transition.go",
|
||||
"validator.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/core/altair",
|
||||
visibility = [
|
||||
"//beacon-chain:__subpackages__",
|
||||
"//shared/testutil:__pkg__",
|
||||
"//spectest:__subpackages__",
|
||||
"//validator/client:__pkg__",
|
||||
],
|
||||
deps = [
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
"//beacon-chain/core/epoch:go_default_library",
|
||||
"//beacon-chain/core/epoch/precompute:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/validators:go_default_library",
|
||||
"//beacon-chain/p2p/types:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/v2:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/block:go_default_library",
|
||||
"//shared/attestationutil:go_default_library",
|
||||
"//shared/bls:go_default_library",
|
||||
"//shared/bytesutil:go_default_library",
|
||||
"//shared/hashutil:go_default_library",
|
||||
"//shared/mathutil:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
|
||||
"@io_opencensus_go//trace:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"attestation_test.go",
|
||||
"block_test.go",
|
||||
"deposit_fuzz_test.go",
|
||||
"deposit_test.go",
|
||||
"epoch_precompute_test.go",
|
||||
"epoch_spec_test.go",
|
||||
"fork_transition_test.go",
|
||||
"reward_test.go",
|
||||
"sync_committee_test.go",
|
||||
"transition_test.go",
|
||||
"validator_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/core/epoch/precompute:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/p2p/types:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/v2:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/wrapper:go_default_library",
|
||||
"//shared/attestationutil:go_default_library",
|
||||
"//shared/bls:go_default_library",
|
||||
"//shared/bytesutil:go_default_library",
|
||||
"//shared/params:go_default_library",
|
||||
"//shared/testutil:go_default_library",
|
||||
"//shared/testutil/assert:go_default_library",
|
||||
"//shared/testutil/require:go_default_library",
|
||||
"//shared/timeutils:go_default_library",
|
||||
"//shared/trieutil:go_default_library",
|
||||
"@com_github_google_gofuzz//:go_default_library",
|
||||
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
"@org_golang_google_protobuf//proto:go_default_library",
|
||||
],
|
||||
)
|
||||
284
beacon-chain/core/altair/attestation.go
Normal file
284
beacon-chain/core/altair/attestation.go
Normal file
@@ -0,0 +1,284 @@
|
||||
package altair
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
|
||||
"github.com/prysmaticlabs/prysm/shared/attestationutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/mathutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
type matchingTarget bool
|
||||
type matchingSource bool
|
||||
type matchingHead bool
|
||||
|
||||
// ProcessAttestations applies processing operations to a block's inner attestation
|
||||
// records.
|
||||
func ProcessAttestations(
|
||||
ctx context.Context,
|
||||
beaconState state.BeaconState,
|
||||
b block.SignedBeaconBlock,
|
||||
) (state.BeaconState, error) {
|
||||
if err := helpers.VerifyNilBeaconBlock(b); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var err error
|
||||
for idx, attestation := range b.Block().Body().Attestations() {
|
||||
beaconState, err = ProcessAttestation(ctx, beaconState, attestation)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not verify attestation at index %d in block", idx)
|
||||
}
|
||||
}
|
||||
return beaconState, nil
|
||||
}
|
||||
|
||||
// ProcessAttestation verifies an input attestation can pass through processing using the given beacon state.
|
||||
//
|
||||
// Spec code:
|
||||
// def process_attestation(state: BeaconState, attestation: Attestation) -> None:
|
||||
// data = attestation.data
|
||||
// assert data.target.epoch in (get_previous_epoch(state), get_current_epoch(state))
|
||||
// assert data.target.epoch == compute_epoch_at_slot(data.slot)
|
||||
// assert data.slot + MIN_ATTESTATION_INCLUSION_DELAY <= state.slot <= data.slot + SLOTS_PER_EPOCH
|
||||
// assert data.index < get_committee_count_per_slot(state, data.target.epoch)
|
||||
//
|
||||
// committee = get_beacon_committee(state, data.slot, data.index)
|
||||
// assert len(attestation.aggregation_bits) == len(committee)
|
||||
//
|
||||
// # Participation flag indices
|
||||
// participation_flag_indices = get_attestation_participation_flag_indices(state, data, state.slot - data.slot)
|
||||
//
|
||||
// # Verify signature
|
||||
// assert is_valid_indexed_attestation(state, get_indexed_attestation(state, attestation))
|
||||
//
|
||||
// # Update epoch participation flags
|
||||
// if data.target.epoch == get_current_epoch(state):
|
||||
// epoch_participation = state.current_epoch_participation
|
||||
// else:
|
||||
// epoch_participation = state.previous_epoch_participation
|
||||
//
|
||||
// proposer_reward_numerator = 0
|
||||
// for index in get_attesting_indices(state, data, attestation.aggregation_bits):
|
||||
// for flag_index, weight in enumerate(PARTICIPATION_FLAG_WEIGHTS):
|
||||
// if flag_index in participation_flag_indices and not has_flag(epoch_participation[index], flag_index):
|
||||
// epoch_participation[index] = add_flag(epoch_participation[index], flag_index)
|
||||
// proposer_reward_numerator += get_base_reward(state, index) * weight
|
||||
//
|
||||
// # Reward proposer
|
||||
// proposer_reward_denominator = (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT) * WEIGHT_DENOMINATOR // PROPOSER_WEIGHT
|
||||
// proposer_reward = Gwei(proposer_reward_numerator // proposer_reward_denominator)
|
||||
// increase_balance(state, get_beacon_proposer_index(state), proposer_reward)
|
||||
func ProcessAttestation(
|
||||
ctx context.Context,
|
||||
beaconState state.BeaconStateAltair,
|
||||
att *ethpb.Attestation,
|
||||
) (state.BeaconStateAltair, error) {
|
||||
beaconState, err := ProcessAttestationNoVerifySignature(ctx, beaconState, att)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return beaconState, blocks.VerifyAttestationSignature(ctx, beaconState, att)
|
||||
}
|
||||
|
||||
// ProcessAttestationsNoVerifySignature applies processing operations to a block's inner attestation
|
||||
// records. The only difference would be that the attestation signature would not be verified.
|
||||
func ProcessAttestationsNoVerifySignature(
|
||||
ctx context.Context,
|
||||
beaconState state.BeaconState,
|
||||
b block.SignedBeaconBlock,
|
||||
) (state.BeaconState, error) {
|
||||
if err := helpers.VerifyNilBeaconBlock(b); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
body := b.Block().Body()
|
||||
var err error
|
||||
for idx, attestation := range body.Attestations() {
|
||||
beaconState, err = ProcessAttestationNoVerifySignature(ctx, beaconState, attestation)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not verify attestation at index %d in block", idx)
|
||||
}
|
||||
}
|
||||
return beaconState, nil
|
||||
}
|
||||
|
||||
// ProcessAttestationNoVerifySignature processes the attestation without verifying the attestation signature. This
|
||||
// method is used to validate attestations whose signatures have already been verified or will be verified later.
|
||||
func ProcessAttestationNoVerifySignature(
|
||||
ctx context.Context,
|
||||
beaconState state.BeaconStateAltair,
|
||||
att *ethpb.Attestation,
|
||||
) (state.BeaconStateAltair, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "altair.ProcessAttestationNoVerifySignature")
|
||||
defer span.End()
|
||||
|
||||
if err := blocks.VerifyAttestationNoVerifySignature(ctx, beaconState, att); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
delay, err := beaconState.Slot().SafeSubSlot(att.Data.Slot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
participatedFlags, err := attestationParticipationFlagIndices(
|
||||
beaconState,
|
||||
att.Data,
|
||||
delay)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
committee, err := helpers.BeaconCommitteeFromState(beaconState, att.Data.Slot, att.Data.CommitteeIndex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
indices, err := attestationutil.AttestingIndices(att.AggregationBits, committee)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var epochParticipation []byte
|
||||
currentEpoch := helpers.CurrentEpoch(beaconState)
|
||||
targetEpoch := att.Data.Target.Epoch
|
||||
if targetEpoch == currentEpoch {
|
||||
epochParticipation, err = beaconState.CurrentEpochParticipation()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
epochParticipation, err = beaconState.PreviousEpochParticipation()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
sourceFlagIndex := params.BeaconConfig().TimelySourceFlagIndex
|
||||
targetFlagIndex := params.BeaconConfig().TimelyTargetFlagIndex
|
||||
headFlagIndex := params.BeaconConfig().TimelyHeadFlagIndex
|
||||
proposerRewardNumerator := uint64(0)
|
||||
totalBalance, err := helpers.TotalActiveBalance(beaconState)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not calculate active balance")
|
||||
}
|
||||
for _, index := range indices {
|
||||
br, err := BaseRewardWithTotalBalance(beaconState, types.ValidatorIndex(index), totalBalance)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if participatedFlags[sourceFlagIndex] && !HasValidatorFlag(epochParticipation[index], sourceFlagIndex) {
|
||||
epochParticipation[index] = AddValidatorFlag(epochParticipation[index], sourceFlagIndex)
|
||||
proposerRewardNumerator += br * params.BeaconConfig().TimelySourceWeight
|
||||
}
|
||||
if participatedFlags[targetFlagIndex] && !HasValidatorFlag(epochParticipation[index], targetFlagIndex) {
|
||||
epochParticipation[index] = AddValidatorFlag(epochParticipation[index], targetFlagIndex)
|
||||
proposerRewardNumerator += br * params.BeaconConfig().TimelyTargetWeight
|
||||
}
|
||||
if participatedFlags[headFlagIndex] && !HasValidatorFlag(epochParticipation[index], headFlagIndex) {
|
||||
epochParticipation[index] = AddValidatorFlag(epochParticipation[index], headFlagIndex)
|
||||
proposerRewardNumerator += br * params.BeaconConfig().TimelyHeadWeight
|
||||
}
|
||||
}
|
||||
|
||||
if targetEpoch == currentEpoch {
|
||||
if err := beaconState.SetCurrentParticipationBits(epochParticipation); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
if err := beaconState.SetPreviousParticipationBits(epochParticipation); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// Reward proposer.
|
||||
if err := rewardProposer(beaconState, proposerRewardNumerator); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return beaconState, nil
|
||||
}
|
||||
|
||||
// This returns the matching statues for attestation data's source target and head.
|
||||
func matchingStatus(beaconState state.BeaconState, data *ethpb.AttestationData, cp *ethpb.Checkpoint) (s matchingSource, t matchingTarget, h matchingHead, err error) {
|
||||
s = matchingSource(attestationutil.CheckPointIsEqual(data.Source, cp))
|
||||
|
||||
r, err := helpers.BlockRoot(beaconState, data.Target.Epoch)
|
||||
if err != nil {
|
||||
return false, false, false, err
|
||||
}
|
||||
t = matchingTarget(bytes.Equal(r, data.Target.Root))
|
||||
|
||||
r, err = helpers.BlockRootAtSlot(beaconState, data.Slot)
|
||||
if err != nil {
|
||||
return false, false, false, err
|
||||
}
|
||||
h = matchingHead(bytes.Equal(r, data.BeaconBlockRoot))
|
||||
return
|
||||
}
|
||||
|
||||
// This rewards proposer by increasing proposer's balance with input reward numerator and calculated reward denominator.
|
||||
func rewardProposer(beaconState state.BeaconState, proposerRewardNumerator uint64) error {
|
||||
proposerRewardDenominator := (params.BeaconConfig().WeightDenominator - params.BeaconConfig().ProposerWeight) * params.BeaconConfig().WeightDenominator / params.BeaconConfig().ProposerWeight
|
||||
proposerReward := proposerRewardNumerator / proposerRewardDenominator
|
||||
i, err := helpers.BeaconProposerIndex(beaconState)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return helpers.IncreaseBalance(beaconState, i, proposerReward)
|
||||
}
|
||||
|
||||
// HasValidatorFlag returns true if the flag at position has set.
|
||||
func HasValidatorFlag(flag, flagPosition uint8) bool {
|
||||
return ((flag >> flagPosition) & 1) == 1
|
||||
}
|
||||
|
||||
// AddValidatorFlag adds new validator flag to existing one.
|
||||
func AddValidatorFlag(flag, flagPosition uint8) uint8 {
|
||||
return flag | (1 << flagPosition)
|
||||
}
|
||||
|
||||
// This retrieves a map of attestation scoring based on Altair's participation flag indices.
|
||||
// This is used to facilitate process attestation during state transition.
|
||||
func attestationParticipationFlagIndices(beaconState state.BeaconStateAltair, data *ethpb.AttestationData, delay types.Slot) (map[uint8]bool, error) {
|
||||
currEpoch := helpers.CurrentEpoch(beaconState)
|
||||
var justifiedCheckpt *ethpb.Checkpoint
|
||||
if data.Target.Epoch == currEpoch {
|
||||
justifiedCheckpt = beaconState.CurrentJustifiedCheckpoint()
|
||||
} else {
|
||||
justifiedCheckpt = beaconState.PreviousJustifiedCheckpoint()
|
||||
}
|
||||
|
||||
// Get matching participation flags.
|
||||
matchingSource, matchingTarget, matchingHead, err := matchingStatus(beaconState, data, justifiedCheckpt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !matchingSource {
|
||||
return nil, errors.New("source epoch does not match")
|
||||
}
|
||||
|
||||
// Process matched participation flags.
|
||||
participatedFlags := make(map[uint8]bool)
|
||||
sourceFlagIndex := params.BeaconConfig().TimelySourceFlagIndex
|
||||
targetFlagIndex := params.BeaconConfig().TimelyTargetFlagIndex
|
||||
headFlagIndex := params.BeaconConfig().TimelyHeadFlagIndex
|
||||
if matchingSource && delay <= types.Slot(mathutil.IntegerSquareRoot(uint64(params.BeaconConfig().SlotsPerEpoch))) {
|
||||
participatedFlags[sourceFlagIndex] = true
|
||||
}
|
||||
if matchingTarget && delay <= params.BeaconConfig().SlotsPerEpoch {
|
||||
participatedFlags[targetFlagIndex] = true
|
||||
}
|
||||
matchingHeadTarget := bool(matchingHead) && bool(matchingTarget)
|
||||
if matchingHeadTarget && delay == params.BeaconConfig().MinAttestationInclusionDelay {
|
||||
participatedFlags[headFlagIndex] = true
|
||||
}
|
||||
return participatedFlags, nil
|
||||
}
|
||||
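HasValidatorFlag and AddValidatorFlag above are plain bit operations over a validator's participation byte. A tiny illustration follows; the flag index 0 is an assumption standing in for the spec's TIMELY_SOURCE_FLAG_INDEX, which the real code reads from params.BeaconConfig().

package example

import "github.com/prysmaticlabs/prysm/beacon-chain/core/altair"

// flagDemo sets one participation flag on an empty byte and checks it.
func flagDemo() (uint8, bool) {
	var participation uint8           // no flags set yet: 0b00000000
	const sourceFlagIndex uint8 = 0   // assumed value for the sketch; the real index comes from params.BeaconConfig().TimelySourceFlagIndex
	participation = altair.AddValidatorFlag(participation, sourceFlagIndex) // now 0b00000001
	return participation, altair.HasValidatorFlag(participation, sourceFlagIndex) // (1, true)
}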
344
beacon-chain/core/altair/attestation_test.go
Normal file
344
beacon-chain/core/altair/attestation_test.go
Normal file
@@ -0,0 +1,344 @@
|
||||
package altair_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
fuzz "github.com/google/gofuzz"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
altair "github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
stateAltair "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/shared/attestationutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/bls"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
)
|
||||
|
||||
func TestProcessAttestations_InclusionDelayFailure(t *testing.T) {
|
||||
attestations := []*ethpb.Attestation{
|
||||
testutil.HydrateAttestation(ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Target: ðpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
|
||||
Slot: 5,
|
||||
},
|
||||
}),
|
||||
}
|
||||
b := testutil.NewBeaconBlockAltair()
|
||||
b.Block = ðpb.BeaconBlockAltair{
|
||||
Body: ðpb.BeaconBlockBodyAltair{
|
||||
Attestations: attestations,
|
||||
},
|
||||
}
|
||||
beaconState, _ := testutil.DeterministicGenesisStateAltair(t, 100)
|
||||
|
||||
want := fmt.Sprintf(
|
||||
"attestation slot %d + inclusion delay %d > state slot %d",
|
||||
attestations[0].Data.Slot,
|
||||
params.BeaconConfig().MinAttestationInclusionDelay,
|
||||
beaconState.Slot(),
|
||||
)
|
||||
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
_, err = altair.ProcessAttestations(context.Background(), beaconState, wsb)
|
||||
require.ErrorContains(t, want, err)
|
||||
}
|
||||
|
||||
func TestProcessAttestations_NeitherCurrentNorPrevEpoch(t *testing.T) {
|
||||
att := testutil.HydrateAttestation(ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Epoch: 0, Root: []byte("hello-world")},
|
||||
Target: ðpb.Checkpoint{Epoch: 0}}})
|
||||
|
||||
b := testutil.NewBeaconBlockAltair()
|
||||
b.Block = ðpb.BeaconBlockAltair{
|
||||
Body: ðpb.BeaconBlockBodyAltair{
|
||||
Attestations: []*ethpb.Attestation{att},
|
||||
},
|
||||
}
|
||||
beaconState, _ := testutil.DeterministicGenesisStateAltair(t, 100)
|
||||
err := beaconState.SetSlot(beaconState.Slot() + params.BeaconConfig().SlotsPerEpoch*4 + params.BeaconConfig().MinAttestationInclusionDelay)
|
||||
require.NoError(t, err)
|
||||
pfc := beaconState.PreviousJustifiedCheckpoint()
|
||||
pfc.Root = []byte("hello-world")
|
||||
require.NoError(t, beaconState.SetPreviousJustifiedCheckpoint(pfc))
|
||||
|
||||
want := fmt.Sprintf(
|
||||
"expected target epoch (%d) to be the previous epoch (%d) or the current epoch (%d)",
|
||||
att.Data.Target.Epoch,
|
||||
helpers.PrevEpoch(beaconState),
|
||||
helpers.CurrentEpoch(beaconState),
|
||||
)
|
||||
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
_, err = altair.ProcessAttestations(context.Background(), beaconState, wsb)
|
||||
require.ErrorContains(t, want, err)
|
||||
}
|
||||
|
||||
func TestProcessAttestations_CurrentEpochFFGDataMismatches(t *testing.T) {
|
||||
attestations := []*ethpb.Attestation{
|
||||
{
|
||||
Data: ðpb.AttestationData{
|
||||
Target: ðpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
|
||||
Source: ðpb.Checkpoint{Epoch: 1, Root: make([]byte, 32)},
|
||||
},
|
||||
AggregationBits: bitfield.Bitlist{0x09},
|
||||
},
|
||||
}
|
||||
b := testutil.NewBeaconBlockAltair()
|
||||
b.Block = ðpb.BeaconBlockAltair{
|
||||
Body: ðpb.BeaconBlockBodyAltair{
|
||||
Attestations: attestations,
|
||||
},
|
||||
}
|
||||
beaconState, _ := testutil.DeterministicGenesisStateAltair(t, 100)
|
||||
require.NoError(t, beaconState.SetSlot(beaconState.Slot()+params.BeaconConfig().MinAttestationInclusionDelay))
|
||||
cfc := beaconState.CurrentJustifiedCheckpoint()
|
||||
cfc.Root = []byte("hello-world")
|
||||
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(cfc))
|
||||
|
||||
want := "source check point not equal to current justified checkpoint"
|
||||
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
_, err = altair.ProcessAttestations(context.Background(), beaconState, wsb)
|
||||
require.ErrorContains(t, want, err)
|
||||
b.Block.Body.Attestations[0].Data.Source.Epoch = helpers.CurrentEpoch(beaconState)
|
||||
b.Block.Body.Attestations[0].Data.Source.Root = []byte{}
|
||||
wsb, err = wrapper.WrappedAltairSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
_, err = altair.ProcessAttestations(context.Background(), beaconState, wsb)
|
||||
require.ErrorContains(t, want, err)
|
||||
}
|
||||
|
||||
func TestProcessAttestations_PrevEpochFFGDataMismatches(t *testing.T) {
|
||||
beaconState, _ := testutil.DeterministicGenesisStateAltair(t, 100)
|
||||
|
||||
aggBits := bitfield.NewBitlist(3)
|
||||
aggBits.SetBitAt(0, true)
|
||||
attestations := []*ethpb.Attestation{
|
||||
{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Epoch: 1, Root: make([]byte, 32)},
|
||||
Target: ðpb.Checkpoint{Epoch: 1, Root: make([]byte, 32)},
|
||||
Slot: params.BeaconConfig().SlotsPerEpoch,
|
||||
},
|
||||
AggregationBits: aggBits,
|
||||
},
|
||||
}
|
||||
b := testutil.NewBeaconBlockAltair()
|
||||
b.Block = ðpb.BeaconBlockAltair{
|
||||
Body: ðpb.BeaconBlockBodyAltair{
|
||||
Attestations: attestations,
|
||||
},
|
||||
}
|
||||
|
||||
err := beaconState.SetSlot(beaconState.Slot() + 2*params.BeaconConfig().SlotsPerEpoch)
|
||||
require.NoError(t, err)
|
||||
pfc := beaconState.PreviousJustifiedCheckpoint()
|
||||
pfc.Root = []byte("hello-world")
|
||||
require.NoError(t, beaconState.SetPreviousJustifiedCheckpoint(pfc))
|
||||
|
||||
want := "source check point not equal to previous justified checkpoint"
|
||||
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
_, err = altair.ProcessAttestations(context.Background(), beaconState, wsb)
|
||||
require.ErrorContains(t, want, err)
|
||||
b.Block.Body.Attestations[0].Data.Source.Epoch = helpers.PrevEpoch(beaconState)
|
||||
b.Block.Body.Attestations[0].Data.Target.Epoch = helpers.PrevEpoch(beaconState)
|
||||
b.Block.Body.Attestations[0].Data.Source.Root = []byte{}
|
||||
wsb, err = wrapper.WrappedAltairSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
_, err = altair.ProcessAttestations(context.Background(), beaconState, wsb)
|
||||
require.ErrorContains(t, want, err)
|
||||
}
|
||||
|
||||
func TestProcessAttestations_InvalidAggregationBitsLength(t *testing.T) {
|
||||
beaconState, _ := testutil.DeterministicGenesisStateAltair(t, 100)
|
||||
|
||||
aggBits := bitfield.NewBitlist(4)
|
||||
att := ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Epoch: 0, Root: []byte("hello-world")},
|
||||
Target: ðpb.Checkpoint{Epoch: 0}},
|
||||
AggregationBits: aggBits,
|
||||
}
|
||||
|
||||
b := testutil.NewBeaconBlockAltair()
|
||||
b.Block = ðpb.BeaconBlockAltair{
|
||||
Body: ðpb.BeaconBlockBodyAltair{
|
||||
Attestations: []*ethpb.Attestation{att},
|
||||
},
|
||||
}
|
||||
|
||||
err := beaconState.SetSlot(beaconState.Slot() + params.BeaconConfig().MinAttestationInclusionDelay)
|
||||
require.NoError(t, err)
|
||||
|
||||
cfc := beaconState.CurrentJustifiedCheckpoint()
|
||||
cfc.Root = []byte("hello-world")
|
||||
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(cfc))
|
||||
|
||||
expected := "failed to verify aggregation bitfield: wanted participants bitfield length 3, got: 4"
|
||||
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
_, err = altair.ProcessAttestations(context.Background(), beaconState, wsb)
|
||||
require.ErrorContains(t, expected, err)
|
||||
}
|
||||
|
||||
func TestProcessAttestations_OK(t *testing.T) {
|
||||
beaconState, privKeys := testutil.DeterministicGenesisStateAltair(t, 100)
|
||||
|
||||
aggBits := bitfield.NewBitlist(3)
|
||||
aggBits.SetBitAt(0, true)
|
||||
var mockRoot [32]byte
|
||||
copy(mockRoot[:], "hello-world")
|
||||
att := testutil.HydrateAttestation(ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Root: mockRoot[:]},
|
||||
Target: ðpb.Checkpoint{Root: mockRoot[:]},
|
||||
},
|
||||
AggregationBits: aggBits,
|
||||
})
|
||||
|
||||
cfc := beaconState.CurrentJustifiedCheckpoint()
|
||||
cfc.Root = mockRoot[:]
|
||||
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(cfc))
|
||||
|
||||
committee, err := helpers.BeaconCommitteeFromState(beaconState, att.Data.Slot, att.Data.CommitteeIndex)
|
||||
require.NoError(t, err)
|
||||
attestingIndices, err := attestationutil.AttestingIndices(att.AggregationBits, committee)
|
||||
require.NoError(t, err)
|
||||
sigs := make([]bls.Signature, len(attestingIndices))
|
||||
for i, indice := range attestingIndices {
|
||||
sb, err := helpers.ComputeDomainAndSign(beaconState, 0, att.Data, params.BeaconConfig().DomainBeaconAttester, privKeys[indice])
|
||||
require.NoError(t, err)
|
||||
sig, err := bls.SignatureFromBytes(sb)
|
||||
require.NoError(t, err)
|
||||
sigs[i] = sig
|
||||
}
|
||||
att.Signature = bls.AggregateSignatures(sigs).Marshal()
|
||||
|
||||
block := testutil.NewBeaconBlockAltair()
|
||||
block.Block.Body.Attestations = []*ethpb.Attestation{att}
|
||||
|
||||
err = beaconState.SetSlot(beaconState.Slot() + params.BeaconConfig().MinAttestationInclusionDelay)
|
||||
require.NoError(t, err)
|
||||
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(block)
|
||||
require.NoError(t, err)
|
||||
_, err = altair.ProcessAttestations(context.Background(), beaconState, wsb)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestProcessAttestationNoVerify_SourceTargetHead(t *testing.T) {
|
||||
beaconState, _ := testutil.DeterministicGenesisStateAltair(t, 64)
|
||||
err := beaconState.SetSlot(beaconState.Slot() + params.BeaconConfig().MinAttestationInclusionDelay)
|
||||
require.NoError(t, err)
|
||||
|
||||
aggBits := bitfield.NewBitlist(2)
|
||||
aggBits.SetBitAt(0, true)
|
||||
aggBits.SetBitAt(1, true)
|
||||
r, err := helpers.BlockRootAtSlot(beaconState, 0)
|
||||
require.NoError(t, err)
|
||||
att := ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
BeaconBlockRoot: r,
|
||||
Source: ðpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
|
||||
Target: ðpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
|
||||
},
|
||||
AggregationBits: aggBits,
|
||||
}
|
||||
zeroSig := [96]byte{}
|
||||
att.Signature = zeroSig[:]
|
||||
|
||||
ckp := beaconState.CurrentJustifiedCheckpoint()
|
||||
copy(ckp.Root, make([]byte, 32))
|
||||
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(ckp))
|
||||
|
||||
beaconState, err = altair.ProcessAttestationNoVerifySignature(context.Background(), beaconState, att)
|
||||
require.NoError(t, err)
|
||||
|
||||
p, err := beaconState.CurrentEpochParticipation()
|
||||
require.NoError(t, err)
|
||||
|
||||
committee, err := helpers.BeaconCommitteeFromState(beaconState, att.Data.Slot, att.Data.CommitteeIndex)
|
||||
require.NoError(t, err)
|
||||
indices, err := attestationutil.AttestingIndices(att.AggregationBits, committee)
|
||||
require.NoError(t, err)
|
||||
for _, index := range indices {
|
||||
require.Equal(t, true, altair.HasValidatorFlag(p[index], params.BeaconConfig().TimelyHeadFlagIndex))
|
||||
require.Equal(t, true, altair.HasValidatorFlag(p[index], params.BeaconConfig().TimelyTargetFlagIndex))
|
||||
require.Equal(t, true, altair.HasValidatorFlag(p[index], params.BeaconConfig().TimelySourceFlagIndex))
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidatorFlag_AddHas(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
set []uint8
|
||||
expectedTrue []uint8
|
||||
expectedFalse []uint8
|
||||
}{
|
||||
{name: "none",
|
||||
set: []uint8{},
|
||||
expectedTrue: []uint8{},
|
||||
expectedFalse: []uint8{params.BeaconConfig().TimelySourceFlagIndex, params.BeaconConfig().TimelyTargetFlagIndex, params.BeaconConfig().TimelyHeadFlagIndex},
|
||||
},
|
||||
{
|
||||
name: "source",
|
||||
set: []uint8{params.BeaconConfig().TimelySourceFlagIndex},
|
||||
expectedTrue: []uint8{params.BeaconConfig().TimelySourceFlagIndex},
|
||||
expectedFalse: []uint8{params.BeaconConfig().TimelyTargetFlagIndex, params.BeaconConfig().TimelyHeadFlagIndex},
|
||||
},
|
||||
{
|
||||
name: "source, target",
|
||||
set: []uint8{params.BeaconConfig().TimelySourceFlagIndex, params.BeaconConfig().TimelyTargetFlagIndex},
|
||||
expectedTrue: []uint8{params.BeaconConfig().TimelySourceFlagIndex, params.BeaconConfig().TimelyTargetFlagIndex},
|
||||
expectedFalse: []uint8{params.BeaconConfig().TimelyHeadFlagIndex},
|
||||
},
|
||||
{name: "source, target, head",
|
||||
set: []uint8{params.BeaconConfig().TimelySourceFlagIndex, params.BeaconConfig().TimelyTargetFlagIndex, params.BeaconConfig().TimelyHeadFlagIndex},
|
||||
expectedTrue: []uint8{params.BeaconConfig().TimelySourceFlagIndex, params.BeaconConfig().TimelyTargetFlagIndex, params.BeaconConfig().TimelyHeadFlagIndex},
|
||||
expectedFalse: []uint8{},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
b := uint8(0)
|
||||
for _, f := range tt.set {
|
||||
b = altair.AddValidatorFlag(b, f)
|
||||
}
|
||||
for _, f := range tt.expectedFalse {
|
||||
require.Equal(t, false, altair.HasValidatorFlag(b, f))
|
||||
}
|
||||
for _, f := range tt.expectedTrue {
|
||||
require.Equal(t, true, altair.HasValidatorFlag(b, f))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
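The table-driven test above exercises AddValidatorFlag and HasValidatorFlag. As a standalone illustration (not part of the diff) of the bit arithmetic those helpers implement, assuming the Altair flag indices 0, 1 and 2 for timely source, target and head:

package main

import "fmt"

// addFlag and hasFlag mirror the spec's add_flag / has_flag helpers:
// a participation byte is a bitfield indexed by flag index.
func addFlag(b uint8, flagIndex uint8) uint8 { return b | (1 << flagIndex) }
func hasFlag(b uint8, flagIndex uint8) bool  { return b&(1<<flagIndex) != 0 }

func main() {
	b := uint8(0)
	b = addFlag(b, 0) // timely source
	b = addFlag(b, 1) // timely target
	fmt.Println(hasFlag(b, 0), hasFlag(b, 1), hasFlag(b, 2)) // true true false
}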
func TestFuzzProcessAttestationsNoVerify_10000(t *testing.T) {
|
||||
fuzzer := fuzz.NewWithSeed(0)
|
||||
state := ðpb.BeaconStateAltair{}
|
||||
b := ðpb.SignedBeaconBlockAltair{Block: ðpb.BeaconBlockAltair{}}
|
||||
ctx := context.Background()
|
||||
for i := 0; i < 10000; i++ {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(b)
|
||||
if b.Block == nil {
|
||||
b.Block = ðpb.BeaconBlockAltair{}
|
||||
}
|
||||
s, err := stateAltair.InitializeFromProtoUnsafe(state)
|
||||
require.NoError(t, err)
|
||||
wsb, err := wrapper.WrappedAltairSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
r, err := altair.ProcessAttestationsNoVerifySignature(ctx, s, wsb)
|
||||
if err != nil && r != nil {
|
||||
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, b)
|
||||
}
|
||||
}
|
||||
}
|
||||
beacon-chain/core/altair/block.go (new file, 130 lines)
@@ -0,0 +1,130 @@
package altair

import (
	"errors"

	types "github.com/prysmaticlabs/eth2-types"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
	p2pType "github.com/prysmaticlabs/prysm/beacon-chain/p2p/types"
	"github.com/prysmaticlabs/prysm/beacon-chain/state"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/shared/bls"
	"github.com/prysmaticlabs/prysm/shared/bytesutil"
	"github.com/prysmaticlabs/prysm/shared/params"
)

// ProcessSyncAggregate verifies sync committee aggregate signature signing over the previous slot block root.
//
// Spec code:
// def process_sync_aggregate(state: BeaconState, sync_aggregate: SyncAggregate) -> None:
//    # Verify sync committee aggregate signature signing over the previous slot block root
//    committee_pubkeys = state.current_sync_committee.pubkeys
//    participant_pubkeys = [pubkey for pubkey, bit in zip(committee_pubkeys, sync_aggregate.sync_committee_bits) if bit]
//    previous_slot = max(state.slot, Slot(1)) - Slot(1)
//    domain = get_domain(state, DOMAIN_SYNC_COMMITTEE, compute_epoch_at_slot(previous_slot))
//    signing_root = compute_signing_root(get_block_root_at_slot(state, previous_slot), domain)
//    assert eth2_fast_aggregate_verify(participant_pubkeys, signing_root, sync_aggregate.sync_committee_signature)
//
//    # Compute participant and proposer rewards
//    total_active_increments = get_total_active_balance(state) // EFFECTIVE_BALANCE_INCREMENT
//    total_base_rewards = Gwei(get_base_reward_per_increment(state) * total_active_increments)
//    max_participant_rewards = Gwei(total_base_rewards * SYNC_REWARD_WEIGHT // WEIGHT_DENOMINATOR // SLOTS_PER_EPOCH)
//    participant_reward = Gwei(max_participant_rewards // SYNC_COMMITTEE_SIZE)
//    proposer_reward = Gwei(participant_reward * PROPOSER_WEIGHT // (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT))
//
//    # Apply participant and proposer rewards
//    all_pubkeys = [v.pubkey for v in state.validators]
//    committee_indices = [ValidatorIndex(all_pubkeys.index(pubkey)) for pubkey in state.current_sync_committee.pubkeys]
//    for participant_index, participation_bit in zip(committee_indices, sync_aggregate.sync_committee_bits):
//        if participation_bit:
//            increase_balance(state, participant_index, participant_reward)
//            increase_balance(state, get_beacon_proposer_index(state), proposer_reward)
//        else:
//            decrease_balance(state, participant_index, participant_reward)
func ProcessSyncAggregate(state state.BeaconStateAltair, sync *ethpb.SyncAggregate) (state.BeaconStateAltair, error) {
	currentSyncCommittee, err := state.CurrentSyncCommittee()
	if err != nil {
		return nil, err
	}
	committeeKeys := currentSyncCommittee.Pubkeys
	votedKeys := make([]bls.PublicKey, 0, len(committeeKeys))
	votedIndices := make([]types.ValidatorIndex, 0, len(committeeKeys))
	didntVoteIndices := make([]types.ValidatorIndex, 0, len(committeeKeys))
	// Verify sync committee signature.
	for i := uint64(0); i < sync.SyncCommitteeBits.Len(); i++ {
		vIdx, exists := state.ValidatorIndexByPubkey(bytesutil.ToBytes48(committeeKeys[i]))
		// Impossible scenario.
		if !exists {
			return nil, errors.New("validator public key does not exist in state")
		}

		if sync.SyncCommitteeBits.BitAt(i) {
			pubKey, err := bls.PublicKeyFromBytes(committeeKeys[i])
			if err != nil {
				return nil, err
			}
			votedKeys = append(votedKeys, pubKey)
			votedIndices = append(votedIndices, vIdx)
		} else {
			didntVoteIndices = append(didntVoteIndices, vIdx)
		}
	}
	ps := helpers.PrevSlot(state.Slot())
	d, err := helpers.Domain(state.Fork(), helpers.SlotToEpoch(ps), params.BeaconConfig().DomainSyncCommittee, state.GenesisValidatorRoot())
	if err != nil {
		return nil, err
	}
	pbr, err := helpers.BlockRootAtSlot(state, ps)
	if err != nil {
		return nil, err
	}
	sszBytes := p2pType.SSZBytes(pbr)
	r, err := helpers.ComputeSigningRoot(&sszBytes, d)
	if err != nil {
		return nil, err
	}
	sig, err := bls.SignatureFromBytes(sync.SyncCommitteeSignature)
	if err != nil {
		return nil, err
	}
	if !sig.Eth2FastAggregateVerify(votedKeys, r) {
		return nil, errors.New("could not verify sync committee signature")
	}

	// Calculate sync committee and proposer rewards
	activeBalance, err := helpers.TotalActiveBalance(state)
	if err != nil {
		return nil, err
	}
	totalActiveIncrements := activeBalance / params.BeaconConfig().EffectiveBalanceIncrement
	totalBaseRewards := baseRewardPerIncrement(activeBalance) * totalActiveIncrements
	maxParticipantRewards := totalBaseRewards * params.BeaconConfig().SyncRewardWeight / params.BeaconConfig().WeightDenominator / uint64(params.BeaconConfig().SlotsPerEpoch)
	participantReward := maxParticipantRewards / params.BeaconConfig().SyncCommitteeSize
	proposerReward := participantReward * params.BeaconConfig().ProposerWeight / (params.BeaconConfig().WeightDenominator - params.BeaconConfig().ProposerWeight)

	// Apply sync committee rewards.
	earnedProposerReward := uint64(0)
	for _, index := range votedIndices {
		if err := helpers.IncreaseBalance(state, index, participantReward); err != nil {
			return nil, err
		}
		earnedProposerReward += proposerReward
	}
	// Apply proposer rewards.
	proposerIndex, err := helpers.BeaconProposerIndex(state)
	if err != nil {
		return nil, err
	}
	if err := helpers.IncreaseBalance(state, proposerIndex, earnedProposerReward); err != nil {
		return nil, err
	}

	// Apply sync committee penalties.
	for _, index := range didntVoteIndices {
		if err := helpers.DecreaseBalance(state, index, participantReward); err != nil {
			return nil, err
		}
	}

	return state, nil
}
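As a back-of-the-envelope check of the reward arithmetic in ProcessSyncAggregate, the sketch below repeats the same integer math with hard-coded, mainnet-like constants in place of params.BeaconConfig(); the base reward per increment and the total active balance are made-up example values, not outputs of the code above.

package main

import "fmt"

func main() {
	const (
		effectiveBalanceIncrement = uint64(1_000_000_000) // Gwei
		baseRewardPerIncrement    = uint64(14_000)        // hypothetical example value
		syncRewardWeight          = uint64(2)
		proposerWeight            = uint64(8)
		weightDenominator         = uint64(64)
		slotsPerEpoch             = uint64(32)
		syncCommitteeSize         = uint64(512)
	)
	// Hypothetical total active balance: 1,000,000 ETH.
	activeBalance := uint64(1_000_000) * effectiveBalanceIncrement

	totalActiveIncrements := activeBalance / effectiveBalanceIncrement
	totalBaseRewards := baseRewardPerIncrement * totalActiveIncrements
	maxParticipantRewards := totalBaseRewards * syncRewardWeight / weightDenominator / slotsPerEpoch
	participantReward := maxParticipantRewards / syncCommitteeSize
	proposerReward := participantReward * proposerWeight / (weightDenominator - proposerWeight)

	fmt.Println("participant reward (Gwei):", participantReward) // per included sync committee bit
	fmt.Println("proposer reward (Gwei):", proposerReward)       // per included sync committee bit
}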
beacon-chain/core/altair/block_test.go (new file, 91 lines)
@@ -0,0 +1,91 @@
|
||||
package altair_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
p2pType "github.com/prysmaticlabs/prysm/beacon-chain/p2p/types"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bls"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
)
|
||||
|
||||
func TestProcessSyncCommittee_OK(t *testing.T) {
|
||||
beaconState, privKeys := testutil.DeterministicGenesisStateAltair(t, params.BeaconConfig().MaxValidatorsPerCommittee)
|
||||
require.NoError(t, beaconState.SetSlot(1))
|
||||
committee, err := altair.NextSyncCommittee(context.Background(), beaconState)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconState.SetCurrentSyncCommittee(committee))
|
||||
|
||||
syncBits := bitfield.NewBitvector512()
|
||||
for i := range syncBits {
|
||||
syncBits[i] = 0xff
|
||||
}
|
||||
indices, err := altair.NextSyncCommitteeIndices(context.Background(), beaconState)
|
||||
require.NoError(t, err)
|
||||
ps := helpers.PrevSlot(beaconState.Slot())
|
||||
pbr, err := helpers.BlockRootAtSlot(beaconState, ps)
|
||||
require.NoError(t, err)
|
||||
sigs := make([]bls.Signature, len(indices))
|
||||
for i, indice := range indices {
|
||||
b := p2pType.SSZBytes(pbr)
|
||||
sb, err := helpers.ComputeDomainAndSign(beaconState, helpers.CurrentEpoch(beaconState), &b, params.BeaconConfig().DomainSyncCommittee, privKeys[indice])
|
||||
require.NoError(t, err)
|
||||
sig, err := bls.SignatureFromBytes(sb)
|
||||
require.NoError(t, err)
|
||||
sigs[i] = sig
|
||||
}
|
||||
aggregatedSig := bls.AggregateSignatures(sigs).Marshal()
|
||||
syncAggregate := ðpb.SyncAggregate{
|
||||
SyncCommitteeBits: syncBits,
|
||||
SyncCommitteeSignature: aggregatedSig,
|
||||
}
|
||||
|
||||
beaconState, err = altair.ProcessSyncAggregate(beaconState, syncAggregate)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Use a non-sync committee index to compare profitability.
|
||||
syncCommittee := make(map[types.ValidatorIndex]bool)
|
||||
for _, index := range indices {
|
||||
syncCommittee[index] = true
|
||||
}
|
||||
nonSyncIndex := types.ValidatorIndex(params.BeaconConfig().MaxValidatorsPerCommittee + 1)
|
||||
for i := types.ValidatorIndex(0); uint64(i) < params.BeaconConfig().MaxValidatorsPerCommittee; i++ {
|
||||
if !syncCommittee[i] {
|
||||
nonSyncIndex = i
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Sync committee should be more profitable than non sync committee
|
||||
balances := beaconState.Balances()
|
||||
require.Equal(t, true, balances[indices[0]] > balances[nonSyncIndex])
|
||||
|
||||
// Proposer should be more profitable than rest of the sync committee
|
||||
proposerIndex, err := helpers.BeaconProposerIndex(beaconState)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, balances[proposerIndex] > balances[indices[0]])
|
||||
|
||||
// Sync committee should have the same profits, except you are a proposer
|
||||
for i := 1; i < len(indices); i++ {
|
||||
if proposerIndex == indices[i-1] || proposerIndex == indices[i] {
|
||||
continue
|
||||
}
|
||||
require.Equal(t, balances[indices[i-1]], balances[indices[i]])
|
||||
}
|
||||
|
||||
// Increased balance validator count should equal to sync committee count
|
||||
increased := uint64(0)
|
||||
for _, balance := range balances {
|
||||
if balance > params.BeaconConfig().MaxEffectiveBalance {
|
||||
increased++
|
||||
}
|
||||
}
|
||||
require.Equal(t, params.BeaconConfig().SyncCommitteeSize, increased)
|
||||
}
|
||||
beacon-chain/core/altair/deposit.go (new file, 68 lines)
@@ -0,0 +1,68 @@
package altair

import (
	"context"

	"github.com/pkg/errors"
	types "github.com/prysmaticlabs/eth2-types"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
	"github.com/prysmaticlabs/prysm/beacon-chain/state"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/shared/bytesutil"
	"github.com/prysmaticlabs/prysm/shared/params"
)

// ProcessDeposits processes validator deposits for beacon state Altair.
func ProcessDeposits(
	ctx context.Context,
	beaconState state.BeaconStateAltair,
	deposits []*ethpb.Deposit,
) (state.BeaconStateAltair, error) {
	batchVerified, err := blocks.BatchVerifyDepositsSignatures(ctx, deposits)
	if err != nil {
		return nil, err
	}

	for _, deposit := range deposits {
		if deposit == nil || deposit.Data == nil {
			return nil, errors.New("got a nil deposit in block")
		}
		beaconState, err = ProcessDeposit(ctx, beaconState, deposit, batchVerified)
		if err != nil {
			return nil, errors.Wrapf(err, "could not process deposit from %#x", bytesutil.Trunc(deposit.Data.PublicKey))
		}
	}
	return beaconState, nil
}

// ProcessDeposit processes validator deposit for beacon state Altair.
func ProcessDeposit(ctx context.Context, beaconState state.BeaconStateAltair, deposit *ethpb.Deposit, verifySignature bool) (state.BeaconStateAltair, error) {
	beaconState, err := blocks.ProcessDeposit(beaconState, deposit, verifySignature)
	if err != nil {
		return nil, err
	}

	// The last validator in the beacon state validator registry.
	v, err := beaconState.ValidatorAtIndexReadOnly(types.ValidatorIndex(beaconState.NumValidators() - 1))
	if err != nil {
		return nil, err
	}
	// We know a validator is brand new when its status epochs are all far future epoch.
	// In this case, we append 0 to inactivity score and participation bits.
	if v.ActivationEligibilityEpoch() == v.ActivationEpoch() &&
		v.ActivationEpoch() == v.ExitEpoch() &&
		v.ExitEpoch() == v.WithdrawableEpoch() &&
		v.WithdrawableEpoch() == params.BeaconConfig().FarFutureEpoch {
		if err := beaconState.AppendInactivityScore(0); err != nil {
			return nil, err
		}
		if err := beaconState.AppendPreviousParticipationBits(0); err != nil {
			return nil, err
		}
		if err := beaconState.AppendCurrentParticipationBits(0); err != nil {
			return nil, err
		}
	}

	return beaconState, nil
}
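The far-future-epoch check in ProcessDeposit is what distinguishes a brand-new validator (appended by this deposit) from a top-up to an existing one. A minimal standalone sketch of that condition, using a hypothetical plain struct instead of Prysm's ReadOnlyValidator interface:

package main

import "fmt"

const farFutureEpoch = ^uint64(0) // stand-in for FAR_FUTURE_EPOCH

type validator struct {
	ActivationEligibilityEpoch uint64
	ActivationEpoch            uint64
	ExitEpoch                  uint64
	WithdrawableEpoch          uint64
}

// isNewlyDeposited reports whether all status epochs are still FAR_FUTURE_EPOCH,
// i.e. the validator was just appended by this deposit and needs zeroed
// inactivity score and participation bits.
func isNewlyDeposited(v validator) bool {
	return v.ActivationEligibilityEpoch == v.ActivationEpoch &&
		v.ActivationEpoch == v.ExitEpoch &&
		v.ExitEpoch == v.WithdrawableEpoch &&
		v.WithdrawableEpoch == farFutureEpoch
}

func main() {
	fresh := validator{farFutureEpoch, farFutureEpoch, farFutureEpoch, farFutureEpoch}
	active := validator{0, 0, farFutureEpoch, farFutureEpoch}
	fmt.Println(isNewlyDeposited(fresh))  // true
	fmt.Println(isNewlyDeposited(active)) // false
}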
beacon-chain/core/altair/deposit_fuzz_test.go (new file, 48 lines)
@@ -0,0 +1,48 @@
package altair_test

import (
	"context"
	"testing"

	fuzz "github.com/google/gofuzz"
	"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
	stateAltair "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/shared/testutil/require"
)

func TestFuzzProcessDeposits_10000(t *testing.T) {
	fuzzer := fuzz.NewWithSeed(0)
	state := &ethpb.BeaconStateAltair{}
	deposits := make([]*ethpb.Deposit, 100)
	ctx := context.Background()
	for i := 0; i < 10000; i++ {
		fuzzer.Fuzz(state)
		for i := range deposits {
			fuzzer.Fuzz(deposits[i])
		}
		s, err := stateAltair.InitializeFromProtoUnsafe(state)
		require.NoError(t, err)
		r, err := altair.ProcessDeposits(ctx, s, deposits)
		if err != nil && r != nil {
			t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, deposits)
		}
	}
}

func TestFuzzProcessDeposit_10000(t *testing.T) {
	fuzzer := fuzz.NewWithSeed(0)
	state := &ethpb.BeaconStateAltair{}
	deposit := &ethpb.Deposit{}

	for i := 0; i < 10000; i++ {
		fuzzer.Fuzz(state)
		fuzzer.Fuzz(deposit)
		s, err := stateAltair.InitializeFromProtoUnsafe(state)
		require.NoError(t, err)
		r, err := altair.ProcessDeposit(context.Background(), s, deposit, true)
		if err != nil && r != nil {
			t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, deposit)
		}
	}
}
beacon-chain/core/altair/deposit_test.go (new file, 245 lines)
@@ -0,0 +1,245 @@
|
||||
package altair_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
stateAltair "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bls"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
"github.com/prysmaticlabs/prysm/shared/trieutil"
|
||||
)
|
||||
|
||||
func TestProcessDeposits_SameValidatorMultipleDepositsSameBlock(t *testing.T) {
|
||||
// Same validator created 3 valid deposits within the same block
|
||||
dep, _, err := testutil.DeterministicDepositsAndKeysSameValidator(3)
|
||||
require.NoError(t, err)
|
||||
eth1Data, err := testutil.DeterministicEth1Data(len(dep))
|
||||
require.NoError(t, err)
|
||||
registry := []*ethpb.Validator{
|
||||
{
|
||||
PublicKey: []byte{1},
|
||||
WithdrawalCredentials: []byte{1, 2, 3},
|
||||
},
|
||||
}
|
||||
balances := []uint64{0}
|
||||
beaconState, err := stateAltair.InitializeFromProto(ðpb.BeaconStateAltair{
|
||||
Validators: registry,
|
||||
Balances: balances,
|
||||
Eth1Data: eth1Data,
|
||||
Fork: ðpb.Fork{
|
||||
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
newState, err := altair.ProcessDeposits(context.Background(), beaconState, []*ethpb.Deposit{dep[0], dep[1], dep[2]})
|
||||
require.NoError(t, err, "Expected block deposits to process correctly")
|
||||
require.Equal(t, 2, len(newState.Validators()), "Incorrect validator count")
|
||||
}
|
||||
|
||||
func TestProcessDeposits_MerkleBranchFailsVerification(t *testing.T) {
|
||||
deposit := ðpb.Deposit{
|
||||
Data: ðpb.Deposit_Data{
|
||||
PublicKey: bytesutil.PadTo([]byte{1, 2, 3}, 48),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
Signature: make([]byte, 96),
|
||||
},
|
||||
}
|
||||
leaf, err := deposit.Data.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
// We then create a merkle branch for the test.
|
||||
depositTrie, err := trieutil.GenerateTrieFromItems([][]byte{leaf[:]}, params.BeaconConfig().DepositContractTreeDepth)
|
||||
require.NoError(t, err, "Could not generate trie")
|
||||
proof, err := depositTrie.MerkleProof(0)
|
||||
require.NoError(t, err, "Could not generate proof")
|
||||
|
||||
deposit.Proof = proof
|
||||
beaconState, err := stateAltair.InitializeFromProto(ðpb.BeaconStateAltair{
|
||||
Eth1Data: ðpb.Eth1Data{
|
||||
DepositRoot: []byte{0},
|
||||
BlockHash: []byte{1},
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
want := "deposit root did not verify"
|
||||
_, err = altair.ProcessDeposits(context.Background(), beaconState, []*ethpb.Deposit{deposit})
|
||||
require.ErrorContains(t, want, err)
|
||||
}
|
||||
|
||||
func TestProcessDeposits_AddsNewValidatorDeposit(t *testing.T) {
|
||||
dep, _, err := testutil.DeterministicDepositsAndKeys(1)
|
||||
require.NoError(t, err)
|
||||
eth1Data, err := testutil.DeterministicEth1Data(len(dep))
|
||||
require.NoError(t, err)
|
||||
|
||||
registry := []*ethpb.Validator{
|
||||
{
|
||||
PublicKey: []byte{1},
|
||||
WithdrawalCredentials: []byte{1, 2, 3},
|
||||
},
|
||||
}
|
||||
balances := []uint64{0}
|
||||
beaconState, err := stateAltair.InitializeFromProto(ðpb.BeaconStateAltair{
|
||||
Validators: registry,
|
||||
Balances: balances,
|
||||
Eth1Data: eth1Data,
|
||||
Fork: ðpb.Fork{
|
||||
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
newState, err := altair.ProcessDeposits(context.Background(), beaconState, []*ethpb.Deposit{dep[0]})
|
||||
require.NoError(t, err, "Expected block deposits to process correctly")
|
||||
if newState.Balances()[1] != dep[0].Data.Amount {
|
||||
t.Errorf(
|
||||
"Expected state validator balances index 0 to equal %d, received %d",
|
||||
dep[0].Data.Amount,
|
||||
newState.Balances()[1],
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
func TestProcessDeposits_RepeatedDeposit_IncreasesValidatorBalance(t *testing.T) {
|
||||
sk, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
deposit := ðpb.Deposit{
|
||||
Data: ðpb.Deposit_Data{
|
||||
PublicKey: sk.PublicKey().Marshal(),
|
||||
Amount: 1000,
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
Signature: make([]byte, 96),
|
||||
},
|
||||
}
|
||||
sr, err := helpers.ComputeSigningRoot(deposit.Data, bytesutil.ToBytes(3, 32))
|
||||
require.NoError(t, err)
|
||||
sig := sk.Sign(sr[:])
|
||||
deposit.Data.Signature = sig.Marshal()
|
||||
leaf, err := deposit.Data.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
// We then create a merkle branch for the test.
|
||||
depositTrie, err := trieutil.GenerateTrieFromItems([][]byte{leaf[:]}, params.BeaconConfig().DepositContractTreeDepth)
|
||||
require.NoError(t, err, "Could not generate trie")
|
||||
proof, err := depositTrie.MerkleProof(0)
|
||||
require.NoError(t, err, "Could not generate proof")
|
||||
|
||||
deposit.Proof = proof
|
||||
registry := []*ethpb.Validator{
|
||||
{
|
||||
PublicKey: []byte{1, 2, 3},
|
||||
},
|
||||
{
|
||||
PublicKey: sk.PublicKey().Marshal(),
|
||||
WithdrawalCredentials: []byte{1},
|
||||
},
|
||||
}
|
||||
balances := []uint64{0, 50}
|
||||
root := depositTrie.Root()
|
||||
beaconState, err := stateAltair.InitializeFromProto(ðpb.BeaconStateAltair{
|
||||
Validators: registry,
|
||||
Balances: balances,
|
||||
Eth1Data: ðpb.Eth1Data{
|
||||
DepositRoot: root[:],
|
||||
BlockHash: root[:],
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
newState, err := altair.ProcessDeposits(context.Background(), beaconState, []*ethpb.Deposit{deposit})
|
||||
require.NoError(t, err, "Process deposit failed")
|
||||
require.Equal(t, uint64(1000+50), newState.Balances()[1], "Expected balance at index 1 to be 1050")
|
||||
}
|
||||
|
||||
func TestProcessDeposit_AddsNewValidatorDeposit(t *testing.T) {
|
||||
// Similar to TestProcessDeposits_AddsNewValidatorDeposit except that this test directly calls ProcessDeposit
|
||||
dep, _, err := testutil.DeterministicDepositsAndKeys(1)
|
||||
require.NoError(t, err)
|
||||
eth1Data, err := testutil.DeterministicEth1Data(len(dep))
|
||||
require.NoError(t, err)
|
||||
|
||||
registry := []*ethpb.Validator{
|
||||
{
|
||||
PublicKey: []byte{1},
|
||||
WithdrawalCredentials: []byte{1, 2, 3},
|
||||
},
|
||||
}
|
||||
balances := []uint64{0}
|
||||
beaconState, err := stateAltair.InitializeFromProto(ðpb.BeaconStateAltair{
|
||||
Validators: registry,
|
||||
Balances: balances,
|
||||
Eth1Data: eth1Data,
|
||||
Fork: ðpb.Fork{
|
||||
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
newState, err := altair.ProcessDeposit(context.Background(), beaconState, dep[0], true)
|
||||
require.NoError(t, err, "Process deposit failed")
|
||||
require.Equal(t, 2, len(newState.Validators()), "Expected validator list to have length 2")
|
||||
require.Equal(t, 2, len(newState.Balances()), "Expected validator balances list to have length 2")
|
||||
if newState.Balances()[1] != dep[0].Data.Amount {
|
||||
t.Errorf(
|
||||
"Expected state validator balances index 1 to equal %d, received %d",
|
||||
dep[0].Data.Amount,
|
||||
newState.Balances()[1],
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
func TestProcessDeposit_SkipsInvalidDeposit(t *testing.T) {
|
||||
// Same test settings as in TestProcessDeposit_AddsNewValidatorDeposit, except that we use an invalid signature
|
||||
dep, _, err := testutil.DeterministicDepositsAndKeys(1)
|
||||
require.NoError(t, err)
|
||||
dep[0].Data.Signature = make([]byte, 96)
|
||||
trie, _, err := testutil.DepositTrieFromDeposits(dep)
|
||||
require.NoError(t, err)
|
||||
root := trie.Root()
|
||||
eth1Data := ðpb.Eth1Data{
|
||||
DepositRoot: root[:],
|
||||
DepositCount: 1,
|
||||
}
|
||||
registry := []*ethpb.Validator{
|
||||
{
|
||||
PublicKey: []byte{1},
|
||||
WithdrawalCredentials: []byte{1, 2, 3},
|
||||
},
|
||||
}
|
||||
balances := []uint64{0}
|
||||
beaconState, err := stateAltair.InitializeFromProto(ðpb.BeaconStateAltair{
|
||||
Validators: registry,
|
||||
Balances: balances,
|
||||
Eth1Data: eth1Data,
|
||||
Fork: ðpb.Fork{
|
||||
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
newState, err := altair.ProcessDeposit(context.Background(), beaconState, dep[0], true)
|
||||
require.NoError(t, err, "Expected invalid block deposit to be ignored without error")
|
||||
|
||||
if newState.Eth1DepositIndex() != 1 {
|
||||
t.Errorf(
|
||||
"Expected Eth1DepositIndex to be increased by 1 after processing an invalid deposit, received change: %v",
|
||||
newState.Eth1DepositIndex(),
|
||||
)
|
||||
}
|
||||
if len(newState.Validators()) != 1 {
|
||||
t.Errorf("Expected validator list to have length 1, received: %v", len(newState.Validators()))
|
||||
}
|
||||
if len(newState.Balances()) != 1 {
|
||||
t.Errorf("Expected validator balances list to have length 1, received: %v", len(newState.Balances()))
|
||||
}
|
||||
if newState.Balances()[0] != 0 {
|
||||
t.Errorf("Expected validator balance at index 0 to stay 0, received: %v", newState.Balances()[0])
|
||||
}
|
||||
}
|
||||
beacon-chain/core/altair/epoch_precompute.go (new file, 257 lines)
@@ -0,0 +1,257 @@
|
||||
package altair
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/shared/mathutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// InitializeEpochValidators gets called at the beginning of process epoch cycle to return
|
||||
// precomputed instances of validator attesting records and total
|
||||
// balances attested in an epoch.
|
||||
func InitializeEpochValidators(ctx context.Context, st state.BeaconStateAltair) ([]*precompute.Validator, *precompute.Balance, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "altair.InitializeEpochValidators")
|
||||
defer span.End()
|
||||
pValidators := make([]*precompute.Validator, st.NumValidators())
|
||||
bal := &precompute.Balance{}
|
||||
prevEpoch := helpers.PrevEpoch(st)
|
||||
|
||||
inactivityScores, err := st.InactivityScores()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// This shouldn't happen with a correct beacon state,
|
||||
// but we check anyway to defend against index out of bounds panics.
|
||||
if st.NumValidators() > len(inactivityScores) {
|
||||
return nil, nil, errors.New("num of validators can't be greater than length of inactivity scores")
|
||||
}
|
||||
if err := st.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
|
||||
// Was validator withdrawable or slashed
|
||||
withdrawable := prevEpoch+1 >= val.WithdrawableEpoch()
|
||||
pVal := &precompute.Validator{
|
||||
IsSlashed: val.Slashed(),
|
||||
IsWithdrawableCurrentEpoch: withdrawable,
|
||||
CurrentEpochEffectiveBalance: val.EffectiveBalance(),
|
||||
InactivityScore: inactivityScores[idx],
|
||||
}
|
||||
// Validator active current epoch
|
||||
if helpers.IsActiveValidatorUsingTrie(val, helpers.CurrentEpoch(st)) {
|
||||
pVal.IsActiveCurrentEpoch = true
|
||||
bal.ActiveCurrentEpoch += val.EffectiveBalance()
|
||||
}
|
||||
// Validator active previous epoch
|
||||
if helpers.IsActiveValidatorUsingTrie(val, prevEpoch) {
|
||||
pVal.IsActivePrevEpoch = true
|
||||
bal.ActivePrevEpoch += val.EffectiveBalance()
|
||||
}
|
||||
|
||||
pValidators[idx] = pVal
|
||||
return nil
|
||||
}); err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not initialize epoch validator")
|
||||
}
|
||||
return pValidators, bal, nil
|
||||
}
|
||||
|
||||
// ProcessInactivityScores of beacon chain. This updates inactivity scores of beacon chain and
|
||||
// updates the precompute validator struct for later processing.
|
||||
func ProcessInactivityScores(
|
||||
ctx context.Context,
|
||||
state state.BeaconState,
|
||||
vals []*precompute.Validator,
|
||||
) (state.BeaconState, []*precompute.Validator, error) {
|
||||
if helpers.CurrentEpoch(state) == params.BeaconConfig().GenesisEpoch {
|
||||
return state, vals, nil
|
||||
}
|
||||
|
||||
inactivityScores, err := state.InactivityScores()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
for i, v := range vals {
|
||||
if v.IsPrevEpochTargetAttester && !v.IsSlashed {
|
||||
// Decrease inactivity score when validator gets target correct.
|
||||
if v.InactivityScore > 0 {
|
||||
score := uint64(1)
|
||||
if score > v.InactivityScore {
|
||||
score = v.InactivityScore
|
||||
}
|
||||
v.InactivityScore -= score
|
||||
}
|
||||
} else {
|
||||
v.InactivityScore += params.BeaconConfig().InactivityScoreBias
|
||||
}
|
||||
if !helpers.IsInInactivityLeak(helpers.PrevEpoch(state), state.FinalizedCheckpointEpoch()) {
|
||||
score := params.BeaconConfig().InactivityScoreRecoveryRate
|
||||
if score > v.InactivityScore {
|
||||
score = v.InactivityScore
|
||||
}
|
||||
v.InactivityScore -= score
|
||||
|
||||
}
|
||||
inactivityScores[i] = v.InactivityScore
|
||||
}
|
||||
|
||||
if err := state.SetInactivityScores(inactivityScores); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return state, vals, nil
|
||||
}
|
||||
|
||||
// ProcessEpochParticipation processes the epoch participation in state and updates individual validator's pre computes,
|
||||
// it also tracks and updates epoch attesting balances.
|
||||
func ProcessEpochParticipation(
|
||||
ctx context.Context,
|
||||
state state.BeaconState,
|
||||
bal *precompute.Balance,
|
||||
vals []*precompute.Validator,
|
||||
) ([]*precompute.Validator, *precompute.Balance, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "altair.ProcessEpochParticipation")
|
||||
defer span.End()
|
||||
|
||||
cp, err := state.CurrentEpochParticipation()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
for i, b := range cp {
|
||||
if HasValidatorFlag(b, params.BeaconConfig().TimelyTargetFlagIndex) {
|
||||
vals[i].IsCurrentEpochTargetAttester = true
|
||||
}
|
||||
}
|
||||
pp, err := state.PreviousEpochParticipation()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
for i, b := range pp {
|
||||
if HasValidatorFlag(b, params.BeaconConfig().TimelySourceFlagIndex) {
|
||||
vals[i].IsPrevEpochAttester = true
|
||||
}
|
||||
if HasValidatorFlag(b, params.BeaconConfig().TimelyTargetFlagIndex) {
|
||||
vals[i].IsPrevEpochTargetAttester = true
|
||||
}
|
||||
if HasValidatorFlag(b, params.BeaconConfig().TimelyHeadFlagIndex) {
|
||||
vals[i].IsPrevEpochHeadAttester = true
|
||||
}
|
||||
}
|
||||
bal = precompute.UpdateBalance(vals, bal)
|
||||
return vals, bal, nil
|
||||
}
|
||||
|
||||
// ProcessRewardsAndPenaltiesPrecompute processes the rewards and penalties of individual validator.
|
||||
// This is an optimized version that passes in precomputed validator attesting records and total epoch balances.
|
||||
func ProcessRewardsAndPenaltiesPrecompute(
|
||||
state state.BeaconStateAltair,
|
||||
bal *precompute.Balance,
|
||||
vals []*precompute.Validator,
|
||||
) (state.BeaconStateAltair, error) {
|
||||
// Don't process rewards and penalties in genesis epoch.
|
||||
if helpers.CurrentEpoch(state) == 0 {
|
||||
return state, nil
|
||||
}
|
||||
|
||||
numOfVals := state.NumValidators()
|
||||
// Guard against out-of-bounds access when using the validator balance precompute.
|
||||
if len(vals) != numOfVals || len(vals) != state.BalancesLength() {
|
||||
return state, errors.New("validator registries not the same length as state's validator registries")
|
||||
}
|
||||
|
||||
attsRewards, attsPenalties, err := AttestationsDelta(state, bal, vals)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get attestation delta")
|
||||
}
|
||||
|
||||
balances := state.Balances()
|
||||
for i := 0; i < numOfVals; i++ {
|
||||
vals[i].BeforeEpochTransitionBalance = balances[i]
|
||||
|
||||
// Compute the post balance of the validator after accounting for the
|
||||
// attester and proposer rewards and penalties.
|
||||
balances[i] = helpers.IncreaseBalanceWithVal(balances[i], attsRewards[i])
|
||||
balances[i] = helpers.DecreaseBalanceWithVal(balances[i], attsPenalties[i])
|
||||
|
||||
vals[i].AfterEpochTransitionBalance = balances[i]
|
||||
}
|
||||
|
||||
if err := state.SetBalances(balances); err != nil {
|
||||
return nil, errors.Wrap(err, "could not set validator balances")
|
||||
}
|
||||
|
||||
return state, nil
|
||||
}
|
||||
|
||||
// AttestationsDelta computes and returns the rewards and penalties differences for individual validators based on the
|
||||
// voting records.
|
||||
func AttestationsDelta(state state.BeaconStateAltair, bal *precompute.Balance, vals []*precompute.Validator) (rewards, penalties []uint64, err error) {
|
||||
numOfVals := state.NumValidators()
|
||||
rewards = make([]uint64, numOfVals)
|
||||
penalties = make([]uint64, numOfVals)
|
||||
prevEpoch := helpers.PrevEpoch(state)
|
||||
finalizedEpoch := state.FinalizedCheckpointEpoch()
|
||||
|
||||
for i, v := range vals {
|
||||
rewards[i], penalties[i] = attestationDelta(bal, v, prevEpoch, finalizedEpoch)
|
||||
}
|
||||
|
||||
return rewards, penalties, nil
|
||||
}
|
||||
|
||||
func attestationDelta(bal *precompute.Balance, v *precompute.Validator, prevEpoch, finalizedEpoch types.Epoch) (r, p uint64) {
|
||||
eligible := v.IsActivePrevEpoch || (v.IsSlashed && !v.IsWithdrawableCurrentEpoch)
|
||||
if !eligible || bal.ActiveCurrentEpoch == 0 {
|
||||
return 0, 0
|
||||
}
|
||||
|
||||
ebi := params.BeaconConfig().EffectiveBalanceIncrement
|
||||
eb := v.CurrentEpochEffectiveBalance
|
||||
br := (eb / ebi) * (ebi * params.BeaconConfig().BaseRewardFactor / mathutil.IntegerSquareRoot(bal.ActiveCurrentEpoch))
|
||||
activeCurrentEpochIncrements := bal.ActiveCurrentEpoch / ebi
|
||||
|
||||
r, p = uint64(0), uint64(0)
|
||||
// Process source reward / penalty
|
||||
if v.IsPrevEpochAttester && !v.IsSlashed {
|
||||
if !helpers.IsInInactivityLeak(prevEpoch, finalizedEpoch) {
|
||||
rewardNumerator := br * params.BeaconConfig().TimelySourceWeight * (bal.PrevEpochAttested / ebi)
|
||||
r += rewardNumerator / (activeCurrentEpochIncrements * params.BeaconConfig().WeightDenominator)
|
||||
}
|
||||
} else {
|
||||
p += br * params.BeaconConfig().TimelySourceWeight / params.BeaconConfig().WeightDenominator
|
||||
}
|
||||
|
||||
// Process target reward / penalty
|
||||
if v.IsPrevEpochTargetAttester && !v.IsSlashed {
|
||||
if !helpers.IsInInactivityLeak(prevEpoch, finalizedEpoch) {
|
||||
rewardNumerator := br * params.BeaconConfig().TimelyTargetWeight * (bal.PrevEpochTargetAttested / ebi)
|
||||
r += rewardNumerator / (activeCurrentEpochIncrements * params.BeaconConfig().WeightDenominator)
|
||||
}
|
||||
} else {
|
||||
p += br * params.BeaconConfig().TimelyTargetWeight / params.BeaconConfig().WeightDenominator
|
||||
}
|
||||
|
||||
// Process head reward / penalty
|
||||
if v.IsPrevEpochHeadAttester && !v.IsSlashed {
|
||||
if !helpers.IsInInactivityLeak(prevEpoch, finalizedEpoch) {
|
||||
rewardNumerator := br * params.BeaconConfig().TimelyHeadWeight * (bal.PrevEpochHeadAttested / ebi)
|
||||
r += rewardNumerator / (activeCurrentEpochIncrements * params.BeaconConfig().WeightDenominator)
|
||||
}
|
||||
}
|
||||
|
||||
// Process finality delay penalty
|
||||
// Apply an additional penalty to validators that did not vote on the correct target or slashed.
|
||||
if !v.IsPrevEpochTargetAttester || v.IsSlashed {
|
||||
penaltyNumerator := eb * v.InactivityScore
|
||||
penaltyDenominator := params.BeaconConfig().InactivityScoreBias * params.BeaconConfig().InactivityPenaltyQuotientAltair
|
||||
p += penaltyNumerator / penaltyDenominator
|
||||
}
|
||||
|
||||
return r, p
|
||||
}
|
||||
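attestationDelta above scales the base reward by the participation flag weights and by the fraction of active balance that attested. The sketch below reproduces just the timely-source branch with hard-coded, mainnet-like constants and made-up balances (math.Sqrt stands in for mathutil.IntegerSquareRoot for brevity); it is an illustration, not part of the diff.

package main

import (
	"fmt"
	"math"
)

func main() {
	const (
		ebi                = uint64(1_000_000_000) // EFFECTIVE_BALANCE_INCREMENT in Gwei
		baseRewardFactor   = uint64(64)
		timelySourceWeight = uint64(14)
		weightDenominator  = uint64(64)
	)
	effectiveBalance := uint64(32) * ebi             // one 32 ETH validator
	activeCurrentEpoch := uint64(1_000_000) * ebi    // hypothetical total active balance
	prevEpochSourceAttested := uint64(900_000) * ebi // hypothetical balance that voted source in time

	// br := (eb / ebi) * (ebi * BaseRewardFactor / sqrt(totalActiveBalance))
	baseReward := (effectiveBalance / ebi) * (ebi * baseRewardFactor / uint64(math.Sqrt(float64(activeCurrentEpoch))))
	activeIncrements := activeCurrentEpoch / ebi

	rewardNumerator := baseReward * timelySourceWeight * (prevEpochSourceAttested / ebi)
	sourceReward := rewardNumerator / (activeIncrements * weightDenominator)
	fmt.Println("timely-source reward (Gwei):", sourceReward)
}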
beacon-chain/core/altair/epoch_precompute_test.go (new file, 283 lines)
@@ -0,0 +1,283 @@
|
||||
package altair
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
stateAltair "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
)
|
||||
|
||||
func TestInitializeEpochValidators_Ok(t *testing.T) {
|
||||
ffe := params.BeaconConfig().FarFutureEpoch
|
||||
s, err := stateAltair.InitializeFromProto(ðpb.BeaconStateAltair{
|
||||
Slot: params.BeaconConfig().SlotsPerEpoch,
|
||||
// Validator 0 is slashed
|
||||
// Validator 1 is withdrawable
|
||||
// Validator 2 is active prev epoch and current epoch
|
||||
// Validator 3 is active prev epoch
|
||||
Validators: []*ethpb.Validator{
|
||||
{Slashed: true, WithdrawableEpoch: ffe, EffectiveBalance: 100},
|
||||
{EffectiveBalance: 100},
|
||||
{WithdrawableEpoch: ffe, ExitEpoch: ffe, EffectiveBalance: 100},
|
||||
{WithdrawableEpoch: ffe, ExitEpoch: 1, EffectiveBalance: 100},
|
||||
},
|
||||
InactivityScores: []uint64{0, 1, 2, 3},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
v, b, err := InitializeEpochValidators(context.Background(), s)
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, &precompute.Validator{
|
||||
IsSlashed: true,
|
||||
CurrentEpochEffectiveBalance: 100,
|
||||
InactivityScore: 0,
|
||||
}, v[0], "Incorrect validator 0 status")
|
||||
assert.DeepEqual(t, &precompute.Validator{
|
||||
IsWithdrawableCurrentEpoch: true,
|
||||
CurrentEpochEffectiveBalance: 100,
|
||||
InactivityScore: 1,
|
||||
}, v[1], "Incorrect validator 1 status")
|
||||
assert.DeepEqual(t, &precompute.Validator{
|
||||
IsActivePrevEpoch: true,
|
||||
IsActiveCurrentEpoch: true,
|
||||
CurrentEpochEffectiveBalance: 100,
|
||||
InactivityScore: 2,
|
||||
}, v[2], "Incorrect validator 2 status")
|
||||
assert.DeepEqual(t, &precompute.Validator{
|
||||
IsActivePrevEpoch: true,
|
||||
CurrentEpochEffectiveBalance: 100,
|
||||
InactivityScore: 3,
|
||||
}, v[3], "Incorrect validator 3 status")
|
||||
|
||||
wantedBalances := &precompute.Balance{
|
||||
ActiveCurrentEpoch: 100,
|
||||
ActivePrevEpoch: 200,
|
||||
}
|
||||
assert.DeepEqual(t, wantedBalances, b, "Incorrect wanted balance")
|
||||
}
|
||||
|
||||
func TestInitializeEpochValidators_BadState(t *testing.T) {
|
||||
s, err := stateAltair.InitializeFromProto(ðpb.BeaconStateAltair{
|
||||
Validators: []*ethpb.Validator{{}},
|
||||
InactivityScores: []uint64{},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
_, _, err = InitializeEpochValidators(context.Background(), s)
|
||||
require.ErrorContains(t, "num of validators can't be greater than length of inactivity scores", err)
|
||||
}
|
||||
|
||||
func TestProcessEpochParticipation(t *testing.T) {
|
||||
s, err := testState()
|
||||
require.NoError(t, err)
|
||||
validators, balance, err := InitializeEpochValidators(context.Background(), s)
|
||||
require.NoError(t, err)
|
||||
validators, balance, err = ProcessEpochParticipation(context.Background(), s, balance, validators)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, &precompute.Validator{
|
||||
IsActiveCurrentEpoch: true,
|
||||
IsActivePrevEpoch: true,
|
||||
IsWithdrawableCurrentEpoch: true,
|
||||
CurrentEpochEffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
}, validators[0])
|
||||
require.DeepEqual(t, &precompute.Validator{
|
||||
IsActiveCurrentEpoch: true,
|
||||
IsActivePrevEpoch: true,
|
||||
IsWithdrawableCurrentEpoch: true,
|
||||
CurrentEpochEffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
IsPrevEpochAttester: true,
|
||||
}, validators[1])
|
||||
require.DeepEqual(t, &precompute.Validator{
|
||||
IsActiveCurrentEpoch: true,
|
||||
IsActivePrevEpoch: true,
|
||||
IsWithdrawableCurrentEpoch: true,
|
||||
CurrentEpochEffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
IsPrevEpochAttester: true,
|
||||
IsCurrentEpochTargetAttester: true,
|
||||
IsPrevEpochTargetAttester: true,
|
||||
}, validators[2])
|
||||
require.DeepEqual(t, &precompute.Validator{
|
||||
IsActiveCurrentEpoch: true,
|
||||
IsActivePrevEpoch: true,
|
||||
IsWithdrawableCurrentEpoch: true,
|
||||
CurrentEpochEffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
IsPrevEpochAttester: true,
|
||||
IsCurrentEpochTargetAttester: true,
|
||||
IsPrevEpochTargetAttester: true,
|
||||
IsPrevEpochHeadAttester: true,
|
||||
}, validators[3])
|
||||
require.Equal(t, params.BeaconConfig().MaxEffectiveBalance*3, balance.PrevEpochAttested)
|
||||
require.Equal(t, balance.CurrentEpochTargetAttested, params.BeaconConfig().MaxEffectiveBalance*2)
|
||||
require.Equal(t, balance.PrevEpochTargetAttested, params.BeaconConfig().MaxEffectiveBalance*2)
|
||||
require.Equal(t, balance.PrevEpochHeadAttested, params.BeaconConfig().MaxEffectiveBalance*1)
|
||||
}
|
||||
|
||||
func TestAttestationsDelta(t *testing.T) {
|
||||
s, err := testState()
|
||||
require.NoError(t, err)
|
||||
validators, balance, err := InitializeEpochValidators(context.Background(), s)
|
||||
require.NoError(t, err)
|
||||
validators, balance, err = ProcessEpochParticipation(context.Background(), s, balance, validators)
|
||||
require.NoError(t, err)
|
||||
rewards, penalties, err := AttestationsDelta(s, balance, validators)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Reward amount should increase as validator index increases due to setup.
|
||||
for i := 1; i < len(rewards); i++ {
|
||||
require.Equal(t, true, rewards[i] > rewards[i-1])
|
||||
}
|
||||
|
||||
// Penalty amount should decrease as validator index increases due to setup.
|
||||
for i := 1; i < len(penalties); i++ {
|
||||
require.Equal(t, true, penalties[i] <= penalties[i-1])
|
||||
}
|
||||
|
||||
// First index should have 0 reward.
|
||||
require.Equal(t, uint64(0), rewards[0])
|
||||
// Last index should have 0 penalty.
|
||||
require.Equal(t, uint64(0), penalties[len(penalties)-1])
|
||||
}
|
||||
|
||||
func TestProcessRewardsAndPenaltiesPrecompute_Ok(t *testing.T) {
|
||||
s, err := testState()
|
||||
require.NoError(t, err)
|
||||
validators, balance, err := InitializeEpochValidators(context.Background(), s)
|
||||
require.NoError(t, err)
|
||||
validators, balance, err = ProcessEpochParticipation(context.Background(), s, balance, validators)
|
||||
require.NoError(t, err)
|
||||
s, err = ProcessRewardsAndPenaltiesPrecompute(s, balance, validators)
|
||||
require.NoError(t, err)
|
||||
|
||||
balances := s.Balances()
|
||||
// Reward amount should increase as validator index increases due to setup.
|
||||
for i := 1; i < len(balances); i++ {
|
||||
require.Equal(t, true, balances[i] >= balances[i-1])
|
||||
}
|
||||
|
||||
wanted := make([]uint64, s.NumValidators())
|
||||
rewards, penalties, err := AttestationsDelta(s, balance, validators)
|
||||
require.NoError(t, err)
|
||||
for i := range rewards {
|
||||
wanted[i] += rewards[i]
|
||||
}
|
||||
for i := range penalties {
|
||||
if wanted[i] > penalties[i] {
|
||||
wanted[i] -= penalties[i]
|
||||
} else {
|
||||
wanted[i] = 0
|
||||
}
|
||||
}
|
||||
require.DeepEqual(t, wanted, balances)
|
||||
}
|
||||
|
||||
func TestProcessRewardsAndPenaltiesPrecompute_InactivityLeak(t *testing.T) {
|
||||
s, err := testState()
|
||||
require.NoError(t, err)
|
||||
validators, balance, err := InitializeEpochValidators(context.Background(), s)
|
||||
require.NoError(t, err)
|
||||
validators, balance, err = ProcessEpochParticipation(context.Background(), s, balance, validators)
|
||||
require.NoError(t, err)
|
||||
sCopy := s.Copy()
|
||||
s, err = ProcessRewardsAndPenaltiesPrecompute(s, balance, validators)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Copied state where finality happened long ago
|
||||
require.NoError(t, sCopy.SetSlot(params.BeaconConfig().SlotsPerEpoch*1000))
|
||||
sCopy, err = ProcessRewardsAndPenaltiesPrecompute(sCopy, balance, validators)
|
||||
require.NoError(t, err)
|
||||
|
||||
balances := s.Balances()
|
||||
inactivityBalances := sCopy.Balances()
|
||||
// Balances should be much less in inactivity leak cases.
|
||||
for i := 0; i < len(balances); i++ {
|
||||
require.Equal(t, true, balances[i] >= inactivityBalances[i])
|
||||
}
|
||||
}
|
||||
|
||||
func TestProcessInactivityScores_CanProcess(t *testing.T) {
|
||||
s, err := testState()
|
||||
require.NoError(t, err)
|
||||
defaultScore := uint64(5)
|
||||
require.NoError(t, s.SetInactivityScores([]uint64{defaultScore, defaultScore, defaultScore, defaultScore}))
|
||||
require.NoError(t, s.SetSlot(params.BeaconConfig().SlotsPerEpoch*types.Slot(params.BeaconConfig().MinEpochsToInactivityPenalty+2)))
|
||||
validators, balance, err := InitializeEpochValidators(context.Background(), s)
|
||||
require.NoError(t, err)
|
||||
validators, _, err = ProcessEpochParticipation(context.Background(), s, balance, validators)
|
||||
require.NoError(t, err)
|
||||
s, _, err = ProcessInactivityScores(context.Background(), s, validators)
|
||||
require.NoError(t, err)
|
||||
inactivityScores, err := s.InactivityScores()
|
||||
require.NoError(t, err)
|
||||
// V0 and V1 didn't vote head. V2 and V3 did.
|
||||
require.Equal(t, defaultScore+params.BeaconConfig().InactivityScoreBias, inactivityScores[0])
|
||||
require.Equal(t, defaultScore+params.BeaconConfig().InactivityScoreBias, inactivityScores[1])
|
||||
require.Equal(t, defaultScore-1, inactivityScores[2])
|
||||
require.Equal(t, defaultScore-1, inactivityScores[3])
|
||||
}
|
||||
|
||||
func TestProcessRewardsAndPenaltiesPrecompute_GenesisEpoch(t *testing.T) {
|
||||
s, err := testState()
|
||||
require.NoError(t, err)
|
||||
validators, balance, err := InitializeEpochValidators(context.Background(), s)
|
||||
require.NoError(t, err)
|
||||
validators, balance, err = ProcessEpochParticipation(context.Background(), s, balance, validators)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.SetSlot(0))
|
||||
s, err = ProcessRewardsAndPenaltiesPrecompute(s, balance, validators)
|
||||
require.NoError(t, err)
|
||||
|
||||
balances := s.Balances()
|
||||
// Nothing should happen at genesis epoch
|
||||
for i := 1; i < len(balances); i++ {
|
||||
require.Equal(t, true, balances[i] == balances[i-1])
|
||||
}
|
||||
}
|
||||
|
||||
func TestProcessRewardsAndPenaltiesPrecompute_BadState(t *testing.T) {
|
||||
s, err := testState()
|
||||
require.NoError(t, err)
|
||||
validators, balance, err := InitializeEpochValidators(context.Background(), s)
|
||||
require.NoError(t, err)
|
||||
_, balance, err = ProcessEpochParticipation(context.Background(), s, balance, validators)
|
||||
require.NoError(t, err)
|
||||
_, err = ProcessRewardsAndPenaltiesPrecompute(s, balance, []*precompute.Validator{})
|
||||
require.ErrorContains(t, "validator registries not the same length as state's validator registries", err)
|
||||
}
|
||||
|
||||
func testState() (state.BeaconState, error) {
|
||||
generateParticipation := func(flags ...uint8) byte {
|
||||
b := byte(0)
|
||||
for _, flag := range flags {
|
||||
b = AddValidatorFlag(b, flag)
|
||||
}
|
||||
return b
|
||||
}
|
||||
return stateAltair.InitializeFromProto(ðpb.BeaconStateAltair{
|
||||
Slot: 2 * params.BeaconConfig().SlotsPerEpoch,
|
||||
Validators: []*ethpb.Validator{
|
||||
{EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance, ExitEpoch: params.BeaconConfig().FarFutureEpoch},
|
||||
{EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance, ExitEpoch: params.BeaconConfig().FarFutureEpoch},
|
||||
{EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance, ExitEpoch: params.BeaconConfig().FarFutureEpoch},
|
||||
{EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance, ExitEpoch: params.BeaconConfig().FarFutureEpoch},
|
||||
},
|
||||
CurrentEpochParticipation: []byte{
|
||||
0,
|
||||
generateParticipation(params.BeaconConfig().TimelySourceFlagIndex),
|
||||
generateParticipation(params.BeaconConfig().TimelySourceFlagIndex, params.BeaconConfig().TimelyTargetFlagIndex),
|
||||
generateParticipation(params.BeaconConfig().TimelySourceFlagIndex, params.BeaconConfig().TimelyTargetFlagIndex, params.BeaconConfig().TimelyHeadFlagIndex),
|
||||
},
|
||||
PreviousEpochParticipation: []byte{
|
||||
0,
|
||||
generateParticipation(params.BeaconConfig().TimelySourceFlagIndex),
|
||||
generateParticipation(params.BeaconConfig().TimelySourceFlagIndex, params.BeaconConfig().TimelyTargetFlagIndex),
|
||||
generateParticipation(params.BeaconConfig().TimelySourceFlagIndex, params.BeaconConfig().TimelyTargetFlagIndex, params.BeaconConfig().TimelyHeadFlagIndex),
|
||||
},
|
||||
InactivityScores: []uint64{0, 0, 0, 0},
|
||||
Balances: []uint64{0, 0, 0, 0},
|
||||
})
|
||||
}
|
||||
beacon-chain/core/altair/epoch_spec.go (new file, 116 lines)
@@ -0,0 +1,116 @@
|
||||
package altair
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/mathutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
)
|
||||
|
||||
// ProcessSyncCommitteeUpdates processes sync committee updates for the beacon state.
|
||||
//
|
||||
// Spec code:
|
||||
// def process_sync_committee_updates(state: BeaconState) -> None:
|
||||
// next_epoch = get_current_epoch(state) + Epoch(1)
|
||||
// if next_epoch % EPOCHS_PER_SYNC_COMMITTEE_PERIOD == 0:
|
||||
// state.current_sync_committee = state.next_sync_committee
|
||||
// state.next_sync_committee = get_next_sync_committee(state)
|
||||
func ProcessSyncCommitteeUpdates(ctx context.Context, beaconState state.BeaconStateAltair) (state.BeaconStateAltair, error) {
|
||||
nextEpoch := helpers.NextEpoch(beaconState)
|
||||
if nextEpoch%params.BeaconConfig().EpochsPerSyncCommitteePeriod == 0 {
|
||||
currentSyncCommittee, err := beaconState.NextSyncCommittee()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := beaconState.SetCurrentSyncCommittee(currentSyncCommittee); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
nextCommittee, err := NextSyncCommittee(ctx, beaconState)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := beaconState.SetNextSyncCommittee(nextCommittee); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := helpers.UpdateSyncCommitteeCache(beaconState); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return beaconState, nil
|
||||
}
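A quick, hedged illustration of when the rotation above fires (not part of the diff; EPOCHS_PER_SYNC_COMMITTEE_PERIOD is assumed to be the mainnet value 256): the committees only rotate when the next epoch lands on a period boundary.

package main

import "fmt"

// Minimal sketch of the rotation condition in ProcessSyncCommitteeUpdates.
// The period length is an assumption (mainnet value 256), not read from config.
func main() {
	const epochsPerSyncCommitteePeriod = uint64(256)
	for _, currentEpoch := range []uint64{0, 254, 255, 256, 511} {
		nextEpoch := currentEpoch + 1
		rotate := nextEpoch%epochsPerSyncCommitteePeriod == 0
		fmt.Printf("current epoch %d -> rotate committees: %v\n", currentEpoch, rotate)
	}
}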
|
||||
|
||||
// ProcessParticipationFlagUpdates processes participation flag updates by rotating current to previous.
|
||||
//
|
||||
// Spec code:
|
||||
// def process_participation_flag_updates(state: BeaconState) -> None:
|
||||
// state.previous_epoch_participation = state.current_epoch_participation
|
||||
// state.current_epoch_participation = [ParticipationFlags(0b0000_0000) for _ in range(len(state.validators))]
|
||||
func ProcessParticipationFlagUpdates(beaconState state.BeaconStateAltair) (state.BeaconStateAltair, error) {
|
||||
c, err := beaconState.CurrentEpochParticipation()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := beaconState.SetPreviousParticipationBits(c); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := beaconState.SetCurrentParticipationBits(make([]byte, beaconState.NumValidators())); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return beaconState, nil
|
||||
}
|
||||
|
||||
// ProcessSlashings processes the slashed validators during epoch processing.
|
||||
// The function is modified to use PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR.
|
||||
//
|
||||
// Spec code:
|
||||
// def process_slashings(state: BeaconState) -> None:
|
||||
// epoch = get_current_epoch(state)
|
||||
// total_balance = get_total_active_balance(state)
|
||||
// adjusted_total_slashing_balance = min(sum(state.slashings) * PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR, total_balance)
|
||||
// for index, validator in enumerate(state.validators):
|
||||
// if validator.slashed and epoch + EPOCHS_PER_SLASHINGS_VECTOR // 2 == validator.withdrawable_epoch:
|
||||
// increment = EFFECTIVE_BALANCE_INCREMENT # Factored out from penalty numerator to avoid uint64 overflow
|
||||
// penalty_numerator = validator.effective_balance // increment * adjusted_total_slashing_balance
|
||||
// penalty = penalty_numerator // total_balance * increment
|
||||
// decrease_balance(state, ValidatorIndex(index), penalty)
|
||||
func ProcessSlashings(state state.BeaconState) (state.BeaconState, error) {
|
||||
currentEpoch := helpers.CurrentEpoch(state)
|
||||
totalBalance, err := helpers.TotalActiveBalance(state)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get total active balance")
|
||||
}
|
||||
|
||||
// Compute slashed balances in the current epoch
|
||||
exitLength := params.BeaconConfig().EpochsPerSlashingsVector
|
||||
|
||||
// Compute the sum of state slashings
|
||||
slashings := state.Slashings()
|
||||
totalSlashing := uint64(0)
|
||||
for _, slashing := range slashings {
|
||||
totalSlashing += slashing
|
||||
}
|
||||
|
||||
// A callback is used here to apply the following actions to all validators
|
||||
// below equally.
|
||||
increment := params.BeaconConfig().EffectiveBalanceIncrement
|
||||
minSlashing := mathutil.Min(totalSlashing*params.BeaconConfig().ProportionalSlashingMultiplierAltair, totalBalance)
|
||||
err = state.ApplyToEveryValidator(func(idx int, val *ethpb.Validator) (bool, *ethpb.Validator, error) {
|
||||
correctEpoch := (currentEpoch + exitLength/2) == val.WithdrawableEpoch
|
||||
if val.Slashed && correctEpoch {
|
||||
penaltyNumerator := val.EffectiveBalance / increment * minSlashing
|
||||
penalty := penaltyNumerator / totalBalance * increment
|
||||
if err := helpers.DecreaseBalance(state, types.ValidatorIndex(idx), penalty); err != nil {
|
||||
return false, val, err
|
||||
}
|
||||
return true, val, nil
|
||||
}
|
||||
return false, val, nil
|
||||
})
|
||||
return state, err
|
||||
}
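As a rough illustration of the penalty arithmetic above (the values below are assumptions chosen for the sketch, not taken from the diff), the integer divisions run in the same order as the spec comment: numerator first, then division by total balance, then multiplication by the increment.

package main

import "fmt"

// Minimal sketch of the Altair slashing penalty arithmetic used in
// ProcessSlashings. All values are illustrative assumptions:
// 100 active validators of 32 ETH, 10 of them slashed, and
// PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR assumed to be 2.
func main() {
	const (
		increment       = uint64(1_000_000_000)     // EFFECTIVE_BALANCE_INCREMENT (Gwei)
		effectiveBal    = uint64(32_000_000_000)    // one validator's effective balance
		totalBalance    = uint64(3_200_000_000_000) // assumed total active balance
		totalSlashing   = uint64(320_000_000_000)   // assumed sum of state.slashings
		slashMultiplier = uint64(2)                 // assumed Altair multiplier
	)
	adjusted := totalSlashing * slashMultiplier
	if adjusted > totalBalance {
		adjusted = totalBalance
	}
	penaltyNumerator := effectiveBal / increment * adjusted
	penalty := penaltyNumerator / totalBalance * increment
	fmt.Println("penalty (Gwei):", penalty) // 6_000_000_000 with these assumptions
}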
|
||||
beacon-chain/core/altair/epoch_spec_test.go (new file, 181 lines)
@@ -0,0 +1,181 @@
|
||||
package altair_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
stateAltair "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
func TestProcessSyncCommitteeUpdates_CanRotate(t *testing.T) {
|
||||
s, _ := testutil.DeterministicGenesisStateAltair(t, params.BeaconConfig().MaxValidatorsPerCommittee)
|
||||
h := ðpb.BeaconBlockHeader{
|
||||
StateRoot: bytesutil.PadTo([]byte{'a'}, 32),
|
||||
ParentRoot: bytesutil.PadTo([]byte{'b'}, 32),
|
||||
BodyRoot: bytesutil.PadTo([]byte{'c'}, 32),
|
||||
}
|
||||
require.NoError(t, s.SetLatestBlockHeader(h))
|
||||
postState, err := altair.ProcessSyncCommitteeUpdates(context.Background(), s)
|
||||
require.NoError(t, err)
|
||||
current, err := postState.CurrentSyncCommittee()
|
||||
require.NoError(t, err)
|
||||
next, err := postState.NextSyncCommittee()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, current, next)
|
||||
|
||||
require.NoError(t, s.SetSlot(params.BeaconConfig().SlotsPerEpoch))
|
||||
postState, err = altair.ProcessSyncCommitteeUpdates(context.Background(), s)
|
||||
require.NoError(t, err)
|
||||
c, err := postState.CurrentSyncCommittee()
|
||||
require.NoError(t, err)
|
||||
n, err := postState.NextSyncCommittee()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, current, c)
|
||||
require.DeepEqual(t, next, n)
|
||||
|
||||
require.NoError(t, s.SetSlot(types.Slot(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*params.BeaconConfig().SlotsPerEpoch-1))
|
||||
postState, err = altair.ProcessSyncCommitteeUpdates(context.Background(), s)
|
||||
require.NoError(t, err)
|
||||
c, err = postState.CurrentSyncCommittee()
|
||||
require.NoError(t, err)
|
||||
n, err = postState.NextSyncCommittee()
|
||||
require.NoError(t, err)
|
||||
require.NotEqual(t, current, c)
|
||||
require.NotEqual(t, next, n)
|
||||
require.DeepEqual(t, next, c)
|
||||
|
||||
// Test boundary condition.
|
||||
slot := params.BeaconConfig().SlotsPerEpoch * types.Slot(helpers.CurrentEpoch(s)+params.BeaconConfig().EpochsPerSyncCommitteePeriod)
|
||||
require.NoError(t, s.SetSlot(slot))
|
||||
boundaryCommittee, err := altair.NextSyncCommittee(context.Background(), s)
|
||||
require.NoError(t, err)
|
||||
require.DeepNotEqual(t, boundaryCommittee, n)
|
||||
}
|
||||
|
||||
func TestProcessParticipationFlagUpdates_CanRotate(t *testing.T) {
|
||||
s, _ := testutil.DeterministicGenesisStateAltair(t, params.BeaconConfig().MaxValidatorsPerCommittee)
|
||||
c, err := s.CurrentEpochParticipation()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, make([]byte, params.BeaconConfig().MaxValidatorsPerCommittee), c)
|
||||
p, err := s.PreviousEpochParticipation()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, make([]byte, params.BeaconConfig().MaxValidatorsPerCommittee), p)
|
||||
|
||||
newC := []byte{'a'}
|
||||
newP := []byte{'b'}
|
||||
require.NoError(t, s.SetCurrentParticipationBits(newC))
|
||||
require.NoError(t, s.SetPreviousParticipationBits(newP))
|
||||
c, err = s.CurrentEpochParticipation()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, newC, c)
|
||||
p, err = s.PreviousEpochParticipation()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, newP, p)
|
||||
|
||||
s, err = altair.ProcessParticipationFlagUpdates(s)
|
||||
require.NoError(t, err)
|
||||
c, err = s.CurrentEpochParticipation()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, make([]byte, params.BeaconConfig().MaxValidatorsPerCommittee), c)
|
||||
p, err = s.PreviousEpochParticipation()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, newC, p)
|
||||
}
|
||||
|
||||
func TestProcessSlashings_NotSlashed(t *testing.T) {
|
||||
base := ðpb.BeaconStateAltair{
|
||||
Slot: 0,
|
||||
Validators: []*ethpb.Validator{{Slashed: true}},
|
||||
Balances: []uint64{params.BeaconConfig().MaxEffectiveBalance},
|
||||
Slashings: []uint64{0, 1e9},
|
||||
}
|
||||
s, err := stateAltair.InitializeFromProto(base)
|
||||
require.NoError(t, err)
|
||||
newState, err := altair.ProcessSlashings(s)
|
||||
require.NoError(t, err)
|
||||
wanted := params.BeaconConfig().MaxEffectiveBalance
|
||||
assert.Equal(t, wanted, newState.Balances()[0], "Unexpected slashed balance")
|
||||
}
|
||||
|
||||
func TestProcessSlashings_SlashedLess(t *testing.T) {
|
||||
tests := []struct {
|
||||
state *ethpb.BeaconStateAltair
|
||||
want uint64
|
||||
}{
|
||||
{
|
||||
state: ðpb.BeaconStateAltair{
|
||||
Validators: []*ethpb.Validator{
|
||||
{Slashed: true,
|
||||
WithdrawableEpoch: params.BeaconConfig().EpochsPerSlashingsVector / 2,
|
||||
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance},
|
||||
{ExitEpoch: params.BeaconConfig().FarFutureEpoch, EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance}},
|
||||
Balances: []uint64{params.BeaconConfig().MaxEffectiveBalance, params.BeaconConfig().MaxEffectiveBalance},
|
||||
Slashings: []uint64{0, 1e9},
|
||||
},
|
||||
want: uint64(30000000000),
|
||||
},
|
||||
{
|
||||
state: ðpb.BeaconStateAltair{
|
||||
Validators: []*ethpb.Validator{
|
||||
{Slashed: true,
|
||||
WithdrawableEpoch: params.BeaconConfig().EpochsPerSlashingsVector / 2,
|
||||
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance},
|
||||
{ExitEpoch: params.BeaconConfig().FarFutureEpoch, EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance},
|
||||
{ExitEpoch: params.BeaconConfig().FarFutureEpoch, EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance},
|
||||
},
|
||||
Balances: []uint64{params.BeaconConfig().MaxEffectiveBalance, params.BeaconConfig().MaxEffectiveBalance},
|
||||
Slashings: []uint64{0, 1e9},
|
||||
},
|
||||
want: uint64(31000000000),
|
||||
},
|
||||
{
|
||||
state: ðpb.BeaconStateAltair{
|
||||
Validators: []*ethpb.Validator{
|
||||
{Slashed: true,
|
||||
WithdrawableEpoch: params.BeaconConfig().EpochsPerSlashingsVector / 2,
|
||||
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance},
|
||||
{ExitEpoch: params.BeaconConfig().FarFutureEpoch, EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance},
|
||||
{ExitEpoch: params.BeaconConfig().FarFutureEpoch, EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance},
|
||||
},
|
||||
Balances: []uint64{params.BeaconConfig().MaxEffectiveBalance, params.BeaconConfig().MaxEffectiveBalance},
|
||||
Slashings: []uint64{0, 2 * 1e9},
|
||||
},
|
||||
want: uint64(30000000000),
|
||||
},
|
||||
{
|
||||
state: ðpb.BeaconStateAltair{
|
||||
Validators: []*ethpb.Validator{
|
||||
{Slashed: true,
|
||||
WithdrawableEpoch: params.BeaconConfig().EpochsPerSlashingsVector / 2,
|
||||
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance - params.BeaconConfig().EffectiveBalanceIncrement},
|
||||
{ExitEpoch: params.BeaconConfig().FarFutureEpoch, EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance - params.BeaconConfig().EffectiveBalanceIncrement}},
|
||||
Balances: []uint64{params.BeaconConfig().MaxEffectiveBalance - params.BeaconConfig().EffectiveBalanceIncrement, params.BeaconConfig().MaxEffectiveBalance - params.BeaconConfig().EffectiveBalanceIncrement},
|
||||
Slashings: []uint64{0, 1e9},
|
||||
},
|
||||
want: uint64(29000000000),
|
||||
},
|
||||
}
|
||||
|
||||
for i, tt := range tests {
|
||||
t.Run(fmt.Sprint(i), func(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
original := proto.Clone(tt.state)
|
||||
s, err := stateAltair.InitializeFromProto(tt.state)
|
||||
require.NoError(t, err)
|
||||
newState, err := altair.ProcessSlashings(s)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, tt.want, newState.Balances()[0], "ProcessSlashings({%v}) = newState; newState.Balances[0] = %d", original, newState.Balances()[0])
|
||||
})
|
||||
}
|
||||
}
|
||||
beacon-chain/core/altair/fork_transition.go (new file, 115 lines)
@@ -0,0 +1,115 @@
|
||||
package altair
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
statealtair "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/attestationutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
)
|
||||
|
||||
// UpgradeToAltair updates the input phase 0 state and returns an Altair-version state.
|
||||
func UpgradeToAltair(ctx context.Context, state state.BeaconState) (state.BeaconStateAltair, error) {
|
||||
epoch := helpers.CurrentEpoch(state)
|
||||
|
||||
s := ðpb.BeaconStateAltair{
|
||||
GenesisTime: state.GenesisTime(),
|
||||
GenesisValidatorsRoot: state.GenesisValidatorRoot(),
|
||||
Slot: state.Slot(),
|
||||
Fork: ðpb.Fork{
|
||||
PreviousVersion: state.Fork().CurrentVersion,
|
||||
CurrentVersion: params.BeaconConfig().AltairForkVersion,
|
||||
Epoch: epoch,
|
||||
},
|
||||
LatestBlockHeader: state.LatestBlockHeader(),
|
||||
BlockRoots: state.BlockRoots(),
|
||||
StateRoots: state.StateRoots(),
|
||||
HistoricalRoots: state.HistoricalRoots(),
|
||||
Eth1Data: state.Eth1Data(),
|
||||
Eth1DataVotes: state.Eth1DataVotes(),
|
||||
Eth1DepositIndex: state.Eth1DepositIndex(),
|
||||
Validators: state.Validators(),
|
||||
Balances: state.Balances(),
|
||||
RandaoMixes: state.RandaoMixes(),
|
||||
Slashings: state.Slashings(),
|
||||
PreviousEpochParticipation: make([]byte, state.NumValidators()),
|
||||
CurrentEpochParticipation: make([]byte, state.NumValidators()),
|
||||
JustificationBits: state.JustificationBits(),
|
||||
PreviousJustifiedCheckpoint: state.PreviousJustifiedCheckpoint(),
|
||||
CurrentJustifiedCheckpoint: state.CurrentJustifiedCheckpoint(),
|
||||
FinalizedCheckpoint: state.FinalizedCheckpoint(),
|
||||
InactivityScores: make([]uint64, state.NumValidators()),
|
||||
}
|
||||
|
||||
newState, err := statealtair.InitializeFromProto(s)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
prevEpochAtts, err := state.PreviousEpochAttestations()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
newState, err = TranslateParticipation(newState, prevEpochAtts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
committee, err := NextSyncCommittee(ctx, newState)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := newState.SetCurrentSyncCommittee(committee); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := newState.SetNextSyncCommittee(committee); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return newState, nil
|
||||
}
|
||||
|
||||
// TranslateParticipation translates pending attestations into participation bits, then inserts the bits into beacon state.
|
||||
// This is a helper function to convert a phase 0 beacon state (pending attestations) to an Altair beacon state (participation bits).
|
||||
func TranslateParticipation(state *statealtair.BeaconState, atts []*ethpb.PendingAttestation) (*statealtair.BeaconState, error) {
|
||||
for _, att := range atts {
|
||||
epochParticipation, err := state.PreviousEpochParticipation()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
participatedFlags, err := attestationParticipationFlagIndices(state, att.Data, att.InclusionDelay)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
committee, err := helpers.BeaconCommitteeFromState(state, att.Data.Slot, att.Data.CommitteeIndex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
indices, err := attestationutil.AttestingIndices(att.AggregationBits, committee)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sourceFlagIndex := params.BeaconConfig().TimelySourceFlagIndex
|
||||
targetFlagIndex := params.BeaconConfig().TimelyTargetFlagIndex
|
||||
headFlagIndex := params.BeaconConfig().TimelyHeadFlagIndex
|
||||
for _, index := range indices {
|
||||
if participatedFlags[sourceFlagIndex] && !HasValidatorFlag(epochParticipation[index], sourceFlagIndex) {
|
||||
epochParticipation[index] = AddValidatorFlag(epochParticipation[index], sourceFlagIndex)
|
||||
}
|
||||
if participatedFlags[targetFlagIndex] && !HasValidatorFlag(epochParticipation[index], targetFlagIndex) {
|
||||
epochParticipation[index] = AddValidatorFlag(epochParticipation[index], targetFlagIndex)
|
||||
}
|
||||
if participatedFlags[headFlagIndex] && !HasValidatorFlag(epochParticipation[index], headFlagIndex) {
|
||||
epochParticipation[index] = AddValidatorFlag(epochParticipation[index], headFlagIndex)
|
||||
}
|
||||
}
|
||||
|
||||
if err := state.SetPreviousParticipationBits(epochParticipation); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return state, nil
|
||||
}
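The flag accumulation above relies on the package's bit helpers. A minimal sketch of how those helpers compose, assuming AddValidatorFlag and HasValidatorFlag keep the signatures shown elsewhere in this diff:

package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
	"github.com/prysmaticlabs/prysm/shared/params"
)

// Sketch only: flags for a single validator are OR'ed into one participation byte.
func main() {
	cfg := params.BeaconConfig()
	b := byte(0)
	b = altair.AddValidatorFlag(b, cfg.TimelySourceFlagIndex)
	b = altair.AddValidatorFlag(b, cfg.TimelyTargetFlagIndex)
	fmt.Println(altair.HasValidatorFlag(b, cfg.TimelySourceFlagIndex)) // true
	fmt.Println(altair.HasValidatorFlag(b, cfg.TimelyHeadFlagIndex))   // false
}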
|
||||
beacon-chain/core/altair/fork_transition_test.go (new file, 87 lines)
@@ -0,0 +1,87 @@
|
||||
package altair_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
stateAltair "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/attestationutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
)
|
||||
|
||||
func TestTranslateParticipation(t *testing.T) {
|
||||
s, _ := testutil.DeterministicGenesisStateAltair(t, 64)
|
||||
st, ok := s.(*stateAltair.BeaconState)
|
||||
require.Equal(t, true, ok)
|
||||
require.NoError(t, st.SetSlot(st.Slot()+params.BeaconConfig().MinAttestationInclusionDelay))
|
||||
|
||||
var err error
|
||||
newState, err := altair.TranslateParticipation(st, nil)
|
||||
require.NoError(t, err)
|
||||
participation, err := newState.PreviousEpochParticipation()
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, make([]byte, 64), participation)
|
||||
|
||||
aggBits := bitfield.NewBitlist(2)
|
||||
aggBits.SetBitAt(0, true)
|
||||
aggBits.SetBitAt(1, true)
|
||||
r, err := helpers.BlockRootAtSlot(s, 0)
|
||||
require.NoError(t, err)
|
||||
var pendingAtts []*ethpb.PendingAttestation
|
||||
for i := 0; i < 3; i++ {
|
||||
pendingAtts = append(pendingAtts, ðpb.PendingAttestation{
|
||||
Data: ðpb.AttestationData{
|
||||
CommitteeIndex: types.CommitteeIndex(i),
|
||||
BeaconBlockRoot: r,
|
||||
Source: ðpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
|
||||
Target: ðpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
|
||||
},
|
||||
AggregationBits: aggBits,
|
||||
InclusionDelay: 1,
|
||||
})
|
||||
}
|
||||
|
||||
newState, err = altair.TranslateParticipation(newState, pendingAtts)
|
||||
require.NoError(t, err)
|
||||
participation, err = newState.PreviousEpochParticipation()
|
||||
require.NoError(t, err)
|
||||
require.DeepNotSSZEqual(t, make([]byte, 64), participation)
|
||||
|
||||
committee, err := helpers.BeaconCommitteeFromState(st, pendingAtts[0].Data.Slot, pendingAtts[0].Data.CommitteeIndex)
|
||||
require.NoError(t, err)
|
||||
indices, err := attestationutil.AttestingIndices(pendingAtts[0].AggregationBits, committee)
|
||||
require.NoError(t, err)
|
||||
for _, index := range indices {
|
||||
require.Equal(t, true, altair.HasValidatorFlag(participation[index], params.BeaconConfig().TimelyHeadFlagIndex))
|
||||
require.Equal(t, true, altair.HasValidatorFlag(participation[index], params.BeaconConfig().TimelyTargetFlagIndex))
|
||||
require.Equal(t, true, altair.HasValidatorFlag(participation[index], params.BeaconConfig().TimelySourceFlagIndex))
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpgradeToAltair(t *testing.T) {
|
||||
st, _ := testutil.DeterministicGenesisState(t, params.BeaconConfig().MaxValidatorsPerCommittee)
|
||||
aState, err := altair.UpgradeToAltair(context.Background(), st)
|
||||
require.NoError(t, err)
|
||||
_, ok := aState.(state.BeaconStateAltair)
|
||||
require.Equal(t, true, ok)
|
||||
|
||||
f := aState.Fork()
|
||||
require.DeepSSZEqual(t, ðpb.Fork{
|
||||
PreviousVersion: st.Fork().CurrentVersion,
|
||||
CurrentVersion: params.BeaconConfig().AltairForkVersion,
|
||||
Epoch: helpers.CurrentEpoch(st),
|
||||
}, f)
|
||||
csc, err := aState.CurrentSyncCommittee()
|
||||
require.NoError(t, err)
|
||||
nsc, err := aState.NextSyncCommittee()
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, nsc, csc)
|
||||
}
|
||||
beacon-chain/core/altair/reward.go (new file, 51 lines)
@@ -0,0 +1,51 @@
|
||||
package altair
|
||||
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/shared/mathutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
)
|
||||
|
||||
// BaseReward takes the state and a validator index and calculates the
|
||||
// individual validator's base reward.
|
||||
//
|
||||
// Spec code:
|
||||
// def get_base_reward(state: BeaconState, index: ValidatorIndex) -> Gwei:
|
||||
// """
|
||||
// Return the base reward for the validator defined by ``index`` with respect to the current ``state``.
|
||||
//
|
||||
// Note: An optimally performing validator can earn one base reward per epoch over a long time horizon.
|
||||
// This takes into account both per-epoch (e.g. attestation) and intermittent duties (e.g. block proposal
|
||||
// and sync committees).
|
||||
// """
|
||||
// increments = state.validators[index].effective_balance // EFFECTIVE_BALANCE_INCREMENT
|
||||
// return Gwei(increments * get_base_reward_per_increment(state))
|
||||
func BaseReward(state state.ReadOnlyBeaconState, index types.ValidatorIndex) (uint64, error) {
|
||||
totalBalance, err := helpers.TotalActiveBalance(state)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "could not calculate active balance")
|
||||
}
|
||||
return BaseRewardWithTotalBalance(state, index, totalBalance)
|
||||
}
|
||||
|
||||
// BaseRewardWithTotalBalance calculates the base reward with the provided total balance.
|
||||
func BaseRewardWithTotalBalance(state state.ReadOnlyBeaconState, index types.ValidatorIndex, totalBalance uint64) (uint64, error) {
|
||||
val, err := state.ValidatorAtIndexReadOnly(index)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
increments := val.EffectiveBalance() / params.BeaconConfig().EffectiveBalanceIncrement
|
||||
return increments * baseRewardPerIncrement(totalBalance), nil
|
||||
}
|
||||
|
||||
// baseRewardPerIncrement returns the base reward per increment of the beacon state.
|
||||
//
|
||||
// Spec code:
|
||||
// def get_base_reward_per_increment(state: BeaconState) -> Gwei:
|
||||
// return Gwei(EFFECTIVE_BALANCE_INCREMENT * BASE_REWARD_FACTOR // integer_squareroot(get_total_active_balance(state)))
|
||||
func baseRewardPerIncrement(activeBalance uint64) uint64 {
|
||||
return params.BeaconConfig().EffectiveBalanceIncrement * params.BeaconConfig().BaseRewardFactor / mathutil.IntegerSquareRoot(activeBalance)
|
||||
}
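A worked sketch of the two formulas above. The total active balance below is an assumption (roughly 1,000,000 ETH) and BASE_REWARD_FACTOR is assumed to be the mainnet value 64; the float square root stands in for the spec's integer_squareroot.

package main

import (
	"fmt"
	"math"
)

// Minimal sketch of the get_base_reward arithmetic: the per-increment reward
// scales with 1/sqrt(total active balance), and a validator's base reward
// is that value times its effective-balance increments.
func main() {
	const (
		effectiveBalanceIncrement = uint64(1_000_000_000)         // 1 ETH in Gwei
		baseRewardFactor          = uint64(64)                    // assumed mainnet value
		totalActiveBalance        = uint64(1_000_000_000_000_000) // assumed ~1,000,000 ETH
	)
	perIncrement := effectiveBalanceIncrement * baseRewardFactor / uint64(math.Sqrt(float64(totalActiveBalance)))
	baseReward := 32 * perIncrement // a 32 ETH validator has 32 increments
	fmt.Println("per increment:", perIncrement, "base reward:", baseReward)
}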
|
||||
beacon-chain/core/altair/reward_test.go (new file, 28 lines)
@@ -0,0 +1,28 @@
|
||||
package altair_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
altair "github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
)
|
||||
|
||||
func TestBaseReward(t *testing.T) {
|
||||
s, _ := testutil.DeterministicGenesisStateAltair(t, params.BeaconConfig().MaxValidatorsPerCommittee)
|
||||
r0, err := altair.BaseReward(s, 0)
|
||||
require.NoError(t, err)
|
||||
r1, err := altair.BaseReward(s, 1)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, r0, r1)
|
||||
|
||||
v, err := s.ValidatorAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
v.EffectiveBalance = v.EffectiveBalance + params.BeaconConfig().EffectiveBalanceIncrement
|
||||
require.NoError(t, s.UpdateValidatorAtIndex(0, v))
|
||||
|
||||
r0, err = altair.BaseReward(s, 0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, r0 > r1)
|
||||
}
|
||||
beacon-chain/core/altair/sync_committee.go (new file, 221 lines)
@@ -0,0 +1,221 @@
|
||||
package altair
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bls"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/hashutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/mathutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
)
|
||||
|
||||
const maxRandomByte = uint64(1<<8 - 1)
|
||||
|
||||
// ValidateNilSyncContribution validates the following fields are not nil:
// - the contribution and proof itself
// - the message within contribution and proof
// - the contribution within contribution and proof
// - the aggregation bits within contribution
|
||||
func ValidateNilSyncContribution(s *ethpb.SignedContributionAndProof) error {
|
||||
if s == nil {
|
||||
return errors.New("signed message can't be nil")
|
||||
}
|
||||
if s.Message == nil {
|
||||
return errors.New("signed contribution's message can't be nil")
|
||||
}
|
||||
if s.Message.Contribution == nil {
|
||||
return errors.New("inner contribution can't be nil")
|
||||
}
|
||||
if s.Message.Contribution.AggregationBits == nil {
|
||||
return errors.New("contribution's bitfield can't be nil")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// NextSyncCommittee returns the next sync committee for a given state.
|
||||
//
|
||||
// Spec code:
|
||||
// def get_next_sync_committee(state: BeaconState) -> SyncCommittee:
|
||||
// """
|
||||
// Return the next sync committee, with possible pubkey duplicates.
|
||||
// """
|
||||
// indices = get_next_sync_committee_indices(state)
|
||||
// pubkeys = [state.validators[index].pubkey for index in indices]
|
||||
// aggregate_pubkey = bls.AggregatePKs(pubkeys)
|
||||
// return SyncCommittee(pubkeys=pubkeys, aggregate_pubkey=aggregate_pubkey)
|
||||
func NextSyncCommittee(ctx context.Context, s state.BeaconStateAltair) (*ethpb.SyncCommittee, error) {
|
||||
indices, err := NextSyncCommitteeIndices(ctx, s)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pubkeys := make([][]byte, len(indices))
|
||||
for i, index := range indices {
|
||||
p := s.PubkeyAtIndex(index)
|
||||
pubkeys[i] = p[:]
|
||||
}
|
||||
aggregated, err := bls.AggregatePublicKeys(pubkeys)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ðpb.SyncCommittee{
|
||||
Pubkeys: pubkeys,
|
||||
AggregatePubkey: aggregated.Marshal(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// NextSyncCommitteeIndices returns the next sync committee indices for a given state.
|
||||
//
|
||||
// Spec code:
|
||||
// def get_next_sync_committee_indices(state: BeaconState) -> Sequence[ValidatorIndex]:
|
||||
// """
|
||||
// Return the sync committee indices, with possible duplicates, for the next sync committee.
|
||||
// """
|
||||
// epoch = Epoch(get_current_epoch(state) + 1)
|
||||
//
|
||||
// MAX_RANDOM_BYTE = 2**8 - 1
|
||||
// active_validator_indices = get_active_validator_indices(state, epoch)
|
||||
// active_validator_count = uint64(len(active_validator_indices))
|
||||
// seed = get_seed(state, epoch, DOMAIN_SYNC_COMMITTEE)
|
||||
// i = 0
|
||||
// sync_committee_indices: List[ValidatorIndex] = []
|
||||
// while len(sync_committee_indices) < SYNC_COMMITTEE_SIZE:
|
||||
// shuffled_index = compute_shuffled_index(uint64(i % active_validator_count), active_validator_count, seed)
|
||||
// candidate_index = active_validator_indices[shuffled_index]
|
||||
// random_byte = hash(seed + uint_to_bytes(uint64(i // 32)))[i % 32]
|
||||
// effective_balance = state.validators[candidate_index].effective_balance
|
||||
// if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte:
|
||||
// sync_committee_indices.append(candidate_index)
|
||||
// i += 1
|
||||
// return sync_committee_indices
|
||||
func NextSyncCommitteeIndices(ctx context.Context, s state.BeaconStateAltair) ([]types.ValidatorIndex, error) {
|
||||
epoch := helpers.NextEpoch(s)
|
||||
indices, err := helpers.ActiveValidatorIndices(s, epoch)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
seed, err := helpers.Seed(s, epoch, params.BeaconConfig().DomainSyncCommittee)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
count := uint64(len(indices))
|
||||
cfg := params.BeaconConfig()
|
||||
syncCommitteeSize := cfg.SyncCommitteeSize
|
||||
cIndices := make([]types.ValidatorIndex, 0, syncCommitteeSize)
|
||||
hashFunc := hashutil.CustomSHA256Hasher()
|
||||
|
||||
for i := types.ValidatorIndex(0); uint64(len(cIndices)) < params.BeaconConfig().SyncCommitteeSize; i++ {
|
||||
if ctx.Err() != nil {
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
|
||||
sIndex, err := helpers.ComputeShuffledIndex(i.Mod(count), count, seed, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
b := append(seed[:], bytesutil.Bytes8(uint64(i.Div(32)))...)
|
||||
randomByte := hashFunc(b)[i%32]
|
||||
cIndex := indices[sIndex]
|
||||
v, err := s.ValidatorAtIndexReadOnly(cIndex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
effectiveBal := v.EffectiveBalance()
|
||||
if effectiveBal*maxRandomByte >= cfg.MaxEffectiveBalance*uint64(randomByte) {
|
||||
cIndices = append(cIndices, cIndex)
|
||||
}
|
||||
}
|
||||
|
||||
return cIndices, nil
|
||||
}
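The while-loop above performs rejection sampling: a shuffled candidate is accepted with probability proportional to its effective balance. A small self-contained sketch of that acceptance condition (the 16 ETH effective balance is an assumption for illustration):

package main

import "fmt"

// Sketch of the acceptance test effective_balance * MAX_RANDOM_BYTE >=
// MAX_EFFECTIVE_BALANCE * random_byte: with 16 ETH effective balance,
// roughly half of the 256 possible random bytes accept the candidate.
func main() {
	const (
		maxRandomByte   = uint64(255)
		maxEffectiveBal = uint64(32_000_000_000)
	)
	effectiveBal := uint64(16_000_000_000) // assumed candidate balance
	accepted := 0
	for randomByte := uint64(0); randomByte <= maxRandomByte; randomByte++ {
		if effectiveBal*maxRandomByte >= maxEffectiveBal*randomByte {
			accepted++
		}
	}
	fmt.Printf("accepted for %d of 256 random bytes\n", accepted) // 128
}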
|
||||
|
||||
// SyncSubCommitteePubkeys returns the pubkeys participating in a sync subcommittee.
|
||||
//
|
||||
// def get_sync_subcommittee_pubkeys(state: BeaconState, subcommittee_index: uint64) -> Sequence[BLSPubkey]:
|
||||
// # Committees assigned to `slot` sign for `slot - 1`
|
||||
// # This creates the exceptional logic below when transitioning between sync committee periods
|
||||
// next_slot_epoch = compute_epoch_at_slot(Slot(state.slot + 1))
|
||||
// if compute_sync_committee_period(get_current_epoch(state)) == compute_sync_committee_period(next_slot_epoch):
|
||||
// sync_committee = state.current_sync_committee
|
||||
// else:
|
||||
// sync_committee = state.next_sync_committee
|
||||
//
|
||||
// # Return pubkeys for the subcommittee index
|
||||
// sync_subcommittee_size = SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT
|
||||
// i = subcommittee_index * sync_subcommittee_size
|
||||
// return sync_committee.pubkeys[i:i + sync_subcommittee_size]
|
||||
func SyncSubCommitteePubkeys(syncCommittee *ethpb.SyncCommittee, subComIdx types.CommitteeIndex) ([][]byte, error) {
|
||||
cfg := params.BeaconConfig()
|
||||
subCommSize := cfg.SyncCommitteeSize / cfg.SyncCommitteeSubnetCount
|
||||
i := uint64(subComIdx) * subCommSize
|
||||
endOfSubCom := i + subCommSize
|
||||
pubkeyLen := uint64(len(syncCommittee.Pubkeys))
|
||||
if endOfSubCom > pubkeyLen {
|
||||
return nil, errors.Errorf("end index is larger than array length: %d > %d", endOfSubCom, pubkeyLen)
|
||||
}
|
||||
return syncCommittee.Pubkeys[i:endOfSubCom], nil
|
||||
}
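The index arithmetic above is plain slicing. A sketch with assumed mainnet sizes (SYNC_COMMITTEE_SIZE = 512, SYNC_COMMITTEE_SUBNET_COUNT = 4):

package main

import "fmt"

// Sketch of the subcommittee slicing in SyncSubCommitteePubkeys:
// each of the 4 subnets gets a contiguous 128-pubkey window.
func main() {
	const (
		syncCommitteeSize = uint64(512) // assumed mainnet value
		subnetCount       = uint64(4)   // assumed mainnet value
	)
	subCommSize := syncCommitteeSize / subnetCount
	for idx := uint64(0); idx < subnetCount; idx++ {
		start := idx * subCommSize
		end := start + subCommSize
		fmt.Printf("subcommittee %d -> pubkeys[%d:%d]\n", idx, start, end)
	}
}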
|
||||
|
||||
// IsSyncCommitteeAggregator checks whether the provided signature is for a valid
|
||||
// aggregator.
|
||||
//
|
||||
// def is_sync_committee_aggregator(signature: BLSSignature) -> bool:
|
||||
// modulo = max(1, SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT // TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE)
|
||||
// return bytes_to_uint64(hash(signature)[0:8]) % modulo == 0
|
||||
func IsSyncCommitteeAggregator(sig []byte) (bool, error) {
|
||||
if len(sig) != params.BeaconConfig().BLSSignatureLength {
|
||||
return false, errors.New("incorrect sig length")
|
||||
}
|
||||
|
||||
cfg := params.BeaconConfig()
|
||||
modulo := mathutil.Max(1, cfg.SyncCommitteeSize/cfg.SyncCommitteeSubnetCount/cfg.TargetAggregatorsPerSyncSubcommittee)
|
||||
hashedSig := hashutil.Hash(sig)
|
||||
return bytesutil.FromBytes8(hashedSig[:8])%modulo == 0, nil
|
||||
}
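For intuition on the selection rate implied by the modulo above, here is a small sketch using assumed mainnet values (SYNC_COMMITTEE_SIZE = 512, SYNC_COMMITTEE_SUBNET_COUNT = 4, TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE = 16):

package main

import "fmt"

// Sketch of the aggregator modulo in IsSyncCommitteeAggregator: with the
// assumed sizes, modulo = 8, so roughly one in eight subcommittee members
// (about 16 of 128) self-selects as an aggregator.
func main() {
	const (
		syncCommitteeSize = uint64(512) // assumed
		subnetCount       = uint64(4)   // assumed
		targetAggregators = uint64(16)  // assumed
	)
	modulo := syncCommitteeSize / subnetCount / targetAggregators
	if modulo < 1 {
		modulo = 1
	}
	fmt.Println("modulo:", modulo, "-> roughly 1 in", modulo, "members aggregate")
}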
|
||||
|
||||
// ValidateSyncMessageTime validates that the provided sync message slot is within the allowable time range.
|
||||
func ValidateSyncMessageTime(slot types.Slot, genesisTime time.Time, clockDisparity time.Duration) error {
|
||||
if err := helpers.ValidateSlotClock(slot, uint64(genesisTime.Unix())); err != nil {
|
||||
return err
|
||||
}
|
||||
messageTime, err := helpers.SlotToTime(uint64(genesisTime.Unix()), slot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
currentSlot := helpers.SlotsSince(genesisTime)
|
||||
slotStartTime, err := helpers.SlotToTime(uint64(genesisTime.Unix()), currentSlot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
lowestSlotBound := slotStartTime.Add(-clockDisparity)
|
||||
currentLowerBound := time.Now().Add(-clockDisparity)
|
||||
// In the event the slot's start time is before the
|
||||
// current allowable bound, we set the slot's start
|
||||
// time as the bound.
|
||||
if slotStartTime.Before(currentLowerBound) {
|
||||
lowestSlotBound = slotStartTime
|
||||
}
|
||||
|
||||
lowerBound := lowestSlotBound
|
||||
upperBound := time.Now().Add(clockDisparity)
|
||||
// Verify sync message slot is within the time range.
|
||||
if messageTime.Before(lowerBound) || messageTime.After(upperBound) {
|
||||
return fmt.Errorf(
|
||||
"sync message slot %d not within allowable range of %d to %d (current slot)",
|
||||
slot,
|
||||
lowerBound.Unix(),
|
||||
upperBound.Unix(),
|
||||
)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
beacon-chain/core/altair/sync_committee_test.go (new file, 402 lines)
@@ -0,0 +1,402 @@
|
||||
package altair_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
stateAltair "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bls"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
"github.com/prysmaticlabs/prysm/shared/timeutils"
|
||||
)
|
||||
|
||||
func TestSyncCommitteeIndices_CanGet(t *testing.T) {
|
||||
getState := func(t *testing.T, count uint64) *stateAltair.BeaconState {
|
||||
validators := make([]*ethpb.Validator, count)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
EffectiveBalance: params.BeaconConfig().MinDepositAmount,
|
||||
}
|
||||
}
|
||||
state, err := stateAltair.InitializeFromProto(ðpb.BeaconStateAltair{
|
||||
Validators: validators,
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
return state
|
||||
}
|
||||
|
||||
type args struct {
|
||||
state *stateAltair.BeaconState
|
||||
epoch types.Epoch
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
wantErr bool
|
||||
errString string
|
||||
}{
|
||||
{
|
||||
name: "nil state",
|
||||
args: args{
|
||||
state: nil,
|
||||
},
|
||||
wantErr: true,
|
||||
errString: "nil inner state",
|
||||
},
|
||||
{
|
||||
name: "genesis validator count, epoch 0",
|
||||
args: args{
|
||||
state: getState(t, params.BeaconConfig().MinGenesisActiveValidatorCount),
|
||||
epoch: 0,
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "genesis validator count, epoch 100",
|
||||
args: args{
|
||||
state: getState(t, params.BeaconConfig().MinGenesisActiveValidatorCount),
|
||||
epoch: 100,
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "less than optimal validator count, epoch 100",
|
||||
args: args{
|
||||
state: getState(t, params.BeaconConfig().MaxValidatorsPerCommittee),
|
||||
epoch: 100,
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
got, err := altair.NextSyncCommitteeIndices(context.Background(), tt.args.state)
|
||||
if tt.wantErr {
|
||||
require.ErrorContains(t, tt.errString, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int(params.BeaconConfig().SyncCommitteeSize), len(got))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestSyncCommitteeIndices_DifferentPeriods(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
getState := func(t *testing.T, count uint64) *stateAltair.BeaconState {
|
||||
validators := make([]*ethpb.Validator, count)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
EffectiveBalance: params.BeaconConfig().MinDepositAmount,
|
||||
}
|
||||
}
|
||||
state, err := stateAltair.InitializeFromProto(ðpb.BeaconStateAltair{
|
||||
Validators: validators,
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
return state
|
||||
}
|
||||
|
||||
state := getState(t, params.BeaconConfig().MaxValidatorsPerCommittee)
|
||||
got1, err := altair.NextSyncCommitteeIndices(context.Background(), state)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, state.SetSlot(params.BeaconConfig().SlotsPerEpoch))
|
||||
got2, err := altair.NextSyncCommitteeIndices(context.Background(), state)
|
||||
require.NoError(t, err)
|
||||
require.DeepNotEqual(t, got1, got2)
|
||||
require.NoError(t, state.SetSlot(params.BeaconConfig().SlotsPerEpoch*types.Slot(params.BeaconConfig().EpochsPerSyncCommitteePeriod)))
|
||||
got2, err = altair.NextSyncCommitteeIndices(context.Background(), state)
|
||||
require.NoError(t, err)
|
||||
require.DeepNotEqual(t, got1, got2)
|
||||
require.NoError(t, state.SetSlot(params.BeaconConfig().SlotsPerEpoch*types.Slot(2*params.BeaconConfig().EpochsPerSyncCommitteePeriod)))
|
||||
got2, err = altair.NextSyncCommitteeIndices(context.Background(), state)
|
||||
require.NoError(t, err)
|
||||
require.DeepNotEqual(t, got1, got2)
|
||||
}
|
||||
|
||||
func TestSyncCommittee_CanGet(t *testing.T) {
|
||||
getState := func(t *testing.T, count uint64) *stateAltair.BeaconState {
|
||||
validators := make([]*ethpb.Validator, count)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
blsKey, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
EffectiveBalance: params.BeaconConfig().MinDepositAmount,
|
||||
PublicKey: blsKey.PublicKey().Marshal(),
|
||||
}
|
||||
}
|
||||
state, err := stateAltair.InitializeFromProto(ðpb.BeaconStateAltair{
|
||||
Validators: validators,
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
return state
|
||||
}
|
||||
|
||||
type args struct {
|
||||
state *stateAltair.BeaconState
|
||||
epoch types.Epoch
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
wantErr bool
|
||||
errString string
|
||||
}{
|
||||
{
|
||||
name: "nil state",
|
||||
args: args{
|
||||
state: nil,
|
||||
},
|
||||
wantErr: true,
|
||||
errString: "nil inner state",
|
||||
},
|
||||
{
|
||||
name: "genesis validator count, epoch 0",
|
||||
args: args{
|
||||
state: getState(t, params.BeaconConfig().MinGenesisActiveValidatorCount),
|
||||
epoch: 0,
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "genesis validator count, epoch 100",
|
||||
args: args{
|
||||
state: getState(t, params.BeaconConfig().MinGenesisActiveValidatorCount),
|
||||
epoch: 100,
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "less than optimal validator count, epoch 100",
|
||||
args: args{
|
||||
state: getState(t, params.BeaconConfig().MaxValidatorsPerCommittee),
|
||||
epoch: 100,
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
if !tt.wantErr {
|
||||
require.NoError(t, tt.args.state.SetSlot(types.Slot(tt.args.epoch)*params.BeaconConfig().SlotsPerEpoch))
|
||||
}
|
||||
got, err := altair.NextSyncCommittee(context.Background(), tt.args.state)
|
||||
if tt.wantErr {
|
||||
require.ErrorContains(t, tt.errString, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int(params.BeaconConfig().SyncCommitteeSize), len(got.Pubkeys))
|
||||
require.Equal(t, params.BeaconConfig().BLSPubkeyLength, len(got.AggregatePubkey))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateNilSyncContribution(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
s *ethpb.SignedContributionAndProof
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "nil object",
|
||||
s: nil,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "nil message",
|
||||
s: ðpb.SignedContributionAndProof{},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "nil contribution",
|
||||
s: ðpb.SignedContributionAndProof{Message: ðpb.ContributionAndProof{}},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "nil bitfield",
|
||||
s: ðpb.SignedContributionAndProof{
|
||||
Message: ðpb.ContributionAndProof{
|
||||
Contribution: ðpb.SyncCommitteeContribution{},
|
||||
}},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "non nil sync contribution",
|
||||
s: ðpb.SignedContributionAndProof{
|
||||
Message: ðpb.ContributionAndProof{
|
||||
Contribution: ðpb.SyncCommitteeContribution{
|
||||
AggregationBits: []byte{},
|
||||
},
|
||||
}},
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if err := altair.ValidateNilSyncContribution(tt.s); (err != nil) != tt.wantErr {
|
||||
t.Errorf("ValidateNilSyncContribution() error = %v, wantErr %v", err, tt.wantErr)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestSyncSubCommitteePubkeys_CanGet(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
state := getState(t, params.BeaconConfig().MaxValidatorsPerCommittee)
|
||||
com, err := altair.NextSyncCommittee(context.Background(), state)
|
||||
require.NoError(t, err)
|
||||
sub, err := altair.SyncSubCommitteePubkeys(com, 0)
|
||||
require.NoError(t, err)
|
||||
subCommSize := params.BeaconConfig().SyncCommitteeSize / params.BeaconConfig().SyncCommitteeSubnetCount
|
||||
require.Equal(t, int(subCommSize), len(sub))
|
||||
require.DeepSSZEqual(t, com.Pubkeys[0:subCommSize], sub)
|
||||
|
||||
sub, err = altair.SyncSubCommitteePubkeys(com, 1)
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, com.Pubkeys[subCommSize:2*subCommSize], sub)
|
||||
|
||||
sub, err = altair.SyncSubCommitteePubkeys(com, 2)
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, com.Pubkeys[2*subCommSize:3*subCommSize], sub)
|
||||
|
||||
sub, err = altair.SyncSubCommitteePubkeys(com, 3)
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, com.Pubkeys[3*subCommSize:], sub)
|
||||
|
||||
}
|
||||
|
||||
func Test_ValidateSyncMessageTime(t *testing.T) {
|
||||
if params.BeaconNetworkConfig().MaximumGossipClockDisparity < 200*time.Millisecond {
|
||||
t.Fatal("This test expects the maximum clock disparity to be at least 200ms")
|
||||
}
|
||||
|
||||
type args struct {
|
||||
syncMessageSlot types.Slot
|
||||
genesisTime time.Time
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
wantedErr string
|
||||
}{
|
||||
{
|
||||
name: "sync_message.slot == current_slot",
|
||||
args: args{
|
||||
syncMessageSlot: 15,
|
||||
genesisTime: timeutils.Now().Add(-15 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "sync_message.slot == current_slot, received in middle of slot",
|
||||
args: args{
|
||||
syncMessageSlot: 15,
|
||||
genesisTime: timeutils.Now().Add(
|
||||
-15 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second,
|
||||
).Add(-(time.Duration(params.BeaconConfig().SecondsPerSlot/2) * time.Second)),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "sync_message.slot == current_slot, received 200ms early",
|
||||
args: args{
|
||||
syncMessageSlot: 16,
|
||||
genesisTime: timeutils.Now().Add(
|
||||
-16 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second,
|
||||
).Add(-200 * time.Millisecond),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "sync_message.slot > current_slot",
|
||||
args: args{
|
||||
syncMessageSlot: 16,
|
||||
genesisTime: timeutils.Now().Add(-(15 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)),
|
||||
},
|
||||
wantedErr: "sync message slot 16 not within allowable range of",
|
||||
},
|
||||
{
|
||||
name: "sync_message.slot == current_slot+CLOCK_DISPARITY",
|
||||
args: args{
|
||||
syncMessageSlot: 100,
|
||||
genesisTime: timeutils.Now().Add(-(100*time.Duration(params.BeaconConfig().SecondsPerSlot)*time.Second - params.BeaconNetworkConfig().MaximumGossipClockDisparity)),
|
||||
},
|
||||
wantedErr: "",
|
||||
},
|
||||
{
|
||||
name: "sync_message.slot == current_slot+CLOCK_DISPARITY+200ms",
|
||||
args: args{
|
||||
syncMessageSlot: 100,
|
||||
genesisTime: timeutils.Now().Add(-(100*time.Duration(params.BeaconConfig().SecondsPerSlot)*time.Second - params.BeaconNetworkConfig().MaximumGossipClockDisparity - 200*time.Millisecond)),
|
||||
},
|
||||
wantedErr: "sync message slot 100 not within allowable range of",
|
||||
},
|
||||
{
|
||||
name: "sync_message.slot == current_slot-CLOCK_DISPARITY",
|
||||
args: args{
|
||||
syncMessageSlot: 100,
|
||||
genesisTime: timeutils.Now().Add(-(100*time.Duration(params.BeaconConfig().SecondsPerSlot)*time.Second + params.BeaconNetworkConfig().MaximumGossipClockDisparity)),
|
||||
},
|
||||
wantedErr: "",
|
||||
},
|
||||
{
|
||||
name: "sync_message.slot > current_slot+CLOCK_DISPARITY",
|
||||
args: args{
|
||||
syncMessageSlot: 101,
|
||||
genesisTime: timeutils.Now().Add(-(100*time.Duration(params.BeaconConfig().SecondsPerSlot)*time.Second + params.BeaconNetworkConfig().MaximumGossipClockDisparity)),
|
||||
},
|
||||
wantedErr: "sync message slot 101 not within allowable range of",
|
||||
},
|
||||
{
|
||||
name: "sync_message.slot is well beyond current slot",
|
||||
args: args{
|
||||
syncMessageSlot: 1 << 32,
|
||||
genesisTime: timeutils.Now().Add(-15 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second),
|
||||
},
|
||||
wantedErr: "which exceeds max allowed value relative to the local clock",
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
err := altair.ValidateSyncMessageTime(tt.args.syncMessageSlot, tt.args.genesisTime,
|
||||
params.BeaconNetworkConfig().MaximumGossipClockDisparity)
|
||||
if tt.wantedErr != "" {
|
||||
assert.ErrorContains(t, tt.wantedErr, err)
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func getState(t *testing.T, count uint64) *stateAltair.BeaconState {
|
||||
validators := make([]*ethpb.Validator, count)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
blsKey, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
EffectiveBalance: params.BeaconConfig().MinDepositAmount,
|
||||
PublicKey: blsKey.PublicKey().Marshal(),
|
||||
}
|
||||
}
|
||||
state, err := stateAltair.InitializeFromProto(ðpb.BeaconStateAltair{
|
||||
Validators: validators,
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
return state
|
||||
}
|
||||
beacon-chain/core/altair/transition.go (new file, 110 lines)
@@ -0,0 +1,110 @@
|
||||
package altair
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
e "github.com/prysmaticlabs/prysm/beacon-chain/core/epoch"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
// ProcessEpoch describes the per epoch operations that are performed on the beacon state.
|
||||
// It's optimized by precomputing validator attestation info and epoch total/attested balances upfront.
|
||||
//
|
||||
// Spec code:
|
||||
// def process_epoch(state: BeaconState) -> None:
|
||||
// process_justification_and_finalization(state) # [Modified in Altair]
|
||||
// process_inactivity_updates(state) # [New in Altair]
|
||||
// process_rewards_and_penalties(state) # [Modified in Altair]
|
||||
// process_registry_updates(state)
|
||||
// process_slashings(state) # [Modified in Altair]
|
||||
// process_eth1_data_reset(state)
|
||||
// process_effective_balance_updates(state)
|
||||
// process_slashings_reset(state)
|
||||
// process_randao_mixes_reset(state)
|
||||
// process_historical_roots_update(state)
|
||||
// process_participation_flag_updates(state) # [New in Altair]
|
||||
// process_sync_committee_updates(state) # [New in Altair]
|
||||
func ProcessEpoch(ctx context.Context, state state.BeaconStateAltair) (state.BeaconStateAltair, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "altair.ProcessEpoch")
|
||||
defer span.End()
|
||||
|
||||
if state == nil || state.IsNil() {
|
||||
return nil, errors.New("nil state")
|
||||
}
|
||||
vp, bp, err := InitializeEpochValidators(ctx, state)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// New in Altair.
|
||||
vp, bp, err = ProcessEpochParticipation(ctx, state, bp, vp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
state, err = precompute.ProcessJustificationAndFinalizationPreCompute(state, bp)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process justification")
|
||||
}
|
||||
|
||||
// New in Altair.
|
||||
state, vp, err = ProcessInactivityScores(ctx, state, vp)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process inactivity updates")
|
||||
}
|
||||
|
||||
// New in Altair.
|
||||
state, err = ProcessRewardsAndPenaltiesPrecompute(state, bp, vp)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process rewards and penalties")
|
||||
}
|
||||
|
||||
state, err = e.ProcessRegistryUpdates(state)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process registry updates")
|
||||
}
|
||||
|
||||
// Modified in Altair.
|
||||
state, err = ProcessSlashings(state)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
state, err = e.ProcessEth1DataReset(state)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
state, err = e.ProcessEffectiveBalanceUpdates(state)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
state, err = e.ProcessSlashingsReset(state)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
state, err = e.ProcessRandaoMixesReset(state)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
state, err = e.ProcessHistoricalRootsUpdate(state)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// New in Altair.
|
||||
state, err = ProcessParticipationFlagUpdates(state)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// New in Altair.
|
||||
state, err = ProcessSyncCommitteeUpdates(ctx, state)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return state, nil
|
||||
}
|
||||
beacon-chain/core/altair/transition_test.go (new file, 34 lines)
@@ -0,0 +1,34 @@
|
||||
package altair_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
|
||||
stateAltair "github.com/prysmaticlabs/prysm/beacon-chain/state/v2"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
)
|
||||
|
||||
func TestProcessEpoch_CanProcess(t *testing.T) {
|
||||
epoch := types.Epoch(1)
|
||||
slashing := make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector)
|
||||
base := ðpb.BeaconStateAltair{
|
||||
Slot: params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epoch)) + 1,
|
||||
BlockRoots: make([][]byte, 128),
|
||||
Slashings: slashing,
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
FinalizedCheckpoint: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
JustificationBits: bitfield.Bitvector4{0x00},
|
||||
CurrentJustifiedCheckpoint: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
}
|
||||
s, err := stateAltair.InitializeFromProto(base)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.SetValidators([]*ethpb.Validator{}))
|
||||
newState, err := altair.ProcessEpoch(context.Background(), s)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(0), newState.Slashings()[2], "Unexpected slashed balance")
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff.