Mirror of https://github.com/OffchainLabs/prysm.git, synced 2026-01-14 07:48:08 -05:00.
Compare commits: 966 commits between the refs shown truncated in this capture as "moveCoreMe..." and "replaceWit..."; the commit table listed only SHA1 values, with no author or date information. The changed files follow.
.bazelrc (173 lines changed)

@@ -1,9 +1,9 @@
-# Print warnings for tests with inappropriate test size or timeout.
-test --test_verbose_timeout_warnings
-# Only build test targets when running bazel test //...
-test --build_tests_only
-test --test_output=errors
+# Import bazelrc presets
+import %workspace%/build/bazelrc/convenience.bazelrc
+import %workspace%/build/bazelrc/correctness.bazelrc
+import %workspace%/build/bazelrc/cross.bazelrc
+import %workspace%/build/bazelrc/debug.bazelrc
+import %workspace%/build/bazelrc/performance.bazelrc

 # E2E run with debug gotag
 test:e2e --define gotags=debug
@@ -11,26 +11,10 @@ test:e2e --define gotags=debug
 # Clearly indicate that coverage is enabled to disable certain nogo checks.
 coverage --define=coverage_enabled=1

-# Fix for rules_docker. See: https://github.com/bazelbuild/rules_docker/issues/842
-build --host_force_python=PY2
-test --host_force_python=PY2
-run --host_force_python=PY2
-
-# Networking is blocked for tests by default, add "requires-network" tag to your test if networking
-# is required within the sandbox. Network sandboxing only works on linux.
-build --sandbox_default_allow_network=false
-
 # Stamp binaries with git information
 build --workspace_status_command=./hack/workspace_status.sh
-build --stamp
-
-# Prevent PATH changes from rebuilding when switching from IDE to command line.
-build --incompatible_strict_action_env
-test --incompatible_strict_action_env
-run --incompatible_strict_action_env

 build --define blst_disabled=false
-test --define blst_disabled=false
 run --define blst_disabled=false

 build:blst_disabled --define blst_disabled=true
@@ -41,13 +25,14 @@ build:minimal --@io_bazel_rules_go//go/config:tags=minimal

 # Release flags
 build:release --compilation_mode=opt
-build:release --config=llvm
+build:release --stamp

 # LLVM compiler for building C/C++ dependencies.
-build:llvm --crosstool_top=@llvm_toolchain//:toolchain
 build:llvm --define compiler=llvm
 build:llvm --copt -fno-sanitize=vptr,function
 build:llvm --linkopt -fno-sanitize=vptr,function
+# --incompatible_enable_cc_toolchain_resolution not needed after this issue is closed https://github.com/bazelbuild/bazel/issues/7260
+build:llvm --incompatible_enable_cc_toolchain_resolution

 build:asan --copt -fsanitize=address,undefined
 build:asan --copt -fno-omit-frame-pointer
@@ -71,36 +56,6 @@ build:cgo_symbolizer --define=USE_CGO_SYMBOLIZER=true
 build:cgo_symbolizer -c dbg
 build:cgo_symbolizer --define=gotags=cgosymbolizer_enabled

-# multi-arch cross-compiling toolchain configs:
-#-----------------------------------------------
-build:cross --crosstool_top=@prysm_toolchains//:multiarch_toolchain
-build:cross --host_platform=@io_bazel_rules_go//go/toolchain:linux_amd64
-build:cross --host_crosstool_top=@prysm_toolchains//:hostonly_toolchain
-
-# linux_amd64 config for cross compiler toolchain, not strictly necessary since host/exec env is amd64
-build:linux_amd64 --platforms=@io_bazel_rules_go//go/toolchain:linux_amd64_cgo
-
-# osx_amd64 config for cross compiler toolchain
-build:osx_amd64 --config=cross
-build:osx_amd64 --platforms=@io_bazel_rules_go//go/toolchain:darwin_amd64_cgo
-build:osx_amd64 --compiler=osxcross
-
-# windows
-build:windows_amd64 --config=cross
-build:windows_amd64 --platforms=@io_bazel_rules_go//go/toolchain:windows_amd64_cgo
-build:windows_amd64 --compiler=mingw-w64
-
-# linux_arm64 conifg for cross compiler toolchain
-build:linux_arm64 --config=cross
-build:linux_arm64 --platforms=@io_bazel_rules_go//go/toolchain:linux_arm64_cgo
-build:linux_arm64 --copt=-funsafe-math-optimizations
-build:linux_arm64 --copt=-ftree-vectorize
-build:linux_arm64 --copt=-fomit-frame-pointer
-build:linux_arm64 --cpu=aarch64
-build:linux_arm64 --compiler=clang
-build:linux_arm64 --copt=-march=armv8-a
-
 # toolchain build debug configs
 #------------------------------
 build:debug --sandbox_debug
@@ -108,115 +63,5 @@ build:debug --toolchain_resolution_debug
 build:debug --verbose_failures
 build:debug -s

-# windows debug
-build:windows_amd64_debug --config=windows_amd64
-build:windows_amd64_debug --config=debug
-
-# osx_amd64 debug config
-build:osx_amd64_debug --config=debug
-build:osx_amd64_debug --config=osx_amd64
-
-# linux_arm64_debug
-build:linux_arm64_debug --config=linux_arm64
-build:linux_arm64_debug --config=debug
-
-# linux_amd64_debug
-build:linux_amd64_debug --config=linux_amd64
-build:linux_amd64_debug --config=debug
-
-# Docker Sandbox Configs
-#-----------------------
-# Note all docker sandbox configs must run from a linux x86_64 host
-# build:docker-sandbox --experimental_docker_image=gcr.io/prysmaticlabs/rbe-worker:latest
-build:docker-sandbox --spawn_strategy=docker --strategy=Javac=docker --genrule_strategy=docker
-build:docker-sandbox --define=EXECUTOR=remote
-build:docker-sandbox --experimental_docker_verbose
-build:docker-sandbox --experimental_enable_docker_sandbox
-build:docker-sandbox --crosstool_top=@rbe_ubuntu_clang//cc:toolchain
-build:docker-sandbox --host_javabase=@rbe_ubuntu_clang//java:jdk
-build:docker-sandbox --javabase=@rbe_ubuntu_clang//java:jdk
-build:docker-sandbox --host_java_toolchain=@bazel_tools//tools/jdk:toolchain_hostjdk8
-build:docker-sandbox --java_toolchain=@bazel_tools//tools/jdk:toolchain_hostjdk8
-build:docker-sandbox --extra_execution_platforms=@rbe_ubuntu_clang//config:platform
-build:docker-sandbox --host_platform=@rbe_ubuntu_clang//config:platform
-build:docker-sandbox --platforms=@rbe_ubuntu_clang//config:platform
-build:docker-sandbox --extra_toolchains=@prysm_toolchains//:cc-toolchain-multiarch
-
-# windows_amd64 docker sandbox build config
-build:windows_amd64_docker --config=docker-sandbox --config=windows_amd64
-build:windows_amd64_docker_debug --config=windows_amd64_docker --config=debug
-
-# osx_amd64 docker sandbox build config
-build:osx_amd64_docker --config=docker-sandbox --config=osx_amd64
-build:osx_amd64_docker_debug --config=osx_amd64_docker --config=debug
-
-# linux_arm64 docker sandbox build config
-build:linux_arm64_docker --config=docker-sandbox --config=linux_arm64
-build:linux_arm64_docker_debug --config=linux_arm64_docker --config=debug
-
-# linux_amd64 docker sandbox build config
-build:linux_amd64_docker --config=docker-sandbox --config=linux_amd64
-build:linux_amd64_docker_debug --config=linux_amd64_docker --config=debug
-
-# Remote Build Execution
-#-----------------------
-# Originally from https://github.com/bazelbuild/bazel-toolchains/blob/master/bazelrc/bazel-2.0.0.bazelrc
-#
-# Depending on how many machines are in the remote execution instance, setting
-# this higher can make builds faster by allowing more jobs to run in parallel.
-# Setting it too high can result in jobs that timeout, however, while waiting
-# for a remote machine to execute them.
-build:remote --jobs=50
-
-# Set several flags related to specifying the platform, toolchain and java
-# properties.
-# These flags should only be used as is for the rbe-ubuntu16-04 container
-# and need to be adapted to work with other toolchain containers.
-build:remote --host_javabase=@rbe_ubuntu_clang//java:jdk
-build:remote --javabase=@rbe_ubuntu_clang//java:jdk
-build:remote --host_java_toolchain=@bazel_tools//tools/jdk:toolchain_hostjdk8
-build:remote --java_toolchain=@bazel_tools//tools/jdk:toolchain_hostjdk8
-build:remote --crosstool_top=@rbe_ubuntu_clang//cc:toolchain
-build:remote --action_env=BAZEL_DO_NOT_DETECT_CPP_TOOLCHAIN=1
-# Platform flags:
-# The toolchain container used for execution is defined in the target indicated
-# by "extra_execution_platforms", "host_platform" and "platforms".
-# More about platforms: https://docs.bazel.build/versions/master/platforms.html
-build:remote --extra_toolchains=@rbe_ubuntu_clang//config:cc-toolchain
-build:remote --extra_execution_platforms=@rbe_ubuntu_clang//config:platform
-build:remote --host_platform=@rbe_ubuntu_clang//config:platform
-build:remote --platforms=@rbe_ubuntu_clang//config:platform
-
-# Starting with Bazel 0.27.0 strategies do not need to be explicitly
-# defined. See https://github.com/bazelbuild/bazel/issues/7480
-build:remote --define=EXECUTOR=remote
-
-# Enable remote execution so actions are performed on the remote systems.
-# build:remote --remote_executor=grpcs://remotebuildexecution.googleapis.com
-
-# Enforce stricter environment rules, which eliminates some non-hermetic
-# behavior and therefore improves both the remote cache hit rate and the
-# correctness and repeatability of the build.
-build:remote --incompatible_strict_action_env=true
-
-# Set a higher timeout value, just in case.
-build:remote --remote_timeout=3600
-
-# Enable authentication. This will pick up application default credentials by
-# default. You can use --google_credentials=some_file.json to use a service
-# account credential instead.
-# build:remote --google_default_credentials=true
-
-# Enable build without the bytes
-# See: https://github.com/bazelbuild/bazel/issues/6862
-build:remote --experimental_remote_download_outputs=toplevel --experimental_inmemory_jdeps_files --experimental_inmemory_dotd_files
-
-build:remote --remote_local_fallback
-
-# Ignore GoStdLib with remote caching
-build --modify_execution_info='GoStdlib.*=+no-remote-cache'
-
 # Set bazel gotag
 build --define gotags=bazel
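The named configs above are selected with Bazel's `--config` flag. A minimal sketch of typical invocations, under the assumption that the usual Prysm targets (for example `//cmd/beacon-chain`) exist; the target labels are illustrative and not taken from this diff:

    # Optimized, stamped release build using the build:release settings.
    bazel build --config=release //cmd/beacon-chain

    # End-to-end tests with the debug gotag from test:e2e.
    bazel test --config=e2e //testing/endtoend/...

    # Verbose sandbox/toolchain diagnostics via build:debug.
    bazel build --config=debug //...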
@@ -1 +1 @@
-5.0.0
+6.1.0
@@ -43,4 +43,12 @@ build --flaky_test_attempts=5

 # Better caching
 build:nostamp --nostamp
-build:nostamp --workspace_status_command=./hack/workspace_status_ci.sh
+# Build metadata
+build --build_metadata=ROLE=CI
+build --build_metadata=REPO_URL=https://github.com/prysmaticlabs/prysm.git
+build --workspace_status_command=./hack/workspace_status_ci.sh
+
+# Buildbuddy
+build --bes_results_url=https://app.buildbuddy.io/invocation/
+build --bes_backend=grpcs://remote.buildbuddy.io
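The flags above come from a CI-only bazelrc whose file name is not shown in this capture. A hedged sketch of how a CI job would layer it on top of the regular .bazelrc; `ci.bazelrc` is a hypothetical path used only for illustration:

    # Apply the CI preset (BuildBuddy reporting, build metadata, nostamp caching) for one run.
    bazel --bazelrc=ci.bazelrc test //...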
@@ -11,7 +11,7 @@ name = "go"
 enabled = true

 [analyzers.meta]
-import_paths = ["github.com/prysmaticlabs/prysm"]
+import_paths = ["github.com/prysmaticlabs/prysm/v4"]

 [[analyzers]]
 name = "test-coverage"
.github/CODEOWNERS (8 lines changed)

@@ -6,11 +6,3 @@

 # Anyone on prylabs team can approve dependency updates.
 deps.bzl @prysmaticlabs/core-team
-
-# Radek and Nishant are responsible for changes that can affect the native state feature.
-# See https://www.notion.so/prysmaticlabs/Native-Beacon-State-Redesign-6cc9744b4ec1439bb34fa829b36a35c1
-/beacon-chain/state/fieldtrie/ @rkapka @nisdas
-/beacon-chain/state/v1/ @rkapka @nisdas
-/beacon-chain/state/v2/ @rkapka @nisdas
-/beacon-chain/state/v3/ @rkapka @nisdas
-/beacon-chain/state/state-native/ @rkapka @nisdas
.github/ISSUE_TEMPLATE/feature_request.md (6 lines changed)

@@ -16,12 +16,12 @@ Existing issues often contain information about workarounds, resolution, or prog

 ### Description

-<!-- ✍️--> A clear and concise description of the problem or missing capability...
+<!-- ✍️ A clear and concise description of the problem or missing capability... -->

 ### Describe the solution you'd like

-<!-- ✍️--> If you have a solution in mind, please describe it.
+<!-- ✍️ If you have a solution in mind, please describe it. -->

 ### Describe alternatives you've considered

-<!-- ✍️--> Have you considered any alternative solutions or workarounds?
+<!-- ✍️ Have you considered any alternative solutions or workarounds? -->
.github/actions/gofmt/Dockerfile (5 lines changed; file deleted)

@@ -1,5 +0,0 @@
-FROM cytopia/gofmt
-
-COPY entrypoint.sh /entrypoint.sh
-
-ENTRYPOINT ["/entrypoint.sh"]
.github/actions/gofmt/action.yml (12 lines changed; file deleted)

@@ -1,12 +0,0 @@
-name: 'Gofmt checker'
-description: 'Checks that all project files have been properly formatted.'
-inputs:
-  path:
-    description: 'Path to check'
-    required: true
-    default: './'
-runs:
-  using: 'docker'
-  image: 'Dockerfile'
-  args:
-    - ${{ inputs.path }}
.github/actions/gofmt/entrypoint.sh (15 lines changed; file deleted)

@@ -1,15 +0,0 @@
-#!/bin/sh -l
-set -e
-
-cd $GITHUB_WORKSPACE
-
-# Check if any files are not formatted.
-nonformatted="$(gofmt -l $1 2>&1)"
-
-# Return if `go fmt` passes.
-[ -z "$nonformatted" ] && exit 0
-
-# Notify of issues with formatting.
-echo "Following files need to be properly formatted:"
-echo "$nonformatted"
-exit 1
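With the custom gofmt action deleted, formatting is enforced through the golangci-lint job instead (gofmt is enabled in the linter config later in this diff). A rough local equivalent of the removed entrypoint script:

    # List files gofmt would rewrite; any output means formatting problems.
    gofmt -l .

    # Or run the same check through golangci-lint's gofmt linter only.
    golangci-lint run --disable-all -E gofmt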
.github/workflows/go.yml (36 lines changed)

@@ -18,18 +18,6 @@ jobs:
         id: gomodtidy
         uses: ./.github/actions/gomodtidy

-      - name: Gofmt checker
-        id: gofmt
-        uses: ./.github/actions/gofmt
-        with:
-          path: ./
-
-      - name: GoImports checker
-        id: goimports
-        uses: Jerome1337/goimports-action@v1.0.2
-        with:
-          goimports-path: ./
-
   gosec:
     name: Gosec scan
     runs-on: ubuntu-latest
@@ -38,14 +26,14 @@
     steps:
       - name: Checkout
        uses: actions/checkout@v2
-      - name: Set up Go 1.18
+      - name: Set up Go 1.20
        uses: actions/setup-go@v3
        with:
-          go-version: 1.18
+          go-version: '1.20'
       - name: Run Gosec Security Scanner
        run: | # https://github.com/securego/gosec/issues/469
          export PATH=$PATH:$(go env GOPATH)/bin
-          go install github.com/securego/gosec/v2/cmd/gosec@latest
+          go install github.com/securego/gosec/v2/cmd/gosec@v2.15.0
          gosec -exclude=G307 -exclude-dir=crypto/bls/herumi ./...

   lint:
@@ -55,18 +43,17 @@
       - name: Checkout
        uses: actions/checkout@v2

-      - name: Set up Go 1.18
+      - name: Set up Go 1.20
        uses: actions/setup-go@v3
        with:
-          go-version: 1.18
+          go-version: '1.20'
        id: go

       - name: Golangci-lint
-        uses: golangci/golangci-lint-action@v2
+        uses: golangci/golangci-lint-action@v3
        with:
-          args: --print-issued-lines --sort-results --no-config --timeout=10m --disable-all -E deadcode -E errcheck -E gosimple --skip-files=validator/web/site_data.go --skip-dirs=proto --go=1.18
-          version: v1.45.2
-          skip-go-installation: true
+          version: v1.52.2
+          args: --config=.golangci.yml --out-${NO_FUTURE}format colored-line-number

   build:
     name: Build
@@ -75,7 +62,7 @@
       - name: Set up Go 1.x
        uses: actions/setup-go@v2
        with:
-          go-version: 1.18
+          go-version: '1.20'
        id: go

       - name: Check out code into the Go module directory
@@ -88,11 +75,14 @@
       - name: Build
        # Use blst tag to allow go and bazel builds for blst.
        run: go build -v ./...
+        env:
+          CGO_CFLAGS: "-O -D__BLST_PORTABLE__"
        # fuzz leverage go tag based stubs at compile time.
        # Building and testing with these tags should be checked and enforced at pre-submit.
       - name: Test for fuzzing
        run: go test -tags=fuzz,develop ./... -test.run=^Fuzz
+        env:
+          CGO_CFLAGS: "-O -D__BLST_PORTABLE__"

       # Tests run via Bazel for now...
       # - name: Test
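The build and fuzz steps now export CGO_CFLAGS for the portable blst build, and the gosec step pins the scanner version. The same checks can be reproduced locally with the commands already shown in the workflow:

    # Build with the portable blst flags used by the workflow.
    CGO_CFLAGS="-O -D__BLST_PORTABLE__" go build -v ./...

    # Run the pinned gosec scan.
    go install github.com/securego/gosec/v2/cmd/gosec@v2.15.0
    gosec -exclude=G307 -exclude-dir=crypto/bls/herumi ./...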
.gitignore (6 lines changed)

@@ -35,3 +35,9 @@ bin

 # p2p metaData
 metaData
+
+# execution API authentication
+jwt.hex
+
+# manual testing
+tmp
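The new `jwt.hex` entry ignores the shared secret used to authenticate the beacon node's connection to the execution client over the engine API. One common way to create such a secret by hand (a sketch; any 32-byte hex value works):

    # Write a random 32-byte hex secret shared by the execution client and beacon node.
    openssl rand -hex 32 | tr -d '\n' > jwt.hex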
@@ -1,69 +1,32 @@
-linters-settings:
-  govet:
-    check-shadowing: true
-    settings:
-      printf:
-        funcs:
-          - (github.com/golangci/golangci-lint/pkg/logutils.Log).Infof
-          - (github.com/golangci/golangci-lint/pkg/logutils.Log).Warnf
-          - (github.com/golangci/golangci-lint/pkg/logutils.Log).Errorf
-          - (github.com/golangci/golangci-lint/pkg/logutils.Log).Fatalf
-  golint:
-    min-confidence: 0
-  gocyclo:
-    min-complexity: 10
-  maligned:
-    suggest-new: true
-  dupl:
-    threshold: 100
-  goconst:
-    min-len: 2
-    min-occurrences: 2
-  depguard:
-    list-type: blacklist
-    packages:
-      # logging is allowed only by logutils.Log, logrus
-      # is allowed to use only in logutils package
-      - github.com/sirupsen/logrus
-  misspell:
-    locale: US
-  lll:
-    line-length: 140
-  goimports:
-    local-prefixes: github.com/golangci/golangci-lint
-  gocritic:
-    enabled-tags:
-      - performance
-      - style
-      - experimental
-    disabled-checks:
-      - wrapperFunc
+run:
+  skip-files:
+    - validator/web/site_data.go
+    - .*_test.go
+  skip-dirs:
+    - proto
+    - tools/analyzers
+  timeout: 10m
+  go: '1.19'

 linters:
-  enable:
-    - deadcode
-    - goconst
-    - goimports
-    - golint
-    - gosec
-    - misspell
-    - structcheck
-    - typecheck
-    - unparam
-    - varcheck
-    - gofmt
-    - unused
   disable-all: true
+  enable:
+    - gofmt
+    - goimports
+    - unused
+    - errcheck
+    - gosimple
+    - gocognit
+    - dupword
+    - nilerr
+    - whitespace
+    - misspell

-run:
-  skip-dirs:
-    - proto/
-    - ^contracts/
-  deadline: 10m
+linters-settings:
+  gocognit:
+    # TODO: We should target for < 50
+    min-complexity: 65

-# golangci.com configuration
-# https://github.com/golangci/golangci/wiki/Configuration
-service:
-  golangci-lint-version: 1.15.0 # use the fixed version to not introduce new linters unexpectedly
-  prepare:
-    - echo "here I can run custom commands, but no preparation needed for this repo"
+output:
+  print-issued-lines: true
+  sort-results: true
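The lint job shown earlier points golangci-lint at this file via `--config=.golangci.yml` (the file name comes from that workflow change, not from this hunk). A sketch of the equivalent local run:

    # Install the linter version pinned in CI, then lint with the repository config.
    go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.52.2
    golangci-lint run --config=.golangci.yml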
@@ -12,7 +12,7 @@ exports_files([
     "LICENSE.md",
 ])

-# gazelle:prefix github.com/prysmaticlabs/prysm
+# gazelle:prefix github.com/prysmaticlabs/prysm/v4
 # gazelle:map_kind go_library go_library @prysm//tools/go:def.bzl
 # gazelle:map_kind go_test go_test @prysm//tools/go:def.bzl
 # gazelle:map_kind go_repository go_repository @prysm//tools/go:def.bzl
@@ -122,6 +122,7 @@ nogo(
         "//tools/analyzers/gocognit:go_default_library",
         "//tools/analyzers/ineffassign:go_default_library",
         "//tools/analyzers/interfacechecker:go_default_library",
+        "//tools/analyzers/logruswitherror:go_default_library",
         "//tools/analyzers/maligned:go_default_library",
         "//tools/analyzers/nop:go_default_library",
         "//tools/analyzers/properpermissions:go_default_library",
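The updated `# gazelle:prefix` directive makes regenerated BUILD files resolve imports against the new `/v4` module path. A hedged sketch of regenerating them; the `//:gazelle` target name is an assumption, not shown in this diff:

    # Rewrite go_library/go_test rules after source or import-path changes.
    bazel run //:gazelle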
@@ -1,4 +1,4 @@
-# Dependency Managagement in Prysm
+# Dependency Management in Prysm

 Prysm is go project with many complicated dependencies, including some c++ based libraries. There
 are two parts to Prysm's dependency management. Go modules and bazel managed dependencies. Be sure
@@ -28,7 +28,7 @@ including complicated c++ dependencies.
 One key advantage of Bazel over vanilla `go build` is that Bazel automatically (re)builds generated
 pb.go files at build time when file changes are present in any protobuf definition file or after
 any updates to the protobuf compiler or other relevant dependencies. Vanilla go users should run
-the following scripts often to ensure their generated files are up to date. Further more, Prysm
+the following scripts often to ensure their generated files are up to date. Furthermore, Prysm
 generates SSZ marshal related code based on defined data structures. These generated files must
 also be updated and checked in as frequently.

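Under Bazel the regeneration described above needs no separate step: building the affected packages re-runs the proto and SSZ code generation. A minimal sketch (a generic wildcard target, not one of the specific scripts the document refers to):

    # A build pulls freshly generated pb.go and SSZ marshal code into the outputs.
    bazel build //...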
@@ -2,14 +2,16 @@

 [](https://buildkite.com/prysmatic-labs/prysm)
 [](https://goreportcard.com/report/github.com/prysmaticlabs/prysm)
-[](https://github.com/ethereum/consensus-specs/tree/v1.1.10)
-[](https://discord.gg/CTYGPUJ)
+[](https://github.com/ethereum/consensus-specs/tree/v1.3.0)
+[](https://github.com/ethereum/execution-apis/tree/v1.0.0-beta.2/src/engine)
+[](https://discord.gg/prysmaticlabs)
+[](https://www.gitpoap.io/gh/prysmaticlabs/prysm)

 This is the core repository for Prysm, a [Golang](https://golang.org/) implementation of the [Ethereum Consensus](https://ethereum.org/en/eth2/) specification, developed by [Prysmatic Labs](https://prysmaticlabs.com). See the [Changelog](https://github.com/prysmaticlabs/prysm/releases) for details of the latest releases and upcoming breaking changes.

 ### Getting Started

-A detailed set of installation and usage instructions as well as breakdowns of each individual component are available in the [official documentation portal](https://docs.prylabs.network). If you still have questions, feel free to stop by our [Discord](https://discord.gg/CTYGPUJ).
+A detailed set of installation and usage instructions as well as breakdowns of each individual component are available in the [official documentation portal](https://docs.prylabs.network). If you still have questions, feel free to stop by our [Discord](https://discord.gg/prysmaticlabs).

 ### Staking on Mainnet
WORKSPACE (128 lines changed)

@@ -4,20 +4,23 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
 load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")

 http_archive(
-    name = "bazel_toolchains",
-    sha256 = "8e0633dfb59f704594f19ae996a35650747adc621ada5e8b9fb588f808c89cb0",
-    strip_prefix = "bazel-toolchains-3.7.0",
+    name = "rules_pkg",
+    sha256 = "8c20f74bca25d2d442b327ae26768c02cf3c99e93fad0381f32be9aab1967675",
     urls = [
-        "https://mirror.bazel.build/github.com/bazelbuild/bazel-toolchains/releases/download/3.7.0/bazel-toolchains-3.7.0.tar.gz",
-        "https://github.com/bazelbuild/bazel-toolchains/releases/download/3.7.0/bazel-toolchains-3.7.0.tar.gz",
+        "https://mirror.bazel.build/github.com/bazelbuild/rules_pkg/releases/download/0.8.1/rules_pkg-0.8.1.tar.gz",
+        "https://github.com/bazelbuild/rules_pkg/releases/download/0.8.1/rules_pkg-0.8.1.tar.gz",
     ],
 )

+load("@rules_pkg//:deps.bzl", "rules_pkg_dependencies")
+
+rules_pkg_dependencies()
+
 http_archive(
     name = "com_grail_bazel_toolchain",
-    sha256 = "040b9d00b8a03e8a28e38159ad0f2d0e0de625d93f453a9f226971a8c47e757b",
-    strip_prefix = "bazel-toolchain-5f82830f9d6a1941c3eb29683c1864ccf2862454",
-    urls = ["https://github.com/grailbio/bazel-toolchain/archive/5f82830f9d6a1941c3eb29683c1864ccf2862454.tar.gz"],
+    sha256 = "b210fc8e58782ef171f428bfc850ed7179bdd805543ebd1aa144b9c93489134f",
+    strip_prefix = "bazel-toolchain-83e69ba9e4b4fdad0d1d057fcb87addf77c281c9",
+    urls = ["https://github.com/grailbio/bazel-toolchain/archive/83e69ba9e4b4fdad0d1d057fcb87addf77c281c9.tar.gz"],
 )

 load("@com_grail_bazel_toolchain//toolchain:deps.bzl", "bazel_toolchain_dependencies")
@@ -28,7 +31,7 @@ load("@com_grail_bazel_toolchain//toolchain:rules.bzl", "llvm_toolchain")

 llvm_toolchain(
     name = "llvm_toolchain",
-    llvm_version = "10.0.0",
+    llvm_version = "13.0.1",
 )

 load("@llvm_toolchain//:toolchains.bzl", "llvm_register_toolchains")
@@ -39,10 +42,6 @@ load("@prysm//tools/cross-toolchain:prysm_toolchains.bzl", "configure_prysm_tool

 configure_prysm_toolchains()

-load("@prysm//tools/cross-toolchain:rbe_toolchains_config.bzl", "rbe_toolchains_config")
-
-rbe_toolchains_config()
-
 load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")

 http_archive(
@@ -76,9 +75,8 @@

 http_archive(
     name = "io_bazel_rules_docker",
-    sha256 = "1f4e59843b61981a96835dc4ac377ad4da9f8c334ebe5e0bb3f58f80c09735f4",
-    strip_prefix = "rules_docker-0.19.0",
-    urls = ["https://github.com/bazelbuild/rules_docker/releases/download/v0.19.0/rules_docker-v0.19.0.tar.gz"],
+    sha256 = "b1e80761a8a8243d03ebca8845e9cc1ba6c82ce7c5179ce2b295cd36f7e394bf",
+    urls = ["https://github.com/bazelbuild/rules_docker/releases/download/v0.25.0/rules_docker-v0.25.0.tar.gz"],
 )

 http_archive(
@@ -88,10 +86,10 @@
        # Expose internals of go_test for custom build transitions.
        "//third_party:io_bazel_rules_go_test.patch",
    ],
-    sha256 = "f2dcd210c7095febe54b804bb1cd3a58fe8435a909db2ec04e31542631cf715c",
+    sha256 = "6b65cb7917b4d1709f9410ffe00ecf3e160edf674b78c54a894471320862184f",
    urls = [
-        "https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.31.0/rules_go-v0.31.0.zip",
-        "https://github.com/bazelbuild/rules_go/releases/download/v0.31.0/rules_go-v0.31.0.zip",
+        "https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.39.0/rules_go-v0.39.0.zip",
+        "https://github.com/bazelbuild/rules_go/releases/download/v0.39.0/rules_go-v0.39.0.zip",
    ],
 )

@@ -110,13 +108,6 @@ git_repository(
     # gazelle args: -go_prefix github.com/gogo/protobuf -proto legacy
 )

-http_archive(
-    name = "fuzzit_linux",
-    build_file_content = "exports_files([\"fuzzit\"])",
-    sha256 = "9ca76ac1c22d9360936006efddf992977ebf8e4788ded8e5f9d511285c9ac774",
-    urls = ["https://github.com/fuzzitdev/fuzzit/releases/download/v2.4.76/fuzzit_Linux_x86_64.zip"],
-)
-
 load(
     "@io_bazel_rules_docker//repositories:repositories.bzl",
     container_repositories = "repositories",
@@ -129,32 +120,36 @@ load(
     "container_pull",
 )

+# Pulled gcr.io/distroless/cc-debian11:latest on 2022-02-23
 container_pull(
-    name = "cc_image_base",
-    digest = "sha256:2c4bb6b7236db0a55ec54ba8845e4031f5db2be957ac61867872bf42e56c4deb",
+    name = "cc_image_base_amd64",
+    digest = "sha256:2a0daf90a7deb78465bfca3ef2eee6e91ce0a5706059f05d79d799a51d339523",
     registry = "gcr.io",
-    repository = "distroless/cc",
+    repository = "distroless/cc-debian11",
 )

+# Pulled gcr.io/distroless/cc-debian11:debug on 2022-02-23
 container_pull(
-    name = "cc_debug_image_base",
-    digest = "sha256:3680c61e81f68fc00bfb5e1ec65e8e678aaafa7c5f056bc2681c29527ebbb30c",
+    name = "cc_debug_image_base_amd64",
+    digest = "sha256:7bd596f5f200588f13a69c268eea6ce428b222b67cd7428d6a7fef95e75c052a",
     registry = "gcr.io",
-    repository = "distroless/cc",
+    repository = "distroless/cc-debian11",
 )

+# Pulled from gcr.io/distroless/base-debian11:latest on 2022-02-23
 container_pull(
-    name = "go_image_base",
-    digest = "sha256:ba7a315f86771332e76fa9c3d423ecfdbb8265879c6f1c264d6fff7d4fa460a4",
+    name = "go_image_base_amd64",
+    digest = "sha256:34e682800774ecbd0954b1663d90238505f1ba5543692dbc75feef7dd4839e90",
     registry = "gcr.io",
-    repository = "distroless/base",
+    repository = "distroless/base-debian11",
 )

+# Pulled from gcr.io/distroless/base-debian11:debug on 2022-02-23
 container_pull(
-    name = "go_debug_image_base",
-    digest = "sha256:efd8711717d9e9b5d0dbb20ea10876dab0609c923bc05321b912f9239090ca80",
+    name = "go_debug_image_base_amd64",
+    digest = "sha256:0f503c6bfd207793bc416f20a35bf6b75d769a903c48f180ad73f60f7b60d7bd",
     registry = "gcr.io",
-    repository = "distroless/base",
+    repository = "distroless/base-debian11",
 )

 container_pull(
@@ -164,35 +159,15 @@ container_pull(
     repository = "pinglamb/alpine-glibc",
 )

-container_pull(
-    name = "fuzzit_base",
-    digest = "sha256:24a39a4360b07b8f0121eb55674a2e757ab09f0baff5569332fefd227ee4338f",
-    registry = "gcr.io",
-    repository = "fuzzit-public/stretch-llvm8",
-)
-
 load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies")

 go_rules_dependencies()

 go_register_toolchains(
-    go_version = "1.18.1",
+    go_version = "1.20.3",
     nogo = "@//:nogo",
 )

-http_archive(
-    name = "prysm_testnet_site",
-    build_file_content = """
-proto_library(
-    name = "faucet_proto",
-    srcs = ["src/proto/faucet.proto"],
-    visibility = ["//visibility:public"],
-)""",
-    sha256 = "29742136ff9faf47343073c4569a7cf21b8ed138f726929e09e3c38ab83544f7",
-    strip_prefix = "prysm-testnet-site-5c711600f0a77fc553b18cf37b880eaffef4afdb",
-    url = "https://github.com/prestonvanloon/prysm-testnet-site/archive/5c711600f0a77fc553b18cf37b880eaffef4afdb.tar.gz",
-)
-
 http_archive(
     name = "io_kubernetes_build",
     sha256 = "b84fbd1173acee9d02a7d3698ad269fdf4f7aa081e9cecd40e012ad0ad8cfa2a",
@@ -215,7 +190,22 @@ filegroup(
     url = "https://github.com/eth-clients/slashing-protection-interchange-tests/archive/b8413ca42dc92308019d0d4db52c87e9e125c4e9.tar.gz",
 )

-consensus_spec_version = "v1.2.0-rc.1"
+http_archive(
+    name = "eip4881_spec_tests",
+    build_file_content = """
+filegroup(
+    name = "test_data",
+    srcs = glob([
+        "**/*.yaml",
+    ]),
+    visibility = ["//visibility:public"],
+)
+""",
+    sha256 = "89cb659498c0d196fc9f957f8b849b2e1a5c041c3b2b3ae5432ac5c26944297e",
+    url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
+)
+
+consensus_spec_version = "v1.3.0"

 bls_test_version = "v0.1.1"

@@ -231,7 +221,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    sha256 = "9c93f87378aaa6d6fe1c67b396eac2aacc9594af2a83f028cb99c95dea5b81df",
+    sha256 = "1c806e04ac5e3779032c06a6009350b3836b6809bb23812993d6ececd7047cf5",
     url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
 )

@@ -247,7 +237,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    sha256 = "52f2c52415228cee8a4de5a09abff785f439a77dfef8f03e834e4e16857673c1",
+    sha256 = "2b42796dc5ccd9f1246032d0c17663e20f70334ff7e00325f0fc3af28cb24186",
     url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
 )

@@ -263,7 +253,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    sha256 = "022dcc0d6de7dd27b337a0d1b945077eaf5ee47000700395a693fc25e12f96df",
+    sha256 = "231e3371e81ce9acde65d2910ec4580587e74dbbcfcbd9c675e473e022deec8a",
     url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
|
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -278,7 +268,7 @@ filegroup(
|
|||||||
visibility = ["//visibility:public"],
|
visibility = ["//visibility:public"],
|
||||||
)
|
)
|
||||||
""",
|
""",
|
||||||
sha256 = "0a9c110305cbd6ebbe0d942f0f33e6ce22dd484ce4ceed277bf185a091941cde",
|
sha256 = "219b74d95664ea7e8dfbf31162dfa206b9c0cf45919ea86db5fa0f8902977e3c",
|
||||||
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
|
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
|
||||||
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
|
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
|
||||||
)
|
)
|
||||||
@@ -309,9 +299,9 @@ filegroup(
|
|||||||
visibility = ["//visibility:public"],
|
visibility = ["//visibility:public"],
|
||||||
)
|
)
|
||||||
""",
|
""",
|
||||||
sha256 = "4e8a18b21d056c4032605621b1a6632198eabab57cb90c61e273f344c287f1b2",
|
sha256 = "2701e1e1a3ec10c673fe7dbdbbe6f02c8ae8c922aebbf6e720d8c72d5458aafe",
|
||||||
strip_prefix = "eth2-networks-791a5369c5981e829698b17fbcdcdacbdaba97c8",
|
strip_prefix = "eth2-networks-7b4897888cebef23801540236f73123e21774954",
|
||||||
url = "https://github.com/eth-clients/eth2-networks/archive/791a5369c5981e829698b17fbcdcdacbdaba97c8.tar.gz",
|
url = "https://github.com/eth-clients/eth2-networks/archive/7b4897888cebef23801540236f73123e21774954.tar.gz",
|
||||||
)
|
)
|
||||||
|
|
||||||
http_archive(
|
http_archive(
|
||||||
@@ -342,9 +332,9 @@ filegroup(
|
|||||||
visibility = ["//visibility:public"],
|
visibility = ["//visibility:public"],
|
||||||
)
|
)
|
||||||
""",
|
""",
|
||||||
sha256 = "4797a7e594a5b1f4c1c8080701613f3ee451b01ec0861499ea7d9b60877a6b23",
|
sha256 = "5006614c33e358699b4e072c649cd4c3866f7d41a691449d5156f6c6e07a4c60",
|
||||||
urls = [
|
urls = [
|
||||||
"https://github.com/prysmaticlabs/prysm-web-ui/releases/download/v1.0.3/prysm-web-ui.tar.gz",
|
"https://github.com/prysmaticlabs/prysm-web-ui/releases/download/v2.0.3/prysm-web-ui.tar.gz",
|
||||||
],
|
],
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|||||||
@@ -8,7 +8,7 @@ go_library(
        "doc.go",
        "errors.go",
    ],
-    importpath = "github.com/prysmaticlabs/prysm/api/client/beacon",
+    importpath = "github.com/prysmaticlabs/prysm/v4/api/client/beacon",
    visibility = ["//visibility:public"],
    deps = [
        "//beacon-chain/core/helpers:go_default_library",
@@ -20,6 +20,7 @@ go_library(
        "//encoding/ssz/detect:go_default_library",
        "//io/file:go_default_library",
        "//network/forks:go_default_library",
+        "//proto/eth/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//runtime/version:go_default_library",
        "//time/slots:go_default_library",
@@ -40,8 +41,9 @@ go_test(
    deps = [
        "//beacon-chain/state:go_default_library",
        "//config/params:go_default_library",
+        "//consensus-types/blocks:go_default_library",
+        "//consensus-types/blocks/testing:go_default_library",
        "//consensus-types/primitives:go_default_library",
-        "//consensus-types/wrapper:go_default_library",
        "//encoding/ssz/detect:go_default_library",
        "//network/forks:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
@@ -6,29 +6,28 @@ import (
    "path"

    "github.com/pkg/errors"
-    "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
+    "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
-    "github.com/prysmaticlabs/prysm/beacon-chain/state"
+    "github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
-    "github.com/prysmaticlabs/prysm/consensus-types/interfaces"
+    "github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
-    types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
+    "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
-    "github.com/prysmaticlabs/prysm/encoding/ssz/detect"
+    "github.com/prysmaticlabs/prysm/v4/encoding/ssz/detect"
-    "github.com/prysmaticlabs/prysm/io/file"
+    "github.com/prysmaticlabs/prysm/v4/io/file"
-    "github.com/prysmaticlabs/prysm/runtime/version"
+    "github.com/prysmaticlabs/prysm/v4/runtime/version"
-    "github.com/prysmaticlabs/prysm/time/slots"
+    "github.com/prysmaticlabs/prysm/v4/time/slots"
    log "github.com/sirupsen/logrus"
    "golang.org/x/mod/semver"
)

-// OriginData represents the BeaconState and SignedBeaconBlock necessary to start an empty Beacon Node
+// OriginData represents the BeaconState and ReadOnlySignedBeaconBlock necessary to start an empty Beacon Node
// using Checkpoint Sync.
type OriginData struct {
-    wsd *WeakSubjectivityData
-    sb  []byte
-    bb  []byte
-    st  state.BeaconState
-    b   interfaces.SignedBeaconBlock
-    vu  *detect.VersionedUnmarshaler
-    br  [32]byte
-    sr  [32]byte
+    sb []byte
+    bb []byte
+    st state.BeaconState
+    b  interfaces.ReadOnlySignedBeaconBlock
+    vu *detect.VersionedUnmarshaler
+    br [32]byte
+    sr [32]byte
}

// SaveBlock saves the downloaded block to a unique file in the given path.
@@ -50,12 +49,12 @@ func (o *OriginData) StateBytes() []byte {
    return o.sb
}

-// BlockBytes returns the ssz-encoded bytes of the downloaded SignedBeaconBlock value.
+// BlockBytes returns the ssz-encoded bytes of the downloaded ReadOnlySignedBeaconBlock value.
func (o *OriginData) BlockBytes() []byte {
    return o.bb
}

-func fname(prefix string, vu *detect.VersionedUnmarshaler, slot types.Slot, root [32]byte) string {
+func fname(prefix string, vu *detect.VersionedUnmarshaler, slot primitives.Slot, root [32]byte) string {
    return fmt.Sprintf("%s_%s_%s_%d-%#x.ssz", prefix, vu.Config.ConfigName, version.String(vu.Fork), slot, root)
}

@@ -75,6 +74,9 @@ func DownloadFinalizedData(ctx context.Context, client *Client) (*OriginData, er
    if err != nil {
        return nil, errors.Wrap(err, "error unmarshaling finalized state to correct version")
    }
+    if s.Slot() != s.LatestBlockHeader().Slot {
+        return nil, fmt.Errorf("finalized state slot does not match latest block header slot %d != %d", s.Slot(), s.LatestBlockHeader().Slot)
+    }

    sr, err := s.HashTreeRoot(ctx)
    if err != nil {
@@ -101,8 +103,8 @@ func DownloadFinalizedData(ctx context.Context, client *Client) (*OriginData, er
    }

    log.Printf("BeaconState slot=%d, Block slot=%d", s.Slot(), b.Block().Slot())
-    log.Printf("BeaconState htr=%#xd, Block state_root=%#x", sr, b.Block().StateRoot())
+    log.Printf("BeaconState htr=%#x, Block state_root=%#x", sr, b.Block().StateRoot())
-    log.Printf("BeaconState latest_block_header htr=%#xd, block htr=%#x", br, realBlockRoot)
+    log.Printf("BeaconState latest_block_header htr=%#x, block htr=%#x", br, realBlockRoot)
    return &OriginData{
        st: s,
        b:  b,
@@ -114,17 +116,17 @@ func DownloadFinalizedData(ctx context.Context, client *Client) (*OriginData, er
    }, nil
}

-// WeakSubjectivityData represents the state root, block root and epoch of the BeaconState + SignedBeaconBlock
+// WeakSubjectivityData represents the state root, block root and epoch of the BeaconState + ReadOnlySignedBeaconBlock
// that falls at the beginning of the current weak subjectivity period. These values can be used to construct
// a weak subjectivity checkpoint beacon node flag to be used for validation.
type WeakSubjectivityData struct {
    BlockRoot [32]byte
    StateRoot [32]byte
-    Epoch     types.Epoch
+    Epoch     primitives.Epoch
}

// CheckpointString returns the standard string representation of a Checkpoint.
-// The format is a a hex-encoded block root, followed by the epoch of the block, separated by a colon. For example:
+// The format is a hex-encoded block root, followed by the epoch of the block, separated by a colon. For example:
// "0x1c35540cac127315fabb6bf29181f2ae0de1a3fc909d2e76ba771e61312cc49a:74888"
func (wsd *WeakSubjectivityData) CheckpointString() string {
    return fmt.Sprintf("%#x:%d", wsd.BlockRoot, wsd.Epoch)
@@ -236,7 +238,7 @@ func computeBackwardsCompatible(ctx context.Context, client *Client) (*WeakSubje

// this method downloads the head state, which can be used to find the correct chain config
// and use prysm's helper methods to compute the latest weak subjectivity epoch.
-func getWeakSubjectivityEpochFromHead(ctx context.Context, client *Client) (types.Epoch, error) {
+func getWeakSubjectivityEpochFromHead(ctx context.Context, client *Client) (primitives.Epoch, error) {
    headBytes, err := client.GetState(ctx, IdHead)
    if err != nil {
        return 0, err
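The checkpoint helpers above keep the same download flow while moving to the v4 module path and the read-only block interface. For context, a minimal usage sketch of DownloadFinalizedData follows; it is not part of the diff, and the NewClient constructor plus the host and file names are assumed for illustration.

package main

import (
    "context"
    "log"
    "os"

    "github.com/prysmaticlabs/prysm/v4/api/client/beacon"
)

func main() {
    ctx := context.Background()
    // NewClient is assumed to exist elsewhere in this package; the URL is a placeholder.
    client, err := beacon.NewClient("http://localhost:3500")
    if err != nil {
        log.Fatal(err)
    }
    od, err := beacon.DownloadFinalizedData(ctx, client)
    if err != nil {
        log.Fatal(err)
    }
    // StateBytes and BlockBytes return the ssz-encoded origin data described above.
    if err := os.WriteFile("origin_state.ssz", od.StateBytes(), 0o600); err != nil {
        log.Fatal(err)
    }
    if err := os.WriteFile("origin_block.ssz", od.BlockBytes(), 0o600); err != nil {
        log.Fatal(err)
    }
}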
@@ -10,21 +10,21 @@ import (
    "net/url"
    "testing"

-    "github.com/prysmaticlabs/prysm/consensus-types/wrapper"
+    "github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
+    "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
+    blocktest "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks/testing"
+    "github.com/prysmaticlabs/prysm/v4/network/forks"
+    ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
+    "github.com/prysmaticlabs/prysm/v4/testing/util"
+    "github.com/prysmaticlabs/prysm/v4/time/slots"

-    "github.com/prysmaticlabs/prysm/beacon-chain/state"
+    "github.com/prysmaticlabs/prysm/v4/config/params"
-    "github.com/prysmaticlabs/prysm/network/forks"
+    "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
-    ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
+    "github.com/prysmaticlabs/prysm/v4/encoding/ssz/detect"
-    "github.com/prysmaticlabs/prysm/testing/util"
+    "github.com/prysmaticlabs/prysm/v4/runtime/version"
-    "github.com/prysmaticlabs/prysm/time/slots"
-
-    "github.com/prysmaticlabs/prysm/config/params"
-    types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
-    "github.com/prysmaticlabs/prysm/encoding/ssz/detect"
-    "github.com/prysmaticlabs/prysm/runtime/version"

    "github.com/pkg/errors"
-    "github.com/prysmaticlabs/prysm/testing/require"
+    "github.com/prysmaticlabs/prysm/v4/testing/require"
)

type testRT struct {
@@ -102,7 +102,7 @@ func TestFname(t *testing.T) {
        Config: params.MainnetConfig(),
        Fork:   version.Phase0,
    }
-    slot := types.Slot(23)
+    slot := primitives.Slot(23)
    prefix := "block"
    var root [32]byte
    copy(root[:], []byte{0x23, 0x23, 0x23})
@@ -134,10 +134,14 @@ func TestDownloadWeakSubjectivityCheckpoint(t *testing.T) {
    require.NoError(t, wst.SetFork(fork))

    // set up checkpoint block
-    b, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
+    b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
-    require.NoError(t, wrapper.SetBlockParentRoot(b, cfg.ZeroHash))
+    require.NoError(t, err)
-    require.NoError(t, wrapper.SetBlockSlot(b, wSlot))
+    b, err = blocktest.SetBlockParentRoot(b, cfg.ZeroHash)
-    require.NoError(t, wrapper.SetProposerIndex(b, 0))
+    require.NoError(t, err)
+    b, err = blocktest.SetBlockSlot(b, wSlot)
+    require.NoError(t, err)
+    b, err = blocktest.SetProposerIndex(b, 0)
+    require.NoError(t, err)

    // set up state header pointing at checkpoint block - this is how the block is downloaded by root
    header, err := b.Header()
@@ -151,7 +155,8 @@ func TestDownloadWeakSubjectivityCheckpoint(t *testing.T) {
    wRoot, err := wst.HashTreeRoot(ctx)
    require.NoError(t, err)

-    require.NoError(t, wrapper.SetBlockStateRoot(b, wRoot))
+    b, err = blocktest.SetBlockStateRoot(b, wRoot)
+    require.NoError(t, err)
    serBlock, err := b.MarshalSSZ()
    require.NoError(t, err)
    bRoot, err := b.Block().HashTreeRoot()
@@ -230,10 +235,14 @@ func TestDownloadBackwardsCompatibleCombined(t *testing.T) {
    require.NoError(t, wst.SetFork(fork))

    // set up checkpoint block
-    b, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
+    b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
-    require.NoError(t, wrapper.SetBlockParentRoot(b, cfg.ZeroHash))
+    require.NoError(t, err)
-    require.NoError(t, wrapper.SetBlockSlot(b, wSlot))
+    b, err = blocktest.SetBlockParentRoot(b, cfg.ZeroHash)
-    require.NoError(t, wrapper.SetProposerIndex(b, 0))
+    require.NoError(t, err)
+    b, err = blocktest.SetBlockSlot(b, wSlot)
+    require.NoError(t, err)
+    b, err = blocktest.SetProposerIndex(b, 0)
+    require.NoError(t, err)

    // set up state header pointing at checkpoint block - this is how the block is downloaded by root
    header, err := b.Header()
@@ -247,7 +256,8 @@ func TestDownloadBackwardsCompatibleCombined(t *testing.T) {
    wRoot, err := wst.HashTreeRoot(ctx)
    require.NoError(t, err)

-    require.NoError(t, wrapper.SetBlockStateRoot(b, wRoot))
+    b, err = blocktest.SetBlockStateRoot(b, wRoot)
+    require.NoError(t, err)
    serBlock, err := b.MarshalSSZ()
    require.NoError(t, err)
    bRoot, err := b.Block().HashTreeRoot()
@@ -325,7 +335,7 @@ func TestGetWeakSubjectivityEpochFromHead(t *testing.T) {
    require.Equal(t, expectedEpoch, actualEpoch)
}

-func forkForEpoch(cfg *params.BeaconChainConfig, epoch types.Epoch) (*ethpb.Fork, error) {
+func forkForEpoch(cfg *params.BeaconChainConfig, epoch primitives.Epoch) (*ethpb.Fork, error) {
    os := forks.NewOrderedSchedule(cfg)
    currentVersion, err := os.VersionForEpoch(epoch)
    if err != nil {
@@ -347,7 +357,7 @@ func forkForEpoch(cfg *params.BeaconChainConfig, epoch types.Epoch) (*ethpb.Fork
    }, nil
}

-func defaultTestHeadState(t *testing.T, cfg *params.BeaconChainConfig) (state.BeaconState, types.Epoch) {
+func defaultTestHeadState(t *testing.T, cfg *params.BeaconChainConfig) (state.BeaconState, primitives.Epoch) {
    st, err := util.NewBeaconStateAltair()
    require.NoError(t, err)

@@ -388,11 +398,7 @@ func populateValidators(cfg *params.BeaconChainConfig, st state.BeaconState, val
    if err := st.SetValidators(validators); err != nil {
        return err
    }
-    if err := st.SetBalances(balances); err != nil {
-        return err
-    }
-
-    return nil
+    return st.SetBalances(balances)
}

func TestDownloadFinalizedData(t *testing.T) {
@@ -408,12 +414,17 @@ func TestDownloadFinalizedData(t *testing.T) {
    require.NoError(t, err)
    fork, err := forkForEpoch(cfg, epoch)
    require.NoError(t, st.SetFork(fork))
+    require.NoError(t, st.SetSlot(slot))

    // set up checkpoint block
-    b, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
+    b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
-    require.NoError(t, wrapper.SetBlockParentRoot(b, cfg.ZeroHash))
+    require.NoError(t, err)
-    require.NoError(t, wrapper.SetBlockSlot(b, slot))
+    b, err = blocktest.SetBlockParentRoot(b, cfg.ZeroHash)
-    require.NoError(t, wrapper.SetProposerIndex(b, 0))
+    require.NoError(t, err)
+    b, err = blocktest.SetBlockSlot(b, slot)
+    require.NoError(t, err)
+    b, err = blocktest.SetProposerIndex(b, 0)
+    require.NoError(t, err)

    // set up state header pointing at checkpoint block - this is how the block is downloaded by root
    header, err := b.Header()
@@ -427,7 +438,8 @@ func TestDownloadFinalizedData(t *testing.T) {
    sr, err := st.HashTreeRoot(ctx)
    require.NoError(t, err)

-    require.NoError(t, wrapper.SetBlockStateRoot(b, sr))
+    b, err = blocktest.SetBlockStateRoot(b, sr)
+    require.NoError(t, err)
    mb, err := b.MarshalSSZ()
    require.NoError(t, err)
    br, err := b.Block().HashTreeRoot()
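The recurring edit in this test file is the move from wrapper.Set* mutators to blocktest.Set* helpers that return a fresh block, which follows from blocks now being handled through the ReadOnlySignedBeaconBlock interface rather than mutated in place. A minimal sketch of that pattern (the slot value is illustrative):

b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
// Each setter copies and returns a new block instead of mutating the read-only wrapper.
b, err = blocktest.SetBlockSlot(b, 42)
require.NoError(t, err)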
@@ -16,25 +16,28 @@ import (
    "text/template"
    "time"

-    "github.com/prysmaticlabs/prysm/network/forks"
+    "github.com/prysmaticlabs/prysm/v4/network/forks"
+    v1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"

    "github.com/ethereum/go-ethereum/common/hexutil"
    "github.com/pkg/errors"
-    "github.com/prysmaticlabs/prysm/beacon-chain/rpc/apimiddleware"
+    "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/apimiddleware"
-    types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
+    "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
-    "github.com/prysmaticlabs/prysm/encoding/bytesutil"
+    "github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
-    ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
+    ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
    log "github.com/sirupsen/logrus"
)

const (
    getSignedBlockPath = "/eth/v2/beacon/blocks"
    getBlockRootPath = "/eth/v1/beacon/blocks/{{.Id}}/root"
    getForkForStatePath = "/eth/v1/beacon/states/{{.Id}}/fork"
    getWeakSubjectivityPath = "/eth/v1/beacon/weak_subjectivity"
    getForkSchedulePath = "/eth/v1/config/fork_schedule"
+    getConfigSpecPath = "/eth/v1/config/spec"
    getStatePath = "/eth/v2/debug/beacon/states"
    getNodeVersionPath = "/eth/v1/node/version"
+    changeBLStoExecutionPath = "/eth/v1/beacon/pool/bls_to_execution_changes"
)

// StateOrBlockId represents the block_id / state_id parameters that several of the Eth Beacon API methods accept.
@@ -61,7 +64,7 @@ func IdFromRoot(r [32]byte) StateOrBlockId {

// IdFromSlot encodes a Slot in the format expected by the API in places where a slot can be used to identify
// a BeaconState or SignedBeaconBlock.
-func IdFromSlot(s types.Slot) StateOrBlockId {
+func IdFromSlot(s primitives.Slot) StateOrBlockId {
    return StateOrBlockId(strconv.FormatUint(uint64(s), 10))
}

@@ -95,8 +98,6 @@ func WithTimeout(timeout time.Duration) ClientOpt {
// Client provides a collection of helper methods for calling the Eth Beacon Node API endpoints.
type Client struct {
    hc *http.Client
-    host string
-    scheme string
    baseURL *url.URL
}

@@ -148,7 +149,6 @@ func withSSZEncoding() reqOption {
// get is a generic, opinionated GET function to reduce boilerplate amongst the getters in this package.
func (c *Client) get(ctx context.Context, path string, opts ...reqOption) ([]byte, error) {
    u := c.baseURL.ResolveReference(&url.URL{Path: path})
-    log.Printf("requesting %s", u.String())
    req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)
    if err != nil {
        return nil, err
@@ -254,6 +254,20 @@ func (c *Client) GetForkSchedule(ctx context.Context) (forks.OrderedSchedule, er
    return ofs, nil
}

+// GetConfigSpec retrieve the current configs of the network used by the beacon node.
+func (c *Client) GetConfigSpec(ctx context.Context) (*v1.SpecResponse, error) {
+    body, err := c.get(ctx, getConfigSpecPath)
+    if err != nil {
+        return nil, errors.Wrap(err, "error requesting configSpecPath")
+    }
+    fsr := &v1.SpecResponse{}
+    err = json.Unmarshal(body, fsr)
+    if err != nil {
+        return nil, err
+    }
+    return fsr, nil
+}
+
type NodeVersion struct {
    implementation string
    semver string
@@ -339,12 +353,66 @@ func (c *Client) GetWeakSubjectivity(ctx context.Context) (*WeakSubjectivityData
        return nil, err
    }
    return &WeakSubjectivityData{
-        Epoch:     types.Epoch(epoch),
+        Epoch:     primitives.Epoch(epoch),
        BlockRoot: bytesutil.ToBytes32(blockRoot),
        StateRoot: bytesutil.ToBytes32(stateRoot),
    }, nil
}

+// SubmitChangeBLStoExecution calls a beacon API endpoint to set the withdrawal addresses based on the given signed messages.
+// If the API responds with something other than OK there will be failure messages associated to the corresponding request message.
+func (c *Client) SubmitChangeBLStoExecution(ctx context.Context, request []*apimiddleware.SignedBLSToExecutionChangeJson) error {
+    u := c.baseURL.ResolveReference(&url.URL{Path: changeBLStoExecutionPath})
+    body, err := json.Marshal(request)
+    if err != nil {
+        return errors.Wrap(err, "failed to marshal JSON")
+    }
+    req, err := http.NewRequestWithContext(ctx, http.MethodPost, u.String(), bytes.NewBuffer(body))
+    if err != nil {
+        return errors.Wrap(err, "invalid format, failed to create new POST request object")
+    }
+    req.Header.Set("Content-Type", "application/json")
+    resp, err := c.hc.Do(req)
+    if err != nil {
+        return err
+    }
+    defer func() {
+        err = resp.Body.Close()
+    }()
+    if resp.StatusCode != http.StatusOK {
+        decoder := json.NewDecoder(resp.Body)
+        decoder.DisallowUnknownFields()
+        errorJson := &apimiddleware.IndexedVerificationFailureErrorJson{}
+        if err := decoder.Decode(errorJson); err != nil {
+            return errors.Wrapf(err, "failed to decode error JSON for %s", resp.Request.URL)
+        }
+        for _, failure := range errorJson.Failures {
+            w := request[failure.Index].Message
+            log.WithFields(log.Fields{
+                "validator_index":    w.ValidatorIndex,
+                "withdrawal_address": w.ToExecutionAddress,
+            }).Error(failure.Message)
+        }
+        return errors.Errorf("POST error %d: %s", errorJson.Code, errorJson.Message)
+    }
+    return nil
+}
+
+// GetBLStoExecutionChanges gets all the set withdrawal messages in the node's operation pool.
+// Returns a struct representation of json response.
+func (c *Client) GetBLStoExecutionChanges(ctx context.Context) (*apimiddleware.BLSToExecutionChangesPoolResponseJson, error) {
+    body, err := c.get(ctx, changeBLStoExecutionPath)
+    if err != nil {
+        return nil, err
+    }
+    poolResponse := &apimiddleware.BLSToExecutionChangesPoolResponseJson{}
+    err = json.Unmarshal(body, poolResponse)
+    if err != nil {
+        return nil, err
+    }
+    return poolResponse, nil
+}
+
func non200Err(response *http.Response) error {
    bodyBytes, err := io.ReadAll(response.Body)
    var body string
@@ -390,7 +458,7 @@ func (f *forkResponse) Fork() (*ethpb.Fork, error) {
    return &ethpb.Fork{
        CurrentVersion:  cSlice,
        PreviousVersion: pSlice,
-        Epoch:           types.Epoch(epoch),
+        Epoch:           primitives.Epoch(epoch),
    }, nil
}

@@ -415,7 +483,7 @@ func (fsr *forkScheduleResponse) OrderedForkSchedule() (forks.OrderedSchedule, e
        version := bytesutil.ToBytes4(vSlice)
        ofs = append(ofs, forks.ForkScheduleEntry{
            Version: version,
-            Epoch:   types.Epoch(uint64(epoch)),
+            Epoch:   primitives.Epoch(uint64(epoch)),
        })
    }
    sort.Sort(ofs)
|
|||||||
"net/url"
|
"net/url"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/prysmaticlabs/prysm/testing/require"
|
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestParseNodeVersion(t *testing.T) {
|
func TestParseNodeVersion(t *testing.T) {
|
||||||
|
|||||||
@@ -1,6 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
Package beacon provides a client for interacting with the standard Eth Beacon Node API.
|
Package beacon provides a client for interacting with the standard Eth Beacon Node API.
|
||||||
Interactive swagger documentation for the API is available here: https://ethereum.github.io/beacon-APIs/
|
Interactive swagger documentation for the API is available here: https://ethereum.github.io/beacon-APIs/
|
||||||
|
|
||||||
*/
|
*/
|
||||||
package beacon
|
package beacon
|
||||||
|
|||||||
@@ -3,20 +3,31 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
    name = "go_default_library",
    srcs = [
+        "bid.go",
        "client.go",
        "errors.go",
        "types.go",
    ],
-    importpath = "github.com/prysmaticlabs/prysm/api/client/builder",
+    importpath = "github.com/prysmaticlabs/prysm/v4/api/client/builder",
    visibility = ["//visibility:public"],
    deps = [
+        "//consensus-types:go_default_library",
+        "//consensus-types/blocks:go_default_library",
+        "//consensus-types/interfaces:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//encoding/bytesutil:go_default_library",
+        "//math:go_default_library",
+        "//monitoring/tracing:go_default_library",
+        "//network:go_default_library",
+        "//network/authorization:go_default_library",
        "//proto/engine/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
+        "//runtime/version:go_default_library",
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
+        "@com_github_prysmaticlabs_fastssz//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
+        "@io_opencensus_go//trace:go_default_library",
    ],
)

@@ -29,13 +40,16 @@ go_test(
    data = glob(["testdata/**"]),
    embed = [":go_default_library"],
    deps = [
-        "//config/fieldparams:go_default_library",
+        "//config/params:go_default_library",
+        "//consensus-types/blocks:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//proto/engine/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
+        "//testing/assert:go_default_library",
        "//testing/require:go_default_library",
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
+        "@com_github_golang_protobuf//proto:go_default_library",
        "@com_github_prysmaticlabs_go_bitfield//:go_default_library",
    ],
)
api/client/builder/bid.go (new file, 201 lines)
@@ -0,0 +1,201 @@
package builder

import (
    "math/big"

    "github.com/pkg/errors"
    ssz "github.com/prysmaticlabs/fastssz"
    consensus_types "github.com/prysmaticlabs/prysm/v4/consensus-types"
    "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
    "github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
    "github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
    "github.com/prysmaticlabs/prysm/v4/math"
    ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/v4/runtime/version"
)

// SignedBid is an interface describing the method set of a signed builder bid.
type SignedBid interface {
    Message() (Bid, error)
    Signature() []byte
    Version() int
    IsNil() bool
}

// Bid is an interface describing the method set of a builder bid.
type Bid interface {
    Header() (interfaces.ExecutionData, error)
    Value() []byte
    Pubkey() []byte
    Version() int
    IsNil() bool
    HashTreeRoot() ([32]byte, error)
    HashTreeRootWith(hh *ssz.Hasher) error
}

type signedBuilderBid struct {
    p *ethpb.SignedBuilderBid
}

// WrappedSignedBuilderBid is a constructor which wraps a protobuf signed bit into an interface.
func WrappedSignedBuilderBid(p *ethpb.SignedBuilderBid) (SignedBid, error) {
    w := signedBuilderBid{p: p}
    if w.IsNil() {
        return nil, consensus_types.ErrNilObjectWrapped
    }
    return w, nil
}

// Message --
func (b signedBuilderBid) Message() (Bid, error) {
    return WrappedBuilderBid(b.p.Message)
}

// Signature --
func (b signedBuilderBid) Signature() []byte {
    return b.p.Signature
}

// Version --
func (b signedBuilderBid) Version() int {
    return version.Bellatrix
}

// IsNil --
func (b signedBuilderBid) IsNil() bool {
    return b.p == nil
}

type signedBuilderBidCapella struct {
    p *ethpb.SignedBuilderBidCapella
}

// WrappedSignedBuilderBidCapella is a constructor which wraps a protobuf signed bit into an interface.
func WrappedSignedBuilderBidCapella(p *ethpb.SignedBuilderBidCapella) (SignedBid, error) {
    w := signedBuilderBidCapella{p: p}
    if w.IsNil() {
        return nil, consensus_types.ErrNilObjectWrapped
    }
    return w, nil
}

// Message --
func (b signedBuilderBidCapella) Message() (Bid, error) {
    return WrappedBuilderBidCapella(b.p.Message)
}

// Signature --
func (b signedBuilderBidCapella) Signature() []byte {
    return b.p.Signature
}

// Version --
func (b signedBuilderBidCapella) Version() int {
    return version.Capella
}

// IsNil --
func (b signedBuilderBidCapella) IsNil() bool {
    return b.p == nil
}

type builderBid struct {
    p *ethpb.BuilderBid
}

// WrappedBuilderBid is a constructor which wraps a protobuf bid into an interface.
func WrappedBuilderBid(p *ethpb.BuilderBid) (Bid, error) {
    w := builderBid{p: p}
    if w.IsNil() {
        return nil, consensus_types.ErrNilObjectWrapped
    }
    return w, nil
}

// Header --
func (b builderBid) Header() (interfaces.ExecutionData, error) {
    return blocks.WrappedExecutionPayloadHeader(b.p.Header)
}

// Version --
func (b builderBid) Version() int {
    return version.Bellatrix
}

// Value --
func (b builderBid) Value() []byte {
    return b.p.Value
}

// Pubkey --
func (b builderBid) Pubkey() []byte {
    return b.p.Pubkey
}

// IsNil --
func (b builderBid) IsNil() bool {
    return b.p == nil
}

// HashTreeRoot --
func (b builderBid) HashTreeRoot() ([32]byte, error) {
    return b.p.HashTreeRoot()
}

// HashTreeRootWith --
func (b builderBid) HashTreeRootWith(hh *ssz.Hasher) error {
    return b.p.HashTreeRootWith(hh)
}

type builderBidCapella struct {
    p *ethpb.BuilderBidCapella
}

// WrappedBuilderBidCapella is a constructor which wraps a protobuf bid into an interface.
func WrappedBuilderBidCapella(p *ethpb.BuilderBidCapella) (Bid, error) {
    w := builderBidCapella{p: p}
    if w.IsNil() {
        return nil, consensus_types.ErrNilObjectWrapped
    }
    return w, nil
}

// Header returns the execution data interface.
func (b builderBidCapella) Header() (interfaces.ExecutionData, error) {
    if b.p == nil {
        return nil, errors.New("builder bid is nil")
    }
    // We have to convert big endian to little endian because the value is coming from the execution layer.
    v := big.NewInt(0).SetBytes(bytesutil.ReverseByteOrder(b.p.Value))
    return blocks.WrappedExecutionPayloadHeaderCapella(b.p.Header, math.WeiToGwei(v))
}

// Version --
func (b builderBidCapella) Version() int {
    return version.Capella
}

// Value --
func (b builderBidCapella) Value() []byte {
    return b.p.Value
}

// Pubkey --
func (b builderBidCapella) Pubkey() []byte {
    return b.p.Pubkey
}

// IsNil --
func (b builderBidCapella) IsNil() bool {
    return b.p == nil
}

// HashTreeRoot --
func (b builderBidCapella) HashTreeRoot() ([32]byte, error) {
    return b.p.HashTreeRoot()
}

// HashTreeRootWith --
func (b builderBidCapella) HashTreeRootWith(hh *ssz.Hasher) error {
    return b.p.HashTreeRootWith(hh)
}
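A short sketch of how the wrappers above might be consumed once a bid has been decoded; this is not part of the diff, and the protobuf value pb is assumed to come from the GetHeader JSON handling shown later in this compare.

// pb is an assumed *ethpb.SignedBuilderBidCapella decoded elsewhere.
sb, err := builder.WrappedSignedBuilderBidCapella(pb)
if err != nil {
    return err // nil protobuf values are rejected with consensus_types.ErrNilObjectWrapped
}
bid, err := sb.Message()
if err != nil {
    return err
}
header, err := bid.Header() // interfaces.ExecutionData; Capella applies the wei-to-gwei conversion above
if err != nil {
    return err
}
_ = header
_ = bid.Value() // raw bid value bytes; interpretation is fork-specific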
@@ -9,15 +9,21 @@ import (
    "net"
    "net/http"
    "net/url"
+    "strings"
    "text/template"
    "time"

-    v1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
-    ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
-
    "github.com/pkg/errors"
-    types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
+    "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
+    "github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
+    "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
+    "github.com/prysmaticlabs/prysm/v4/monitoring/tracing"
+    "github.com/prysmaticlabs/prysm/v4/network"
+    "github.com/prysmaticlabs/prysm/v4/network/authorization"
+    ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
+    "github.com/prysmaticlabs/prysm/v4/runtime/version"
    log "github.com/sirupsen/logrus"
+    "go.opencensus.io/trace"
)

const (
@@ -28,17 +34,13 @@ const (
)

var errMalformedHostname = errors.New("hostname must include port, separated by one colon, like example.com:3500")
+var errMalformedRequest = errors.New("required request data are missing")
+var errNotBlinded = errors.New("submitted block is not blinded")
+var submitBlindedBlockTimeout = 3 * time.Second

// ClientOpt is a functional option for the Client type (http.Client wrapper)
type ClientOpt func(*Client)

-// WithTimeout sets the .Timeout attribute of the wrapped http.Client.
-func WithTimeout(timeout time.Duration) ClientOpt {
-    return func(c *Client) {
-        c.hc.Timeout = timeout
-    }
-}
-
type observer interface {
    observe(r *http.Request) error
}
@@ -81,6 +83,15 @@ func (*requestLogger) observe(r *http.Request) (e error) {

var _ observer = &requestLogger{}

+// BuilderClient provides a collection of helper methods for calling Builder API endpoints.
+type BuilderClient interface {
+    NodeURL() string
+    GetHeader(ctx context.Context, slot primitives.Slot, parentHash [32]byte, pubkey [48]byte) (SignedBid, error)
+    RegisterValidator(ctx context.Context, svr []*ethpb.SignedValidatorRegistrationV1) error
+    SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, error)
+    Status(ctx context.Context) error
+}
+
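The BuilderClient interface above implies a simple round trip for a proposer: fetch a header bid, have the blinded block built and signed elsewhere, then submit it to recover the full payload. A hedged sketch of that flow follows; it is not part of the diff, all inputs are assumed, and imports follow the v4 paths shown in this compare.

func proposeViaBuilder(ctx context.Context, c builder.BuilderClient, slot primitives.Slot,
    parentHash [32]byte, pubkey [48]byte,
    signedBlinded interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, error) {
    // Ask the builder for a signed bid containing the execution payload header.
    bid, err := c.GetHeader(ctx, slot, parentHash, pubkey)
    if err != nil {
        return nil, err
    }
    _ = bid // the bid's header is committed into the blinded block elsewhere
    // Submit the signed blinded block; the builder responds with the full payload.
    return c.SubmitBlindedBlock(ctx, signedBlinded)
}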
// Client provides a collection of helper methods for calling Builder API endpoints.
|
// Client provides a collection of helper methods for calling Builder API endpoints.
|
||||||
type Client struct {
|
type Client struct {
|
||||||
hc *http.Client
|
hc *http.Client
|
||||||
@@ -92,7 +103,8 @@ type Client struct {
|
|||||||
// `host` is the base host + port used to construct request urls. This value can be
|
// `host` is the base host + port used to construct request urls. This value can be
|
||||||
// a URL string, or NewClient will assume an http endpoint if just `host:port` is used.
|
// a URL string, or NewClient will assume an http endpoint if just `host:port` is used.
|
||||||
func NewClient(host string, opts ...ClientOpt) (*Client, error) {
|
func NewClient(host string, opts ...ClientOpt) (*Client, error) {
|
||||||
u, err := urlForHost(host)
|
endpoint := covertEndPoint(host)
|
||||||
|
u, err := urlForHost(endpoint.Url)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -127,44 +139,59 @@ func (c *Client) NodeURL() string {
|
|||||||
|
|
||||||
type reqOption func(*http.Request)
|
type reqOption func(*http.Request)
|
||||||
|
|
||||||
// do is a generic, opinionated GET function to reduce boilerplate amongst the getters in this packageapi/client/builder/types.go.
|
// do is a generic, opinionated request function to reduce boilerplate amongst the methods in this package api/client/builder/types.go.
|
||||||
func (c *Client) do(ctx context.Context, method string, path string, body io.Reader, opts ...reqOption) ([]byte, error) {
|
func (c *Client) do(ctx context.Context, method string, path string, body io.Reader, opts ...reqOption) (res []byte, err error) {
|
||||||
|
ctx, span := trace.StartSpan(ctx, "builder.client.do")
|
||||||
|
defer func() {
|
||||||
|
tracing.AnnotateError(span, err)
|
||||||
|
span.End()
|
||||||
|
}()
|
||||||
|
|
||||||
u := c.baseURL.ResolveReference(&url.URL{Path: path})
|
u := c.baseURL.ResolveReference(&url.URL{Path: path})
|
||||||
log.Printf("requesting %s", u.String())
|
|
||||||
|
span.AddAttributes(trace.StringAttribute("url", u.String()),
|
||||||
|
trace.StringAttribute("method", method))
|
||||||
|
|
||||||
req, err := http.NewRequestWithContext(ctx, method, u.String(), body)
|
req, err := http.NewRequestWithContext(ctx, method, u.String(), body)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return
|
||||||
}
|
}
|
||||||
|
req.Header.Add("User-Agent", version.BuildData())
|
||||||
for _, o := range opts {
|
for _, o := range opts {
|
||||||
o(req)
|
o(req)
|
||||||
}
|
}
|
||||||
for _, o := range c.obvs {
|
for _, o := range c.obvs {
|
||||||
if err := o.observe(req); err != nil {
|
if err = o.observe(req); err != nil {
|
||||||
return nil, err
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
r, err := c.hc.Do(req)
|
r, err := c.hc.Do(req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return
|
||||||
}
|
}
|
||||||
defer func() {
|
defer func() {
|
||||||
err = r.Body.Close()
|
closeErr := r.Body.Close()
|
||||||
|
if closeErr != nil {
|
||||||
|
log.WithError(closeErr).Error("Failed to close response body")
|
||||||
|
}
|
||||||
}()
|
}()
|
||||||
if r.StatusCode != http.StatusOK {
|
if r.StatusCode != http.StatusOK {
|
||||||
return nil, non200Err(r)
|
err = non200Err(r)
|
||||||
|
return
|
||||||
}
|
}
|
||||||
b, err := io.ReadAll(r.Body)
|
res, err = io.ReadAll(r.Body)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "error reading http response body from GetBlock")
|
err = errors.Wrap(err, "error reading http response body from builder server")
|
||||||
|
return
|
||||||
}
|
}
|
||||||
return b, nil
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
var execHeaderTemplate = template.Must(template.New("").Parse(getExecHeaderPath))
|
var execHeaderTemplate = template.Must(template.New("").Parse(getExecHeaderPath))
|
||||||
|
|
||||||
func execHeaderPath(slot types.Slot, parentHash [32]byte, pubkey [48]byte) (string, error) {
|
func execHeaderPath(slot primitives.Slot, parentHash [32]byte, pubkey [48]byte) (string, error) {
|
||||||
v := struct {
|
v := struct {
|
||||||
Slot types.Slot
|
Slot primitives.Slot
|
||||||
ParentHash string
|
ParentHash string
|
||||||
Pubkey string
|
Pubkey string
|
||||||
}{
|
}{
|
||||||
@@ -180,8 +207,8 @@ func execHeaderPath(slot types.Slot, parentHash [32]byte, pubkey [48]byte) (stri
|
|||||||
return b.String(), nil
|
return b.String(), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetHeader is used by a proposing validator to request an ExecutionPayloadHeader from the Builder node.
|
// GetHeader is used by a proposing validator to request an execution payload header from the Builder node.
|
||||||
func (c *Client) GetHeader(ctx context.Context, slot types.Slot, parentHash [32]byte, pubkey [48]byte) (*ethpb.SignedBuilderBid, error) {
|
func (c *Client) GetHeader(ctx context.Context, slot primitives.Slot, parentHash [32]byte, pubkey [48]byte) (SignedBid, error) {
|
||||||
path, err := execHeaderPath(slot, parentHash, pubkey)
|
path, err := execHeaderPath(slot, parentHash, pubkey)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -190,42 +217,139 @@ func (c *Client) GetHeader(ctx context.Context, slot types.Slot, parentHash [32]
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
hr := &ExecHeaderResponse{}
|
v := &VersionResponse{}
|
||||||
if err := json.Unmarshal(hb, hr); err != nil {
|
if err := json.Unmarshal(hb, v); err != nil {
|
||||||
return nil, errors.Wrapf(err, "error unmarshaling the builder GetHeader response, using slot=%d, parentHash=%#x, pubkey=%#x", slot, parentHash, pubkey)
|
return nil, errors.Wrapf(err, "error unmarshaling the builder GetHeader response, using slot=%d, parentHash=%#x, pubkey=%#x", slot, parentHash, pubkey)
|
||||||
}
|
}
|
||||||
return hr.ToProto()
|
switch strings.ToLower(v.Version) {
|
||||||
|
case strings.ToLower(version.String(version.Capella)):
|
||||||
|
hr := &ExecHeaderResponseCapella{}
|
||||||
|
if err := json.Unmarshal(hb, hr); err != nil {
|
||||||
|
return nil, errors.Wrapf(err, "error unmarshaling the builder GetHeader response, using slot=%d, parentHash=%#x, pubkey=%#x", slot, parentHash, pubkey)
|
||||||
|
}
|
||||||
|
p, err := hr.ToProto()
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrapf(err, "could not extract proto message from header")
|
||||||
|
}
|
||||||
|
return WrappedSignedBuilderBidCapella(p)
|
||||||
|
case strings.ToLower(version.String(version.Bellatrix)):
|
||||||
|
hr := &ExecHeaderResponse{}
|
||||||
|
if err := json.Unmarshal(hb, hr); err != nil {
|
||||||
|
return nil, errors.Wrapf(err, "error unmarshaling the builder GetHeader response, using slot=%d, parentHash=%#x, pubkey=%#x", slot, parentHash, pubkey)
|
||||||
|
}
|
||||||
|
p, err := hr.ToProto()
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrap(err, "could not extract proto message from header")
|
||||||
|
}
|
||||||
|
return WrappedSignedBuilderBid(p)
|
||||||
|
default:
|
||||||
|
return nil, fmt.Errorf("unsupported header version %s", strings.ToLower(v.Version))
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
 // RegisterValidator encodes the SignedValidatorRegistrationV1 message to json (including hex-encoding the byte
 // fields with 0x prefixes) and posts to the builder validator registration endpoint.
-func (c *Client) RegisterValidator(ctx context.Context, svr *ethpb.SignedValidatorRegistrationV1) error {
-	v := &SignedValidatorRegistration{SignedValidatorRegistrationV1: svr}
-	body, err := json.Marshal(v)
-	if err != nil {
-		return errors.Wrap(err, "error encoding the SignedValidatorRegistration value body in RegisterValidator")
+func (c *Client) RegisterValidator(ctx context.Context, svr []*ethpb.SignedValidatorRegistrationV1) error {
+	ctx, span := trace.StartSpan(ctx, "builder.client.RegisterValidator")
+	defer span.End()
+	span.AddAttributes(trace.Int64Attribute("num_reqs", int64(len(svr))))
+
+	if len(svr) == 0 {
+		err := errors.Wrap(errMalformedRequest, "empty validator registration list")
+		tracing.AnnotateError(span, err)
+		return err
 	}
+	vs := make([]*SignedValidatorRegistration, len(svr))
+	for i := 0; i < len(svr); i++ {
+		vs[i] = &SignedValidatorRegistration{SignedValidatorRegistrationV1: svr[i]}
+	}
+	body, err := json.Marshal(vs)
+	if err != nil {
+		err := errors.Wrap(err, "error encoding the SignedValidatorRegistration value body in RegisterValidator")
+		tracing.AnnotateError(span, err)
+		return err
+	}
+
 	_, err = c.do(ctx, http.MethodPost, postRegisterValidatorPath, bytes.NewBuffer(body))
 	return err
 }

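Since RegisterValidator now accepts a slice, callers can collect every validator's registration and post them in one request. A hedged usage sketch; the helper below and its parameters are illustrative, and only the ethpb message fields come from the code above.

package example

import (
	"context"
	"time"

	"github.com/prysmaticlabs/prysm/v4/api/client/builder"
	ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)

// registerBatch builds one registration per pubkey and posts the whole set
// with a single call. Signatures are omitted here; a real caller signs each
// message with the matching validator key before submitting.
func registerBatch(ctx context.Context, c *builder.Client, pubkeys [][48]byte, feeRecipient [20]byte, gasLimit uint64) error {
	regs := make([]*ethpb.SignedValidatorRegistrationV1, 0, len(pubkeys))
	ts := uint64(time.Now().Unix())
	for _, pk := range pubkeys {
		pk := pk // capture a copy before slicing
		regs = append(regs, &ethpb.SignedValidatorRegistrationV1{
			Message: &ethpb.ValidatorRegistrationV1{
				FeeRecipient: feeRecipient[:],
				GasLimit:     gasLimit,
				Timestamp:    ts,
				Pubkey:       pk[:],
			},
		})
	}
	// An empty slice is rejected by the client before any network call.
	return c.RegisterValidator(ctx, regs)
}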
 // SubmitBlindedBlock calls the builder API endpoint that binds the validator to the builder and submits the block.
-// The response is the full ExecutionPayload used to create the blinded block.
-func (c *Client) SubmitBlindedBlock(ctx context.Context, sb *ethpb.SignedBlindedBeaconBlockBellatrix) (*v1.ExecutionPayload, error) {
-	v := &SignedBlindedBeaconBlockBellatrix{SignedBlindedBeaconBlockBellatrix: sb}
-	body, err := json.Marshal(v)
-	if err != nil {
-		return nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockBellatrix value body in SubmitBlindedBlock")
+// The response is the full execution payload used to create the blinded block.
+func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, error) {
+	if !sb.IsBlinded() {
+		return nil, errNotBlinded
 	}
-	rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body))
-	if err != nil {
-		return nil, errors.Wrap(err, "error posting the SignedBlindedBeaconBlockBellatrix to the builder api")
+	switch sb.Version() {
+	case version.Bellatrix:
+		psb, err := sb.PbBlindedBellatrixBlock()
+		if err != nil {
+			return nil, errors.Wrapf(err, "could not get protobuf block")
+		}
+		b := &SignedBlindedBeaconBlockBellatrix{SignedBlindedBeaconBlockBellatrix: psb}
+		body, err := json.Marshal(b)
+		if err != nil {
+			return nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockBellatrix value body in SubmitBlindedBlock")
+		}
+
+		ctx, cancel := context.WithTimeout(ctx, submitBlindedBlockTimeout)
+		defer cancel()
+		versionOpt := func(r *http.Request) {
+			r.Header.Add("Eth-Consensus-Version", version.String(version.Bellatrix))
+		}
+		rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), versionOpt)
+		if err != nil {
+			return nil, errors.Wrap(err, "error posting the SignedBlindedBeaconBlockBellatrix to the builder api")
+		}
+		ep := &ExecPayloadResponse{}
+		if err := json.Unmarshal(rb, ep); err != nil {
+			return nil, errors.Wrap(err, "error unmarshaling the builder SubmitBlindedBlock response")
+		}
+		if strings.ToLower(ep.Version) != version.String(version.Bellatrix) {
+			return nil, errors.New("not a bellatrix payload")
+		}
+		p, err := ep.ToProto()
+		if err != nil {
+			return nil, errors.Wrapf(err, "could not extract proto message from payload")
+		}
+		return blocks.WrappedExecutionPayload(p)
+	case version.Capella:
+		psb, err := sb.PbBlindedCapellaBlock()
+		if err != nil {
+			return nil, errors.Wrapf(err, "could not get protobuf block")
+		}
+		b := &SignedBlindedBeaconBlockCapella{SignedBlindedBeaconBlockCapella: psb}
+		body, err := json.Marshal(b)
+		if err != nil {
+			return nil, errors.Wrap(err, "error encoding the SignedBlindedBeaconBlockCapella value body in SubmitBlindedBlockCapella")
+		}
+
+		ctx, cancel := context.WithTimeout(ctx, submitBlindedBlockTimeout)
+		defer cancel()
+		versionOpt := func(r *http.Request) {
+			r.Header.Add("Eth-Consensus-Version", version.String(version.Capella))
+		}
+		rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), versionOpt)
+		if err != nil {
+			return nil, errors.Wrap(err, "error posting the SignedBlindedBeaconBlockCapella to the builder api")
+		}
+		ep := &ExecPayloadResponseCapella{}
+		if err := json.Unmarshal(rb, ep); err != nil {
+			return nil, errors.Wrap(err, "error unmarshaling the builder SubmitBlindedBlockCapella response")
+		}
+		if strings.ToLower(ep.Version) != version.String(version.Capella) {
+			return nil, errors.New("not a capella payload")
+		}
+		p, err := ep.ToProto()
+		if err != nil {
+			return nil, errors.Wrapf(err, "could not extract proto message from payload")
+		}
+		return blocks.WrappedExecutionPayloadCapella(p, 0)
+	default:
+		return nil, fmt.Errorf("unsupported block version %s", version.String(sb.Version()))
 	}
-	ep := &ExecPayloadResponse{}
-	if err := json.Unmarshal(rb, ep); err != nil {
-		return nil, errors.Wrap(err, "error unmarshaling the builder SubmitBlindedBlock response")
-	}
-	return ep.ToProto()
 }

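Both fork branches attach the Eth-Consensus-Version header through a closure passed to c.do. That per-request functional option keeps the HTTP helper fork-agnostic. A standalone sketch of the same pattern, assuming a hypothetical post helper rather than the package's unexported do:

package main

import (
	"bytes"
	"context"
	"fmt"
	"net/http"
)

// reqOption mirrors the versionOpt closures above: callers decorate the
// outgoing request without the HTTP helper knowing about forks.
type reqOption func(*http.Request)

func withConsensusVersion(v string) reqOption {
	return func(r *http.Request) {
		r.Header.Add("Eth-Consensus-Version", v)
	}
}

// post builds (but does not execute) a request, applying any options.
func post(ctx context.Context, url string, body []byte, opts ...reqOption) (*http.Request, error) {
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewBuffer(body))
	if err != nil {
		return nil, err
	}
	for _, o := range opts {
		o(req)
	}
	return req, nil
}

func main() {
	req, _ := post(context.Background(), "http://localhost:3500/eth/v1/builder/blinded_blocks", []byte(`{}`), withConsensusVersion("capella"))
	fmt.Println(req.Header.Get("Eth-Consensus-Version")) // capella
}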
 // Status asks the remote builder server for a health check. A response of 200 with an empty body is the success/healthy
@@ -238,6 +362,7 @@ func (c *Client) Status(ctx context.Context) error {

 func non200Err(response *http.Response) error {
 	bodyBytes, err := io.ReadAll(response.Body)
+	var errMessage ErrorMessage
 	var body string
 	if err != nil {
 		body = "(Unable to read response body.)"
@@ -246,9 +371,38 @@ func non200Err(response *http.Response) error {
 	}
 	msg := fmt.Sprintf("code=%d, url=%s, body=%s", response.StatusCode, response.Request.URL, body)
 	switch response.StatusCode {
+	case 204:
+		log.WithError(ErrNoContent).Debug(msg)
+		return ErrNoContent
+	case 400:
+		if jsonErr := json.Unmarshal(bodyBytes, &errMessage); jsonErr != nil {
+			return errors.Wrap(jsonErr, "unable to read response body")
+		}
+		log.WithError(ErrBadRequest).Debug(msg)
+		return errors.Wrap(ErrBadRequest, errMessage.Message)
 	case 404:
-		return errors.Wrap(ErrNotFound, msg)
+		if jsonErr := json.Unmarshal(bodyBytes, &errMessage); jsonErr != nil {
+			return errors.Wrap(jsonErr, "unable to read response body")
+		}
+		log.WithError(ErrNotFound).Debug(msg)
+		return errors.Wrap(ErrNotFound, errMessage.Message)
+	case 500:
+		if jsonErr := json.Unmarshal(bodyBytes, &errMessage); jsonErr != nil {
+			return errors.Wrap(jsonErr, "unable to read response body")
+		}
+		log.WithError(ErrNotOK).Debug(msg)
+		return errors.Wrap(ErrNotOK, errMessage.Message)
 	default:
-		return errors.Wrap(ErrNotOK, msg)
+		log.WithError(ErrNotOK).Debug(msg)
+		return errors.Wrap(ErrNotOK, fmt.Sprintf("unsupported error code: %d", response.StatusCode))
 	}
 }
+
+func covertEndPoint(ep string) network.Endpoint {
+	return network.Endpoint{
+		Url: ep,
+		Auth: network.AuthorizationData{ // Auth is not used for builder.
+			Method: authorization.None,
+			Value:  "",
+		}}
+}
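For 400, 404 and 500 responses, non200Err now tries to surface the builder's own error body instead of echoing the raw bytes. A small sketch of that decode step; the JSON field names are an assumption based on the ErrorMessage usage in the tests, not a confirmed schema.

package main

import (
	"encoding/json"
	"fmt"
)

// errorMessage mirrors the builder error body: a numeric code plus a
// human-readable message (field tags assumed for illustration).
type errorMessage struct {
	Code    int    `json:"code"`
	Message string `json:"message"`
}

func main() {
	body := []byte(`{"code":400,"message":"invalid registration"}`)
	em := errorMessage{}
	if err := json.Unmarshal(body, &em); err != nil {
		fmt.Println("unable to read response body:", err)
		return
	}
	// The client wraps its sentinel (ErrBadRequest, ErrNotFound, ErrNotOK)
	// with em.Message so callers see both the category and the reason.
	fmt.Printf("builder rejected request: %d %s\n", em.Code, em.Message)
}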
|||||||
@@ -3,19 +3,24 @@ package builder
|
|||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
|
"math/big"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
"strconv"
|
"strconv"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/prysmaticlabs/go-bitfield"
|
"github.com/prysmaticlabs/go-bitfield"
|
||||||
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
|
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||||
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
|
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
types "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||||
eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||||
"github.com/prysmaticlabs/prysm/testing/require"
|
v1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
|
||||||
|
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/testing/assert"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||||
)
|
)
|
||||||
|
|
||||||
type roundtrip func(*http.Request) (*http.Response, error)
|
type roundtrip func(*http.Request) (*http.Response, error)
|
||||||
@@ -57,9 +62,15 @@ func TestClient_Status(t *testing.T) {
|
|||||||
require.NoError(t, r.Body.Close())
|
require.NoError(t, r.Body.Close())
|
||||||
}()
|
}()
|
||||||
require.Equal(t, statusPath, r.URL.Path)
|
require.Equal(t, statusPath, r.URL.Path)
|
||||||
|
message := ErrorMessage{
|
||||||
|
Code: 500,
|
||||||
|
Message: "Internal server error",
|
||||||
|
}
|
||||||
|
resp, err := json.Marshal(message)
|
||||||
|
require.NoError(t, err)
|
||||||
return &http.Response{
|
return &http.Response{
|
||||||
StatusCode: http.StatusInternalServerError,
|
StatusCode: http.StatusInternalServerError,
|
||||||
Body: io.NopCloser(bytes.NewBuffer(nil)),
|
Body: io.NopCloser(bytes.NewBuffer(resp)),
|
||||||
Request: r.Clone(ctx),
|
Request: r.Clone(ctx),
|
||||||
}, nil
|
}, nil
|
||||||
}),
|
}),
|
||||||
@@ -73,7 +84,7 @@ func TestClient_Status(t *testing.T) {
|
|||||||
|
|
||||||
func TestClient_RegisterValidator(t *testing.T) {
|
func TestClient_RegisterValidator(t *testing.T) {
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
expectedBody := `{"message":{"fee_recipient":"0x0000000000000000000000000000000000000000","gas_limit":"23","timestamp":"42","pubkey":"0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"}}`
|
expectedBody := `[{"message":{"fee_recipient":"0x0000000000000000000000000000000000000000","gas_limit":"23","timestamp":"42","pubkey":"0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"},"signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"}]`
|
||||||
expectedPath := "/eth/v1/builder/validators"
|
expectedPath := "/eth/v1/builder/validators"
|
||||||
hc := &http.Client{
|
hc := &http.Client{
|
||||||
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
||||||
@@ -98,86 +109,236 @@ func TestClient_RegisterValidator(t *testing.T) {
|
|||||||
}
|
}
|
||||||
reg := ð.SignedValidatorRegistrationV1{
|
reg := ð.SignedValidatorRegistrationV1{
|
||||||
Message: ð.ValidatorRegistrationV1{
|
Message: ð.ValidatorRegistrationV1{
|
||||||
FeeRecipient: ezDecode(t, fieldparams.EthBurnAddressHex),
|
FeeRecipient: ezDecode(t, params.BeaconConfig().EthBurnAddressHex),
|
||||||
GasLimit: 23,
|
GasLimit: 23,
|
||||||
Timestamp: 42,
|
Timestamp: 42,
|
||||||
Pubkey: ezDecode(t, "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"),
|
Pubkey: ezDecode(t, "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"),
|
||||||
},
|
},
|
||||||
|
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||||
}
|
}
|
||||||
require.NoError(t, c.RegisterValidator(ctx, reg))
|
require.NoError(t, c.RegisterValidator(ctx, []*eth.SignedValidatorRegistrationV1{reg}))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestClient_GetHeader(t *testing.T) {
|
func TestClient_GetHeader(t *testing.T) {
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
expectedPath := "/eth/v1/builder/header/23/0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2/0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"
|
expectedPath := "/eth/v1/builder/header/23/0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2/0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"
|
||||||
hc := &http.Client{
|
|
||||||
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
|
||||||
require.Equal(t, expectedPath, r.URL.Path)
|
|
||||||
return &http.Response{
|
|
||||||
StatusCode: http.StatusInternalServerError,
|
|
||||||
Body: io.NopCloser(bytes.NewBuffer(nil)),
|
|
||||||
Request: r.Clone(ctx),
|
|
||||||
}, nil
|
|
||||||
}),
|
|
||||||
}
|
|
||||||
c := &Client{
|
|
||||||
hc: hc,
|
|
||||||
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
|
|
||||||
}
|
|
||||||
var slot types.Slot = 23
|
var slot types.Slot = 23
|
||||||
parentHash := ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
|
parentHash := ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
|
||||||
pubkey := ezDecode(t, "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a")
|
pubkey := ezDecode(t, "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a")
|
||||||
_, err := c.GetHeader(ctx, slot, bytesutil.ToBytes32(parentHash), bytesutil.ToBytes48(pubkey))
|
|
||||||
require.ErrorIs(t, err, ErrNotOK)
|
|
||||||
|
|
||||||
hc = &http.Client{
|
t.Run("server error", func(t *testing.T) {
|
||||||
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
hc := &http.Client{
|
||||||
require.Equal(t, expectedPath, r.URL.Path)
|
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
||||||
return &http.Response{
|
require.Equal(t, expectedPath, r.URL.Path)
|
||||||
StatusCode: http.StatusOK,
|
message := ErrorMessage{
|
||||||
Body: io.NopCloser(bytes.NewBufferString(testExampleHeaderResponse)),
|
Code: 500,
|
||||||
Request: r.Clone(ctx),
|
Message: "Internal server error",
|
||||||
}, nil
|
}
|
||||||
}),
|
resp, err := json.Marshal(message)
|
||||||
}
|
require.NoError(t, err)
|
||||||
c = &Client{
|
return &http.Response{
|
||||||
hc: hc,
|
StatusCode: http.StatusInternalServerError,
|
||||||
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
|
Body: io.NopCloser(bytes.NewBuffer(resp)),
|
||||||
}
|
Request: r.Clone(ctx),
|
||||||
h, err := c.GetHeader(ctx, slot, bytesutil.ToBytes32(parentHash), bytesutil.ToBytes48(pubkey))
|
}, nil
|
||||||
require.NoError(t, err)
|
}),
|
||||||
expectedSig := ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505")
|
}
|
||||||
require.Equal(t, true, bytes.Equal(expectedSig, h.Signature))
|
c := &Client{
|
||||||
expectedTxRoot := ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
|
hc: hc,
|
||||||
require.Equal(t, true, bytes.Equal(expectedTxRoot, h.Message.Header.TransactionsRoot))
|
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
|
||||||
require.Equal(t, uint64(1), h.Message.Header.GasUsed)
|
}
|
||||||
value := stringToUint256("652312848583266388373324160190187140051835877600158453279131187530910662656")
|
|
||||||
require.Equal(t, fmt.Sprintf("%#x", value.SSZBytes()), fmt.Sprintf("%#x", h.Message.Value))
|
_, err := c.GetHeader(ctx, slot, bytesutil.ToBytes32(parentHash), bytesutil.ToBytes48(pubkey))
|
||||||
|
require.ErrorIs(t, err, ErrNotOK)
|
||||||
|
})
|
||||||
|
t.Run("header not available", func(t *testing.T) {
|
||||||
|
hc := &http.Client{
|
||||||
|
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
||||||
|
require.Equal(t, expectedPath, r.URL.Path)
|
||||||
|
return &http.Response{
|
||||||
|
StatusCode: http.StatusNoContent,
|
||||||
|
Body: io.NopCloser(bytes.NewBuffer([]byte("No header is available."))),
|
||||||
|
Request: r.Clone(ctx),
|
||||||
|
}, nil
|
||||||
|
}),
|
||||||
|
}
|
||||||
|
c := &Client{
|
||||||
|
hc: hc,
|
||||||
|
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
|
||||||
|
}
|
||||||
|
_, err := c.GetHeader(ctx, slot, bytesutil.ToBytes32(parentHash), bytesutil.ToBytes48(pubkey))
|
||||||
|
require.ErrorIs(t, err, ErrNoContent)
|
||||||
|
})
|
||||||
|
t.Run("bellatrix", func(t *testing.T) {
|
||||||
|
hc := &http.Client{
|
||||||
|
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
||||||
|
require.Equal(t, expectedPath, r.URL.Path)
|
||||||
|
return &http.Response{
|
||||||
|
StatusCode: http.StatusOK,
|
||||||
|
Body: io.NopCloser(bytes.NewBufferString(testExampleHeaderResponse)),
|
||||||
|
Request: r.Clone(ctx),
|
||||||
|
}, nil
|
||||||
|
}),
|
||||||
|
}
|
||||||
|
c := &Client{
|
||||||
|
hc: hc,
|
||||||
|
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
|
||||||
|
}
|
||||||
|
h, err := c.GetHeader(ctx, slot, bytesutil.ToBytes32(parentHash), bytesutil.ToBytes48(pubkey))
|
||||||
|
require.NoError(t, err)
|
||||||
|
expectedSig := ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505")
|
||||||
|
require.Equal(t, true, bytes.Equal(expectedSig, h.Signature()))
|
||||||
|
expectedTxRoot := ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
|
||||||
|
bid, err := h.Message()
|
||||||
|
require.NoError(t, err)
|
||||||
|
bidHeader, err := bid.Header()
|
||||||
|
require.NoError(t, err)
|
||||||
|
withdrawalsRoot, err := bidHeader.TransactionsRoot()
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, true, bytes.Equal(expectedTxRoot, withdrawalsRoot))
|
||||||
|
require.Equal(t, uint64(1), bidHeader.GasUsed())
|
||||||
|
value, err := stringToUint256("652312848583266388373324160190187140051835877600158453279131187530910662656")
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, fmt.Sprintf("%#x", value.SSZBytes()), fmt.Sprintf("%#x", bid.Value()))
|
||||||
|
bidValue := bytesutil.ReverseByteOrder(bid.Value())
|
||||||
|
require.DeepEqual(t, bidValue, value.Bytes())
|
||||||
|
require.DeepEqual(t, big.NewInt(0).SetBytes(bidValue), value.Int)
|
||||||
|
})
|
||||||
|
t.Run("capella", func(t *testing.T) {
|
||||||
|
hc := &http.Client{
|
||||||
|
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
||||||
|
require.Equal(t, expectedPath, r.URL.Path)
|
||||||
|
return &http.Response{
|
||||||
|
StatusCode: http.StatusOK,
|
||||||
|
Body: io.NopCloser(bytes.NewBufferString(testExampleHeaderResponseCapella)),
|
||||||
|
Request: r.Clone(ctx),
|
||||||
|
}, nil
|
||||||
|
}),
|
||||||
|
}
|
||||||
|
c := &Client{
|
||||||
|
hc: hc,
|
||||||
|
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
|
||||||
|
}
|
||||||
|
h, err := c.GetHeader(ctx, slot, bytesutil.ToBytes32(parentHash), bytesutil.ToBytes48(pubkey))
|
||||||
|
require.NoError(t, err)
|
||||||
|
expectedWithdrawalsRoot := ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
|
||||||
|
bid, err := h.Message()
|
||||||
|
require.NoError(t, err)
|
||||||
|
bidHeader, err := bid.Header()
|
||||||
|
require.NoError(t, err)
|
||||||
|
withdrawalsRoot, err := bidHeader.WithdrawalsRoot()
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, true, bytes.Equal(expectedWithdrawalsRoot, withdrawalsRoot))
|
||||||
|
value, err := stringToUint256("652312848583266388373324160190187140051835877600158453279131187530910662656")
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, fmt.Sprintf("%#x", value.SSZBytes()), fmt.Sprintf("%#x", bid.Value()))
|
||||||
|
bidValue := bytesutil.ReverseByteOrder(bid.Value())
|
||||||
|
require.DeepEqual(t, bidValue, value.Bytes())
|
||||||
|
require.DeepEqual(t, big.NewInt(0).SetBytes(bidValue), value.Int)
|
||||||
|
})
|
||||||
|
t.Run("unsupported version", func(t *testing.T) {
|
||||||
|
hc := &http.Client{
|
||||||
|
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
||||||
|
require.Equal(t, expectedPath, r.URL.Path)
|
||||||
|
return &http.Response{
|
||||||
|
StatusCode: http.StatusOK,
|
||||||
|
Body: io.NopCloser(bytes.NewBufferString(testExampleHeaderResponseUnknownVersion)),
|
||||||
|
Request: r.Clone(ctx),
|
||||||
|
}, nil
|
||||||
|
}),
|
||||||
|
}
|
||||||
|
c := &Client{
|
||||||
|
hc: hc,
|
||||||
|
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
|
||||||
|
}
|
||||||
|
_, err := c.GetHeader(ctx, slot, bytesutil.ToBytes32(parentHash), bytesutil.ToBytes48(pubkey))
|
||||||
|
require.ErrorContains(t, "unsupported header version", err)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestSubmitBlindedBlock(t *testing.T) {
|
func TestSubmitBlindedBlock(t *testing.T) {
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
hc := &http.Client{
|
|
||||||
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
t.Run("bellatrix", func(t *testing.T) {
|
||||||
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
|
hc := &http.Client{
|
||||||
return &http.Response{
|
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
||||||
StatusCode: http.StatusOK,
|
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
|
||||||
Body: io.NopCloser(bytes.NewBufferString(testExampleExecutionPayload)),
|
require.Equal(t, "bellatrix", r.Header.Get("Eth-Consensus-Version"))
|
||||||
Request: r.Clone(ctx),
|
return &http.Response{
|
||||||
}, nil
|
StatusCode: http.StatusOK,
|
||||||
}),
|
Body: io.NopCloser(bytes.NewBufferString(testExampleExecutionPayload)),
|
||||||
}
|
Request: r.Clone(ctx),
|
||||||
c := &Client{
|
}, nil
|
||||||
hc: hc,
|
}),
|
||||||
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
|
}
|
||||||
}
|
c := &Client{
|
||||||
sbbb := testSignedBlindedBeaconBlockBellatrix(t)
|
hc: hc,
|
||||||
ep, err := c.SubmitBlindedBlock(ctx, sbbb)
|
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
|
||||||
require.NoError(t, err)
|
}
|
||||||
require.Equal(t, true, bytes.Equal(ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"), ep.ParentHash))
|
sbbb, err := blocks.NewSignedBeaconBlock(testSignedBlindedBeaconBlockBellatrix(t))
|
||||||
bfpg := stringToUint256("452312848583266388373324160190187140051835877600158453279131187530910662656")
|
require.NoError(t, err)
|
||||||
require.Equal(t, fmt.Sprintf("%#x", bfpg.SSZBytes()), fmt.Sprintf("%#x", ep.BaseFeePerGas))
|
ep, err := c.SubmitBlindedBlock(ctx, sbbb)
|
||||||
require.Equal(t, uint64(1), ep.GasLimit)
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, true, bytes.Equal(ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"), ep.ParentHash()))
|
||||||
|
bfpg, err := stringToUint256("452312848583266388373324160190187140051835877600158453279131187530910662656")
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, fmt.Sprintf("%#x", bfpg.SSZBytes()), fmt.Sprintf("%#x", ep.BaseFeePerGas()))
|
||||||
|
require.Equal(t, uint64(1), ep.GasLimit())
|
||||||
|
})
|
||||||
|
t.Run("capella", func(t *testing.T) {
|
||||||
|
hc := &http.Client{
|
||||||
|
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
||||||
|
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
|
||||||
|
require.Equal(t, "capella", r.Header.Get("Eth-Consensus-Version"))
|
||||||
|
return &http.Response{
|
||||||
|
StatusCode: http.StatusOK,
|
||||||
|
Body: io.NopCloser(bytes.NewBufferString(testExampleExecutionPayloadCapella)),
|
||||||
|
Request: r.Clone(ctx),
|
||||||
|
}, nil
|
||||||
|
}),
|
||||||
|
}
|
||||||
|
c := &Client{
|
||||||
|
hc: hc,
|
||||||
|
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
|
||||||
|
}
|
||||||
|
sbb, err := blocks.NewSignedBeaconBlock(testSignedBlindedBeaconBlockCapella(t))
|
||||||
|
require.NoError(t, err)
|
||||||
|
ep, err := c.SubmitBlindedBlock(ctx, sbb)
|
||||||
|
require.NoError(t, err)
|
||||||
|
withdrawals, err := ep.Withdrawals()
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, 1, len(withdrawals))
|
||||||
|
assert.Equal(t, uint64(1), withdrawals[0].Index)
|
||||||
|
assert.Equal(t, types.ValidatorIndex(1), withdrawals[0].ValidatorIndex)
|
||||||
|
assert.DeepEqual(t, ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943"), withdrawals[0].Address)
|
||||||
|
assert.Equal(t, uint64(1), withdrawals[0].Amount)
|
||||||
|
})
|
||||||
|
t.Run("mismatched versions, expected bellatrix got capella", func(t *testing.T) {
|
||||||
|
hc := &http.Client{
|
||||||
|
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
||||||
|
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
|
||||||
|
return &http.Response{
|
||||||
|
StatusCode: http.StatusOK,
|
||||||
|
Body: io.NopCloser(bytes.NewBufferString(testExampleExecutionPayloadCapella)), // send a Capella payload
|
||||||
|
Request: r.Clone(ctx),
|
||||||
|
}, nil
|
||||||
|
}),
|
||||||
|
}
|
||||||
|
c := &Client{
|
||||||
|
hc: hc,
|
||||||
|
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
|
||||||
|
}
|
||||||
|
sbbb, err := blocks.NewSignedBeaconBlock(testSignedBlindedBeaconBlockBellatrix(t))
|
||||||
|
require.NoError(t, err)
|
||||||
|
_, err = c.SubmitBlindedBlock(ctx, sbbb)
|
||||||
|
require.ErrorContains(t, "not a bellatrix payload", err)
|
||||||
|
})
|
||||||
|
t.Run("not blinded", func(t *testing.T) {
|
||||||
|
sbb, err := blocks.NewSignedBeaconBlock(ð.SignedBeaconBlockBellatrix{Block: ð.BeaconBlockBellatrix{Body: ð.BeaconBlockBodyBellatrix{}}})
|
||||||
|
require.NoError(t, err)
|
||||||
|
_, err = (&Client{}).SubmitBlindedBlock(ctx, sbb)
|
||||||
|
require.ErrorIs(t, err, errNotBlinded)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func testSignedBlindedBeaconBlockBellatrix(t *testing.T) *eth.SignedBlindedBeaconBlockBellatrix {
|
func testSignedBlindedBeaconBlockBellatrix(t *testing.T) *eth.SignedBlindedBeaconBlockBellatrix {
|
||||||
@@ -300,7 +461,7 @@ func testSignedBlindedBeaconBlockBellatrix(t *testing.T) *eth.SignedBlindedBeaco
|
|||||||
SyncCommitteeSignature: make([]byte, 48),
|
SyncCommitteeSignature: make([]byte, 48),
|
||||||
SyncCommitteeBits: bitfield.Bitvector512{0x01},
|
SyncCommitteeBits: bitfield.Bitvector512{0x01},
|
||||||
},
|
},
|
||||||
ExecutionPayloadHeader: ð.ExecutionPayloadHeader{
|
ExecutionPayloadHeader: &v1.ExecutionPayloadHeader{
|
||||||
ParentHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
ParentHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||||
FeeRecipient: ezDecode(t, "0xabcf8e0d4e9587369b2301d0790347320302cc09"),
|
FeeRecipient: ezDecode(t, "0xabcf8e0d4e9587369b2301d0790347320302cc09"),
|
||||||
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||||
@@ -322,6 +483,149 @@ func testSignedBlindedBeaconBlockBellatrix(t *testing.T) *eth.SignedBlindedBeaco
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func testSignedBlindedBeaconBlockCapella(t *testing.T) *eth.SignedBlindedBeaconBlockCapella {
|
||||||
|
return ð.SignedBlindedBeaconBlockCapella{
|
||||||
|
Block: ð.BlindedBeaconBlockCapella{
|
||||||
|
Slot: 1,
|
||||||
|
ProposerIndex: 1,
|
||||||
|
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||||
|
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||||
|
Body: ð.BlindedBeaconBlockBodyCapella{
|
||||||
|
RandaoReveal: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||||
|
Eth1Data: ð.Eth1Data{
|
||||||
|
DepositRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||||
|
DepositCount: 1,
|
||||||
|
BlockHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||||
|
},
|
||||||
|
Graffiti: ezDecode(t, "0xdeadbeefc0ffee"),
|
||||||
|
ProposerSlashings: []*eth.ProposerSlashing{
|
||||||
|
{
|
||||||
|
Header_1: ð.SignedBeaconBlockHeader{
|
||||||
|
Header: ð.BeaconBlockHeader{
|
||||||
|
Slot: 1,
|
||||||
|
ProposerIndex: 1,
|
||||||
|
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||||
|
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||||
|
BodyRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||||
|
},
|
||||||
|
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||||
|
},
|
||||||
|
Header_2: ð.SignedBeaconBlockHeader{
|
||||||
|
Header: ð.BeaconBlockHeader{
|
||||||
|
Slot: 1,
|
||||||
|
ProposerIndex: 1,
|
||||||
|
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||||
|
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||||
|
BodyRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||||
|
},
|
||||||
|
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
AttesterSlashings: []*eth.AttesterSlashing{
|
||||||
|
{
|
||||||
|
Attestation_1: ð.IndexedAttestation{
|
||||||
|
AttestingIndices: []uint64{1},
|
||||||
|
Data: ð.AttestationData{
|
||||||
|
Slot: 1,
|
||||||
|
CommitteeIndex: 1,
|
||||||
|
BeaconBlockRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||||
|
Source: ð.Checkpoint{
|
||||||
|
Epoch: 1,
|
||||||
|
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||||
|
},
|
||||||
|
Target: ð.Checkpoint{
|
||||||
|
Epoch: 1,
|
||||||
|
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||||
|
},
|
||||||
|
Attestation_2: ð.IndexedAttestation{
|
||||||
|
AttestingIndices: []uint64{1},
|
||||||
|
Data: ð.AttestationData{
|
||||||
|
Slot: 1,
|
||||||
|
CommitteeIndex: 1,
|
||||||
|
BeaconBlockRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||||
|
Source: ð.Checkpoint{
|
||||||
|
Epoch: 1,
|
||||||
|
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||||
|
},
|
||||||
|
Target: ð.Checkpoint{
|
||||||
|
Epoch: 1,
|
||||||
|
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Attestations: []*eth.Attestation{
|
||||||
|
{
|
||||||
|
AggregationBits: bitfield.Bitlist{0x01},
|
||||||
|
Data: ð.AttestationData{
|
||||||
|
Slot: 1,
|
||||||
|
CommitteeIndex: 1,
|
||||||
|
BeaconBlockRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||||
|
Source: ð.Checkpoint{
|
||||||
|
Epoch: 1,
|
||||||
|
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||||
|
},
|
||||||
|
Target: ð.Checkpoint{
|
||||||
|
Epoch: 1,
|
||||||
|
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Deposits: []*eth.Deposit{
|
||||||
|
{
|
||||||
|
Proof: [][]byte{ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")},
|
||||||
|
Data: ð.Deposit_Data{
|
||||||
|
PublicKey: ezDecode(t, "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"),
|
||||||
|
WithdrawalCredentials: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||||
|
Amount: 1,
|
||||||
|
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
VoluntaryExits: []*eth.SignedVoluntaryExit{
|
||||||
|
{
|
||||||
|
Exit: ð.VoluntaryExit{
|
||||||
|
Epoch: 1,
|
||||||
|
ValidatorIndex: 1,
|
||||||
|
},
|
||||||
|
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
SyncAggregate: ð.SyncAggregate{
|
||||||
|
SyncCommitteeSignature: make([]byte, 48),
|
||||||
|
SyncCommitteeBits: bitfield.Bitvector512{0x01},
|
||||||
|
},
|
||||||
|
ExecutionPayloadHeader: &v1.ExecutionPayloadHeaderCapella{
|
||||||
|
ParentHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||||
|
FeeRecipient: ezDecode(t, "0xabcf8e0d4e9587369b2301d0790347320302cc09"),
|
||||||
|
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||||
|
ReceiptsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||||
|
LogsBloom: ezDecode(t, "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"),
|
||||||
|
PrevRandao: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||||
|
BlockNumber: 1,
|
||||||
|
GasLimit: 1,
|
||||||
|
GasUsed: 1,
|
||||||
|
Timestamp: 1,
|
||||||
|
ExtraData: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||||
|
BaseFeePerGas: []byte(strconv.FormatUint(1, 10)),
|
||||||
|
BlockHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||||
|
TransactionsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||||
|
WithdrawalsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func TestRequestLogger(t *testing.T) {
|
func TestRequestLogger(t *testing.T) {
|
||||||
wo := WithObserver(&requestLogger{})
|
wo := WithObserver(&requestLogger{})
|
||||||
c, err := NewClient("localhost:3500", wo)
|
c, err := NewClient("localhost:3500", wo)
|
||||||
|
|||||||
@@ -4,7 +4,14 @@ import "github.com/pkg/errors"

 // ErrNotOK is used to indicate when an HTTP request to the Beacon Node API failed with any non-2xx response code.
 // More specific errors may be returned, but an error in reaction to a non-2xx response will always wrap ErrNotOK.
-var ErrNotOK = errors.New("did not receive 2xx response from API")
+var ErrNotOK = errors.New("did not receive 200 response from API")

 // ErrNotFound specifically means that a '404 - NOT FOUND' response was received from the API.
 var ErrNotFound = errors.Wrap(ErrNotOK, "recv 404 NotFound response from API")
+
+// ErrBadRequest specifically means that a '400 - BAD REQUEST' response was received from the API.
+var ErrBadRequest = errors.Wrap(ErrNotOK, "recv 400 BadRequest response from API")
+
+// ErrNoContent specifically means that a '204 - No Content' response was received from the API.
+// Typically, a 204 is a success but in this case for the Header API means No header is available
+var ErrNoContent = errors.New("recv 204 no content response from API, No header is available")
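ErrBadRequest and ErrNotFound wrap ErrNotOK, and GetHeader returns ErrNoContent when the relay has no bid, so callers can branch with errors.Is. A hedged consumer-side sketch; the fallback behavior is a placeholder, and it assumes the github.com/pkg/errors wrappers stay compatible with the standard errors.Is.

package example

import (
	"context"
	"errors"

	"github.com/prysmaticlabs/prysm/v4/api/client/builder"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
)

// headerOrLocal asks the relay for a bid and distinguishes "no bid" from real failures.
func headerOrLocal(ctx context.Context, c *builder.Client, slot primitives.Slot, parent [32]byte, pk [48]byte) (builder.SignedBid, error) {
	bid, err := c.GetHeader(ctx, slot, parent, pk)
	switch {
	case err == nil:
		return bid, nil
	case errors.Is(err, builder.ErrNoContent):
		// 204: the relay has no header for this slot; fall back to local block building (placeholder).
		return nil, nil
	default:
		// 400/404/500 and other non-2xx responses all wrap builder.ErrNotOK with the relay's message.
		return nil, err
	}
}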
|||||||
1
api/client/builder/testdata/blinded-block-capella.json
vendored
Normal file
1
api/client/builder/testdata/blinded-block-capella.json
vendored
Normal file
File diff suppressed because one or more lines are too long
1
api/client/builder/testdata/execution-payload-capella.json
vendored
Normal file
1
api/client/builder/testdata/execution-payload-capella.json
vendored
Normal file
@@ -0,0 +1 @@
|
|||||||
|
{"parent_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","fee_recipient":"0xabcf8e0d4e9587369b2301d0790347320302cc09","state_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","receipts_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","logs_bloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","prev_randao":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","block_number":"1","gas_limit":"1","gas_used":"1","timestamp":"1","extra_data":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","base_fee_per_gas":"14074904626401341155369551180448584754667373453244490859944217516317499064576","block_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","transactions_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","withdrawals_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"}
|
||||||
15
api/client/builder/testing/BUILD.bazel
Normal file
15
api/client/builder/testing/BUILD.bazel
Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
load("@prysm//tools/go:def.bzl", "go_library")
|
||||||
|
|
||||||
|
go_library(
|
||||||
|
name = "go_default_library",
|
||||||
|
srcs = ["mock.go"],
|
||||||
|
importpath = "github.com/prysmaticlabs/prysm/v4/api/client/builder/testing",
|
||||||
|
visibility = ["//visibility:public"],
|
||||||
|
deps = [
|
||||||
|
"//api/client/builder:go_default_library",
|
||||||
|
"//consensus-types/interfaces:go_default_library",
|
||||||
|
"//consensus-types/primitives:go_default_library",
|
||||||
|
"//encoding/bytesutil:go_default_library",
|
||||||
|
"//proto/prysm/v1alpha1:go_default_library",
|
||||||
|
],
|
||||||
|
)
|
||||||
50
api/client/builder/testing/mock.go
Normal file
50
api/client/builder/testing/mock.go
Normal file
@@ -0,0 +1,50 @@
|
|||||||
|
package testing
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/api/client/builder"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||||
|
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||||
|
)
|
||||||
|
|
||||||
|
// MockClient is a mock implementation of BuilderClient.
|
||||||
|
type MockClient struct {
|
||||||
|
RegisteredVals map[[48]byte]bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewClient creates a new, correctly initialized mock.
|
||||||
|
func NewClient() MockClient {
|
||||||
|
return MockClient{RegisteredVals: map[[48]byte]bool{}}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NodeURL --
|
||||||
|
func (MockClient) NodeURL() string {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetHeader --
|
||||||
|
func (MockClient) GetHeader(_ context.Context, _ primitives.Slot, _ [32]byte, _ [48]byte) (builder.SignedBid, error) {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// RegisterValidator --
|
||||||
|
func (m MockClient) RegisterValidator(_ context.Context, svr []*ethpb.SignedValidatorRegistrationV1) error {
|
||||||
|
for _, r := range svr {
|
||||||
|
b := bytesutil.ToBytes48(r.Message.Pubkey)
|
||||||
|
m.RegisteredVals[b] = true
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SubmitBlindedBlock --
|
||||||
|
func (MockClient) SubmitBlindedBlock(_ context.Context, _ interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, error) {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Status --
|
||||||
|
func (MockClient) Status(_ context.Context) error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
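The mock records registered pubkeys in RegisteredVals, which makes registration side effects easy to assert. A hedged usage sketch; the test itself is illustrative and not part of the repository.

package example_test

import (
	"context"
	"testing"

	buildertesting "github.com/prysmaticlabs/prysm/v4/api/client/builder/testing"
	"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
	ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v4/testing/require"
)

func TestMockRecordsRegistrations(t *testing.T) {
	m := buildertesting.NewClient()
	pk := [48]byte{0x01}
	reg := &ethpb.SignedValidatorRegistrationV1{
		Message: &ethpb.ValidatorRegistrationV1{Pubkey: pk[:]},
	}
	require.NoError(t, m.RegisterValidator(context.Background(), []*ethpb.SignedValidatorRegistrationV1{reg}))
	// RegisteredVals is keyed by the 48-byte pubkey.
	require.Equal(t, true, m.RegisteredVals[bytesutil.ToBytes48(pk[:])])
}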
@@ -8,9 +8,10 @@ import (
|
|||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
types "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||||
v1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
|
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||||
eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
v1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
|
||||||
|
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||||
)
|
)
|
||||||
|
|
||||||
type SignedValidatorRegistration struct {
|
type SignedValidatorRegistration struct {
|
||||||
@@ -23,20 +24,36 @@ type ValidatorRegistration struct {

 func (r *SignedValidatorRegistration) MarshalJSON() ([]byte, error) {
 	return json.Marshal(struct {
-		Message   *ValidatorRegistration `json:"message,omitempty"`
-		Signature hexutil.Bytes          `json:"signature,omitempty"`
+		Message   *ValidatorRegistration `json:"message"`
+		Signature hexutil.Bytes          `json:"signature"`
 	}{
 		Message:   &ValidatorRegistration{r.Message},
 		Signature: r.SignedValidatorRegistrationV1.Signature,
 	})
 }

+func (r *SignedValidatorRegistration) UnmarshalJSON(b []byte) error {
+	if r.SignedValidatorRegistrationV1 == nil {
+		r.SignedValidatorRegistrationV1 = &eth.SignedValidatorRegistrationV1{}
+	}
+	o := struct {
+		Message   *ValidatorRegistration `json:"message"`
+		Signature hexutil.Bytes          `json:"signature"`
+	}{}
+	if err := json.Unmarshal(b, &o); err != nil {
+		return err
+	}
+	r.Message = o.Message.ValidatorRegistrationV1
+	r.Signature = o.Signature
+	return nil
+}

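With the matching UnmarshalJSON added, a registration survives a round trip through the wrapper type. A minimal in-package sketch (illustrative, not from the repository), assuming the package's existing json and eth imports:

// roundTrip marshals a proto registration through the JSON wrapper and back.
func roundTrip(reg *eth.SignedValidatorRegistrationV1) (*eth.SignedValidatorRegistrationV1, error) {
	wrapped := &SignedValidatorRegistration{SignedValidatorRegistrationV1: reg}
	b, err := json.Marshal(wrapped)
	if err != nil {
		return nil, err
	}
	// UnmarshalJSON allocates the embedded proto message when it is nil.
	out := &SignedValidatorRegistration{}
	if err := json.Unmarshal(b, out); err != nil {
		return nil, err
	}
	return out.SignedValidatorRegistrationV1, nil
}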
func (r *ValidatorRegistration) MarshalJSON() ([]byte, error) {
|
func (r *ValidatorRegistration) MarshalJSON() ([]byte, error) {
|
||||||
return json.Marshal(struct {
|
return json.Marshal(struct {
|
||||||
FeeRecipient hexutil.Bytes `json:"fee_recipient,omitempty"`
|
FeeRecipient hexutil.Bytes `json:"fee_recipient"`
|
||||||
GasLimit string `json:"gas_limit,omitempty"`
|
GasLimit string `json:"gas_limit"`
|
||||||
Timestamp string `json:"timestamp,omitempty"`
|
Timestamp string `json:"timestamp"`
|
||||||
Pubkey hexutil.Bytes `json:"pubkey,omitempty"`
|
Pubkey hexutil.Bytes `json:"pubkey"`
|
||||||
}{
|
}{
|
||||||
FeeRecipient: r.FeeRecipient,
|
FeeRecipient: r.FeeRecipient,
|
||||||
GasLimit: fmt.Sprintf("%d", r.GasLimit),
|
GasLimit: fmt.Sprintf("%d", r.GasLimit),
|
||||||
@@ -45,29 +62,70 @@ func (r *ValidatorRegistration) MarshalJSON() ([]byte, error) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (r *ValidatorRegistration) UnmarshalJSON(b []byte) error {
|
||||||
|
if r.ValidatorRegistrationV1 == nil {
|
||||||
|
r.ValidatorRegistrationV1 = &eth.ValidatorRegistrationV1{}
|
||||||
|
}
|
||||||
|
o := struct {
|
||||||
|
FeeRecipient hexutil.Bytes `json:"fee_recipient"`
|
||||||
|
GasLimit string `json:"gas_limit"`
|
||||||
|
Timestamp string `json:"timestamp"`
|
||||||
|
Pubkey hexutil.Bytes `json:"pubkey"`
|
||||||
|
}{}
|
||||||
|
if err := json.Unmarshal(b, &o); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
r.FeeRecipient = o.FeeRecipient
|
||||||
|
r.Pubkey = o.Pubkey
|
||||||
|
var err error
|
||||||
|
if r.GasLimit, err = strconv.ParseUint(o.GasLimit, 10, 64); err != nil {
|
||||||
|
return errors.Wrap(err, "failed to parse gas limit")
|
||||||
|
}
|
||||||
|
if r.Timestamp, err = strconv.ParseUint(o.Timestamp, 10, 64); err != nil {
|
||||||
|
return errors.Wrap(err, "failed to parse timestamp")
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||

+var errInvalidUint256 = errors.New("invalid Uint256")
+var errDecodeUint256 = errors.New("unable to decode into Uint256")

 type Uint256 struct {
 	*big.Int
 }

-func stringToUint256(s string) Uint256 {
+func isValidUint256(bi *big.Int) bool {
+	return bi.Cmp(big.NewInt(0)) >= 0 && bi.BitLen() <= 256
+}
+
+func stringToUint256(s string) (Uint256, error) {
 	bi := new(big.Int)
-	bi.SetString(s, 10)
-	return Uint256{Int: bi}
+	_, ok := bi.SetString(s, 10)
+	if !ok || !isValidUint256(bi) {
+		return Uint256{}, errors.Wrapf(errDecodeUint256, "value=%s", s)
+	}
+	return Uint256{Int: bi}, nil
 }

 // sszBytesToUint256 creates a Uint256 from a ssz-style (little-endian byte slice) representation.
-func sszBytesToUint256(b []byte) Uint256 {
-	bi := new(big.Int)
-	return Uint256{Int: bi.SetBytes(bytesutil.ReverseByteOrder(b))}
+func sszBytesToUint256(b []byte) (Uint256, error) {
+	bi := bytesutil.LittleEndianBytesToBigInt(b)
+	if !isValidUint256(bi) {
+		return Uint256{}, errors.Wrapf(errDecodeUint256, "value=%s", b)
+	}
+	return Uint256{Int: bi}, nil
 }

 // SSZBytes creates an ssz-style (little-endian byte slice) representation of the Uint256
 func (s Uint256) SSZBytes() []byte {
-	return bytesutil.ReverseByteOrder(s.Int.Bytes())
+	if !isValidUint256(s.Int) {
+		return []byte{}
+	}
+	return bytesutil.PadTo(bytesutil.ReverseByteOrder(s.Int.Bytes()), 32)
 }

-var errUnmarshalUint256Failed = errors.New("unable to UnmarshalText into a Uint256 value")

|
func (s *Uint256) UnmarshalJSON(t []byte) error {
|
||||||
start := 0
|
start := 0
|
||||||
end := len(t)
|
end := len(t)
|
||||||
@@ -86,7 +144,10 @@ func (s *Uint256) UnmarshalText(t []byte) error {
|
|||||||
}
|
}
|
||||||
z, ok := s.SetString(string(t), 10)
|
z, ok := s.SetString(string(t), 10)
|
||||||
if !ok {
|
if !ok {
|
||||||
return errors.Wrapf(errUnmarshalUint256Failed, "value=%s", string(t))
|
return errors.Wrapf(errDecodeUint256, "value=%s", t)
|
||||||
|
}
|
||||||
|
if !isValidUint256(z) {
|
||||||
|
return errors.Wrapf(errDecodeUint256, "value=%s", t)
|
||||||
}
|
}
|
||||||
s.Int = z
|
s.Int = z
|
||||||
return nil
|
return nil
|
||||||
@@ -103,6 +164,9 @@ func (s Uint256) MarshalJSON() ([]byte, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (s Uint256) MarshalText() ([]byte, error) {
|
func (s Uint256) MarshalText() ([]byte, error) {
|
||||||
|
if !isValidUint256(s.Int) {
|
||||||
|
return nil, errors.Wrapf(errInvalidUint256, "value=%s", s.Int)
|
||||||
|
}
|
||||||
return []byte(s.String()), nil
|
return []byte(s.String()), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -118,12 +182,16 @@ func (s Uint64String) MarshalText() ([]byte, error) {
 	return []byte(fmt.Sprintf("%d", s)), nil
 }

+type VersionResponse struct {
+	Version string `json:"version"`
+}
+
 type ExecHeaderResponse struct {
-	Version string `json:"version,omitempty"`
+	Version string `json:"version"`
 	Data    struct {
-		Signature hexutil.Bytes `json:"signature,omitempty"`
-		Message   *BuilderBid   `json:"message,omitempty"`
-	} `json:"data,omitempty"`
+		Signature hexutil.Bytes `json:"signature"`
+		Message   *BuilderBid   `json:"message"`
+	} `json:"data"`
 }

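Dropping omitempty from these tags matters for wire-format conformance: zero values such as a gas limit of 0 must still be serialized rather than silently omitted. A tiny illustration with a hypothetical struct (the real fields are strings, but the effect is the same):

package main

import (
	"encoding/json"
	"fmt"
)

type withOmit struct {
	GasLimit uint64 `json:"gas_limit,omitempty"`
}

type withoutOmit struct {
	GasLimit uint64 `json:"gas_limit"`
}

func main() {
	a, _ := json.Marshal(withOmit{})    // {}
	b, _ := json.Marshal(withoutOmit{}) // {"gas_limit":0}
	fmt.Println(string(a), string(b))
}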
func (ehr *ExecHeaderResponse) ToProto() (*eth.SignedBuilderBid, error) {
|
func (ehr *ExecHeaderResponse) ToProto() (*eth.SignedBuilderBid, error) {
|
||||||
@@ -149,8 +217,8 @@ func (bb *BuilderBid) ToProto() (*eth.BuilderBid, error) {
|
|||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *ExecutionPayloadHeader) ToProto() (*eth.ExecutionPayloadHeader, error) {
|
func (h *ExecutionPayloadHeader) ToProto() (*v1.ExecutionPayloadHeader, error) {
|
||||||
return ð.ExecutionPayloadHeader{
|
return &v1.ExecutionPayloadHeader{
|
||||||
ParentHash: h.ParentHash,
|
ParentHash: h.ParentHash,
|
||||||
FeeRecipient: h.FeeRecipient,
|
FeeRecipient: h.FeeRecipient,
|
||||||
StateRoot: h.StateRoot,
|
StateRoot: h.StateRoot,
|
||||||
@@ -169,31 +237,35 @@ func (h *ExecutionPayloadHeader) ToProto() (*eth.ExecutionPayloadHeader, error)
|
|||||||
}
|
}
|
||||||
|
|
||||||
type BuilderBid struct {
|
type BuilderBid struct {
|
||||||
Header *ExecutionPayloadHeader `json:"header,omitempty"`
|
Header *ExecutionPayloadHeader `json:"header"`
|
||||||
Value Uint256 `json:"value,omitempty"`
|
Value Uint256 `json:"value"`
|
||||||
Pubkey hexutil.Bytes `json:"pubkey,omitempty"`
|
Pubkey hexutil.Bytes `json:"pubkey"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type ExecutionPayloadHeader struct {
|
type ExecutionPayloadHeader struct {
|
||||||
ParentHash hexutil.Bytes `json:"parent_hash,omitempty"`
|
ParentHash hexutil.Bytes `json:"parent_hash"`
|
||||||
FeeRecipient hexutil.Bytes `json:"fee_recipient,omitempty"`
|
FeeRecipient hexutil.Bytes `json:"fee_recipient"`
|
||||||
StateRoot hexutil.Bytes `json:"state_root,omitempty"`
|
StateRoot hexutil.Bytes `json:"state_root"`
|
||||||
ReceiptsRoot hexutil.Bytes `json:"receipts_root,omitempty"`
|
ReceiptsRoot hexutil.Bytes `json:"receipts_root"`
|
||||||
LogsBloom hexutil.Bytes `json:"logs_bloom,omitempty"`
|
LogsBloom hexutil.Bytes `json:"logs_bloom"`
|
||||||
PrevRandao hexutil.Bytes `json:"prev_randao,omitempty"`
|
PrevRandao hexutil.Bytes `json:"prev_randao"`
|
||||||
BlockNumber Uint64String `json:"block_number,omitempty"`
|
BlockNumber Uint64String `json:"block_number"`
|
||||||
GasLimit Uint64String `json:"gas_limit,omitempty"`
|
GasLimit Uint64String `json:"gas_limit"`
|
||||||
GasUsed Uint64String `json:"gas_used,omitempty"`
|
GasUsed Uint64String `json:"gas_used"`
|
||||||
Timestamp Uint64String `json:"timestamp,omitempty"`
|
Timestamp Uint64String `json:"timestamp"`
|
||||||
ExtraData hexutil.Bytes `json:"extra_data,omitempty"`
|
ExtraData hexutil.Bytes `json:"extra_data"`
|
||||||
BaseFeePerGas Uint256 `json:"base_fee_per_gas,omitempty"`
|
BaseFeePerGas Uint256 `json:"base_fee_per_gas"`
|
||||||
BlockHash hexutil.Bytes `json:"block_hash,omitempty"`
|
BlockHash hexutil.Bytes `json:"block_hash"`
|
||||||
TransactionsRoot hexutil.Bytes `json:"transactions_root,omitempty"`
|
TransactionsRoot hexutil.Bytes `json:"transactions_root"`
|
||||||
*eth.ExecutionPayloadHeader
|
*v1.ExecutionPayloadHeader
|
||||||
}
|
}
|
||||||

 func (h *ExecutionPayloadHeader) MarshalJSON() ([]byte, error) {
 	type MarshalCaller ExecutionPayloadHeader
+	baseFeePerGas, err := sszBytesToUint256(h.ExecutionPayloadHeader.BaseFeePerGas)
+	if err != nil {
+		return []byte{}, errors.Wrapf(err, "invalid BaseFeePerGas")
+	}
 	return json.Marshal(&MarshalCaller{
 		ParentHash:   h.ExecutionPayloadHeader.ParentHash,
 		FeeRecipient: h.ExecutionPayloadHeader.FeeRecipient,
@@ -206,7 +278,7 @@ func (h *ExecutionPayloadHeader) MarshalJSON() ([]byte, error) {
 		GasUsed:          Uint64String(h.ExecutionPayloadHeader.GasUsed),
 		Timestamp:        Uint64String(h.ExecutionPayloadHeader.Timestamp),
 		ExtraData:        h.ExecutionPayloadHeader.ExtraData,
-		BaseFeePerGas:    sszBytesToUint256(h.ExecutionPayloadHeader.BaseFeePerGas),
+		BaseFeePerGas:    baseFeePerGas,
 		BlockHash:        h.ExecutionPayloadHeader.BlockHash,
 		TransactionsRoot: h.ExecutionPayloadHeader.TransactionsRoot,
 	})
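The hunk above now converts BaseFeePerGas once, up front, because sszBytesToUint256 reports a conversion error instead of being callable inline. The helper's real definition is elsewhere in this package; what follows is only a minimal sketch of the assumed behavior (a fixed 32-byte little-endian SSZ integer reversed into a big.Int). The package name, the sketch's function name, and the duplicated Uint256 declaration are illustrative assumptions, not code from this change.

// Sketch file; package name is an assumption.
package builder

import (
	"fmt"
	"math/big"
)

// Uint256 mirrors the wrapper used in the diff (an embedded *big.Int); sketch only.
type Uint256 struct {
	*big.Int
}

// sszBytesToUint256Sketch illustrates the assumed contract of the helper called
// above: a 32-byte little-endian SSZ integer is reversed into a big.Int, and any
// other length is an error, which is why MarshalJSON now checks the result.
func sszBytesToUint256Sketch(b []byte) (Uint256, error) {
	if len(b) != 32 {
		return Uint256{}, fmt.Errorf("expected 32 ssz bytes, got %d", len(b))
	}
	be := make([]byte, 32)
	for i := range b {
		be[31-i] = b[i] // little-endian SSZ -> big-endian for big.Int
	}
	return Uint256{Int: new(big.Int).SetBytes(be)}, nil
}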
@@ -226,25 +298,25 @@ func (h *ExecutionPayloadHeader) UnmarshalJSON(b []byte) error {
 }

 type ExecPayloadResponse struct {
-	Version string           `json:"version,omitempty"`
-	Data    ExecutionPayload `json:"data,omitempty"`
+	Version string           `json:"version"`
+	Data    ExecutionPayload `json:"data"`
 }

 type ExecutionPayload struct {
-	ParentHash    hexutil.Bytes   `json:"parent_hash,omitempty"`
-	FeeRecipient  hexutil.Bytes   `json:"fee_recipient,omitempty"`
-	StateRoot     hexutil.Bytes   `json:"state_root,omitempty"`
-	ReceiptsRoot  hexutil.Bytes   `json:"receipts_root,omitempty"`
-	LogsBloom     hexutil.Bytes   `json:"logs_bloom,omitempty"`
-	PrevRandao    hexutil.Bytes   `json:"prev_randao,omitempty"`
-	BlockNumber   Uint64String    `json:"block_number,omitempty"`
-	GasLimit      Uint64String    `json:"gas_limit,omitempty"`
-	GasUsed       Uint64String    `json:"gas_used,omitempty"`
-	Timestamp     Uint64String    `json:"timestamp,omitempty"`
-	ExtraData     hexutil.Bytes   `json:"extra_data,omitempty"`
-	BaseFeePerGas Uint256         `json:"base_fee_per_gas,omitempty"`
-	BlockHash     hexutil.Bytes   `json:"block_hash,omitempty"`
-	Transactions  []hexutil.Bytes `json:"transactions,omitempty"`
+	ParentHash    hexutil.Bytes   `json:"parent_hash"`
+	FeeRecipient  hexutil.Bytes   `json:"fee_recipient"`
+	StateRoot     hexutil.Bytes   `json:"state_root"`
+	ReceiptsRoot  hexutil.Bytes   `json:"receipts_root"`
+	LogsBloom     hexutil.Bytes   `json:"logs_bloom"`
+	PrevRandao    hexutil.Bytes   `json:"prev_randao"`
+	BlockNumber   Uint64String    `json:"block_number"`
+	GasLimit      Uint64String    `json:"gas_limit"`
+	GasUsed       Uint64String    `json:"gas_used"`
+	Timestamp     Uint64String    `json:"timestamp"`
+	ExtraData     hexutil.Bytes   `json:"extra_data"`
+	BaseFeePerGas Uint256         `json:"base_fee_per_gas"`
+	BlockHash     hexutil.Bytes   `json:"block_hash"`
+	Transactions  []hexutil.Bytes `json:"transactions"`
 }

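In the structs above, block_number, gas_limit, gas_used and timestamp are carried as Uint64String, matching the builder API convention of encoding integers as decimal JSON strings (the fixtures later in this diff use values like "1"). The real Uint64String is defined elsewhere in this package; the snippet below is only a sketch of that assumed string round-trip, using a hypothetical uint64StringSketch type and assuming only the standard strconv import.

// Sketch only: shows the assumed behavior that motivates the Uint64String type.
type uint64StringSketch uint64

func (s uint64StringSketch) MarshalJSON() ([]byte, error) {
	// Encode the integer as a quoted decimal string, e.g. 1 -> "1".
	return []byte(strconv.Quote(strconv.FormatUint(uint64(s), 10))), nil
}

func (s *uint64StringSketch) UnmarshalJSON(b []byte) error {
	// Strip the JSON quotes, then parse the decimal value.
	unquoted, err := strconv.Unquote(string(b))
	if err != nil {
		return err
	}
	v, err := strconv.ParseUint(unquoted, 10, 64)
	if err != nil {
		return err
	}
	*s = uint64StringSketch(v)
	return nil
}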
 func (r *ExecPayloadResponse) ToProto() (*v1.ExecutionPayload, error) {
@@ -274,6 +346,254 @@ func (p *ExecutionPayload) ToProto() (*v1.ExecutionPayload, error) {
 	}, nil
 }

+// FromProto converts a proto execution payload type to our builder
+// compatible payload type.
+func FromProto(payload *v1.ExecutionPayload) (ExecutionPayload, error) {
+	bFee, err := sszBytesToUint256(payload.BaseFeePerGas)
+	if err != nil {
+		return ExecutionPayload{}, err
+	}
+	txs := make([]hexutil.Bytes, len(payload.Transactions))
+	for i := range payload.Transactions {
+		txs[i] = payload.Transactions[i]
+	}
+	return ExecutionPayload{
+		ParentHash:    payload.ParentHash,
+		FeeRecipient:  payload.FeeRecipient,
+		StateRoot:     payload.StateRoot,
+		ReceiptsRoot:  payload.ReceiptsRoot,
+		LogsBloom:     payload.LogsBloom,
+		PrevRandao:    payload.PrevRandao,
+		BlockNumber:   Uint64String(payload.BlockNumber),
+		GasLimit:      Uint64String(payload.GasLimit),
+		GasUsed:       Uint64String(payload.GasUsed),
+		Timestamp:     Uint64String(payload.Timestamp),
+		ExtraData:     payload.ExtraData,
+		BaseFeePerGas: bFee,
+		BlockHash:     payload.BlockHash,
+		Transactions:  txs,
+	}, nil
+}
+
+// FromProtoCapella converts a proto execution payload type for capella to our
+// builder compatible payload type.
+func FromProtoCapella(payload *v1.ExecutionPayloadCapella) (ExecutionPayloadCapella, error) {
+	bFee, err := sszBytesToUint256(payload.BaseFeePerGas)
+	if err != nil {
+		return ExecutionPayloadCapella{}, err
+	}
+	txs := make([]hexutil.Bytes, len(payload.Transactions))
+	for i := range payload.Transactions {
+		txs[i] = payload.Transactions[i]
+	}
+	withdrawals := make([]Withdrawal, len(payload.Withdrawals))
+	for i, w := range payload.Withdrawals {
+		withdrawals[i] = Withdrawal{
+			Index:          Uint256{Int: big.NewInt(0).SetUint64(w.Index)},
+			ValidatorIndex: Uint256{Int: big.NewInt(0).SetUint64(uint64(w.ValidatorIndex))},
+			Address:        w.Address,
+			Amount:         Uint256{Int: big.NewInt(0).SetUint64(w.Amount)},
+		}
+	}
+	return ExecutionPayloadCapella{
+		ParentHash:    payload.ParentHash,
+		FeeRecipient:  payload.FeeRecipient,
+		StateRoot:     payload.StateRoot,
+		ReceiptsRoot:  payload.ReceiptsRoot,
+		LogsBloom:     payload.LogsBloom,
+		PrevRandao:    payload.PrevRandao,
+		BlockNumber:   Uint64String(payload.BlockNumber),
+		GasLimit:      Uint64String(payload.GasLimit),
+		GasUsed:       Uint64String(payload.GasUsed),
+		Timestamp:     Uint64String(payload.Timestamp),
+		ExtraData:     payload.ExtraData,
+		BaseFeePerGas: bFee,
+		BlockHash:     payload.BlockHash,
+		Transactions:  txs,
+		Withdrawals:   withdrawals,
+	}, nil
+}
+
+type ExecHeaderResponseCapella struct {
+	Data struct {
+		Signature hexutil.Bytes      `json:"signature"`
+		Message   *BuilderBidCapella `json:"message"`
+	} `json:"data"`
+}
+
+func (ehr *ExecHeaderResponseCapella) ToProto() (*eth.SignedBuilderBidCapella, error) {
+	bb, err := ehr.Data.Message.ToProto()
+	if err != nil {
+		return nil, err
+	}
+	return &eth.SignedBuilderBidCapella{
+		Message:   bb,
+		Signature: ehr.Data.Signature,
+	}, nil
+}
+
+func (bb *BuilderBidCapella) ToProto() (*eth.BuilderBidCapella, error) {
+	header, err := bb.Header.ToProto()
+	if err != nil {
+		return nil, err
+	}
+	return &eth.BuilderBidCapella{
+		Header: header,
+		Value:  bb.Value.SSZBytes(),
+		Pubkey: bb.Pubkey,
+	}, nil
+}
+
+func (h *ExecutionPayloadHeaderCapella) ToProto() (*v1.ExecutionPayloadHeaderCapella, error) {
+	return &v1.ExecutionPayloadHeaderCapella{
+		ParentHash:       h.ParentHash,
+		FeeRecipient:     h.FeeRecipient,
+		StateRoot:        h.StateRoot,
+		ReceiptsRoot:     h.ReceiptsRoot,
+		LogsBloom:        h.LogsBloom,
+		PrevRandao:       h.PrevRandao,
+		BlockNumber:      uint64(h.BlockNumber),
+		GasLimit:         uint64(h.GasLimit),
+		GasUsed:          uint64(h.GasUsed),
+		Timestamp:        uint64(h.Timestamp),
+		ExtraData:        h.ExtraData,
+		BaseFeePerGas:    h.BaseFeePerGas.SSZBytes(),
+		BlockHash:        h.BlockHash,
+		TransactionsRoot: h.TransactionsRoot,
+		WithdrawalsRoot:  h.WithdrawalsRoot,
+	}, nil
+}
+
+type BuilderBidCapella struct {
+	Header *ExecutionPayloadHeaderCapella `json:"header"`
+	Value  Uint256                        `json:"value"`
+	Pubkey hexutil.Bytes                  `json:"pubkey"`
+}
+
+type ExecutionPayloadHeaderCapella struct {
+	ParentHash       hexutil.Bytes `json:"parent_hash"`
+	FeeRecipient     hexutil.Bytes `json:"fee_recipient"`
+	StateRoot        hexutil.Bytes `json:"state_root"`
+	ReceiptsRoot     hexutil.Bytes `json:"receipts_root"`
+	LogsBloom        hexutil.Bytes `json:"logs_bloom"`
+	PrevRandao       hexutil.Bytes `json:"prev_randao"`
+	BlockNumber      Uint64String  `json:"block_number"`
+	GasLimit         Uint64String  `json:"gas_limit"`
+	GasUsed          Uint64String  `json:"gas_used"`
+	Timestamp        Uint64String  `json:"timestamp"`
+	ExtraData        hexutil.Bytes `json:"extra_data"`
+	BaseFeePerGas    Uint256       `json:"base_fee_per_gas"`
+	BlockHash        hexutil.Bytes `json:"block_hash"`
+	TransactionsRoot hexutil.Bytes `json:"transactions_root"`
+	WithdrawalsRoot  hexutil.Bytes `json:"withdrawals_root"`
+	*v1.ExecutionPayloadHeaderCapella
+}
+
+func (h *ExecutionPayloadHeaderCapella) MarshalJSON() ([]byte, error) {
+	type MarshalCaller ExecutionPayloadHeaderCapella
+	baseFeePerGas, err := sszBytesToUint256(h.ExecutionPayloadHeaderCapella.BaseFeePerGas)
+	if err != nil {
+		return []byte{}, errors.Wrapf(err, "invalid BaseFeePerGas")
+	}
+	return json.Marshal(&MarshalCaller{
+		ParentHash:       h.ExecutionPayloadHeaderCapella.ParentHash,
+		FeeRecipient:     h.ExecutionPayloadHeaderCapella.FeeRecipient,
+		StateRoot:        h.ExecutionPayloadHeaderCapella.StateRoot,
+		ReceiptsRoot:     h.ExecutionPayloadHeaderCapella.ReceiptsRoot,
+		LogsBloom:        h.ExecutionPayloadHeaderCapella.LogsBloom,
+		PrevRandao:       h.ExecutionPayloadHeaderCapella.PrevRandao,
+		BlockNumber:      Uint64String(h.ExecutionPayloadHeaderCapella.BlockNumber),
+		GasLimit:         Uint64String(h.ExecutionPayloadHeaderCapella.GasLimit),
+		GasUsed:          Uint64String(h.ExecutionPayloadHeaderCapella.GasUsed),
+		Timestamp:        Uint64String(h.ExecutionPayloadHeaderCapella.Timestamp),
+		ExtraData:        h.ExecutionPayloadHeaderCapella.ExtraData,
+		BaseFeePerGas:    baseFeePerGas,
+		BlockHash:        h.ExecutionPayloadHeaderCapella.BlockHash,
+		TransactionsRoot: h.ExecutionPayloadHeaderCapella.TransactionsRoot,
+		WithdrawalsRoot:  h.ExecutionPayloadHeaderCapella.WithdrawalsRoot,
+	})
+}
+
+func (h *ExecutionPayloadHeaderCapella) UnmarshalJSON(b []byte) error {
+	type UnmarshalCaller ExecutionPayloadHeaderCapella
+	uc := &UnmarshalCaller{}
+	if err := json.Unmarshal(b, uc); err != nil {
+		return err
+	}
+	ep := ExecutionPayloadHeaderCapella(*uc)
+	*h = ep
+	var err error
+	h.ExecutionPayloadHeaderCapella, err = h.ToProto()
+	return err
+}
+
+type ExecPayloadResponseCapella struct {
+	Version string                  `json:"version"`
+	Data    ExecutionPayloadCapella `json:"data"`
+}
+
+type ExecutionPayloadCapella struct {
+	ParentHash    hexutil.Bytes   `json:"parent_hash"`
+	FeeRecipient  hexutil.Bytes   `json:"fee_recipient"`
+	StateRoot     hexutil.Bytes   `json:"state_root"`
+	ReceiptsRoot  hexutil.Bytes   `json:"receipts_root"`
+	LogsBloom     hexutil.Bytes   `json:"logs_bloom"`
+	PrevRandao    hexutil.Bytes   `json:"prev_randao"`
+	BlockNumber   Uint64String    `json:"block_number"`
+	GasLimit      Uint64String    `json:"gas_limit"`
+	GasUsed       Uint64String    `json:"gas_used"`
+	Timestamp     Uint64String    `json:"timestamp"`
+	ExtraData     hexutil.Bytes   `json:"extra_data"`
+	BaseFeePerGas Uint256         `json:"base_fee_per_gas"`
+	BlockHash     hexutil.Bytes   `json:"block_hash"`
+	Transactions  []hexutil.Bytes `json:"transactions"`
+	Withdrawals   []Withdrawal    `json:"withdrawals"`
+}
+
+func (r *ExecPayloadResponseCapella) ToProto() (*v1.ExecutionPayloadCapella, error) {
+	return r.Data.ToProto()
+}
+
+func (p *ExecutionPayloadCapella) ToProto() (*v1.ExecutionPayloadCapella, error) {
+	txs := make([][]byte, len(p.Transactions))
+	for i := range p.Transactions {
+		txs[i] = p.Transactions[i]
+	}
+	withdrawals := make([]*v1.Withdrawal, len(p.Withdrawals))
+	for i, w := range p.Withdrawals {
+		withdrawals[i] = &v1.Withdrawal{
+			Index:          w.Index.Uint64(),
+			ValidatorIndex: types.ValidatorIndex(w.ValidatorIndex.Uint64()),
+			Address:        w.Address,
+			Amount:         w.Amount.Uint64(),
+		}
+	}
+	return &v1.ExecutionPayloadCapella{
+		ParentHash:    p.ParentHash,
+		FeeRecipient:  p.FeeRecipient,
+		StateRoot:     p.StateRoot,
+		ReceiptsRoot:  p.ReceiptsRoot,
+		LogsBloom:     p.LogsBloom,
+		PrevRandao:    p.PrevRandao,
+		BlockNumber:   uint64(p.BlockNumber),
+		GasLimit:      uint64(p.GasLimit),
+		GasUsed:       uint64(p.GasUsed),
+		Timestamp:     uint64(p.Timestamp),
+		ExtraData:     p.ExtraData,
+		BaseFeePerGas: p.BaseFeePerGas.SSZBytes(),
+		BlockHash:     p.BlockHash,
+		Transactions:  txs,
+		Withdrawals:   withdrawals,
+	}, nil
+}
+
+type Withdrawal struct {
+	Index          Uint256       `json:"index"`
+	ValidatorIndex Uint256       `json:"validator_index"`
+	Address        hexutil.Bytes `json:"address"`
+	Amount         Uint256       `json:"amount"`
+}

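The added Capella types above mirror the Bellatrix pair: a JSON-facing struct for the builder API response and a ToProto conversion into the engine proto types. The snippet below is a usage sketch only, not part of this change; the function name is hypothetical and the response bytes are assumed to come from however the client reads the relay's payload response (presumably the body returned when a signed blinded block is submitted). json and the v1 proto alias are already imported by this file.

// Usage sketch: decode a Capella payload response body and convert it to proto.
func decodeExecPayloadCapellaSketch(body []byte) (*v1.ExecutionPayloadCapella, error) {
	resp := &ExecPayloadResponseCapella{}
	if err := json.Unmarshal(body, resp); err != nil {
		return nil, err
	}
	return resp.ToProto()
}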
type SignedBlindedBeaconBlockBellatrix struct {
|
type SignedBlindedBeaconBlockBellatrix struct {
|
||||||
*eth.SignedBlindedBeaconBlockBellatrix
|
*eth.SignedBlindedBeaconBlockBellatrix
|
||||||
}
|
}
|
||||||
@@ -288,8 +608,8 @@ type BlindedBeaconBlockBodyBellatrix struct {
|
|||||||
|
|
||||||
func (r *SignedBlindedBeaconBlockBellatrix) MarshalJSON() ([]byte, error) {
|
func (r *SignedBlindedBeaconBlockBellatrix) MarshalJSON() ([]byte, error) {
|
||||||
return json.Marshal(struct {
|
return json.Marshal(struct {
|
||||||
Message *BlindedBeaconBlockBellatrix `json:"message,omitempty"`
|
Message *BlindedBeaconBlockBellatrix `json:"message"`
|
||||||
Signature hexutil.Bytes `json:"signature,omitempty"`
|
Signature hexutil.Bytes `json:"signature"`
|
||||||
}{
|
}{
|
||||||
Message: &BlindedBeaconBlockBellatrix{r.SignedBlindedBeaconBlockBellatrix.Block},
|
Message: &BlindedBeaconBlockBellatrix{r.SignedBlindedBeaconBlockBellatrix.Block},
|
||||||
Signature: r.SignedBlindedBeaconBlockBellatrix.Signature,
|
Signature: r.SignedBlindedBeaconBlockBellatrix.Signature,
|
||||||
@@ -299,10 +619,10 @@ func (r *SignedBlindedBeaconBlockBellatrix) MarshalJSON() ([]byte, error) {
|
|||||||
func (b *BlindedBeaconBlockBellatrix) MarshalJSON() ([]byte, error) {
|
func (b *BlindedBeaconBlockBellatrix) MarshalJSON() ([]byte, error) {
|
||||||
return json.Marshal(struct {
|
return json.Marshal(struct {
|
||||||
Slot string `json:"slot"`
|
Slot string `json:"slot"`
|
||||||
ProposerIndex string `json:"proposer_index,omitempty"`
|
ProposerIndex string `json:"proposer_index"`
|
||||||
ParentRoot hexutil.Bytes `json:"parent_root,omitempty"`
|
ParentRoot hexutil.Bytes `json:"parent_root"`
|
||||||
StateRoot hexutil.Bytes `json:"state_root,omitempty"`
|
StateRoot hexutil.Bytes `json:"state_root"`
|
||||||
Body *BlindedBeaconBlockBodyBellatrix `json:"body,omitempty"`
|
Body *BlindedBeaconBlockBodyBellatrix `json:"body"`
|
||||||
}{
|
}{
|
||||||
Slot: fmt.Sprintf("%d", b.Slot),
|
Slot: fmt.Sprintf("%d", b.Slot),
|
||||||
ProposerIndex: fmt.Sprintf("%d", b.ProposerIndex),
|
ProposerIndex: fmt.Sprintf("%d", b.ProposerIndex),
|
||||||
@@ -318,8 +638,8 @@ type ProposerSlashing struct {
|
|||||||
|
|
||||||
func (s *ProposerSlashing) MarshalJSON() ([]byte, error) {
|
func (s *ProposerSlashing) MarshalJSON() ([]byte, error) {
|
||||||
return json.Marshal(struct {
|
return json.Marshal(struct {
|
||||||
SignedHeader1 *SignedBeaconBlockHeader `json:"signed_header_1,omitempty"`
|
SignedHeader1 *SignedBeaconBlockHeader `json:"signed_header_1"`
|
||||||
SignedHeader2 *SignedBeaconBlockHeader `json:"signed_header_2,omitempty"`
|
SignedHeader2 *SignedBeaconBlockHeader `json:"signed_header_2"`
|
||||||
}{
|
}{
|
||||||
SignedHeader1: &SignedBeaconBlockHeader{s.ProposerSlashing.Header_1},
|
SignedHeader1: &SignedBeaconBlockHeader{s.ProposerSlashing.Header_1},
|
||||||
SignedHeader2: &SignedBeaconBlockHeader{s.ProposerSlashing.Header_2},
|
SignedHeader2: &SignedBeaconBlockHeader{s.ProposerSlashing.Header_2},
|
||||||
@@ -332,8 +652,8 @@ type SignedBeaconBlockHeader struct {
|
|||||||
|
|
||||||
func (h *SignedBeaconBlockHeader) MarshalJSON() ([]byte, error) {
|
func (h *SignedBeaconBlockHeader) MarshalJSON() ([]byte, error) {
|
||||||
return json.Marshal(struct {
|
return json.Marshal(struct {
|
||||||
Header *BeaconBlockHeader `json:"message,omitempty"`
|
Header *BeaconBlockHeader `json:"message"`
|
||||||
Signature hexutil.Bytes `json:"signature,omitempty"`
|
Signature hexutil.Bytes `json:"signature"`
|
||||||
}{
|
}{
|
||||||
Header: &BeaconBlockHeader{h.SignedBeaconBlockHeader.Header},
|
Header: &BeaconBlockHeader{h.SignedBeaconBlockHeader.Header},
|
||||||
Signature: h.SignedBeaconBlockHeader.Signature,
|
Signature: h.SignedBeaconBlockHeader.Signature,
|
||||||
@@ -346,11 +666,11 @@ type BeaconBlockHeader struct {
|
|||||||
|
|
||||||
func (h *BeaconBlockHeader) MarshalJSON() ([]byte, error) {
|
func (h *BeaconBlockHeader) MarshalJSON() ([]byte, error) {
|
||||||
return json.Marshal(struct {
|
return json.Marshal(struct {
|
||||||
Slot string `json:"slot,omitempty"`
|
Slot string `json:"slot"`
|
||||||
ProposerIndex string `json:"proposer_index,omitempty"`
|
ProposerIndex string `json:"proposer_index"`
|
||||||
ParentRoot hexutil.Bytes `json:"parent_root,omitempty"`
|
ParentRoot hexutil.Bytes `json:"parent_root"`
|
||||||
StateRoot hexutil.Bytes `json:"state_root,omitempty"`
|
StateRoot hexutil.Bytes `json:"state_root"`
|
||||||
BodyRoot hexutil.Bytes `json:"body_root,omitempty"`
|
BodyRoot hexutil.Bytes `json:"body_root"`
|
||||||
}{
|
}{
|
||||||
Slot: fmt.Sprintf("%d", h.BeaconBlockHeader.Slot),
|
Slot: fmt.Sprintf("%d", h.BeaconBlockHeader.Slot),
|
||||||
ProposerIndex: fmt.Sprintf("%d", h.BeaconBlockHeader.ProposerIndex),
|
ProposerIndex: fmt.Sprintf("%d", h.BeaconBlockHeader.ProposerIndex),
|
||||||
@@ -370,9 +690,9 @@ func (a *IndexedAttestation) MarshalJSON() ([]byte, error) {
|
|||||||
indices[i] = fmt.Sprintf("%d", a.AttestingIndices[i])
|
indices[i] = fmt.Sprintf("%d", a.AttestingIndices[i])
|
||||||
}
|
}
|
||||||
return json.Marshal(struct {
|
return json.Marshal(struct {
|
||||||
AttestingIndices []string `json:"attesting_indices,omitempty"`
|
AttestingIndices []string `json:"attesting_indices"`
|
||||||
Data *AttestationData `json:"data,omitempty"`
|
Data *AttestationData `json:"data"`
|
||||||
Signature hexutil.Bytes `json:"signature,omitempty"`
|
Signature hexutil.Bytes `json:"signature"`
|
||||||
}{
|
}{
|
||||||
AttestingIndices: indices,
|
AttestingIndices: indices,
|
||||||
Data: &AttestationData{a.IndexedAttestation.Data},
|
Data: &AttestationData{a.IndexedAttestation.Data},
|
||||||
@@ -386,8 +706,8 @@ type AttesterSlashing struct {
|
|||||||
|
|
||||||
func (s *AttesterSlashing) MarshalJSON() ([]byte, error) {
|
func (s *AttesterSlashing) MarshalJSON() ([]byte, error) {
|
||||||
return json.Marshal(struct {
|
return json.Marshal(struct {
|
||||||
Attestation1 *IndexedAttestation `json:"attestation_1,omitempty"`
|
Attestation1 *IndexedAttestation `json:"attestation_1"`
|
||||||
Attestation2 *IndexedAttestation `json:"attestation_2,omitempty"`
|
Attestation2 *IndexedAttestation `json:"attestation_2"`
|
||||||
}{
|
}{
|
||||||
Attestation1: &IndexedAttestation{s.Attestation_1},
|
Attestation1: &IndexedAttestation{s.Attestation_1},
|
||||||
Attestation2: &IndexedAttestation{s.Attestation_2},
|
Attestation2: &IndexedAttestation{s.Attestation_2},
|
||||||
@@ -400,8 +720,8 @@ type Checkpoint struct {
|
|||||||
|
|
||||||
func (c *Checkpoint) MarshalJSON() ([]byte, error) {
|
func (c *Checkpoint) MarshalJSON() ([]byte, error) {
|
||||||
return json.Marshal(struct {
|
return json.Marshal(struct {
|
||||||
Epoch string `json:"epoch,omitempty"`
|
Epoch string `json:"epoch"`
|
||||||
Root hexutil.Bytes `json:"root,omitempty"`
|
Root hexutil.Bytes `json:"root"`
|
||||||
}{
|
}{
|
||||||
Epoch: fmt.Sprintf("%d", c.Checkpoint.Epoch),
|
Epoch: fmt.Sprintf("%d", c.Checkpoint.Epoch),
|
||||||
Root: c.Checkpoint.Root,
|
Root: c.Checkpoint.Root,
|
||||||
@@ -414,11 +734,11 @@ type AttestationData struct {
|
|||||||
|
|
||||||
func (a *AttestationData) MarshalJSON() ([]byte, error) {
|
func (a *AttestationData) MarshalJSON() ([]byte, error) {
|
||||||
return json.Marshal(struct {
|
return json.Marshal(struct {
|
||||||
Slot string `json:"slot,omitempty"`
|
Slot string `json:"slot"`
|
||||||
Index string `json:"index,omitempty"`
|
Index string `json:"index"`
|
||||||
BeaconBlockRoot hexutil.Bytes `json:"beacon_block_root,omitempty"`
|
BeaconBlockRoot hexutil.Bytes `json:"beacon_block_root"`
|
||||||
Source *Checkpoint `json:"source,omitempty"`
|
Source *Checkpoint `json:"source"`
|
||||||
Target *Checkpoint `json:"target,omitempty"`
|
Target *Checkpoint `json:"target"`
|
||||||
}{
|
}{
|
||||||
Slot: fmt.Sprintf("%d", a.AttestationData.Slot),
|
Slot: fmt.Sprintf("%d", a.AttestationData.Slot),
|
||||||
Index: fmt.Sprintf("%d", a.AttestationData.CommitteeIndex),
|
Index: fmt.Sprintf("%d", a.AttestationData.CommitteeIndex),
|
||||||
@@ -434,9 +754,9 @@ type Attestation struct {
|
|||||||
|
|
||||||
func (a *Attestation) MarshalJSON() ([]byte, error) {
|
func (a *Attestation) MarshalJSON() ([]byte, error) {
|
||||||
return json.Marshal(struct {
|
return json.Marshal(struct {
|
||||||
AggregationBits hexutil.Bytes `json:"aggregation_bits,omitempty"`
|
AggregationBits hexutil.Bytes `json:"aggregation_bits"`
|
||||||
Data *AttestationData `json:"data,omitempty"`
|
Data *AttestationData `json:"data"`
|
||||||
Signature hexutil.Bytes `json:"signature,omitempty" ssz-size:"96"`
|
Signature hexutil.Bytes `json:"signature" ssz-size:"96"`
|
||||||
}{
|
}{
|
||||||
AggregationBits: hexutil.Bytes(a.Attestation.AggregationBits),
|
AggregationBits: hexutil.Bytes(a.Attestation.AggregationBits),
|
||||||
Data: &AttestationData{a.Attestation.Data},
|
Data: &AttestationData{a.Attestation.Data},
|
||||||
@@ -450,10 +770,10 @@ type DepositData struct {
|
|||||||
|
|
||||||
func (d *DepositData) MarshalJSON() ([]byte, error) {
|
func (d *DepositData) MarshalJSON() ([]byte, error) {
|
||||||
return json.Marshal(struct {
|
return json.Marshal(struct {
|
||||||
PublicKey hexutil.Bytes `json:"pubkey,omitempty"`
|
PublicKey hexutil.Bytes `json:"pubkey"`
|
||||||
WithdrawalCredentials hexutil.Bytes `json:"withdrawal_credentials,omitempty"`
|
WithdrawalCredentials hexutil.Bytes `json:"withdrawal_credentials"`
|
||||||
Amount string `json:"amount,omitempty"`
|
Amount string `json:"amount"`
|
||||||
Signature hexutil.Bytes `json:"signature,omitempty"`
|
Signature hexutil.Bytes `json:"signature"`
|
||||||
}{
|
}{
|
||||||
PublicKey: d.PublicKey,
|
PublicKey: d.PublicKey,
|
||||||
WithdrawalCredentials: d.WithdrawalCredentials,
|
WithdrawalCredentials: d.WithdrawalCredentials,
|
||||||
@@ -486,8 +806,8 @@ type SignedVoluntaryExit struct {
|
|||||||
|
|
||||||
func (sve *SignedVoluntaryExit) MarshalJSON() ([]byte, error) {
|
func (sve *SignedVoluntaryExit) MarshalJSON() ([]byte, error) {
|
||||||
return json.Marshal(struct {
|
return json.Marshal(struct {
|
||||||
Message *VoluntaryExit `json:"message,omitempty"`
|
Message *VoluntaryExit `json:"message"`
|
||||||
Signature hexutil.Bytes `json:"signature,omitempty"`
|
Signature hexutil.Bytes `json:"signature"`
|
||||||
}{
|
}{
|
||||||
Signature: sve.SignedVoluntaryExit.Signature,
|
Signature: sve.SignedVoluntaryExit.Signature,
|
||||||
Message: &VoluntaryExit{sve.SignedVoluntaryExit.Exit},
|
Message: &VoluntaryExit{sve.SignedVoluntaryExit.Exit},
|
||||||
@@ -500,8 +820,8 @@ type VoluntaryExit struct {
|
|||||||
|
|
||||||
func (ve *VoluntaryExit) MarshalJSON() ([]byte, error) {
|
func (ve *VoluntaryExit) MarshalJSON() ([]byte, error) {
|
||||||
return json.Marshal(struct {
|
return json.Marshal(struct {
|
||||||
Epoch string `json:"epoch,omitempty"`
|
Epoch string `json:"epoch"`
|
||||||
ValidatorIndex string `json:"validator_index,omitempty"`
|
ValidatorIndex string `json:"validator_index"`
|
||||||
}{
|
}{
|
||||||
Epoch: fmt.Sprintf("%d", ve.Epoch),
|
Epoch: fmt.Sprintf("%d", ve.Epoch),
|
||||||
ValidatorIndex: fmt.Sprintf("%d", ve.ValidatorIndex),
|
ValidatorIndex: fmt.Sprintf("%d", ve.ValidatorIndex),
|
||||||
@@ -514,8 +834,8 @@ type SyncAggregate struct {
|
|||||||
|
|
||||||
func (s *SyncAggregate) MarshalJSON() ([]byte, error) {
|
func (s *SyncAggregate) MarshalJSON() ([]byte, error) {
|
||||||
return json.Marshal(struct {
|
return json.Marshal(struct {
|
||||||
SyncCommitteeBits hexutil.Bytes `json:"sync_committee_bits,omitempty"`
|
SyncCommitteeBits hexutil.Bytes `json:"sync_committee_bits"`
|
||||||
SyncCommitteeSignature hexutil.Bytes `json:"sync_committee_signature,omitempty"`
|
SyncCommitteeSignature hexutil.Bytes `json:"sync_committee_signature"`
|
||||||
}{
|
}{
|
||||||
SyncCommitteeBits: hexutil.Bytes(s.SyncAggregate.SyncCommitteeBits),
|
SyncCommitteeBits: hexutil.Bytes(s.SyncAggregate.SyncCommitteeBits),
|
||||||
SyncCommitteeSignature: s.SyncAggregate.SyncCommitteeSignature,
|
SyncCommitteeSignature: s.SyncAggregate.SyncCommitteeSignature,
|
||||||
@@ -528,9 +848,9 @@ type Eth1Data struct {
|
|||||||
|
|
||||||
func (e *Eth1Data) MarshalJSON() ([]byte, error) {
|
func (e *Eth1Data) MarshalJSON() ([]byte, error) {
|
||||||
return json.Marshal(struct {
|
return json.Marshal(struct {
|
||||||
DepositRoot hexutil.Bytes `json:"deposit_root,omitempty"`
|
DepositRoot hexutil.Bytes `json:"deposit_root"`
|
||||||
DepositCount string `json:"deposit_count,omitempty"`
|
DepositCount string `json:"deposit_count"`
|
||||||
BlockHash hexutil.Bytes `json:"block_hash,omitempty"`
|
BlockHash hexutil.Bytes `json:"block_hash"`
|
||||||
}{
|
}{
|
||||||
DepositRoot: e.DepositRoot,
|
DepositRoot: e.DepositRoot,
|
||||||
DepositCount: fmt.Sprintf("%d", e.DepositCount),
|
DepositCount: fmt.Sprintf("%d", e.DepositCount),
|
||||||
@@ -560,16 +880,16 @@ func (b *BlindedBeaconBlockBodyBellatrix) MarshalJSON() ([]byte, error) {
|
|||||||
pros[i] = &ProposerSlashing{ProposerSlashing: b.BlindedBeaconBlockBodyBellatrix.ProposerSlashings[i]}
|
pros[i] = &ProposerSlashing{ProposerSlashing: b.BlindedBeaconBlockBodyBellatrix.ProposerSlashings[i]}
|
||||||
}
|
}
|
||||||
return json.Marshal(struct {
|
return json.Marshal(struct {
|
||||||
RandaoReveal hexutil.Bytes `json:"randao_reveal,omitempty"`
|
RandaoReveal hexutil.Bytes `json:"randao_reveal"`
|
||||||
Eth1Data *Eth1Data `json:"eth1_data,omitempty"`
|
Eth1Data *Eth1Data `json:"eth1_data"`
|
||||||
Graffiti hexutil.Bytes `json:"graffiti,omitempty"`
|
Graffiti hexutil.Bytes `json:"graffiti"`
|
||||||
ProposerSlashings []*ProposerSlashing `json:"proposer_slashings,omitempty"`
|
ProposerSlashings []*ProposerSlashing `json:"proposer_slashings"`
|
||||||
AttesterSlashings []*AttesterSlashing `json:"attester_slashings,omitempty"`
|
AttesterSlashings []*AttesterSlashing `json:"attester_slashings"`
|
||||||
Attestations []*Attestation `json:"attestations,omitempty"`
|
Attestations []*Attestation `json:"attestations"`
|
||||||
Deposits []*Deposit `json:"deposits,omitempty"`
|
Deposits []*Deposit `json:"deposits"`
|
||||||
VoluntaryExits []*SignedVoluntaryExit `json:"voluntary_exits,omitempty"`
|
VoluntaryExits []*SignedVoluntaryExit `json:"voluntary_exits"`
|
||||||
SyncAggregate *SyncAggregate `json:"sync_aggregate,omitempty"`
|
SyncAggregate *SyncAggregate `json:"sync_aggregate"`
|
||||||
ExecutionPayloadHeader *ExecutionPayloadHeader `json:"execution_payload_header,omitempty"`
|
ExecutionPayloadHeader *ExecutionPayloadHeader `json:"execution_payload_header"`
|
||||||
}{
|
}{
|
||||||
RandaoReveal: b.RandaoReveal,
|
RandaoReveal: b.RandaoReveal,
|
||||||
Eth1Data: &Eth1Data{b.BlindedBeaconBlockBodyBellatrix.Eth1Data},
|
Eth1Data: &Eth1Data{b.BlindedBeaconBlockBodyBellatrix.Eth1Data},
|
||||||
@@ -583,3 +903,129 @@ func (b *BlindedBeaconBlockBodyBellatrix) MarshalJSON() ([]byte, error) {
|
|||||||
ExecutionPayloadHeader: &ExecutionPayloadHeader{ExecutionPayloadHeader: b.BlindedBeaconBlockBodyBellatrix.ExecutionPayloadHeader},
|
ExecutionPayloadHeader: &ExecutionPayloadHeader{ExecutionPayloadHeader: b.BlindedBeaconBlockBodyBellatrix.ExecutionPayloadHeader},
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type SignedBLSToExecutionChange struct {
|
||||||
|
*eth.SignedBLSToExecutionChange
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ch *SignedBLSToExecutionChange) MarshalJSON() ([]byte, error) {
|
||||||
|
return json.Marshal(struct {
|
||||||
|
Message *BLSToExecutionChange `json:"message"`
|
||||||
|
Signature hexutil.Bytes `json:"signature"`
|
||||||
|
}{
|
||||||
|
Signature: ch.Signature,
|
||||||
|
Message: &BLSToExecutionChange{ch.Message},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
type BLSToExecutionChange struct {
|
||||||
|
*eth.BLSToExecutionChange
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ch *BLSToExecutionChange) MarshalJSON() ([]byte, error) {
|
||||||
|
return json.Marshal(struct {
|
||||||
|
ValidatorIndex string `json:"validator_index"`
|
||||||
|
FromBlsPubkey hexutil.Bytes `json:"from_bls_pubkey"`
|
||||||
|
ToExecutionAddress hexutil.Bytes `json:"to_execution_address"`
|
||||||
|
}{
|
||||||
|
ValidatorIndex: fmt.Sprintf("%d", ch.ValidatorIndex),
|
||||||
|
FromBlsPubkey: ch.FromBlsPubkey,
|
||||||
|
ToExecutionAddress: ch.ToExecutionAddress,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
type SignedBlindedBeaconBlockCapella struct {
|
||||||
|
*eth.SignedBlindedBeaconBlockCapella
|
||||||
|
}
|
||||||
|
|
||||||
|
type BlindedBeaconBlockCapella struct {
|
||||||
|
*eth.BlindedBeaconBlockCapella
|
||||||
|
}
|
||||||
|
|
||||||
|
type BlindedBeaconBlockBodyCapella struct {
|
||||||
|
*eth.BlindedBeaconBlockBodyCapella
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *SignedBlindedBeaconBlockCapella) MarshalJSON() ([]byte, error) {
|
||||||
|
return json.Marshal(struct {
|
||||||
|
Message *BlindedBeaconBlockCapella `json:"message"`
|
||||||
|
Signature hexutil.Bytes `json:"signature"`
|
||||||
|
}{
|
||||||
|
Message: &BlindedBeaconBlockCapella{b.Block},
|
||||||
|
Signature: b.Signature,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *BlindedBeaconBlockCapella) MarshalJSON() ([]byte, error) {
|
||||||
|
return json.Marshal(struct {
|
||||||
|
Slot string `json:"slot"`
|
||||||
|
ProposerIndex string `json:"proposer_index"`
|
||||||
|
ParentRoot hexutil.Bytes `json:"parent_root"`
|
||||||
|
StateRoot hexutil.Bytes `json:"state_root"`
|
||||||
|
Body *BlindedBeaconBlockBodyCapella `json:"body"`
|
||||||
|
}{
|
||||||
|
Slot: fmt.Sprintf("%d", b.Slot),
|
||||||
|
ProposerIndex: fmt.Sprintf("%d", b.ProposerIndex),
|
||||||
|
ParentRoot: b.ParentRoot,
|
||||||
|
StateRoot: b.StateRoot,
|
||||||
|
Body: &BlindedBeaconBlockBodyCapella{b.Body},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *BlindedBeaconBlockBodyCapella) MarshalJSON() ([]byte, error) {
|
||||||
|
sve := make([]*SignedVoluntaryExit, len(b.VoluntaryExits))
|
||||||
|
for i := range b.VoluntaryExits {
|
||||||
|
sve[i] = &SignedVoluntaryExit{SignedVoluntaryExit: b.VoluntaryExits[i]}
|
||||||
|
}
|
||||||
|
deps := make([]*Deposit, len(b.Deposits))
|
||||||
|
for i := range b.Deposits {
|
||||||
|
deps[i] = &Deposit{Deposit: b.Deposits[i]}
|
||||||
|
}
|
||||||
|
atts := make([]*Attestation, len(b.Attestations))
|
||||||
|
for i := range b.Attestations {
|
||||||
|
atts[i] = &Attestation{Attestation: b.Attestations[i]}
|
||||||
|
}
|
||||||
|
atsl := make([]*AttesterSlashing, len(b.AttesterSlashings))
|
||||||
|
for i := range b.AttesterSlashings {
|
||||||
|
atsl[i] = &AttesterSlashing{AttesterSlashing: b.AttesterSlashings[i]}
|
||||||
|
}
|
||||||
|
pros := make([]*ProposerSlashing, len(b.ProposerSlashings))
|
||||||
|
for i := range b.ProposerSlashings {
|
||||||
|
pros[i] = &ProposerSlashing{ProposerSlashing: b.ProposerSlashings[i]}
|
||||||
|
}
|
||||||
|
chs := make([]*SignedBLSToExecutionChange, len(b.BlsToExecutionChanges))
|
||||||
|
for i := range b.BlsToExecutionChanges {
|
||||||
|
chs[i] = &SignedBLSToExecutionChange{SignedBLSToExecutionChange: b.BlsToExecutionChanges[i]}
|
||||||
|
}
|
||||||
|
return json.Marshal(struct {
|
||||||
|
RandaoReveal hexutil.Bytes `json:"randao_reveal"`
|
||||||
|
Eth1Data *Eth1Data `json:"eth1_data"`
|
||||||
|
Graffiti hexutil.Bytes `json:"graffiti"`
|
||||||
|
ProposerSlashings []*ProposerSlashing `json:"proposer_slashings"`
|
||||||
|
AttesterSlashings []*AttesterSlashing `json:"attester_slashings"`
|
||||||
|
Attestations []*Attestation `json:"attestations"`
|
||||||
|
Deposits []*Deposit `json:"deposits"`
|
||||||
|
VoluntaryExits []*SignedVoluntaryExit `json:"voluntary_exits"`
|
||||||
|
BLSToExecutionChanges []*SignedBLSToExecutionChange `json:"bls_to_execution_changes"`
|
||||||
|
SyncAggregate *SyncAggregate `json:"sync_aggregate"`
|
||||||
|
ExecutionPayloadHeader *ExecutionPayloadHeaderCapella `json:"execution_payload_header"`
|
||||||
|
}{
|
||||||
|
RandaoReveal: b.RandaoReveal,
|
||||||
|
Eth1Data: &Eth1Data{b.Eth1Data},
|
||||||
|
Graffiti: b.Graffiti,
|
||||||
|
ProposerSlashings: pros,
|
||||||
|
AttesterSlashings: atsl,
|
||||||
|
Attestations: atts,
|
||||||
|
Deposits: deps,
|
||||||
|
VoluntaryExits: sve,
|
||||||
|
BLSToExecutionChanges: chs,
|
||||||
|
SyncAggregate: &SyncAggregate{b.SyncAggregate},
|
||||||
|
ExecutionPayloadHeader: &ExecutionPayloadHeaderCapella{ExecutionPayloadHeaderCapella: b.ExecutionPayloadHeader},
|
||||||
|
})
|
||||||
|
}
|
||||||
+
+type ErrorMessage struct {
+	Code        int      `json:"code"`
+	Message     string   `json:"message"`
+	Stacktraces []string `json:"stacktraces,omitempty"`
+}
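ErrorMessage is the standard beacon/builder API error envelope. The snippet below is a sketch only of how a client could surface a non-2xx response with it; the function name, the status handling, and the io, net/http, and github.com/pkg/errors imports it would need are assumptions, not code from this change.

// Sketch: turn a failed builder API response into a descriptive error.
func errorFromResponseSketch(r *http.Response) error {
	body, err := io.ReadAll(r.Body)
	if err != nil {
		return errors.Wrap(err, "could not read error response body")
	}
	msg := &ErrorMessage{}
	if err := json.Unmarshal(body, msg); err != nil {
		return errors.Wrapf(err, "could not unmarshal error message body=%s", string(body))
	}
	return fmt.Errorf("builder API error: status=%d code=%d message=%s", r.StatusCode, msg.Code, msg.Message)
}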
@@ -4,15 +4,20 @@ import (
 	"bytes"
 	"encoding/json"
 	"fmt"
+	"io"
 	"math/big"
+	"net/http"
+	"net/url"
 	"os"
 	"testing"

 	"github.com/ethereum/go-ethereum/common/hexutil"
+	"github.com/golang/protobuf/proto"
 	"github.com/prysmaticlabs/go-bitfield"
-	v1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
-	eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
-	"github.com/prysmaticlabs/prysm/testing/require"
+	v1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
+	eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
+	"github.com/prysmaticlabs/prysm/v4/testing/assert"
+	"github.com/prysmaticlabs/prysm/v4/testing/require"
 )
|
|
||||||
func ezDecode(t *testing.T, s string) []byte {
|
func ezDecode(t *testing.T, s string) []byte {
|
||||||
@@ -31,7 +36,8 @@ func TestSignedValidatorRegistration_MarshalJSON(t *testing.T) {
 		},
 		Signature: make([]byte, 96),
 	}
-	je, err := json.Marshal(&SignedValidatorRegistration{SignedValidatorRegistrationV1: svr})
+	a := &SignedValidatorRegistration{SignedValidatorRegistrationV1: svr}
+	je, err := json.Marshal(a)
 	require.NoError(t, err)
 	// decode with a struct w/ plain strings so we can check the string encoding of the hex fields
 	un := struct {
@@ -45,6 +51,14 @@ func TestSignedValidatorRegistration_MarshalJSON(t *testing.T) {
require.Equal(t, "0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", un.Signature)
|
require.Equal(t, "0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", un.Signature)
|
||||||
require.Equal(t, "0x0000000000000000000000000000000000000000", un.Message.FeeRecipient)
|
require.Equal(t, "0x0000000000000000000000000000000000000000", un.Message.FeeRecipient)
|
||||||
require.Equal(t, "0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", un.Message.Pubkey)
|
require.Equal(t, "0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", un.Message.Pubkey)
|
||||||
+
+	t.Run("roundtrip", func(t *testing.T) {
+		b := &SignedValidatorRegistration{}
+		if err := json.Unmarshal(je, b); err != nil {
+			require.NoError(t, err)
+		}
+		require.Equal(t, proto.Equal(a.SignedValidatorRegistrationV1, b.SignedValidatorRegistrationV1), true)
+	})
 }
|
|
||||||
var testExampleHeaderResponse = `{
|
var testExampleHeaderResponse = `{
|
||||||
@@ -74,6 +88,62 @@ var testExampleHeaderResponse = `{
|
|||||||
}
|
}
|
||||||
}`
|
}`
|
||||||
|
|
||||||
|
var testExampleHeaderResponseCapella = `{
|
||||||
|
"version": "capella",
|
||||||
|
"data": {
|
||||||
|
"message": {
|
||||||
|
"header": {
|
||||||
|
"parent_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||||
|
"fee_recipient": "0xabcf8e0d4e9587369b2301d0790347320302cc09",
|
||||||
|
"state_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||||
|
"receipts_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||||
|
"logs_bloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||||
|
"prev_randao": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||||
|
"block_number": "1",
|
||||||
|
"gas_limit": "1",
|
||||||
|
"gas_used": "1",
|
||||||
|
"timestamp": "1",
|
||||||
|
"extra_data": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||||
|
"base_fee_per_gas": "452312848583266388373324160190187140051835877600158453279131187530910662656",
|
||||||
|
"block_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||||
|
"transactions_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||||
|
"withdrawals_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
|
||||||
|
},
|
||||||
|
"value": "652312848583266388373324160190187140051835877600158453279131187530910662656",
|
||||||
|
"pubkey": "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"
|
||||||
|
},
|
||||||
|
"signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"
|
||||||
|
}
|
||||||
|
}`
|
||||||
|
|
||||||
|
var testExampleHeaderResponseUnknownVersion = `{
|
||||||
|
"version": "bad",
|
||||||
|
"data": {
|
||||||
|
"message": {
|
||||||
|
"header": {
|
||||||
|
"parent_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||||
|
"fee_recipient": "0xabcf8e0d4e9587369b2301d0790347320302cc09",
|
||||||
|
"state_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||||
|
"receipts_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||||
|
"logs_bloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||||
|
"prev_randao": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||||
|
"block_number": "1",
|
||||||
|
"gas_limit": "1",
|
||||||
|
"gas_used": "1",
|
||||||
|
"timestamp": "1",
|
||||||
|
"extra_data": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||||
|
"base_fee_per_gas": "452312848583266388373324160190187140051835877600158453279131187530910662656",
|
||||||
|
"block_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||||
|
"transactions_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||||
|
"withdrawals_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
|
||||||
|
},
|
||||||
|
"value": "652312848583266388373324160190187140051835877600158453279131187530910662656",
|
||||||
|
"pubkey": "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"
|
||||||
|
},
|
||||||
|
"signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"
|
||||||
|
}
|
||||||
|
}`
|
||||||
|
|
||||||
func TestExecutionHeaderResponseUnmarshal(t *testing.T) {
|
func TestExecutionHeaderResponseUnmarshal(t *testing.T) {
|
||||||
hr := &ExecHeaderResponse{}
|
hr := &ExecHeaderResponse{}
|
||||||
require.NoError(t, json.Unmarshal([]byte(testExampleHeaderResponse), hr))
|
require.NoError(t, json.Unmarshal([]byte(testExampleHeaderResponse), hr))
|
||||||
@@ -173,9 +243,115 @@ func TestExecutionHeaderResponseUnmarshal(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestExecutionHeaderResponseCapellaUnmarshal(t *testing.T) {
|
||||||
|
hr := &ExecHeaderResponseCapella{}
|
||||||
|
require.NoError(t, json.Unmarshal([]byte(testExampleHeaderResponseCapella), hr))
|
||||||
|
cases := []struct {
|
||||||
|
expected string
|
||||||
|
actual string
|
||||||
|
name string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
expected: "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505",
|
||||||
|
actual: hexutil.Encode(hr.Data.Signature),
|
||||||
|
name: "Signature",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
expected: "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a",
|
||||||
|
actual: hexutil.Encode(hr.Data.Message.Pubkey),
|
||||||
|
name: "ExecHeaderResponse.Pubkey",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
expected: "652312848583266388373324160190187140051835877600158453279131187530910662656",
|
||||||
|
actual: hr.Data.Message.Value.String(),
|
||||||
|
name: "ExecHeaderResponse.Value",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||||
|
actual: hexutil.Encode(hr.Data.Message.Header.ParentHash),
|
||||||
|
name: "ExecHeaderResponse.ExecutionPayloadHeader.ParentHash",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
expected: "0xabcf8e0d4e9587369b2301d0790347320302cc09",
|
||||||
|
actual: hexutil.Encode(hr.Data.Message.Header.FeeRecipient),
|
||||||
|
name: "ExecHeaderResponse.ExecutionPayloadHeader.FeeRecipient",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||||
|
actual: hexutil.Encode(hr.Data.Message.Header.StateRoot),
|
||||||
|
name: "ExecHeaderResponse.ExecutionPayloadHeader.StateRoot",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||||
|
actual: hexutil.Encode(hr.Data.Message.Header.ReceiptsRoot),
|
||||||
|
name: "ExecHeaderResponse.ExecutionPayloadHeader.ReceiptsRoot",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
expected: "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||||
|
actual: hexutil.Encode(hr.Data.Message.Header.LogsBloom),
|
||||||
|
name: "ExecHeaderResponse.ExecutionPayloadHeader.LogsBloom",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||||
|
actual: hexutil.Encode(hr.Data.Message.Header.PrevRandao),
|
||||||
|
name: "ExecHeaderResponse.ExecutionPayloadHeader.PrevRandao",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
expected: "1",
|
||||||
|
actual: fmt.Sprintf("%d", hr.Data.Message.Header.BlockNumber),
|
||||||
|
name: "ExecHeaderResponse.ExecutionPayloadHeader.BlockNumber",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
expected: "1",
|
||||||
|
actual: fmt.Sprintf("%d", hr.Data.Message.Header.GasLimit),
|
||||||
|
name: "ExecHeaderResponse.ExecutionPayloadHeader.GasLimit",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
expected: "1",
|
||||||
|
actual: fmt.Sprintf("%d", hr.Data.Message.Header.GasUsed),
|
||||||
|
name: "ExecHeaderResponse.ExecutionPayloadHeader.GasUsed",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
expected: "1",
|
||||||
|
actual: fmt.Sprintf("%d", hr.Data.Message.Header.Timestamp),
|
||||||
|
name: "ExecHeaderResponse.ExecutionPayloadHeader.Timestamp",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||||
|
actual: hexutil.Encode(hr.Data.Message.Header.ExtraData),
|
||||||
|
name: "ExecHeaderResponse.ExecutionPayloadHeader.ExtraData",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
expected: "452312848583266388373324160190187140051835877600158453279131187530910662656",
|
||||||
|
actual: fmt.Sprintf("%d", hr.Data.Message.Header.BaseFeePerGas),
|
||||||
|
name: "ExecHeaderResponse.ExecutionPayloadHeader.BaseFeePerGas",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||||
|
actual: hexutil.Encode(hr.Data.Message.Header.BlockHash),
|
||||||
|
name: "ExecHeaderResponse.ExecutionPayloadHeader.BlockHash",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||||
|
actual: hexutil.Encode(hr.Data.Message.Header.TransactionsRoot),
|
||||||
|
name: "ExecHeaderResponse.ExecutionPayloadHeader.TransactionsRoot",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||||
|
actual: hexutil.Encode(hr.Data.Message.Header.WithdrawalsRoot),
|
||||||
|
name: "ExecHeaderResponse.ExecutionPayloadHeader.WithdrawalsRoot",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, c := range cases {
|
||||||
|
require.Equal(t, c.expected, c.actual, fmt.Sprintf("unexpected value for field %s", c.name))
|
||||||
|
}
|
||||||
|
}
|
||||||
|

 func TestExecutionHeaderResponseToProto(t *testing.T) {
-	bfpg := stringToUint256("452312848583266388373324160190187140051835877600158453279131187530910662656")
-	v := stringToUint256("652312848583266388373324160190187140051835877600158453279131187530910662656")
+	bfpg, err := stringToUint256("452312848583266388373324160190187140051835877600158453279131187530910662656")
+	require.NoError(t, err)
+	v, err := stringToUint256("652312848583266388373324160190187140051835877600158453279131187530910662656")
+	require.NoError(t, err)
 	hr := &ExecHeaderResponse{}
 	require.NoError(t, json.Unmarshal([]byte(testExampleHeaderResponse), hr))
 	p, err := hr.ToProto()
@@ -205,7 +381,7 @@ func TestExecutionHeaderResponseToProto(t *testing.T) {

 	expected := &eth.SignedBuilderBid{
 		Message: &eth.BuilderBid{
-			Header: &eth.ExecutionPayloadHeader{
+			Header: &v1.ExecutionPayloadHeader{
 				ParentHash:   parentHash,
 				FeeRecipient: feeRecipient,
 				StateRoot:    stateRoot,
@@ -229,6 +405,67 @@ func TestExecutionHeaderResponseToProto(t *testing.T) {
 	require.DeepEqual(t, expected, p)
 }
|
|
||||||
|
func TestExecutionHeaderResponseCapellaToProto(t *testing.T) {
|
||||||
|
bfpg, err := stringToUint256("452312848583266388373324160190187140051835877600158453279131187530910662656")
|
||||||
|
require.NoError(t, err)
|
||||||
|
v, err := stringToUint256("652312848583266388373324160190187140051835877600158453279131187530910662656")
|
||||||
|
require.NoError(t, err)
|
||||||
|
hr := &ExecHeaderResponseCapella{}
|
||||||
|
require.NoError(t, json.Unmarshal([]byte(testExampleHeaderResponseCapella), hr))
|
||||||
|
p, err := hr.ToProto()
|
||||||
|
require.NoError(t, err)
|
||||||
|
signature, err := hexutil.Decode("0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505")
|
||||||
|
require.NoError(t, err)
|
||||||
|
pubkey, err := hexutil.Decode("0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a")
|
||||||
|
require.NoError(t, err)
|
||||||
|
parentHash, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
|
||||||
|
require.NoError(t, err)
|
||||||
|
feeRecipient, err := hexutil.Decode("0xabcf8e0d4e9587369b2301d0790347320302cc09")
|
||||||
|
require.NoError(t, err)
|
||||||
|
stateRoot, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
|
||||||
|
require.NoError(t, err)
|
||||||
|
receiptsRoot, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
|
||||||
|
require.NoError(t, err)
|
||||||
|
logsBloom, err := hexutil.Decode("0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
|
||||||
|
require.NoError(t, err)
|
||||||
|
prevRandao, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
|
||||||
|
require.NoError(t, err)
|
||||||
|
extraData, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
|
||||||
|
require.NoError(t, err)
|
||||||
|
blockHash, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
|
||||||
|
require.NoError(t, err)
|
||||||
|
txRoot, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
|
||||||
|
require.NoError(t, err)
|
||||||
|
withdrawalsRoot, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
expected := ð.SignedBuilderBidCapella{
|
||||||
|
Message: ð.BuilderBidCapella{
|
||||||
|
Header: &v1.ExecutionPayloadHeaderCapella{
|
||||||
|
ParentHash: parentHash,
|
||||||
|
FeeRecipient: feeRecipient,
|
||||||
|
StateRoot: stateRoot,
|
||||||
|
ReceiptsRoot: receiptsRoot,
|
||||||
|
LogsBloom: logsBloom,
|
||||||
|
PrevRandao: prevRandao,
|
||||||
|
BlockNumber: 1,
|
||||||
|
GasLimit: 1,
|
||||||
|
GasUsed: 1,
|
||||||
|
Timestamp: 1,
|
||||||
|
ExtraData: extraData,
|
||||||
|
BaseFeePerGas: bfpg.SSZBytes(),
|
||||||
|
BlockHash: blockHash,
|
||||||
|
TransactionsRoot: txRoot,
|
||||||
|
WithdrawalsRoot: withdrawalsRoot,
|
||||||
|
},
|
||||||
|
Value: v.SSZBytes(),
|
||||||
|
Pubkey: pubkey,
|
||||||
|
},
|
||||||
|
Signature: signature,
|
||||||
|
}
|
||||||
|
require.DeepEqual(t, expected, p)
|
||||||
|
}
|
||||||
|
|
||||||
var testExampleExecutionPayload = `{
|
var testExampleExecutionPayload = `{
|
||||||
"version": "bellatrix",
|
"version": "bellatrix",
|
||||||
"data": {
|
"data": {
|
||||||
@@ -251,6 +488,36 @@ var testExampleExecutionPayload = `{
|
|||||||
}
|
}
|
||||||
}`
|
}`
|
||||||
|
|
||||||
|
var testExampleExecutionPayloadCapella = `{
|
||||||
|
"version": "capella",
|
||||||
|
"data": {
|
||||||
|
"parent_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||||
|
"fee_recipient": "0xabcf8e0d4e9587369b2301d0790347320302cc09",
|
||||||
|
"state_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||||
|
"receipts_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||||
|
"logs_bloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||||
|
"prev_randao": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||||
|
"block_number": "1",
|
||||||
|
"gas_limit": "1",
|
||||||
|
"gas_used": "1",
|
||||||
|
"timestamp": "1",
|
||||||
|
"extra_data": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||||
|
"base_fee_per_gas": "452312848583266388373324160190187140051835877600158453279131187530910662656",
|
||||||
|
"block_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||||
|
"transactions": [
|
||||||
|
"0x02f878831469668303f51d843b9ac9f9843b9aca0082520894c93269b73096998db66be0441e836d873535cb9c8894a19041886f000080c001a031cc29234036afbf9a1fb9476b463367cb1f957ac0b919b69bbc798436e604aaa018c4e9c3914eb27aadd0b91e10b18655739fcf8c1fc398763a9f1beecb8ddc86"
|
||||||
|
],
|
||||||
|
"withdrawals": [
|
||||||
|
{
|
||||||
|
"index": "1",
|
||||||
|
"validator_index": "1",
|
||||||
|
"address": "0xcf8e0d4e9587369b2301d0790347320302cc0943",
|
||||||
|
"amount": "1"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}`
|
||||||
|
|
||||||
func TestExecutionPayloadResponseUnmarshal(t *testing.T) {
|
func TestExecutionPayloadResponseUnmarshal(t *testing.T) {
|
||||||
epr := &ExecPayloadResponse{}
|
epr := &ExecPayloadResponse{}
|
||||||
require.NoError(t, json.Unmarshal([]byte(testExampleExecutionPayload), epr))
|
require.NoError(t, json.Unmarshal([]byte(testExampleExecutionPayload), epr))
|
||||||
@@ -333,6 +600,95 @@ func TestExecutionPayloadResponseUnmarshal(t *testing.T) {
|
|||||||
require.Equal(t, txHash, hexutil.Encode(epr.Data.Transactions[0]))
|
require.Equal(t, txHash, hexutil.Encode(epr.Data.Transactions[0]))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestExecutionPayloadResponseCapellaUnmarshal(t *testing.T) {
    epr := &ExecPayloadResponseCapella{}
    require.NoError(t, json.Unmarshal([]byte(testExampleExecutionPayloadCapella), epr))
    cases := []struct {
        expected string
        actual   string
        name     string
    }{
        {
            expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
            actual:   hexutil.Encode(epr.Data.ParentHash),
            name:     "ExecPayloadResponse.ExecutionPayload.ParentHash",
        },
        {
            expected: "0xabcf8e0d4e9587369b2301d0790347320302cc09",
            actual:   hexutil.Encode(epr.Data.FeeRecipient),
            name:     "ExecPayloadResponse.ExecutionPayload.FeeRecipient",
        },
        {
            expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
            actual:   hexutil.Encode(epr.Data.StateRoot),
            name:     "ExecPayloadResponse.ExecutionPayload.StateRoot",
        },
        {
            expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
            actual:   hexutil.Encode(epr.Data.ReceiptsRoot),
            name:     "ExecPayloadResponse.ExecutionPayload.ReceiptsRoot",
        },
        {
expected: "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
            actual: hexutil.Encode(epr.Data.LogsBloom),
            name:   "ExecPayloadResponse.ExecutionPayload.LogsBloom",
        },
        {
            expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
            actual:   hexutil.Encode(epr.Data.PrevRandao),
            name:     "ExecPayloadResponse.ExecutionPayload.PrevRandao",
        },
        {
            expected: "1",
            actual:   fmt.Sprintf("%d", epr.Data.BlockNumber),
            name:     "ExecPayloadResponse.ExecutionPayload.BlockNumber",
        },
        {
            expected: "1",
            actual:   fmt.Sprintf("%d", epr.Data.GasLimit),
            name:     "ExecPayloadResponse.ExecutionPayload.GasLimit",
        },
        {
            expected: "1",
            actual:   fmt.Sprintf("%d", epr.Data.GasUsed),
            name:     "ExecPayloadResponse.ExecutionPayload.GasUsed",
        },
        {
            expected: "1",
            actual:   fmt.Sprintf("%d", epr.Data.Timestamp),
            name:     "ExecPayloadResponse.ExecutionPayload.Timestamp",
        },
        {
            expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
            actual:   hexutil.Encode(epr.Data.ExtraData),
            name:     "ExecPayloadResponse.ExecutionPayload.ExtraData",
        },
        {
            expected: "452312848583266388373324160190187140051835877600158453279131187530910662656",
            actual:   fmt.Sprintf("%d", epr.Data.BaseFeePerGas),
            name:     "ExecPayloadResponse.ExecutionPayload.BaseFeePerGas",
        },
        {
            expected: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
            actual:   hexutil.Encode(epr.Data.BlockHash),
            name:     "ExecPayloadResponse.ExecutionPayload.BlockHash",
        },
    }
    for _, c := range cases {
        require.Equal(t, c.expected, c.actual, fmt.Sprintf("unexpected value for field %s", c.name))
    }
    require.Equal(t, 1, len(epr.Data.Transactions))
txHash := "0x02f878831469668303f51d843b9ac9f9843b9aca0082520894c93269b73096998db66be0441e836d873535cb9c8894a19041886f000080c001a031cc29234036afbf9a1fb9476b463367cb1f957ac0b919b69bbc798436e604aaa018c4e9c3914eb27aadd0b91e10b18655739fcf8c1fc398763a9f1beecb8ddc86"
    require.Equal(t, txHash, hexutil.Encode(epr.Data.Transactions[0]))

    require.Equal(t, 1, len(epr.Data.Withdrawals))
    w := epr.Data.Withdrawals[0]
    assert.Equal(t, uint64(1), w.Index.Uint64())
    assert.Equal(t, uint64(1), w.ValidatorIndex.Uint64())
    assert.DeepEqual(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943", w.Address.String())
    assert.Equal(t, uint64(1), w.Amount.Uint64())
}

func TestExecutionPayloadResponseToProto(t *testing.T) {
    hr := &ExecPayloadResponse{}
    require.NoError(t, json.Unmarshal([]byte(testExampleExecutionPayload), hr))

@@ -360,7 +716,8 @@ func TestExecutionPayloadResponseToProto(t *testing.T) {
    require.NoError(t, err)
    txList := [][]byte{tx}

-   bfpg := stringToUint256("452312848583266388373324160190187140051835877600158453279131187530910662656")
+   bfpg, err := stringToUint256("452312848583266388373324160190187140051835877600158453279131187530910662656")
+   require.NoError(t, err)
    expected := &v1.ExecutionPayload{
        ParentHash:   parentHash,
        FeeRecipient: feeRecipient,

@@ -380,6 +737,65 @@ func TestExecutionPayloadResponseToProto(t *testing.T) {
    require.DeepEqual(t, expected, p)
}

func TestExecutionPayloadResponseCapellaToProto(t *testing.T) {
    hr := &ExecPayloadResponseCapella{}
    require.NoError(t, json.Unmarshal([]byte(testExampleExecutionPayloadCapella), hr))
    p, err := hr.ToProto()
    require.NoError(t, err)

    parentHash, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
    require.NoError(t, err)
    feeRecipient, err := hexutil.Decode("0xabcf8e0d4e9587369b2301d0790347320302cc09")
    require.NoError(t, err)
    stateRoot, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
    require.NoError(t, err)
    receiptsRoot, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
    require.NoError(t, err)
logsBloom, err := hexutil.Decode("0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
    require.NoError(t, err)
    prevRandao, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
    require.NoError(t, err)
    extraData, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
    require.NoError(t, err)
    blockHash, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
    require.NoError(t, err)

tx, err := hexutil.Decode("0x02f878831469668303f51d843b9ac9f9843b9aca0082520894c93269b73096998db66be0441e836d873535cb9c8894a19041886f000080c001a031cc29234036afbf9a1fb9476b463367cb1f957ac0b919b69bbc798436e604aaa018c4e9c3914eb27aadd0b91e10b18655739fcf8c1fc398763a9f1beecb8ddc86")
    require.NoError(t, err)
    txList := [][]byte{tx}
    address, err := hexutil.Decode("0xcf8e0d4e9587369b2301d0790347320302cc0943")
    require.NoError(t, err)

    bfpg, err := stringToUint256("452312848583266388373324160190187140051835877600158453279131187530910662656")
    require.NoError(t, err)
    expected := &v1.ExecutionPayloadCapella{
        ParentHash:    parentHash,
        FeeRecipient:  feeRecipient,
        StateRoot:     stateRoot,
        ReceiptsRoot:  receiptsRoot,
        LogsBloom:     logsBloom,
        PrevRandao:    prevRandao,
        BlockNumber:   1,
        GasLimit:      1,
        GasUsed:       1,
        Timestamp:     1,
        ExtraData:     extraData,
        BaseFeePerGas: bfpg.SSZBytes(),
        BlockHash:     blockHash,
        Transactions:  txList,
        Withdrawals: []*v1.Withdrawal{
            {
                Index:          1,
                ValidatorIndex: 1,
                Address:        address,
                Amount:         1,
            },
        },
    }
    require.DeepEqual(t, expected, p)
}

func pbEth1Data() *eth.Eth1Data {
    return &eth.Eth1Data{
        DepositRoot: make([]byte, 32),

@@ -567,9 +983,10 @@ func TestProposerSlashings(t *testing.T) {
    require.Equal(t, expected, string(b))
}

-func pbExecutionPayloadHeader(t *testing.T) *eth.ExecutionPayloadHeader {
-   bfpg := stringToUint256("452312848583266388373324160190187140051835877600158453279131187530910662656")
-   return &eth.ExecutionPayloadHeader{
+func pbExecutionPayloadHeader(t *testing.T) *v1.ExecutionPayloadHeader {
+   bfpg, err := stringToUint256("452312848583266388373324160190187140051835877600158453279131187530910662656")
+   require.NoError(t, err)
+   return &v1.ExecutionPayloadHeader{
    ParentHash:   ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
    FeeRecipient: ezDecode(t, "0xabcf8e0d4e9587369b2301d0790347320302cc09"),
    StateRoot:    ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),

@@ -587,6 +1004,28 @@ func pbExecutionPayloadHeader(t *testing.T) *eth.ExecutionPayloadHeader {
    }
}

func pbExecutionPayloadHeaderCapella(t *testing.T) *v1.ExecutionPayloadHeaderCapella {
    bfpg, err := stringToUint256("452312848583266388373324160190187140051835877600158453279131187530910662656")
    require.NoError(t, err)
    return &v1.ExecutionPayloadHeaderCapella{
        ParentHash:   ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
        FeeRecipient: ezDecode(t, "0xabcf8e0d4e9587369b2301d0790347320302cc09"),
        StateRoot:    ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
        ReceiptsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
LogsBloom: ezDecode(t, "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"),
        PrevRandao:       ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
        BlockNumber:      1,
        GasLimit:         1,
        GasUsed:          1,
        Timestamp:        1,
        ExtraData:        ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
        BaseFeePerGas:    bfpg.SSZBytes(),
        BlockHash:        ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
        TransactionsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
        WithdrawalsRoot:  ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
    }
}

func TestExecutionPayloadHeader_MarshalJSON(t *testing.T) {
    h := &ExecutionPayloadHeader{
        ExecutionPayloadHeader: pbExecutionPayloadHeader(t),

@@ -597,6 +1036,16 @@ func TestExecutionPayloadHeader_MarshalJSON(t *testing.T) {
    require.Equal(t, expected, string(b))
}

func TestExecutionPayloadHeaderCapella_MarshalJSON(t *testing.T) {
    h := &ExecutionPayloadHeaderCapella{
        ExecutionPayloadHeaderCapella: pbExecutionPayloadHeaderCapella(t),
    }
    b, err := json.Marshal(h)
    require.NoError(t, err)
expected := `{"parent_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","fee_recipient":"0xabcf8e0d4e9587369b2301d0790347320302cc09","state_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","receipts_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","logs_bloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","prev_randao":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","block_number":"1","gas_limit":"1","gas_used":"1","timestamp":"1","extra_data":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","base_fee_per_gas":"452312848583266388373324160190187140051835877600158453279131187530910662656","block_hash":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","transactions_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2","withdrawals_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"}`
    require.Equal(t, expected, string(b))
}

var testBuilderBid = `{
"version":"bellatrix",
"data":{

@@ -648,6 +1097,49 @@ func TestMathBigUnmarshal(t *testing.T) {
    require.NoError(t, u256.UnmarshalText([]byte("452312848583266388373324160190187140051835877600158453279131187530910662656")))
}

func TestIsValidUint256(t *testing.T) {
    value, ok := new(big.Int), false

    // negative uint256.max - 1
    _, ok = value.SetString("-10000000000000000000000000000000000000000000000000000000000000000", 16)
    require.Equal(t, true, ok)
    require.Equal(t, 257, value.BitLen())
    require.Equal(t, false, isValidUint256(value))

    // negative uint256.max
    _, ok = value.SetString("-ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", 16)
    require.Equal(t, true, ok)
    require.Equal(t, 256, value.BitLen())
    require.Equal(t, false, isValidUint256(value))

    // negative number
    _, ok = value.SetString("-1", 16)
    require.Equal(t, true, ok)
    require.Equal(t, false, isValidUint256(value))

    // uint256.min
    _, ok = value.SetString("0", 16)
    require.Equal(t, true, ok)
    require.Equal(t, true, isValidUint256(value))

    // positive number
    _, ok = value.SetString("1", 16)
    require.Equal(t, true, ok)
    require.Equal(t, true, isValidUint256(value))

    // uint256.max
    _, ok = value.SetString("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", 16)
    require.Equal(t, true, ok)
    require.Equal(t, 256, value.BitLen())
    require.Equal(t, true, isValidUint256(value))

    // uint256.max + 1
    _, ok = value.SetString("10000000000000000000000000000000000000000000000000000000000000000", 16)
    require.Equal(t, true, ok)
    require.Equal(t, 257, value.BitLen())
    require.Equal(t, false, isValidUint256(value))
}

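isValidUint256 itself is not shown in this diff. Purely as a reading aid, the property these cases pin down can be restated in a couple of lines; the sketch below is an assumption about its shape (including the hypothetical name and package), not the actual implementation.

package builder // assumed package name

import "math/big"

// isValidUint256Sketch mirrors what the test above asserts: a value is a valid
// uint256 iff it is non-negative and fits in at most 256 bits, so -1, negative
// uint256.max, and 2**256 are all rejected while 0, 1, and 2**256-1 pass.
func isValidUint256Sketch(v *big.Int) bool {
    return v.Sign() >= 0 && v.BitLen() <= 256
}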
func TestUint256Unmarshal(t *testing.T) {
    base10 := "452312848583266388373324160190187140051835877600158453279131187530910662656"
    bi := new(big.Int)

@@ -664,6 +1156,36 @@ func TestUint256Unmarshal(t *testing.T) {
    require.Equal(t, expected, string(m))
}

func TestUint256UnmarshalNegative(t *testing.T) {
    m := "-1"
    var value Uint256
    err := value.UnmarshalText([]byte(m))
    require.ErrorContains(t, "unable to decode into Uint256", err)
}

func TestUint256UnmarshalMin(t *testing.T) {
    m := "0"
    var value Uint256
    err := value.UnmarshalText([]byte(m))
    require.NoError(t, err)
}

func TestUint256UnmarshalMax(t *testing.T) {
    // 2**256-1 (uint256.max)
    m := "115792089237316195423570985008687907853269984665640564039457584007913129639935"
    var value Uint256
    err := value.UnmarshalText([]byte(m))
    require.NoError(t, err)
}

func TestUint256UnmarshalTooBig(t *testing.T) {
    // 2**256 (one more than uint256.max)
    m := "115792089237316195423570985008687907853269984665640564039457584007913129639936"
    var value Uint256
    err := value.UnmarshalText([]byte(m))
    require.ErrorContains(t, "unable to decode into Uint256", err)
}

func TestMarshalBlindedBeaconBlockBodyBellatrix(t *testing.T) {
    expected, err := os.ReadFile("testdata/blinded-block.json")
    require.NoError(t, err)

@@ -693,11 +1215,43 @@ func TestMarshalBlindedBeaconBlockBodyBellatrix(t *testing.T) {
    require.Equal(t, string(expected[0:len(expected)-1]), string(m))
}

func TestMarshalBlindedBeaconBlockBodyCapella(t *testing.T) {
    expected, err := os.ReadFile("testdata/blinded-block-capella.json")
    require.NoError(t, err)
    b := &BlindedBeaconBlockCapella{BlindedBeaconBlockCapella: &eth.BlindedBeaconBlockCapella{
        Slot:          1,
        ProposerIndex: 1,
        ParentRoot:    ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
        StateRoot:     ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
        Body: &eth.BlindedBeaconBlockBodyCapella{
RandaoReveal: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
            Eth1Data:               pbEth1Data(),
            Graffiti:               ezDecode(t, "0xdeadbeefc0ffee"),
            ProposerSlashings:      []*eth.ProposerSlashing{pbProposerSlashing(t)},
            AttesterSlashings:      []*eth.AttesterSlashing{pbAttesterSlashing(t)},
            Attestations:           []*eth.Attestation{pbAttestation(t)},
            Deposits:               []*eth.Deposit{pbDeposit(t)},
            VoluntaryExits:         []*eth.SignedVoluntaryExit{pbSignedVoluntaryExit(t)},
            SyncAggregate:          pbSyncAggregate(),
            ExecutionPayloadHeader: pbExecutionPayloadHeaderCapella(t),
        },
    }}
    m, err := json.Marshal(b)
    require.NoError(t, err)
    // string error output is easier to deal with
    // -1 end slice index on expected is to get rid of trailing newline
    // if you update this fixture and this test breaks, you probably removed the trailing newline
    require.Equal(t, string(expected[0:len(expected)-1]), string(m))
}

func TestRoundTripUint256(t *testing.T) {
-   vs := "452312848583266388373324160190187140051835877600158453279131187530910662656"
-   u := stringToUint256(vs)
+   vs := "4523128485832663883733241601901871400518358776001584532791311875309106626"
+   u, err := stringToUint256(vs)
+   require.NoError(t, err)
    sb := u.SSZBytes()
-   uu := sszBytesToUint256(sb)
+   require.Equal(t, 32, len(sb))
+   uu, err := sszBytesToUint256(sb)
+   require.NoError(t, err)
    require.Equal(t, true, bytes.Equal(u.SSZBytes(), uu.SSZBytes()))
    require.Equal(t, vs, uu.String())
}

@@ -724,3 +1278,101 @@ func TestExecutionPayloadHeaderRoundtrip(t *testing.T) {
    require.NoError(t, err)
    require.DeepEqual(t, string(expected[0:len(expected)-1]), string(m))
}

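The new 32-byte length assertion in the round-trip test above reflects that SSZ serializes a uint256 as 32 little-endian bytes. The following self-contained sketch illustrates that encoding with math/big only; it stands in for, and is not, this package's own Uint256/SSZBytes code.

package main

import (
    "fmt"
    "math/big"
)

// toSSZBytes pads a non-negative big.Int to 32 bytes and reverses it into
// little-endian order, which is the layout SSZ uses for uint256 values.
func toSSZBytes(v *big.Int) []byte {
    be := v.FillBytes(make([]byte, 32)) // big-endian, zero-padded to 32 bytes
    le := make([]byte, 32)
    for i := range be {
        le[i] = be[31-i]
    }
    return le
}

func main() {
    v, _ := new(big.Int).SetString("4523128485832663883733241601901871400518358776001584532791311875309106626", 10)
    fmt.Println(len(toSSZBytes(v))) // 32, matching the assertion in the test above
}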
func TestExecutionPayloadHeaderCapellaRoundtrip(t *testing.T) {
    expected, err := os.ReadFile("testdata/execution-payload-capella.json")
    require.NoError(t, err)
    hu := &ExecutionPayloadHeaderCapella{}
    require.NoError(t, json.Unmarshal(expected, hu))
    m, err := json.Marshal(hu)
    require.NoError(t, err)
    require.DeepEqual(t, string(expected[0:len(expected)-1]), string(m))
}

func TestErrorMessage_non200Err(t *testing.T) {
    mockRequest := &http.Request{
        URL: &url.URL{Path: "example.com"},
    }
    tests := []struct {
        name        string
        args        *http.Response
        wantMessage string
    }{
        {
            name: "204",
            args: func() *http.Response {
                message := ErrorMessage{
                    Code:    204,
                    Message: "No header is available",
                }
                r, err := json.Marshal(message)
                require.NoError(t, err)
                return &http.Response{
                    Request:    mockRequest,
                    StatusCode: 204,
                    Body:       io.NopCloser(bytes.NewReader(r)),
                }
            }(),
            wantMessage: "No header is available",
        },
        {
            name: "400",
            args: func() *http.Response {
                message := ErrorMessage{
                    Code:    400,
                    Message: "Unknown hash: missing parent hash",
                }
                r, err := json.Marshal(message)
                require.NoError(t, err)
                return &http.Response{
                    Request:    mockRequest,
                    StatusCode: 400,
                    Body:       io.NopCloser(bytes.NewReader(r)),
                }
            }(),
            wantMessage: "Unknown hash: missing parent hash",
        },
        {
            name: "500",
            args: func() *http.Response {
                message := ErrorMessage{
                    Code:    500,
                    Message: "Internal server error",
                }
                r, err := json.Marshal(message)
                require.NoError(t, err)
                return &http.Response{
                    Request:    mockRequest,
                    StatusCode: 500,
                    Body:       io.NopCloser(bytes.NewReader(r)),
                }
            }(),
            wantMessage: "Internal server error",
        },
        {
            name: "205",
            args: func() *http.Response {
                message := ErrorMessage{
                    Code:    205,
                    Message: "Reset Content",
                }
                r, err := json.Marshal(message)
                require.NoError(t, err)
                return &http.Response{
                    Request:    mockRequest,
                    StatusCode: 205,
                    Body:       io.NopCloser(bytes.NewReader(r)),
                }
            }(),
            wantMessage: "did not receive 200 response from API",
        },
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            err := non200Err(tt.args)
            if err != nil && tt.wantMessage != "" {
                require.ErrorContains(t, tt.wantMessage, err)
            }
        })
    }
}

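non200Err itself is not part of this diff, only its table test. Purely as a reading aid, here is a hypothetical sketch that would satisfy the expectations above (imports from net/http, io, encoding/json, and fmt assumed; the real function in this package may differ in structure and wording).

// Hypothetical sketch only; ErrorMessage is the struct used by the test above.
func non200ErrSketch(resp *http.Response) error {
    body, _ := io.ReadAll(resp.Body)
    msg := &ErrorMessage{}
    _ = json.Unmarshal(body, msg)
    switch resp.StatusCode {
    case 204, 400, 500:
        // Known builder API error codes surface the server-provided message.
        return fmt.Errorf("request to %s failed: %s", resp.Request.URL.Path, msg.Message)
    default:
        // Anything else falls back to a generic error, as the 205 case expects.
        return fmt.Errorf("did not receive 200 response from API: %d", resp.StatusCode)
    }
}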
@@ -5,9 +5,10 @@ go_library(
    srcs = [
        "gateway.go",
        "log.go",
+       "modifiers.go",
        "options.go",
    ],
-   importpath = "github.com/prysmaticlabs/prysm/api/gateway",
+   importpath = "github.com/prysmaticlabs/prysm/v4/api/gateway",
    visibility = [
        "//beacon-chain:__subpackages__",
        "//validator:__subpackages__",

@@ -23,6 +24,7 @@ go_library(
        "@org_golang_google_grpc//:go_default_library",
        "@org_golang_google_grpc//connectivity:go_default_library",
        "@org_golang_google_grpc//credentials:go_default_library",
+       "@org_golang_google_protobuf//proto:go_default_library",
    ],
)

@@ -10,11 +10,12 @@ go_library(
        "process_request.go",
        "structs.go",
    ],
-   importpath = "github.com/prysmaticlabs/prysm/api/gateway/apimiddleware",
+   importpath = "github.com/prysmaticlabs/prysm/v4/api/gateway/apimiddleware",
    visibility = ["//visibility:public"],
    deps = [
        "//api/grpc:go_default_library",
        "//encoding/bytesutil:go_default_library",
+       "@com_github_ethereum_go_ethereum//common:go_default_library",
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
        "@com_github_gorilla_mux//:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
@@ -111,7 +111,7 @@ func (m *ApiProxyMiddleware) WithMiddleware(path string) http.HandlerFunc {
        }
    }

-   if req.Method == "DELETE" {
+   if req.Method == "DELETE" && req.Body != http.NoBody {
        if errJson := handleDeleteRequestForEndpoint(endpoint, req); errJson != nil {
            WriteError(w, errJson, nil)
            return

@@ -7,7 +7,7 @@ import (
    "strings"

    "github.com/gorilla/mux"
-   butil "github.com/prysmaticlabs/prysm/encoding/bytesutil"
+   butil "github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
    "github.com/wealdtech/go-bytesutil"
)

@@ -6,8 +6,8 @@ import (
    "testing"

    "github.com/gorilla/mux"
-   "github.com/prysmaticlabs/prysm/testing/assert"
-   "github.com/prysmaticlabs/prysm/testing/require"
+   "github.com/prysmaticlabs/prysm/v4/testing/assert"
+   "github.com/prysmaticlabs/prysm/v4/testing/require"
)

func TestHandleURLParameters(t *testing.T) {
@@ -9,6 +9,7 @@ import (
    "strings"
    "time"

+   "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/common/hexutil"
    "github.com/pkg/errors"
    "github.com/wealdtech/go-bytesutil"

@@ -31,26 +32,26 @@ func processField(s interface{}, processors []fieldProcessor) error {
    sliceElem := t.Field(i).Type.Elem()
    kind := sliceElem.Kind()
    // Recursively process slices to struct pointers.
-   if kind == reflect.Ptr && sliceElem.Elem().Kind() == reflect.Struct {
+   switch {
+   case kind == reflect.Ptr && sliceElem.Elem().Kind() == reflect.Struct:
        for j := 0; j < v.Field(i).Len(); j++ {
            if err := processField(v.Field(i).Index(j).Interface(), processors); err != nil {
                return errors.Wrapf(err, "could not process field '%s'", t.Field(i).Name)
            }
        }
-   }
    // Process each string in string slices.
-   if kind == reflect.String {
+   case kind == reflect.String:
        for _, proc := range processors {
            _, hasTag := t.Field(i).Tag.Lookup(proc.tag)
-           if hasTag {
-               for j := 0; j < v.Field(i).Len(); j++ {
-                   if err := proc.f(v.Field(i).Index(j)); err != nil {
-                       return errors.Wrapf(err, "could not process field '%s'", t.Field(i).Name)
-                   }
+           if !hasTag {
+               continue
+           }
+           for j := 0; j < v.Field(i).Len(); j++ {
+               if err := proc.f(v.Field(i).Index(j)); err != nil {
+                   return errors.Wrapf(err, "could not process field '%s'", t.Field(i).Name)
                }
            }
        }
-   }
    // Recursively process struct pointers.
    case reflect.Ptr:

@@ -100,6 +101,20 @@ func base64ToHexProcessor(v reflect.Value) error {
    return nil
}

+func base64ToChecksumAddressProcessor(v reflect.Value) error {
+   if v.String() == "" {
+       // Empty hex values are represented as "0x".
+       v.SetString("0x")
+       return nil
+   }
+   b, err := base64.StdEncoding.DecodeString(v.String())
+   if err != nil {
+       return err
+   }
+   v.SetString(common.BytesToAddress(b).Hex())
+   return nil
+}
+
func base64ToUint256Processor(v reflect.Value) error {
    if v.String() == "" {
        return nil
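To see what the new "address" field processor above does to a value, the snippet below feeds the same base64 input the tests use through go-ethereum's address helpers; it is an illustration, not code from this change.

package main

import (
    "encoding/base64"
    "fmt"

    "github.com/ethereum/go-ethereum/common"
)

func main() {
    // "Zm9v" is base64 for "foo"; BytesToAddress right-pads it into a 20-byte
    // address and Hex() renders it as an EIP-55 checksummed 0x string.
    b, err := base64.StdEncoding.DecodeString("Zm9v")
    if err != nil {
        panic(err)
    }
    fmt.Println(common.BytesToAddress(b).Hex())
}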
@@ -10,7 +10,7 @@ import (
    "strings"

    "github.com/pkg/errors"
-   "github.com/prysmaticlabs/prysm/api/grpc"
+   "github.com/prysmaticlabs/prysm/v4/api/grpc"
)

// DeserializeRequestBodyIntoContainer deserializes the request's body into an endpoint-specific struct.

@@ -104,6 +104,8 @@ func ReadGrpcResponseBody(r io.Reader) ([]byte, ErrorJson) {
}

// HandleGrpcResponseError acts on an error that resulted from a grpc-gateway's response.
+// Whether there was an error is indicated by the bool return value. In case of an error,
+// there is no need to write to the response because it's taken care of by the function.
func HandleGrpcResponseError(errJson ErrorJson, resp *http.Response, respBody []byte, w http.ResponseWriter) (bool, ErrorJson) {
    responseHasError := false
    if err := json.Unmarshal(respBody, errJson); err != nil {

@@ -149,6 +151,10 @@ func ProcessMiddlewareResponseFields(responseContainer interface{}) ErrorJson {
        tag: "hex",
        f:   base64ToHexProcessor,
    },
+   {
+       tag: "address",
+       f:   base64ToChecksumAddressProcessor,
+   },
    {
        tag: "enum",
        f:   enumToLowercaseProcessor,
@@ -8,9 +8,9 @@ import (
    "strings"
    "testing"

-   "github.com/prysmaticlabs/prysm/api/grpc"
-   "github.com/prysmaticlabs/prysm/testing/assert"
-   "github.com/prysmaticlabs/prysm/testing/require"
+   "github.com/prysmaticlabs/prysm/v4/api/grpc"
+   "github.com/prysmaticlabs/prysm/v4/testing/assert"
+   "github.com/prysmaticlabs/prysm/v4/testing/require"
    "github.com/sirupsen/logrus/hooks/test"
)

@@ -31,21 +31,25 @@ func defaultRequestContainer() *testRequestContainer {
}

type testResponseContainer struct {
    TestString   string
    TestHex      string `hex:"true"`
    TestEmptyHex string `hex:"true"`
-   TestUint256  string `uint256:"true"`
-   TestEnum     string `enum:"true"`
-   TestTime     string `time:"true"`
+   TestAddress      string `address:"true"`
+   TestEmptyAddress string `address:"true"`
+   TestUint256      string `uint256:"true"`
+   TestEnum         string `enum:"true"`
+   TestTime         string `time:"true"`
}

func defaultResponseContainer() *testResponseContainer {
    return &testResponseContainer{
        TestString:   "test string",
        TestHex:      "Zm9v", // base64 encoding of "foo"
        TestEmptyHex: "",
-       TestEnum: "Test Enum",
-       TestTime: "2006-01-02T15:04:05Z",
+       TestAddress:      "Zm9v",
+       TestEmptyAddress: "",
+       TestEnum:         "Test Enum",
+       TestTime:         "2006-01-02T15:04:05Z",

        // base64 encoding of 4196 in little-endian
        TestUint256: "ZBAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=",

@@ -247,6 +251,8 @@ func TestProcessMiddlewareResponseFields(t *testing.T) {
    require.Equal(t, true, errJson == nil)
    assert.Equal(t, "0x666f6f", container.TestHex)
    assert.Equal(t, "0x", container.TestEmptyHex)
+   assert.Equal(t, "0x0000000000000000000000000000000000666F6f", container.TestAddress)
+   assert.Equal(t, "0x", container.TestEmptyAddress)
    assert.Equal(t, "4196", container.TestUint256)
    assert.Equal(t, "test enum", container.TestEnum)
    assert.Equal(t, "1136214245", container.TestTime)

@@ -292,7 +298,7 @@ func TestWriteMiddlewareResponseHeadersAndBody(t *testing.T) {
    v, ok = writer.Header()["Content-Length"]
    require.Equal(t, true, ok, "header not found")
    require.Equal(t, 1, len(v), "wrong number of header values")
-   assert.Equal(t, "181", v[0])
+   assert.Equal(t, "224", v[0])
    assert.Equal(t, 204, writer.Code)
    assert.DeepEqual(t, responseJson, writer.Body.Bytes())
})
@@ -13,8 +13,8 @@ import (
    "github.com/gorilla/mux"
    gwruntime "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
    "github.com/pkg/errors"
-   "github.com/prysmaticlabs/prysm/api/gateway/apimiddleware"
-   "github.com/prysmaticlabs/prysm/runtime"
+   "github.com/prysmaticlabs/prysm/v4/api/gateway/apimiddleware"
+   "github.com/prysmaticlabs/prysm/v4/runtime"
    "github.com/rs/cors"
    "google.golang.org/grpc"
    "google.golang.org/grpc/connectivity"

@@ -70,15 +70,16 @@ type Gateway struct {
func New(ctx context.Context, opts ...Option) (*Gateway, error) {
    g := &Gateway{
        ctx: ctx,
-       cfg: &config{
-           router: mux.NewRouter(),
-       },
+       cfg: &config{},
    }
    for _, opt := range opts {
        if err := opt(g); err != nil {
            return nil, err
        }
    }
+   if g.cfg.router == nil {
+       g.cfg.router = mux.NewRouter()
+   }
    return g, nil
}

@@ -121,8 +122,9 @@ func (g *Gateway) Start() {
}

    g.server = &http.Server{
        Addr:    g.cfg.gatewayAddr,
        Handler: corsMux,
+       ReadHeaderTimeout: time.Second,
    }

    go func() {

@@ -10,10 +10,10 @@ import (
    "testing"

    "github.com/gorilla/mux"
-   "github.com/prysmaticlabs/prysm/api/gateway/apimiddleware"
-   "github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
-   "github.com/prysmaticlabs/prysm/testing/assert"
-   "github.com/prysmaticlabs/prysm/testing/require"
+   "github.com/prysmaticlabs/prysm/v4/api/gateway/apimiddleware"
+   "github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags"
+   "github.com/prysmaticlabs/prysm/v4/testing/assert"
+   "github.com/prysmaticlabs/prysm/v4/testing/require"
    logTest "github.com/sirupsen/logrus/hooks/test"
    "github.com/urfave/cli/v2"
)
api/gateway/modifiers.go (new file, 30 lines)
@@ -0,0 +1,30 @@
package gateway

import (
    "context"
    "net/http"
    "strconv"

    gwruntime "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
    "google.golang.org/protobuf/proto"
)

func HttpResponseModifier(ctx context.Context, w http.ResponseWriter, _ proto.Message) error {
    md, ok := gwruntime.ServerMetadataFromContext(ctx)
    if !ok {
        return nil
    }
    // set http status code
    if vals := md.HeaderMD.Get("x-http-code"); len(vals) > 0 {
        code, err := strconv.Atoi(vals[0])
        if err != nil {
            return err
        }
        // delete the headers to not expose any grpc-metadata in http response
        delete(md.HeaderMD, "x-http-code")
        delete(w.Header(), "Grpc-Metadata-X-Http-Code")
        w.WriteHeader(code)
    }

    return nil
}
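HttpResponseModifier is meant to be handed to grpc-gateway as a forward-response option, so the x-http-code gRPC metadata can override the HTTP status code before the body is written. The exact registration site is not part of this hunk; a minimal wiring sketch, assuming the import paths shown in this diff:

package main

import (
    gwruntime "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
    "github.com/prysmaticlabs/prysm/v4/api/gateway"
)

func main() {
    // The modifier runs for every forwarded response on this mux.
    mux := gwruntime.NewServeMux(
        gwruntime.WithForwardResponseOption(gateway.HttpResponseModifier),
    )
    _ = mux
}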
@@ -5,16 +5,11 @@ import (
    "github.com/gorilla/mux"
    gwruntime "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
-   "github.com/prysmaticlabs/prysm/api/gateway/apimiddleware"
+   "github.com/prysmaticlabs/prysm/v4/api/gateway/apimiddleware"
)

type Option func(g *Gateway) error

-func (g *Gateway) SetRouter(r *mux.Router) *Gateway {
-   g.cfg.router = r
-   return g
-}
-
func WithPbHandlers(handlers []*PbMux) Option {
    return func(g *Gateway) error {
        g.cfg.pbHandlers = handlers

@@ -6,7 +6,7 @@ go_library(
        "grpcutils.go",
        "parameters.go",
    ],
-   importpath = "github.com/prysmaticlabs/prysm/api/grpc",
+   importpath = "github.com/prysmaticlabs/prysm/v4/api/grpc",
    visibility = ["//visibility:public"],
    deps = [
        "@com_github_sirupsen_logrus//:go_default_library",
@@ -7,8 +7,8 @@ import (
    "testing"

    "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
-   "github.com/prysmaticlabs/prysm/testing/assert"
-   "github.com/prysmaticlabs/prysm/testing/require"
+   "github.com/prysmaticlabs/prysm/v4/testing/assert"
+   "github.com/prysmaticlabs/prysm/v4/testing/require"
    logTest "github.com/sirupsen/logrus/hooks/test"
    "google.golang.org/grpc"
    "google.golang.org/grpc/metadata"

@@ -3,7 +3,7 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
    name = "go_default_library",
    srcs = ["pagination.go"],
-   importpath = "github.com/prysmaticlabs/prysm/api/pagination",
+   importpath = "github.com/prysmaticlabs/prysm/v4/api/pagination",
    visibility = ["//visibility:public"],
    deps = [
        "//config/params:go_default_library",

@@ -6,7 +6,7 @@ import (
    "strconv"

    "github.com/pkg/errors"
-   "github.com/prysmaticlabs/prysm/config/params"
+   "github.com/prysmaticlabs/prysm/v4/config/params"
)

// StartAndEndPage takes in the requested page token, wanted page size, total page size.

@@ -3,9 +3,9 @@ package pagination_test
import (
    "testing"

-   "github.com/prysmaticlabs/prysm/api/pagination"
-   "github.com/prysmaticlabs/prysm/testing/assert"
-   "github.com/prysmaticlabs/prysm/testing/require"
+   "github.com/prysmaticlabs/prysm/v4/api/pagination"
+   "github.com/prysmaticlabs/prysm/v4/testing/assert"
+   "github.com/prysmaticlabs/prysm/v4/testing/require"
)

func TestStartAndEndPage(t *testing.T) {

@@ -8,7 +8,7 @@ go_library(
        "multilock.go",
        "scatter.go",
    ],
-   importpath = "github.com/prysmaticlabs/prysm/async",
+   importpath = "github.com/prysmaticlabs/prysm/v4/async",
    visibility = ["//visibility:public"],
    deps = ["@com_github_sirupsen_logrus//:go_default_library"],
)

@@ -3,7 +3,7 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
    name = "go_default_library",
    srcs = ["abool.go"],
-   importpath = "github.com/prysmaticlabs/prysm/async/abool",
+   importpath = "github.com/prysmaticlabs/prysm/v4/async/abool",
    visibility = ["//visibility:public"],
)

@@ -6,8 +6,8 @@ import (
    "sync"
    "testing"

-   "github.com/prysmaticlabs/prysm/async"
-   "github.com/prysmaticlabs/prysm/testing/require"
+   "github.com/prysmaticlabs/prysm/v4/async"
+   "github.com/prysmaticlabs/prysm/v4/testing/require"
    log "github.com/sirupsen/logrus"
)
@@ -3,20 +3,21 @@ package async_test
import (
    "context"
    "sync"
+   "sync/atomic"
    "testing"
    "time"

-   "github.com/prysmaticlabs/prysm/async"
-   "github.com/prysmaticlabs/prysm/testing/assert"
-   "github.com/prysmaticlabs/prysm/testing/require"
-   "github.com/prysmaticlabs/prysm/testing/util"
+   "github.com/prysmaticlabs/prysm/v4/async"
+   "github.com/prysmaticlabs/prysm/v4/testing/assert"
+   "github.com/prysmaticlabs/prysm/v4/testing/require"
+   "github.com/prysmaticlabs/prysm/v4/testing/util"
)

func TestDebounce_NoEvents(t *testing.T) {
    eventsChan := make(chan interface{}, 100)
    ctx, cancel := context.WithCancel(context.Background())
    interval := time.Second
-   timesHandled := 0
+   timesHandled := int32(0)
    wg := &sync.WaitGroup{}
    wg.Add(1)
    go func() {

@@ -26,21 +27,21 @@ func TestDebounce_NoEvents(t *testing.T) {
    }()
    go func() {
        async.Debounce(ctx, interval, eventsChan, func(event interface{}) {
-           timesHandled++
+           atomic.AddInt32(&timesHandled, 1)
        })
        wg.Done()
    }()
    if util.WaitTimeout(wg, interval*2) {
        t.Fatalf("Test should have exited by now, timed out")
    }
-   assert.Equal(t, 0, timesHandled, "Wrong number of handled calls")
+   assert.Equal(t, int32(0), atomic.LoadInt32(&timesHandled), "Wrong number of handled calls")
}

func TestDebounce_CtxClosing(t *testing.T) {
    eventsChan := make(chan interface{}, 100)
    ctx, cancel := context.WithCancel(context.Background())
    interval := time.Second
-   timesHandled := 0
+   timesHandled := int32(0)
    wg := &sync.WaitGroup{}
    wg.Add(1)
    go func() {

@@ -62,23 +63,23 @@ func TestDebounce_CtxClosing(t *testing.T) {
    }()
    go func() {
        async.Debounce(ctx, interval, eventsChan, func(event interface{}) {
-           timesHandled++
+           atomic.AddInt32(&timesHandled, 1)
        })
        wg.Done()
    }()
    if util.WaitTimeout(wg, interval*2) {
        t.Fatalf("Test should have exited by now, timed out")
    }
-   assert.Equal(t, 0, timesHandled, "Wrong number of handled calls")
+   assert.Equal(t, int32(0), atomic.LoadInt32(&timesHandled), "Wrong number of handled calls")
}

func TestDebounce_SingleHandlerInvocation(t *testing.T) {
    eventsChan := make(chan interface{}, 100)
    ctx, cancel := context.WithCancel(context.Background())
    interval := time.Second
-   timesHandled := 0
+   timesHandled := int32(0)
    go async.Debounce(ctx, interval, eventsChan, func(event interface{}) {
-       timesHandled++
+       atomic.AddInt32(&timesHandled, 1)
    })
    for i := 0; i < 100; i++ {
        eventsChan <- struct{}{}

@@ -86,7 +87,7 @@ func TestDebounce_SingleHandlerInvocation(t *testing.T) {
    // We should expect 100 rapid fire changes to only have caused
    // 1 handler to trigger after the debouncing period.
    time.Sleep(interval * 2)
-   assert.Equal(t, 1, timesHandled, "Wrong number of handled calls")
+   assert.Equal(t, int32(1), atomic.LoadInt32(&timesHandled), "Wrong number of handled calls")
    cancel()
}

@@ -94,23 +95,23 @@ func TestDebounce_MultipleHandlerInvocation(t *testing.T) {
    eventsChan := make(chan interface{}, 100)
    ctx, cancel := context.WithCancel(context.Background())
    interval := time.Second
-   timesHandled := 0
+   timesHandled := int32(0)
    go async.Debounce(ctx, interval, eventsChan, func(event interface{}) {
-       timesHandled++
+       atomic.AddInt32(&timesHandled, 1)
    })
    for i := 0; i < 100; i++ {
        eventsChan <- struct{}{}
    }
-   require.Equal(t, 0, timesHandled, "Events must prevent from handler execution")
+   require.Equal(t, int32(0), atomic.LoadInt32(&timesHandled), "Events must prevent from handler execution")

    // By this time the first event should be triggered.
    time.Sleep(2 * time.Second)
-   assert.Equal(t, 1, timesHandled, "Wrong number of handled calls")
+   assert.Equal(t, int32(1), atomic.LoadInt32(&timesHandled), "Wrong number of handled calls")

    // Second event.
    eventsChan <- struct{}{}
    time.Sleep(2 * time.Second)
-   assert.Equal(t, 2, timesHandled, "Wrong number of handled calls")
+   assert.Equal(t, int32(2), atomic.LoadInt32(&timesHandled), "Wrong number of handled calls")

    cancel()
}
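The debounce tests above switch their counters to sync/atomic because the handler runs on Debounce's goroutine while the test goroutine reads the count. A compact usage sketch of async.Debounce following the same pattern (the printed value is an expectation, not a guarantee of exact timing):

package main

import (
    "context"
    "fmt"
    "sync/atomic"
    "time"

    "github.com/prysmaticlabs/prysm/v4/async"
)

func main() {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    events := make(chan interface{}, 100)
    handled := int32(0)

    // The counter is only touched through atomic ops, exactly as in the tests.
    go async.Debounce(ctx, time.Second, events, func(_ interface{}) {
        atomic.AddInt32(&handled, 1)
    })

    for i := 0; i < 100; i++ {
        events <- struct{}{} // rapid-fire events collapse into one handler call
    }
    time.Sleep(2 * time.Second)
    fmt.Println(atomic.LoadInt32(&handled)) // expected: 1
}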
@@ -6,7 +6,7 @@ go_library(
        "feed.go",
        "subscription.go",
    ],
-   importpath = "github.com/prysmaticlabs/prysm/async/event",
+   importpath = "github.com/prysmaticlabs/prysm/v4/async/event",
    visibility = ["//visibility:public"],
    deps = ["//time/mclock:go_default_library"],
)

@@ -19,7 +19,7 @@ package event_test
import (
    "fmt"

-   "github.com/prysmaticlabs/prysm/async/event"
+   "github.com/prysmaticlabs/prysm/v4/async/event"
)

func ExampleFeed_acknowledgedEvents() {

@@ -20,7 +20,7 @@ import (
    "fmt"
    "sync"

-   "github.com/prysmaticlabs/prysm/async/event"
+   "github.com/prysmaticlabs/prysm/v4/async/event"
)

// This example demonstrates how SubscriptionScope can be used to control the lifetime of

@@ -19,7 +19,7 @@ package event_test
import (
    "fmt"

-   "github.com/prysmaticlabs/prysm/async/event"
+   "github.com/prysmaticlabs/prysm/v4/async/event"
)

func ExampleNewSubscription() {

@@ -23,7 +23,7 @@ import (
    "testing"
    "time"

-   "github.com/prysmaticlabs/prysm/testing/assert"
+   "github.com/prysmaticlabs/prysm/v4/testing/assert"
)

func TestFeedPanics(t *testing.T) {

@@ -21,7 +21,7 @@ import (
    "sync"
    "time"

-   "github.com/prysmaticlabs/prysm/time/mclock"
+   "github.com/prysmaticlabs/prysm/v4/time/mclock"
)

// waitQuotient is divided against the max backoff time, in order to have N requests based on the full

@@ -23,7 +23,7 @@ import (
    "testing"
    "time"

-   "github.com/prysmaticlabs/prysm/testing/require"
+   "github.com/prysmaticlabs/prysm/v4/testing/require"
)

var errInts = errors.New("error in subscribeInts")
@@ -2,24 +2,25 @@ package async_test
 
 import (
 	"context"
+	"sync/atomic"
 	"testing"
 	"time"
 
-	"github.com/prysmaticlabs/prysm/async"
+	"github.com/prysmaticlabs/prysm/v4/async"
 )
 
 func TestEveryRuns(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 
-	i := 0
+	i := int32(0)
 	async.RunEvery(ctx, 100*time.Millisecond, func() {
-		i++
+		atomic.AddInt32(&i, 1)
 	})
 
 	// Sleep for a bit and ensure the value has increased.
 	time.Sleep(200 * time.Millisecond)
 
-	if i == 0 {
+	if atomic.LoadInt32(&i) == 0 {
 		t.Error("Counter failed to increment with ticker")
 	}
 
@@ -28,12 +29,12 @@ func TestEveryRuns(t *testing.T) {
 	// Sleep for a bit to let the cancel take place.
 	time.Sleep(100 * time.Millisecond)
 
-	last := i
+	last := atomic.LoadInt32(&i)
 
 	// Sleep for a bit and ensure the value has not increased.
 	time.Sleep(200 * time.Millisecond)
 
-	if i != last {
+	if atomic.LoadInt32(&i) != last {
 		t.Error("Counter incremented after stop")
 	}
 }
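The hunk above removes a data race: the callback handed to async.RunEvery runs on its own goroutine, so the counter is now an int32 read and written through sync/atomic. A minimal stand-alone sketch of the same pattern, assuming only that RunEvery behaves like a ticker-driven loop that stops when its context is cancelled (runEvery below is a hypothetical stand-in, not the real helper):

package main

import (
	"context"
	"fmt"
	"sync/atomic"
	"time"
)

// runEvery is a stand-in for async.RunEvery: it calls f on every tick
// until ctx is cancelled. (Assumption: the real helper behaves the same way.)
func runEvery(ctx context.Context, period time.Duration, f func()) {
	ticker := time.NewTicker(period)
	go func() {
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				f()
			case <-ctx.Done():
				return
			}
		}
	}()
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	var hits int32
	runEvery(ctx, 50*time.Millisecond, func() {
		atomic.AddInt32(&hits, 1) // written on the ticker goroutine
	})

	time.Sleep(200 * time.Millisecond)
	fmt.Println("ticks observed:", atomic.LoadInt32(&hits)) // read on the main goroutine
}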
@@ -3,7 +3,9 @@ Copyright 2017 Albert Tedja
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
-    http://www.apache.org/licenses/LICENSE-2.0
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -49,7 +51,7 @@ func (lk *Lock) Lock() {
 	lk.unlock <- 1
 }
 
-// Unlocks this lock. Must be called after Lock.
+// Unlock unlocks this lock. Must be called after Lock.
 // Can only be invoked if there is a previous call to Lock.
 func (lk *Lock) Unlock() {
 	<-lk.unlock
@@ -65,14 +67,14 @@ func (lk *Lock) Unlock() {
 	<-lk.lock
 }
 
-// Temporarily unlocks, gives up the cpu time to other goroutine, and attempts to lock again.
+// Yield temporarily unlocks, gives up the cpu time to other goroutine, and attempts to lock again.
 func (lk *Lock) Yield() {
 	lk.Unlock()
 	runtime.Gosched()
 	lk.Lock()
 }
 
-// Creates a new multilock for the specified keys
+// NewMultilock creates a new multilock for the specified keys
 func NewMultilock(locks ...string) *Lock {
 	if len(locks) == 0 {
 		return nil
@@ -87,7 +89,7 @@ func NewMultilock(locks ...string) *Lock {
 	}
 }
 
-// Cleans old unused locks. Returns removed keys.
+// Clean cleans old unused locks. Returns removed keys.
 func Clean() []string {
 	locks.lock <- 1
 	defer func() { <-locks.lock }()
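These edits only rename the doc comments to start with the exported identifier, per Go convention. For orientation, here is a hedged usage sketch of the multilock API visible in this file (NewMultilock, Lock, Unlock, Yield, Clean); the import path and the key names are illustrative assumptions:

package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v4/async" // assumption: the multilock lives in the async package shown in this diff
)

func main() {
	// Acquire one lock guarding two named resources at once.
	lk := async.NewMultilock("validator-db", "attestation-pool")
	lk.Lock()
	fmt.Println("both resources held")

	// Yield temporarily releases the lock, lets other goroutines run, then re-acquires it.
	lk.Yield()

	lk.Unlock()

	// Clean drops lock entries that are no longer in use and returns the removed keys.
	removed := async.Clean()
	fmt.Println("cleaned keys:", removed)
}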
@@ -3,7 +3,9 @@ Copyright 2017 Albert Tedja
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
-    http://www.apache.org/licenses/LICENSE-2.0
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -22,22 +24,22 @@ import (
 
 func TestUnique(t *testing.T) {
 	var arr []string
-	assert := assert.New(t)
+	a := assert.New(t)
 
 	arr = []string{"a", "b", "c"}
-	assert.Equal(arr, unique(arr))
+	a.Equal(arr, unique(arr))
 
 	arr = []string{"a", "a", "a"}
-	assert.Equal([]string{"a"}, unique(arr))
+	a.Equal([]string{"a"}, unique(arr))
 
 	arr = []string{"a", "a", "b"}
-	assert.Equal([]string{"a", "b"}, unique(arr))
+	a.Equal([]string{"a", "b"}, unique(arr))
 
 	arr = []string{"a", "b", "a"}
-	assert.Equal([]string{"a", "b"}, unique(arr))
+	a.Equal([]string{"a", "b"}, unique(arr))
 
 	arr = []string{"a", "b", "c", "b", "d"}
-	assert.Equal([]string{"a", "b", "c", "d"}, unique(arr))
+	a.Equal([]string{"a", "b", "c", "d"}, unique(arr))
 }
 
 func TestGetChan(t *testing.T) {
@@ -45,9 +47,9 @@ func TestGetChan(t *testing.T) {
 	ch2 := getChan("aa")
 	ch3 := getChan("a")
 
-	assert := assert.New(t)
-	assert.NotEqual(ch1, ch2)
-	assert.Equal(ch1, ch3)
+	a := assert.New(t)
+	a.NotEqual(ch1, ch2)
+	a.Equal(ch1, ch3)
 }
 
 func TestLockUnlock(_ *testing.T) {
@@ -14,7 +14,7 @@ type WorkerResults struct {
 
 // Scatter scatters a computation across multiple goroutines.
 // This breaks the task in to a number of chunks and executes those chunks in parallel with the function provided.
-// Results returned are collected and presented a a set of WorkerResults, which can be reassembled by the calling function.
+// Results returned are collected and presented as a set of WorkerResults, which can be reassembled by the calling function.
 // Any error that occurs in the workers will be passed back to the calling function.
 func Scatter(inputLen int, sFunc func(int, int, *sync.RWMutex) (interface{}, error)) ([]*WorkerResults, error) {
 	if inputLen <= 0 {
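Besides the typo fix, the Scatter signature shown here is enough to sketch a typical call: each worker gets an offset and a chunk length, and the caller reassembles the partial results. This is only a sketch; the Offset and Extent field names on WorkerResults are assumptions for illustration and are not shown in this hunk:

package main

import (
	"fmt"
	"sync"

	"github.com/prysmaticlabs/prysm/v4/async"
)

func main() {
	input := []uint64{1, 2, 3, 4, 5, 6, 7, 8}
	output := make([]uint64, len(input))

	// Each worker doubles its chunk of the input and returns the partial slice.
	results, err := async.Scatter(len(input), func(offset int, entries int, _ *sync.RWMutex) (interface{}, error) {
		part := make([]uint64, entries)
		for i := 0; i < entries; i++ {
			part[i] = input[offset+i] * 2
		}
		return part, nil
	})
	if err != nil {
		panic(err)
	}

	// Reassemble the chunks; Offset/Extent are assumed field names.
	for _, r := range results {
		copy(output[r.Offset:], r.Extent.([]uint64))
	}
	fmt.Println(output)
}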
@@ -5,9 +5,9 @@ import (
 	"sync"
 	"testing"
 
-	"github.com/prysmaticlabs/prysm/async"
-	"github.com/prysmaticlabs/prysm/testing/assert"
-	"github.com/prysmaticlabs/prysm/testing/require"
+	"github.com/prysmaticlabs/prysm/v4/async"
+	"github.com/prysmaticlabs/prysm/v4/testing/assert"
+	"github.com/prysmaticlabs/prysm/v4/testing/require"
 )
 
 func TestDouble(t *testing.T) {
@@ -4,14 +4,16 @@ go_library(
     name = "go_default_library",
     srcs = [
         "chain_info.go",
+        "chain_info_forkchoice.go",
         "error.go",
         "execution_engine.go",
+        "forkchoice_update_execution.go",
         "head.go",
         "head_sync_committee_info.go",
         "init_sync_process_block.go",
         "log.go",
+        "merge_ascii_art.go",
         "metrics.go",
-        "new_slot.go",
         "options.go",
         "pow_block.go",
         "process_attestation.go",
@@ -21,10 +23,9 @@ go_library(
         "receive_attestation.go",
         "receive_block.go",
        "service.go",
-        "state_balance_cache.go",
        "weak_subjectivity_checks.go",
    ],
-    importpath = "github.com/prysmaticlabs/prysm/beacon-chain/blockchain",
+    importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain",
    visibility = [
        "//beacon-chain:__subpackages__",
        "//cmd/beacon-chain:__subpackages__",
@@ -34,7 +35,6 @@ go_library(
    deps = [
        "//async:go_default_library",
        "//async/event:go_default_library",
-        "//beacon-chain/blockchain/store:go_default_library",
        "//beacon-chain/cache:go_default_library",
        "//beacon-chain/cache/depositcache:go_default_library",
        "//beacon-chain/core/altair:go_default_library",
@@ -49,25 +49,26 @@ go_library(
        "//beacon-chain/db:go_default_library",
        "//beacon-chain/db/filters:go_default_library",
        "//beacon-chain/db/kv:go_default_library",
+        "//beacon-chain/execution:go_default_library",
        "//beacon-chain/forkchoice:go_default_library",
        "//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
-        "//beacon-chain/forkchoice/protoarray:go_default_library",
        "//beacon-chain/forkchoice/types:go_default_library",
        "//beacon-chain/operations/attestations:go_default_library",
+        "//beacon-chain/operations/blstoexec:go_default_library",
        "//beacon-chain/operations/slashings:go_default_library",
        "//beacon-chain/operations/voluntaryexits:go_default_library",
        "//beacon-chain/p2p:go_default_library",
-        "//beacon-chain/powchain:go_default_library",
+        "//beacon-chain/startup:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//beacon-chain/state/stategen:go_default_library",
-        "//cmd/beacon-chain/flags:go_default_library",
        "//config/features:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
-        "//consensus-types/forks/bellatrix:go_default_library",
+        "//consensus-types:go_default_library",
+        "//consensus-types/blocks:go_default_library",
        "//consensus-types/interfaces:go_default_library",
+        "//consensus-types/payload-attribute:go_default_library",
        "//consensus-types/primitives:go_default_library",
-        "//consensus-types/wrapper:go_default_library",
        "//crypto/bls:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//math:go_default_library",
@@ -106,19 +107,20 @@ go_test(
        "chain_info_test.go",
        "checktags_test.go",
        "execution_engine_test.go",
+        "forkchoice_update_execution_test.go",
        "head_sync_committee_info_test.go",
        "head_test.go",
        "init_test.go",
        "log_test.go",
        "metrics_test.go",
        "mock_test.go",
-        "new_slot_test.go",
        "pow_block_test.go",
        "process_attestation_test.go",
        "process_block_test.go",
        "receive_attestation_test.go",
        "receive_block_test.go",
        "service_test.go",
+        "setup_test.go",
        "weak_subjectivity_checks_test.go",
    ],
    embed = [":go_default_library"],
@@ -132,15 +134,16 @@ go_test(
        "//beacon-chain/core/transition:go_default_library",
        "//beacon-chain/db:go_default_library",
        "//beacon-chain/db/testing:go_default_library",
+        "//beacon-chain/execution:go_default_library",
+        "//beacon-chain/execution/testing:go_default_library",
+        "//beacon-chain/forkchoice/types:go_default_library",
        "//beacon-chain/p2p:go_default_library",
-        "//beacon-chain/powchain:go_default_library",
-        "//beacon-chain/powchain/testing:go_default_library",
+        "//beacon-chain/state/state-native:go_default_library",
        "//beacon-chain/state/stateutil:go_default_library",
-        "//beacon-chain/state/v1:go_default_library",
-        "//beacon-chain/state/v3:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
-        "//consensus-types/wrapper:go_default_library",
+        "//consensus-types/blocks:go_default_library",
+        "//consensus-types/blocks/testing:go_default_library",
        "//container/trie:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
@@ -167,6 +170,7 @@ go_test(
        "mock_test.go",
        "receive_block_test.go",
        "service_norace_test.go",
+        "setup_test.go",
    ],
    embed = [":go_default_library"],
    gc_goopts = [
@@ -188,11 +192,13 @@ go_test(
        "//beacon-chain/core/transition:go_default_library",
        "//beacon-chain/db:go_default_library",
        "//beacon-chain/db/testing:go_default_library",
+        "//beacon-chain/execution:go_default_library",
+        "//beacon-chain/execution/testing:go_default_library",
+        "//beacon-chain/forkchoice/types:go_default_library",
        "//beacon-chain/p2p:go_default_library",
-        "//beacon-chain/powchain:go_default_library",
-        "//beacon-chain/powchain/testing:go_default_library",
        "//config/params:go_default_library",
-        "//consensus-types/wrapper:go_default_library",
+        "//consensus-types/blocks:go_default_library",
+        "//consensus-types/blocks/testing:go_default_library",
        "//container/trie:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
@@ -1,23 +1,23 @@
 package blockchain
 
 import (
+	"bytes"
 	"context"
 	"time"
 
 	"github.com/pkg/errors"
-	"github.com/prysmaticlabs/prysm/beacon-chain/blockchain/store"
-	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
-	"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice"
-	doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
-	"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
-	"github.com/prysmaticlabs/prysm/beacon-chain/state"
-	fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
-	"github.com/prysmaticlabs/prysm/config/params"
-	"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
-	types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
-	"github.com/prysmaticlabs/prysm/encoding/bytesutil"
-	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
-	"github.com/prysmaticlabs/prysm/time/slots"
+	"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
+	doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
+	forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
+	"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
+	fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
+	"github.com/prysmaticlabs/prysm/v4/config/params"
+	"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
+	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
+	"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
+	ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
+	ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
+	"github.com/prysmaticlabs/prysm/v4/time/slots"
 	"go.opencensus.io/trace"
 )
@@ -26,23 +26,34 @@ import (
 type ChainInfoFetcher interface {
 	HeadFetcher
 	FinalizationFetcher
-	GenesisFetcher
 	CanonicalFetcher
 	ForkFetcher
-	TimeFetcher
 	HeadDomainFetcher
+	ForkchoiceFetcher
 }
 
-// HeadUpdater defines a common interface for methods in blockchain service
-// which allow to update the head info
-type HeadUpdater interface {
-	UpdateHead(context.Context) error
+// ForkchoiceFetcher defines a common interface for methods that access directly
+// forkchoice information. These typically require a lock and external callers
+// are requested to call methods within this blockchain package that takes care
+// of locking forkchoice
+type ForkchoiceFetcher interface {
+	Ancestor(context.Context, []byte, primitives.Slot) ([]byte, error)
+	CachedHeadRoot() [32]byte
+	GetProposerHead() [32]byte
+	SetForkChoiceGenesisTime(uint64)
+	UpdateHead(context.Context, primitives.Slot)
+	HighestReceivedBlockSlot() primitives.Slot
+	ReceivedBlocksLastEpoch() (uint64, error)
+	InsertNode(context.Context, state.BeaconState, [32]byte) error
+	ForkChoiceDump(context.Context) (*ethpbv1.ForkChoiceDump, error)
+	NewSlot(context.Context, primitives.Slot) error
+	ProposerBoost() [32]byte
 }
 
 // TimeFetcher retrieves the Ethereum consensus data that's related to time.
 type TimeFetcher interface {
 	GenesisTime() time.Time
-	CurrentSlot() types.Slot
+	CurrentSlot() primitives.Slot
 }
 
 // GenesisFetcher retrieves the Ethereum consensus data related to its genesis.
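The point of the new ForkchoiceFetcher interface is that callers outside the blockchain package read forkchoice data through methods that do their own locking. A hedged sketch of a caller written against the interface; the reporting function and its log line are illustrative, not part of the change:

package example

import (
	"context"
	"fmt"

	"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
)

// reportHead reads forkchoice-derived data through the interface; the
// implementation (the blockchain Service) handles RLock/RUnlock internally.
func reportHead(ctx context.Context, f blockchain.ForkchoiceFetcher, slot primitives.Slot) error {
	if err := f.NewSlot(ctx, slot); err != nil {
		return err
	}
	f.UpdateHead(ctx, slot)
	fmt.Printf("cached head %#x, highest received slot %d\n", f.CachedHeadRoot(), f.HighestReceivedBlockSlot())
	return nil
}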
@@ -53,24 +64,32 @@ type GenesisFetcher interface {
 // HeadFetcher defines a common interface for methods in blockchain service which
 // directly retrieve head related data.
 type HeadFetcher interface {
-	HeadSlot() types.Slot
+	HeadSlot() primitives.Slot
 	HeadRoot(ctx context.Context) ([]byte, error)
-	HeadBlock(ctx context.Context) (interfaces.SignedBeaconBlock, error)
+	HeadBlock(ctx context.Context) (interfaces.ReadOnlySignedBeaconBlock, error)
 	HeadState(ctx context.Context) (state.BeaconState, error)
-	HeadValidatorsIndices(ctx context.Context, epoch types.Epoch) ([]types.ValidatorIndex, error)
+	HeadStateReadOnly(ctx context.Context) (state.ReadOnlyBeaconState, error)
+	HeadValidatorsIndices(ctx context.Context, epoch primitives.Epoch) ([]primitives.ValidatorIndex, error)
 	HeadGenesisValidatorsRoot() [32]byte
 	HeadETH1Data() *ethpb.Eth1Data
-	HeadPublicKeyToValidatorIndex(pubKey [fieldparams.BLSPubkeyLength]byte) (types.ValidatorIndex, bool)
-	HeadValidatorIndexToPublicKey(ctx context.Context, index types.ValidatorIndex) ([fieldparams.BLSPubkeyLength]byte, error)
-	ChainHeads() ([][32]byte, []types.Slot)
+	HeadPublicKeyToValidatorIndex(pubKey [fieldparams.BLSPubkeyLength]byte) (primitives.ValidatorIndex, bool)
+	HeadValidatorIndexToPublicKey(ctx context.Context, index primitives.ValidatorIndex) ([fieldparams.BLSPubkeyLength]byte, error)
+	ChainHeads() ([][32]byte, []primitives.Slot)
 	HeadSyncCommitteeFetcher
 	HeadDomainFetcher
 }
 
 // ForkFetcher retrieves the current fork information of the Ethereum beacon chain.
 type ForkFetcher interface {
-	ForkChoicer() forkchoice.ForkChoicer
 	CurrentFork() *ethpb.Fork
+	GenesisFetcher
+	TimeFetcher
+}
+
+// TemporalOracle is like ForkFetcher minus CurrentFork()
+type TemporalOracle interface {
+	GenesisFetcher
+	TimeFetcher
 }
 
 // CanonicalFetcher retrieves the current chain's canonical information.
@@ -81,10 +100,13 @@ type CanonicalFetcher interface {
 // FinalizationFetcher defines a common interface for methods in blockchain service which
 // directly retrieve finalization and justification related data.
 type FinalizationFetcher interface {
-	FinalizedCheckpt() (*ethpb.Checkpoint, error)
-	CurrentJustifiedCheckpt() (*ethpb.Checkpoint, error)
-	PreviousJustifiedCheckpt() (*ethpb.Checkpoint, error)
-	VerifyFinalizedBlkDescendant(ctx context.Context, blockRoot [32]byte) error
+	FinalizedCheckpt() *ethpb.Checkpoint
+	CurrentJustifiedCheckpt() *ethpb.Checkpoint
+	PreviousJustifiedCheckpt() *ethpb.Checkpoint
+	UnrealizedJustifiedPayloadBlockHash() [32]byte
+	FinalizedBlockHash() [32]byte
+	InForkchoice([32]byte) bool
+	IsFinalized(ctx context.Context, blockRoot [32]byte) bool
 }
 
 // OptimisticModeFetcher retrieves information about optimistic status of the node.
@@ -94,51 +116,31 @@ type OptimisticModeFetcher interface {
 }
 
 // FinalizedCheckpt returns the latest finalized checkpoint from chain store.
-func (s *Service) FinalizedCheckpt() (*ethpb.Checkpoint, error) {
-	cp, err := s.store.FinalizedCheckpt()
-	if err != nil {
-		return nil, err
-	}
-
-	return ethpb.CopyCheckpoint(cp), nil
+func (s *Service) FinalizedCheckpt() *ethpb.Checkpoint {
+	s.cfg.ForkChoiceStore.RLock()
+	defer s.cfg.ForkChoiceStore.RUnlock()
+	cp := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
+	return &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
+}
+
+// PreviousJustifiedCheckpt returns the current justified checkpoint from chain store.
+func (s *Service) PreviousJustifiedCheckpt() *ethpb.Checkpoint {
+	s.cfg.ForkChoiceStore.RLock()
+	defer s.cfg.ForkChoiceStore.RUnlock()
+	cp := s.cfg.ForkChoiceStore.PreviousJustifiedCheckpoint()
+	return &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
 }
 
 // CurrentJustifiedCheckpt returns the current justified checkpoint from chain store.
-func (s *Service) CurrentJustifiedCheckpt() (*ethpb.Checkpoint, error) {
-	cp, err := s.store.JustifiedCheckpt()
-	if err != nil {
-		return nil, err
-	}
-
-	return ethpb.CopyCheckpoint(cp), nil
-}
-
-// PreviousJustifiedCheckpt returns the previous justified checkpoint from chain store.
-func (s *Service) PreviousJustifiedCheckpt() (*ethpb.Checkpoint, error) {
-	cp, err := s.store.PrevJustifiedCheckpt()
-	if err != nil {
-		return nil, err
-	}
-
-	return ethpb.CopyCheckpoint(cp), nil
-}
-
-// BestJustifiedCheckpt returns the best justified checkpoint from store.
-func (s *Service) BestJustifiedCheckpt() (*ethpb.Checkpoint, error) {
-	cp, err := s.store.BestJustifiedCheckpt()
-	if err != nil {
-		// If there is no best justified checkpoint, return the checkpoint with root as zeros to be used for genesis cases.
-		if errors.Is(err, store.ErrNilCheckpoint) {
-			return &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}, nil
-		}
-		return nil, err
-	}
-
-	return ethpb.CopyCheckpoint(cp), nil
+func (s *Service) CurrentJustifiedCheckpt() *ethpb.Checkpoint {
+	s.cfg.ForkChoiceStore.RLock()
+	defer s.cfg.ForkChoiceStore.RUnlock()
+	cp := s.cfg.ForkChoiceStore.JustifiedCheckpoint()
+	return &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
 }
 
 // HeadSlot returns the slot of the head of the chain.
-func (s *Service) HeadSlot() types.Slot {
+func (s *Service) HeadSlot() primitives.Slot {
 	s.headLock.RLock()
 	defer s.headLock.RUnlock()
 
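All three rewritten getters share one shape: take the forkchoice read lock, read the checkpoint, and return a copy so callers never alias forkchoice-owned memory (hence bytesutil.SafeCopyBytes on the root). A self-contained sketch of that defensive-copy-under-RLock pattern, with illustrative names rather than the service's real fields:

package example

import "sync"

type checkpoint struct {
	Epoch uint64
	Root  [32]byte
}

type store struct {
	mu        sync.RWMutex
	finalized checkpoint
}

// Finalized returns a copy taken under the read lock, mirroring how
// FinalizedCheckpt above copies Epoch and Root before returning.
func (s *store) Finalized() checkpoint {
	s.mu.RLock()
	defer s.mu.RUnlock()
	cp := s.finalized
	return cp // value copy: callers cannot mutate the store's checkpoint
}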
@@ -154,9 +156,8 @@ func (s *Service) HeadRoot(ctx context.Context) ([]byte, error) {
 	s.headLock.RLock()
 	defer s.headLock.RUnlock()
 
-	if s.headRoot() != params.BeaconConfig().ZeroHash {
-		r := s.headRoot()
-		return r[:], nil
+	if s.head != nil && s.head.root != params.BeaconConfig().ZeroHash {
+		return bytesutil.SafeCopyBytes(s.head.root[:]), nil
 	}
 
 	b, err := s.cfg.BeaconDB.HeadBlock(ctx)
@@ -178,12 +179,12 @@ func (s *Service) HeadRoot(ctx context.Context) ([]byte, error) {
 // HeadBlock returns the head block of the chain.
 // If the head is nil from service struct,
 // it will attempt to get the head block from DB.
-func (s *Service) HeadBlock(ctx context.Context) (interfaces.SignedBeaconBlock, error) {
+func (s *Service) HeadBlock(ctx context.Context) (interfaces.ReadOnlySignedBeaconBlock, error) {
 	s.headLock.RLock()
 	defer s.headLock.RUnlock()
 
 	if s.hasHeadState() {
-		return s.headBlock(), nil
+		return s.headBlock()
 	}
 
 	return s.cfg.BeaconDB.HeadBlock(ctx)
@@ -208,13 +209,35 @@ func (s *Service) HeadState(ctx context.Context) (state.BeaconState, error) {
 	return s.cfg.StateGen.StateByRoot(ctx, s.headRoot())
 }
 
+// HeadStateReadOnly returns the read only head state of the chain.
+// If the head is nil from service struct, it will attempt to get the
+// head state from DB. Any callers of this method MUST only use the
+// state instance to read fields from the state. Any type assertions back
+// to the concrete type and subsequent use of it could lead to corruption
+// of the state.
+func (s *Service) HeadStateReadOnly(ctx context.Context) (state.ReadOnlyBeaconState, error) {
+	ctx, span := trace.StartSpan(ctx, "blockChain.HeadStateReadOnly")
+	defer span.End()
+	s.headLock.RLock()
+	defer s.headLock.RUnlock()
+
+	ok := s.hasHeadState()
+	span.AddAttributes(trace.BoolAttribute("cache_hit", ok))
+
+	if ok {
+		return s.headStateReadOnly(ctx), nil
+	}
+
+	return s.cfg.StateGen.StateByRoot(ctx, s.headRoot())
+}
+
 // HeadValidatorsIndices returns a list of active validator indices from the head view of a given epoch.
-func (s *Service) HeadValidatorsIndices(ctx context.Context, epoch types.Epoch) ([]types.ValidatorIndex, error) {
+func (s *Service) HeadValidatorsIndices(ctx context.Context, epoch primitives.Epoch) ([]primitives.ValidatorIndex, error) {
 	s.headLock.RLock()
 	defer s.headLock.RUnlock()
 
 	if !s.hasHeadState() {
-		return []types.ValidatorIndex{}, nil
+		return []primitives.ValidatorIndex{}, nil
 	}
 	return helpers.ActiveValidatorIndices(ctx, s.headState(ctx), epoch)
 }
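The doc comment on HeadStateReadOnly carries the real contract: treat the returned state as read-only and never assert it back to a mutable concrete type. A hedged sketch of a compliant caller, assuming only the HeadFetcher methods listed earlier in this diff:

package example

import (
	"context"
	"fmt"

	"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
)

// logHeadSlot reads from the head state without asserting it back to a
// mutable concrete type, as the HeadStateReadOnly doc comment requires.
func logHeadSlot(ctx context.Context, c blockchain.HeadFetcher) error {
	st, err := c.HeadStateReadOnly(ctx)
	if err != nil {
		return err
	}
	fmt.Println("head state slot:", st.Slot())
	return nil
}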
@@ -275,6 +298,8 @@ func (s *Service) CurrentFork() *ethpb.Fork {
 
 // IsCanonical returns true if the input block root is part of the canonical chain.
 func (s *Service) IsCanonical(ctx context.Context, blockRoot [32]byte) (bool, error) {
+	s.cfg.ForkChoiceStore.RLock()
+	defer s.cfg.ForkChoiceStore.RUnlock()
 	// If the block has not been finalized, check fork choice store to see if the block is canonical
 	if s.cfg.ForkChoiceStore.HasNode(blockRoot) {
 		return s.cfg.ForkChoiceStore.IsCanonical(blockRoot), nil
@@ -284,14 +309,8 @@ func (s *Service) IsCanonical(ctx context.Context, blockRoot [32]byte) (bool, er
 	return s.cfg.BeaconDB.IsFinalizedBlock(ctx, blockRoot), nil
 }
 
-// ChainHeads returns all possible chain heads (leaves of fork choice tree).
-// Heads roots and heads slots are returned.
-func (s *Service) ChainHeads() ([][32]byte, []types.Slot) {
-	return s.cfg.ForkChoiceStore.Tips()
-}
-
 // HeadPublicKeyToValidatorIndex returns the validator index of the `pubkey` in current head state.
-func (s *Service) HeadPublicKeyToValidatorIndex(pubKey [fieldparams.BLSPubkeyLength]byte) (types.ValidatorIndex, bool) {
+func (s *Service) HeadPublicKeyToValidatorIndex(pubKey [fieldparams.BLSPubkeyLength]byte) (primitives.ValidatorIndex, bool) {
 	s.headLock.RLock()
 	defer s.headLock.RUnlock()
 	if !s.hasHeadState() {
@@ -301,7 +320,7 @@ func (s *Service) HeadPublicKeyToValidatorIndex(pubKey [fieldparams.BLSPubkeyLen
 }
 
 // HeadValidatorIndexToPublicKey returns the pubkey of the validator `index` in current head state.
-func (s *Service) HeadValidatorIndexToPublicKey(_ context.Context, index types.ValidatorIndex) ([fieldparams.BLSPubkeyLength]byte, error) {
+func (s *Service) HeadValidatorIndexToPublicKey(_ context.Context, index primitives.ValidatorIndex) ([fieldparams.BLSPubkeyLength]byte, error) {
 	s.headLock.RLock()
 	defer s.headLock.RUnlock()
 	if !s.hasHeadState() {
@@ -314,40 +333,95 @@ func (s *Service) HeadValidatorIndexToPublicKey(_ context.Context, index types.V
 	return v.PublicKey(), nil
 }
 
-// ForkChoicer returns the forkchoice interface.
-func (s *Service) ForkChoicer() forkchoice.ForkChoicer {
-	return s.cfg.ForkChoiceStore
-}
-
 // IsOptimistic returns true if the current head is optimistic.
-func (s *Service) IsOptimistic(ctx context.Context) (bool, error) {
-	s.headLock.RLock()
-	defer s.headLock.RUnlock()
+func (s *Service) IsOptimistic(_ context.Context) (bool, error) {
 	if slots.ToEpoch(s.CurrentSlot()) < params.BeaconConfig().BellatrixForkEpoch {
 		return false, nil
 	}
+	s.headLock.RLock()
+	headRoot := s.head.root
+	s.headLock.RUnlock()
 
-	return s.IsOptimisticForRoot(ctx, s.head.root)
+	s.cfg.ForkChoiceStore.RLock()
+	defer s.cfg.ForkChoiceStore.RUnlock()
+	optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(headRoot)
+	if err == nil {
+		return optimistic, nil
+	}
+	if !errors.Is(err, doublylinkedtree.ErrNilNode) {
+		return true, err
+	}
+	// If fockchoice does not have the headroot, then the node is considered
+	// optimistic
+	return true, nil
+}
+
+// IsFinalized returns true if the input root is finalized.
+// It first checks latest finalized root then checks finalized root index in DB.
+func (s *Service) IsFinalized(ctx context.Context, root [32]byte) bool {
+	s.cfg.ForkChoiceStore.RLock()
+	defer s.cfg.ForkChoiceStore.RUnlock()
+	if s.cfg.ForkChoiceStore.FinalizedCheckpoint().Root == root {
+		return true
+	}
+	// If node exists in our store, then it is not
+	// finalized.
+	if s.cfg.ForkChoiceStore.HasNode(root) {
+		return false
+	}
+	return s.cfg.BeaconDB.IsFinalizedBlock(ctx, root)
+}
+
+// InForkchoice returns true if the given root is found in forkchoice
+// This in particular means that the blockroot is a descendant of the
+// finalized checkpoint
+func (s *Service) InForkchoice(root [32]byte) bool {
+	s.cfg.ForkChoiceStore.RLock()
+	defer s.cfg.ForkChoiceStore.RUnlock()
+	return s.cfg.ForkChoiceStore.HasNode(root)
+}
+
+// IsViableForkCheckpoint returns whether the given checkpoint is a checkpoint in any
+// chain known to forkchoice
+func (s *Service) IsViableForCheckpoint(cp *forkchoicetypes.Checkpoint) (bool, error) {
+	s.cfg.ForkChoiceStore.RLock()
+	defer s.cfg.ForkChoiceStore.RUnlock()
+	return s.cfg.ForkChoiceStore.IsViableForCheckpoint(cp)
 }
 
 // IsOptimisticForRoot takes the root as argument instead of the current head
 // and returns true if it is optimistic.
 func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error) {
+	s.cfg.ForkChoiceStore.RLock()
 	optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(root)
+	s.cfg.ForkChoiceStore.RUnlock()
 	if err == nil {
 		return optimistic, nil
 	}
-	if err != protoarray.ErrUnknownNodeRoot && err != doublylinkedtree.ErrNilNode {
+	if !errors.Is(err, doublylinkedtree.ErrNilNode) {
 		return false, err
 	}
+	// if the requested root is the headroot and the root is not found in
+	// forkchoice, the node should respond that it is optimistic
+	headRoot, err := s.HeadRoot(ctx)
+	if err != nil {
+		return true, err
+	}
+	if bytes.Equal(headRoot, root[:]) {
+		return true, nil
+	}
+
 	ss, err := s.cfg.BeaconDB.StateSummary(ctx, root)
 	if err != nil {
 		return false, err
 	}
-	if ss == nil {
-		return false, errInvalidNilSummary
-	}
 
+	if ss == nil {
+		ss, err = s.recoverStateSummary(ctx, root)
+		if err != nil {
+			return true, err
+		}
+	}
 	validatedCheckpoint, err := s.cfg.BeaconDB.LastValidatedCheckpoint(ctx)
 	if err != nil {
 		return false, err
@@ -356,8 +430,14 @@ func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool,
 		return true, nil
 	}
 
+	// Historical non-canonical blocks here are returned as optimistic for safety.
+	isCanonical, err := s.IsCanonical(ctx, root)
+	if err != nil {
+		return false, err
+	}
+
 	if slots.ToEpoch(ss.Slot)+1 < validatedCheckpoint.Epoch {
-		return false, nil
+		return !isCanonical, nil
 	}
 
 	// Checkpoint root could be zeros before the first finalized epoch. Use genesis root if the case.
@@ -366,20 +446,51 @@ func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool,
 		return false, err
 	}
 	if lastValidated == nil {
-		return false, errInvalidNilSummary
+		lastValidated, err = s.recoverStateSummary(ctx, root)
+		if err != nil {
+			return false, err
+		}
 	}
 
 	if ss.Slot > lastValidated.Slot {
 		return true, nil
 	}
+	return !isCanonical, nil
+}
 
-	isCanonical, err := s.IsCanonical(ctx, root)
-	if err != nil {
-		return false, err
-	}
-
-	// Historical non-canonical blocks here are returned as optimistic for safety.
-	return !isCanonical, nil
+// Ancestor returns the block root of an ancestry block from the input block root.
+//
+// Spec pseudocode definition:
+//
+//	def get_ancestor(store: Store, root: Root, slot: Slot) -> Root:
+//	    block = store.blocks[root]
+//	    if block.slot > slot:
+//	        return get_ancestor(store, block.parent_root, slot)
+//	    elif block.slot == slot:
+//	        return root
+//	    else:
+//	        # root is older than queried slot, thus a skip slot. Return most recent root prior to slot
+//	        return root
+func (s *Service) Ancestor(ctx context.Context, root []byte, slot primitives.Slot) ([]byte, error) {
+	ctx, span := trace.StartSpan(ctx, "blockChain.ancestor")
+	defer span.End()
+
+	r := bytesutil.ToBytes32(root)
+	// Get ancestor root from fork choice store instead of recursively looking up blocks in DB.
+	// This is most optimal outcome.
+	s.cfg.ForkChoiceStore.RLock()
+	ar, err := s.cfg.ForkChoiceStore.AncestorRoot(ctx, r, slot)
+	s.cfg.ForkChoiceStore.RUnlock()
+	if err != nil {
+		// Try getting ancestor root from DB when failed to retrieve from fork choice store.
+		// This is the second line of defense for retrieving ancestor root.
+		ar, err = s.ancestorByDB(ctx, r, slot)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return ar[:], nil
 }
 
 // SetGenesisTime sets the genesis time of beacon chain.
@@ -387,7 +498,17 @@ func (s *Service) SetGenesisTime(t time.Time) {
 	s.genesisTime = t
 }
 
-// ForkChoiceStore returns the fork choice store in the service.
-func (s *Service) ForkChoiceStore() forkchoice.ForkChoicer {
-	return s.cfg.ForkChoiceStore
+func (s *Service) recoverStateSummary(ctx context.Context, blockRoot [32]byte) (*ethpb.StateSummary, error) {
+	if s.cfg.BeaconDB.HasBlock(ctx, blockRoot) {
+		b, err := s.cfg.BeaconDB.Block(ctx, blockRoot)
+		if err != nil {
+			return nil, err
+		}
+		summary := &ethpb.StateSummary{Slot: b.Block().Slot(), Root: blockRoot[:]}
+		if err := s.cfg.BeaconDB.SaveStateSummary(ctx, summary); err != nil {
+			return nil, err
+		}
+		return summary, nil
+	}
+	return nil, errBlockDoesNotExist
 }
beacon-chain/blockchain/chain_info_forkchoice.go (new file, 94 lines)
@@ -0,0 +1,94 @@
+package blockchain
+
+import (
+	"context"
+
+	"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
+	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
+	ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
+)
+
+// CachedHeadRoot returns the corresponding value from Forkchoice
+func (s *Service) CachedHeadRoot() [32]byte {
+	s.cfg.ForkChoiceStore.RLock()
+	defer s.cfg.ForkChoiceStore.RUnlock()
+	return s.cfg.ForkChoiceStore.CachedHeadRoot()
+}
+
+// GetProposerHead returns the corresponding value from forkchoice
+func (s *Service) GetProposerHead() [32]byte {
+	s.cfg.ForkChoiceStore.RLock()
+	defer s.cfg.ForkChoiceStore.RUnlock()
+	return s.cfg.ForkChoiceStore.GetProposerHead()
+}
+
+// SetForkChoiceGenesisTime sets the genesis time in Forkchoice
+func (s *Service) SetForkChoiceGenesisTime(timestamp uint64) {
+	s.cfg.ForkChoiceStore.Lock()
+	defer s.cfg.ForkChoiceStore.Unlock()
+	s.cfg.ForkChoiceStore.SetGenesisTime(timestamp)
+}
+
+// HighestReceivedBlockSlot returns the corresponding value from forkchoice
+func (s *Service) HighestReceivedBlockSlot() primitives.Slot {
+	s.cfg.ForkChoiceStore.RLock()
+	defer s.cfg.ForkChoiceStore.RUnlock()
+	return s.cfg.ForkChoiceStore.HighestReceivedBlockSlot()
+}
+
+// ReceivedBlocksLastEpoch returns the corresponding value from forkchoice
+func (s *Service) ReceivedBlocksLastEpoch() (uint64, error) {
+	s.cfg.ForkChoiceStore.RLock()
+	defer s.cfg.ForkChoiceStore.RUnlock()
+	return s.cfg.ForkChoiceStore.ReceivedBlocksLastEpoch()
+}
+
+// InsertNode is a wrapper for node insertion which is self locked
+func (s *Service) InsertNode(ctx context.Context, st state.BeaconState, root [32]byte) error {
+	s.cfg.ForkChoiceStore.Lock()
+	defer s.cfg.ForkChoiceStore.Unlock()
+	return s.cfg.ForkChoiceStore.InsertNode(ctx, st, root)
+}
+
+// ForkChoiceDump returns the corresponding value from forkchoice
+func (s *Service) ForkChoiceDump(ctx context.Context) (*ethpbv1.ForkChoiceDump, error) {
+	s.cfg.ForkChoiceStore.RLock()
+	defer s.cfg.ForkChoiceStore.RUnlock()
+	return s.cfg.ForkChoiceStore.ForkChoiceDump(ctx)
+}
+
+// NewSlot returns the corresponding value from forkchoice
+func (s *Service) NewSlot(ctx context.Context, slot primitives.Slot) error {
+	s.cfg.ForkChoiceStore.Lock()
+	defer s.cfg.ForkChoiceStore.Unlock()
+	return s.cfg.ForkChoiceStore.NewSlot(ctx, slot)
+}
+
+// ProposerBoost wraps the corresponding method from forkchoice
+func (s *Service) ProposerBoost() [32]byte {
+	s.cfg.ForkChoiceStore.Lock()
+	defer s.cfg.ForkChoiceStore.Unlock()
+	return s.cfg.ForkChoiceStore.ProposerBoost()
+}
+
+// ChainHeads returns all possible chain heads (leaves of fork choice tree).
+// Heads roots and heads slots are returned.
+func (s *Service) ChainHeads() ([][32]byte, []primitives.Slot) {
+	s.cfg.ForkChoiceStore.RLock()
+	defer s.cfg.ForkChoiceStore.RUnlock()
+	return s.cfg.ForkChoiceStore.Tips()
+}
+
+// UnrealizedJustifiedPayloadBlockHash returns unrealized justified payload block hash from forkchoice.
+func (s *Service) UnrealizedJustifiedPayloadBlockHash() [32]byte {
+	s.cfg.ForkChoiceStore.RLock()
+	defer s.cfg.ForkChoiceStore.RUnlock()
+	return s.cfg.ForkChoiceStore.UnrealizedJustifiedPayloadBlockHash()
+}
+
+// FinalizedBlockHash returns finalized payload block hash from forkchoice.
+func (s *Service) FinalizedBlockHash() [32]byte {
+	s.cfg.ForkChoiceStore.RLock()
+	defer s.cfg.ForkChoiceStore.RUnlock()
+	return s.cfg.ForkChoiceStore.FinalizedPayloadBlockHash()
+}
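Every wrapper in this new file follows the same discipline: the service owns the forkchoice lock, reads take RLock and writes take Lock, and callers never lock anything themselves. A tiny self-contained illustration of that self-locking wrapper pattern (the counter type is invented for illustration):

package example

import "sync"

// lockedCounter shows the same wrapping discipline as the new
// chain_info_forkchoice.go helpers: the type owns its lock and every
// exported method acquires it, so callers never lock explicitly.
type lockedCounter struct {
	mu sync.RWMutex
	n  uint64
}

// Inc is a write path, so it takes the full lock.
func (c *lockedCounter) Inc() {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.n++
}

// Value is a read path, so the read lock is enough.
func (c *lockedCounter) Value() uint64 {
	c.mu.RLock()
	defer c.mu.RUnlock()
	return c.n
}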
@@ -4,12 +4,14 @@ import (
 	"context"
 	"testing"
 
-	testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
-	"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
-	"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
-	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
-	"github.com/prysmaticlabs/prysm/testing/require"
-	"github.com/prysmaticlabs/prysm/testing/util"
+	testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
+	doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
+	"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
+	"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
+	"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
+	ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
+	"github.com/prysmaticlabs/prysm/v4/testing/require"
+	"github.com/prysmaticlabs/prysm/v4/testing/util"
 )
 
 func TestHeadSlot_DataRace(t *testing.T) {
@@ -17,7 +19,7 @@ func TestHeadSlot_DataRace(t *testing.T) {
 	s := &Service{
 		cfg: &config{BeaconDB: beaconDB},
 	}
-	b, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
+	b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
 	require.NoError(t, err)
 	st, _ := util.DeterministicGenesisState(t, 1)
 	wait := make(chan struct{})
@@ -32,10 +34,10 @@ func TestHeadRoot_DataRace(t *testing.T) {
 	beaconDB := testDB.SetupDB(t)
 	s := &Service{
-		cfg:  &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
+		cfg:  &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB, doublylinkedtree.New())},
 		head: &head{root: [32]byte{'A'}},
 	}
-	b, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
+	b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
 	require.NoError(t, err)
 	wait := make(chan struct{})
 	st, _ := util.DeterministicGenesisState(t, 1)
@@ -51,13 +53,13 @@ func TestHeadRoot_DataRace(t *testing.T) {
 
 func TestHeadBlock_DataRace(t *testing.T) {
 	beaconDB := testDB.SetupDB(t)
-	wsb, err := wrapper.WrappedSignedBeaconBlock(&ethpb.SignedBeaconBlock{})
+	wsb, err := blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}})
 	require.NoError(t, err)
 	s := &Service{
-		cfg:  &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
+		cfg:  &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB, doublylinkedtree.New())},
 		head: &head{block: wsb},
 	}
-	b, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
+	b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
 	require.NoError(t, err)
 	wait := make(chan struct{})
 	st, _ := util.DeterministicGenesisState(t, 1)
@@ -74,12 +76,15 @@ func TestHeadBlock_DataRace(t *testing.T) {
 func TestHeadState_DataRace(t *testing.T) {
 	beaconDB := testDB.SetupDB(t)
 	s := &Service{
-		cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
+		cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB, doublylinkedtree.New())},
 	}
-	b, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
+	b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
 	require.NoError(t, err)
 	wait := make(chan struct{})
 	st, _ := util.DeterministicGenesisState(t, 1)
+	root := bytesutil.ToBytes32(bytesutil.PadTo([]byte{'s'}, 32))
+	require.NoError(t, beaconDB.SaveGenesisBlockRoot(context.Background(), root))
+	require.NoError(t, beaconDB.SaveState(context.Background(), st, root))
 	go func() {
 		defer close(wait)
 		require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))
@@ -5,22 +5,21 @@ import (
	"testing"
	"time"

-	"github.com/prysmaticlabs/prysm/beacon-chain/blockchain/store"
-	testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
-	doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
-	"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
-	"github.com/prysmaticlabs/prysm/beacon-chain/state"
-	v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
-	v3 "github.com/prysmaticlabs/prysm/beacon-chain/state/v3"
-	fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
-	"github.com/prysmaticlabs/prysm/config/params"
-	types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
-	"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
-	"github.com/prysmaticlabs/prysm/encoding/bytesutil"
-	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
-	"github.com/prysmaticlabs/prysm/testing/assert"
-	"github.com/prysmaticlabs/prysm/testing/require"
-	"github.com/prysmaticlabs/prysm/testing/util"
+	testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
+	doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
+	forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
+	"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
+	state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
+	fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
+	"github.com/prysmaticlabs/prysm/v4/config/params"
+	"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
+	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
+	"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
+	enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
+	ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
+	"github.com/prysmaticlabs/prysm/v4/testing/assert"
+	"github.com/prysmaticlabs/prysm/v4/testing/require"
+	"github.com/prysmaticlabs/prysm/v4/testing/util"

	"google.golang.org/protobuf/proto"
 )

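Most of the churn across these files is the Go module-path bump: every internal package moves from `github.com/prysmaticlabs/prysm/...` to `github.com/prysmaticlabs/prysm/v4/...`. A minimal sketch of what that looks like in any consumer file (package name and call sites are unchanged; only the import path gains the major-version suffix):

```go
package example

import (
	// before the bump (for comparison only):
	// "github.com/prysmaticlabs/prysm/encoding/bytesutil"

	// after the bump: same package, now under the /v4 module path
	"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
)

// Call sites do not change with the path bump.
var zeroRoot = bytesutil.ToBytes32(nil)
```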
@@ -33,41 +32,33 @@ var _ ForkFetcher = (*Service)(nil)
 // insert into forkchoice
 func prepareForkchoiceState(
	_ context.Context,
-	slot types.Slot,
+	slot primitives.Slot,
	blockRoot [32]byte,
	parentRoot [32]byte,
	payloadHash [32]byte,
-	justifiedEpoch types.Epoch,
-	finalizedEpoch types.Epoch,
+	justified *ethpb.Checkpoint,
+	finalized *ethpb.Checkpoint,
 ) (state.BeaconState, [32]byte, error) {
	blockHeader := &ethpb.BeaconBlockHeader{
		ParentRoot: parentRoot[:],
	}

-	executionHeader := &ethpb.ExecutionPayloadHeader{
+	executionHeader := &enginev1.ExecutionPayloadHeader{
		BlockHash: payloadHash[:],
	}

-	justifiedCheckpoint := &ethpb.Checkpoint{
-		Epoch: justifiedEpoch,
-	}
-
-	finalizedCheckpoint := &ethpb.Checkpoint{
-		Epoch: finalizedEpoch,
-	}
-
	base := &ethpb.BeaconStateBellatrix{
		Slot: slot,
		RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
		BlockRoots: make([][]byte, 1),
-		CurrentJustifiedCheckpoint: justifiedCheckpoint,
-		FinalizedCheckpoint: finalizedCheckpoint,
+		CurrentJustifiedCheckpoint: justified,
+		FinalizedCheckpoint: finalized,
		LatestExecutionPayloadHeader: executionHeader,
		LatestBlockHeader: blockHeader,
	}

	base.BlockRoots[0] = append(base.BlockRoots[0], blockRoot[:]...)
-	st, err := v3.InitializeFromProto(base)
+	st, err := state_native.InitializeFromProtoBellatrix(base)
	return st, blockRoot, err
 }

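The test helper now takes full `*ethpb.Checkpoint` values instead of bare epochs, which is how the rewritten tests below drive forkchoice. A minimal sketch of the new call shape, mirroring the tests (assumed in scope: `t *testing.T`, `ctx context.Context`, and a forkchoice store `fcs` such as `doublylinkedtree.New()`; the names are for illustration only):

```go
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}

// Build a one-block Bellatrix state rooted at {'a'} and register it in forkchoice.
st, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, st, blkRoot))
```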
@@ -79,118 +70,106 @@ func TestHeadRoot_Nil(t *testing.T) {
	assert.DeepEqual(t, params.BeaconConfig().ZeroHash[:], headRoot, "Incorrect pre chain start value")
 }

-func TestService_ForkChoiceStore(t *testing.T) {
-	c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New(0, 0)}}
-	p := c.ForkChoiceStore()
-	require.Equal(t, 0, int(p.FinalizedEpoch()))
-}
-
-func TestFinalizedCheckpt_CanRetrieve(t *testing.T) {
-	beaconDB := testDB.SetupDB(t)
-
-	cp := &ethpb.Checkpoint{Epoch: 5, Root: bytesutil.PadTo([]byte("foo"), 32)}
-	c := setupBeaconChain(t, beaconDB)
-	c.store.SetFinalizedCheckptAndPayloadHash(cp, [32]byte{'a'})
-
-	cp, err := c.FinalizedCheckpt()
-	require.NoError(t, err)
-	assert.Equal(t, cp.Epoch, cp.Epoch, "Unexpected finalized epoch")
-}
-
 func TestFinalizedCheckpt_GenesisRootOk(t *testing.T) {
-	beaconDB := testDB.SetupDB(t)
-
-	genesisRoot := [32]byte{'A'}
-	cp := &ethpb.Checkpoint{Root: genesisRoot[:]}
-	c := setupBeaconChain(t, beaconDB)
-	c.store.SetFinalizedCheckptAndPayloadHash(cp, [32]byte{'a'})
-	c.originBlockRoot = genesisRoot
-	cp, err := c.FinalizedCheckpt()
+	service, tr := minimalTestService(t)
+	ctx, fcs := tr.ctx, tr.fcs
+
+	gs, _ := util.DeterministicGenesisState(t, 32)
+	require.NoError(t, service.saveGenesisData(ctx, gs))
+	cp := service.FinalizedCheckpt()
+	assert.DeepEqual(t, [32]byte{}, bytesutil.ToBytes32(cp.Root))
+	cp = service.CurrentJustifiedCheckpt()
+	assert.DeepEqual(t, [32]byte{}, bytesutil.ToBytes32(cp.Root))
+	// check that forkchoice has the right genesis root as the node root
+	root, err := fcs.Head(ctx)
	require.NoError(t, err)
-	assert.DeepEqual(t, c.originBlockRoot[:], cp.Root)
+	require.Equal(t, service.originBlockRoot, root)
 }

 func TestCurrentJustifiedCheckpt_CanRetrieve(t *testing.T) {
-	beaconDB := testDB.SetupDB(t)
-
-	c := setupBeaconChain(t, beaconDB)
-	_, err := c.CurrentJustifiedCheckpt()
-	require.ErrorIs(t, err, store.ErrNilCheckpoint)
-	cp := &ethpb.Checkpoint{Epoch: 6, Root: bytesutil.PadTo([]byte("foo"), 32)}
-	c.store.SetJustifiedCheckptAndPayloadHash(cp, [32]byte{})
-	jp, err := c.CurrentJustifiedCheckpt()
-	require.NoError(t, err)
+	service, tr := minimalTestService(t)
+	ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs
+
+	jroot := [32]byte{'j'}
+	cp := &forkchoicetypes.Checkpoint{Epoch: 6, Root: jroot}
+	bState, _ := util.DeterministicGenesisState(t, 10)
+	require.NoError(t, beaconDB.SaveState(ctx, bState, jroot))
+	require.NoError(t, fcs.UpdateJustifiedCheckpoint(ctx, cp))
+	jp := service.CurrentJustifiedCheckpt()
	assert.Equal(t, cp.Epoch, jp.Epoch, "Unexpected justified epoch")
+	require.Equal(t, cp.Root, bytesutil.ToBytes32(jp.Root))
 }

-func TestJustifiedCheckpt_GenesisRootOk(t *testing.T) {
-	beaconDB := testDB.SetupDB(t)
-
-	c := setupBeaconChain(t, beaconDB)
-	genesisRoot := [32]byte{'B'}
-	cp := &ethpb.Checkpoint{Root: genesisRoot[:]}
-	c.store.SetJustifiedCheckptAndPayloadHash(cp, [32]byte{})
-	c.originBlockRoot = genesisRoot
-	cp, err := c.CurrentJustifiedCheckpt()
-	require.NoError(t, err)
-	assert.DeepEqual(t, c.originBlockRoot[:], cp.Root)
+func TestFinalizedBlockHash(t *testing.T) {
+	service, tr := minimalTestService(t)
+	ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs
+
+	r := [32]byte{'f'}
+	cp := &forkchoicetypes.Checkpoint{Epoch: 6, Root: r}
+	bState, _ := util.DeterministicGenesisState(t, 10)
+	require.NoError(t, beaconDB.SaveState(ctx, bState, r))
+	require.NoError(t, fcs.UpdateFinalizedCheckpoint(cp))
+	h := service.FinalizedBlockHash()
+	require.Equal(t, params.BeaconConfig().ZeroHash, h)
+	require.Equal(t, r, fcs.FinalizedCheckpoint().Root)
 }

-func TestPreviousJustifiedCheckpt_CanRetrieve(t *testing.T) {
-	beaconDB := testDB.SetupDB(t)
-
-	cp := &ethpb.Checkpoint{Epoch: 7, Root: bytesutil.PadTo([]byte("foo"), 32)}
-	c := setupBeaconChain(t, beaconDB)
-	_, err := c.PreviousJustifiedCheckpt()
-	require.ErrorIs(t, err, store.ErrNilCheckpoint)
-	c.store.SetPrevJustifiedCheckpt(cp)
-	pcp, err := c.PreviousJustifiedCheckpt()
+func TestUnrealizedJustifiedBlockHash(t *testing.T) {
+	ctx := context.Background()
+	service := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}}
+	ojc := &ethpb.Checkpoint{Root: []byte{'j'}}
+	ofc := &ethpb.Checkpoint{Root: []byte{'f'}}
+	st, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
	require.NoError(t, err)
-	assert.Equal(t, cp.Epoch, pcp.Epoch, "Unexpected previous justified epoch")
-}
-
-func TestPrevJustifiedCheckpt_GenesisRootOk(t *testing.T) {
-	beaconDB := testDB.SetupDB(t)
-
-	genesisRoot := [32]byte{'C'}
-	cp := &ethpb.Checkpoint{Root: genesisRoot[:]}
-	c := setupBeaconChain(t, beaconDB)
-	c.store.SetPrevJustifiedCheckpt(cp)
-	c.originBlockRoot = genesisRoot
-	pcp, err := c.PreviousJustifiedCheckpt()
-	require.NoError(t, err)
-	assert.DeepEqual(t, c.originBlockRoot[:], pcp.Root)
+	require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
+	service.cfg.ForkChoiceStore.SetBalancesByRooter(func(_ context.Context, _ [32]byte) ([]uint64, error) { return []uint64{}, nil })
+	require.NoError(t, service.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(ctx, &forkchoicetypes.Checkpoint{Epoch: 6, Root: [32]byte{'j'}}))
+
+	h := service.UnrealizedJustifiedPayloadBlockHash()
+	require.Equal(t, params.BeaconConfig().ZeroHash, h)
+	require.Equal(t, [32]byte{'j'}, service.cfg.ForkChoiceStore.JustifiedCheckpoint().Root)
 }

 func TestHeadSlot_CanRetrieve(t *testing.T) {
	c := &Service{}
-	s, err := v1.InitializeFromProto(&ethpb.BeaconState{})
+	s, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{})
	require.NoError(t, err)
-	c.head = &head{slot: 100, state: s}
-	assert.Equal(t, types.Slot(100), c.HeadSlot())
+	b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
+	require.NoError(t, err)
+	b.SetSlot(100)
+	c.head = &head{block: b, state: s}
+	assert.Equal(t, primitives.Slot(100), c.HeadSlot())
 }

 func TestHeadRoot_CanRetrieve(t *testing.T) {
-	c := &Service{}
-	c.head = &head{root: [32]byte{'A'}}
-	r, err := c.HeadRoot(context.Background())
+	service, tr := minimalTestService(t)
+	ctx := tr.ctx
+
+	gs, _ := util.DeterministicGenesisState(t, 32)
+	require.NoError(t, service.saveGenesisData(ctx, gs))
+
+	r, err := service.HeadRoot(ctx)
	require.NoError(t, err)
-	assert.Equal(t, [32]byte{'A'}, bytesutil.ToBytes32(r))
+	assert.Equal(t, service.originBlockRoot, bytesutil.ToBytes32(r))
 }

 func TestHeadRoot_UseDB(t *testing.T) {
-	beaconDB := testDB.SetupDB(t)
-	c := &Service{cfg: &config{BeaconDB: beaconDB}}
-	c.head = &head{root: params.BeaconConfig().ZeroHash}
+	service, tr := minimalTestService(t)
+	ctx, beaconDB := tr.ctx, tr.db
+
+	service.head = &head{root: params.BeaconConfig().ZeroHash}
	b := util.NewBeaconBlock()
	br, err := b.Block.HashTreeRoot()
	require.NoError(t, err)
-	wsb, err := wrapper.WrappedSignedBeaconBlock(b)
+	wsb, err := blocks.NewSignedBeaconBlock(b)
	require.NoError(t, err)
-	require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb))
-	require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: br[:]}))
-	require.NoError(t, beaconDB.SaveHeadBlockRoot(context.Background(), br))
-	r, err := c.HeadRoot(context.Background())
+	require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
+	require.NoError(t, beaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: br[:]}))
+	require.NoError(t, beaconDB.SaveHeadBlockRoot(ctx, br))
+	r, err := service.HeadRoot(ctx)
	require.NoError(t, err)
	assert.Equal(t, br, bytesutil.ToBytes32(r))
 }

@@ -198,26 +177,28 @@ func TestHeadRoot_UseDB(t *testing.T) {
 func TestHeadBlock_CanRetrieve(t *testing.T) {
	b := util.NewBeaconBlock()
	b.Block.Slot = 1
-	s, err := v1.InitializeFromProto(&ethpb.BeaconState{})
+	s, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{})
	require.NoError(t, err)
-	wsb, err := wrapper.WrappedSignedBeaconBlock(b)
+	wsb, err := blocks.NewSignedBeaconBlock(b)
	require.NoError(t, err)
	c := &Service{}
	c.head = &head{block: wsb, state: s}

-	recevied, err := c.HeadBlock(context.Background())
+	received, err := c.HeadBlock(context.Background())
	require.NoError(t, err)
-	assert.DeepEqual(t, b, recevied.Proto(), "Incorrect head block received")
+	pb, err := received.Proto()
+	require.NoError(t, err)
+	assert.DeepEqual(t, b, pb, "Incorrect head block received")
 }

 func TestHeadState_CanRetrieve(t *testing.T) {
-	s, err := v1.InitializeFromProto(&ethpb.BeaconState{Slot: 2, GenesisValidatorsRoot: params.BeaconConfig().ZeroHash[:]})
+	s, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Slot: 2, GenesisValidatorsRoot: params.BeaconConfig().ZeroHash[:]})
	require.NoError(t, err)
	c := &Service{}
	c.head = &head{state: s}
	headState, err := c.HeadState(context.Background())
	require.NoError(t, err)
-	assert.DeepEqual(t, headState.InnerStateUnsafe(), s.InnerStateUnsafe(), "Incorrect head state received")
+	assert.DeepEqual(t, headState.ToProtoUnsafe(), s.ToProtoUnsafe(), "Incorrect head state received")
 }

 func TestGenesisTime_CanRetrieve(t *testing.T) {

@@ -228,7 +209,7 @@ func TestGenesisTime_CanRetrieve(t *testing.T) {

 func TestCurrentFork_CanRetrieve(t *testing.T) {
	f := &ethpb.Fork{Epoch: 999}
-	s, err := v1.InitializeFromProto(&ethpb.BeaconState{Fork: f})
+	s, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Fork: f})
	require.NoError(t, err)
	c := &Service{}
	c.head = &head{state: s}

@@ -253,7 +234,7 @@ func TestGenesisValidatorsRoot_CanRetrieve(t *testing.T) {
	c := &Service{}
	assert.Equal(t, [32]byte{}, c.GenesisValidatorsRoot(), "Did not get correct genesis validators root")

-	s, err := v1.InitializeFromProto(&ethpb.BeaconState{GenesisValidatorsRoot: []byte{'a'}})
+	s, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{GenesisValidatorsRoot: []byte{'a'}})
	require.NoError(t, err)
	c.head = &head{state: s}
	assert.Equal(t, [32]byte{'a'}, c.GenesisValidatorsRoot(), "Did not get correct genesis validators root")

@@ -267,7 +248,7 @@ func TestHeadETH1Data_Nil(t *testing.T) {

 func TestHeadETH1Data_CanRetrieve(t *testing.T) {
	d := &ethpb.Eth1Data{DepositCount: 999}
-	s, err := v1.InitializeFromProto(&ethpb.BeaconState{Eth1Data: d})
+	s, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Eth1Data: d})
	require.NoError(t, err)
	c := &Service{}
	c.head = &head{state: s}

@@ -285,9 +266,7 @@ func TestIsCanonical_Ok(t *testing.T) {
	blk.Block.Slot = 0
	root, err := blk.Block.HashTreeRoot()
	require.NoError(t, err)
-	wsb, err := wrapper.WrappedSignedBeaconBlock(blk)
-	require.NoError(t, err)
-	require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
+	util.SaveBlock(t, ctx, beaconDB, blk)
	require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, root))
	can, err := c.IsCanonical(ctx, root)
	require.NoError(t, err)

@@ -325,52 +304,40 @@ func TestService_HeadGenesisValidatorsRoot(t *testing.T) {
	root = c.HeadGenesisValidatorsRoot()
	require.DeepEqual(t, root[:], s.GenesisValidatorsRoot())
 }
-func TestService_ChainHeads_ProtoArray(t *testing.T) {
-	ctx := context.Background()
-	c := &Service{cfg: &config{ForkChoiceStore: protoarray.New(0, 0)}}
-	state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
-	require.NoError(t, err)
-	require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
-	state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 0, 0)
-	require.NoError(t, err)
-	require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
-	state, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 0, 0)
-	require.NoError(t, err)
-	require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
-	state, blkRoot, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
-	require.NoError(t, err)
-	require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
-	state, blkRoot, err = prepareForkchoiceState(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 0, 0)
-	require.NoError(t, err)
-	require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
-
-	roots, slots := c.ChainHeads()
-	require.DeepEqual(t, [][32]byte{{'c'}, {'d'}, {'e'}}, roots)
-	require.DeepEqual(t, []types.Slot{102, 103, 104}, slots)
-}

-func TestService_ChainHeads_DoublyLinkedTree(t *testing.T) {
+//
+//  A <- B <- C
+//   \    \
+//    \    ---------- E
+//     ---------- D
+func TestService_ChainHeads(t *testing.T) {
	ctx := context.Background()
-	c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New(0, 0)}}
-	state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
+	c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}}
+	ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
+	ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
+	st, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
	require.NoError(t, err)
-	require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
-	state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 0, 0)
+	require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
+	st, blkRoot, err = prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
	require.NoError(t, err)
-	require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
-	state, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 0, 0)
+	require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
+	st, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
	require.NoError(t, err)
-	require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
-	state, blkRoot, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
+	require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
+	st, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, ojc, ofc)
	require.NoError(t, err)
-	require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
-	state, blkRoot, err = prepareForkchoiceState(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 0, 0)
+	require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
+	st, blkRoot, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
	require.NoError(t, err)
-	require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
+	require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
+	st, blkRoot, err = prepareForkchoiceState(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, ojc, ofc)
+	require.NoError(t, err)
+	require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))

	roots, slots := c.ChainHeads()
	require.Equal(t, 3, len(roots))
-	rootMap := map[[32]byte]types.Slot{[32]byte{'c'}: 102, [32]byte{'d'}: 103, [32]byte{'e'}: 104}
+	rootMap := map[[32]byte]primitives.Slot{{'c'}: 102, {'d'}: 103, {'e'}: 104}
	for i, root := range roots {
		slot, ok := rootMap[root]
		require.Equal(t, true, ok)

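The consolidated test builds the small fork tree sketched in the new comment and checks that `ChainHeads` reports the three leaves (c, d, e) with their slots. For readers unfamiliar with the API, a hedged sketch of consuming that pair outside a test (a configured `*Service` named `s` is assumed; `fmt` is from the standard library):

```go
roots, slots := s.ChainHeads()
for i, root := range roots {
	// Each entry is a viable fork head known to forkchoice and its slot.
	fmt.Printf("fork head %#x at slot %d\n", root, slots[i])
}
```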
@@ -391,7 +358,7 @@ func TestService_HeadPublicKeyToValidatorIndex(t *testing.T) {

	i, e := c.HeadPublicKeyToValidatorIndex(bytesutil.ToBytes48(v.PublicKey))
	require.Equal(t, true, e)
-	require.Equal(t, types.ValidatorIndex(0), i)
+	require.Equal(t, primitives.ValidatorIndex(0), i)
 }

 func TestService_HeadPublicKeyToValidatorIndexNil(t *testing.T) {

@@ -400,12 +367,12 @@ func TestService_HeadPublicKeyToValidatorIndexNil(t *testing.T) {

	idx, e := c.HeadPublicKeyToValidatorIndex([fieldparams.BLSPubkeyLength]byte{})
	require.Equal(t, false, e)
-	require.Equal(t, types.ValidatorIndex(0), idx)
+	require.Equal(t, primitives.ValidatorIndex(0), idx)

	c.head = &head{state: nil}
	i, e := c.HeadPublicKeyToValidatorIndex([fieldparams.BLSPubkeyLength]byte{})
	require.Equal(t, false, e)
-	require.Equal(t, types.ValidatorIndex(0), i)
+	require.Equal(t, primitives.ValidatorIndex(0), i)
 }

 func TestService_HeadValidatorIndexToPublicKey(t *testing.T) {

@@ -436,40 +403,22 @@ func TestService_HeadValidatorIndexToPublicKeyNil(t *testing.T) {
	require.Equal(t, [fieldparams.BLSPubkeyLength]byte{}, p)
 }

-func TestService_IsOptimistic_ProtoArray(t *testing.T) {
+func TestService_IsOptimistic(t *testing.T) {
	params.SetupTestConfigCleanup(t)
	cfg := params.BeaconConfig()
	cfg.BellatrixForkEpoch = 0
	params.OverrideBeaconConfig(cfg)

	ctx := context.Background()
-	c := &Service{cfg: &config{ForkChoiceStore: protoarray.New(0, 0)}, head: &head{slot: 101, root: [32]byte{'b'}}}
-	state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
+	ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
+	ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
+	c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
+	st, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
	require.NoError(t, err)
-	require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
-	state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 0, 0)
+	require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
+	st, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
	require.NoError(t, err)
-	require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
-
-	opt, err := c.IsOptimistic(ctx)
-	require.NoError(t, err)
-	require.Equal(t, true, opt)
-}
-
-func TestService_IsOptimistic_DoublyLinkedTree(t *testing.T) {
-	params.SetupTestConfigCleanup(t)
-	cfg := params.BeaconConfig()
-	cfg.BellatrixForkEpoch = 0
-	params.OverrideBeaconConfig(cfg)
-
-	ctx := context.Background()
-	c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New(0, 0)}, head: &head{slot: 101, root: [32]byte{'b'}}}
-	state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
-	require.NoError(t, err)
-	require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
-	state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 0, 0)
-	require.NoError(t, err)
-	require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
+	require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))

	opt, err := c.IsOptimistic(ctx)
	require.NoError(t, err)

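With the proto-array store removed, the two per-store variants collapse into a single doubly-linked-tree test. Outside of tests, a hedged sketch of how a caller might consult the same API (the `s` service variable and the surrounding handler are illustrative only):

```go
opt, err := s.IsOptimistic(ctx)
if err != nil {
	return err
}
if opt {
	// The head has not been fully validated by the execution engine yet;
	// callers may want to mark their responses as optimistic.
}
```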
@@ -484,143 +433,54 @@ func TestService_IsOptimisticBeforeBellatrix(t *testing.T) {
	require.Equal(t, false, opt)
 }

-func TestService_IsOptimisticForRoot_ProtoArray(t *testing.T) {
+func TestService_IsOptimisticForRoot(t *testing.T) {
	ctx := context.Background()
-	c := &Service{cfg: &config{ForkChoiceStore: protoarray.New(0, 0)}, head: &head{slot: 101, root: [32]byte{'b'}}}
-	state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
+	c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
+	ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
+	ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
+	st, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
	require.NoError(t, err)
-	require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
-	state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 0, 0)
+	require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
+	st, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
	require.NoError(t, err)
-	require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
+	require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))

	opt, err := c.IsOptimisticForRoot(ctx, [32]byte{'a'})
	require.NoError(t, err)
	require.Equal(t, true, opt)
 }

-func TestService_IsOptimisticForRoot_DoublyLinkedTree(t *testing.T) {
-	ctx := context.Background()
-	c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New(0, 0)}, head: &head{slot: 101, root: [32]byte{'b'}}}
-	state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
-	require.NoError(t, err)
-	require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
-	state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 0, 0)
-	require.NoError(t, err)
-	require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
-
-	opt, err := c.IsOptimisticForRoot(ctx, [32]byte{'a'})
-	require.NoError(t, err)
-	require.Equal(t, true, opt)
-}
-
-func TestService_IsOptimisticForRoot_DB_ProtoArray(t *testing.T) {
+func TestService_IsOptimisticForRoot_DB(t *testing.T) {
	beaconDB := testDB.SetupDB(t)
	ctx := context.Background()
-	c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0)}, head: &head{slot: 101, root: [32]byte{'b'}}}
+	c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
	c.head = &head{root: params.BeaconConfig().ZeroHash}
	b := util.NewBeaconBlock()
	b.Block.Slot = 10
	br, err := b.Block.HashTreeRoot()
	require.NoError(t, err)
-	wsb, err := wrapper.WrappedSignedBeaconBlock(b)
-	require.NoError(t, err)
-	require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb))
+	util.SaveBlock(t, context.Background(), beaconDB, b)
	require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: br[:], Slot: 10}))

	optimisticBlock := util.NewBeaconBlock()
	optimisticBlock.Block.Slot = 97
	optimisticRoot, err := optimisticBlock.Block.HashTreeRoot()
	require.NoError(t, err)
-	wsb, err = wrapper.WrappedSignedBeaconBlock(optimisticBlock)
-	require.NoError(t, err)
-	require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb))
+	util.SaveBlock(t, context.Background(), beaconDB, optimisticBlock)

	validatedBlock := util.NewBeaconBlock()
	validatedBlock.Block.Slot = 9
	validatedRoot, err := validatedBlock.Block.HashTreeRoot()
	require.NoError(t, err)
-	wsb, err = wrapper.WrappedSignedBeaconBlock(validatedBlock)
-	require.NoError(t, err)
-	require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb))
+	util.SaveBlock(t, context.Background(), beaconDB, validatedBlock)

	validatedCheckpoint := &ethpb.Checkpoint{Root: br[:]}
	require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint))

-	_, err = c.IsOptimisticForRoot(ctx, optimisticRoot)
-	require.ErrorContains(t, "nil summary returned from the DB", err)
-
-	require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: optimisticRoot[:], Slot: 11}))
	optimistic, err := c.IsOptimisticForRoot(ctx, optimisticRoot)
	require.NoError(t, err)
	require.Equal(t, true, optimistic)

-	require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: validatedRoot[:], Slot: 9}))
-	cp := &ethpb.Checkpoint{
-		Epoch: 1,
-		Root:  validatedRoot[:],
-	}
-	require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, validatedRoot))
-	require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, cp))
-
-	validated, err := c.IsOptimisticForRoot(ctx, validatedRoot)
-	require.NoError(t, err)
-	require.Equal(t, false, validated)
-
-	// Before the first finalized epoch, finalized root could be zeros.
-	validatedCheckpoint = &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
-	require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, br))
-	require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: params.BeaconConfig().ZeroHash[:], Slot: 10}))
-	require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint))
-
-	require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: optimisticRoot[:], Slot: 11}))
-	optimistic, err = c.IsOptimisticForRoot(ctx, optimisticRoot)
-	require.NoError(t, err)
-	require.Equal(t, true, optimistic)
-}
-
-func TestService_IsOptimisticForRoot_DB_DoublyLinkedTree(t *testing.T) {
-	beaconDB := testDB.SetupDB(t)
-	ctx := context.Background()
-	c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New(0, 0)}, head: &head{slot: 101, root: [32]byte{'b'}}}
-	c.head = &head{root: params.BeaconConfig().ZeroHash}
-	b := util.NewBeaconBlock()
-	b.Block.Slot = 10
-	br, err := b.Block.HashTreeRoot()
-	require.NoError(t, err)
-	wsb, err := wrapper.WrappedSignedBeaconBlock(b)
-	require.NoError(t, err)
-	require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb))
-	require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: br[:], Slot: 10}))
-
-	optimisticBlock := util.NewBeaconBlock()
-	optimisticBlock.Block.Slot = 97
-	optimisticRoot, err := optimisticBlock.Block.HashTreeRoot()
-	require.NoError(t, err)
-	wsb, err = wrapper.WrappedSignedBeaconBlock(optimisticBlock)
-	require.NoError(t, err)
-	require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb))
-
-	validatedBlock := util.NewBeaconBlock()
-	validatedBlock.Block.Slot = 9
-	validatedRoot, err := validatedBlock.Block.HashTreeRoot()
-	require.NoError(t, err)
-	wsb, err = wrapper.WrappedSignedBeaconBlock(validatedBlock)
-	require.NoError(t, err)
-	require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb))
-
-	validatedCheckpoint := &ethpb.Checkpoint{Root: br[:]}
-	require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint))
-
-	_, err = c.IsOptimisticForRoot(ctx, optimisticRoot)
-	require.ErrorContains(t, "nil summary returned from the DB", err)
-
-	require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: optimisticRoot[:], Slot: 11}))
-	optimistic, err := c.IsOptimisticForRoot(ctx, optimisticRoot)
-	require.NoError(t, err)
-	require.Equal(t, true, optimistic)
-
-	require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: validatedRoot[:], Slot: 9}))
	cp := &ethpb.Checkpoint{
		Epoch: 1,
		Root:  validatedRoot[:],

@@ -646,32 +506,26 @@ func TestService_IsOptimisticForRoot_DB_DoublyLinkedTree(t *testing.T) {
 func TestService_IsOptimisticForRoot_DB_non_canonical(t *testing.T) {
	beaconDB := testDB.SetupDB(t)
	ctx := context.Background()
-	c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New(0, 0)}, head: &head{slot: 101, root: [32]byte{'b'}}}
+	c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
	c.head = &head{root: params.BeaconConfig().ZeroHash}
	b := util.NewBeaconBlock()
	b.Block.Slot = 10
	br, err := b.Block.HashTreeRoot()
	require.NoError(t, err)
-	wsb, err := wrapper.WrappedSignedBeaconBlock(b)
-	require.NoError(t, err)
-	require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb))
+	util.SaveBlock(t, context.Background(), beaconDB, b)
	require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: br[:], Slot: 10}))

	optimisticBlock := util.NewBeaconBlock()
	optimisticBlock.Block.Slot = 97
	optimisticRoot, err := optimisticBlock.Block.HashTreeRoot()
	require.NoError(t, err)
-	wsb, err = wrapper.WrappedSignedBeaconBlock(optimisticBlock)
-	require.NoError(t, err)
-	require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb))
+	util.SaveBlock(t, context.Background(), beaconDB, optimisticBlock)

	validatedBlock := util.NewBeaconBlock()
	validatedBlock.Block.Slot = 9
	validatedRoot, err := validatedBlock.Block.HashTreeRoot()
	require.NoError(t, err)
-	wsb, err = wrapper.WrappedSignedBeaconBlock(validatedBlock)
-	require.NoError(t, err)
-	require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb))
+	util.SaveBlock(t, context.Background(), beaconDB, validatedBlock)

	validatedCheckpoint := &ethpb.Checkpoint{Root: br[:]}
	require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint))

@@ -687,3 +541,44 @@ func TestService_IsOptimisticForRoot_DB_non_canonical(t *testing.T) {
	require.Equal(t, true, validated)
 }
+
+func TestService_IsOptimisticForRoot_StateSummaryRecovered(t *testing.T) {
+	beaconDB := testDB.SetupDB(t)
+	ctx := context.Background()
+	c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
+	c.head = &head{root: params.BeaconConfig().ZeroHash}
+	b := util.NewBeaconBlock()
+	b.Block.Slot = 10
+	br, err := b.Block.HashTreeRoot()
+	require.NoError(t, err)
+	util.SaveBlock(t, context.Background(), beaconDB, b)
+	_, err = c.IsOptimisticForRoot(ctx, br)
+	assert.NoError(t, err)
+	summ, err := beaconDB.StateSummary(ctx, br)
+	assert.NoError(t, err)
+	assert.NotNil(t, summ)
+	assert.Equal(t, 10, int(summ.Slot))
+	assert.DeepEqual(t, br[:], summ.Root)
+}
+
+func TestService_IsFinalized(t *testing.T) {
+	beaconDB := testDB.SetupDB(t)
+	ctx := context.Background()
+	c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New()}}
+	r1 := [32]byte{'a'}
+	require.NoError(t, c.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{
+		Root: r1,
+	}))
+	b := util.NewBeaconBlock()
+	br, err := b.Block.HashTreeRoot()
+	require.NoError(t, err)
+	util.SaveBlock(t, ctx, beaconDB, b)
+	require.NoError(t, beaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: br[:], Slot: 10}))
+	require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, br))
+	require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{
+		Root: br[:],
+	}))
+	require.Equal(t, true, c.IsFinalized(ctx, r1))
+	require.Equal(t, true, c.IsFinalized(ctx, br))
+	require.Equal(t, false, c.IsFinalized(ctx, [32]byte{'c'}))
+}

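Two tests are added: one covers recovering a missing state summary on demand, the other the new `IsFinalized` check, which the test exercises against both the forkchoice finalized root and a root only known to the DB. A hedged sketch of the calling pattern outside a test (a `*Service` named `s` is assumed; the pruning helper is hypothetical and only marks where such a check would sit):

```go
if s.IsFinalized(ctx, root) {
	// root is finalized (per forkchoice or the DB), so work that only
	// applies to non-finalized forks can be skipped or pruned here.
	pruneForksNotDescendingFrom(root) // hypothetical helper
}
```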
@@ -1,5 +1,4 @@
 //go:build !develop
-// +build !develop

 package blockchain

@@ -4,31 +4,32 @@ import "github.com/pkg/errors"

 var (
	// ErrInvalidPayload is returned when the payload is invalid
-	ErrInvalidPayload = errors.New("recevied an INVALID payload from execution engine")
+	ErrInvalidPayload = invalidBlock{error: errors.New("received an INVALID payload from execution engine")}
+	// ErrInvalidBlockHashPayloadStatus is returned when the payload has invalid block hash.
+	ErrInvalidBlockHashPayloadStatus = invalidBlock{error: errors.New("received an INVALID_BLOCK_HASH payload from execution engine")}
	// ErrUndefinedExecutionEngineError is returned when the execution engine returns an error that is not defined
-	ErrUndefinedExecutionEngineError = errors.New("received an undefined ee error")
+	ErrUndefinedExecutionEngineError = errors.New("received an undefined execution engine error")
	// errNilFinalizedInStore is returned when a nil finalized checkpt is returned from store.
	errNilFinalizedInStore = errors.New("nil finalized checkpoint returned from store")
	// errNilFinalizedCheckpoint is returned when a nil finalized checkpt is returned from a state.
	errNilFinalizedCheckpoint = errors.New("nil finalized checkpoint returned from state")
	// errNilJustifiedCheckpoint is returned when a nil justified checkpt is returned from a state.
-	errNilJustifiedCheckpoint = errors.New("nil finalized checkpoint returned from state")
+	errNilJustifiedCheckpoint = errors.New("nil justified checkpoint returned from state")
-	// errInvalidNilSummary is returned when a nil summary is returned from the DB.
-	errInvalidNilSummary = errors.New("nil summary returned from the DB")
+	// errBlockDoesNotExist is returned when a block does not exist for a particular state summary.
+	errBlockDoesNotExist = errors.New("could not find block in DB")
	// errWrongBlockCount is returned when the wrong number of blocks or block roots is used
	errWrongBlockCount = errors.New("wrong number of blocks or block roots")
-	// block is not a valid optimistic candidate block
-	errNotOptimisticCandidate = errors.New("block is not suitable for optimistic sync")
	// errBlockNotFoundInCacheOrDB is returned when a block is not found in the cache or DB.
	errBlockNotFoundInCacheOrDB = errors.New("block not found in cache or db")
-	// errNilStateFromStategen is returned when a nil state is returned from the state generator.
-	errNilStateFromStategen = errors.New("justified state can't be nil")
	// errWSBlockNotFound is returned when a block is not found in the WS cache or DB.
	errWSBlockNotFound = errors.New("weak subjectivity root not found in db")
	// errWSBlockNotFoundInEpoch is returned when a block is not found in the WS cache or DB within epoch.
	errWSBlockNotFoundInEpoch = errors.New("weak subjectivity root not found in db within epoch")
-	// errNotDescendantOfFinalized is returned when a block is not a descendant of the finalized checkpoint
-	errNotDescendantOfFinalized = invalidBlock{errors.New("not descendant of finalized checkpoint")}
+	// ErrNotDescendantOfFinalized is returned when a block is not a descendant of the finalized checkpoint
+	ErrNotDescendantOfFinalized = invalidBlock{error: errors.New("not descendant of finalized checkpoint")}
+	// ErrNotCheckpoint is returned when a given checkpoint is not a
+	// checkpoint in any chain known to forkchoice
+	ErrNotCheckpoint = errors.New("not a checkpoint in forkchoice")
 )

 // An invalid block is the block that fails state transition based on the core protocol rules.

@@ -38,17 +39,25 @@ var (
 // The block is deemed invalid according to execution layer client.
 // The block violates certain fork choice rules (before finalized slot, not finalized ancestor)
 type invalidBlock struct {
+	invalidAncestorRoots [][32]byte
	error
+	root [32]byte
 }

 type invalidBlockError interface {
	Error() string
-	InvalidBlock() bool
+	InvalidAncestorRoots() [][32]byte
+	BlockRoot() [32]byte
 }

-// InvalidBlock returns true for `invalidBlock`.
-func (e invalidBlock) InvalidBlock() bool {
-	return true
+// BlockRoot returns the invalid block root.
+func (e invalidBlock) BlockRoot() [32]byte {
+	return e.root
+}
+
+// InvalidAncestorRoots returns an optional list of invalid roots of the invalid block which leads up last valid root.
+func (e invalidBlock) InvalidAncestorRoots() [][32]byte {
+	return e.invalidAncestorRoots
 }

 // IsInvalidBlock returns true if the error has `invalidBlock`.

@@ -56,9 +65,34 @@ func IsInvalidBlock(e error) bool {
	if e == nil {
		return false
	}
-	d, ok := e.(invalidBlockError)
+	_, ok := e.(invalidBlockError)
	if !ok {
		return IsInvalidBlock(errors.Unwrap(e))
	}
-	return d.InvalidBlock()
+	return true
+}
+
+// InvalidBlockRoot returns the invalid block root. If the error
+// doesn't have an invalid blockroot. [32]byte{} is returned.
+func InvalidBlockRoot(e error) [32]byte {
+	if e == nil {
+		return [32]byte{}
+	}
+	d, ok := e.(invalidBlockError)
+	if !ok {
+		return [32]byte{}
+	}
+	return d.BlockRoot()
+}
+
+// InvalidAncestorRoots returns a list of invalid roots up to last valid root.
+func InvalidAncestorRoots(e error) [][32]byte {
+	if e == nil {
+		return [][32]byte{}
+	}
+	d, ok := e.(invalidBlockError)
+	if !ok {
+		return [][32]byte{}
+	}
+	return d.InvalidAncestorRoots()
 }

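The `invalidBlock` error now carries the offending block root plus an optional chain of invalid ancestor roots, and the package exposes `IsInvalidBlock`, `InvalidBlockRoot`, and `InvalidAncestorRoots` to read them back out. A hedged sketch of how a caller in this package might react to such an error (the `processBlock` name is a stand-in, not a function in this diff; `log` is the package logger and `fmt` is assumed imported):

```go
if err := processBlock(ctx, blk); err != nil {
	if IsInvalidBlock(err) {
		badRoot := InvalidBlockRoot(err)          // root of the block that failed
		badAncestors := InvalidAncestorRoots(err) // optional invalid ancestors up to the last valid root
		log.WithField("root", fmt.Sprintf("%#x", badRoot)).
			Warnf("rejecting block and %d invalid ancestors", len(badAncestors))
		// Mark badRoot and badAncestors invalid in forkchoice/caches here.
	}
	return err
}
```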
@@ -4,14 +4,33 @@ import (
	"testing"

	"github.com/pkg/errors"
-	"github.com/prysmaticlabs/prysm/testing/require"
+	"github.com/prysmaticlabs/prysm/v4/testing/require"
 )

 func TestIsInvalidBlock(t *testing.T) {
-	require.Equal(t, false, IsInvalidBlock(ErrInvalidPayload))
-	err := invalidBlock{ErrInvalidPayload}
+	require.Equal(t, true, IsInvalidBlock(ErrInvalidPayload)) // Already wrapped.
+	err := invalidBlock{error: ErrInvalidPayload}
	require.Equal(t, true, IsInvalidBlock(err))

	newErr := errors.Wrap(err, "wrap me")
	require.Equal(t, true, IsInvalidBlock(newErr))
+	require.DeepEqual(t, [][32]byte(nil), InvalidAncestorRoots(err))
+}
+
+func TestInvalidBlockRoot(t *testing.T) {
+	require.Equal(t, [32]byte{}, InvalidBlockRoot(ErrUndefinedExecutionEngineError))
+	require.Equal(t, [32]byte{}, InvalidBlockRoot(ErrInvalidPayload))
+
+	err := invalidBlock{error: ErrInvalidPayload, root: [32]byte{'a'}}
+	require.Equal(t, [32]byte{'a'}, InvalidBlockRoot(err))
+	require.DeepEqual(t, [][32]byte(nil), InvalidAncestorRoots(err))
+}
+
+func TestInvalidRoots(t *testing.T) {
+	roots := [][32]byte{{'d'}, {'b'}, {'c'}}
+	err := invalidBlock{error: ErrInvalidPayload, root: [32]byte{'a'}, invalidAncestorRoots: roots}
+
+	require.Equal(t, true, IsInvalidBlock(err))
+	require.Equal(t, [32]byte{'a'}, InvalidBlockRoot(err))
+	require.DeepEqual(t, roots, InvalidAncestorRoots(err))
 }

@@ -5,31 +5,34 @@ import (
	"fmt"

	"github.com/pkg/errors"
-	"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
-	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
-	"github.com/prysmaticlabs/prysm/beacon-chain/core/time"
-	"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
-	"github.com/prysmaticlabs/prysm/beacon-chain/db/kv"
-	"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
-	"github.com/prysmaticlabs/prysm/beacon-chain/state"
-	fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
-	"github.com/prysmaticlabs/prysm/config/params"
-	"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
-	types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
-	"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
-	"github.com/prysmaticlabs/prysm/encoding/bytesutil"
-	enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
-	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
-	"github.com/prysmaticlabs/prysm/time/slots"
+	"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
+	"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
+	"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
+	"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
+	"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/kv"
+	"github.com/prysmaticlabs/prysm/v4/beacon-chain/execution"
+	"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
+	"github.com/prysmaticlabs/prysm/v4/config/features"
+	"github.com/prysmaticlabs/prysm/v4/config/params"
+	consensusblocks "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
+	"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
+	payloadattribute "github.com/prysmaticlabs/prysm/v4/consensus-types/payload-attribute"
+	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
+	"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
+	enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
+	"github.com/prysmaticlabs/prysm/v4/runtime/version"
+	"github.com/prysmaticlabs/prysm/v4/time/slots"
	"github.com/sirupsen/logrus"
	"go.opencensus.io/trace"
 )

+var defaultLatestValidHash = bytesutil.PadTo([]byte{0xff}, 32)
+
 // notifyForkchoiceUpdateArg is the argument for the forkchoice update notification `notifyForkchoiceUpdate`.
 type notifyForkchoiceUpdateArg struct {
	headState state.BeaconState
	headRoot  [32]byte
-	headBlock interfaces.BeaconBlock
+	headBlock interfaces.ReadOnlyBeaconBlock
 }

 // notifyForkchoiceUpdate signals execution engine the fork choice updates. Execution engine should:

@@ -41,67 +44,79 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho

	headBlk := arg.headBlock
	if headBlk == nil || headBlk.IsNil() || headBlk.Body().IsNil() {
-		return nil, errors.New("nil head block")
+		log.Error("Head block is nil")
+		return nil, nil
	}
	// Must not call fork choice updated until the transition conditions are met on the Pow network.
	isExecutionBlk, err := blocks.IsExecutionBlock(headBlk.Body())
	if err != nil {
-		return nil, errors.Wrap(err, "could not determine if block is execution block")
+		log.WithError(err).Error("Could not determine if head block is execution block")
+		return nil, nil
	}
	if !isExecutionBlk {
		return nil, nil
	}
-	headPayload, err := headBlk.Body().ExecutionPayload()
+	headPayload, err := headBlk.Body().Execution()
	if err != nil {
-		return nil, errors.Wrap(err, "could not get execution payload")
+		log.WithError(err).Error("Could not get execution payload for head block")
+		return nil, nil
	}
-	finalizedHash := s.store.FinalizedPayloadBlockHash()
-	justifiedHash := s.store.JustifiedPayloadBlockHash()
+	finalizedHash := s.cfg.ForkChoiceStore.FinalizedPayloadBlockHash()
+	justifiedHash := s.cfg.ForkChoiceStore.UnrealizedJustifiedPayloadBlockHash()
	fcs := &enginev1.ForkchoiceState{
-		HeadBlockHash:      headPayload.BlockHash,
+		HeadBlockHash:      headPayload.BlockHash(),
		SafeBlockHash:      justifiedHash[:],
		FinalizedBlockHash: finalizedHash[:],
	}

	nextSlot := s.CurrentSlot() + 1 // Cache payload ID for next slot proposer.
-	hasAttr, attr, proposerId, err := s.getPayloadAttribute(ctx, arg.headState, nextSlot)
-	if err != nil {
-		return nil, errors.Wrap(err, "could not get payload attribute")
-	}
+	hasAttr, attr, proposerId := s.getPayloadAttribute(ctx, arg.headState, nextSlot, arg.headRoot[:])

	payloadID, lastValidHash, err := s.cfg.ExecutionEngineCaller.ForkchoiceUpdated(ctx, fcs, attr)
	if err != nil {
		switch err {
-		case powchain.ErrAcceptedSyncingPayloadStatus:
+		case execution.ErrAcceptedSyncingPayloadStatus:
			forkchoiceUpdatedOptimisticNodeCount.Inc()
			log.WithFields(logrus.Fields{
				"headSlot":                  headBlk.Slot(),
-				"headPayloadBlockHash":      fmt.Sprintf("%#x", bytesutil.Trunc(headPayload.BlockHash)),
+				"headPayloadBlockHash":      fmt.Sprintf("%#x", bytesutil.Trunc(headPayload.BlockHash())),
				"finalizedPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(finalizedHash[:])),
			}).Info("Called fork choice updated with optimistic block")
-			return payloadID, s.optimisticCandidateBlock(ctx, headBlk)
-		case powchain.ErrInvalidPayloadStatus:
-			newPayloadInvalidNodeCount.Inc()
+			return payloadID, nil
+		case execution.ErrInvalidPayloadStatus:
+			forkchoiceUpdatedInvalidNodeCount.Inc()
			headRoot := arg.headRoot
-			invalidRoots, err := s.ForkChoicer().SetOptimisticToInvalid(ctx, headRoot, bytesutil.ToBytes32(headBlk.ParentRoot()), bytesutil.ToBytes32(lastValidHash))
+			if len(lastValidHash) == 0 {
+				lastValidHash = defaultLatestValidHash
|
||||||
|
}
|
||||||
|
invalidRoots, err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, headRoot, headBlk.ParentRoot(), bytesutil.ToBytes32(lastValidHash))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
log.WithError(err).Error("Could not set head root to invalid")
|
||||||
|
return nil, nil
|
||||||
}
|
}
|
||||||
if err := s.removeInvalidBlockAndState(ctx, invalidRoots); err != nil {
|
if err := s.removeInvalidBlockAndState(ctx, invalidRoots); err != nil {
|
||||||
return nil, err
|
log.WithError(err).Error("Could not remove invalid block and state")
|
||||||
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
r, err := s.updateHead(ctx, s.justifiedBalances.balances)
|
r, err := s.cfg.ForkChoiceStore.Head(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
log.WithFields(logrus.Fields{
|
||||||
|
"slot": headBlk.Slot(),
|
||||||
|
"blockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(headRoot[:])),
|
||||||
|
"invalidChildrenCount": len(invalidRoots),
|
||||||
|
}).Warn("Pruned invalid blocks, could not update head root")
|
||||||
|
return nil, invalidBlock{error: ErrInvalidPayload, root: arg.headRoot, invalidAncestorRoots: invalidRoots}
|
||||||
}
|
}
|
||||||
b, err := s.getBlock(ctx, r)
|
b, err := s.getBlock(ctx, r)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
log.WithError(err).Error("Could not get head block")
|
||||||
|
return nil, nil
|
||||||
}
|
}
|
||||||
st, err := s.cfg.StateGen.StateByRoot(ctx, r)
|
st, err := s.cfg.StateGen.StateByRoot(ctx, r)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
log.WithError(err).Error("Could not get head state")
|
||||||
|
return nil, nil
|
||||||
}
|
}
|
||||||
pid, err := s.notifyForkchoiceUpdate(ctx, ¬ifyForkchoiceUpdateArg{
|
pid, err := s.notifyForkchoiceUpdate(ctx, ¬ifyForkchoiceUpdateArg{
|
||||||
headState: st,
|
headState: st,
|
||||||
@@ -109,28 +124,41 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
|
|||||||
headBlock: b.Block(),
|
headBlock: b.Block(),
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err // Returning err because it's recursive here.
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := s.saveHead(ctx, r, b, st); err != nil {
|
||||||
|
log.WithError(err).Error("could not save head after pruning invalid blocks")
|
||||||
}
|
}
|
||||||
|
|
||||||
log.WithFields(logrus.Fields{
|
log.WithFields(logrus.Fields{
|
||||||
"slot": headBlk.Slot(),
|
"slot": headBlk.Slot(),
|
||||||
"blockRoot": fmt.Sprintf("%#x", headRoot),
|
"blockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(headRoot[:])),
|
||||||
"invalidCount": len(invalidRoots),
|
"invalidChildrenCount": len(invalidRoots),
|
||||||
|
"newHeadRoot": fmt.Sprintf("%#x", bytesutil.Trunc(r[:])),
|
||||||
}).Warn("Pruned invalid blocks")
|
}).Warn("Pruned invalid blocks")
|
||||||
return pid, ErrInvalidPayload
|
return pid, invalidBlock{error: ErrInvalidPayload, root: arg.headRoot, invalidAncestorRoots: invalidRoots}
|
||||||
|
|
||||||
default:
|
default:
|
||||||
return nil, errors.WithMessage(ErrUndefinedExecutionEngineError, err.Error())
|
log.WithError(err).Error(ErrUndefinedExecutionEngineError)
|
||||||
|
return nil, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
forkchoiceUpdatedValidNodeCount.Inc()
|
forkchoiceUpdatedValidNodeCount.Inc()
|
||||||
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, arg.headRoot); err != nil {
|
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, arg.headRoot); err != nil {
|
||||||
return nil, errors.Wrap(err, "could not set block to valid")
|
log.WithError(err).Error("Could not set head root to valid")
|
||||||
|
return nil, nil
|
||||||
}
|
}
|
||||||
if hasAttr { // If the forkchoice update call has an attribute, update the proposer payload ID cache.
|
// If the forkchoice update call has an attribute, update the proposer payload ID cache.
|
||||||
|
if hasAttr && payloadID != nil {
|
||||||
var pId [8]byte
|
var pId [8]byte
|
||||||
copy(pId[:], payloadID[:])
|
copy(pId[:], payloadID[:])
|
||||||
s.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(nextSlot, proposerId, pId)
|
s.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(nextSlot, proposerId, pId, arg.headRoot)
|
||||||
|
} else if hasAttr && payloadID == nil {
|
||||||
|
log.WithFields(logrus.Fields{
|
||||||
|
"blockHash": fmt.Sprintf("%#x", headPayload.BlockHash()),
|
||||||
|
"slot": headBlk.Slot(),
|
||||||
|
}).Error("Received nil payload ID on VALID engine response")
|
||||||
}
|
}
|
||||||
return payloadID, nil
|
return payloadID, nil
|
||||||
}
|
}
|
||||||
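Editor's note: the hunk above reduces to a small protocol — build an engine_forkchoiceUpdated state from the head, safe (justified), and finalized payload hashes, call the execution engine, and branch on the three possible outcomes (valid, syncing/accepted, invalid). The Go sketch below is illustrative only; the Engine interface, ForkchoiceState struct, and sentinel errors are simplified stand-ins for Prysm's ExecutionEngineCaller and execution.Err* values, not the real types.

package sketch

import (
	"context"
	"errors"
	"fmt"
)

// Hypothetical stand-ins for execution.ErrAcceptedSyncingPayloadStatus and
// execution.ErrInvalidPayloadStatus used in the diff above.
var (
	errSyncing = errors.New("payload status is SYNCING or ACCEPTED")
	errInvalid = errors.New("payload status is INVALID")
)

type ForkchoiceState struct {
	HeadBlockHash, SafeBlockHash, FinalizedBlockHash []byte
}

// Engine abstracts engine_forkchoiceUpdated; attr may be nil when no payload
// needs to be built for the next slot.
type Engine interface {
	ForkchoiceUpdated(ctx context.Context, fcs *ForkchoiceState, attr any) (payloadID [8]byte, lastValidHash []byte, err error)
}

func notifyForkchoiceUpdate(ctx context.Context, eng Engine, head, safe, finalized [32]byte, attr any) ([8]byte, error) {
	fcs := &ForkchoiceState{
		HeadBlockHash:      head[:],
		SafeBlockHash:      safe[:],
		FinalizedBlockHash: finalized[:],
	}
	pid, lastValidHash, err := eng.ForkchoiceUpdated(ctx, fcs, attr)
	switch {
	case err == nil:
		return pid, nil // VALID: the head root can be marked fully verified.
	case errors.Is(err, errSyncing):
		return pid, nil // SYNCING/ACCEPTED: keep the head optimistic and move on.
	case errors.Is(err, errInvalid):
		// INVALID: the caller prunes the branch back to lastValidHash,
		// recomputes head, and calls this function again.
		return [8]byte{}, fmt.Errorf("invalid payload, last valid hash %#x: %w", lastValidHash, errInvalid)
	default:
		return [8]byte{}, fmt.Errorf("undefined execution engine error: %w", err)
	}
}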
@@ -145,17 +173,17 @@ func (s *Service) getPayloadHash(ctx context.Context, root []byte) ([32]byte, er
 	if blocks.IsPreBellatrixVersion(blk.Block().Version()) {
 		return params.BeaconConfig().ZeroHash, nil
 	}
-	payload, err := blk.Block().Body().ExecutionPayload()
+	payload, err := blk.Block().Body().Execution()
 	if err != nil {
 		return [32]byte{}, errors.Wrap(err, "could not get execution payload")
 	}
-	return bytesutil.ToBytes32(payload.BlockHash), nil
+	return bytesutil.ToBytes32(payload.BlockHash()), nil
 }
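Editor's note: getPayloadHash above resolves a beacon block root to the execution payload hash that fork choice needs; anything before Bellatrix has no payload and maps to the configured zero hash. A minimal sketch, assuming simplified block interfaces in place of Prysm's ReadOnlySignedBeaconBlock:

package sketch

import (
	"context"
	"fmt"
)

// Hypothetical simplifications of the interfaces used by getPayloadHash.
type execData interface{ BlockHash() []byte }

type roBlock interface {
	IsPreBellatrix() bool
	Execution() (execData, error)
}

type blockGetter func(ctx context.Context, root [32]byte) (roBlock, error)

// payloadHashForRoot mirrors the flow above: pre-Bellatrix blocks map to the
// zero hash, otherwise the payload's block hash is returned.
func payloadHashForRoot(ctx context.Context, get blockGetter, root, zeroHash [32]byte) ([32]byte, error) {
	blk, err := get(ctx, root)
	if err != nil {
		return [32]byte{}, err
	}
	if blk.IsPreBellatrix() {
		return zeroHash, nil
	}
	payload, err := blk.Execution()
	if err != nil {
		return [32]byte{}, fmt.Errorf("could not get execution payload: %w", err)
	}
	var h [32]byte
	copy(h[:], payload.BlockHash())
	return h, nil
}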
|
|
||||||
// notifyForkchoiceUpdate signals execution engine on a new payload.
|
// notifyNewPayload signals execution engine on a new payload.
|
||||||
// It returns true if the EL has returned VALID for the block
|
// It returns true if the EL has returned VALID for the block
|
||||||
func (s *Service) notifyNewPayload(ctx context.Context, postStateVersion int,
|
func (s *Service) notifyNewPayload(ctx context.Context, postStateVersion int,
|
||||||
postStateHeader *ethpb.ExecutionPayloadHeader, blk interfaces.SignedBeaconBlock) (bool, error) {
|
postStateHeader interfaces.ExecutionData, blk interfaces.ReadOnlySignedBeaconBlock) (bool, error) {
|
||||||
ctx, span := trace.StartSpan(ctx, "blockChain.notifyNewPayload")
|
ctx, span := trace.StartSpan(ctx, "blockChain.notifyNewPayload")
|
||||||
defer span.End()
|
defer span.End()
|
||||||
|
|
||||||
@@ -164,40 +192,40 @@ func (s *Service) notifyNewPayload(ctx context.Context, postStateVersion int,
|
|||||||
if blocks.IsPreBellatrixVersion(postStateVersion) {
|
if blocks.IsPreBellatrixVersion(postStateVersion) {
|
||||||
return true, nil
|
return true, nil
|
||||||
}
|
}
|
||||||
if err := wrapper.BeaconBlockIsNil(blk); err != nil {
|
if err := consensusblocks.BeaconBlockIsNil(blk); err != nil {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
body := blk.Block().Body()
|
body := blk.Block().Body()
|
||||||
enabled, err := blocks.IsExecutionEnabledUsingHeader(postStateHeader, body)
|
enabled, err := blocks.IsExecutionEnabledUsingHeader(postStateHeader, body)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, errors.Wrap(invalidBlock{err}, "could not determine if execution is enabled")
|
return false, errors.Wrap(invalidBlock{error: err}, "could not determine if execution is enabled")
|
||||||
}
|
}
|
||||||
if !enabled {
|
if !enabled {
|
||||||
return true, nil
|
return true, nil
|
||||||
}
|
}
|
||||||
payload, err := body.ExecutionPayload()
|
payload, err := body.Execution()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, errors.Wrap(invalidBlock{err}, "could not get execution payload")
|
return false, errors.Wrap(invalidBlock{error: err}, "could not get execution payload")
|
||||||
}
|
}
|
||||||
lastValidHash, err := s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload)
|
lastValidHash, err := s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload)
|
||||||
switch err {
|
switch err {
|
||||||
case nil:
|
case nil:
|
||||||
newPayloadValidNodeCount.Inc()
|
newPayloadValidNodeCount.Inc()
|
||||||
return true, nil
|
return true, nil
|
||||||
case powchain.ErrAcceptedSyncingPayloadStatus:
|
case execution.ErrAcceptedSyncingPayloadStatus:
|
||||||
newPayloadOptimisticNodeCount.Inc()
|
newPayloadOptimisticNodeCount.Inc()
|
||||||
log.WithFields(logrus.Fields{
|
log.WithFields(logrus.Fields{
|
||||||
"slot": blk.Block().Slot(),
|
"slot": blk.Block().Slot(),
|
||||||
"payloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash)),
|
"payloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash())),
|
||||||
}).Info("Called new payload with optimistic block")
|
}).Info("Called new payload with optimistic block")
|
||||||
return false, s.optimisticCandidateBlock(ctx, blk.Block())
|
return false, nil
|
||||||
case powchain.ErrInvalidPayloadStatus:
|
case execution.ErrInvalidPayloadStatus:
|
||||||
newPayloadInvalidNodeCount.Inc()
|
newPayloadInvalidNodeCount.Inc()
|
||||||
root, err := blk.Block().HashTreeRoot()
|
root, err := blk.Block().HashTreeRoot()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
invalidRoots, err := s.ForkChoicer().SetOptimisticToInvalid(ctx, root, bytesutil.ToBytes32(blk.Block().ParentRoot()), bytesutil.ToBytes32(lastValidHash))
|
invalidRoots, err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, root, blk.Block().ParentRoot(), bytesutil.ToBytes32(lastValidHash))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
@@ -205,64 +233,46 @@ func (s *Service) notifyNewPayload(ctx context.Context, postStateVersion int,
|
|||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
log.WithFields(logrus.Fields{
|
log.WithFields(logrus.Fields{
|
||||||
"slot": blk.Block().Slot(),
|
"slot": blk.Block().Slot(),
|
||||||
"blockRoot": fmt.Sprintf("%#x", root),
|
"blockRoot": fmt.Sprintf("%#x", root),
|
||||||
"invalidCount": len(invalidRoots),
|
"invalidChildrenCount": len(invalidRoots),
|
||||||
}).Warn("Pruned invalid blocks")
|
}).Warn("Pruned invalid blocks")
|
||||||
return false, invalidBlock{ErrInvalidPayload}
|
return false, invalidBlock{
|
||||||
|
invalidAncestorRoots: invalidRoots,
|
||||||
|
error: ErrInvalidPayload,
|
||||||
|
}
|
||||||
|
case execution.ErrInvalidBlockHashPayloadStatus:
|
||||||
|
newPayloadInvalidNodeCount.Inc()
|
||||||
|
return false, ErrInvalidBlockHashPayloadStatus
|
||||||
default:
|
default:
|
||||||
return false, errors.WithMessage(ErrUndefinedExecutionEngineError, err.Error())
|
return false, errors.WithMessage(ErrUndefinedExecutionEngineError, err.Error())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
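Editor's note: the two engine calls propagate status differently. The new notifyForkchoiceUpdate mostly logs failures and returns nil, while notifyNewPayload above still reports a tri-state to its caller. A condensed sketch of that mapping, with hypothetical sentinel errors standing in for the execution.Err* values:

package sketch

import "errors"

// Hypothetical sentinels mirroring execution.ErrAcceptedSyncingPayloadStatus,
// execution.ErrInvalidPayloadStatus and execution.ErrInvalidBlockHashPayloadStatus.
var (
	errAcceptedSyncing  = errors.New("ACCEPTED/SYNCING")
	errInvalidPayload   = errors.New("INVALID")
	errInvalidBlockHash = errors.New("INVALID_BLOCK_HASH")
)

// interpretNewPayload condenses the switch in notifyNewPayload above:
// (true, nil)  -> the EL returned VALID, the block is fully validated;
// (false, nil) -> the EL is still syncing, import the block optimistically;
// (false, err) -> the payload (or its ancestry) is invalid and must be pruned.
func interpretNewPayload(err error) (validated bool, importErr error) {
	switch {
	case err == nil:
		return true, nil
	case errors.Is(err, errAcceptedSyncing):
		return false, nil
	case errors.Is(err, errInvalidPayload), errors.Is(err, errInvalidBlockHash):
		return false, err
	default:
		return false, errors.Join(errors.New("undefined execution engine error"), err)
	}
}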
|
|
||||||
// optimisticCandidateBlock returns an error if this block can't be optimistically synced.
|
|
||||||
// It replaces boolean in spec code with `errNotOptimisticCandidate`.
|
|
||||||
//
|
|
||||||
// Spec pseudocode definition:
|
|
||||||
// def is_optimistic_candidate_block(opt_store: OptimisticStore, current_slot: Slot, block: BeaconBlock) -> bool:
|
|
||||||
// if is_execution_block(opt_store.blocks[block.parent_root]):
|
|
||||||
// return True
|
|
||||||
//
|
|
||||||
// if block.slot + SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY <= current_slot:
|
|
||||||
// return True
|
|
||||||
//
|
|
||||||
// return False
|
|
||||||
func (s *Service) optimisticCandidateBlock(ctx context.Context, blk interfaces.BeaconBlock) error {
|
|
||||||
if blk.Slot()+params.BeaconConfig().SafeSlotsToImportOptimistically <= s.CurrentSlot() {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
parent, err := s.getBlock(ctx, bytesutil.ToBytes32(blk.ParentRoot()))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
parentIsExecutionBlock, err := blocks.IsExecutionBlock(parent.Block().Body())
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if parentIsExecutionBlock {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return errNotOptimisticCandidate
|
|
||||||
}
|
|
||||||
|
|
||||||
// getPayloadAttributes returns the payload attributes for the given state and slot.
|
// getPayloadAttributes returns the payload attributes for the given state and slot.
|
||||||
// The attribute is required to initiate a payload build process in the context of an `engine_forkchoiceUpdated` call.
|
// The attribute is required to initiate a payload build process in the context of an `engine_forkchoiceUpdated` call.
|
||||||
func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState, slot types.Slot) (bool, *enginev1.PayloadAttributes, types.ValidatorIndex, error) {
|
func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState, slot primitives.Slot, headRoot []byte) (bool, payloadattribute.Attributer, primitives.ValidatorIndex) {
|
||||||
proposerID, _, ok := s.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(slot)
|
emptyAttri := payloadattribute.EmptyWithVersion(st.Version())
|
||||||
if !ok { // There's no need to build attribute if there is no proposer for slot.
|
// Root is `[32]byte{}` since we are retrieving proposer ID of a given slot. During insertion at assignment the root was not known.
|
||||||
return false, nil, 0, nil
|
proposerID, _, ok := s.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(slot, [32]byte{} /* root */)
|
||||||
|
if !ok && !features.Get().PrepareAllPayloads { // There's no need to build attribute if there is no proposer for slot.
|
||||||
|
return false, emptyAttri, 0
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get previous randao.
|
// Get previous randao.
|
||||||
st = st.Copy()
|
st = st.Copy()
|
||||||
st, err := transition.ProcessSlotsIfPossible(ctx, st, slot)
|
if slot > st.Slot() {
|
||||||
if err != nil {
|
var err error
|
||||||
return false, nil, 0, err
|
st, err = transition.ProcessSlotsUsingNextSlotCache(ctx, st, headRoot, slot)
|
||||||
|
if err != nil {
|
||||||
|
log.WithError(err).Error("Could not process slots to get payload attribute")
|
||||||
|
return false, emptyAttri, 0
|
||||||
|
}
|
||||||
}
|
}
|
||||||
prevRando, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
|
prevRando, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, nil, 0, nil
|
log.WithError(err).Error("Could not get randao mix to get payload attribute")
|
||||||
|
return false, emptyAttri, 0
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get fee recipient.
|
// Get fee recipient.
|
||||||
@@ -270,17 +280,18 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
|
|||||||
recipient, err := s.cfg.BeaconDB.FeeRecipientByValidatorID(ctx, proposerID)
|
recipient, err := s.cfg.BeaconDB.FeeRecipientByValidatorID(ctx, proposerID)
|
||||||
switch {
|
switch {
|
||||||
case errors.Is(err, kv.ErrNotFoundFeeRecipient):
|
case errors.Is(err, kv.ErrNotFoundFeeRecipient):
|
||||||
if feeRecipient.String() == fieldparams.EthBurnAddressHex {
|
if feeRecipient.String() == params.BeaconConfig().EthBurnAddressHex {
|
||||||
logrus.WithFields(logrus.Fields{
|
logrus.WithFields(logrus.Fields{
|
||||||
"validatorIndex": proposerID,
|
"validatorIndex": proposerID,
|
||||||
"burnAddress": fieldparams.EthBurnAddressHex,
|
"burnAddress": params.BeaconConfig().EthBurnAddressHex,
|
||||||
}).Warn("Fee recipient is currently using the burn address, " +
|
}).Warn("Fee recipient is currently using the burn address, " +
|
||||||
"you will not be rewarded transaction fees on this setting. " +
|
"you will not be rewarded transaction fees on this setting. " +
|
||||||
"Please set a different eth address as the fee recipient. " +
|
"Please set a different eth address as the fee recipient. " +
|
||||||
"Please refer to our documentation for instructions")
|
"Please refer to our documentation for instructions")
|
||||||
}
|
}
|
||||||
case err != nil:
|
case err != nil:
|
||||||
return false, nil, 0, errors.Wrap(err, "could not get fee recipient in db")
|
log.WithError(err).Error("Could not get fee recipient to get payload attribute")
|
||||||
|
return false, emptyAttri, 0
|
||||||
default:
|
default:
|
||||||
feeRecipient = recipient
|
feeRecipient = recipient
|
||||||
}
|
}
|
||||||
@@ -288,14 +299,44 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
|
|||||||
// Get timestamp.
|
// Get timestamp.
|
||||||
t, err := slots.ToTime(uint64(s.genesisTime.Unix()), slot)
|
t, err := slots.ToTime(uint64(s.genesisTime.Unix()), slot)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, nil, 0, err
|
log.WithError(err).Error("Could not get timestamp to get payload attribute")
|
||||||
|
return false, emptyAttri, 0
|
||||||
}
|
}
|
||||||
attr := &enginev1.PayloadAttributes{
|
|
||||||
Timestamp: uint64(t.Unix()),
|
var attr payloadattribute.Attributer
|
||||||
PrevRandao: prevRando,
|
switch st.Version() {
|
||||||
SuggestedFeeRecipient: feeRecipient.Bytes(),
|
case version.Capella:
|
||||||
|
withdrawals, err := st.ExpectedWithdrawals()
|
||||||
|
if err != nil {
|
||||||
|
log.WithError(err).Error("Could not get expected withdrawals to get payload attribute")
|
||||||
|
return false, emptyAttri, 0
|
||||||
|
}
|
||||||
|
attr, err = payloadattribute.New(&enginev1.PayloadAttributesV2{
|
||||||
|
Timestamp: uint64(t.Unix()),
|
||||||
|
PrevRandao: prevRando,
|
||||||
|
SuggestedFeeRecipient: feeRecipient.Bytes(),
|
||||||
|
Withdrawals: withdrawals,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
log.WithError(err).Error("Could not get payload attribute")
|
||||||
|
return false, emptyAttri, 0
|
||||||
|
}
|
||||||
|
case version.Bellatrix:
|
||||||
|
attr, err = payloadattribute.New(&enginev1.PayloadAttributes{
|
||||||
|
Timestamp: uint64(t.Unix()),
|
||||||
|
PrevRandao: prevRando,
|
||||||
|
SuggestedFeeRecipient: feeRecipient.Bytes(),
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
log.WithError(err).Error("Could not get payload attribute")
|
||||||
|
return false, emptyAttri, 0
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
log.WithField("version", st.Version()).Error("Could not get payload attribute due to unknown state version")
|
||||||
|
return false, emptyAttri, 0
|
||||||
}
|
}
|
||||||
return true, attr, proposerID, nil
|
|
||||||
|
return true, attr, proposerID
|
||||||
}
|
}
|
||||||
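Editor's note: getPayloadAttribute above builds version-specific attributes — Bellatrix needs timestamp, prev randao, and fee recipient, while Capella additionally carries the expected withdrawals. The sketch below shows the same switch with simplified attribute structs; the names are illustrative, not the actual enginev1 protobuf types.

package sketch

import "time"

// Simplified stand-ins for the versioned engine-API attribute payloads.
type payloadAttributesV1 struct {
	Timestamp             uint64
	PrevRandao            []byte
	SuggestedFeeRecipient []byte
}

type withdrawal struct {
	Index, ValidatorIndex uint64
	Address               []byte
	Amount                uint64
}

type payloadAttributesV2 struct {
	payloadAttributesV1
	Withdrawals []withdrawal
}

const (
	bellatrix = iota
	capella
)

// buildAttributes mirrors the version switch in getPayloadAttribute above:
// Bellatrix states produce V1 attributes, Capella states additionally carry
// the expected withdrawals for the next payload.
func buildAttributes(stateVersion int, ts time.Time, prevRandao, feeRecipient []byte, withdrawals []withdrawal) (any, bool) {
	base := payloadAttributesV1{
		Timestamp:             uint64(ts.Unix()),
		PrevRandao:            prevRandao,
		SuggestedFeeRecipient: feeRecipient,
	}
	switch stateVersion {
	case capella:
		return payloadAttributesV2{payloadAttributesV1: base, Withdrawals: withdrawals}, true
	case bellatrix:
		return base, true
	default:
		return nil, false // unknown fork: no attributes, caller logs and skips
	}
}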
|
|
||||||
// removeInvalidBlockAndState removes the invalid block and its corresponding state from the cache and DB.
|
// removeInvalidBlockAndState removes the invalid block and its corresponding state from the cache and DB.
|
||||||
|
|||||||
File diff suppressed because it is too large (Load Diff)

beacon-chain/blockchain/forkchoice_update_execution.go (new file, 134 lines)
@@ -0,0 +1,134 @@
|
|||||||
|
package blockchain
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/config/features"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
|
"go.opencensus.io/trace"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (s *Service) isNewProposer(slot primitives.Slot) bool {
|
||||||
|
_, _, ok := s.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(slot, [32]byte{} /* root */)
|
||||||
|
return ok || features.Get().PrepareAllPayloads
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Service) isNewHead(r [32]byte) bool {
|
||||||
|
s.headLock.RLock()
|
||||||
|
defer s.headLock.RUnlock()
|
||||||
|
|
||||||
|
currentHeadRoot := s.originBlockRoot
|
||||||
|
if s.head != nil {
|
||||||
|
currentHeadRoot = s.headRoot()
|
||||||
|
}
|
||||||
|
|
||||||
|
return r != currentHeadRoot || r == [32]byte{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Service) getStateAndBlock(ctx context.Context, r [32]byte) (state.BeaconState, interfaces.ReadOnlySignedBeaconBlock, error) {
|
||||||
|
if !s.hasBlockInInitSyncOrDB(ctx, r) {
|
||||||
|
return nil, nil, errors.New("block does not exist")
|
||||||
|
}
|
||||||
|
newHeadBlock, err := s.getBlock(ctx, r)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
headState, err := s.cfg.StateGen.StateByRoot(ctx, r)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
return headState, newHeadBlock, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// forkchoiceUpdateWithExecution is a wrapper around notifyForkchoiceUpdate. It decides whether a new call to FCU should be made.
|
||||||
|
// It returns true if the new head is updated.
|
||||||
|
func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, newHeadRoot [32]byte, proposingSlot primitives.Slot) (bool, error) {
|
||||||
|
_, span := trace.StartSpan(ctx, "beacon-chain.blockchain.forkchoiceUpdateWithExecution")
|
||||||
|
defer span.End()
|
||||||
|
// Note: Use the service context here to avoid the parent context being ended during a forkchoice update.
|
||||||
|
ctx = trace.NewContext(s.ctx, span)
|
||||||
|
|
||||||
|
isNewHead := s.isNewHead(newHeadRoot)
|
||||||
|
if !isNewHead {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
isNewProposer := s.isNewProposer(proposingSlot)
|
||||||
|
if isNewProposer && !features.Get().DisableReorgLateBlocks {
|
||||||
|
if s.shouldOverrideFCU(newHeadRoot, proposingSlot) {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
headState, headBlock, err := s.getStateAndBlock(ctx, newHeadRoot)
|
||||||
|
if err != nil {
|
||||||
|
log.WithError(err).Error("Could not get forkchoice update argument")
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = s.notifyForkchoiceUpdate(ctx, ¬ifyForkchoiceUpdateArg{
|
||||||
|
headState: headState,
|
||||||
|
headRoot: newHeadRoot,
|
||||||
|
headBlock: headBlock.Block(),
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return false, errors.Wrap(err, "could not notify forkchoice update")
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := s.saveHead(ctx, newHeadRoot, headBlock, headState); err != nil {
|
||||||
|
log.WithError(err).Error("could not save head")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Only need to prune attestations from pool if the head has changed.
|
||||||
|
if err := s.pruneAttsFromPool(headBlock); err != nil {
|
||||||
|
log.WithError(err).Error("could not prune attestations from pool")
|
||||||
|
}
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// shouldOverrideFCU checks whether the incoming block is still subject to being
|
||||||
|
// reorged or not by the next proposer.
|
||||||
|
func (s *Service) shouldOverrideFCU(newHeadRoot [32]byte, proposingSlot primitives.Slot) bool {
|
||||||
|
headWeight, err := s.cfg.ForkChoiceStore.Weight(newHeadRoot)
|
||||||
|
if err != nil {
|
||||||
|
log.WithError(err).WithField("root", fmt.Sprintf("%#x", newHeadRoot)).Warn("could not determine node weight")
|
||||||
|
}
|
||||||
|
currentSlot := s.CurrentSlot()
|
||||||
|
if proposingSlot == currentSlot {
|
||||||
|
proposerHead := s.cfg.ForkChoiceStore.GetProposerHead()
|
||||||
|
if proposerHead != newHeadRoot {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
log.WithFields(logrus.Fields{
|
||||||
|
"root": fmt.Sprintf("%#x", newHeadRoot),
|
||||||
|
"weight": headWeight,
|
||||||
|
}).Infof("Attempted late block reorg aborted due to attestations at %d seconds",
|
||||||
|
params.BeaconConfig().SecondsPerSlot)
|
||||||
|
lateBlockFailedAttemptSecondThreshold.Inc()
|
||||||
|
} else {
|
||||||
|
if s.cfg.ForkChoiceStore.ShouldOverrideFCU() {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
secs, err := slots.SecondsSinceSlotStart(currentSlot,
|
||||||
|
uint64(s.genesisTime.Unix()), uint64(time.Now().Unix()))
|
||||||
|
if err != nil {
|
||||||
|
log.WithError(err).Error("could not compute seconds since slot start")
|
||||||
|
}
|
||||||
|
if secs >= doublylinkedtree.ProcessAttestationsThreshold {
|
||||||
|
log.WithFields(logrus.Fields{
|
||||||
|
"root": fmt.Sprintf("%#x", newHeadRoot),
|
||||||
|
"weight": headWeight,
|
||||||
|
}).Infof("Attempted late block reorg aborted due to attestations at %d seconds",
|
||||||
|
doublylinkedtree.ProcessAttestationsThreshold)
|
||||||
|
lateBlockFailedAttemptFirstThreshold.Inc()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
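Editor's note: the wrapper in this new file mostly decides whether calling the engine is worth it at all. A condensed sketch of that gate, assuming booleans standing in for the service queries (the head comparison, the proposer cache lookup, the DisableReorgLateBlocks feature flag, and shouldOverrideFCU):

package sketch

// shouldCallFCU condenses the gating in forkchoiceUpdateWithExecution above:
// skip the engine call when the head has not changed, and when this node
// proposes soon and the fork choice store says the current head may still be
// reorged by that proposal. reorgLateBlocksEnabled is the inverse of the
// DisableReorgLateBlocks feature flag shown in the diff.
func shouldCallFCU(isNewHead, isNextProposer, reorgLateBlocksEnabled, overrideFCU bool) bool {
	if !isNewHead {
		return false
	}
	if isNextProposer && reorgLateBlocksEnabled && overrideFCU {
		return false
	}
	return true
}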
beacon-chain/blockchain/forkchoice_update_execution_test.go (new file, 224 lines)
@@ -0,0 +1,224 @@
|
|||||||
|
package blockchain
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
|
||||||
|
testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
|
||||||
|
mockExecution "github.com/prysmaticlabs/prysm/v4/beacon-chain/execution/testing"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||||
|
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||||
|
"github.com/prysmaticlabs/prysm/v4/testing/util"
|
||||||
|
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestService_isNewProposer(t *testing.T) {
|
||||||
|
beaconDB := testDB.SetupDB(t)
|
||||||
|
service := setupBeaconChain(t, beaconDB)
|
||||||
|
require.Equal(t, false, service.isNewProposer(service.CurrentSlot()+1))
|
||||||
|
|
||||||
|
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(service.CurrentSlot()+1, 0, [8]byte{}, [32]byte{} /* root */)
|
||||||
|
require.Equal(t, true, service.isNewProposer(service.CurrentSlot()+1))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestService_isNewHead(t *testing.T) {
|
||||||
|
beaconDB := testDB.SetupDB(t)
|
||||||
|
service := setupBeaconChain(t, beaconDB)
|
||||||
|
require.Equal(t, true, service.isNewHead([32]byte{}))
|
||||||
|
|
||||||
|
service.head = &head{root: [32]byte{1}}
|
||||||
|
require.Equal(t, true, service.isNewHead([32]byte{2}))
|
||||||
|
require.Equal(t, false, service.isNewHead([32]byte{1}))
|
||||||
|
|
||||||
|
// Nil head should use origin root
|
||||||
|
service.head = nil
|
||||||
|
service.originBlockRoot = [32]byte{3}
|
||||||
|
require.Equal(t, true, service.isNewHead([32]byte{2}))
|
||||||
|
require.Equal(t, false, service.isNewHead([32]byte{3}))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestService_getHeadStateAndBlock(t *testing.T) {
|
||||||
|
beaconDB := testDB.SetupDB(t)
|
||||||
|
service := setupBeaconChain(t, beaconDB)
|
||||||
|
_, _, err := service.getStateAndBlock(context.Background(), [32]byte{})
|
||||||
|
require.ErrorContains(t, "block does not exist", err)
|
||||||
|
|
||||||
|
blk, err := blocks.NewSignedBeaconBlock(util.HydrateSignedBeaconBlock(ðpb.SignedBeaconBlock{Signature: []byte{1}}))
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), blk))
|
||||||
|
|
||||||
|
st, _ := util.DeterministicGenesisState(t, 1)
|
||||||
|
r, err := blk.Block().HashTreeRoot()
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), st, r))
|
||||||
|
|
||||||
|
gotState, err := service.cfg.BeaconDB.State(context.Background(), r)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.DeepEqual(t, st.ToProto(), gotState.ToProto())
|
||||||
|
|
||||||
|
gotBlk, err := service.cfg.BeaconDB.Block(context.Background(), r)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.DeepEqual(t, blk, gotBlk)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestService_forkchoiceUpdateWithExecution_exceptionalCases(t *testing.T) {
|
||||||
|
hook := logTest.NewGlobal()
|
||||||
|
ctx := context.Background()
|
||||||
|
opts := testServiceOptsWithDB(t)
|
||||||
|
|
||||||
|
service, err := NewService(ctx, opts...)
|
||||||
|
require.NoError(t, err)
|
||||||
|
service.cfg.ProposerSlotIndexCache = cache.NewProposerPayloadIDsCache()
|
||||||
|
_, err = service.forkchoiceUpdateWithExecution(ctx, service.headRoot(), service.CurrentSlot()+1)
|
||||||
|
require.NoError(t, err)
|
||||||
|
hookErr := "could not notify forkchoice update"
|
||||||
|
invalidStateErr := "could not get state summary: could not find block in DB"
|
||||||
|
require.LogsDoNotContain(t, hook, invalidStateErr)
|
||||||
|
require.LogsDoNotContain(t, hook, hookErr)
|
||||||
|
gb, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.NoError(t, service.saveInitSyncBlock(ctx, [32]byte{'a'}, gb))
|
||||||
|
_, err = service.forkchoiceUpdateWithExecution(ctx, [32]byte{'a'}, service.CurrentSlot()+1)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.LogsContain(t, hook, invalidStateErr)
|
||||||
|
|
||||||
|
hook.Reset()
|
||||||
|
service.head = &head{
|
||||||
|
root: [32]byte{'a'},
|
||||||
|
block: nil, /* should not panic if notify head uses correct head */
|
||||||
|
}
|
||||||
|
|
||||||
|
// Block in Cache
|
||||||
|
b := util.NewBeaconBlock()
|
||||||
|
b.Block.Slot = 2
|
||||||
|
wsb, err := blocks.NewSignedBeaconBlock(b)
|
||||||
|
require.NoError(t, err)
|
||||||
|
r1, err := b.Block.HashTreeRoot()
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.NoError(t, service.saveInitSyncBlock(ctx, r1, wsb))
|
||||||
|
st, _ := util.DeterministicGenesisState(t, 1)
|
||||||
|
service.head = &head{
|
||||||
|
root: r1,
|
||||||
|
block: wsb,
|
||||||
|
state: st,
|
||||||
|
}
|
||||||
|
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(2, 1, [8]byte{1}, [32]byte{2})
|
||||||
|
_, err = service.forkchoiceUpdateWithExecution(ctx, r1, service.CurrentSlot())
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.LogsDoNotContain(t, hook, invalidStateErr)
|
||||||
|
require.LogsDoNotContain(t, hook, hookErr)
|
||||||
|
|
||||||
|
// Block in DB
|
||||||
|
b = util.NewBeaconBlock()
|
||||||
|
b.Block.Slot = 3
|
||||||
|
util.SaveBlock(t, ctx, service.cfg.BeaconDB, b)
|
||||||
|
r1, err = b.Block.HashTreeRoot()
|
||||||
|
require.NoError(t, err)
|
||||||
|
st, _ = util.DeterministicGenesisState(t, 1)
|
||||||
|
service.head = &head{
|
||||||
|
root: r1,
|
||||||
|
block: wsb,
|
||||||
|
state: st,
|
||||||
|
}
|
||||||
|
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(2, 1, [8]byte{1}, [32]byte{2})
|
||||||
|
_, err = service.forkchoiceUpdateWithExecution(ctx, r1, service.CurrentSlot()+1)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.LogsDoNotContain(t, hook, invalidStateErr)
|
||||||
|
require.LogsDoNotContain(t, hook, hookErr)
|
||||||
|
vId, payloadID, has := service.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(2, [32]byte{2})
|
||||||
|
require.Equal(t, true, has)
|
||||||
|
require.Equal(t, primitives.ValidatorIndex(1), vId)
|
||||||
|
require.Equal(t, [8]byte{1}, payloadID)
|
||||||
|
|
||||||
|
// Test zero headRoot returns immediately.
|
||||||
|
headRoot := service.headRoot()
|
||||||
|
_, err = service.forkchoiceUpdateWithExecution(ctx, [32]byte{}, service.CurrentSlot()+1)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, service.headRoot(), headRoot)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestService_forkchoiceUpdateWithExecution_SameHeadRootNewProposer(t *testing.T) {
|
||||||
|
service, tr := minimalTestService(t)
|
||||||
|
ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs
|
||||||
|
|
||||||
|
altairBlk := util.SaveBlock(t, ctx, beaconDB, util.NewBeaconBlockAltair())
|
||||||
|
altairBlkRoot, err := altairBlk.Block().HashTreeRoot()
|
||||||
|
require.NoError(t, err)
|
||||||
|
bellatrixBlk := util.SaveBlock(t, ctx, beaconDB, util.NewBeaconBlockBellatrix())
|
||||||
|
bellatrixBlkRoot, err := bellatrixBlk.Block().HashTreeRoot()
|
||||||
|
require.NoError(t, err)
|
||||||
|
st, _ := util.DeterministicGenesisState(t, 10)
|
||||||
|
service.head = &head{
|
||||||
|
state: st,
|
||||||
|
}
|
||||||
|
|
||||||
|
ojc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||||
|
ofc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||||
|
state, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||||
|
state, blkRoot, err = prepareForkchoiceState(ctx, 1, altairBlkRoot, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||||
|
state, blkRoot, err = prepareForkchoiceState(ctx, 2, bellatrixBlkRoot, altairBlkRoot, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||||
|
|
||||||
|
service.cfg.ExecutionEngineCaller = &mockExecution.EngineClient{}
|
||||||
|
require.NoError(t, beaconDB.SaveState(ctx, st, bellatrixBlkRoot))
|
||||||
|
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, bellatrixBlkRoot))
|
||||||
|
sb, err := blocks.NewSignedBeaconBlock(util.HydrateSignedBeaconBlockBellatrix(ðpb.SignedBeaconBlockBellatrix{}))
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.NoError(t, beaconDB.SaveBlock(ctx, sb))
|
||||||
|
r, err := sb.Block().HashTreeRoot()
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Set head to be the same but proposing next slot
|
||||||
|
service.head.root = r
|
||||||
|
service.head.block = sb
|
||||||
|
service.head.state = st
|
||||||
|
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(service.CurrentSlot()+1, 0, [8]byte{}, [32]byte{} /* root */)
|
||||||
|
_, err = service.forkchoiceUpdateWithExecution(ctx, r, service.CurrentSlot()+1)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestShouldOverrideFCU(t *testing.T) {
|
||||||
|
hook := logTest.NewGlobal()
|
||||||
|
service, tr := minimalTestService(t)
|
||||||
|
ctx, fcs := tr.ctx, tr.fcs
|
||||||
|
|
||||||
|
service.SetGenesisTime(time.Now().Add(-time.Duration(2*params.BeaconConfig().SecondsPerSlot) * time.Second))
|
||||||
|
headRoot := [32]byte{'b'}
|
||||||
|
parentRoot := [32]byte{'a'}
|
||||||
|
ojc := ðpb.Checkpoint{}
|
||||||
|
st, root, err := prepareForkchoiceState(ctx, 1, parentRoot, [32]byte{}, [32]byte{}, ojc, ojc)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.NoError(t, fcs.InsertNode(ctx, st, root))
|
||||||
|
st, root, err = prepareForkchoiceState(ctx, 2, headRoot, parentRoot, [32]byte{}, ojc, ojc)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.NoError(t, fcs.InsertNode(ctx, st, root))
|
||||||
|
|
||||||
|
require.Equal(t, primitives.Slot(2), service.CurrentSlot())
|
||||||
|
require.Equal(t, true, service.shouldOverrideFCU(headRoot, 2))
|
||||||
|
require.LogsDoNotContain(t, hook, "12 seconds")
|
||||||
|
require.Equal(t, false, service.shouldOverrideFCU(parentRoot, 2))
|
||||||
|
require.LogsContain(t, hook, "12 seconds")
|
||||||
|
|
||||||
|
head, err := fcs.Head(ctx)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, headRoot, head)
|
||||||
|
|
||||||
|
fcs.SetGenesisTime(uint64(time.Now().Unix()) - 29)
|
||||||
|
require.Equal(t, true, service.shouldOverrideFCU(parentRoot, 3))
|
||||||
|
require.LogsDoNotContain(t, hook, "10 seconds")
|
||||||
|
fcs.SetGenesisTime(uint64(time.Now().Unix()) - 24)
|
||||||
|
service.SetGenesisTime(time.Now().Add(-time.Duration(2*params.BeaconConfig().SecondsPerSlot+10) * time.Second))
|
||||||
|
require.Equal(t, false, service.shouldOverrideFCU(parentRoot, 3))
|
||||||
|
require.LogsContain(t, hook, "10 seconds")
|
||||||
|
}
|
||||||
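Editor's note: TestShouldOverrideFCU above exercises the timing half of the override decision — an attempted late-block reorg is abandoned once the node is far enough into the slot that attestations for the current head have been processed. A small sketch of that check, where thresholdSeconds stands in for doublylinkedtree.ProcessAttestationsThreshold:

package sketch

// withinReorgWindow reports whether the node is still early enough in the
// current slot for a late-block reorg attempt to stay alive.
func withinReorgWindow(genesisUnix, nowUnix int64, currentSlot, secondsPerSlot, thresholdSeconds uint64) bool {
	slotStart := genesisUnix + int64(currentSlot*secondsPerSlot)
	if nowUnix < slotStart {
		return false
	}
	secsIntoSlot := uint64(nowUnix - slotStart)
	return secsIntoSlot < thresholdSeconds
}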
@@ -6,22 +6,21 @@ import (
 	"fmt"
 
 	"github.com/pkg/errors"
-	"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
-	statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
-	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
-	"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice"
-	doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
-	"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
-	"github.com/prysmaticlabs/prysm/beacon-chain/state"
-	"github.com/prysmaticlabs/prysm/config/features"
-	fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
-	"github.com/prysmaticlabs/prysm/config/params"
-	"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
-	types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
-	"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
-	"github.com/prysmaticlabs/prysm/encoding/bytesutil"
-	ethpbv1 "github.com/prysmaticlabs/prysm/proto/eth/v1"
-	"github.com/prysmaticlabs/prysm/time/slots"
+	"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
+	statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
+	"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
+	"github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice"
+	"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
+	fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
+	"github.com/prysmaticlabs/prysm/v4/config/params"
+	"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
+	"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
+	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
+	"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
+	"github.com/prysmaticlabs/prysm/v4/math"
+	ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
+	"github.com/prysmaticlabs/prysm/v4/runtime/version"
+	"github.com/prysmaticlabs/prysm/v4/time/slots"
 	"github.com/sirupsen/logrus"
 	"go.opencensus.io/trace"
 )
@@ -29,17 +28,9 @@
 // UpdateAndSaveHeadWithBalances updates the beacon state head after getting justified balanced from cache.
 // This function is only used in spec-tests, it does save the head after updating it.
 func (s *Service) UpdateAndSaveHeadWithBalances(ctx context.Context) error {
-	jp, err := s.store.JustifiedCheckpt()
-	if err != nil {
-		return err
-	}
-
-	balances, err := s.justifiedBalances.get(ctx, bytesutil.ToBytes32(jp.Root))
-	if err != nil {
-		msg := fmt.Sprintf("could not read balances for state w/ justified checkpoint %#x", jp.Root)
-		return errors.Wrap(err, msg)
-	}
-	headRoot, err := s.updateHead(ctx, balances)
+	s.cfg.ForkChoiceStore.Lock()
+	defer s.cfg.ForkChoiceStore.Unlock()
+	headRoot, err := s.cfg.ForkChoiceStore.Head(ctx)
 	if err != nil {
 		return errors.Wrap(err, "could not update head")
 	}
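Editor's note: after this change the justified-balance bookkeeping moves inside the fork choice store, so the spec-test helper only has to lock the store, ask it for head, and save. A minimal sketch of that calling pattern under assumed store semantics:

package sketch

import (
	"context"
	"sync"
)

// forkChoiceStore is a hypothetical reduction of the store surface used by
// UpdateAndSaveHeadWithBalances after this change: head is computed
// internally, callers only take the lock and ask for it.
type forkChoiceStore struct {
	mu   sync.Mutex
	head [32]byte
}

func (f *forkChoiceStore) Lock()   { f.mu.Lock() }
func (f *forkChoiceStore) Unlock() { f.mu.Unlock() }

func (f *forkChoiceStore) Head(ctx context.Context) ([32]byte, error) {
	return f.head, ctx.Err()
}

// updateAndSaveHead shows the calling pattern: lock, compute head, then save.
func updateAndSaveHead(ctx context.Context, f *forkChoiceStore, save func(context.Context, [32]byte) error) error {
	f.Lock()
	defer f.Unlock()
	root, err := f.Head(ctx)
	if err != nil {
		return err
	}
	return save(ctx, root)
}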
@@ -56,72 +47,24 @@ func (s *Service) UpdateAndSaveHeadWithBalances(ctx context.Context) error {
|
|||||||
|
|
||||||
// This defines the current chain service's view of head.
|
// This defines the current chain service's view of head.
|
||||||
type head struct {
|
type head struct {
|
||||||
slot types.Slot // current head slot.
|
root [32]byte // current head root.
|
||||||
root [32]byte // current head root.
|
block interfaces.ReadOnlySignedBeaconBlock // current head block.
|
||||||
block interfaces.SignedBeaconBlock // current head block.
|
state state.BeaconState // current head state.
|
||||||
state state.BeaconState // current head state.
|
|
||||||
}
|
|
||||||
|
|
||||||
// Determined the head from the fork choice service and saves its new data
|
|
||||||
// (head root, head block, and head state) to the local service cache.
|
|
||||||
func (s *Service) updateHead(ctx context.Context, balances []uint64) ([32]byte, error) {
|
|
||||||
ctx, span := trace.StartSpan(ctx, "blockChain.updateHead")
|
|
||||||
defer span.End()
|
|
||||||
|
|
||||||
// Get head from the fork choice service.
|
|
||||||
f, err := s.store.FinalizedCheckpt()
|
|
||||||
if err != nil {
|
|
||||||
return [32]byte{}, errors.Wrap(err, "could not get finalized checkpoint")
|
|
||||||
}
|
|
||||||
j, err := s.store.JustifiedCheckpt()
|
|
||||||
if err != nil {
|
|
||||||
return [32]byte{}, errors.Wrap(err, "could not get justified checkpoint")
|
|
||||||
}
|
|
||||||
// To get head before the first justified epoch, the fork choice will start with origin root
|
|
||||||
// instead of zero hashes.
|
|
||||||
headStartRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(j.Root))
|
|
||||||
|
|
||||||
// In order to process head, fork choice store requires justified info.
|
|
||||||
// If the fork choice store is missing justified block info, a node should
|
|
||||||
// re-initiate fork choice store using the latest justified info.
|
|
||||||
// This recovers a fatal condition and should not happen in run time.
|
|
||||||
if !s.cfg.ForkChoiceStore.HasNode(headStartRoot) {
|
|
||||||
jb, err := s.getBlock(ctx, headStartRoot)
|
|
||||||
if err != nil {
|
|
||||||
return [32]byte{}, err
|
|
||||||
}
|
|
||||||
st, err := s.cfg.StateGen.StateByRoot(ctx, s.ensureRootNotZeros(headStartRoot))
|
|
||||||
if err != nil {
|
|
||||||
return [32]byte{}, err
|
|
||||||
}
|
|
||||||
if features.Get().EnableForkChoiceDoublyLinkedTree {
|
|
||||||
s.cfg.ForkChoiceStore = doublylinkedtree.New(j.Epoch, f.Epoch)
|
|
||||||
} else {
|
|
||||||
s.cfg.ForkChoiceStore = protoarray.New(j.Epoch, f.Epoch)
|
|
||||||
}
|
|
||||||
if err := s.insertBlockToForkChoiceStore(ctx, jb.Block(), headStartRoot, st, f, j); err != nil {
|
|
||||||
return [32]byte{}, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return s.cfg.ForkChoiceStore.Head(ctx, headStartRoot, balances)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// This saves head info to the local service cache, it also saves the
|
// This saves head info to the local service cache, it also saves the
|
||||||
// new head root to the DB.
|
// new head root to the DB.
|
||||||
func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock interfaces.SignedBeaconBlock, headState state.BeaconState) error {
|
// Caller of the method MUST acquire a lock on forkchoice.
|
||||||
|
func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock interfaces.ReadOnlySignedBeaconBlock, headState state.BeaconState) error {
|
||||||
ctx, span := trace.StartSpan(ctx, "blockChain.saveHead")
|
ctx, span := trace.StartSpan(ctx, "blockChain.saveHead")
|
||||||
defer span.End()
|
defer span.End()
|
||||||
|
|
||||||
// Do nothing if head hasn't changed.
|
// Do nothing if head hasn't changed.
|
||||||
oldHeadroot, err := s.HeadRoot(ctx)
|
if !s.isNewHead(newHeadRoot) {
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if newHeadRoot == bytesutil.ToBytes32(oldHeadroot) {
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
if err := wrapper.BeaconBlockIsNil(headBlock); err != nil {
|
|
||||||
|
if err := blocks.BeaconBlockIsNil(headBlock); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if headState == nil || headState.IsNil() {
|
if headState == nil || headState.IsNil() {
|
||||||
@@ -134,21 +77,55 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// A chain re-org occurred, so we fire an event notifying the rest of the services.
|
|
||||||
s.headLock.RLock()
|
s.headLock.RLock()
|
||||||
oldHeadRoot := s.headRoot()
|
oldHeadBlock, err := s.headBlock()
|
||||||
oldStateRoot := s.headBlock().Block().StateRoot()
|
if err != nil {
|
||||||
|
s.headLock.RUnlock()
|
||||||
|
return errors.Wrap(err, "could not get old head block")
|
||||||
|
}
|
||||||
|
oldStateRoot := oldHeadBlock.Block().StateRoot()
|
||||||
s.headLock.RUnlock()
|
s.headLock.RUnlock()
|
||||||
headSlot := s.HeadSlot()
|
headSlot := s.HeadSlot()
|
||||||
newHeadSlot := headBlock.Block().Slot()
|
newHeadSlot := headBlock.Block().Slot()
|
||||||
newStateRoot := headBlock.Block().StateRoot()
|
newStateRoot := headBlock.Block().StateRoot()
|
||||||
if bytesutil.ToBytes32(headBlock.Block().ParentRoot()) != bytesutil.ToBytes32(oldHeadroot) {
|
|
||||||
|
r, err := s.HeadRoot(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrap(err, "could not get old head root")
|
||||||
|
}
|
||||||
|
oldHeadRoot := bytesutil.ToBytes32(r)
|
||||||
|
if headBlock.Block().ParentRoot() != oldHeadRoot {
|
||||||
|
// A chain re-org occurred, so we fire an event notifying the rest of the services.
|
||||||
|
commonRoot, forkSlot, err := s.cfg.ForkChoiceStore.CommonAncestor(ctx, oldHeadRoot, newHeadRoot)
|
||||||
|
if err != nil {
|
||||||
|
log.WithError(err).Error("Could not find common ancestor root")
|
||||||
|
commonRoot = params.BeaconConfig().ZeroHash
|
||||||
|
}
|
||||||
|
dis := headSlot + newHeadSlot - 2*forkSlot
|
||||||
|
dep := math.Max(uint64(headSlot-forkSlot), uint64(newHeadSlot-forkSlot))
|
||||||
|
oldWeight, err := s.cfg.ForkChoiceStore.Weight(oldHeadRoot)
|
||||||
|
if err != nil {
|
||||||
|
log.WithField("root", fmt.Sprintf("%#x", oldHeadRoot)).Warn("could not determine node weight")
|
||||||
|
}
|
||||||
|
newWeight, err := s.cfg.ForkChoiceStore.Weight(newHeadRoot)
|
||||||
|
if err != nil {
|
||||||
|
log.WithField("root", fmt.Sprintf("%#x", newHeadRoot)).Warn("could not determine node weight")
|
||||||
|
}
|
||||||
log.WithFields(logrus.Fields{
|
log.WithFields(logrus.Fields{
|
||||||
"newSlot": fmt.Sprintf("%d", newHeadSlot),
|
"newSlot": fmt.Sprintf("%d", newHeadSlot),
|
||||||
"oldSlot": fmt.Sprintf("%d", headSlot),
|
"newRoot": fmt.Sprintf("%#x", newHeadRoot),
|
||||||
}).Debug("Chain reorg occurred")
|
"newWeight": newWeight,
|
||||||
absoluteSlotDifference := slots.AbsoluteValueSlotDifference(newHeadSlot, headSlot)
|
"oldSlot": fmt.Sprintf("%d", headSlot),
|
||||||
isOptimistic, err := s.IsOptimistic(ctx)
|
"oldRoot": fmt.Sprintf("%#x", oldHeadRoot),
|
||||||
|
"oldWeight": oldWeight,
|
||||||
|
"commonAncestorRoot": fmt.Sprintf("%#x", commonRoot),
|
||||||
|
"distance": dis,
|
||||||
|
"depth": dep,
|
||||||
|
}).Info("Chain reorg occurred")
|
||||||
|
reorgDistance.Observe(float64(dis))
|
||||||
|
reorgDepth.Observe(float64(dep))
|
||||||
|
|
||||||
|
isOptimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(newHeadRoot)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "could not check if node is optimistically synced")
|
return errors.Wrap(err, "could not check if node is optimistically synced")
|
||||||
}
|
}
|
||||||
@@ -156,24 +133,26 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
|
|||||||
Type: statefeed.Reorg,
|
Type: statefeed.Reorg,
|
||||||
Data: ðpbv1.EventChainReorg{
|
Data: ðpbv1.EventChainReorg{
|
||||||
Slot: newHeadSlot,
|
Slot: newHeadSlot,
|
||||||
Depth: absoluteSlotDifference,
|
Depth: math.Max(uint64(headSlot-forkSlot), uint64(newHeadSlot-forkSlot)),
|
||||||
OldHeadBlock: oldHeadRoot[:],
|
OldHeadBlock: oldHeadRoot[:],
|
||||||
NewHeadBlock: newHeadRoot[:],
|
NewHeadBlock: newHeadRoot[:],
|
||||||
OldHeadState: oldStateRoot,
|
OldHeadState: oldStateRoot[:],
|
||||||
NewHeadState: newStateRoot,
|
NewHeadState: newStateRoot[:],
|
||||||
Epoch: slots.ToEpoch(newHeadSlot),
|
Epoch: slots.ToEpoch(newHeadSlot),
|
||||||
ExecutionOptimistic: isOptimistic,
|
ExecutionOptimistic: isOptimistic,
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
|
|
||||||
if err := s.saveOrphanedAtts(ctx, bytesutil.ToBytes32(oldHeadroot), newHeadRoot); err != nil {
|
if err := s.saveOrphanedOperations(ctx, oldHeadRoot, newHeadRoot); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
reorgCount.Inc()
|
reorgCount.Inc()
|
||||||
}
|
}
|
||||||
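Editor's note: the reorg branch above records two new metrics. With the common ancestor at forkSlot, the distance is headSlot + newHeadSlot - 2*forkSlot (the total slots spanned by both branches) and the depth is the longer branch; for example, an old head at slot 10, a new head at slot 12, and a fork at slot 8 give distance 6 and depth 4. A sketch of the same arithmetic:

package sketch

// reorgMetrics mirrors the computation in the reorg branch above. It assumes
// both head slots are at or past the fork slot.
func reorgMetrics(oldHeadSlot, newHeadSlot, forkSlot uint64) (distance, depth uint64) {
	distance = (oldHeadSlot - forkSlot) + (newHeadSlot - forkSlot)
	depth = oldHeadSlot - forkSlot
	if d := newHeadSlot - forkSlot; d > depth {
		depth = d
	}
	return distance, depth
}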
|
|
||||||
// Cache the new head info.
|
// Cache the new head info.
|
||||||
s.setHead(newHeadRoot, headBlock, headState)
|
if err := s.setHead(newHeadRoot, headBlock, headState); err != nil {
|
||||||
|
return errors.Wrap(err, "could not set head")
|
||||||
|
}
|
||||||
|
|
||||||
// Save the new head root to DB.
|
// Save the new head root to DB.
|
||||||
if err := s.cfg.BeaconDB.SaveHeadBlockRoot(ctx, newHeadRoot); err != nil {
|
if err := s.cfg.BeaconDB.SaveHeadBlockRoot(ctx, newHeadRoot); err != nil {
|
||||||
@@ -183,7 +162,7 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
|
|||||||
// Forward an event capturing a new chain head over a common event feed
|
// Forward an event capturing a new chain head over a common event feed
|
||||||
// done in a goroutine to avoid blocking the critical runtime main routine.
|
// done in a goroutine to avoid blocking the critical runtime main routine.
|
||||||
go func() {
|
go func() {
|
||||||
if err := s.notifyNewHeadEvent(ctx, newHeadSlot, headState, newStateRoot, newHeadRoot[:]); err != nil {
|
if err := s.notifyNewHeadEvent(ctx, newHeadSlot, headState, newStateRoot[:], newHeadRoot[:]); err != nil {
|
||||||
log.WithError(err).Error("Could not notify event feed of new chain head")
|
log.WithError(err).Error("Could not notify event feed of new chain head")
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
@@ -194,8 +173,8 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
|
|||||||
// This gets called to update canonical root mapping. It does not save head block
|
// This gets called to update canonical root mapping. It does not save head block
|
||||||
// root in DB. With the inception of initial-sync-cache-state flag, it uses finalized
|
// root in DB. With the inception of initial-sync-cache-state flag, it uses finalized
|
||||||
// check point as anchors to resume sync therefore head is no longer needed to be saved on per slot basis.
|
// check point as anchors to resume sync therefore head is no longer needed to be saved on per slot basis.
|
||||||
func (s *Service) saveHeadNoDB(ctx context.Context, b interfaces.SignedBeaconBlock, r [32]byte, hs state.BeaconState) error {
|
func (s *Service) saveHeadNoDB(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock, r [32]byte, hs state.BeaconState) error {
|
||||||
if err := wrapper.BeaconBlockIsNil(b); err != nil {
|
if err := blocks.BeaconBlockIsNil(b); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
cachedHeadRoot, err := s.HeadRoot(ctx)
|
cachedHeadRoot, err := s.HeadRoot(ctx)
|
||||||
@@ -206,44 +185,61 @@ func (s *Service) saveHeadNoDB(ctx context.Context, b interfaces.SignedBeaconBlo
 return nil
 }

-s.setHeadInitialSync(r, b.Copy(), hs)
+bCp, err := b.Copy()
+if err != nil {
+return err
+}
+if err := s.setHeadInitialSync(r, bCp, hs); err != nil {
+return errors.Wrap(err, "could not set head")
+}
 return nil
 }

 // This sets head view object which is used to track the head slot, root, block and state.
-func (s *Service) setHead(root [32]byte, block interfaces.SignedBeaconBlock, state state.BeaconState) {
+func (s *Service) setHead(root [32]byte, block interfaces.ReadOnlySignedBeaconBlock, state state.BeaconState) error {
 s.headLock.Lock()
 defer s.headLock.Unlock()

 // This does a full copy of the block and state.
+bCp, err := block.Copy()
+if err != nil {
+return err
+}
 s.head = &head{
-slot: block.Block().Slot(),
 root: root,
-block: block.Copy(),
+block: bCp,
 state: state.Copy(),
 }
+return nil
 }

 // This sets head view object which is used to track the head slot, root, block and state. The method
 // assumes that state being passed into the method will not be modified by any other alternate
 // caller which holds the state's reference.
-func (s *Service) setHeadInitialSync(root [32]byte, block interfaces.SignedBeaconBlock, state state.BeaconState) {
+func (s *Service) setHeadInitialSync(root [32]byte, block interfaces.ReadOnlySignedBeaconBlock, state state.BeaconState) error {
 s.headLock.Lock()
 defer s.headLock.Unlock()

 // This does a full copy of the block only.
+bCp, err := block.Copy()
+if err != nil {
+return err
+}
 s.head = &head{
-slot: block.Block().Slot(),
 root: root,
-block: block.Copy(),
+block: bCp,
 state: state,
 }
+return nil
 }

 // This returns the head slot.
 // This is a lock free version.
-func (s *Service) headSlot() types.Slot {
+func (s *Service) headSlot() primitives.Slot {
-return s.head.slot
+if s.head == nil || s.head.block == nil || s.head.block.Block() == nil {
+return 0
+}
+return s.head.block.Block().Slot()
 }

 // This returns the head root.
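As the hunk above shows, copying a block is no longer infallible: `Copy()` now returns `(block, error)`, so `setHead` and `setHeadInitialSync` must return errors and every caller has to copy first and fail fast before swapping the cached head. Below is a minimal, self-contained sketch of that pattern; the `toyBlock` and `headTracker` types are hypothetical stand-ins, not Prysm's actual interfaces.

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

// toyBlock stands in for a signed beacon block; Copy can now fail,
// e.g. when the wrapped block is nil.
type toyBlock struct{ slot uint64 }

func (b *toyBlock) Copy() (*toyBlock, error) {
	if b == nil {
		return nil, errors.New("nil block")
	}
	cp := *b
	return &cp, nil
}

type headTracker struct {
	mu   sync.Mutex
	head *toyBlock
}

// setHead mirrors the new error-returning shape: copy first, fail fast,
// and only then swap the cached head under the lock.
func (t *headTracker) setHead(b *toyBlock) error {
	cp, err := b.Copy()
	if err != nil {
		return fmt.Errorf("could not set head: %w", err)
	}
	t.mu.Lock()
	defer t.mu.Unlock()
	t.head = cp
	return nil
}

func main() {
	t := &headTracker{}
	if err := t.setHead(&toyBlock{slot: 42}); err != nil {
		fmt.Println("set head failed:", err)
		return
	}
	fmt.Println("head slot:", t.head.slot)
}
```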
@@ -260,7 +256,7 @@ func (s *Service) headRoot() [32]byte {
 // This returns the head block.
 // It does a full copy on head block for immutability.
 // This is a lock free version.
-func (s *Service) headBlock() interfaces.SignedBeaconBlock {
+func (s *Service) headBlock() (interfaces.ReadOnlySignedBeaconBlock, error) {
 return s.head.block.Copy()
 }

@@ -268,12 +264,22 @@ func (s *Service) headBlock() interfaces.SignedBeaconBlock {
 // It does a full copy on head state for immutability.
 // This is a lock free version.
 func (s *Service) headState(ctx context.Context) state.BeaconState {
-_, span := trace.StartSpan(ctx, "blockChain.headState")
+ctx, span := trace.StartSpan(ctx, "blockChain.headState")
 defer span.End()

 return s.head.state.Copy()
 }

+// This returns a read only version of the head state.
+// It does not perform a copy of the head state.
+// This is a lock free version.
+func (s *Service) headStateReadOnly(ctx context.Context) state.ReadOnlyBeaconState {
+ctx, span := trace.StartSpan(ctx, "blockChain.headStateReadOnly")
+defer span.End()
+
+return s.head.state
+}

 // This returns the genesis validators root of the head state.
 // This is a lock free version.
 func (s *Service) headGenesisValidatorsRoot() [32]byte {
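The new `headStateReadOnly` accessor added above skips the deep copy that `headState` performs, trading mutability for cost: callers that only read fields get a zero-copy view. A rough, hedged illustration of that trade-off with a stand-in state type (`fakeState` and `readOnlyState` are hypothetical, not Prysm's state package):

```go
package main

import "fmt"

// fakeState stands in for a beacon state with a large balances slice.
type fakeState struct{ balances []uint64 }

// Copy is what the mutable accessor pays for: a deep copy of every field.
func (s *fakeState) Copy() *fakeState {
	b := make([]uint64, len(s.balances))
	copy(b, s.balances)
	return &fakeState{balances: b}
}

// readOnlyState exposes only getters, so a caller cannot mutate the
// shared head state and no copy is needed.
type readOnlyState interface {
	Balance(i int) uint64
}

func (s *fakeState) Balance(i int) uint64 { return s.balances[i] }

func main() {
	head := &fakeState{balances: make([]uint64, 1_000_000)}

	// Mutable access: safe to modify, but costs a full copy of the slice.
	mutable := head.Copy()
	mutable.balances[0] = 32

	// Read-only access: zero-copy view of the same underlying state.
	var ro readOnlyState = head
	fmt.Println(mutable.balances[0], ro.Balance(0))
}
```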
@@ -283,14 +289,14 @@ func (s *Service) headGenesisValidatorsRoot() [32]byte {
 // This returns the validator referenced by the provided index in
 // the head state.
 // This is a lock free version.
-func (s *Service) headValidatorAtIndex(index types.ValidatorIndex) (state.ReadOnlyValidator, error) {
+func (s *Service) headValidatorAtIndex(index primitives.ValidatorIndex) (state.ReadOnlyValidator, error) {
 return s.head.state.ValidatorAtIndexReadOnly(index)
 }

 // This returns the validator index referenced by the provided pubkey in
 // the head state.
 // This is a lock free version.
-func (s *Service) headValidatorIndexAtPubkey(pubKey [fieldparams.BLSPubkeyLength]byte) (types.ValidatorIndex, bool) {
+func (s *Service) headValidatorIndexAtPubkey(pubKey [fieldparams.BLSPubkeyLength]byte) (primitives.ValidatorIndex, bool) {
 return s.head.state.ValidatorIndexByPubkey(pubKey)
 }

@@ -304,7 +310,7 @@ func (s *Service) hasHeadState() bool {
 // chain head is determined, set, and saved to disk.
 func (s *Service) notifyNewHeadEvent(
 ctx context.Context,
-newHeadSlot types.Slot,
+newHeadSlot primitives.Slot,
 newHeadState state.BeaconState,
 newHeadStateRoot,
 newHeadRoot []byte,
@@ -312,7 +318,7 @@ func (s *Service) notifyNewHeadEvent(
 previousDutyDependentRoot := s.originBlockRoot[:]
 currentDutyDependentRoot := s.originBlockRoot[:]

-var previousDutyEpoch types.Epoch
+var previousDutyEpoch primitives.Epoch
 currentDutyEpoch := slots.ToEpoch(newHeadSlot)
 if currentDutyEpoch > 0 {
 previousDutyEpoch = currentDutyEpoch.Sub(1)
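The recurring `types.Slot` → `primitives.Slot` and `types.Epoch` → `primitives.Epoch` renames in these hunks are mechanical: the consensus primitives moved into a dedicated `consensus-types/primitives` package, but they remain thin defined types over unsigned integers. The sketch below is illustrative only and is not the package's exact contents; the saturating `Sub` and explicit `slotsPerEpoch` parameter are assumptions made to keep the example self-contained.

```go
// Package primitives (illustrative sketch, not the real package).
package primitives

// Slot and Epoch are defined integer types so the compiler rejects
// accidental mixing of slot and epoch arithmetic.
type Slot uint64
type Epoch uint64

// Sub subtracts without wrapping below zero, mirroring the guard
// `if currentDutyEpoch > 0` used before deriving the previous duty epoch.
func (e Epoch) Sub(n uint64) Epoch {
	if uint64(e) < n {
		return 0
	}
	return e - Epoch(n)
}

// ToEpoch converts a slot to its epoch given a slots-per-epoch constant.
func ToEpoch(s Slot, slotsPerEpoch uint64) Epoch {
	return Epoch(uint64(s) / slotsPerEpoch)
}
```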
@@ -356,12 +362,12 @@ func (s *Service) notifyNewHeadEvent(
 return nil
 }

-// This saves the attestations between `orphanedRoot` and the common ancestor root that is derived using `newHeadRoot`.
+// This saves the Attestations and BLSToExecChanges between `orphanedRoot` and the common ancestor root that is derived using `newHeadRoot`.
 // It also filters out the attestations that is one epoch older as a defense so invalid attestations don't flow into the attestation pool.
-func (s *Service) saveOrphanedAtts(ctx context.Context, orphanedRoot [32]byte, newHeadRoot [32]byte) error {
+func (s *Service) saveOrphanedOperations(ctx context.Context, orphanedRoot [32]byte, newHeadRoot [32]byte) error {
-commonAncestorRoot, err := s.ForkChoicer().CommonAncestorRoot(ctx, newHeadRoot, orphanedRoot)
+commonAncestorRoot, _, err := s.cfg.ForkChoiceStore.CommonAncestor(ctx, newHeadRoot, orphanedRoot)
 switch {
-// Exit early if there's no common ancestor as there would be nothing to save.
+// Exit early if there's no common ancestor and root doesn't exist, there would be nothing to save.
 case errors.Is(err, forkchoice.ErrUnknownCommonAncestor):
 return nil
 case err != nil:
@@ -397,7 +403,30 @@ func (s *Service) saveOrphanedAtts(ctx context.Context, orphanedRoot [32]byte, n
 }
 saveOrphanedAttCount.Inc()
 }
-orphanedRoot = bytesutil.ToBytes32(orphanedBlk.Block().ParentRoot())
+for _, as := range orphanedBlk.Block().Body().AttesterSlashings() {
+if err := s.cfg.SlashingPool.InsertAttesterSlashing(ctx, s.headStateReadOnly(ctx), as); err != nil {
+log.WithError(err).Error("Could not insert reorg attester slashing")
+}
+}
+for _, vs := range orphanedBlk.Block().Body().ProposerSlashings() {
+if err := s.cfg.SlashingPool.InsertProposerSlashing(ctx, s.headStateReadOnly(ctx), vs); err != nil {
+log.WithError(err).Error("Could not insert reorg proposer slashing")
+}
+}
+for _, v := range orphanedBlk.Block().Body().VoluntaryExits() {
+s.cfg.ExitPool.InsertVoluntaryExit(v)
+}
+if orphanedBlk.Version() >= version.Capella {
+changes, err := orphanedBlk.Block().Body().BLSToExecutionChanges()
+if err != nil {
+return errors.Wrap(err, "could not get BLSToExecutionChanges")
+}
+for _, c := range changes {
+s.cfg.BLSToExecPool.InsertBLSToExecChange(c)
+}
+}
+parentRoot := orphanedBlk.Block().ParentRoot()
+orphanedRoot = bytesutil.ToBytes32(parentRoot[:])
 }
 return nil
 }
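The expanded loop above walks the orphaned branch from `orphanedRoot` back toward the common ancestor with the new head, re-inserting every recoverable operation (attestations, slashings, exits, and post-Capella BLS-to-execution changes) into the node's pools so they can be re-included on the canonical chain. A simplified, self-contained sketch of that walk follows; the `toyBlock` type, the single string pool, and the function name are hypothetical stand-ins for Prysm's block and pool types.

```go
package main

import "fmt"

// toyBlock is a stand-in for an orphaned beacon block body.
type toyBlock struct {
	root       [32]byte
	parentRoot [32]byte
	exits      []string // stand-in for exits, slashings, attestations, etc.
}

// reinsertOrphanedOps walks parent pointers from the orphaned head back to
// the common ancestor, pushing each block's operations back into a pool.
func reinsertOrphanedOps(blocks map[[32]byte]*toyBlock, orphaned, commonAncestor [32]byte, pool *[]string) error {
	for orphaned != commonAncestor {
		blk, ok := blocks[orphaned]
		if !ok {
			return fmt.Errorf("unknown block %x", orphaned[:4])
		}
		*pool = append(*pool, blk.exits...)
		orphaned = blk.parentRoot
	}
	return nil
}

func main() {
	a, b, c := [32]byte{'a'}, [32]byte{'b'}, [32]byte{'c'}
	chain := map[[32]byte]*toyBlock{
		b: {root: b, parentRoot: a, exits: []string{"exit-1"}},
		c: {root: c, parentRoot: b, exits: []string{"exit-2"}},
	}
	var pool []string
	if err := reinsertOrphanedOps(chain, c, a, &pool); err != nil {
		fmt.Println("walk failed:", err)
		return
	}
	fmt.Println(pool) // [exit-2 exit-1]
}
```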
@@ -5,17 +5,17 @@ import (
 "fmt"

 "github.com/pkg/errors"
-"github.com/prysmaticlabs/prysm/async"
+"github.com/prysmaticlabs/prysm/v4/async"
-"github.com/prysmaticlabs/prysm/beacon-chain/cache"
+"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
-"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
+"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/altair"
-"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
+"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
-"github.com/prysmaticlabs/prysm/beacon-chain/core/signing"
+"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing"
-"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
+"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
-"github.com/prysmaticlabs/prysm/beacon-chain/state"
+"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
-"github.com/prysmaticlabs/prysm/config/params"
+"github.com/prysmaticlabs/prysm/v4/config/params"
-types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
+"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
-ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
+ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
-"github.com/prysmaticlabs/prysm/time/slots"
+"github.com/prysmaticlabs/prysm/v4/time/slots"
 )

 // Initialize the state cache for sync committees.
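The blanket rewrite of `github.com/prysmaticlabs/prysm/...` imports to `github.com/prysmaticlabs/prysm/v4/...` follows Go's semantic import versioning: once a module's go.mod declares major version 4, every import of its packages must carry the `/v4` suffix. A minimal sketch of what a consuming file looks like, assuming the prysm `/v4` module is already required in the build's go.mod; the package and function below are otherwise hypothetical.

```go
// With `module github.com/prysmaticlabs/prysm/v4` declared in go.mod,
// both external consumers and the module's own packages import paths
// that encode the major version.
package example

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
)

// printSlot is a hypothetical helper showing the versioned import in use.
func printSlot(s primitives.Slot) {
	fmt.Println("slot:", s)
}
```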
@@ -24,36 +24,36 @@ var syncCommitteeHeadStateCache = cache.NewSyncCommitteeHeadState()
|
|||||||
// HeadSyncCommitteeFetcher is the interface that wraps the head sync committee related functions.
|
// HeadSyncCommitteeFetcher is the interface that wraps the head sync committee related functions.
|
||||||
// The head sync committee functions return callers sync committee indices and public keys with respect to current head state.
|
// The head sync committee functions return callers sync committee indices and public keys with respect to current head state.
|
||||||
type HeadSyncCommitteeFetcher interface {
|
type HeadSyncCommitteeFetcher interface {
|
||||||
HeadSyncCommitteeIndices(ctx context.Context, index types.ValidatorIndex, slot types.Slot) ([]types.CommitteeIndex, error)
|
HeadSyncCommitteeIndices(ctx context.Context, index primitives.ValidatorIndex, slot primitives.Slot) ([]primitives.CommitteeIndex, error)
|
||||||
HeadSyncCommitteePubKeys(ctx context.Context, slot types.Slot, committeeIndex types.CommitteeIndex) ([][]byte, error)
|
HeadSyncCommitteePubKeys(ctx context.Context, slot primitives.Slot, committeeIndex primitives.CommitteeIndex) ([][]byte, error)
|
||||||
}
|
}
|
||||||
|
|
||||||
// HeadDomainFetcher is the interface that wraps the head sync domain related functions.
|
// HeadDomainFetcher is the interface that wraps the head sync domain related functions.
|
||||||
// The head sync committee domain functions return callers domain data with respect to slot and head state.
|
// The head sync committee domain functions return callers domain data with respect to slot and head state.
|
||||||
type HeadDomainFetcher interface {
|
type HeadDomainFetcher interface {
|
||||||
HeadSyncCommitteeDomain(ctx context.Context, slot types.Slot) ([]byte, error)
|
HeadSyncCommitteeDomain(ctx context.Context, slot primitives.Slot) ([]byte, error)
|
||||||
HeadSyncSelectionProofDomain(ctx context.Context, slot types.Slot) ([]byte, error)
|
HeadSyncSelectionProofDomain(ctx context.Context, slot primitives.Slot) ([]byte, error)
|
||||||
HeadSyncContributionProofDomain(ctx context.Context, slot types.Slot) ([]byte, error)
|
HeadSyncContributionProofDomain(ctx context.Context, slot primitives.Slot) ([]byte, error)
|
||||||
}
|
}
|
||||||
|
|
||||||
// HeadSyncCommitteeDomain returns the head sync committee domain using current head state advanced up to `slot`.
|
// HeadSyncCommitteeDomain returns the head sync committee domain using current head state advanced up to `slot`.
|
||||||
func (s *Service) HeadSyncCommitteeDomain(ctx context.Context, slot types.Slot) ([]byte, error) {
|
func (s *Service) HeadSyncCommitteeDomain(ctx context.Context, slot primitives.Slot) ([]byte, error) {
|
||||||
return s.domainWithHeadState(ctx, slot, params.BeaconConfig().DomainSyncCommittee)
|
return s.domainWithHeadState(ctx, slot, params.BeaconConfig().DomainSyncCommittee)
|
||||||
}
|
}
|
||||||
|
|
||||||
// HeadSyncSelectionProofDomain returns the head sync committee domain using current head state advanced up to `slot`.
|
// HeadSyncSelectionProofDomain returns the head sync committee domain using current head state advanced up to `slot`.
|
||||||
func (s *Service) HeadSyncSelectionProofDomain(ctx context.Context, slot types.Slot) ([]byte, error) {
|
func (s *Service) HeadSyncSelectionProofDomain(ctx context.Context, slot primitives.Slot) ([]byte, error) {
|
||||||
return s.domainWithHeadState(ctx, slot, params.BeaconConfig().DomainSyncCommitteeSelectionProof)
|
return s.domainWithHeadState(ctx, slot, params.BeaconConfig().DomainSyncCommitteeSelectionProof)
|
||||||
}
|
}
|
||||||
|
|
||||||
// HeadSyncContributionProofDomain returns the head sync committee domain using current head state advanced up to `slot`.
|
// HeadSyncContributionProofDomain returns the head sync committee domain using current head state advanced up to `slot`.
|
||||||
func (s *Service) HeadSyncContributionProofDomain(ctx context.Context, slot types.Slot) ([]byte, error) {
|
func (s *Service) HeadSyncContributionProofDomain(ctx context.Context, slot primitives.Slot) ([]byte, error) {
|
||||||
return s.domainWithHeadState(ctx, slot, params.BeaconConfig().DomainContributionAndProof)
|
return s.domainWithHeadState(ctx, slot, params.BeaconConfig().DomainContributionAndProof)
|
||||||
}
|
}
|
||||||
|
|
||||||
// HeadSyncCommitteeIndices returns the sync committee index position using the head state. Input `slot` is taken in consideration
|
// HeadSyncCommitteeIndices returns the sync committee index position using the head state. Input `slot` is taken in consideration
|
||||||
// where validator's duty for `slot - 1` is used for block inclusion in `slot`. That means when a validator is at epoch boundary
|
// where validator's duty for `slot - 1` is used for block inclusion in `slot`. That means when a validator is at epoch boundary
|
||||||
// across EPOCHS_PER_SYNC_COMMITTEE_PERIOD then the valiator will be considered using next period sync committee.
|
// across EPOCHS_PER_SYNC_COMMITTEE_PERIOD then the validator will be considered using next period sync committee.
|
||||||
//
|
//
|
||||||
// Spec definition:
|
// Spec definition:
|
||||||
// Being assigned to a sync committee for a given slot means that the validator produces and broadcasts signatures for slot - 1 for inclusion in slot.
|
// Being assigned to a sync committee for a given slot means that the validator produces and broadcasts signatures for slot - 1 for inclusion in slot.
|
||||||
@@ -61,7 +61,7 @@ func (s *Service) HeadSyncContributionProofDomain(ctx context.Context, slot type
|
|||||||
// [compute_start_slot_at_epoch(epoch) - 1, compute_start_slot_at_epoch(epoch) + SLOTS_PER_EPOCH - 1)
|
// [compute_start_slot_at_epoch(epoch) - 1, compute_start_slot_at_epoch(epoch) + SLOTS_PER_EPOCH - 1)
|
||||||
// rather than for the range
|
// rather than for the range
|
||||||
// [compute_start_slot_at_epoch(epoch), compute_start_slot_at_epoch(epoch) + SLOTS_PER_EPOCH)
|
// [compute_start_slot_at_epoch(epoch), compute_start_slot_at_epoch(epoch) + SLOTS_PER_EPOCH)
|
||||||
func (s *Service) HeadSyncCommitteeIndices(ctx context.Context, index types.ValidatorIndex, slot types.Slot) ([]types.CommitteeIndex, error) {
|
func (s *Service) HeadSyncCommitteeIndices(ctx context.Context, index primitives.ValidatorIndex, slot primitives.Slot) ([]primitives.CommitteeIndex, error) {
|
||||||
nextSlotEpoch := slots.ToEpoch(slot + 1)
|
nextSlotEpoch := slots.ToEpoch(slot + 1)
|
||||||
currentEpoch := slots.ToEpoch(slot)
|
currentEpoch := slots.ToEpoch(slot)
|
||||||
|
|
||||||
@@ -79,7 +79,7 @@ func (s *Service) HeadSyncCommitteeIndices(ctx context.Context, index types.Vali
|
|||||||
|
|
||||||
// headCurrentSyncCommitteeIndices returns the input validator `index`'s position indices in the current sync committee with respect to `slot`.
|
// headCurrentSyncCommitteeIndices returns the input validator `index`'s position indices in the current sync committee with respect to `slot`.
|
||||||
// Head state advanced up to `slot` is used for calculation.
|
// Head state advanced up to `slot` is used for calculation.
|
||||||
func (s *Service) headCurrentSyncCommitteeIndices(ctx context.Context, index types.ValidatorIndex, slot types.Slot) ([]types.CommitteeIndex, error) {
|
func (s *Service) headCurrentSyncCommitteeIndices(ctx context.Context, index primitives.ValidatorIndex, slot primitives.Slot) ([]primitives.CommitteeIndex, error) {
|
||||||
headState, err := s.getSyncCommitteeHeadState(ctx, slot)
|
headState, err := s.getSyncCommitteeHeadState(ctx, slot)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -89,7 +89,7 @@ func (s *Service) headCurrentSyncCommitteeIndices(ctx context.Context, index typ
|
|||||||
|
|
||||||
// headNextSyncCommitteeIndices returns the input validator `index`'s position indices in the next sync committee with respect to `slot`.
|
// headNextSyncCommitteeIndices returns the input validator `index`'s position indices in the next sync committee with respect to `slot`.
|
||||||
// Head state advanced up to `slot` is used for calculation.
|
// Head state advanced up to `slot` is used for calculation.
|
||||||
func (s *Service) headNextSyncCommitteeIndices(ctx context.Context, index types.ValidatorIndex, slot types.Slot) ([]types.CommitteeIndex, error) {
|
func (s *Service) headNextSyncCommitteeIndices(ctx context.Context, index primitives.ValidatorIndex, slot primitives.Slot) ([]primitives.CommitteeIndex, error) {
|
||||||
headState, err := s.getSyncCommitteeHeadState(ctx, slot)
|
headState, err := s.getSyncCommitteeHeadState(ctx, slot)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -99,7 +99,7 @@ func (s *Service) headNextSyncCommitteeIndices(ctx context.Context, index types.
|
|||||||
|
|
||||||
// HeadSyncCommitteePubKeys returns the head sync committee public keys with respect to `slot` and subcommittee index `committeeIndex`.
|
// HeadSyncCommitteePubKeys returns the head sync committee public keys with respect to `slot` and subcommittee index `committeeIndex`.
|
||||||
// Head state advanced up to `slot` is used for calculation.
|
// Head state advanced up to `slot` is used for calculation.
|
||||||
func (s *Service) HeadSyncCommitteePubKeys(ctx context.Context, slot types.Slot, committeeIndex types.CommitteeIndex) ([][]byte, error) {
|
func (s *Service) HeadSyncCommitteePubKeys(ctx context.Context, slot primitives.Slot, committeeIndex primitives.CommitteeIndex) ([][]byte, error) {
|
||||||
headState, err := s.getSyncCommitteeHeadState(ctx, slot)
|
headState, err := s.getSyncCommitteeHeadState(ctx, slot)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -125,7 +125,7 @@ func (s *Service) HeadSyncCommitteePubKeys(ctx context.Context, slot types.Slot,
|
|||||||
}
|
}
|
||||||
|
|
||||||
// returns calculated domain using input `domain` and `slot`.
|
// returns calculated domain using input `domain` and `slot`.
|
||||||
func (s *Service) domainWithHeadState(ctx context.Context, slot types.Slot, domain [4]byte) ([]byte, error) {
|
func (s *Service) domainWithHeadState(ctx context.Context, slot primitives.Slot, domain [4]byte) ([]byte, error) {
|
||||||
headState, err := s.getSyncCommitteeHeadState(ctx, slot)
|
headState, err := s.getSyncCommitteeHeadState(ctx, slot)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -135,7 +135,7 @@ func (s *Service) domainWithHeadState(ctx context.Context, slot types.Slot, doma
|
|||||||
|
|
||||||
// returns the head state that is advanced up to `slot`. It utilizes the cache `syncCommitteeHeadState` by retrieving using `slot` as key.
|
// returns the head state that is advanced up to `slot`. It utilizes the cache `syncCommitteeHeadState` by retrieving using `slot` as key.
|
||||||
// For the cache miss, it processes head state up to slot and fill the cache with `slot` as key.
|
// For the cache miss, it processes head state up to slot and fill the cache with `slot` as key.
|
||||||
func (s *Service) getSyncCommitteeHeadState(ctx context.Context, slot types.Slot) (state.BeaconState, error) {
|
func (s *Service) getSyncCommitteeHeadState(ctx context.Context, slot primitives.Slot) (state.BeaconState, error) {
|
||||||
var headState state.BeaconState
|
var headState state.BeaconState
|
||||||
var err error
|
var err error
|
||||||
mLock := async.NewMultilock(fmt.Sprintf("%s-%d", "syncHeadState", slot))
|
mLock := async.NewMultilock(fmt.Sprintf("%s-%d", "syncHeadState", slot))
|
||||||
|
|||||||
@@ -4,53 +4,15 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
|
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
|
||||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/signing"
|
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing"
|
||||||
dbtest "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||||
"github.com/prysmaticlabs/prysm/config/params"
|
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||||
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
|
"github.com/prysmaticlabs/prysm/v4/testing/util"
|
||||||
"github.com/prysmaticlabs/prysm/testing/require"
|
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||||
"github.com/prysmaticlabs/prysm/testing/util"
|
|
||||||
"github.com/prysmaticlabs/prysm/time/slots"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestService_headSyncCommitteeFetcher_Errors(t *testing.T) {
|
|
||||||
beaconDB := dbtest.SetupDB(t)
|
|
||||||
c := &Service{
|
|
||||||
cfg: &config{
|
|
||||||
StateGen: stategen.New(beaconDB),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
c.head = &head{}
|
|
||||||
_, err := c.headCurrentSyncCommitteeIndices(context.Background(), types.ValidatorIndex(0), types.Slot(0))
|
|
||||||
require.ErrorContains(t, "nil state", err)
|
|
||||||
|
|
||||||
_, err = c.headNextSyncCommitteeIndices(context.Background(), types.ValidatorIndex(0), types.Slot(0))
|
|
||||||
require.ErrorContains(t, "nil state", err)
|
|
||||||
|
|
||||||
_, err = c.HeadSyncCommitteePubKeys(context.Background(), types.Slot(0), types.CommitteeIndex(0))
|
|
||||||
require.ErrorContains(t, "nil state", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestService_HeadDomainFetcher_Errors(t *testing.T) {
|
|
||||||
beaconDB := dbtest.SetupDB(t)
|
|
||||||
c := &Service{
|
|
||||||
cfg: &config{
|
|
||||||
StateGen: stategen.New(beaconDB),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
c.head = &head{}
|
|
||||||
_, err := c.HeadSyncCommitteeDomain(context.Background(), types.Slot(0))
|
|
||||||
require.ErrorContains(t, "nil state", err)
|
|
||||||
|
|
||||||
_, err = c.HeadSyncSelectionProofDomain(context.Background(), types.Slot(0))
|
|
||||||
require.ErrorContains(t, "nil state", err)
|
|
||||||
|
|
||||||
_, err = c.HeadSyncSelectionProofDomain(context.Background(), types.Slot(0))
|
|
||||||
require.ErrorContains(t, "nil state", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestService_HeadSyncCommitteeIndices(t *testing.T) {
|
func TestService_HeadSyncCommitteeIndices(t *testing.T) {
|
||||||
s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
|
s, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().TargetCommitteeSize)
|
||||||
c := &Service{}
|
c := &Service{}
|
||||||
@@ -58,18 +20,18 @@ func TestService_HeadSyncCommitteeIndices(t *testing.T) {
|
|||||||
|
|
||||||
// Current period
|
// Current period
|
||||||
slot := 2*uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) + 1
|
slot := 2*uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) + 1
|
||||||
a, err := c.HeadSyncCommitteeIndices(context.Background(), 0, types.Slot(slot))
|
a, err := c.HeadSyncCommitteeIndices(context.Background(), 0, primitives.Slot(slot))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
// Current period where slot-2 across EPOCHS_PER_SYNC_COMMITTEE_PERIOD
|
// Current period where slot-2 across EPOCHS_PER_SYNC_COMMITTEE_PERIOD
|
||||||
slot = 3*uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) - 2
|
slot = 3*uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) - 2
|
||||||
b, err := c.HeadSyncCommitteeIndices(context.Background(), 0, types.Slot(slot))
|
b, err := c.HeadSyncCommitteeIndices(context.Background(), 0, primitives.Slot(slot))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.DeepEqual(t, a, b)
|
require.DeepEqual(t, a, b)
|
||||||
|
|
||||||
// Next period where slot-1 across EPOCHS_PER_SYNC_COMMITTEE_PERIOD
|
// Next period where slot-1 across EPOCHS_PER_SYNC_COMMITTEE_PERIOD
|
||||||
slot = 3*uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) - 1
|
slot = 3*uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) - 1
|
||||||
b, err = c.HeadSyncCommitteeIndices(context.Background(), 0, types.Slot(slot))
|
b, err = c.HeadSyncCommitteeIndices(context.Background(), 0, primitives.Slot(slot))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.DeepNotEqual(t, a, b)
|
require.DeepNotEqual(t, a, b)
|
||||||
}
|
}
|
||||||
@@ -81,7 +43,7 @@ func TestService_headCurrentSyncCommitteeIndices(t *testing.T) {
|
|||||||
|
|
||||||
// Process slot up to `EpochsPerSyncCommitteePeriod` so it can `ProcessSyncCommitteeUpdates`.
|
// Process slot up to `EpochsPerSyncCommitteePeriod` so it can `ProcessSyncCommitteeUpdates`.
|
||||||
slot := uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) + 1
|
slot := uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) + 1
|
||||||
indices, err := c.headCurrentSyncCommitteeIndices(context.Background(), 0, types.Slot(slot))
|
indices, err := c.headCurrentSyncCommitteeIndices(context.Background(), 0, primitives.Slot(slot))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
// NextSyncCommittee becomes CurrentSyncCommittee so it should be empty by default.
|
// NextSyncCommittee becomes CurrentSyncCommittee so it should be empty by default.
|
||||||
@@ -95,7 +57,7 @@ func TestService_headNextSyncCommitteeIndices(t *testing.T) {
|
|||||||
|
|
||||||
// Process slot up to `EpochsPerSyncCommitteePeriod` so it can `ProcessSyncCommitteeUpdates`.
|
// Process slot up to `EpochsPerSyncCommitteePeriod` so it can `ProcessSyncCommitteeUpdates`.
|
||||||
slot := uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) + 1
|
slot := uint64(params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) + 1
|
||||||
indices, err := c.headNextSyncCommitteeIndices(context.Background(), 0, types.Slot(slot))
|
indices, err := c.headNextSyncCommitteeIndices(context.Background(), 0, primitives.Slot(slot))
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
// NextSyncCommittee should be be empty after `ProcessSyncCommitteeUpdates`. Validator should get indices.
|
// NextSyncCommittee should be be empty after `ProcessSyncCommitteeUpdates`. Validator should get indices.
|
||||||
@@ -109,7 +71,7 @@ func TestService_HeadSyncCommitteePubKeys(t *testing.T) {
|
|||||||
|
|
||||||
// Process slot up to 2 * `EpochsPerSyncCommitteePeriod` so it can run `ProcessSyncCommitteeUpdates` twice.
|
// Process slot up to 2 * `EpochsPerSyncCommitteePeriod` so it can run `ProcessSyncCommitteeUpdates` twice.
|
||||||
slot := uint64(2*params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) + 1
|
slot := uint64(2*params.BeaconConfig().EpochsPerSyncCommitteePeriod)*uint64(params.BeaconConfig().SlotsPerEpoch) + 1
|
||||||
pubkeys, err := c.HeadSyncCommitteePubKeys(context.Background(), types.Slot(slot), 0)
|
pubkeys, err := c.HeadSyncCommitteePubKeys(context.Background(), primitives.Slot(slot), 0)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
// Any subcommittee should match the subcommittee size.
|
// Any subcommittee should match the subcommittee size.
|
||||||
|
|||||||
@@ -7,21 +7,20 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
|
mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
|
||||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
|
||||||
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
|
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
|
||||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/blstoexec"
|
||||||
"github.com/prysmaticlabs/prysm/config/features"
|
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||||
"github.com/prysmaticlabs/prysm/config/params"
|
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||||
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
|
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||||
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
|
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
|
||||||
ethpbv1 "github.com/prysmaticlabs/prysm/proto/eth/v1"
|
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
"github.com/prysmaticlabs/prysm/v4/testing/assert"
|
||||||
"github.com/prysmaticlabs/prysm/testing/assert"
|
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||||
"github.com/prysmaticlabs/prysm/testing/require"
|
"github.com/prysmaticlabs/prysm/v4/testing/util"
|
||||||
"github.com/prysmaticlabs/prysm/testing/util"
|
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||||
"github.com/prysmaticlabs/prysm/time/slots"
|
|
||||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -30,12 +29,12 @@ func TestSaveHead_Same(t *testing.T) {
|
|||||||
service := setupBeaconChain(t, beaconDB)
|
service := setupBeaconChain(t, beaconDB)
|
||||||
|
|
||||||
r := [32]byte{'A'}
|
r := [32]byte{'A'}
|
||||||
service.head = &head{slot: 0, root: r}
|
service.head = &head{root: r}
|
||||||
b, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
|
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
st, _ := util.DeterministicGenesisState(t, 1)
|
st, _ := util.DeterministicGenesisState(t, 1)
|
||||||
require.NoError(t, service.saveHead(context.Background(), r, b, st))
|
require.NoError(t, service.saveHead(context.Background(), r, b, st))
|
||||||
assert.Equal(t, types.Slot(0), service.headSlot(), "Head did not stay the same")
|
assert.Equal(t, primitives.Slot(0), service.headSlot(), "Head did not stay the same")
|
||||||
assert.Equal(t, r, service.headRoot(), "Head did not stay the same")
|
assert.Equal(t, r, service.headRoot(), "Head did not stay the same")
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -44,19 +43,15 @@ func TestSaveHead_Different(t *testing.T) {
|
|||||||
beaconDB := testDB.SetupDB(t)
|
beaconDB := testDB.SetupDB(t)
|
||||||
service := setupBeaconChain(t, beaconDB)
|
service := setupBeaconChain(t, beaconDB)
|
||||||
|
|
||||||
util.NewBeaconBlock()
|
oldBlock := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, util.NewBeaconBlock())
|
||||||
oldBlock, err := wrapper.WrappedSignedBeaconBlock(
|
|
||||||
util.NewBeaconBlock(),
|
|
||||||
)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), oldBlock))
|
|
||||||
oldRoot, err := oldBlock.Block().HashTreeRoot()
|
oldRoot, err := oldBlock.Block().HashTreeRoot()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
state, blkRoot, err := prepareForkchoiceState(ctx, oldBlock.Block().Slot(), oldRoot, bytesutil.ToBytes32(oldBlock.Block().ParentRoot()), [32]byte{}, 0, 0)
|
ojc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||||
|
ofc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||||
|
state, blkRoot, err := prepareForkchoiceState(ctx, oldBlock.Block().Slot(), oldRoot, oldBlock.Block().ParentRoot(), [32]byte{}, ojc, ofc)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||||
service.head = &head{
|
service.head = &head{
|
||||||
slot: 0,
|
|
||||||
root: oldRoot,
|
root: oldRoot,
|
||||||
block: oldBlock,
|
block: oldBlock,
|
||||||
}
|
}
|
||||||
@@ -65,12 +60,14 @@ func TestSaveHead_Different(t *testing.T) {
|
|||||||
newHeadSignedBlock.Block.Slot = 1
|
newHeadSignedBlock.Block.Slot = 1
|
||||||
newHeadBlock := newHeadSignedBlock.Block
|
newHeadBlock := newHeadSignedBlock.Block
|
||||||
|
|
||||||
wsb, err := wrapper.WrappedSignedBeaconBlock(newHeadSignedBlock)
|
wsb := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, newHeadSignedBlock)
|
||||||
require.NoError(t, err)
|
|
||||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), wsb))
|
|
||||||
newRoot, err := newHeadBlock.HashTreeRoot()
|
newRoot, err := newHeadBlock.HashTreeRoot()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
state, blkRoot, err = prepareForkchoiceState(ctx, wsb.Block().Slot(), newRoot, bytesutil.ToBytes32(wsb.Block().ParentRoot()), [32]byte{}, 0, 0)
|
state, blkRoot, err = prepareForkchoiceState(ctx, wsb.Block().Slot()-1, wsb.Block().ParentRoot(), service.cfg.ForkChoiceStore.CachedHeadRoot(), [32]byte{}, ojc, ofc)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||||
|
|
||||||
|
state, blkRoot, err = prepareForkchoiceState(ctx, wsb.Block().Slot(), newRoot, wsb.Block().ParentRoot(), [32]byte{}, ojc, ofc)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||||
headState, err := util.NewBeaconState()
|
headState, err := util.NewBeaconState()
|
||||||
@@ -80,13 +77,17 @@ func TestSaveHead_Different(t *testing.T) {
|
|||||||
require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), headState, newRoot))
|
require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), headState, newRoot))
|
||||||
require.NoError(t, service.saveHead(context.Background(), newRoot, wsb, headState))
|
require.NoError(t, service.saveHead(context.Background(), newRoot, wsb, headState))
|
||||||
|
|
||||||
assert.Equal(t, types.Slot(1), service.HeadSlot(), "Head did not change")
|
assert.Equal(t, primitives.Slot(1), service.HeadSlot(), "Head did not change")
|
||||||
|
|
||||||
cachedRoot, err := service.HeadRoot(context.Background())
|
cachedRoot, err := service.HeadRoot(context.Background())
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
assert.DeepEqual(t, cachedRoot, newRoot[:], "Head did not change")
|
assert.DeepEqual(t, cachedRoot, newRoot[:], "Head did not change")
|
||||||
assert.DeepEqual(t, newHeadSignedBlock, service.headBlock().Proto(), "Head did not change")
|
headBlock, err := service.headBlock()
|
||||||
assert.DeepSSZEqual(t, headState.CloneInnerState(), service.headState(ctx).CloneInnerState(), "Head did not change")
|
require.NoError(t, err)
|
||||||
|
pb, err := headBlock.Proto()
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.DeepEqual(t, newHeadSignedBlock, pb, "Head did not change")
|
||||||
|
assert.DeepSSZEqual(t, headState.ToProto(), service.headState(ctx).ToProto(), "Head did not change")
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestSaveHead_Different_Reorg(t *testing.T) {
|
func TestSaveHead_Different_Reorg(t *testing.T) {
|
||||||
@@ -95,34 +96,33 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
|
|||||||
beaconDB := testDB.SetupDB(t)
|
beaconDB := testDB.SetupDB(t)
|
||||||
service := setupBeaconChain(t, beaconDB)
|
service := setupBeaconChain(t, beaconDB)
|
||||||
|
|
||||||
oldBlock, err := wrapper.WrappedSignedBeaconBlock(
|
oldBlock := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, util.NewBeaconBlock())
|
||||||
util.NewBeaconBlock(),
|
|
||||||
)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), oldBlock))
|
|
||||||
oldRoot, err := oldBlock.Block().HashTreeRoot()
|
oldRoot, err := oldBlock.Block().HashTreeRoot()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
state, blkRoot, err := prepareForkchoiceState(ctx, oldBlock.Block().Slot(), oldRoot, bytesutil.ToBytes32(oldBlock.Block().ParentRoot()), [32]byte{}, 0, 0)
|
ojc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||||
|
ofc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||||
|
state, blkRoot, err := prepareForkchoiceState(ctx, oldBlock.Block().Slot(), oldRoot, oldBlock.Block().ParentRoot(), [32]byte{}, ojc, ofc)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||||
service.head = &head{
|
service.head = &head{
|
||||||
slot: 0,
|
|
||||||
root: oldRoot,
|
root: oldRoot,
|
||||||
block: oldBlock,
|
block: oldBlock,
|
||||||
}
|
}
|
||||||
|
|
||||||
reorgChainParent := [32]byte{'B'}
|
reorgChainParent := [32]byte{'B'}
|
||||||
|
state, blkRoot, err = prepareForkchoiceState(ctx, 0, reorgChainParent, oldRoot, oldBlock.Block().ParentRoot(), ojc, ofc)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||||
|
|
||||||
newHeadSignedBlock := util.NewBeaconBlock()
|
newHeadSignedBlock := util.NewBeaconBlock()
|
||||||
newHeadSignedBlock.Block.Slot = 1
|
newHeadSignedBlock.Block.Slot = 1
|
||||||
newHeadSignedBlock.Block.ParentRoot = reorgChainParent[:]
|
newHeadSignedBlock.Block.ParentRoot = reorgChainParent[:]
|
||||||
newHeadBlock := newHeadSignedBlock.Block
|
newHeadBlock := newHeadSignedBlock.Block
|
||||||
|
|
||||||
wsb, err := wrapper.WrappedSignedBeaconBlock(newHeadSignedBlock)
|
wsb := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, newHeadSignedBlock)
|
||||||
require.NoError(t, err)
|
|
||||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), wsb))
|
|
||||||
newRoot, err := newHeadBlock.HashTreeRoot()
|
newRoot, err := newHeadBlock.HashTreeRoot()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
state, blkRoot, err = prepareForkchoiceState(ctx, wsb.Block().Slot(), newRoot, bytesutil.ToBytes32(wsb.Block().ParentRoot()), [32]byte{}, 0, 0)
|
state, blkRoot, err = prepareForkchoiceState(ctx, wsb.Block().Slot(), newRoot, wsb.Block().ParentRoot(), [32]byte{}, ojc, ofc)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||||
headState, err := util.NewBeaconState()
|
headState, err := util.NewBeaconState()
|
||||||
@@ -132,52 +132,22 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
|
|||||||
require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), headState, newRoot))
|
require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), headState, newRoot))
|
||||||
require.NoError(t, service.saveHead(context.Background(), newRoot, wsb, headState))
|
require.NoError(t, service.saveHead(context.Background(), newRoot, wsb, headState))
|
||||||
|
|
||||||
assert.Equal(t, types.Slot(1), service.HeadSlot(), "Head did not change")
|
assert.Equal(t, primitives.Slot(1), service.HeadSlot(), "Head did not change")
|
||||||
|
|
||||||
cachedRoot, err := service.HeadRoot(context.Background())
|
cachedRoot, err := service.HeadRoot(context.Background())
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
if !bytes.Equal(cachedRoot, newRoot[:]) {
|
if !bytes.Equal(cachedRoot, newRoot[:]) {
|
||||||
t.Error("Head did not change")
|
t.Error("Head did not change")
|
||||||
}
|
}
|
||||||
assert.DeepEqual(t, newHeadSignedBlock, service.headBlock().Proto(), "Head did not change")
|
headBlock, err := service.headBlock()
|
||||||
assert.DeepSSZEqual(t, headState.CloneInnerState(), service.headState(ctx).CloneInnerState(), "Head did not change")
|
require.NoError(t, err)
|
||||||
|
pb, err := headBlock.Proto()
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.DeepEqual(t, newHeadSignedBlock, pb, "Head did not change")
|
||||||
|
assert.DeepSSZEqual(t, headState.ToProto(), service.headState(ctx).ToProto(), "Head did not change")
|
||||||
require.LogsContain(t, hook, "Chain reorg occurred")
|
require.LogsContain(t, hook, "Chain reorg occurred")
|
||||||
}
|
require.LogsContain(t, hook, "distance=1")
|
||||||
|
require.LogsContain(t, hook, "depth=1")
|
||||||
func TestCacheJustifiedStateBalances_CanCache(t *testing.T) {
|
|
||||||
beaconDB := testDB.SetupDB(t)
|
|
||||||
service := setupBeaconChain(t, beaconDB)
|
|
||||||
ctx := context.Background()
|
|
||||||
|
|
||||||
state, _ := util.DeterministicGenesisState(t, 100)
|
|
||||||
r := [32]byte{'a'}
|
|
||||||
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(context.Background(), ðpb.StateSummary{Root: r[:]}))
|
|
||||||
require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), state, r))
|
|
||||||
balances, err := service.justifiedBalances.get(ctx, r)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.DeepEqual(t, balances, state.Balances(), "Incorrect justified balances")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestUpdateHead_MissingJustifiedRoot(t *testing.T) {
|
|
||||||
beaconDB := testDB.SetupDB(t)
|
|
||||||
service := setupBeaconChain(t, beaconDB)
|
|
||||||
|
|
||||||
b := util.NewBeaconBlock()
|
|
||||||
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), wsb))
|
|
||||||
r, err := b.Block.HashTreeRoot()
|
|
||||||
require.NoError(t, err)
|
|
||||||
state, _ := util.DeterministicGenesisState(t, 1)
|
|
||||||
require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), state, r))
|
|
||||||
|
|
||||||
service.store.SetJustifiedCheckptAndPayloadHash(ðpb.Checkpoint{Root: r[:]}, [32]byte{'a'})
|
|
||||||
service.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{}, [32]byte{'b'})
|
|
||||||
service.store.SetBestJustifiedCheckpt(ðpb.Checkpoint{})
|
|
||||||
headRoot, err := service.updateHead(context.Background(), []uint64{})
|
|
||||||
require.NoError(t, err)
|
|
||||||
st, _ := util.DeterministicGenesisState(t, 1)
|
|
||||||
require.NoError(t, service.saveHead(context.Background(), headRoot, wsb, st))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func Test_notifyNewHeadEvent(t *testing.T) {
|
func Test_notifyNewHeadEvent(t *testing.T) {
|
||||||
@@ -246,60 +216,46 @@ func Test_notifyNewHeadEvent(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestSaveOrphanedAtts_NoCommonAncestor(t *testing.T) {
|
func TestRetrieveHead_ReadOnly(t *testing.T) {
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
beaconDB := testDB.SetupDB(t)
|
beaconDB := testDB.SetupDB(t)
|
||||||
service := setupBeaconChain(t, beaconDB)
|
service := setupBeaconChain(t, beaconDB)
|
||||||
service.genesisTime = time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
|
|
||||||
|
|
||||||
// Chain setup
|
oldBlock := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, util.NewBeaconBlock())
|
||||||
// 0 -- 1 -- 2 -- 3
|
oldRoot, err := oldBlock.Block().HashTreeRoot()
|
||||||
// -4
|
|
||||||
st, keys := util.DeterministicGenesisState(t, 64)
|
|
||||||
blkG, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 0)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
b, err := wrapper.WrappedSignedBeaconBlock(blkG)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, b))
|
|
||||||
rG, err := blkG.Block.HashTreeRoot()
|
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
service.head = &head{
|
||||||
blk1, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
|
root: oldRoot,
|
||||||
assert.NoError(t, err)
|
block: oldBlock,
|
||||||
blk1.Block.ParentRoot = rG[:]
|
|
||||||
r1, err := blk1.Block.HashTreeRoot()
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
blk2, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 2)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
blk2.Block.ParentRoot = r1[:]
|
|
||||||
r2, err := blk2.Block.HashTreeRoot()
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
blk3, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 3)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
blk3.Block.ParentRoot = r2[:]
|
|
||||||
r3, err := blk3.Block.HashTreeRoot()
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
blk4 := util.NewBeaconBlock()
|
|
||||||
blk4.Block.Slot = 4
|
|
||||||
r4, err := blk4.Block.HashTreeRoot()
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk3, blk4} {
|
|
||||||
r, err := blk.Block.HashTreeRoot()
|
|
||||||
require.NoError(t, err)
|
|
||||||
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, 0, 0)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.NoError(t, service.ForkChoicer().InsertNode(ctx, state, blkRoot))
|
|
||||||
b, err := wrapper.WrappedSignedBeaconBlock(blk)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.NoError(t, beaconDB.SaveBlock(ctx, b))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
require.NoError(t, service.saveOrphanedAtts(ctx, r3, r4))
|
newHeadSignedBlock := util.NewBeaconBlock()
|
||||||
require.Equal(t, 0, service.cfg.AttPool.AggregatedAttestationCount())
|
newHeadSignedBlock.Block.Slot = 1
|
||||||
|
newHeadBlock := newHeadSignedBlock.Block
|
||||||
|
ojc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||||
|
ofc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||||
|
|
||||||
|
wsb := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, newHeadSignedBlock)
|
||||||
|
newRoot, err := newHeadBlock.HashTreeRoot()
|
||||||
|
require.NoError(t, err)
|
||||||
|
state, blkRoot, err := prepareForkchoiceState(ctx, wsb.Block().Slot()-1, wsb.Block().ParentRoot(), service.cfg.ForkChoiceStore.CachedHeadRoot(), [32]byte{}, ojc, ofc)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||||
|
|
||||||
|
state, blkRoot, err = prepareForkchoiceState(ctx, wsb.Block().Slot(), newRoot, wsb.Block().ParentRoot(), [32]byte{}, ojc, ofc)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||||
|
headState, err := util.NewBeaconState()
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.NoError(t, headState.SetSlot(1))
|
||||||
|
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(context.Background(), ðpb.StateSummary{Slot: 1, Root: newRoot[:]}))
|
||||||
|
require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), headState, newRoot))
|
||||||
|
require.NoError(t, service.saveHead(context.Background(), newRoot, wsb, headState))
|
||||||
|
|
||||||
|
rOnlyState, err := service.HeadStateReadOnly(ctx)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
assert.Equal(t, rOnlyState, service.head.state, "Head is not the same object")
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestSaveOrphanedAtts(t *testing.T) {
|
func TestSaveOrphanedAtts(t *testing.T) {
|
||||||
@@ -314,9 +270,8 @@ func TestSaveOrphanedAtts(t *testing.T) {
|
|||||||
st, keys := util.DeterministicGenesisState(t, 64)
|
st, keys := util.DeterministicGenesisState(t, 64)
|
||||||
blkG, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 0)
|
blkG, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 0)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
b, err := wrapper.WrappedSignedBeaconBlock(blkG)
|
|
||||||
assert.NoError(t, err)
|
util.SaveBlock(t, ctx, service.cfg.BeaconDB, blkG)
|
||||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, b))
|
|
||||||
rG, err := blkG.Block.HashTreeRoot()
|
rG, err := blkG.Block.HashTreeRoot()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
@@ -343,19 +298,19 @@ func TestSaveOrphanedAtts(t *testing.T) {
|
|||||||
blk4.Block.ParentRoot = rG[:]
|
blk4.Block.ParentRoot = rG[:]
|
||||||
r4, err := blk4.Block.HashTreeRoot()
|
r4, err := blk4.Block.HashTreeRoot()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
ojc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||||
|
ofc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||||
|
|
||||||
for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk3, blk4} {
|
for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk3, blk4} {
|
||||||
r, err := blk.Block.HashTreeRoot()
|
r, err := blk.Block.HashTreeRoot()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, 0, 0)
|
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.NoError(t, service.ForkChoicer().InsertNode(ctx, state, blkRoot))
|
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||||
b, err := wrapper.WrappedSignedBeaconBlock(blk)
|
util.SaveBlock(t, ctx, beaconDB, blk)
|
||||||
require.NoError(t, err)
|
|
||||||
require.NoError(t, beaconDB.SaveBlock(ctx, b))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
require.NoError(t, service.saveOrphanedAtts(ctx, r3, r4))
|
require.NoError(t, service.saveOrphanedOperations(ctx, r3, r4))
|
||||||
require.Equal(t, 3, service.cfg.AttPool.AggregatedAttestationCount())
|
require.Equal(t, 3, service.cfg.AttPool.AggregatedAttestationCount())
|
||||||
wantAtts := []*ethpb.Attestation{
|
wantAtts := []*ethpb.Attestation{
|
||||||
blk3.Block.Body.Attestations[0],
|
 	blk3.Block.Body.Attestations[0],
@@ -369,124 +324,147 @@ func TestSaveOrphanedAtts(t *testing.T) {
 	require.DeepEqual(t, wantAtts, atts)
 }
 
+func TestSaveOrphanedOps(t *testing.T) {
+	params.SetupTestConfigCleanup(t)
+	config := params.BeaconConfig()
+	config.ShardCommitteePeriod = 0
+	params.OverrideBeaconConfig(config)
+
+	ctx := context.Background()
+	beaconDB := testDB.SetupDB(t)
+	service := setupBeaconChain(t, beaconDB)
+	service.genesisTime = time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
+
+	// Chain setup
+	// 0 -- 1 -- 2 -- 3
+	//  \-4
+	st, keys := util.DeterministicGenesisState(t, 64)
+	service.head = &head{state: st}
+	blkG, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 0)
+	assert.NoError(t, err)
+
+	util.SaveBlock(t, ctx, service.cfg.BeaconDB, blkG)
+	rG, err := blkG.Block.HashTreeRoot()
+	require.NoError(t, err)
+
+	blk1, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
+	assert.NoError(t, err)
+	blk1.Block.ParentRoot = rG[:]
+	r1, err := blk1.Block.HashTreeRoot()
+	require.NoError(t, err)
+
+	blk2, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 2)
+	assert.NoError(t, err)
+	blk2.Block.ParentRoot = r1[:]
+	r2, err := blk2.Block.HashTreeRoot()
+	require.NoError(t, err)
+
+	blkConfig := util.DefaultBlockGenConfig()
+	blkConfig.NumBLSChanges = 5
+	blkConfig.NumProposerSlashings = 1
+	blkConfig.NumAttesterSlashings = 1
+	blkConfig.NumVoluntaryExits = 1
+	blk3, err := util.GenerateFullBlock(st, keys, blkConfig, 3)
+	assert.NoError(t, err)
+	blk3.Block.ParentRoot = r2[:]
+	r3, err := blk3.Block.HashTreeRoot()
+	require.NoError(t, err)
+
+	blk4 := util.NewBeaconBlock()
+	blk4.Block.Slot = 4
+	blk4.Block.ParentRoot = rG[:]
+	r4, err := blk4.Block.HashTreeRoot()
+	require.NoError(t, err)
+	ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
+	ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
+
+	for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk3, blk4} {
+		r, err := blk.Block.HashTreeRoot()
+		require.NoError(t, err)
+		state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
+		require.NoError(t, err)
+		require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
+		util.SaveBlock(t, ctx, beaconDB, blk)
+	}
+
+	require.NoError(t, service.saveOrphanedOperations(ctx, r3, r4))
+	require.Equal(t, 3, service.cfg.AttPool.AggregatedAttestationCount())
+	wantAtts := []*ethpb.Attestation{
+		blk3.Block.Body.Attestations[0],
+		blk2.Block.Body.Attestations[0],
+		blk1.Block.Body.Attestations[0],
+	}
+	atts := service.cfg.AttPool.AggregatedAttestations()
+	sort.Slice(atts, func(i, j int) bool {
+		return atts[i].Data.Slot > atts[j].Data.Slot
+	})
+	require.DeepEqual(t, wantAtts, atts)
+	require.Equal(t, 1, len(service.cfg.SlashingPool.PendingProposerSlashings(ctx, st, false)))
+	require.Equal(t, 1, len(service.cfg.SlashingPool.PendingAttesterSlashings(ctx, st, false)))
+	exits, err := service.cfg.ExitPool.PendingExits()
+	require.NoError(t, err)
+	require.Equal(t, 1, len(exits))
+}
+
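The new TestSaveOrphanedOps above exercises the renamed saveOrphanedOperations helper. As a rough sketch of the behaviour it asserts, and not Prysm's actual implementation: when the head moves to a competing fork, the blocks on the abandoned branch are walked back to the common ancestor and their operations are returned to the node's local pools so they can be re-included later. All types and helper names below are hypothetical stand-ins.

package main

import "fmt"

// Hypothetical, simplified stand-ins for beacon blocks and operation pools.
type Block struct {
	Parent       string
	Attestations []string
	Exits        []string
}

type Pools struct {
	Atts  []string
	Exits []string
}

// saveOrphanedOperations walks from the orphaned head back to the common
// ancestor of the new head and puts every operation it finds back in the pools.
func saveOrphanedOperations(chain map[string]Block, orphanedHead, commonAncestor string, p *Pools) {
	for r := orphanedHead; r != commonAncestor; r = chain[r].Parent {
		blk := chain[r]
		p.Atts = append(p.Atts, blk.Attestations...)
		p.Exits = append(p.Exits, blk.Exits...)
	}
}

func main() {
	chain := map[string]Block{
		"b1": {Parent: "g", Attestations: []string{"a1"}},
		"b2": {Parent: "b1", Attestations: []string{"a2"}, Exits: []string{"e1"}},
	}
	var pools Pools
	saveOrphanedOperations(chain, "b2", "g", &pools)
	fmt.Println(pools.Atts, pools.Exits) // [a2 a1] [e1]
}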
 func TestSaveOrphanedAtts_CanFilter(t *testing.T) {
 	ctx := context.Background()
 	beaconDB := testDB.SetupDB(t)
 	service := setupBeaconChain(t, beaconDB)
+	service.cfg.BLSToExecPool = blstoexec.NewPool()
 	service.genesisTime = time.Now().Add(time.Duration(-1*int64(params.BeaconConfig().SlotsPerEpoch+2)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
 
 	// Chain setup
 	// 0 -- 1 -- 2
 	//  \-4
-	st, keys := util.DeterministicGenesisState(t, 64)
-	blkG, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 0)
+	st, keys := util.DeterministicGenesisStateCapella(t, 64)
+	blkConfig := util.DefaultBlockGenConfig()
+	blkConfig.NumBLSChanges = 5
+	blkG, err := util.GenerateFullBlockCapella(st, keys, blkConfig, 1)
 	assert.NoError(t, err)
-	b, err := wrapper.WrappedSignedBeaconBlock(blkG)
-	assert.NoError(t, err)
-	require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, b))
+	util.SaveBlock(t, ctx, service.cfg.BeaconDB, blkG)
 	rG, err := blkG.Block.HashTreeRoot()
 	require.NoError(t, err)
 
-	blk1, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
+	blkConfig.NumBLSChanges = 10
+	blk1, err := util.GenerateFullBlockCapella(st, keys, blkConfig, 2)
 	assert.NoError(t, err)
 	blk1.Block.ParentRoot = rG[:]
 	r1, err := blk1.Block.HashTreeRoot()
 	require.NoError(t, err)
 
-	blk2, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 2)
+	blkConfig.NumBLSChanges = 15
+	blk2, err := util.GenerateFullBlockCapella(st, keys, blkConfig, 3)
 	assert.NoError(t, err)
 	blk2.Block.ParentRoot = r1[:]
 	r2, err := blk2.Block.HashTreeRoot()
 	require.NoError(t, err)
 
-	blk4 := util.NewBeaconBlock()
+	blk4 := util.NewBeaconBlockCapella()
+	blkConfig.NumBLSChanges = 0
 	blk4.Block.Slot = 4
 	blk4.Block.ParentRoot = rG[:]
 	r4, err := blk4.Block.HashTreeRoot()
 	require.NoError(t, err)
+	ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
+	ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
 
-	for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk4} {
+	for _, blk := range []*ethpb.SignedBeaconBlockCapella{blkG, blk1, blk2, blk4} {
 		r, err := blk.Block.HashTreeRoot()
 		require.NoError(t, err)
-		state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, 0, 0)
+		state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
 		require.NoError(t, err)
-		require.NoError(t, service.ForkChoicer().InsertNode(ctx, state, blkRoot))
-		b, err := wrapper.WrappedSignedBeaconBlock(blk)
-		require.NoError(t, err)
-		require.NoError(t, beaconDB.SaveBlock(ctx, b))
+		require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
+		util.SaveBlock(t, ctx, beaconDB, blk)
 	}
 
-	require.NoError(t, service.saveOrphanedAtts(ctx, r2, r4))
-	require.Equal(t, 0, service.cfg.AttPool.AggregatedAttestationCount())
-}
-
-func TestSaveOrphanedAtts_NoCommonAncestor_DoublyLinkedTrie(t *testing.T) {
-	resetCfg := features.InitWithReset(&features.Flags{
-		EnableForkChoiceDoublyLinkedTree: true,
-	})
-	defer resetCfg()
-
-	ctx := context.Background()
-	beaconDB := testDB.SetupDB(t)
-	service := setupBeaconChain(t, beaconDB)
-	service.genesisTime = time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
-
-	// Chain setup
-	// 0 -- 1 -- 2 -- 3
-	//  \-4
-	st, keys := util.DeterministicGenesisState(t, 64)
-	blkG, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 0)
-	assert.NoError(t, err)
-	b, err := wrapper.WrappedSignedBeaconBlock(blkG)
-	assert.NoError(t, err)
-	require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, b))
-	rG, err := blkG.Block.HashTreeRoot()
-	require.NoError(t, err)
-	blk1, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
-	assert.NoError(t, err)
-	blk1.Block.ParentRoot = rG[:]
-	r1, err := blk1.Block.HashTreeRoot()
-	require.NoError(t, err)
-
-	blk2, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 2)
-	assert.NoError(t, err)
-	blk2.Block.ParentRoot = r1[:]
-	r2, err := blk2.Block.HashTreeRoot()
-	require.NoError(t, err)
-
-	blk3, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 3)
-	assert.NoError(t, err)
-	blk3.Block.ParentRoot = r2[:]
-	r3, err := blk3.Block.HashTreeRoot()
-	require.NoError(t, err)
-
-	blk4 := util.NewBeaconBlock()
-	blk4.Block.Slot = 4
-	r4, err := blk4.Block.HashTreeRoot()
-	require.NoError(t, err)
-
-	for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk3, blk4} {
-		r, err := blk.Block.HashTreeRoot()
-		require.NoError(t, err)
-		state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, 0, 0)
-		require.NoError(t, err)
-		require.NoError(t, service.ForkChoicer().InsertNode(ctx, state, blkRoot))
-		b, err := wrapper.WrappedSignedBeaconBlock(blk)
-		require.NoError(t, err)
-		require.NoError(t, beaconDB.SaveBlock(ctx, b))
-	}
-
-	require.NoError(t, service.saveOrphanedAtts(ctx, r3, r4))
-	require.Equal(t, 0, service.cfg.AttPool.AggregatedAttestationCount())
+	require.NoError(t, service.saveOrphanedOperations(ctx, r2, r4))
+	require.Equal(t, 1, service.cfg.AttPool.AggregatedAttestationCount())
+	pending, err := service.cfg.BLSToExecPool.PendingBLSToExecChanges()
+	require.NoError(t, err)
+	require.Equal(t, 15, len(pending))
 }
 
 func TestSaveOrphanedAtts_DoublyLinkedTrie(t *testing.T) {
-	resetCfg := features.InitWithReset(&features.Flags{
-		EnableForkChoiceDoublyLinkedTree: true,
-	})
-	defer resetCfg()
-
 	ctx := context.Background()
 	beaconDB := testDB.SetupDB(t)
 	service := setupBeaconChain(t, beaconDB)
@@ -498,9 +476,7 @@ func TestSaveOrphanedAtts_DoublyLinkedTrie(t *testing.T) {
 	st, keys := util.DeterministicGenesisState(t, 64)
 	blkG, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 0)
 	assert.NoError(t, err)
-	b, err := wrapper.WrappedSignedBeaconBlock(blkG)
-	assert.NoError(t, err)
-	require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, b))
+	util.SaveBlock(t, ctx, service.cfg.BeaconDB, blkG)
 	rG, err := blkG.Block.HashTreeRoot()
 	require.NoError(t, err)
 
@@ -528,18 +504,18 @@ func TestSaveOrphanedAtts_DoublyLinkedTrie(t *testing.T) {
 	r4, err := blk4.Block.HashTreeRoot()
 	require.NoError(t, err)
 
+	ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
+	ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
 	for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk3, blk4} {
 		r, err := blk.Block.HashTreeRoot()
 		require.NoError(t, err)
-		state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, 0, 0)
+		state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
 		require.NoError(t, err)
-		require.NoError(t, service.ForkChoicer().InsertNode(ctx, state, blkRoot))
-		b, err := wrapper.WrappedSignedBeaconBlock(blk)
-		require.NoError(t, err)
-		require.NoError(t, beaconDB.SaveBlock(ctx, b))
+		require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
+		util.SaveBlock(t, ctx, beaconDB, blk)
 	}
 
-	require.NoError(t, service.saveOrphanedAtts(ctx, r3, r4))
+	require.NoError(t, service.saveOrphanedOperations(ctx, r3, r4))
 	require.Equal(t, 3, service.cfg.AttPool.AggregatedAttestationCount())
 	wantAtts := []*ethpb.Attestation{
 		blk3.Block.Body.Attestations[0],
@@ -554,11 +530,6 @@ func TestSaveOrphanedAtts_DoublyLinkedTrie(t *testing.T) {
 }
 
 func TestSaveOrphanedAtts_CanFilter_DoublyLinkedTrie(t *testing.T) {
-	resetCfg := features.InitWithReset(&features.Flags{
-		EnableForkChoiceDoublyLinkedTree: true,
-	})
-	defer resetCfg()
-
 	ctx := context.Background()
 	beaconDB := testDB.SetupDB(t)
 	service := setupBeaconChain(t, beaconDB)
@@ -570,9 +541,7 @@ func TestSaveOrphanedAtts_CanFilter_DoublyLinkedTrie(t *testing.T) {
 	st, keys := util.DeterministicGenesisState(t, 64)
 	blkG, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 0)
 	assert.NoError(t, err)
-	b, err := wrapper.WrappedSignedBeaconBlock(blkG)
-	assert.NoError(t, err)
-	require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, b))
+	util.SaveBlock(t, ctx, service.cfg.BeaconDB, blkG)
 	rG, err := blkG.Block.HashTreeRoot()
 	require.NoError(t, err)
 
@@ -594,46 +563,37 @@ func TestSaveOrphanedAtts_CanFilter_DoublyLinkedTrie(t *testing.T) {
 	r4, err := blk4.Block.HashTreeRoot()
 	require.NoError(t, err)
 
+	ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
+	ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
 	for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk4} {
 		r, err := blk.Block.HashTreeRoot()
 		require.NoError(t, err)
-		state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, 0, 0)
+		state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
 		require.NoError(t, err)
-		require.NoError(t, service.ForkChoicer().InsertNode(ctx, state, blkRoot))
-		b, err := wrapper.WrappedSignedBeaconBlock(blk)
-		require.NoError(t, err)
-		require.NoError(t, beaconDB.SaveBlock(ctx, b))
+		require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
+		util.SaveBlock(t, ctx, beaconDB, blk)
 	}
 
-	require.NoError(t, service.saveOrphanedAtts(ctx, r2, r4))
+	require.NoError(t, service.saveOrphanedOperations(ctx, r2, r4))
 	require.Equal(t, 0, service.cfg.AttPool.AggregatedAttestationCount())
 }
 
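A recurring change in the test hunks above is that prepareForkchoiceState and InsertNode now take justified and finalized checkpoints (ojc, ofc) instead of bare epoch numbers. A minimal sketch of that shape, with hypothetical trimmed-down types rather than Prysm's forkchoice interfaces:

package main

import "fmt"

// Hypothetical stand-ins: each forkchoice node carries the checkpoints
// observed at that block, mirroring the ojc/ofc arguments in the tests above.
type Checkpoint struct {
	Epoch uint64
	Root  [32]byte
}

type Node struct {
	Slot      uint64
	Root      [32]byte
	Parent    [32]byte
	Justified Checkpoint
	Finalized Checkpoint
}

type Store struct{ nodes map[[32]byte]Node }

// InsertNode records the node keyed by its block root.
func (s *Store) InsertNode(n Node) {
	if s.nodes == nil {
		s.nodes = make(map[[32]byte]Node)
	}
	s.nodes[n.Root] = n
}

func main() {
	zero := [32]byte{}
	ojc := Checkpoint{Epoch: 0, Root: zero}
	s := &Store{}
	s.InsertNode(Node{Slot: 1, Root: [32]byte{1}, Parent: zero, Justified: ojc, Finalized: ojc})
	fmt.Println(len(s.nodes)) // 1
}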
 func TestUpdateHead_noSavedChanges(t *testing.T) {
-	ctx := context.Background()
+	service, tr := minimalTestService(t)
+	ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs
 
-	beaconDB := testDB.SetupDB(t)
-	fcs := doublylinkedtree.New(0, 0)
-	opts := []Option{
-		WithDatabase(beaconDB),
-		WithStateGen(stategen.New(beaconDB)),
-		WithForkChoiceStore(fcs),
-	}
-
-	service, err := NewService(ctx, opts...)
+	ojp := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
+	st, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, [32]byte{}, ojp, ojp)
 	require.NoError(t, err)
+	require.NoError(t, fcs.InsertNode(ctx, st, blkRoot))
 
-	bellatrixBlk, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlockBellatrix())
-	require.NoError(t, err)
+	bellatrixBlk := util.SaveBlock(t, ctx, beaconDB, util.NewBeaconBlockBellatrix())
 	bellatrixBlkRoot, err := bellatrixBlk.Block().HashTreeRoot()
 	require.NoError(t, err)
-	require.NoError(t, beaconDB.SaveBlock(ctx, bellatrixBlk))
 	fcp := &ethpb.Checkpoint{
 		Root:  bellatrixBlkRoot[:],
-		Epoch: 1,
+		Epoch: 0,
 	}
-	service.store.SetFinalizedCheckptAndPayloadHash(fcp, [32]byte{'a'})
-	service.store.SetJustifiedCheckptAndPayloadHash(fcp, [32]byte{'b'})
 	require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, bellatrixBlkRoot))
 
 	bellatrixState, _ := util.DeterministicGenesisStateBellatrix(t, 2)
@@ -643,10 +603,12 @@ func TestUpdateHead_noSavedChanges(t *testing.T) {
 	headRoot := service.headRoot()
 	require.Equal(t, [32]byte{}, headRoot)
 
-	st, blkRoot, err := prepareForkchoiceState(ctx, 0, bellatrixBlkRoot, [32]byte{}, [32]byte{}, 0, 0)
+	st, blkRoot, err = prepareForkchoiceState(ctx, 0, bellatrixBlkRoot, [32]byte{}, [32]byte{}, fcp, fcp)
 	require.NoError(t, err)
 	require.NoError(t, fcs.InsertNode(ctx, st, blkRoot))
-	newRoot, err := service.updateHead(ctx, []uint64{1, 2})
+	fcs.SetBalancesByRooter(func(context.Context, [32]byte) ([]uint64, error) { return []uint64{1, 2}, nil })
+	require.NoError(t, fcs.UpdateJustifiedCheckpoint(ctx, &forkchoicetypes.Checkpoint{}))
+	newRoot, err := service.cfg.ForkChoiceStore.Head(ctx)
 	require.NoError(t, err)
 	require.NotEqual(t, headRoot, newRoot)
 	require.Equal(t, headRoot, service.headRoot())
@@ -4,15 +4,24 @@ import (
 	"context"
 
 	"github.com/pkg/errors"
-	"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
-	"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
+	"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
+	"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
 )
 
-// This saves a beacon block to the initial sync blocks cache.
-func (s *Service) saveInitSyncBlock(r [32]byte, b interfaces.SignedBeaconBlock) {
+// This saves a beacon block to the initial sync blocks cache. It rate limits how many blocks
+// the cache keeps in memory (2 epochs worth of blocks) and saves them to DB when it hits this limit.
+func (s *Service) saveInitSyncBlock(ctx context.Context, r [32]byte, b interfaces.ReadOnlySignedBeaconBlock) error {
 	s.initSyncBlocksLock.Lock()
-	defer s.initSyncBlocksLock.Unlock()
 	s.initSyncBlocks[r] = b
+	numBlocks := len(s.initSyncBlocks)
+	s.initSyncBlocksLock.Unlock()
+	if uint64(numBlocks) > initialSyncBlockCacheSize {
+		if err := s.cfg.BeaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
+			return err
+		}
+		s.clearInitSyncBlocks()
+	}
+	return nil
 }
 
 // This checks if a beacon block exists in the initial sync blocks cache using the root
@@ -34,7 +43,7 @@ func (s *Service) hasBlockInInitSyncOrDB(ctx context.Context, r [32]byte) bool {
 
 // Returns block for a given root `r` from either the initial sync blocks cache or the DB.
 // Error is returned if the block is not found in either cache or DB.
-func (s *Service) getBlock(ctx context.Context, r [32]byte) (interfaces.SignedBeaconBlock, error) {
+func (s *Service) getBlock(ctx context.Context, r [32]byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
 	s.initSyncBlocksLock.RLock()
 
 	// Check cache first because it's faster.
@@ -47,7 +56,7 @@ func (s *Service) getBlock(ctx context.Context, r [32]byte) (interfaces.SignedBe
 			return nil, errors.Wrap(err, "could not retrieve block from db")
 		}
 	}
-	if err := wrapper.BeaconBlockIsNil(b); err != nil {
+	if err := blocks.BeaconBlockIsNil(b); err != nil {
 		return nil, errBlockNotFoundInCacheOrDB
 	}
 	return b, nil
@@ -55,11 +64,11 @@ func (s *Service) getBlock(ctx context.Context, r [32]byte) (interfaces.SignedBe
 
 // This retrieves all the beacon blocks from the initial sync blocks cache, the returned
 // blocks are unordered.
-func (s *Service) getInitSyncBlocks() []interfaces.SignedBeaconBlock {
+func (s *Service) getInitSyncBlocks() []interfaces.ReadOnlySignedBeaconBlock {
 	s.initSyncBlocksLock.RLock()
 	defer s.initSyncBlocksLock.RUnlock()
 
-	blks := make([]interfaces.SignedBeaconBlock, 0, len(s.initSyncBlocks))
+	blks := make([]interfaces.ReadOnlySignedBeaconBlock, 0, len(s.initSyncBlocks))
 	for _, b := range s.initSyncBlocks {
 		blks = append(blks, b)
 	}
@@ -70,5 +79,5 @@ func (s *Service) getInitSyncBlocks() []interfaces.SignedBeaconBlock {
 func (s *Service) clearInitSyncBlocks() {
 	s.initSyncBlocksLock.Lock()
 	defer s.initSyncBlocksLock.Unlock()
-	s.initSyncBlocks = make(map[[32]byte]interfaces.SignedBeaconBlock)
+	s.initSyncBlocks = make(map[[32]byte]interfaces.ReadOnlySignedBeaconBlock)
 }
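The reworked saveInitSyncBlock above now bounds the in-memory cache and flushes everything to the DB once it grows past roughly two epochs worth of blocks. A minimal sketch of that rate-limiting pattern, assuming hypothetical types that stand in for the service, DB, and block:

package main

import (
	"fmt"
	"sync"
)

const cacheLimit = 64 // stand-in for initialSyncBlockCacheSize (~2 epochs of blocks)

type blockCache struct {
	mu     sync.Mutex
	blocks map[[32]byte]string // hypothetical block payload keyed by root
	saveDB func(blocks []string) error
}

// save stores the block in memory and flushes the whole cache to the DB once
// it exceeds the limit, mirroring the behaviour described in the comment above.
func (c *blockCache) save(root [32]byte, blk string) error {
	c.mu.Lock()
	c.blocks[root] = blk
	n := len(c.blocks)
	c.mu.Unlock()

	if n > cacheLimit {
		c.mu.Lock()
		all := make([]string, 0, len(c.blocks))
		for _, b := range c.blocks {
			all = append(all, b)
		}
		c.blocks = make(map[[32]byte]string)
		c.mu.Unlock()
		return c.saveDB(all)
	}
	return nil
}

func main() {
	c := &blockCache{
		blocks: make(map[[32]byte]string),
		saveDB: func(blocks []string) error { fmt.Println("flushed", len(blocks), "blocks"); return nil },
	}
	for i := 0; i < 70; i++ {
		if err := c.save([32]byte{byte(i)}, fmt.Sprintf("block-%d", i)); err != nil {
			panic(err)
		}
	}
}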
@@ -4,10 +4,10 @@ import (
 	"context"
 	"testing"
 
-	testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
-	"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
-	"github.com/prysmaticlabs/prysm/testing/require"
-	"github.com/prysmaticlabs/prysm/testing/util"
+	testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
+	"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
+	"github.com/prysmaticlabs/prysm/v4/testing/require"
+	"github.com/prysmaticlabs/prysm/v4/testing/util"
 )
 
 func TestService_getBlock(t *testing.T) {
@@ -27,17 +27,15 @@ func TestService_getBlock(t *testing.T) {
 	require.ErrorIs(t, err, errBlockNotFoundInCacheOrDB)
 
 	// block in cache
-	b, err := wrapper.WrappedSignedBeaconBlock(b1)
+	b, err := blocks.NewSignedBeaconBlock(b1)
 	require.NoError(t, err)
-	s.saveInitSyncBlock(r1, b)
+	require.NoError(t, s.saveInitSyncBlock(ctx, r1, b))
 	got, err := s.getBlock(ctx, r1)
 	require.NoError(t, err)
 	require.DeepEqual(t, b, got)
 
 	// block in db
-	b, err = wrapper.WrappedSignedBeaconBlock(b2)
-	require.NoError(t, err)
-	require.NoError(t, s.cfg.BeaconDB.SaveBlock(ctx, b))
+	b = util.SaveBlock(t, ctx, s.cfg.BeaconDB, b2)
 	got, err = s.getBlock(ctx, r2)
 	require.NoError(t, err)
 	require.DeepEqual(t, b, got)
@@ -59,14 +57,12 @@ func TestService_hasBlockInInitSyncOrDB(t *testing.T) {
 	require.Equal(t, false, s.hasBlockInInitSyncOrDB(ctx, [32]byte{}))
 
 	// block in cache
-	b, err := wrapper.WrappedSignedBeaconBlock(b1)
+	b, err := blocks.NewSignedBeaconBlock(b1)
 	require.NoError(t, err)
-	s.saveInitSyncBlock(r1, b)
+	require.NoError(t, s.saveInitSyncBlock(ctx, r1, b))
 	require.Equal(t, true, s.hasBlockInInitSyncOrDB(ctx, r1))
 
 	// block in db
-	b, err = wrapper.WrappedSignedBeaconBlock(b2)
-	require.NoError(t, err)
-	require.NoError(t, s.cfg.BeaconDB.SaveBlock(ctx, b))
+	util.SaveBlock(t, ctx, s.cfg.BeaconDB, b2)
 	require.Equal(t, true, s.hasBlockInInitSyncOrDB(ctx, r2))
 }
@@ -1,7 +1,7 @@
 package blockchain
 
 import (
-	"github.com/prysmaticlabs/prysm/config/params"
+	"github.com/prysmaticlabs/prysm/v4/config/params"
 )
 
 func init() {
@@ -5,20 +5,23 @@ import (
 	"fmt"
 	"time"
 
-	"github.com/prysmaticlabs/prysm/config/params"
-	"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
-	"github.com/prysmaticlabs/prysm/encoding/bytesutil"
-	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
-	"github.com/prysmaticlabs/prysm/runtime/version"
-	prysmTime "github.com/prysmaticlabs/prysm/time"
-	"github.com/prysmaticlabs/prysm/time/slots"
+	"github.com/pkg/errors"
+	"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
+	"github.com/prysmaticlabs/prysm/v4/config/params"
+	consensus_types "github.com/prysmaticlabs/prysm/v4/consensus-types"
+	"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
+	"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
+	ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
+	"github.com/prysmaticlabs/prysm/v4/runtime/version"
+	prysmTime "github.com/prysmaticlabs/prysm/v4/time"
+	"github.com/prysmaticlabs/prysm/v4/time/slots"
 	"github.com/sirupsen/logrus"
 )
 
 var log = logrus.WithField("prefix", "blockchain")
 
 // logs state transition related data every slot.
-func logStateTransitionData(b interfaces.BeaconBlock) error {
+func logStateTransitionData(b interfaces.ReadOnlyBeaconBlock) error {
 	log := log.WithField("slot", b.Slot())
 	if len(b.Body().Attestations()) > 0 {
 		log = log.WithField("attestations", len(b.Body().Attestations()))
@@ -35,48 +38,103 @@ func logStateTransitionData(b interfaces.BeaconBlock) error {
 	if len(b.Body().VoluntaryExits()) > 0 {
 		log = log.WithField("voluntaryExits", len(b.Body().VoluntaryExits()))
 	}
-	if b.Version() == version.Altair || b.Version() == version.Bellatrix {
+	if b.Version() >= version.Altair {
 		agg, err := b.Body().SyncAggregate()
 		if err != nil {
 			return err
 		}
 		log = log.WithField("syncBitsCount", agg.SyncCommitteeBits.Count())
 	}
-	if b.Version() == version.Bellatrix {
-		p, err := b.Body().ExecutionPayload()
+	if b.Version() >= version.Bellatrix {
+		p, err := b.Body().Execution()
 		if err != nil {
 			return err
 		}
-		log = log.WithField("payloadHash", fmt.Sprintf("%#x", bytesutil.Trunc(p.BlockHash)))
-		log = log.WithField("txCount", len(p.Transactions))
+		log = log.WithField("payloadHash", fmt.Sprintf("%#x", bytesutil.Trunc(p.BlockHash())))
+		txs, err := p.Transactions()
+		switch {
+		case errors.Is(err, consensus_types.ErrUnsupportedGetter):
+		case err != nil:
+			return err
+		default:
+			log = log.WithField("txCount", len(txs))
+			txsPerSlotCount.Set(float64(len(txs)))
+		}
 	}
 	log.Info("Finished applying state transition")
 	return nil
 }
 
-func logBlockSyncStatus(block interfaces.BeaconBlock, blockRoot [32]byte, justified, finalized *ethpb.Checkpoint, receivedTime time.Time, genesisTime uint64) error {
+func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte, justified, finalized *ethpb.Checkpoint, receivedTime time.Time, genesisTime uint64) error {
 	startTime, err := slots.ToTime(genesisTime, block.Slot())
 	if err != nil {
 		return err
 	}
 	level := log.Logger.GetLevel()
-	log = log.WithField("slot", block.Slot())
 	if level >= logrus.DebugLevel {
-		log = log.WithField("slotInEpoch", block.Slot()%params.BeaconConfig().SlotsPerEpoch)
-		log = log.WithField("justifiedEpoch", justified.Epoch)
-		log = log.WithField("justifiedRoot", fmt.Sprintf("0x%s...", hex.EncodeToString(justified.Root)[:8]))
-		log = log.WithField("parentRoot", fmt.Sprintf("0x%s...", hex.EncodeToString(block.ParentRoot())[:8]))
-		log = log.WithField("version", version.String(block.Version()))
-		log = log.WithField("sinceSlotStartTime", prysmTime.Now().Sub(startTime))
-		log = log.WithField("chainServiceProcessedTime", prysmTime.Now().Sub(receivedTime))
+		parentRoot := block.ParentRoot()
+		log.WithFields(logrus.Fields{
+			"slot":                      block.Slot(),
+			"slotInEpoch":               block.Slot() % params.BeaconConfig().SlotsPerEpoch,
+			"block":                     fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
+			"epoch":                     slots.ToEpoch(block.Slot()),
+			"justifiedEpoch":            justified.Epoch,
+			"justifiedRoot":             fmt.Sprintf("0x%s...", hex.EncodeToString(justified.Root)[:8]),
+			"finalizedEpoch":            finalized.Epoch,
+			"finalizedRoot":             fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
+			"parentRoot":                fmt.Sprintf("0x%s...", hex.EncodeToString(parentRoot[:])[:8]),
+			"version":                   version.String(block.Version()),
+			"sinceSlotStartTime":        prysmTime.Now().Sub(startTime),
+			"chainServiceProcessedTime": prysmTime.Now().Sub(receivedTime),
+			"deposits":                  len(block.Body().Deposits()),
+		}).Debug("Synced new block")
+	} else {
+		log.WithFields(logrus.Fields{
+			"slot":           block.Slot(),
+			"block":          fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
+			"finalizedEpoch": finalized.Epoch,
+			"finalizedRoot":  fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
+			"epoch":          slots.ToEpoch(block.Slot()),
+		}).Info("Synced new block")
 	}
-	log.WithFields(logrus.Fields{
-		"block":          fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
-		"epoch":          slots.ToEpoch(block.Slot()),
-		"finalizedEpoch": finalized.Epoch,
-		"finalizedRoot":  fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
-	}).Info("Synced new block")
+	return nil
+}
+
+// logs payload related data every slot.
+func logPayload(block interfaces.ReadOnlyBeaconBlock) error {
+	isExecutionBlk, err := blocks.IsExecutionBlock(block.Body())
+	if err != nil {
+		return errors.Wrap(err, "could not determine if block is execution block")
+	}
+	if !isExecutionBlk {
+		return nil
+	}
+	payload, err := block.Body().Execution()
+	if err != nil {
+		return err
+	}
+	if payload.GasLimit() == 0 {
+		return errors.New("gas limit should not be 0")
+	}
+	gasUtilized := float64(payload.GasUsed()) / float64(payload.GasLimit())
+	fields := logrus.Fields{
+		"blockHash":   fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash())),
+		"parentHash":  fmt.Sprintf("%#x", bytesutil.Trunc(payload.ParentHash())),
+		"blockNumber": payload.BlockNumber,
+		"gasUtilized": fmt.Sprintf("%.2f", gasUtilized),
+	}
+	if block.Version() >= version.Capella {
+		withdrawals, err := payload.Withdrawals()
+		if err != nil {
+			return errors.Wrap(err, "could not get withdrawals")
+		}
+		fields["withdrawals"] = len(withdrawals)
+		changes, err := block.Body().BLSToExecutionChanges()
+		if err != nil {
+			return errors.Wrap(err, "could not get BLSToExecutionChanges")
+		}
+		fields["blsToExecutionChanges"] = len(changes)
+	}
+	log.WithFields(fields).Debug("Synced new payload")
 	return nil
 }
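The new logPayload above derives a gas utilization ratio from the execution payload and refuses a zero gas limit before logging. A self-contained sketch of just that calculation (the payload struct here is a hypothetical stand-in, not Prysm's execution-payload interface):

package main

import (
	"errors"
	"fmt"
)

// Hypothetical execution payload fields used by the log line above.
type payload struct {
	GasUsed  uint64
	GasLimit uint64
}

// gasUtilized returns GasUsed/GasLimit as a fraction, guarding against a
// zero gas limit the same way logPayload does before logging the value.
func gasUtilized(p payload) (float64, error) {
	if p.GasLimit == 0 {
		return 0, errors.New("gas limit should not be 0")
	}
	return float64(p.GasUsed) / float64(p.GasLimit), nil
}

func main() {
	u, err := gasUtilized(payload{GasUsed: 15_000_000, GasLimit: 30_000_000})
	if err != nil {
		panic(err)
	}
	fmt.Printf("gasUtilized=%.2f\n", u) // 0.50
}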
@@ -3,11 +3,11 @@ package blockchain
 import (
 	"testing"
 
-	"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
-	"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
-	enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
-	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
-	"github.com/prysmaticlabs/prysm/testing/require"
+	"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
+	"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
+	enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
+	ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
+	"github.com/prysmaticlabs/prysm/v4/testing/require"
 	logTest "github.com/sirupsen/logrus/hooks/test"
 )
 
@@ -21,32 +21,32 @@ func Test_logStateTransitionData(t *testing.T) {
 			},
 		},
 	}
-	wrappedPayloadBlk, err := wrapper.WrappedBeaconBlock(payloadBlk)
+	wrappedPayloadBlk, err := blocks.NewBeaconBlock(payloadBlk)
 	require.NoError(t, err)
 	tests := []struct {
 		name string
-		b    func() interfaces.BeaconBlock
+		b    func() interfaces.ReadOnlyBeaconBlock
 		want string
 	}{
 		{name: "empty block body",
-			b: func() interfaces.BeaconBlock {
-				wb, err := wrapper.WrappedBeaconBlock(&ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}})
+			b: func() interfaces.ReadOnlyBeaconBlock {
+				wb, err := blocks.NewBeaconBlock(&ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}})
 				require.NoError(t, err)
 				return wb
 			},
 			want: "\"Finished applying state transition\" prefix=blockchain slot=0",
 		},
 		{name: "has attestation",
-			b: func() interfaces.BeaconBlock {
-				wb, err := wrapper.WrappedBeaconBlock(&ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{Attestations: []*ethpb.Attestation{{}}}})
+			b: func() interfaces.ReadOnlyBeaconBlock {
+				wb, err := blocks.NewBeaconBlock(&ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{Attestations: []*ethpb.Attestation{{}}}})
 				require.NoError(t, err)
 				return wb
 			},
 			want: "\"Finished applying state transition\" attestations=1 prefix=blockchain slot=0",
 		},
 		{name: "has deposit",
-			b: func() interfaces.BeaconBlock {
-				wb, err := wrapper.WrappedBeaconBlock(
+			b: func() interfaces.ReadOnlyBeaconBlock {
+				wb, err := blocks.NewBeaconBlock(
 					&ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{
 						Attestations: []*ethpb.Attestation{{}},
 						Deposits:     []*ethpb.Deposit{{}}}})
@@ -56,8 +56,8 @@ func Test_logStateTransitionData(t *testing.T) {
 			want: "\"Finished applying state transition\" attestations=1 deposits=1 prefix=blockchain slot=0",
 		},
 		{name: "has attester slashing",
-			b: func() interfaces.BeaconBlock {
-				wb, err := wrapper.WrappedBeaconBlock(&ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{
+			b: func() interfaces.ReadOnlyBeaconBlock {
+				wb, err := blocks.NewBeaconBlock(&ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{
 					AttesterSlashings: []*ethpb.AttesterSlashing{{}}}})
 				require.NoError(t, err)
 				return wb
@@ -65,8 +65,8 @@ func Test_logStateTransitionData(t *testing.T) {
 			want: "\"Finished applying state transition\" attesterSlashings=1 prefix=blockchain slot=0",
 		},
 		{name: "has proposer slashing",
-			b: func() interfaces.BeaconBlock {
-				wb, err := wrapper.WrappedBeaconBlock(&ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{
+			b: func() interfaces.ReadOnlyBeaconBlock {
+				wb, err := blocks.NewBeaconBlock(&ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{
 					ProposerSlashings: []*ethpb.ProposerSlashing{{}}}})
 				require.NoError(t, err)
 				return wb
@@ -74,8 +74,8 @@ func Test_logStateTransitionData(t *testing.T) {
 			want: "\"Finished applying state transition\" prefix=blockchain proposerSlashings=1 slot=0",
 		},
 		{name: "has exit",
-			b: func() interfaces.BeaconBlock {
-				wb, err := wrapper.WrappedBeaconBlock(&ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{
+			b: func() interfaces.ReadOnlyBeaconBlock {
+				wb, err := blocks.NewBeaconBlock(&ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{
 					VoluntaryExits: []*ethpb.SignedVoluntaryExit{{}}}})
 				require.NoError(t, err)
 				return wb
@@ -83,8 +83,8 @@ func Test_logStateTransitionData(t *testing.T) {
 			want: "\"Finished applying state transition\" prefix=blockchain slot=0 voluntaryExits=1",
 		},
 		{name: "has everything",
-			b: func() interfaces.BeaconBlock {
-				wb, err := wrapper.WrappedBeaconBlock(&ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{
+			b: func() interfaces.ReadOnlyBeaconBlock {
+				wb, err := blocks.NewBeaconBlock(&ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{
 					Attestations:      []*ethpb.Attestation{{}},
 					Deposits:          []*ethpb.Deposit{{}},
 					AttesterSlashings: []*ethpb.AttesterSlashing{{}},
@@ -96,7 +96,7 @@ func Test_logStateTransitionData(t *testing.T) {
 			want: "\"Finished applying state transition\" attestations=1 attesterSlashings=1 deposits=1 prefix=blockchain proposerSlashings=1 slot=0 voluntaryExits=1",
 		},
 		{name: "has payload",
-			b:    func() interfaces.BeaconBlock { return wrappedPayloadBlk },
+			b:    func() interfaces.ReadOnlyBeaconBlock { return wrappedPayloadBlk },
 			want: "\"Finished applying state transition\" payloadHash=0x010203 prefix=blockchain slot=0 syncBitsCount=0 txCount=2",
 		},
 	}
81  beacon-chain/blockchain/merge_ascii_art.go  New file
@@ -0,0 +1,81 @@
+package blockchain
+
+var mergeAsciiArt = `
+[77 lines of ASCII art omitted: an Ethereum diamond, what appears to be the merge panda, and a large word-art banner]
+`
@@ -6,15 +6,15 @@ import (
|
|||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"github.com/prometheus/client_golang/prometheus"
|
"github.com/prometheus/client_golang/prometheus"
|
||||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/altair"
|
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/altair"
|
||||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/epoch/precompute"
|
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/epoch/precompute"
|
||||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
|
||||||
"github.com/prysmaticlabs/prysm/config/params"
|
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||||
"github.com/prysmaticlabs/prysm/consensus-types/interfaces"
|
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
|
||||||
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
|
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||||
"github.com/prysmaticlabs/prysm/runtime/version"
|
"github.com/prysmaticlabs/prysm/v4/runtime/version"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@@ -111,6 +111,18 @@ var (
|
|||||||
Name: "beacon_reorgs_total",
|
Name: "beacon_reorgs_total",
|
||||||
Help: "Count the number of times beacon chain has a reorg",
|
Help: "Count the number of times beacon chain has a reorg",
|
||||||
})
|
})
|
||||||
|
LateBlockAttemptedReorgCount = promauto.NewCounter(prometheus.CounterOpts{
|
||||||
|
Name: "beacon_late_block_attempted_reorgs",
|
||||||
|
Help: "Count the number of times a proposer served by this beacon has attempted a late block reorg",
|
||||||
|
})
|
||||||
|
lateBlockFailedAttemptFirstThreshold = promauto.NewCounter(prometheus.CounterOpts{
|
||||||
|
Name: "beacon_failed_reorg_attempts_first_threshold",
|
||||||
|
Help: "Count the number of times a proposer served by this beacon attempted a late block reorg but desisted in the first threshold",
|
||||||
|
})
|
||||||
|
lateBlockFailedAttemptSecondThreshold = promauto.NewCounter(prometheus.CounterOpts{
|
||||||
|
Name: "beacon_failed_reorg_attempts_second_threshold",
|
||||||
|
Help: "Count the number of times a proposer served by this beacon attempted a late block reorg but desisted in the second threshold",
|
||||||
|
})
|
||||||
saveOrphanedAttCount = promauto.NewCounter(prometheus.CounterOpts{
|
saveOrphanedAttCount = promauto.NewCounter(prometheus.CounterOpts{
|
||||||
Name: "saved_orphaned_att_total",
|
Name: "saved_orphaned_att_total",
|
||||||
Help: "Count the number of times an orphaned attestation is saved",
|
Help: "Count the number of times an orphaned attestation is saved",
|
||||||
@@ -130,14 +142,6 @@ var (
|
|||||||
Name: "sync_head_state_hit",
|
Name: "sync_head_state_hit",
|
||||||
Help: "The number of sync head state requests that are present in the cache.",
|
Help: "The number of sync head state requests that are present in the cache.",
|
||||||
})
|
})
|
||||||
stateBalanceCacheHit = promauto.NewCounter(prometheus.CounterOpts{
|
|
||||||
Name: "state_balance_cache_hit",
|
|
||||||
Help: "Count the number of state balance cache hits.",
|
|
||||||
})
|
|
||||||
stateBalanceCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
|
|
||||||
Name: "state_balance_cache_miss",
|
|
||||||
Help: "Count the number of state balance cache hits.",
|
|
||||||
})
|
|
||||||
newPayloadValidNodeCount = promauto.NewCounter(prometheus.CounterOpts{
|
newPayloadValidNodeCount = promauto.NewCounter(prometheus.CounterOpts{
|
||||||
Name: "new_payload_valid_node_count",
|
Name: "new_payload_valid_node_count",
|
||||||
Help: "Count the number of valid nodes after newPayload EE call",
|
Help: "Count the number of valid nodes after newPayload EE call",
|
||||||
@@ -158,14 +162,61 @@ var (
|
|||||||
Name: "forkchoice_updated_optimistic_node_count",
|
Name: "forkchoice_updated_optimistic_node_count",
|
||||||
Help: "Count the number of optimistic nodes after forkchoiceUpdated EE call",
|
Help: "Count the number of optimistic nodes after forkchoiceUpdated EE call",
|
||||||
})
|
})
|
||||||
missedPayloadIDFilledCount = promauto.NewCounter(prometheus.CounterOpts{
|
forkchoiceUpdatedInvalidNodeCount = promauto.NewCounter(prometheus.CounterOpts{
|
||||||
Name: "missed_payload_id_filled_count",
|
Name: "forkchoice_updated_invalid_node_count",
|
||||||
Help: "",
|
Help: "Count the number of invalid nodes after forkchoiceUpdated EE call",
|
||||||
})
|
})
|
||||||
|
txsPerSlotCount = promauto.NewGauge(prometheus.GaugeOpts{
|
||||||
|
Name: "txs_per_slot_count",
|
||||||
|
Help: "Count the number of txs per slot",
|
||||||
|
})
|
||||||
|
onBlockProcessingTime = promauto.NewSummary(prometheus.SummaryOpts{
|
||||||
|
Name: "on_block_processing_milliseconds",
|
||||||
|
Help: "Total time in milliseconds to complete a call to onBlock()",
|
||||||
|
})
|
||||||
|
stateTransitionProcessingTime = promauto.NewSummary(prometheus.SummaryOpts{
|
||||||
|
Name: "state_transition_processing_milliseconds",
|
||||||
|
Help: "Total time to call a state transition in onBlock()",
|
||||||
|
})
|
||||||
|
processAttsElapsedTime = promauto.NewHistogram(
|
||||||
|
prometheus.HistogramOpts{
|
||||||
|
Name: "process_attestations_milliseconds",
|
||||||
|
Help: "Captures latency for process attestations (forkchoice) in milliseconds",
|
||||||
|
Buckets: []float64{1, 5, 20, 100, 500, 1000},
|
||||||
|
},
|
||||||
|
)
|
||||||
|
newAttHeadElapsedTime = promauto.NewHistogram(
|
||||||
|
prometheus.HistogramOpts{
|
||||||
|
Name: "new_att_head_milliseconds",
|
||||||
|
Help: "Captures latency for new attestation head in milliseconds",
|
||||||
|
Buckets: []float64{1, 5, 20, 100, 500, 1000},
|
||||||
|
},
|
||||||
|
)
|
||||||
|
newBlockHeadElapsedTime = promauto.NewHistogram(
|
||||||
|
prometheus.HistogramOpts{
|
||||||
|
Name: "new_block_head_milliseconds",
|
||||||
|
Help: "Captures latency for new block head in milliseconds",
|
||||||
|
Buckets: []float64{1, 5, 20, 100, 500, 1000},
|
||||||
|
},
|
||||||
|
)
|
||||||
|
reorgDistance = promauto.NewHistogram(
|
||||||
|
prometheus.HistogramOpts{
|
||||||
|
Name: "reorg_distance",
|
||||||
|
Help: "Captures distance of reorgs. Distance is defined as the number of blocks between the old head and the new head",
|
||||||
|
Buckets: []float64{1, 2, 4, 8, 16, 32, 64},
|
||||||
|
},
|
||||||
|
)
|
||||||
|
reorgDepth = promauto.NewHistogram(
|
||||||
|
prometheus.HistogramOpts{
|
||||||
|
Name: "reorg_depth",
|
||||||
|
Help: "Captures depth of reorgs. Depth is defined as the number of blocks between the head and the common ancestor",
|
||||||
|
Buckets: []float64{1, 2, 4, 8, 16, 32},
|
||||||
|
},
|
||||||
|
)
|
||||||
)
|
)
|
||||||
|
|
||||||
 // reportSlotMetrics reports slot related metrics.
-func reportSlotMetrics(stateSlot, headSlot, clockSlot types.Slot, finalizedCheckpoint *ethpb.Checkpoint) {
+func reportSlotMetrics(stateSlot, headSlot, clockSlot primitives.Slot, finalizedCheckpoint *ethpb.Checkpoint) {
     clockTimeSlot.Set(float64(clockSlot))
     beaconSlot.Set(float64(stateSlot))
     beaconHeadSlot.Set(float64(headSlot))

@@ -177,7 +228,7 @@ func reportSlotMetrics(stateSlot, headSlot, clockSlot types.Slot, finalizedCheck

 // reportEpochMetrics reports epoch related metrics.
 func reportEpochMetrics(ctx context.Context, postState, headState state.BeaconState) error {
-    currentEpoch := types.Epoch(postState.Slot() / params.BeaconConfig().SlotsPerEpoch)
+    currentEpoch := primitives.Epoch(postState.Slot() / params.BeaconConfig().SlotsPerEpoch)

     // Validator instances
     pendingInstances := 0

@@ -196,9 +247,9 @@ func reportEpochMetrics(ctx context.Context, postState, headState state.BeaconSt
     slashingEffectiveBalance := uint64(0)

     for i, validator := range postState.Validators() {
-        bal, err := postState.BalanceAtIndex(types.ValidatorIndex(i))
+        bal, err := postState.BalanceAtIndex(primitives.ValidatorIndex(i))
         if err != nil {
-            log.Errorf("Could not load validator balance: %v", err)
+            log.WithError(err).Error("Could not load validator balance")
             continue
         }
         if validator.Slashed {

@@ -266,9 +317,8 @@ func reportEpochMetrics(ctx context.Context, postState, headState state.BeaconSt
     var b *precompute.Balance
     var v []*precompute.Validator
     var err error
-    switch headState.Version() {
-    case version.Phase0:
-        // Validator participation should be viewed on the canonical chain.
+    if headState.Version() == version.Phase0 {
         v, b, err = precompute.New(ctx, headState)
         if err != nil {
             return err

@@ -277,7 +327,7 @@ func reportEpochMetrics(ctx context.Context, postState, headState state.BeaconSt
         if err != nil {
             return err
         }
-    case version.Altair, version.Bellatrix:
+    } else if headState.Version() >= version.Altair {
         v, b, err = altair.InitializePrecomputeValidators(ctx, headState)
         if err != nil {
             return err

@@ -286,9 +336,10 @@ func reportEpochMetrics(ctx context.Context, postState, headState state.BeaconSt
         if err != nil {
             return err
         }
-    default:
-        return errors.Errorf("invalid state type provided: %T", headState.InnerStateUnsafe())
+    } else {
+        return errors.Errorf("invalid state type provided: %T", headState.ToProtoUnsafe())
     }

     prevEpochActiveBalances.Set(float64(b.ActivePrevEpoch))
     prevEpochSourceBalances.Set(float64(b.PrevEpochAttested))
     prevEpochTargetBalances.Set(float64(b.PrevEpochTargetAttested))

@@ -302,7 +353,7 @@ func reportEpochMetrics(ctx context.Context, postState, headState state.BeaconSt
     return nil
 }

-func reportAttestationInclusion(blk interfaces.BeaconBlock) {
+func reportAttestationInclusion(blk interfaces.ReadOnlyBeaconBlock) {
     for _, att := range blk.Body().Attestations() {
         attestationInclusionDelay.Observe(float64(blk.Slot() - att.Data.Slot))
     }
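Aside, not part of the diff: the reorg-depth histogram near the top of this file only shows its Help text and bucket boundaries in the hunk above. As a rough, hedged sketch of how such a Prometheus histogram is declared and fed in Go, the snippet below uses the standard prometheus/promauto client; the metric name, the variable name, and the Observe call site are assumptions for illustration and do not come from this compare view.

package metricsdemo

import (
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promauto"
)

// reorgDepthHistogram is a hypothetical stand-in for the histogram added in the
// diff; only the Help string and the bucket boundaries are taken from the hunk.
var reorgDepthHistogram = promauto.NewHistogram(prometheus.HistogramOpts{
    Name:    "beacon_reorg_depth", // assumed name, not shown in the hunk
    Help:    "Captures depth of reorgs. Depth is defined as the number of blocks between the head and the common ancestor",
    Buckets: []float64{1, 2, 4, 8, 16, 32},
})

// recordReorg would be called when fork choice switches heads; computing the
// depth itself happens elsewhere and is outside the scope of this sketch.
func recordReorg(depth uint64) {
    reorgDepthHistogram.Observe(float64(depth))
}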
@@ -4,9 +4,9 @@ import (
     "context"
     "testing"

-    eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
-    "github.com/prysmaticlabs/prysm/testing/require"
-    "github.com/prysmaticlabs/prysm/testing/util"
+    eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
+    "github.com/prysmaticlabs/prysm/v4/testing/require"
+    "github.com/prysmaticlabs/prysm/v4/testing/util"
 )

 func TestReportEpochMetrics_BadHeadState(t *testing.T) {
@@ -1,23 +1,23 @@
 package blockchain

 import (
-    "context"
-    "errors"
     "testing"

-    testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
-    "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
-    "github.com/prysmaticlabs/prysm/beacon-chain/state"
-    "github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
+    testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
+    doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
+    "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
+    "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
 )

 func testServiceOptsWithDB(t *testing.T) []Option {
     beaconDB := testDB.SetupDB(t)
-    fcs := protoarray.New(0, 0)
+    fcs := doublylinkedtree.New()
+    cs := startup.NewClockSynchronizer()
     return []Option{
         WithDatabase(beaconDB),
-        WithStateGen(stategen.New(beaconDB)),
+        WithStateGen(stategen.New(beaconDB, fcs)),
         WithForkChoiceStore(fcs),
+        WithClockSynchronizer(cs),
     }
 }

@@ -25,26 +25,6 @@ func testServiceOptsWithDB(t *testing.T) []Option {
 // in your code path. this is a lightweight way to satisfy the stategen/beacondb
 // initialization requirements w/o the overhead of db init.
 func testServiceOptsNoDB() []Option {
-    return []Option{
-        withStateBalanceCache(satisfactoryStateBalanceCache()),
-    }
-}
-
-type mockStateByRooter struct {
-    state state.BeaconState
-    err   error
-}
-
-var _ stateByRooter = &mockStateByRooter{}
-
-func (m mockStateByRooter) StateByRoot(_ context.Context, _ [32]byte) (state.BeaconState, error) {
-    return m.state, m.err
-}
-
-// returns an instance of the state balance cache that can be used
-// to satisfy the requirement for one in NewService, but which will
-// always return an error if used.
-func satisfactoryStateBalanceCache() *stateBalanceCache {
-    err := errors.New("satisfactoryStateBalanceCache doesn't perform real caching")
-    return &stateBalanceCache{stateGen: mockStateByRooter{err: err}}
+    cs := startup.NewClockSynchronizer()
+    return []Option{WithClockSynchronizer(cs)}
 }
@@ -1,78 +0,0 @@
-package blockchain
-
-import (
-    "bytes"
-    "context"
-
-    "github.com/pkg/errors"
-    types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
-    "github.com/prysmaticlabs/prysm/time/slots"
-)
-
-// NewSlot mimics the implementation of `on_tick` in fork choice consensus spec.
-// It resets the proposer boost root in fork choice, and it updates store's justified checkpoint
-// if a better checkpoint on the store's finalized checkpoint chain.
-// This should only be called at the start of every slot interval.
-//
-// Spec pseudocode definition:
-//    # Reset store.proposer_boost_root if this is a new slot
-//    if current_slot > previous_slot:
-//        store.proposer_boost_root = Root()
-//
-//    # Not a new epoch, return
-//    if not (current_slot > previous_slot and compute_slots_since_epoch_start(current_slot) == 0):
-//        return
-//
-//    # Update store.justified_checkpoint if a better checkpoint on the store.finalized_checkpoint chain
-//    if store.best_justified_checkpoint.epoch > store.justified_checkpoint.epoch:
-//        finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
-//        ancestor_at_finalized_slot = get_ancestor(store, store.best_justified_checkpoint.root, finalized_slot)
-//        if ancestor_at_finalized_slot == store.finalized_checkpoint.root:
-//            store.justified_checkpoint = store.best_justified_checkpoint
-func (s *Service) NewSlot(ctx context.Context, slot types.Slot) error {
-    // Reset proposer boost root in fork choice.
-    if err := s.cfg.ForkChoiceStore.ResetBoostedProposerRoot(ctx); err != nil {
-        return errors.Wrap(err, "could not reset boosted proposer root in fork choice")
-    }
-
-    // Return if it's not a new epoch.
-    if !slots.IsEpochStart(slot) {
-        return nil
-    }
-
-    // Update store.justified_checkpoint if a better checkpoint on the store.finalized_checkpoint chain
-    bj, err := s.store.BestJustifiedCheckpt()
-    if err != nil {
-        return errors.Wrap(err, "could not get best justified checkpoint")
-    }
-    j, err := s.store.JustifiedCheckpt()
-    if err != nil {
-        return errors.Wrap(err, "could not get justified checkpoint")
-    }
-    f, err := s.store.FinalizedCheckpt()
-    if err != nil {
-        return errors.Wrap(err, "could not get finalized checkpoint")
-    }
-    if bj.Epoch > j.Epoch {
-        finalizedSlot, err := slots.EpochStart(f.Epoch)
-        if err != nil {
-            return err
-        }
-        r, err := s.ancestor(ctx, bj.Root, finalizedSlot)
-        if err != nil {
-            return err
-        }
-        if bytes.Equal(r, f.Root) {
-            h, err := s.getPayloadHash(ctx, bj.Root)
-            if err != nil {
-                return err
-            }
-            s.store.SetJustifiedCheckptAndPayloadHash(bj, h)
-            if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(bj); err != nil {
-                return err
-            }
-        }
-    }
-    return nil
-
-}
@@ -1,129 +0,0 @@
-package blockchain
-
-import (
-    "context"
-    "testing"
-
-    "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/store"
-    "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
-    testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
-    "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
-    "github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
-    "github.com/prysmaticlabs/prysm/config/params"
-    types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
-    "github.com/prysmaticlabs/prysm/consensus-types/wrapper"
-    "github.com/prysmaticlabs/prysm/encoding/bytesutil"
-    ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
-    "github.com/prysmaticlabs/prysm/testing/assert"
-    "github.com/prysmaticlabs/prysm/testing/require"
-)
-
-func TestService_newSlot(t *testing.T) {
-    beaconDB := testDB.SetupDB(t)
-    fcs := protoarray.New(0, 0)
-    opts := []Option{
-        WithDatabase(beaconDB),
-        WithStateGen(stategen.New(beaconDB)),
-        WithForkChoiceStore(fcs),
-    }
-    ctx := context.Background()
-
-    genesisStateRoot := [32]byte{}
-    genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
-    wsb, err := wrapper.WrappedSignedBeaconBlock(genesis)
-    require.NoError(t, err)
-    assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
-    bj, err := genesis.Block.HashTreeRoot()
-    require.NoError(t, err)
-
-    state, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, [32]byte{}, 0, 0)
-    require.NoError(t, err)
-    require.NoError(t, fcs.InsertNode(ctx, state, blkRoot)) // genesis
-    state, blkRoot, err = prepareForkchoiceState(ctx, 32, [32]byte{'a'}, [32]byte{}, [32]byte{}, 0, 0)
-    require.NoError(t, err)
-    require.NoError(t, fcs.InsertNode(ctx, state, blkRoot)) // finalized
-    state, blkRoot, err = prepareForkchoiceState(ctx, 64, [32]byte{'b'}, [32]byte{'a'}, [32]byte{}, 0, 0)
-    require.NoError(t, err)
-    require.NoError(t, fcs.InsertNode(ctx, state, blkRoot)) // justified
-    state, blkRoot, err = prepareForkchoiceState(ctx, 96, bj, [32]byte{'a'}, [32]byte{}, 0, 0)
-    require.NoError(t, err)
-    require.NoError(t, fcs.InsertNode(ctx, state, blkRoot)) // best justified
-    state, blkRoot, err = prepareForkchoiceState(ctx, 97, [32]byte{'d'}, [32]byte{}, [32]byte{}, 0, 0)
-    require.NoError(t, err)
-    require.NoError(t, fcs.InsertNode(ctx, state, blkRoot)) // bad
-
-    type args struct {
-        slot          types.Slot
-        finalized     *ethpb.Checkpoint
-        justified     *ethpb.Checkpoint
-        bestJustified *ethpb.Checkpoint
-        shouldEqual   bool
-    }
-    tests := []struct {
-        name string
-        args args
-    }{
-        {
-            name: "Not epoch boundary. No change",
-            args: args{
-                slot:          params.BeaconConfig().SlotsPerEpoch + 1,
-                finalized:     &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'a'}, 32)},
-                justified:     &ethpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte{'b'}, 32)},
-                bestJustified: &ethpb.Checkpoint{Epoch: 3, Root: bj[:]},
-                shouldEqual:   false,
-            },
-        },
-        {
-            name: "Justified higher than best justified. No change",
-            args: args{
-                slot:          params.BeaconConfig().SlotsPerEpoch,
-                finalized:     &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'a'}, 32)},
-                justified:     &ethpb.Checkpoint{Epoch: 3, Root: bytesutil.PadTo([]byte{'b'}, 32)},
-                bestJustified: &ethpb.Checkpoint{Epoch: 2, Root: bj[:]},
-                shouldEqual:   false,
-            },
-        },
-        {
-            name: "Best justified not on the same chain as finalized. No change",
-            args: args{
-                slot:          params.BeaconConfig().SlotsPerEpoch,
-                finalized:     &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'a'}, 32)},
-                justified:     &ethpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte{'b'}, 32)},
-                bestJustified: &ethpb.Checkpoint{Epoch: 3, Root: bytesutil.PadTo([]byte{'d'}, 32)},
-                shouldEqual:   false,
-            },
-        },
-        {
-            name: "Best justified on the same chain as finalized. Yes change",
-            args: args{
-                slot:          params.BeaconConfig().SlotsPerEpoch,
-                finalized:     &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'a'}, 32)},
-                justified:     &ethpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte{'b'}, 32)},
-                bestJustified: &ethpb.Checkpoint{Epoch: 3, Root: bj[:]},
-                shouldEqual:   true,
-            },
-        },
-    }
-    for _, test := range tests {
-        service, err := NewService(ctx, opts...)
-        require.NoError(t, err)
-        s := store.New(test.args.justified, test.args.finalized)
-        s.SetBestJustifiedCheckpt(test.args.bestJustified)
-        service.store = s
-
-        require.NoError(t, service.NewSlot(ctx, test.args.slot))
-        if test.args.shouldEqual {
-            bcp, err := service.store.BestJustifiedCheckpt()
-            require.NoError(t, err)
-            cp, err := service.store.JustifiedCheckpt()
-            require.NoError(t, err)
-            require.DeepSSZEqual(t, bcp, cp)
-        } else {
-            bcp, err := service.store.BestJustifiedCheckpt()
-            require.NoError(t, err)
-            cp, err := service.store.JustifiedCheckpt()
-            require.NoError(t, err)
-            require.DeepNotSSZEqual(t, bcp, cp)
-        }
-    }
-}
@@ -1,20 +1,22 @@
 package blockchain

 import (
-    "github.com/prysmaticlabs/prysm/async/event"
-    "github.com/prysmaticlabs/prysm/beacon-chain/cache"
-    "github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
-    statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
-    "github.com/prysmaticlabs/prysm/beacon-chain/db"
-    "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice"
-    "github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
-    "github.com/prysmaticlabs/prysm/beacon-chain/operations/slashings"
-    "github.com/prysmaticlabs/prysm/beacon-chain/operations/voluntaryexits"
-    "github.com/prysmaticlabs/prysm/beacon-chain/p2p"
-    "github.com/prysmaticlabs/prysm/beacon-chain/powchain"
-    "github.com/prysmaticlabs/prysm/beacon-chain/state"
-    "github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
-    ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
+    "github.com/prysmaticlabs/prysm/v4/async/event"
+    "github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
+    "github.com/prysmaticlabs/prysm/v4/beacon-chain/cache/depositcache"
+    statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
+    "github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
+    "github.com/prysmaticlabs/prysm/v4/beacon-chain/execution"
+    "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice"
+    "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations"
+    "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/blstoexec"
+    "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/slashings"
+    "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/voluntaryexits"
+    "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
+    "github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
+    "github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
+    "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
+    ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
 )

 type Option func(s *Service) error

@@ -44,7 +46,7 @@ func WithDatabase(beaconDB db.HeadAccessDatabase) Option {
 }

 // WithChainStartFetcher to retrieve information about genesis.
-func WithChainStartFetcher(f powchain.ChainStartFetcher) Option {
+func WithChainStartFetcher(f execution.ChainStartFetcher) Option {
     return func(s *Service) error {
         s.cfg.ChainStartFetcher = f
         return nil

@@ -52,7 +54,7 @@ func WithChainStartFetcher(f powchain.ChainStartFetcher) Option {
 }

 // WithExecutionEngineCaller to call execution engine.
-func WithExecutionEngineCaller(c powchain.EngineCaller) Option {
+func WithExecutionEngineCaller(c execution.EngineCaller) Option {
     return func(s *Service) error {
         s.cfg.ExecutionEngineCaller = c
         return nil

@@ -99,6 +101,14 @@ func WithSlashingPool(p slashings.PoolManager) Option {
     }
 }

+// WithBLSToExecPool to keep track of BLS to Execution address changes.
+func WithBLSToExecPool(p blstoexec.PoolManager) Option {
+    return func(s *Service) error {
+        s.cfg.BLSToExecPool = p
+        return nil
+    }
+}
+
 // WithP2PBroadcaster to broadcast messages after appropriate processing.
 func WithP2PBroadcaster(p p2p.Broadcaster) Option {
     return func(s *Service) error {

@@ -147,13 +157,6 @@ func WithSlasherAttestationsFeed(f *event.Feed) Option {
     }
 }

-func withStateBalanceCache(c *stateBalanceCache) Option {
-    return func(s *Service) error {
-        s.justifiedBalances = c
-        return nil
-    }
-}
-
 // WithFinalizedStateAtStartUp to store finalized state at start up.
 func WithFinalizedStateAtStartUp(st state.BeaconState) Option {
     return func(s *Service) error {

@@ -161,3 +164,11 @@ func WithFinalizedStateAtStartUp(st state.BeaconState) Option {
         return nil
     }
 }
+
+func WithClockSynchronizer(gs *startup.ClockSynchronizer) Option {
+    return func(s *Service) error {
+        s.clockSetter = gs
+        s.clockWaiter = gs
+        return nil
+    }
+}
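Aside, not part of the diff: the option constructors above are meant to be composed when constructing the blockchain Service. A minimal sketch of how the new WithClockSynchronizer option slots in next to the existing ones, mirroring the testServiceOptsWithDB helper shown earlier in this compare view; imports and the surrounding test scaffolding are omitted, and the helper name here is illustrative rather than taken from the repository.

func newTestBlockchainService(t *testing.T) (*Service, error) {
    // Every identifier used below appears elsewhere in this compare view;
    // wiring them into one constructor call like this is only a sketch.
    beaconDB := testDB.SetupDB(t)
    fcs := doublylinkedtree.New()
    cs := startup.NewClockSynchronizer()
    opts := []Option{
        WithDatabase(beaconDB),
        WithStateGen(stategen.New(beaconDB, fcs)),
        WithForkChoiceStore(fcs),
        WithClockSynchronizer(cs), // new option: wires the clock setter/waiter into the service
    }
    return NewService(context.Background(), opts...)
}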
@@ -10,48 +10,53 @@ import (
     "github.com/ethereum/go-ethereum/common/hexutil"
     "github.com/holiman/uint256"
     "github.com/pkg/errors"
-    "github.com/prysmaticlabs/prysm/config/params"
-    "github.com/prysmaticlabs/prysm/consensus-types/interfaces"
-    types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
-    "github.com/prysmaticlabs/prysm/consensus-types/wrapper"
-    "github.com/prysmaticlabs/prysm/encoding/bytesutil"
-    enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
-    "github.com/prysmaticlabs/prysm/time/slots"
+    "github.com/prysmaticlabs/prysm/v4/config/params"
+    "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
+    "github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
+    "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
+    "github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
+    "github.com/prysmaticlabs/prysm/v4/runtime/version"
+    "github.com/prysmaticlabs/prysm/v4/time/slots"
     "github.com/sirupsen/logrus"
 )

 // validateMergeBlock validates terminal block hash in the event of manual overrides before checking for total difficulty.
 //
-// def validate_merge_block(block: BeaconBlock) -> None:
+// def validate_merge_block(block: ReadOnlyBeaconBlock) -> None:
+//
 // if TERMINAL_BLOCK_HASH != Hash32():
 // # If `TERMINAL_BLOCK_HASH` is used as an override, the activation epoch must be reached.
 // assert compute_epoch_at_slot(block.slot) >= TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH
 // assert block.body.execution_payload.parent_hash == TERMINAL_BLOCK_HASH
 // return
 //
 // pow_block = get_pow_block(block.body.execution_payload.parent_hash)
 // # Check if `pow_block` is available
 // assert pow_block is not None
 // pow_parent = get_pow_block(pow_block.parent_hash)
 // # Check if `pow_parent` is available
 // assert pow_parent is not None
 // # Check if `pow_block` is a valid terminal PoW block
 // assert is_valid_terminal_pow_block(pow_block, pow_parent)
-func (s *Service) validateMergeBlock(ctx context.Context, b interfaces.SignedBeaconBlock) error {
-    if err := wrapper.BeaconBlockIsNil(b); err != nil {
+func (s *Service) validateMergeBlock(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock) error {
+    if err := blocks.BeaconBlockIsNil(b); err != nil {
         return err
     }
-    payload, err := b.Block().Body().ExecutionPayload()
+    payload, err := b.Block().Body().Execution()
     if err != nil {
         return err
     }
-    if payload == nil {
+    if payload.IsNil() {
         return errors.New("nil execution payload")
     }
-    if err := validateTerminalBlockHash(b.Block().Slot(), payload); err != nil {
+    ok, err := canUseValidatedTerminalBlockHash(b.Block().Slot(), payload)
+    if err != nil {
         return errors.Wrap(err, "could not validate terminal block hash")
     }
-    mergeBlockParentHash, mergeBlockTD, err := s.getBlkParentHashAndTD(ctx, payload.ParentHash)
+    if ok {
+        return nil
+    }
+    mergeBlockParentHash, mergeBlockTD, err := s.getBlkParentHashAndTD(ctx, payload.ParentHash())
     if err != nil {
         return errors.Wrap(err, "could not get merge block parent hash and total difficulty")
     }

@@ -66,30 +71,33 @@ func (s *Service) validateMergeBlock(ctx context.Context, b interfaces.SignedBea
     if !valid {
         err := fmt.Errorf("invalid TTD, configTTD: %s, currentTTD: %s, parentTTD: %s",
             params.BeaconConfig().TerminalTotalDifficulty, mergeBlockTD, mergeBlockParentTD)
-        return invalidBlock{err}
+        return invalidBlock{error: err}
     }

     log.WithFields(logrus.Fields{
         "slot":                            b.Block().Slot(),
-        "mergeBlockHash":                  common.BytesToHash(payload.ParentHash).String(),
+        "mergeBlockHash":                  common.BytesToHash(payload.ParentHash()).String(),
         "mergeBlockParentHash":            common.BytesToHash(mergeBlockParentHash).String(),
         "terminalTotalDifficulty":         params.BeaconConfig().TerminalTotalDifficulty,
         "mergeBlockTotalDifficulty":       mergeBlockTD,
         "mergeBlockParentTotalDifficulty": mergeBlockParentTD,
     }).Info("Validated terminal block")

+    log.Info(mergeAsciiArt)
+
     return nil
 }

 // getBlkParentHashAndTD retrieves the parent hash and total difficulty of the given block.
 func (s *Service) getBlkParentHashAndTD(ctx context.Context, blkHash []byte) ([]byte, *uint256.Int, error) {
-    blk, err := s.cfg.ExecutionEngineCaller.ExecutionBlockByHash(ctx, common.BytesToHash(blkHash))
+    blk, err := s.cfg.ExecutionEngineCaller.ExecutionBlockByHash(ctx, common.BytesToHash(blkHash), false /* no txs */)
     if err != nil {
         return nil, nil, errors.Wrap(err, "could not get pow block")
     }
     if blk == nil {
         return nil, nil, errors.New("pow block is nil")
     }
+    blk.Version = version.Bellatrix
     blkTDBig, err := hexutil.DecodeBig(blk.TotalDifficulty)
     if err != nil {
         return nil, nil, errors.Wrap(err, "could not decode merge block total difficulty")

@@ -98,35 +106,37 @@ func (s *Service) getBlkParentHashAndTD(ctx context.Context, blkHash []byte) ([]
     if overflows {
         return nil, nil, errors.New("total difficulty overflows")
     }
-    return blk.ParentHash, blkTDUint256, nil
+    return blk.ParentHash[:], blkTDUint256, nil
 }

-// validateTerminalBlockHash validates if the merge block is a valid terminal PoW block.
+// canUseValidatedTerminalBlockHash validates if the merge block is a valid terminal PoW block.
 // spec code:
 // if TERMINAL_BLOCK_HASH != Hash32():
+//
 // # If `TERMINAL_BLOCK_HASH` is used as an override, the activation epoch must be reached.
 // assert compute_epoch_at_slot(block.slot) >= TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH
 // assert block.body.execution_payload.parent_hash == TERMINAL_BLOCK_HASH
 // return
-func validateTerminalBlockHash(blkSlot types.Slot, payload *enginev1.ExecutionPayload) error {
+func canUseValidatedTerminalBlockHash(blkSlot primitives.Slot, payload interfaces.ExecutionData) (bool, error) {
     if bytesutil.ToBytes32(params.BeaconConfig().TerminalBlockHash.Bytes()) == [32]byte{} {
-        return nil
+        return false, nil
     }
     if params.BeaconConfig().TerminalBlockHashActivationEpoch > slots.ToEpoch(blkSlot) {
-        return errors.New("terminal block hash activation epoch not reached")
+        return false, errors.New("terminal block hash activation epoch not reached")
     }
-    if !bytes.Equal(payload.ParentHash, params.BeaconConfig().TerminalBlockHash.Bytes()) {
-        return errors.New("parent hash does not match terminal block hash")
+    if !bytes.Equal(payload.ParentHash(), params.BeaconConfig().TerminalBlockHash.Bytes()) {
+        return false, errors.New("parent hash does not match terminal block hash")
     }
-    return nil
+    return true, nil
 }

 // validateTerminalBlockDifficulties validates terminal pow block by comparing own total difficulty with parent's total difficulty.
 //
 // def is_valid_terminal_pow_block(block: PowBlock, parent: PowBlock) -> bool:
+//
 // is_total_difficulty_reached = block.total_difficulty >= TERMINAL_TOTAL_DIFFICULTY
 // is_parent_total_difficulty_valid = parent.total_difficulty < TERMINAL_TOTAL_DIFFICULTY
 // return is_total_difficulty_reached and is_parent_total_difficulty_valid
 func validateTerminalBlockDifficulties(currentDifficulty *uint256.Int, parentDifficulty *uint256.Int) (bool, error) {
     b, ok := new(big.Int).SetString(params.BeaconConfig().TerminalTotalDifficulty, 10)
     if !ok {
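Aside, not part of the diff: the rename from validateTerminalBlockHash to canUseValidatedTerminalBlockHash also changes the contract from error-only to (bool, error). An error still marks the block invalid, while ok reports whether the configured TERMINAL_BLOCK_HASH override applied, in which case the total-difficulty comparison can be skipped. A condensed, hedged sketch of the caller-side pattern, restating the control flow shown in the validateMergeBlock hunk above:

// Sketch only; variable names mirror those in the hunk above.
ok, err := canUseValidatedTerminalBlockHash(blk.Block().Slot(), payload)
if err != nil {
    // The override is configured but this block does not satisfy it.
    return errors.Wrap(err, "could not validate terminal block hash")
}
if ok {
    // The terminal block hash override matched; no TTD comparison is needed.
    return nil
}
// Otherwise fall through to the total-difficulty validation against the parent block.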
@@ -1,23 +1,20 @@
 package blockchain

 import (
-    "context"
     "fmt"
     "math/big"
     "testing"

+    gethtypes "github.com/ethereum/go-ethereum/core/types"
     "github.com/holiman/uint256"
-    testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
-    "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
-    mocks "github.com/prysmaticlabs/prysm/beacon-chain/powchain/testing"
-    "github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
-    fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
-    "github.com/prysmaticlabs/prysm/config/params"
-    "github.com/prysmaticlabs/prysm/consensus-types/wrapper"
-    "github.com/prysmaticlabs/prysm/encoding/bytesutil"
-    enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
-    ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
-    "github.com/prysmaticlabs/prysm/testing/require"
+    mocks "github.com/prysmaticlabs/prysm/v4/beacon-chain/execution/testing"
+    "github.com/prysmaticlabs/prysm/v4/config/params"
+    "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
+    "github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
+    enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
+    ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
+    "github.com/prysmaticlabs/prysm/v4/testing/require"
+    "github.com/prysmaticlabs/prysm/v4/testing/util"
 )

 func Test_validTerminalPowBlock(t *testing.T) {

@@ -107,25 +104,24 @@ func Test_validateMergeBlock(t *testing.T) {
     cfg.TerminalTotalDifficulty = "2"
     params.OverrideBeaconConfig(cfg)

-    ctx := context.Background()
-    beaconDB := testDB.SetupDB(t)
-    fcs := protoarray.New(0, 0)
-    opts := []Option{
-        WithDatabase(beaconDB),
-        WithStateGen(stategen.New(beaconDB)),
-        WithForkChoiceStore(fcs),
-    }
-    service, err := NewService(ctx, opts...)
-    require.NoError(t, err)
+    service, tr := minimalTestService(t)
+    ctx := tr.ctx

     engine := &mocks.EngineClient{BlockByHashMap: map[[32]byte]*enginev1.ExecutionBlock{}}
     service.cfg.ExecutionEngineCaller = engine
-    engine.BlockByHashMap[[32]byte{'a'}] = &enginev1.ExecutionBlock{
-        ParentHash: bytesutil.PadTo([]byte{'b'}, fieldparams.RootLength),
+    a := [32]byte{'a'}
+    b := [32]byte{'b'}
+    mergeBlockParentHash := [32]byte{'3'}
+    engine.BlockByHashMap[a] = &enginev1.ExecutionBlock{
+        Header: gethtypes.Header{
+            ParentHash: b,
+        },
         TotalDifficulty: "0x2",
     }
-    engine.BlockByHashMap[[32]byte{'b'}] = &enginev1.ExecutionBlock{
-        ParentHash: bytesutil.PadTo([]byte{'3'}, fieldparams.RootLength),
+    engine.BlockByHashMap[b] = &enginev1.ExecutionBlock{
+        Header: gethtypes.Header{
+            ParentHash: mergeBlockParentHash,
+        },
         TotalDifficulty: "0x1",
     }
     blk := &ethpb.SignedBeaconBlockBellatrix{

@@ -133,33 +129,25 @@ func Test_validateMergeBlock(t *testing.T) {
             Slot: 1,
             Body: &ethpb.BeaconBlockBodyBellatrix{
                 ExecutionPayload: &enginev1.ExecutionPayload{
-                    ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
+                    ParentHash: a[:],
                 },
             },
         },
     }
-    b, err := wrapper.WrappedSignedBeaconBlock(blk)
+    bk, err := blocks.NewSignedBeaconBlock(blk)
     require.NoError(t, err)
-    require.NoError(t, service.validateMergeBlock(ctx, b))
+    require.NoError(t, service.validateMergeBlock(ctx, bk))

     cfg.TerminalTotalDifficulty = "1"
     params.OverrideBeaconConfig(cfg)
-    err = service.validateMergeBlock(ctx, b)
+    err = service.validateMergeBlock(ctx, bk)
     require.ErrorContains(t, "invalid TTD, configTTD: 1, currentTTD: 2, parentTTD: 1", err)
     require.Equal(t, true, IsInvalidBlock(err))
 }

 func Test_getBlkParentHashAndTD(t *testing.T) {
-    ctx := context.Background()
-    beaconDB := testDB.SetupDB(t)
-    fcs := protoarray.New(0, 0)
-    opts := []Option{
-        WithDatabase(beaconDB),
-        WithStateGen(stategen.New(beaconDB)),
-        WithForkChoiceStore(fcs),
-    }
-    service, err := NewService(ctx, opts...)
-    require.NoError(t, err)
+    service, tr := minimalTestService(t)
+    ctx := tr.ctx

     engine := &mocks.EngineClient{BlockByHashMap: map[[32]byte]*enginev1.ExecutionBlock{}}
     service.cfg.ExecutionEngineCaller = engine

@@ -167,7 +155,9 @@ func Test_getBlkParentHashAndTD(t *testing.T) {
     p := [32]byte{'b'}
     td := "0x1"
     engine.BlockByHashMap[h] = &enginev1.ExecutionBlock{
-        ParentHash: p[:],
+        Header: gethtypes.Header{
+            ParentHash: p,
+        },
         TotalDifficulty: td,
     }
     parentHash, totalDifficulty, err := service.getBlkParentHashAndTD(ctx, h[:])

@@ -183,14 +173,18 @@ func Test_getBlkParentHashAndTD(t *testing.T) {
     require.ErrorContains(t, "pow block is nil", err)

     engine.BlockByHashMap[h] = &enginev1.ExecutionBlock{
-        ParentHash: p[:],
+        Header: gethtypes.Header{
+            ParentHash: p,
+        },
         TotalDifficulty: "1",
     }
     _, _, err = service.getBlkParentHashAndTD(ctx, h[:])
     require.ErrorContains(t, "could not decode merge block total difficulty: hex string without 0x prefix", err)

     engine.BlockByHashMap[h] = &enginev1.ExecutionBlock{
-        ParentHash: p[:],
+        Header: gethtypes.Header{
+            ParentHash: p,
+        },
         TotalDifficulty: "0XFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF",
     }
     _, _, err = service.getBlkParentHashAndTD(ctx, h[:])

@@ -198,16 +192,39 @@ func Test_getBlkParentHashAndTD(t *testing.T) {
 }

 func Test_validateTerminalBlockHash(t *testing.T) {
-    require.NoError(t, validateTerminalBlockHash(1, &enginev1.ExecutionPayload{}))
+    wrapped, err := blocks.WrappedExecutionPayload(&enginev1.ExecutionPayload{})
+    require.NoError(t, err)
+    ok, err := canUseValidatedTerminalBlockHash(1, wrapped)
+    require.NoError(t, err)
+    require.Equal(t, false, ok)
+
     cfg := params.BeaconConfig()
     cfg.TerminalBlockHash = [32]byte{0x01}
     params.OverrideBeaconConfig(cfg)
-    require.ErrorContains(t, "terminal block hash activation epoch not reached", validateTerminalBlockHash(1, &enginev1.ExecutionPayload{}))
+    ok, err = canUseValidatedTerminalBlockHash(1, wrapped)
+    require.ErrorContains(t, "terminal block hash activation epoch not reached", err)
+    require.Equal(t, false, ok)
+
     cfg.TerminalBlockHashActivationEpoch = 0
     params.OverrideBeaconConfig(cfg)
-    require.ErrorContains(t, "parent hash does not match terminal block hash", validateTerminalBlockHash(1, &enginev1.ExecutionPayload{}))
-
-    require.NoError(t, validateTerminalBlockHash(1, &enginev1.ExecutionPayload{ParentHash: cfg.TerminalBlockHash.Bytes()}))
+    ok, err = canUseValidatedTerminalBlockHash(1, wrapped)
+    require.ErrorContains(t, "parent hash does not match terminal block hash", err)
+    require.Equal(t, false, ok)
+
+    wrapped, err = blocks.WrappedExecutionPayload(&enginev1.ExecutionPayload{
+        ParentHash: cfg.TerminalBlockHash.Bytes(),
+    })
+    require.NoError(t, err)
+    ok, err = canUseValidatedTerminalBlockHash(1, wrapped)
+    require.NoError(t, err)
+    require.Equal(t, true, ok)
+
+    service, tr := minimalTestService(t)
+    ctx := tr.ctx
+
+    blk, err := blocks.NewSignedBeaconBlock(util.HydrateSignedBeaconBlockBellatrix(&ethpb.SignedBeaconBlockBellatrix{}))
+    require.NoError(t, err)
+    blk.SetSlot(1)
+    require.NoError(t, blk.SetExecution(wrapped))
+    require.NoError(t, service.validateMergeBlock(ctx, blk))
 }
@@ -5,12 +5,11 @@ import (
     "time"

     "github.com/pkg/errors"
-    "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
-    "github.com/prysmaticlabs/prysm/config/params"
-    "github.com/prysmaticlabs/prysm/encoding/bytesutil"
-    ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
-    "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/attestation"
-    "github.com/prysmaticlabs/prysm/time/slots"
+    "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
+    "github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
+    ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
+    "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1/attestation"
+    "github.com/prysmaticlabs/prysm/v4/time/slots"
     "go.opencensus.io/trace"
 )

@@ -19,24 +18,25 @@ import (
 // The delay is handled by the caller in `processAttestations`.
 //
 // Spec pseudocode definition:
+//
 // def on_attestation(store: Store, attestation: Attestation) -> None:
 // """
 // Run ``on_attestation`` upon receiving a new ``attestation`` from either within a block or directly on the wire.
 //
 // An ``attestation`` that is asserted as invalid may be valid at a later time,
 // consider scheduling it for later processing in such case.
 // """
 // validate_on_attestation(store, attestation)
 // store_target_checkpoint_state(store, attestation.data.target)
 //
 // # Get state at the `target` to fully validate attestation
 // target_state = store.checkpoint_states[attestation.data.target]
 // indexed_attestation = get_indexed_attestation(target_state, attestation)
 // assert is_valid_indexed_attestation(target_state, indexed_attestation)
 //
 // # Update latest messages for attesting indices
 // update_latest_messages(store, indexed_attestation.attesting_indices, attestation)
-func (s *Service) OnAttestation(ctx context.Context, a *ethpb.Attestation) error {
+func (s *Service) OnAttestation(ctx context.Context, a *ethpb.Attestation, disparity time.Duration) error {
     ctx, span := trace.StartSpan(ctx, "blockChain.onAttestation")
     defer span.End()

@@ -62,7 +62,7 @@ func (s *Service) OnAttestation(ctx context.Context, a *ethpb.Attestation) error
     genesisTime := uint64(s.genesisTime.Unix())

     // Verify attestation target is from current epoch or previous epoch.
-    if err := verifyAttTargetEpoch(ctx, genesisTime, uint64(time.Now().Unix()), tgt); err != nil {
+    if err := verifyAttTargetEpoch(ctx, genesisTime, uint64(time.Now().Add(disparity).Unix()), tgt); err != nil {
         return err
     }

@@ -71,11 +71,11 @@ func (s *Service) OnAttestation(ctx context.Context, a *ethpb.Attestation) error
         return errors.Wrap(err, "could not verify attestation beacon block")
     }

-    // Note that LMG GHOST and FFG consistency check is ignored because it was performed in sync's validation pipeline:
+    // Note that LMD GHOST and FFG consistency check is ignored because it was performed in sync's validation pipeline:
     // validate_aggregate_proof.go and validate_beacon_attestation.go

     // Verify attestations can only affect the fork choice of subsequent slots.
-    if err := slots.VerifyTime(genesisTime, a.Data.Slot+1, params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil {
+    if err := slots.VerifyTime(genesisTime, a.Data.Slot+1, disparity); err != nil {
         return err
     }
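Aside, not part of the diff: OnAttestation now takes the allowed clock disparity from the caller instead of reading params.BeaconNetworkConfig().MaximumGossipClockDisparity itself. A hedged sketch of what a call site could look like after this change; the surrounding variable names and the log message are illustrative assumptions, only the function signature and the MaximumGossipClockDisparity parameter come from the hunks above.

// Sketch: the caller now chooses the disparity window and passes it explicitly.
disparity := params.BeaconNetworkConfig().MaximumGossipClockDisparity
if err := s.OnAttestation(ctx, att, disparity); err != nil {
    log.WithError(err).Debug("Could not process attestation for fork choice")
}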
@@ -6,19 +6,30 @@ import (
     "strconv"

     "github.com/pkg/errors"
-    "github.com/prysmaticlabs/prysm/async"
-    "github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
-    "github.com/prysmaticlabs/prysm/beacon-chain/state"
-    "github.com/prysmaticlabs/prysm/config/params"
-    types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
-    "github.com/prysmaticlabs/prysm/consensus-types/wrapper"
-    "github.com/prysmaticlabs/prysm/encoding/bytesutil"
-    ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
-    "github.com/prysmaticlabs/prysm/time/slots"
+    "github.com/prysmaticlabs/prysm/v4/async"
+    "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
+    forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
+    "github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
+    "github.com/prysmaticlabs/prysm/v4/config/params"
+    "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
+    "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
+    "github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
+    ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
+    "github.com/prysmaticlabs/prysm/v4/time/slots"
 )

 // getAttPreState retrieves the att pre state by either from the cache or the DB.
-func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (state.BeaconState, error) {
+func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (state.ReadOnlyBeaconState, error) {
+    // If the attestation is recent and canonical we can use the head state to compute the shuffling.
+    headEpoch := slots.ToEpoch(s.HeadSlot())
+    if c.Epoch == headEpoch {
+        targetSlot, err := s.cfg.ForkChoiceStore.Slot([32]byte(c.Root))
+        if err == nil && slots.ToEpoch(targetSlot)+1 >= headEpoch {
+            if s.cfg.ForkChoiceStore.IsCanonical([32]byte(c.Root)) {
+                return s.HeadStateReadOnly(ctx)
+            }
+        }
+    }
     // Use a multilock to allow scoped holding of a mutex by a checkpoint root + epoch
     // allowing us to behave smarter in terms of how this function is used concurrently.
     epochKey := strconv.FormatUint(uint64(c.Epoch), 10 /* base 10 */)

@@ -32,7 +43,36 @@ func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (stat
     if cachedState != nil && !cachedState.IsNil() {
         return cachedState, nil
     }
+    // Try the next slot cache for the early epoch calls, this should mostly have been covered already
+    // but is cheap
+    slot, err := slots.EpochStart(c.Epoch)
+    if err != nil {
+        return nil, errors.Wrap(err, "could not compute epoch start")
+    }
+    cachedState = transition.NextSlotState(c.Root, slot)
+    if cachedState != nil && !cachedState.IsNil() {
+        if cachedState.Slot() != slot {
+            cachedState, err = transition.ProcessSlots(ctx, cachedState, slot)
+            if err != nil {
+                return nil, errors.Wrap(err, "could not process slots")
+            }
+        }
+        if err := s.checkpointStateCache.AddCheckpointState(c, cachedState); err != nil {
+            return nil, errors.Wrap(err, "could not save checkpoint state to cache")
+        }
+        return cachedState, nil
+    }
+
+    // Do not process attestations for old non viable checkpoints otherwise
+    ok, err := s.cfg.ForkChoiceStore.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: [32]byte(c.Root), Epoch: c.Epoch})
+    if err != nil {
+        return nil, errors.Wrap(err, "could not check checkpoint condition in forkchoice")
+    }
+    if !ok {
+        return nil, errors.Wrap(ErrNotCheckpoint, fmt.Sprintf("epoch %d root %#x", c.Epoch, c.Root))
+    }
+
+    // Fallback to state regeneration.
     baseState, err := s.cfg.StateGen.StateByRoot(ctx, bytesutil.ToBytes32(c.Root))
     if err != nil {
         return nil, errors.Wrapf(err, "could not get pre state for epoch %d", c.Epoch)

@@ -55,14 +95,13 @@ func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (stat
         return nil, errors.Wrap(err, "could not save checkpoint state to cache")
     }
     return baseState, nil
-
 }

 // verifyAttTargetEpoch validates attestation is from the current or previous epoch.
 func verifyAttTargetEpoch(_ context.Context, genesisTime, nowTime uint64, c *ethpb.Checkpoint) error {
-    currentSlot := types.Slot((nowTime - genesisTime) / params.BeaconConfig().SecondsPerSlot)
+    currentSlot := primitives.Slot((nowTime - genesisTime) / params.BeaconConfig().SecondsPerSlot)
     currentEpoch := slots.ToEpoch(currentSlot)
-    var prevEpoch types.Epoch
+    var prevEpoch primitives.Epoch
     // Prevents previous epoch under flow
     if currentEpoch > 1 {
         prevEpoch = currentEpoch - 1

@@ -80,7 +119,7 @@ func (s *Service) verifyBeaconBlock(ctx context.Context, data *ethpb.Attestation
     if err != nil {
         return err
     }
-    if err := wrapper.BeaconBlockIsNil(b); err != nil {
+    if err := blocks.BeaconBlockIsNil(b); err != nil {
         return err
     }
     if b.Block().Slot() > data.Slot {
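Aside, not part of the diff: the target-epoch check in verifyAttTargetEpoch above is plain wall-clock division. A small worked sketch, assuming mainnet-style parameters of 12-second slots and 32 slots per epoch (assumptions here, not values shown in this hunk):

// Sketch: with genesisTime = 0 and nowTime = 800 seconds,
// currentSlot = (800 - 0) / 12 = 66 and currentEpoch = 66 / 32 = 2,
// so an attestation target from epoch 1 or epoch 2 would be accepted.
currentSlot := primitives.Slot((nowTime - genesisTime) / params.BeaconConfig().SecondsPerSlot)
currentEpoch := slots.ToEpoch(currentSlot)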
@@ -5,52 +5,44 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
|
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
|
||||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||||
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
|
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
|
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||||
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
|
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||||
"github.com/prysmaticlabs/prysm/config/params"
|
"github.com/prysmaticlabs/prysm/v4/testing/assert"
|
||||||
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
|
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||||
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
|
"github.com/prysmaticlabs/prysm/v4/testing/util"
|
||||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
"github.com/prysmaticlabs/prysm/v4/time/slots"
|
||||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
|
||||||
"github.com/prysmaticlabs/prysm/testing/assert"
|
|
||||||
"github.com/prysmaticlabs/prysm/testing/require"
|
|
||||||
"github.com/prysmaticlabs/prysm/testing/util"
|
|
||||||
"github.com/prysmaticlabs/prysm/time/slots"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
- func TestStore_OnAttestation_ErrorConditions_ProtoArray(t *testing.T) {
- ctx := context.Background()
- beaconDB := testDB.SetupDB(t)
+ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
+ service, tr := minimalTestService(t)
+ ctx, beaconDB := tr.ctx, tr.db

- opts := []Option{
- WithDatabase(beaconDB),
- WithForkChoiceStore(protoarray.New(0, 0)),
- WithStateGen(stategen.New(beaconDB)),
- }
- service, err := NewService(ctx, opts...)
+ _, err := blockTree1(t, beaconDB, []byte{'g'})
  require.NoError(t, err)

- _, err = blockTree1(t, beaconDB, []byte{'g'})
- require.NoError(t, err)
+ blkWithoutState := util.NewBeaconBlock()
+ blkWithoutState.Block.Slot = 0
+ util.SaveBlock(t, ctx, beaconDB, blkWithoutState)

- BlkWithOutState := util.NewBeaconBlock()
- BlkWithOutState.Block.Slot = 0
- wsb, err := wrapper.WrappedSignedBeaconBlock(BlkWithOutState)
- require.NoError(t, err)
- require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
- BlkWithOutStateRoot, err := BlkWithOutState.Block.HashTreeRoot()
+ cp := &ethpb.Checkpoint{}
+ st, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, cp, cp)
  require.NoError(t, err)
+ require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))

- BlkWithStateBadAtt := util.NewBeaconBlock()
- BlkWithStateBadAtt.Block.Slot = 1
- wsb, err = wrapper.WrappedSignedBeaconBlock(BlkWithStateBadAtt)
+ blkWithStateBadAtt := util.NewBeaconBlock()
+ blkWithStateBadAtt.Block.Slot = 1
+ r, err := blkWithStateBadAtt.Block.HashTreeRoot()
  require.NoError(t, err)
- require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
- BlkWithStateBadAttRoot, err := BlkWithStateBadAtt.Block.HashTreeRoot()
+ cp = &ethpb.Checkpoint{Root: r[:]}
+ st, blkRoot, err = prepareForkchoiceState(ctx, blkWithStateBadAtt.Block.Slot, r, [32]byte{}, params.BeaconConfig().ZeroHash, cp, cp)
+ require.NoError(t, err)
+ require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
+ util.SaveBlock(t, ctx, beaconDB, blkWithStateBadAtt)
+ BlkWithStateBadAttRoot, err := blkWithStateBadAtt.Block.HashTreeRoot()
  require.NoError(t, err)

  s, err := util.NewBeaconState()
@@ -58,13 +50,11 @@ func TestStore_OnAttestation_ErrorConditions_ProtoArray(t *testing.T) {
  require.NoError(t, s.SetSlot(100*params.BeaconConfig().SlotsPerEpoch))
  require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, BlkWithStateBadAttRoot))

- BlkWithValidState := util.NewBeaconBlock()
- BlkWithValidState.Block.Slot = 2
- wsb, err = wrapper.WrappedSignedBeaconBlock(BlkWithValidState)
- require.NoError(t, err)
- require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
+ blkWithValidState := util.NewBeaconBlock()
+ blkWithValidState.Block.Slot = 32
+ util.SaveBlock(t, ctx, beaconDB, blkWithValidState)

- BlkWithValidStateRoot, err := BlkWithValidState.Block.HashTreeRoot()
+ blkWithValidStateRoot, err := blkWithValidState.Block.HashTreeRoot()
  require.NoError(t, err)
  s, err = util.NewBeaconState()
  require.NoError(t, err)
@@ -74,7 +64,11 @@ func TestStore_OnAttestation_ErrorConditions_ProtoArray(t *testing.T) {
  PreviousVersion: params.BeaconConfig().GenesisForkVersion,
  })
  require.NoError(t, err)
- require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, BlkWithValidStateRoot))
+ require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, blkWithValidStateRoot))

+ service.head = &head{
+ state: st,
+ }

  tests := []struct {
  name string
@@ -86,11 +80,6 @@ func TestStore_OnAttestation_ErrorConditions_ProtoArray(t *testing.T) {
  a: util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: params.BeaconConfig().SlotsPerEpoch, Target: &ethpb.Checkpoint{Root: make([]byte, 32)}}}),
  wantedErr: "slot 32 does not match target epoch 0",
  },
- {
- name: "no pre state for attestations's target block",
- a: util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Target: &ethpb.Checkpoint{Root: BlkWithOutStateRoot[:]}}}),
- wantedErr: "could not get pre state for epoch 0",
- },
  {
  name: "process attestation doesn't match current epoch",
  a: util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 100 * params.BeaconConfig().SlotsPerEpoch, Target: &ethpb.Checkpoint{Epoch: 100,
@@ -124,7 +113,7 @@ func TestStore_OnAttestation_ErrorConditions_ProtoArray(t *testing.T) {

  for _, tt := range tests {
  t.Run(tt.name, func(t *testing.T) {
- err := service.OnAttestation(ctx, tt.a)
+ err := service.OnAttestation(ctx, tt.a, 0)
  if tt.wantedErr != "" {
  assert.ErrorContains(t, tt.wantedErr, err)
  } else {
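Note on the rewritten setup above: the new test no longer writes wrapped blocks to the database to make a target resolvable; it registers each target checkpoint as a node in the forkchoice store before calling OnAttestation, which is why prepareForkchoiceState and InsertNode appear throughout. The following is a minimal sketch of that pattern, assuming only the helpers already visible in this diff (prepareForkchoiceState, util, require); their exact signatures are inferred from the calls shown here, and the trailing 0 mirrors the new extra argument on OnAttestation.

// Sketch only: mirrors the setup used by TestStore_OnAttestation_ErrorConditions above.
// prepareForkchoiceState is the test helper shown in this diff; its signature is assumed
// from the call sites, not from any other source.
func setupAttestationTarget(t *testing.T, ctx context.Context, service *Service) [32]byte {
	blk := util.NewBeaconBlock()
	blk.Block.Slot = 1
	root, err := blk.Block.HashTreeRoot()
	require.NoError(t, err)

	// The target checkpoint must exist as a forkchoice node; otherwise
	// getAttPreState fails with "not a checkpoint in forkchoice".
	cp := &ethpb.Checkpoint{Root: root[:]}
	st, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, root, [32]byte{}, params.BeaconConfig().ZeroHash, cp, cp)
	require.NoError(t, err)
	require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
	return root
}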
@@ -134,158 +123,10 @@ func TestStore_OnAttestation_ErrorConditions_ProtoArray(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestStore_OnAttestation_ErrorConditions_DoublyLinkedTree(t *testing.T) {
|
|
||||||
ctx := context.Background()
|
|
||||||
beaconDB := testDB.SetupDB(t)
|
|
||||||
|
|
||||||
opts := []Option{
|
|
||||||
WithDatabase(beaconDB),
|
|
||||||
WithForkChoiceStore(doublylinkedtree.New(0, 0)),
|
|
||||||
WithStateGen(stategen.New(beaconDB)),
|
|
||||||
}
|
|
||||||
service, err := NewService(ctx, opts...)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
_, err = blockTree1(t, beaconDB, []byte{'g'})
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
BlkWithOutState := util.NewBeaconBlock()
|
|
||||||
BlkWithOutState.Block.Slot = 0
|
|
||||||
wsb, err := wrapper.WrappedSignedBeaconBlock(BlkWithOutState)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
|
|
||||||
BlkWithOutStateRoot, err := BlkWithOutState.Block.HashTreeRoot()
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
BlkWithStateBadAtt := util.NewBeaconBlock()
|
|
||||||
BlkWithStateBadAtt.Block.Slot = 1
|
|
||||||
wsb, err = wrapper.WrappedSignedBeaconBlock(BlkWithStateBadAtt)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
|
|
||||||
BlkWithStateBadAttRoot, err := BlkWithStateBadAtt.Block.HashTreeRoot()
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
s, err := util.NewBeaconState()
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.NoError(t, s.SetSlot(100*params.BeaconConfig().SlotsPerEpoch))
|
|
||||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, BlkWithStateBadAttRoot))
|
|
||||||
|
|
||||||
BlkWithValidState := util.NewBeaconBlock()
|
|
||||||
BlkWithValidState.Block.Slot = 2
|
|
||||||
wsb, err = wrapper.WrappedSignedBeaconBlock(BlkWithValidState)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
|
|
||||||
|
|
||||||
BlkWithValidStateRoot, err := BlkWithValidState.Block.HashTreeRoot()
|
|
||||||
require.NoError(t, err)
|
|
||||||
s, err = util.NewBeaconState()
|
|
||||||
require.NoError(t, err)
|
|
||||||
err = s.SetFork(ðpb.Fork{
|
|
||||||
Epoch: 0,
|
|
||||||
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
|
|
||||||
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
|
|
||||||
})
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, BlkWithValidStateRoot))
|
|
||||||
|
|
||||||
tests := []struct {
|
|
||||||
name string
|
|
||||||
a *ethpb.Attestation
|
|
||||||
wantedErr string
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
name: "attestation's data slot not aligned with target vote",
|
|
||||||
a: util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: params.BeaconConfig().SlotsPerEpoch, Target: ðpb.Checkpoint{Root: make([]byte, 32)}}}),
|
|
||||||
wantedErr: "slot 32 does not match target epoch 0",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "no pre state for attestations's target block",
|
|
||||||
a: util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Target: ðpb.Checkpoint{Root: BlkWithOutStateRoot[:]}}}),
|
|
||||||
wantedErr: "could not get pre state for epoch 0",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "process attestation doesn't match current epoch",
|
|
||||||
a: util.HydrateAttestation(ðpb.Attestation{Data: ðpb.AttestationData{Slot: 100 * params.BeaconConfig().SlotsPerEpoch, Target: ðpb.Checkpoint{Epoch: 100,
|
|
||||||
Root: BlkWithStateBadAttRoot[:]}}}),
|
|
||||||
wantedErr: "target epoch 100 does not match current epoch",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "process nil attestation",
|
|
||||||
a: nil,
|
|
||||||
wantedErr: "attestation can't be nil",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "process nil field (a.Data) in attestation",
|
|
||||||
a: ðpb.Attestation{},
|
|
||||||
wantedErr: "attestation's data can't be nil",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "process nil field (a.Target) in attestation",
|
|
||||||
a: ðpb.Attestation{
|
|
||||||
Data: ðpb.AttestationData{
|
|
||||||
BeaconBlockRoot: make([]byte, fieldparams.RootLength),
|
|
||||||
Target: nil,
|
|
||||||
Source: ðpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)},
|
|
||||||
},
|
|
||||||
AggregationBits: make([]byte, 1),
|
|
||||||
Signature: make([]byte, 96),
|
|
||||||
},
|
|
||||||
wantedErr: "attestation's target can't be nil",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tt := range tests {
|
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
|
||||||
err := service.OnAttestation(ctx, tt.a)
|
|
||||||
if tt.wantedErr != "" {
|
|
||||||
assert.ErrorContains(t, tt.wantedErr, err)
|
|
||||||
} else {
|
|
||||||
assert.NoError(t, err)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestStore_OnAttestation_Ok_ProtoArray(t *testing.T) {
|
|
||||||
ctx := context.Background()
|
|
||||||
beaconDB := testDB.SetupDB(t)
|
|
||||||
|
|
||||||
fcs := protoarray.New(0, 0)
|
|
||||||
opts := []Option{
|
|
||||||
WithDatabase(beaconDB),
|
|
||||||
WithStateGen(stategen.New(beaconDB)),
|
|
||||||
WithForkChoiceStore(fcs),
|
|
||||||
}
|
|
||||||
service, err := NewService(ctx, opts...)
|
|
||||||
require.NoError(t, err)
|
|
||||||
genesisState, pks := util.DeterministicGenesisState(t, 64)
|
|
||||||
service.SetGenesisTime(time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot), 0))
|
|
||||||
require.NoError(t, service.saveGenesisData(ctx, genesisState))
|
|
||||||
att, err := util.GenerateAttestations(genesisState, pks, 1, 0, false)
|
|
||||||
require.NoError(t, err)
|
|
||||||
tRoot := bytesutil.ToBytes32(att[0].Data.Target.Root)
|
|
||||||
copied := genesisState.Copy()
|
|
||||||
copied, err = transition.ProcessSlots(ctx, copied, 1)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
|
|
||||||
state, blkRoot, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, 1, 1)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
|
||||||
require.NoError(t, service.OnAttestation(ctx, att[0]))
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestStore_OnAttestation_Ok_DoublyLinkedTree(t *testing.T) {
|
func TestStore_OnAttestation_Ok_DoublyLinkedTree(t *testing.T) {
|
||||||
ctx := context.Background()
|
service, tr := minimalTestService(t)
|
||||||
beaconDB := testDB.SetupDB(t)
|
ctx := tr.ctx
|
||||||
|
|
||||||
fcs := doublylinkedtree.New(0, 0)
|
|
||||||
opts := []Option{
|
|
||||||
WithDatabase(beaconDB),
|
|
||||||
WithStateGen(stategen.New(beaconDB)),
|
|
||||||
WithForkChoiceStore(fcs),
|
|
||||||
}
|
|
||||||
service, err := NewService(ctx, opts...)
|
|
||||||
require.NoError(t, err)
|
|
||||||
genesisState, pks := util.DeterministicGenesisState(t, 64)
|
genesisState, pks := util.DeterministicGenesisState(t, 64)
|
||||||
service.SetGenesisTime(time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot), 0))
|
service.SetGenesisTime(time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot), 0))
|
||||||
require.NoError(t, service.saveGenesisData(ctx, genesisState))
|
require.NoError(t, service.saveGenesisData(ctx, genesisState))
|
||||||
@@ -296,22 +137,17 @@ func TestStore_OnAttestation_Ok_DoublyLinkedTree(t *testing.T) {
|
|||||||
copied, err = transition.ProcessSlots(ctx, copied, 1)
|
copied, err = transition.ProcessSlots(ctx, copied, 1)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
|
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
|
||||||
state, blkRoot, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, 1, 1)
|
ojc := ðpb.Checkpoint{Epoch: 0, Root: tRoot[:]}
|
||||||
|
ofc := ðpb.Checkpoint{Epoch: 0, Root: tRoot[:]}
|
||||||
|
state, blkRoot, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||||
require.NoError(t, service.OnAttestation(ctx, att[0]))
|
require.NoError(t, service.OnAttestation(ctx, att[0], 0))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestStore_SaveCheckpointState(t *testing.T) {
|
func TestStore_SaveCheckpointState(t *testing.T) {
|
||||||
ctx := context.Background()
|
service, tr := minimalTestService(t)
|
||||||
beaconDB := testDB.SetupDB(t)
|
ctx := tr.ctx
|
||||||
|
|
||||||
opts := []Option{
|
|
||||||
WithDatabase(beaconDB),
|
|
||||||
WithStateGen(stategen.New(beaconDB)),
|
|
||||||
}
|
|
||||||
service, err := NewService(ctx, opts...)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
s, err := util.NewBeaconState()
|
s, err := util.NewBeaconState()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@@ -328,16 +164,13 @@ func TestStore_SaveCheckpointState(t *testing.T) {
|
|||||||
r := [32]byte{'g'}
|
r := [32]byte{'g'}
|
||||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, r))
|
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, r))
|
||||||
|
|
||||||
service.store.SetJustifiedCheckptAndPayloadHash(ðpb.Checkpoint{Root: r[:]}, [32]byte{'a'})
|
|
||||||
service.store.SetBestJustifiedCheckpt(ðpb.Checkpoint{Root: r[:]})
|
|
||||||
service.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{Root: r[:]}, [32]byte{'b'})
|
|
||||||
service.store.SetPrevFinalizedCheckpt(ðpb.Checkpoint{Root: r[:]})
|
|
||||||
|
|
||||||
r = bytesutil.ToBytes32([]byte{'A'})
|
|
||||||
cp1 := ðpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)}
|
cp1 := ðpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)}
|
||||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'A'})))
|
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'A'})))
|
||||||
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, ðpb.StateSummary{Root: bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)}))
|
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, ðpb.StateSummary{Root: bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)}))
|
||||||
|
|
||||||
|
st, root, err := prepareForkchoiceState(ctx, 1, [32]byte(cp1.Root), [32]byte{}, [32]byte{'R'}, cp1, cp1)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
|
||||||
s1, err := service.getAttPreState(ctx, cp1)
|
s1, err := service.getAttPreState(ctx, cp1)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
assert.Equal(t, 1*params.BeaconConfig().SlotsPerEpoch, s1.Slot(), "Unexpected state slot")
|
assert.Equal(t, 1*params.BeaconConfig().SlotsPerEpoch, s1.Slot(), "Unexpected state slot")
|
||||||
@@ -345,8 +178,17 @@ func TestStore_SaveCheckpointState(t *testing.T) {
|
|||||||
cp2 := ðpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte{'B'}, fieldparams.RootLength)}
|
cp2 := ðpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte{'B'}, fieldparams.RootLength)}
|
||||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'B'})))
|
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'B'})))
|
||||||
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, ðpb.StateSummary{Root: bytesutil.PadTo([]byte{'B'}, fieldparams.RootLength)}))
|
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, ðpb.StateSummary{Root: bytesutil.PadTo([]byte{'B'}, fieldparams.RootLength)}))
|
||||||
|
|
||||||
s2, err := service.getAttPreState(ctx, cp2)
|
s2, err := service.getAttPreState(ctx, cp2)
|
||||||
|
require.ErrorContains(t, "epoch 2 root 0x4200000000000000000000000000000000000000000000000000000000000000: not a checkpoint in forkchoice", err)
|
||||||
|
|
||||||
|
st, root, err = prepareForkchoiceState(ctx, 33, [32]byte(cp2.Root), [32]byte(cp1.Root), [32]byte{'R'}, cp2, cp2)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
|
||||||
|
|
||||||
|
s2, err = service.getAttPreState(ctx, cp2)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
assert.Equal(t, 2*params.BeaconConfig().SlotsPerEpoch, s2.Slot(), "Unexpected state slot")
|
assert.Equal(t, 2*params.BeaconConfig().SlotsPerEpoch, s2.Slot(), "Unexpected state slot")
|
||||||
|
|
||||||
s1, err = service.getAttPreState(ctx, cp1)
|
s1, err = service.getAttPreState(ctx, cp1)
|
||||||
@@ -362,33 +204,33 @@ func TestStore_SaveCheckpointState(t *testing.T) {
|
|||||||
assert.Equal(t, 2*params.BeaconConfig().SlotsPerEpoch, s2.Slot(), "Unexpected state slot")
|
assert.Equal(t, 2*params.BeaconConfig().SlotsPerEpoch, s2.Slot(), "Unexpected state slot")
|
||||||
|
|
||||||
require.NoError(t, s.SetSlot(params.BeaconConfig().SlotsPerEpoch+1))
|
require.NoError(t, s.SetSlot(params.BeaconConfig().SlotsPerEpoch+1))
|
||||||
service.store.SetJustifiedCheckptAndPayloadHash(ðpb.Checkpoint{Root: r[:]}, [32]byte{'a'})
|
|
||||||
service.store.SetBestJustifiedCheckpt(ðpb.Checkpoint{Root: r[:]})
|
|
||||||
service.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{Root: r[:]}, [32]byte{'b'})
|
|
||||||
service.store.SetPrevFinalizedCheckpt(ðpb.Checkpoint{Root: r[:]})
|
|
||||||
cp3 := ðpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'C'}, fieldparams.RootLength)}
|
cp3 := ðpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'C'}, fieldparams.RootLength)}
|
||||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'C'})))
|
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'C'})))
|
||||||
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, ðpb.StateSummary{Root: bytesutil.PadTo([]byte{'C'}, fieldparams.RootLength)}))
|
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, ðpb.StateSummary{Root: bytesutil.PadTo([]byte{'C'}, fieldparams.RootLength)}))
|
||||||
|
st, root, err = prepareForkchoiceState(ctx, 31, [32]byte(cp3.Root), [32]byte(cp2.Root), [32]byte{'P'}, cp2, cp2)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
|
||||||
|
|
||||||
s3, err := service.getAttPreState(ctx, cp3)
|
s3, err := service.getAttPreState(ctx, cp3)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
assert.Equal(t, s.Slot(), s3.Slot(), "Unexpected state slot")
|
assert.Equal(t, s.Slot(), s3.Slot(), "Unexpected state slot")
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestStore_UpdateCheckpointState(t *testing.T) {
|
func TestStore_UpdateCheckpointState(t *testing.T) {
|
||||||
ctx := context.Background()
|
service, tr := minimalTestService(t)
|
||||||
beaconDB := testDB.SetupDB(t)
|
ctx := tr.ctx
|
||||||
|
|
||||||
opts := []Option{
|
|
||||||
WithDatabase(beaconDB),
|
|
||||||
WithStateGen(stategen.New(beaconDB)),
|
|
||||||
}
|
|
||||||
service, err := NewService(ctx, opts...)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
epoch := types.Epoch(1)
|
|
||||||
baseState, _ := util.DeterministicGenesisState(t, 1)
|
baseState, _ := util.DeterministicGenesisState(t, 1)
|
||||||
checkpoint := ðpb.Checkpoint{Epoch: epoch, Root: bytesutil.PadTo([]byte("hi"), fieldparams.RootLength)}
|
|
||||||
|
epoch := primitives.Epoch(1)
|
||||||
|
blk := util.NewBeaconBlock()
|
||||||
|
r1, err := blk.Block.HashTreeRoot()
|
||||||
|
require.NoError(t, err)
|
||||||
|
checkpoint := ðpb.Checkpoint{Epoch: epoch, Root: r1[:]}
|
||||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(checkpoint.Root)))
|
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(checkpoint.Root)))
|
||||||
|
st, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r1, [32]byte{}, params.BeaconConfig().ZeroHash, checkpoint, checkpoint)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
|
||||||
|
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, r1))
|
||||||
returned, err := service.getAttPreState(ctx, checkpoint)
|
returned, err := service.getAttPreState(ctx, checkpoint)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
assert.Equal(t, params.BeaconConfig().SlotsPerEpoch.Mul(uint64(checkpoint.Epoch)), returned.Slot(), "Incorrectly returned base state")
|
assert.Equal(t, params.BeaconConfig().SlotsPerEpoch.Mul(uint64(checkpoint.Epoch)), returned.Slot(), "Incorrectly returned base state")
|
||||||
@@ -398,8 +240,16 @@ func TestStore_UpdateCheckpointState(t *testing.T) {
|
|||||||
assert.Equal(t, returned.Slot(), cached.Slot(), "State should have been cached")
|
assert.Equal(t, returned.Slot(), cached.Slot(), "State should have been cached")
|
||||||
|
|
||||||
epoch = 2
|
epoch = 2
|
||||||
newCheckpoint := ðpb.Checkpoint{Epoch: epoch, Root: bytesutil.PadTo([]byte("bye"), fieldparams.RootLength)}
|
blk = util.NewBeaconBlock()
|
||||||
|
blk.Block.Slot = 64
|
||||||
|
r2, err := blk.Block.HashTreeRoot()
|
||||||
|
require.NoError(t, err)
|
||||||
|
newCheckpoint := ðpb.Checkpoint{Epoch: epoch, Root: r2[:]}
|
||||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(newCheckpoint.Root)))
|
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(newCheckpoint.Root)))
|
||||||
|
st, blkRoot, err = prepareForkchoiceState(ctx, blk.Block.Slot, r2, r1, params.BeaconConfig().ZeroHash, newCheckpoint, newCheckpoint)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
|
||||||
|
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, r2))
|
||||||
returned, err = service.getAttPreState(ctx, newCheckpoint)
|
returned, err = service.getAttPreState(ctx, newCheckpoint)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
s, err := slots.EpochStart(newCheckpoint.Epoch)
|
s, err := slots.EpochStart(newCheckpoint.Epoch)
|
||||||
@@ -410,7 +260,7 @@ func TestStore_UpdateCheckpointState(t *testing.T) {
|
|||||||
|
|
||||||
cached, err = service.checkpointStateCache.StateByCheckpoint(newCheckpoint)
|
cached, err = service.checkpointStateCache.StateByCheckpoint(newCheckpoint)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.DeepSSZEqual(t, returned.InnerStateUnsafe(), cached.InnerStateUnsafe())
|
require.DeepSSZEqual(t, returned.ToProtoUnsafe(), cached.ToProtoUnsafe())
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAttEpoch_MatchPrevEpoch(t *testing.T) {
|
func TestAttEpoch_MatchPrevEpoch(t *testing.T) {
|
||||||
@@ -454,9 +304,7 @@ func TestVerifyBeaconBlock_futureBlock(t *testing.T) {
|
|||||||
|
|
||||||
b := util.NewBeaconBlock()
|
b := util.NewBeaconBlock()
|
||||||
b.Block.Slot = 2
|
b.Block.Slot = 2
|
||||||
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
|
util.SaveBlock(t, ctx, service.cfg.BeaconDB, b)
|
||||||
require.NoError(t, err)
|
|
||||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
|
|
||||||
r, err := b.Block.HashTreeRoot()
|
r, err := b.Block.HashTreeRoot()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
d := ðpb.AttestationData{Slot: 1, BeaconBlockRoot: r[:]}
|
d := ðpb.AttestationData{Slot: 1, BeaconBlockRoot: r[:]}
|
||||||
@@ -473,9 +321,7 @@ func TestVerifyBeaconBlock_OK(t *testing.T) {
|
|||||||
|
|
||||||
b := util.NewBeaconBlock()
|
b := util.NewBeaconBlock()
|
||||||
b.Block.Slot = 2
|
b.Block.Slot = 2
|
||||||
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
|
util.SaveBlock(t, ctx, service.cfg.BeaconDB, b)
|
||||||
require.NoError(t, err)
|
|
||||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
|
|
||||||
r, err := b.Block.HashTreeRoot()
|
r, err := b.Block.HashTreeRoot()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
d := ðpb.AttestationData{Slot: 2, BeaconBlockRoot: r[:]}
|
d := ðpb.AttestationData{Slot: 2, BeaconBlockRoot: r[:]}
|
||||||
@@ -483,135 +329,21 @@ func TestVerifyBeaconBlock_OK(t *testing.T) {
|
|||||||
assert.NoError(t, service.verifyBeaconBlock(ctx, d), "Did not receive the wanted error")
|
assert.NoError(t, service.verifyBeaconBlock(ctx, d), "Did not receive the wanted error")
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestVerifyFinalizedConsistency_InconsistentRoot_ProtoArray(t *testing.T) {
|
func TestGetAttPreState_HeadState(t *testing.T) {
|
||||||
ctx := context.Background()
|
service, tr := minimalTestService(t)
|
||||||
beaconDB := testDB.SetupDB(t)
|
ctx := tr.ctx
|
||||||
|
baseState, _ := util.DeterministicGenesisState(t, 1)
|
||||||
|
|
||||||
fcs := protoarray.New(0, 0)
|
epoch := primitives.Epoch(1)
|
||||||
opts := []Option{
|
blk := util.NewBeaconBlock()
|
||||||
WithDatabase(beaconDB),
|
r1, err := blk.Block.HashTreeRoot()
|
||||||
WithStateGen(stategen.New(beaconDB)),
|
|
||||||
WithForkChoiceStore(fcs),
|
|
||||||
}
|
|
||||||
service, err := NewService(ctx, opts...)
|
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
checkpoint := ðpb.Checkpoint{Epoch: epoch, Root: r1[:]}
|
||||||
b32 := util.NewBeaconBlock()
|
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(checkpoint.Root)))
|
||||||
b32.Block.Slot = 32
|
require.NoError(t, transition.UpdateNextSlotCache(ctx, checkpoint.Root, baseState))
|
||||||
wsb, err := wrapper.WrappedSignedBeaconBlock(b32)
|
_, err = service.getAttPreState(ctx, checkpoint)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
|
st, err := service.checkpointStateCache.StateByCheckpoint(checkpoint)
|
||||||
r32, err := b32.Block.HashTreeRoot()
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
service.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{Epoch: 1}, [32]byte{})
|
|
||||||
b33 := util.NewBeaconBlock()
|
|
||||||
b33.Block.Slot = 33
|
|
||||||
b33.Block.ParentRoot = r32[:]
|
|
||||||
wsb, err = wrapper.WrappedSignedBeaconBlock(b33)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
|
|
||||||
r33, err := b33.Block.HashTreeRoot()
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
err = service.VerifyFinalizedConsistency(context.Background(), r33[:])
|
|
||||||
require.ErrorContains(t, "Root and finalized store are not consistent", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestVerifyFinalizedConsistency_InconsistentRoot_DoublyLinkedTree(t *testing.T) {
|
|
||||||
ctx := context.Background()
|
|
||||||
beaconDB := testDB.SetupDB(t)
|
|
||||||
|
|
||||||
fcs := doublylinkedtree.New(0, 0)
|
|
||||||
opts := []Option{
|
|
||||||
WithDatabase(beaconDB),
|
|
||||||
WithStateGen(stategen.New(beaconDB)),
|
|
||||||
WithForkChoiceStore(fcs),
|
|
||||||
}
|
|
||||||
service, err := NewService(ctx, opts...)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
b32 := util.NewBeaconBlock()
|
|
||||||
b32.Block.Slot = 32
|
|
||||||
wsb, err := wrapper.WrappedSignedBeaconBlock(b32)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
|
|
||||||
r32, err := b32.Block.HashTreeRoot()
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
service.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{Epoch: 1}, [32]byte{})
|
|
||||||
b33 := util.NewBeaconBlock()
|
|
||||||
b33.Block.Slot = 33
|
|
||||||
b33.Block.ParentRoot = r32[:]
|
|
||||||
wsb, err = wrapper.WrappedSignedBeaconBlock(b33)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
|
|
||||||
r33, err := b33.Block.HashTreeRoot()
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
err = service.VerifyFinalizedConsistency(context.Background(), r33[:])
|
|
||||||
require.ErrorContains(t, "Root and finalized store are not consistent", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestVerifyFinalizedConsistency_OK(t *testing.T) {
|
|
||||||
ctx := context.Background()
|
|
||||||
|
|
||||||
opts := testServiceOptsWithDB(t)
|
|
||||||
service, err := NewService(ctx, opts...)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
b32 := util.NewBeaconBlock()
|
|
||||||
b32.Block.Slot = 32
|
|
||||||
wsb, err := wrapper.WrappedSignedBeaconBlock(b32)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
|
|
||||||
r32, err := b32.Block.HashTreeRoot()
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
service.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{Root: r32[:], Epoch: 1}, [32]byte{})
|
|
||||||
|
|
||||||
b33 := util.NewBeaconBlock()
|
|
||||||
b33.Block.Slot = 33
|
|
||||||
b33.Block.ParentRoot = r32[:]
|
|
||||||
wsb, err = wrapper.WrappedSignedBeaconBlock(b33)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
|
|
||||||
r33, err := b33.Block.HashTreeRoot()
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
err = service.VerifyFinalizedConsistency(context.Background(), r33[:])
|
|
||||||
require.NoError(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestVerifyFinalizedConsistency_IsCanonical(t *testing.T) {
|
|
||||||
ctx := context.Background()
|
|
||||||
|
|
||||||
opts := testServiceOptsWithDB(t)
|
|
||||||
service, err := NewService(ctx, opts...)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
b32 := util.NewBeaconBlock()
|
|
||||||
b32.Block.Slot = 32
|
|
||||||
r32, err := b32.Block.HashTreeRoot()
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
service.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{Root: r32[:], Epoch: 1}, [32]byte{})
|
|
||||||
|
|
||||||
b33 := util.NewBeaconBlock()
|
|
||||||
b33.Block.Slot = 33
|
|
||||||
b33.Block.ParentRoot = r32[:]
|
|
||||||
r33, err := b33.Block.HashTreeRoot()
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
state, blkRoot, err := prepareForkchoiceState(ctx, b32.Block.Slot, r32, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
|
||||||
state, blkRoot, err = prepareForkchoiceState(ctx, b33.Block.Slot, r33, r32, params.BeaconConfig().ZeroHash, 0, 0)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
|
||||||
|
|
||||||
_, err = service.cfg.ForkChoiceStore.Head(ctx, r32, []uint64{})
|
|
||||||
require.NoError(t, err)
|
|
||||||
err = service.VerifyFinalizedConsistency(context.Background(), r33[:])
|
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, params.BeaconConfig().SlotsPerEpoch, st.Slot())
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -6,29 +6,28 @@ import (
  "time"

  "github.com/pkg/errors"
- "github.com/prysmaticlabs/prysm/async/event"
- "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
- "github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
- statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
- "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
- coreTime "github.com/prysmaticlabs/prysm/beacon-chain/core/time"
- "github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
- forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
- "github.com/prysmaticlabs/prysm/beacon-chain/state"
- "github.com/prysmaticlabs/prysm/config/features"
- "github.com/prysmaticlabs/prysm/config/params"
- "github.com/prysmaticlabs/prysm/consensus-types/forks/bellatrix"
- "github.com/prysmaticlabs/prysm/consensus-types/interfaces"
- types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
- "github.com/prysmaticlabs/prysm/consensus-types/wrapper"
- "github.com/prysmaticlabs/prysm/crypto/bls"
- "github.com/prysmaticlabs/prysm/encoding/bytesutil"
- "github.com/prysmaticlabs/prysm/monitoring/tracing"
- ethpbv1 "github.com/prysmaticlabs/prysm/proto/eth/v1"
- ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
- "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/attestation"
- "github.com/prysmaticlabs/prysm/runtime/version"
- "github.com/prysmaticlabs/prysm/time/slots"
+ "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
+ "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
+ statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
+ "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
+ coreTime "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
+ "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
+ forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
+ "github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
+ "github.com/prysmaticlabs/prysm/v4/config/features"
+ "github.com/prysmaticlabs/prysm/v4/config/params"
+ consensusblocks "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
+ "github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
+ "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
+ "github.com/prysmaticlabs/prysm/v4/crypto/bls"
+ "github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
+ "github.com/prysmaticlabs/prysm/v4/monitoring/tracing"
+ ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
+ ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
+ "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1/attestation"
+ "github.com/prysmaticlabs/prysm/v4/runtime/version"
+ "github.com/prysmaticlabs/prysm/v4/time/slots"
+ "github.com/sirupsen/logrus"
  "go.opencensus.io/trace"
  )

@@ -46,58 +45,60 @@ var initialSyncBlockCacheSize = uint64(2 * params.BeaconConfig().SlotsPerEpoch)
|
|||||||
// computation in this method and methods it calls into.
|
// computation in this method and methods it calls into.
|
||||||
//
|
//
|
||||||
// Spec pseudocode definition:
|
// Spec pseudocode definition:
|
||||||
// def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
|
|
||||||
// block = signed_block.message
|
|
||||||
// # Parent block must be known
|
|
||||||
// assert block.parent_root in store.block_states
|
|
||||||
// # Make a copy of the state to avoid mutability issues
|
|
||||||
// pre_state = copy(store.block_states[block.parent_root])
|
|
||||||
// # Blocks cannot be in the future. If they are, their consideration must be delayed until the are in the past.
|
|
||||||
// assert get_current_slot(store) >= block.slot
|
|
||||||
//
|
//
|
||||||
// # Check that block is later than the finalized epoch slot (optimization to reduce calls to get_ancestor)
|
// def on_block(store: Store, signed_block: ReadOnlySignedBeaconBlock) -> None:
|
||||||
// finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
|
// block = signed_block.message
|
||||||
// assert block.slot > finalized_slot
|
// # Parent block must be known
|
||||||
// # Check block is a descendant of the finalized block at the checkpoint finalized slot
|
// assert block.parent_root in store.block_states
|
||||||
// assert get_ancestor(store, block.parent_root, finalized_slot) == store.finalized_checkpoint.root
|
// # Make a copy of the state to avoid mutability issues
|
||||||
|
// pre_state = copy(store.block_states[block.parent_root])
|
||||||
|
// # Blocks cannot be in the future. If they are, their consideration must be delayed until the are in the past.
|
||||||
|
// assert get_current_slot(store) >= block.slot
|
||||||
//
|
//
|
||||||
// # Check the block is valid and compute the post-state
|
// # Check that block is later than the finalized epoch slot (optimization to reduce calls to get_ancestor)
|
||||||
// state = pre_state.copy()
|
// finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
|
||||||
// state_transition(state, signed_block, True)
|
// assert block.slot > finalized_slot
|
||||||
// # Add new block to the store
|
// # Check block is a descendant of the finalized block at the checkpoint finalized slot
|
||||||
// store.blocks[hash_tree_root(block)] = block
|
// assert get_ancestor(store, block.parent_root, finalized_slot) == store.finalized_checkpoint.root
|
||||||
// # Add new state for this block to the store
|
|
||||||
// store.block_states[hash_tree_root(block)] = state
|
|
||||||
//
|
//
|
||||||
// # Update justified checkpoint
|
// # Check the block is valid and compute the post-state
|
||||||
// if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch:
|
// state = pre_state.copy()
|
||||||
// if state.current_justified_checkpoint.epoch > store.best_justified_checkpoint.epoch:
|
// state_transition(state, signed_block, True)
|
||||||
// store.best_justified_checkpoint = state.current_justified_checkpoint
|
// # Add new block to the store
|
||||||
// if should_update_justified_checkpoint(store, state.current_justified_checkpoint):
|
// store.blocks[hash_tree_root(block)] = block
|
||||||
// store.justified_checkpoint = state.current_justified_checkpoint
|
// # Add new state for this block to the store
|
||||||
|
// store.block_states[hash_tree_root(block)] = state
|
||||||
//
|
//
|
||||||
// # Update finalized checkpoint
|
// # Update justified checkpoint
|
||||||
// if state.finalized_checkpoint.epoch > store.finalized_checkpoint.epoch:
|
// if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch:
|
||||||
// store.finalized_checkpoint = state.finalized_checkpoint
|
// if state.current_justified_checkpoint.epoch > store.best_justified_checkpoint.epoch:
|
||||||
|
// store.best_justified_checkpoint = state.current_justified_checkpoint
|
||||||
|
// if should_update_justified_checkpoint(store, state.current_justified_checkpoint):
|
||||||
|
// store.justified_checkpoint = state.current_justified_checkpoint
|
||||||
//
|
//
|
||||||
// # Potentially update justified if different from store
|
// # Update finalized checkpoint
|
||||||
// if store.justified_checkpoint != state.current_justified_checkpoint:
|
// if state.finalized_checkpoint.epoch > store.finalized_checkpoint.epoch:
|
||||||
// # Update justified if new justified is later than store justified
|
// store.finalized_checkpoint = state.finalized_checkpoint
|
||||||
// if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch:
|
|
||||||
// store.justified_checkpoint = state.current_justified_checkpoint
|
|
||||||
// return
|
|
||||||
//
|
//
|
||||||
// # Update justified if store justified is not in chain with finalized checkpoint
|
// # Potentially update justified if different from store
|
||||||
// finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
|
// if store.justified_checkpoint != state.current_justified_checkpoint:
|
||||||
// ancestor_at_finalized_slot = get_ancestor(store, store.justified_checkpoint.root, finalized_slot)
|
// # Update justified if new justified is later than store justified
|
||||||
// if ancestor_at_finalized_slot != store.finalized_checkpoint.root:
|
// if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch:
|
||||||
// store.justified_checkpoint = state.current_justified_checkpoint
|
// store.justified_checkpoint = state.current_justified_checkpoint
|
||||||
func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlock, blockRoot [32]byte) error {
|
// return
|
||||||
|
//
|
||||||
|
// # Update justified if store justified is not in chain with finalized checkpoint
|
||||||
|
// finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
|
||||||
|
// ancestor_at_finalized_slot = get_ancestor(store, store.justified_checkpoint.root, finalized_slot)
|
||||||
|
// if ancestor_at_finalized_slot != store.finalized_checkpoint.root:
|
||||||
|
// store.justified_checkpoint = state.current_justified_checkpoint
|
||||||
|
func (s *Service) onBlock(ctx context.Context, signed interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) error {
|
||||||
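The spec pseudocode reproduced in the on_block comment above reduces to a handful of guard conditions that must hold before the state transition runs: the parent must be known, the block must not be from the future, it must be later than the finalized epoch slot, and it must descend from the finalized checkpoint. Below is an illustrative Go rendering of those checks as a pure function over the values the spec consults; it is a sketch of the same rules, not Prysm's implementation.

// Illustrative only: the on_block guard conditions from the spec comment above,
// expressed over plain inputs rather than a Store. Not Prysm's implementation.
func checkOnBlockGuards(
	blockSlot, currentSlot primitives.Slot,
	parentKnown bool,
	finalizedEpoch primitives.Epoch,
	ancestorAtFinalizedSlot, finalizedRoot [32]byte,
) error {
	if !parentKnown { // assert block.parent_root in store.block_states
		return errors.New("unknown parent block")
	}
	if currentSlot < blockSlot { // assert get_current_slot(store) >= block.slot
		return errors.New("block is from the future")
	}
	finalizedSlot, err := slots.EpochStart(finalizedEpoch) // compute_start_slot_at_epoch
	if err != nil {
		return err
	}
	if blockSlot <= finalizedSlot { // assert block.slot > finalized_slot
		return errors.New("block is not later than the finalized slot")
	}
	if ancestorAtFinalizedSlot != finalizedRoot { // descendant-of-finalized check
		return errors.New("block is not a descendant of the finalized checkpoint")
	}
	return nil
}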
  ctx, span := trace.StartSpan(ctx, "blockChain.onBlock")
  defer span.End()
- if err := wrapper.BeaconBlockIsNil(signed); err != nil {
- return invalidBlock{err}
+ if err := consensusblocks.BeaconBlockIsNil(signed); err != nil {
+ return invalidBlock{error: err}
  }
+ startTime := time.Now()
  b := signed.Block()

  preState, err := s.getBlockPreState(ctx, b)
@@ -105,33 +106,53 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
  return err
  }

+ // Verify that the parent block is in forkchoice
+ if !s.cfg.ForkChoiceStore.HasNode(b.ParentRoot()) {
+ return ErrNotDescendantOfFinalized
+ }

+ // Save current justified and finalized epochs for future use.
+ currStoreJustifiedEpoch := s.cfg.ForkChoiceStore.JustifiedCheckpoint().Epoch
+ currStoreFinalizedEpoch := s.cfg.ForkChoiceStore.FinalizedCheckpoint().Epoch
+ preStateFinalizedEpoch := preState.FinalizedCheckpoint().Epoch
+ preStateJustifiedEpoch := preState.CurrentJustifiedCheckpoint().Epoch

  preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
  if err != nil {
  return err
  }
+ stateTransitionStartTime := time.Now()
  postState, err := transition.ExecuteStateTransition(ctx, preState, signed)
  if err != nil {
- return invalidBlock{err}
+ return invalidBlock{error: err}
  }
+ stateTransitionProcessingTime.Observe(float64(time.Since(stateTransitionStartTime).Milliseconds()))

  postStateVersion, postStateHeader, err := getStateVersionAndPayload(postState)
  if err != nil {
  return err
  }
  isValidPayload, err := s.notifyNewPayload(ctx, postStateVersion, postStateHeader, signed)
  if err != nil {
- return fmt.Errorf("could not verify new payload: %v", err)
+ return errors.Wrap(err, "could not validate new payload")
  }
  if isValidPayload {
  if err := s.validateMergeTransitionBlock(ctx, preStateVersion, preStateHeader, signed); err != nil {
  return err
  }
  }

  if err := s.savePostStateInfo(ctx, blockRoot, signed, postState); err != nil {
  return err
  }
- if err := s.insertBlockAndAttestationsToForkChoiceStore(ctx, signed.Block(), blockRoot, postState); err != nil {
+ if err := s.insertBlockToForkchoiceStore(ctx, signed.Block(), blockRoot, postState); err != nil {
  return errors.Wrapf(err, "could not insert block %d to fork choice store", signed.Block().Slot())
  }
+ if err := s.handleBlockAttestations(ctx, signed.Block(), postState); err != nil {
+ return errors.Wrap(err, "could not handle block's attestations")
+ }

  s.InsertSlashingsToForkChoiceStore(ctx, signed.Block().Body().AttesterSlashings())
  if isValidPayload {
  if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, blockRoot); err != nil {
@@ -139,18 +160,6 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
  }
  }

- // We add a proposer score boost to fork choice for the block root if applicable, right after
- // running a successful state transition for the block.
- secondsIntoSlot := uint64(time.Since(s.genesisTime).Seconds()) % params.BeaconConfig().SecondsPerSlot
- if err := s.cfg.ForkChoiceStore.BoostProposerRoot(ctx, &forkchoicetypes.ProposerBoostRootArgs{
- BlockRoot: blockRoot,
- BlockSlot: signed.Block().Slot(),
- CurrentSlot: slots.SinceGenesis(s.genesisTime),
- SecondsIntoSlot: secondsIntoSlot,
- }); err != nil {
- return err
- }

  // If slasher is configured, forward the attestations in the block via
  // an event feed for processing.
  if features.Get().EnableSlasher {
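The block removed in the hunk above fed the proposer-boost arguments from wall-clock timing; the boost is only relevant when a block arrives early in its slot. The arithmetic itself is simple and worth keeping in view. Below is a standalone sketch of the same computation, using only the params call that appears in the removed lines; nothing else is assumed.

// Sketch of the seconds-into-slot arithmetic used by the removed proposer-boost call.
func secondsIntoSlot(genesisTime time.Time) uint64 {
	secondsPerSlot := params.BeaconConfig().SecondsPerSlot
	return uint64(time.Since(genesisTime).Seconds()) % secondsPerSlot
}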
@@ -178,70 +187,45 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
|
|||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Update justified check point.
|
justified := s.cfg.ForkChoiceStore.JustifiedCheckpoint()
|
||||||
justified, err := s.store.JustifiedCheckpt()
|
start := time.Now()
|
||||||
if err != nil {
|
headRoot, err := s.cfg.ForkChoiceStore.Head(ctx)
|
||||||
return errors.Wrap(err, "could not get justified checkpoint")
|
|
||||||
}
|
|
||||||
currJustifiedEpoch := justified.Epoch
|
|
||||||
psj := postState.CurrentJustifiedCheckpoint()
|
|
||||||
if psj == nil {
|
|
||||||
return errNilJustifiedCheckpoint
|
|
||||||
}
|
|
||||||
|
|
||||||
if psj.Epoch > currJustifiedEpoch {
|
|
||||||
if err := s.updateJustified(ctx, postState); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
finalized, err := s.store.FinalizedCheckpt()
|
|
||||||
if err != nil {
|
|
||||||
return errors.Wrap(err, "could not get finalized checkpoint")
|
|
||||||
}
|
|
||||||
if finalized == nil {
|
|
||||||
return errNilFinalizedInStore
|
|
||||||
}
|
|
||||||
psf := postState.FinalizedCheckpoint()
|
|
||||||
if psf == nil {
|
|
||||||
return errNilFinalizedCheckpoint
|
|
||||||
}
|
|
||||||
|
|
||||||
newFinalized := psf.Epoch > finalized.Epoch
|
|
||||||
if newFinalized {
|
|
||||||
s.store.SetPrevFinalizedCheckpt(finalized)
|
|
||||||
h, err := s.getPayloadHash(ctx, psf.Root)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
s.store.SetFinalizedCheckptAndPayloadHash(psf, h)
|
|
||||||
s.store.SetPrevJustifiedCheckpt(justified)
|
|
||||||
h, err = s.getPayloadHash(ctx, psj.Root)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
s.store.SetJustifiedCheckptAndPayloadHash(postState.CurrentJustifiedCheckpoint(), h)
|
|
||||||
// Update Forkchoice checkpoints
|
|
||||||
if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(psj); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := s.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(psf); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
balances, err := s.justifiedBalances.get(ctx, bytesutil.ToBytes32(justified.Root))
|
|
||||||
if err != nil {
|
|
||||||
msg := fmt.Sprintf("could not read balances for state w/ justified checkpoint %#x", justified.Root)
|
|
||||||
return errors.Wrap(err, msg)
|
|
||||||
}
|
|
||||||
headRoot, err := s.updateHead(ctx, balances)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.WithError(err).Warn("Could not update head")
|
log.WithError(err).Warn("Could not update head")
|
||||||
}
|
}
|
||||||
s.notifyEngineIfChangedHead(ctx, headRoot)
|
if blockRoot != headRoot {
|
||||||
|
receivedWeight, err := s.cfg.ForkChoiceStore.Weight(blockRoot)
|
||||||
|
if err != nil {
|
||||||
|
log.WithField("root", fmt.Sprintf("%#x", blockRoot)).Warn("could not determine node weight")
|
||||||
|
}
|
||||||
|
headWeight, err := s.cfg.ForkChoiceStore.Weight(headRoot)
|
||||||
|
if err != nil {
|
||||||
|
log.WithField("root", fmt.Sprintf("%#x", headRoot)).Warn("could not determine node weight")
|
||||||
|
}
|
||||||
|
log.WithFields(logrus.Fields{
|
||||||
|
"receivedRoot": fmt.Sprintf("%#x", blockRoot),
|
||||||
|
"receivedWeight": receivedWeight,
|
||||||
|
"headRoot": fmt.Sprintf("%#x", headRoot),
|
||||||
|
"headWeight": headWeight,
|
||||||
|
}).Debug("Head block is not the received block")
|
||||||
|
} else {
|
||||||
|
// Updating next slot state cache can happen in the background. It shouldn't block rest of the process.
|
||||||
|
go func() {
|
||||||
|
// Use a custom deadline here, since this method runs asynchronously.
|
||||||
|
// We ignore the parent method's context and instead create a new one
|
||||||
|
// with a custom deadline, therefore using the background context instead.
|
||||||
|
slotCtx, cancel := context.WithTimeout(context.Background(), slotDeadline)
|
||||||
|
defer cancel()
|
||||||
|
if err := transition.UpdateNextSlotCache(slotCtx, blockRoot[:], postState); err != nil {
|
||||||
|
log.WithError(err).Debug("could not update next slot state cache")
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
newBlockHeadElapsedTime.Observe(float64(time.Since(start).Milliseconds()))
|
||||||
|
|
||||||
if err := s.pruneCanonicalAttsFromPool(ctx, blockRoot, signed); err != nil {
|
// verify conditions for FCU, notifies FCU, and saves the new head.
|
||||||
|
// This function also prunes attestations, other similar operations happen in prunePostBlockOperationPools.
|
||||||
|
if _, err := s.forkchoiceUpdateWithExecution(ctx, headRoot, s.CurrentSlot()+1); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -256,46 +240,36 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
|
|||||||
},
|
},
|
||||||
})
|
})
|
||||||
|
|
||||||
// Updating next slot state cache can happen in the background. It shouldn't block rest of the process.
|
|
||||||
go func() {
|
|
||||||
// Use a custom deadline here, since this method runs asynchronously.
|
|
||||||
// We ignore the parent method's context and instead create a new one
|
|
||||||
// with a custom deadline, therefore using the background context instead.
|
|
||||||
slotCtx, cancel := context.WithTimeout(context.Background(), slotDeadline)
|
|
||||||
defer cancel()
|
|
||||||
if err := transition.UpdateNextSlotCache(slotCtx, blockRoot[:], postState); err != nil {
|
|
||||||
log.WithError(err).Debug("could not update next slot state cache")
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Save justified check point to db.
|
// Save justified check point to db.
|
||||||
if postState.CurrentJustifiedCheckpoint().Epoch > currJustifiedEpoch {
|
postStateJustifiedEpoch := postState.CurrentJustifiedCheckpoint().Epoch
|
||||||
if err := s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, postState.CurrentJustifiedCheckpoint()); err != nil {
|
if justified.Epoch > currStoreJustifiedEpoch || (justified.Epoch == postStateJustifiedEpoch && justified.Epoch > preStateJustifiedEpoch) {
|
||||||
|
if err := s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, ðpb.Checkpoint{
|
||||||
|
Epoch: justified.Epoch, Root: justified.Root[:],
|
||||||
|
}); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Update finalized check point.
|
// Save finalized check point to db and more.
|
||||||
if newFinalized {
|
postStateFinalizedEpoch := postState.FinalizedCheckpoint().Epoch
|
||||||
if err := s.updateFinalized(ctx, postState.FinalizedCheckpoint()); err != nil {
|
finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
|
||||||
|
if finalized.Epoch > currStoreFinalizedEpoch || (finalized.Epoch == postStateFinalizedEpoch && finalized.Epoch > preStateFinalizedEpoch) {
|
||||||
|
if err := s.updateFinalized(ctx, ðpb.Checkpoint{Epoch: finalized.Epoch, Root: finalized.Root[:]}); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
fRoot := bytesutil.ToBytes32(postState.FinalizedCheckpoint().Root)
|
isOptimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(finalized.Root)
|
||||||
if err := s.cfg.ForkChoiceStore.Prune(ctx, fRoot); err != nil {
|
|
||||||
return errors.Wrap(err, "could not prune fork choice nodes")
|
|
||||||
}
|
|
||||||
isOptimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(fRoot)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "could not check if node is optimistically synced")
|
return errors.Wrap(err, "could not check if node is optimistically synced")
|
||||||
}
|
}
|
||||||
go func() {
|
go func() {
|
||||||
// Send an event regarding the new finalized checkpoint over a common event feed.
|
// Send an event regarding the new finalized checkpoint over a common event feed.
|
||||||
|
stateRoot := signed.Block().StateRoot()
|
||||||
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
|
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
|
||||||
Type: statefeed.FinalizedCheckpoint,
|
Type: statefeed.FinalizedCheckpoint,
|
||||||
Data: ðpbv1.EventFinalizedCheckpoint{
|
Data: ðpbv1.EventFinalizedCheckpoint{
|
||||||
Epoch: postState.FinalizedCheckpoint().Epoch,
|
Epoch: postState.FinalizedCheckpoint().Epoch,
|
||||||
Block: postState.FinalizedCheckpoint().Root,
|
Block: postState.FinalizedCheckpoint().Root,
|
||||||
State: signed.Block().StateRoot(),
|
State: stateRoot[:],
|
||||||
ExecutionOptimistic: isOptimistic,
|
ExecutionOptimistic: isOptimistic,
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
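In the hunk above, onBlock now reads the justified and finalized checkpoints back from the forkchoice store after the block has been inserted, and persists each one to the database only when processing this block actually advanced it, either past what the store held before or past what the pre-state carried. A condensed sketch of that persistence rule follows; it assumes only that checkpoints carry an Epoch, as the diff shows, and is illustrative rather than the exact production code.

// Sketch of the persistence rule from the rewritten onBlock: write the checkpoint to the DB
// only when this block moved it forward relative to the store or the pre-state.
func shouldPersistCheckpoint(storeEpochBefore, preStateEpoch, postStateEpoch, storeEpochAfter primitives.Epoch) bool {
	return storeEpochAfter > storeEpochBefore ||
		(storeEpochAfter == postStateEpoch && storeEpochAfter > preStateEpoch)
}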
@@ -305,23 +279,24 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
|
|||||||
// with a custom deadline, therefore using the background context instead.
|
// with a custom deadline, therefore using the background context instead.
|
||||||
depCtx, cancel := context.WithTimeout(context.Background(), depositDeadline)
|
depCtx, cancel := context.WithTimeout(context.Background(), depositDeadline)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
if err := s.insertFinalizedDeposits(depCtx, fRoot); err != nil {
|
if err := s.insertFinalizedDeposits(depCtx, finalized.Root); err != nil {
|
||||||
log.WithError(err).Error("Could not insert finalized deposits.")
|
log.WithError(err).Error("Could not insert finalized deposits.")
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
  defer reportAttestationInclusion(b)
- return s.handleEpochBoundary(ctx, postState)
+ if err := s.handleEpochBoundary(ctx, postState); err != nil {
+ return err
+ }
+ onBlockProcessingTime.Observe(float64(time.Since(startTime).Milliseconds()))
+ return nil
  }

- func getStateVersionAndPayload(st state.BeaconState) (int, *ethpb.ExecutionPayloadHeader, error) {
+ func getStateVersionAndPayload(st state.BeaconState) (int, interfaces.ExecutionData, error) {
  if st == nil {
  return 0, nil, errors.New("nil state")
  }
- var preStateHeader *ethpb.ExecutionPayloadHeader
+ var preStateHeader interfaces.ExecutionData
  var err error
  preStateVersion := st.Version()
  switch preStateVersion {
@@ -335,7 +310,7 @@ func getStateVersionAndPayload(st state.BeaconState) (int, *ethpb.ExecutionPaylo
  return preStateVersion, preStateHeader, nil
  }

- func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeaconBlock,
+ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.ReadOnlySignedBeaconBlock,
  blockRoots [][32]byte) error {
  ctx, span := trace.StartSpan(ctx, "blockChain.onBlockBatch")
  defer span.End()
@@ -348,8 +323,8 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
  return errWrongBlockCount
  }

- if err := wrapper.BeaconBlockIsNil(blks[0]); err != nil {
- return invalidBlock{err}
+ if err := consensusblocks.BeaconBlockIsNil(blks[0]); err != nil {
+ return invalidBlock{error: err}
  }
  b := blks[0].Block()

@@ -357,7 +332,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
  if err := s.verifyBlkPreState(ctx, b); err != nil {
  return err
  }
- preState, err := s.cfg.StateGen.StateByRootInitialSync(ctx, bytesutil.ToBytes32(b.ParentRoot()))
+ preState, err := s.cfg.StateGen.StateByRootInitialSync(ctx, b.ParentRoot())
  if err != nil {
  return err
  }
@@ -372,14 +347,10 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac

  jCheckpoints := make([]*ethpb.Checkpoint, len(blks))
  fCheckpoints := make([]*ethpb.Checkpoint, len(blks))
- sigSet := &bls.SignatureBatch{
- Signatures: [][]byte{},
- PublicKeys: []bls.PublicKey{},
- Messages: [][32]byte{},
- }
+ sigSet := bls.NewSet()
  type versionAndHeader struct {
  version int
- header *ethpb.ExecutionPayloadHeader
+ header interfaces.ExecutionData
  }
  preVersionAndHeaders := make([]*versionAndHeader, len(blks))
  postVersionAndHeaders := make([]*versionAndHeader, len(blks))
@@ -397,7 +368,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac

  set, preState, err = transition.ExecuteStateTransitionNoVerifyAnySig(ctx, preState, b)
  if err != nil {
- return invalidBlock{error: err}
+ return invalidBlock{error: err}
  }
  // Save potential boundary states.
  if slots.IsEpochStart(preState.Slot()) {
|
if slots.IsEpochStart(preState.Slot()) {
|
||||||
@@ -416,9 +387,15 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
|
|||||||
}
|
}
|
||||||
sigSet.Join(set)
|
sigSet.Join(set)
|
||||||
}
|
}
|
||||||
verify, err := sigSet.Verify()
|
|
||||||
|
var verify bool
|
||||||
|
if features.Get().EnableVerboseSigVerification {
|
||||||
|
verify, err = sigSet.VerifyVerbosely()
|
||||||
|
} else {
|
||||||
|
verify, err = sigSet.Verify()
|
||||||
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return invalidBlock{err}
|
return invalidBlock{error: err}
|
||||||
}
|
}
|
||||||
if !verify {
|
if !verify {
|
||||||
return errors.New("batch block signature verification failed")
|
return errors.New("batch block signature verification failed")
|
||||||
@@ -441,46 +418,59 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
args := &forkchoicetypes.BlockAndCheckpoints{Block: b.Block(),
|
args := &forkchoicetypes.BlockAndCheckpoints{Block: b.Block(),
|
||||||
JustifiedEpoch: jCheckpoints[i].Epoch,
|
JustifiedCheckpoint: jCheckpoints[i],
|
||||||
FinalizedEpoch: fCheckpoints[i].Epoch}
|
FinalizedCheckpoint: fCheckpoints[i]}
|
||||||
pendingNodes[len(blks)-i-1] = args
|
pendingNodes[len(blks)-i-1] = args
|
||||||
s.saveInitSyncBlock(blockRoots[i], b)
|
if err := s.saveInitSyncBlock(ctx, blockRoots[i], b); err != nil {
|
||||||
if err = s.handleBlockAfterBatchVerify(ctx, b, blockRoots[i], fCheckpoints[i], jCheckpoints[i]); err != nil {
|
|
||||||
tracing.AnnotateError(span, err)
|
tracing.AnnotateError(span, err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
if err := s.cfg.BeaconDB.SaveStateSummary(ctx, ðpb.StateSummary{
|
||||||
// Insert all nodes but the last one to forkchoice
|
Slot: b.Block().Slot(),
|
||||||
if err := s.cfg.ForkChoiceStore.InsertOptimisticChain(ctx, pendingNodes); err != nil {
|
Root: blockRoots[i][:],
|
||||||
return errors.Wrap(err, "could not insert batch to forkchoice")
|
}); err != nil {
|
||||||
}
|
tracing.AnnotateError(span, err)
|
||||||
// Insert the last block to forkchoice
|
return err
|
||||||
lastBR := blockRoots[len(blks)-1]
|
}
|
||||||
if err := s.cfg.ForkChoiceStore.InsertNode(ctx, preState, lastBR); err != nil {
|
if i > 0 && jCheckpoints[i].Epoch > jCheckpoints[i-1].Epoch {
|
||||||
return errors.Wrap(err, "could not insert last block in batch to forkchoice")
|
if err := s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, jCheckpoints[i]); err != nil {
|
||||||
}
|
tracing.AnnotateError(span, err)
|
||||||
// Prune forkchoice store
|
return err
|
||||||
if err := s.cfg.ForkChoiceStore.Prune(ctx, s.ensureRootNotZeros(bytesutil.ToBytes32(fCheckpoints[len(blks)-1].Root))); err != nil {
|
}
|
||||||
return errors.Wrap(err, "could not prune fork choice nodes")
|
}
|
||||||
}
|
if i > 0 && fCheckpoints[i].Epoch > fCheckpoints[i-1].Epoch {
|
||||||
|
if err := s.updateFinalized(ctx, fCheckpoints[i]); err != nil {
|
||||||
// Set their optimistic status
|
tracing.AnnotateError(span, err)
|
||||||
if isValidPayload {
|
return err
|
||||||
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, lastBR); err != nil {
|
}
|
||||||
return errors.Wrap(err, "could not set optimistic block to valid")
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
// Save boundary states that will be useful for forkchoice
|
||||||
for r, st := range boundaries {
|
for r, st := range boundaries {
|
||||||
if err := s.cfg.StateGen.SaveState(ctx, r, st); err != nil {
|
if err := s.cfg.StateGen.SaveState(ctx, r, st); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Also saves the last post state which to be used as pre state for the next batch.
|
// Also saves the last post state which to be used as pre state for the next batch.
|
||||||
lastB := blks[len(blks)-1]
|
lastBR := blockRoots[len(blks)-1]
|
||||||
if err := s.cfg.StateGen.SaveState(ctx, lastBR, preState); err != nil {
|
if err := s.cfg.StateGen.SaveState(ctx, lastBR, preState); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
// Insert all nodes but the last one to forkchoice
|
||||||
|
if err := s.cfg.ForkChoiceStore.InsertChain(ctx, pendingNodes); err != nil {
|
||||||
|
return errors.Wrap(err, "could not insert batch to forkchoice")
|
||||||
|
}
|
||||||
|
// Insert the last block to forkchoice
|
||||||
|
if err := s.cfg.ForkChoiceStore.InsertNode(ctx, preState, lastBR); err != nil {
|
||||||
|
return errors.Wrap(err, "could not insert last block in batch to forkchoice")
|
||||||
|
}
|
||||||
|
// Set their optimistic status
|
||||||
|
if isValidPayload {
|
||||||
|
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, lastBR); err != nil {
|
||||||
|
return errors.Wrap(err, "could not set optimistic block to valid")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
lastB := blks[len(blks)-1]
|
||||||
arg := ¬ifyForkchoiceUpdateArg{
|
arg := ¬ifyForkchoiceUpdateArg{
|
||||||
headState: preState,
|
headState: preState,
|
||||||
headRoot: lastBR,
|
headRoot: lastBR,
|
||||||
@@ -492,66 +482,12 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.SignedBeac
|
|||||||
return s.saveHeadNoDB(ctx, lastB, lastBR, preState)
|
return s.saveHeadNoDB(ctx, lastB, lastBR, preState)
|
||||||
}
|
}
|
||||||
|
|
||||||
// handles a block after the block's batch has been verified, where we can save blocks
|
|
||||||
// their state summaries and split them off to relative hot/cold storage.
|
|
||||||
func (s *Service) handleBlockAfterBatchVerify(ctx context.Context, signed interfaces.SignedBeaconBlock,
|
|
||||||
blockRoot [32]byte, fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
|
|
||||||
|
|
||||||
if err := s.cfg.BeaconDB.SaveStateSummary(ctx, ðpb.StateSummary{
|
|
||||||
Slot: signed.Block().Slot(),
|
|
||||||
Root: blockRoot[:],
|
|
||||||
}); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Rate limit how many blocks (2 epochs worth of blocks) a node keeps in the memory.
|
|
||||||
if uint64(len(s.getInitSyncBlocks())) > initialSyncBlockCacheSize {
|
|
||||||
if err := s.cfg.BeaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
s.clearInitSyncBlocks()
|
|
||||||
}
|
|
||||||
|
|
||||||
justified, err := s.store.JustifiedCheckpt()
|
|
||||||
if err != nil {
|
|
||||||
return errors.Wrap(err, "could not get justified checkpoint")
|
|
||||||
}
|
|
||||||
if jCheckpoint.Epoch > justified.Epoch {
|
|
||||||
if err := s.updateJustifiedInitSync(ctx, jCheckpoint); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
finalized, err := s.store.FinalizedCheckpt()
|
|
||||||
if err != nil {
|
|
||||||
return errors.Wrap(err, "could not get finalized checkpoint")
|
|
||||||
}
|
|
||||||
if finalized == nil {
|
|
||||||
return errNilFinalizedInStore
|
|
||||||
}
|
|
||||||
// Update finalized check point. Prune the block cache and helper caches on every new finalized epoch.
|
|
||||||
if fCheckpoint.Epoch > finalized.Epoch {
|
|
||||||
if err := s.updateFinalized(ctx, fCheckpoint); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
s.store.SetPrevFinalizedCheckpt(finalized)
|
|
||||||
h, err := s.getPayloadHash(ctx, fCheckpoint.Root)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
s.store.SetFinalizedCheckptAndPayloadHash(fCheckpoint, h)
|
|
||||||
if err := s.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(fCheckpoint); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Epoch boundary bookkeeping such as logging epoch summaries.
|
// Epoch boundary bookkeeping such as logging epoch summaries.
|
||||||
func (s *Service) handleEpochBoundary(ctx context.Context, postState state.BeaconState) error {
|
func (s *Service) handleEpochBoundary(ctx context.Context, postState state.BeaconState) error {
|
||||||
ctx, span := trace.StartSpan(ctx, "blockChain.handleEpochBoundary")
|
ctx, span := trace.StartSpan(ctx, "blockChain.handleEpochBoundary")
|
||||||
defer span.End()
|
defer span.End()
|
||||||
|
|
||||||
|
var err error
|
||||||
if postState.Slot()+1 == s.nextEpochBoundarySlot {
|
if postState.Slot()+1 == s.nextEpochBoundarySlot {
|
||||||
copied := postState.Copy()
|
copied := postState.Copy()
|
||||||
copied, err := transition.ProcessSlots(ctx, copied, copied.Slot()+1)
|
copied, err := transition.ProcessSlots(ctx, copied, copied.Slot()+1)
|
||||||
@@ -566,43 +502,52 @@ func (s *Service) handleEpochBoundary(ctx context.Context, postState state.Beaco
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
} else if postState.Slot() >= s.nextEpochBoundarySlot {
|
} else if postState.Slot() >= s.nextEpochBoundarySlot {
|
||||||
s.headLock.RLock()
|
|
||||||
st := s.head.state
|
|
||||||
s.headLock.RUnlock()
|
|
||||||
if err := reportEpochMetrics(ctx, postState, st); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
var err error
|
|
||||||
s.nextEpochBoundarySlot, err = slots.EpochStart(coreTime.NextEpoch(postState))
|
s.nextEpochBoundarySlot, err = slots.EpochStart(coreTime.NextEpoch(postState))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Update caches at epoch boundary slot.
|
// Update caches at epoch boundary slot.
|
||||||
// The following updates have short cut to return nil cheaply if fulfilled during boundary slot - 1.
|
// The following updates have shortcut to return nil cheaply if fulfilled during boundary slot - 1.
|
||||||
if err := helpers.UpdateCommitteeCache(ctx, postState, coreTime.CurrentEpoch(postState)); err != nil {
|
if err := helpers.UpdateCommitteeCache(ctx, postState, coreTime.CurrentEpoch(postState)); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if err := helpers.UpdateProposerIndicesInCache(ctx, postState); err != nil {
|
if err := helpers.UpdateProposerIndicesInCache(ctx, postState); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
headSt, err := s.HeadState(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := reportEpochMetrics(ctx, postState, headSt); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// This feeds in the block and block's attestations to fork choice store. It's allows fork choice store
|
// This feeds in the block to fork choice store. It's allows fork choice store
|
||||||
// to gain information on the most current chain.
|
// to gain information on the most current chain.
|
||||||
func (s *Service) insertBlockAndAttestationsToForkChoiceStore(ctx context.Context, blk interfaces.BeaconBlock, root [32]byte, st state.BeaconState) error {
|
func (s *Service) insertBlockToForkchoiceStore(ctx context.Context, blk interfaces.ReadOnlyBeaconBlock, root [32]byte, st state.BeaconState) error {
|
||||||
ctx, span := trace.StartSpan(ctx, "blockChain.insertBlockAndAttestationsToForkChoiceStore")
|
ctx, span := trace.StartSpan(ctx, "blockChain.insertBlockToForkchoiceStore")
|
||||||
defer span.End()
|
defer span.End()
|
||||||
|
|
||||||
fCheckpoint := st.FinalizedCheckpoint()
|
if !s.cfg.ForkChoiceStore.HasNode(blk.ParentRoot()) {
|
||||||
jCheckpoint := st.CurrentJustifiedCheckpoint()
|
fCheckpoint := st.FinalizedCheckpoint()
|
||||||
if err := s.insertBlockToForkChoiceStore(ctx, blk, root, st, fCheckpoint, jCheckpoint); err != nil {
|
jCheckpoint := st.CurrentJustifiedCheckpoint()
|
||||||
return err
|
if err := s.fillInForkChoiceMissingBlocks(ctx, blk, fCheckpoint, jCheckpoint); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
return s.cfg.ForkChoiceStore.InsertNode(ctx, st, root)
|
||||||
|
}
|
||||||
|
|
||||||
|
// This feeds in the attestations included in the block to fork choice store. It's allows fork choice store
|
||||||
|
// to gain information on the most current chain.
|
||||||
|
func (s *Service) handleBlockAttestations(ctx context.Context, blk interfaces.ReadOnlyBeaconBlock, st state.BeaconState) error {
|
||||||
// Feed in block's attestations to fork choice store.
|
// Feed in block's attestations to fork choice store.
|
||||||
for _, a := range blk.Body().Attestations() {
|
for _, a := range blk.Body().Attestations() {
|
||||||
committee, err := helpers.BeaconCommitteeFromState(ctx, st, a.Data.Slot, a.Data.CommitteeIndex)
|
committee, err := helpers.BeaconCommitteeFromState(ctx, st, a.Data.Slot, a.Data.CommitteeIndex)
|
||||||
@@ -613,32 +558,31 @@ func (s *Service) insertBlockAndAttestationsToForkChoiceStore(ctx context.Contex
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
s.cfg.ForkChoiceStore.ProcessAttestation(ctx, indices, bytesutil.ToBytes32(a.Data.BeaconBlockRoot), a.Data.Target.Epoch)
|
r := bytesutil.ToBytes32(a.Data.BeaconBlockRoot)
|
||||||
|
if s.cfg.ForkChoiceStore.HasNode(r) {
|
||||||
|
s.cfg.ForkChoiceStore.ProcessAttestation(ctx, indices, r, a.Data.Target.Epoch)
|
||||||
|
} else if err := s.cfg.AttPool.SaveBlockAttestation(a); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Service) insertBlockToForkChoiceStore(ctx context.Context, blk interfaces.BeaconBlock, root [32]byte, st state.BeaconState, fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
|
// InsertSlashingsToForkChoiceStore inserts attester slashing indices to fork choice store.
|
||||||
if err := s.fillInForkChoiceMissingBlocks(ctx, blk, fCheckpoint, jCheckpoint); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return s.cfg.ForkChoiceStore.InsertNode(ctx, st, root)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Inserts attester slashing indices to fork choice store.
|
|
||||||
// To call this function, it's caller's responsibility to ensure the slashing object is valid.
|
// To call this function, it's caller's responsibility to ensure the slashing object is valid.
|
||||||
|
// This function requires a write lock on forkchoice.
|
||||||
func (s *Service) InsertSlashingsToForkChoiceStore(ctx context.Context, slashings []*ethpb.AttesterSlashing) {
|
func (s *Service) InsertSlashingsToForkChoiceStore(ctx context.Context, slashings []*ethpb.AttesterSlashing) {
|
||||||
for _, slashing := range slashings {
|
for _, slashing := range slashings {
|
||||||
indices := blocks.SlashableAttesterIndices(slashing)
|
indices := blocks.SlashableAttesterIndices(slashing)
|
||||||
for _, index := range indices {
|
for _, index := range indices {
|
||||||
s.ForkChoicer().InsertSlashedIndex(ctx, types.ValidatorIndex(index))
|
s.cfg.ForkChoiceStore.InsertSlashedIndex(ctx, primitives.ValidatorIndex(index))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// This saves post state info to DB or cache. This also saves post state info to fork choice store.
|
// This saves post state info to DB or cache. This also saves post state info to fork choice store.
|
||||||
// Post state info consists of processed block and state. Do not call this method unless the block and state are verified.
|
// Post state info consists of processed block and state. Do not call this method unless the block and state are verified.
|
||||||
func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b interfaces.SignedBeaconBlock, st state.BeaconState) error {
|
func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b interfaces.ReadOnlySignedBeaconBlock, st state.BeaconState) error {
|
||||||
ctx, span := trace.StartSpan(ctx, "blockChain.savePostStateInfo")
|
ctx, span := trace.StartSpan(ctx, "blockChain.savePostStateInfo")
|
||||||
defer span.End()
|
defer span.End()
|
||||||
if err := s.cfg.BeaconDB.SaveBlock(ctx, b); err != nil {
|
if err := s.cfg.BeaconDB.SaveBlock(ctx, b); err != nil {
|
||||||
@@ -650,22 +594,9 @@ func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b interface
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// This removes the attestations from the mem pool. It will only remove the attestations if input root `r` is canonical,
|
// This removes the attestations in block `b` from the attestation mem pool.
|
||||||
// meaning the block `b` is part of the canonical chain.
|
func (s *Service) pruneAttsFromPool(headBlock interfaces.ReadOnlySignedBeaconBlock) error {
|
||||||
func (s *Service) pruneCanonicalAttsFromPool(ctx context.Context, r [32]byte, b interfaces.SignedBeaconBlock) error {
|
atts := headBlock.Block().Body().Attestations()
|
||||||
if !features.Get().CorrectlyPruneCanonicalAtts {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
canonical, err := s.IsCanonical(ctx, r)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if !canonical {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
atts := b.Block().Body().Attestations()
|
|
||||||
for _, att := range atts {
|
for _, att := range atts {
|
||||||
if helpers.IsAggregated(att) {
|
if helpers.IsAggregated(att) {
|
||||||
if err := s.cfg.AttPool.DeleteAggregatedAttestation(att); err != nil {
|
if err := s.cfg.AttPool.DeleteAggregatedAttestation(att); err != nil {
|
||||||
@@ -681,18 +612,22 @@ func (s *Service) pruneCanonicalAttsFromPool(ctx context.Context, r [32]byte, b
|
|||||||
}
|
}
|
||||||
|
|
||||||
// validateMergeTransitionBlock validates the merge transition block.
|
// validateMergeTransitionBlock validates the merge transition block.
|
||||||
func (s *Service) validateMergeTransitionBlock(ctx context.Context, stateVersion int, stateHeader *ethpb.ExecutionPayloadHeader, blk interfaces.SignedBeaconBlock) error {
|
func (s *Service) validateMergeTransitionBlock(ctx context.Context, stateVersion int, stateHeader interfaces.ExecutionData, blk interfaces.ReadOnlySignedBeaconBlock) error {
|
||||||
// Skip validation if block is older than Bellatrix.
|
// Skip validation if block is older than Bellatrix.
|
||||||
if blocks.IsPreBellatrixVersion(blk.Block().Version()) {
|
if blocks.IsPreBellatrixVersion(blk.Block().Version()) {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Skip validation if block has an empty payload.
|
// Skip validation if block has an empty payload.
|
||||||
payload, err := blk.Block().Body().ExecutionPayload()
|
payload, err := blk.Block().Body().Execution()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return invalidBlock{err}
|
return invalidBlock{error: err}
|
||||||
}
|
}
|
||||||
if bellatrix.IsEmptyPayload(payload) {
|
isEmpty, err := consensusblocks.IsEmptyExecutionData(payload)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if isEmpty {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -703,11 +638,12 @@ func (s *Service) validateMergeTransitionBlock(ctx context.Context, stateVersion
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Skip validation if the block is not a merge transition block.
|
// Skip validation if the block is not a merge transition block.
|
||||||
atTransition, err := blocks.IsMergeTransitionBlockUsingPreStatePayloadHeader(stateHeader, blk.Block().Body())
|
// To reach here. The payload must be non-empty. If the state header is empty then it's at transition.
|
||||||
|
empty, err := consensusblocks.IsEmptyExecutionData(stateHeader)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "could not check if merge block is terminal")
|
return err
|
||||||
}
|
}
|
||||||
if !atTransition {
|
if !empty {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
return s.validateMergeBlock(ctx, blk)
|
return s.validateMergeBlock(ctx, blk)
|
||||||
@@ -715,40 +651,20 @@ func (s *Service) validateMergeTransitionBlock(ctx context.Context, stateVersion
|
|||||||
|
|
||||||
// This routine checks if there is a cached proposer payload ID available for the next slot proposer.
|
// This routine checks if there is a cached proposer payload ID available for the next slot proposer.
|
||||||
// If there is not, it will call forkchoice updated with the correct payload attribute then cache the payload ID.
|
// If there is not, it will call forkchoice updated with the correct payload attribute then cache the payload ID.
|
||||||
func (s *Service) fillMissingPayloadIDRoutine(ctx context.Context, stateFeed *event.Feed) {
|
func (s *Service) spawnLateBlockTasksLoop() {
|
||||||
// Wait for state to be initialized.
|
|
||||||
stateChannel := make(chan *feed.Event, 1)
|
|
||||||
stateSub := stateFeed.Subscribe(stateChannel)
|
|
||||||
go func() {
|
go func() {
|
||||||
select {
|
_, err := s.clockWaiter.WaitForClock(s.ctx)
|
||||||
case <-s.ctx.Done():
|
if err != nil {
|
||||||
stateSub.Unsubscribe()
|
log.WithError(err).Error("spawnLateBlockTasksLoop encountered an error waiting for initialization")
|
||||||
return
|
return
|
||||||
case <-stateChannel:
|
|
||||||
stateSub.Unsubscribe()
|
|
||||||
break
|
|
||||||
}
|
}
|
||||||
|
attThreshold := params.BeaconConfig().SecondsPerSlot / 3
|
||||||
ticker := time.NewTicker(time.Second)
|
ticker := slots.NewSlotTickerWithOffset(s.genesisTime, time.Duration(attThreshold)*time.Second, params.BeaconConfig().SecondsPerSlot)
|
||||||
defer ticker.Stop()
|
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
case ti := <-ticker.C:
|
case <-ticker.C():
|
||||||
if !atHalfSlot(ti) {
|
s.lateBlockTasks(s.ctx)
|
||||||
continue
|
|
||||||
}
|
|
||||||
_, id, has := s.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(s.CurrentSlot() + 1)
|
|
||||||
// There exists proposer for next slot, but we haven't called fcu w/ payload attribute yet.
|
|
||||||
if has && id == [8]byte{} {
|
|
||||||
if _, err := s.notifyForkchoiceUpdate(ctx, ¬ifyForkchoiceUpdateArg{
|
|
||||||
headState: s.headState(ctx),
|
|
||||||
headRoot: s.headRoot(),
|
|
||||||
headBlock: s.headBlock().Block(),
|
|
||||||
}); err != nil {
|
|
||||||
log.WithError(err).Error("Could not prepare payload on empty ID")
|
|
||||||
}
|
|
||||||
missedPayloadIDFilledCount.Inc()
|
|
||||||
}
|
|
||||||
case <-s.ctx.Done():
|
case <-s.ctx.Done():
|
||||||
log.Debug("Context closed, exiting routine")
|
log.Debug("Context closed, exiting routine")
|
||||||
return
|
return
|
||||||
@@ -757,8 +673,47 @@ func (s *Service) fillMissingPayloadIDRoutine(ctx context.Context, stateFeed *ev
|
|||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Returns true if time `t` is halfway through the slot in sec.
|
// lateBlockTasks is called 4 seconds into the slot and performs tasks
|
||||||
func atHalfSlot(t time.Time) bool {
|
// related to late blocks. It emits a MissedSlot state feed event.
|
||||||
s := params.BeaconConfig().SecondsPerSlot
|
// It calls FCU and sets the right attributes if we are proposing next slot
|
||||||
return uint64(t.Second())%s == s/2
|
// it also updates the next slot cache to deal with skipped slots.
|
||||||
|
func (s *Service) lateBlockTasks(ctx context.Context) {
|
||||||
|
if s.CurrentSlot() == s.HeadSlot() {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
|
||||||
|
Type: statefeed.MissedSlot,
|
||||||
|
})
|
||||||
|
|
||||||
|
// Head root should be empty when retrieving proposer index for the next slot.
|
||||||
|
_, id, has := s.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(s.CurrentSlot()+1, [32]byte{} /* head root */)
|
||||||
|
// There exists proposer for next slot, but we haven't called fcu w/ payload attribute yet.
|
||||||
|
if (!has && !features.Get().PrepareAllPayloads) || id != [8]byte{} {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
s.headLock.RLock()
|
||||||
|
headBlock, err := s.headBlock()
|
||||||
|
if err != nil {
|
||||||
|
s.headLock.RUnlock()
|
||||||
|
log.WithError(err).Debug("could not perform late block tasks: failed to retrieve head block")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
headRoot := s.headRoot()
|
||||||
|
headState := s.headState(ctx)
|
||||||
|
s.headLock.RUnlock()
|
||||||
|
_, err = s.notifyForkchoiceUpdate(ctx, ¬ifyForkchoiceUpdateArg{
|
||||||
|
headState: headState,
|
||||||
|
headRoot: headRoot,
|
||||||
|
headBlock: headBlock.Block(),
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
log.WithError(err).Debug("could not perform late block tasks: failed to update forkchoice with engine")
|
||||||
|
}
|
||||||
|
lastRoot, lastState := transition.LastCachedState()
|
||||||
|
if lastState == nil {
|
||||||
|
lastRoot, lastState = headRoot[:], headState
|
||||||
|
}
|
||||||
|
if err = transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
|
||||||
|
log.WithError(err).Debug("could not update next slot state cache")
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff.