Mirror of https://github.com/OffchainLabs/prysm.git, synced 2026-01-10 13:58:09 -05:00.

Compare commits: v1.0.0-bet ... v1.0.3 (101 commits)

Commits in this range:
fc7c6776f6, b150acffca, 54a42ce4a8, 9d174d5927, 1b1b36497f, 04615cb97b, b243665d3e, 654ef1afe5, 0dbf5c4e63, c456dcdce8,
10857223d0, 4ef8a0f99e, af0977b8d0, 528272fc66, c5c868d0a6, 622ab94465, ae8a619775, 36b1eb66d5, 639dcb028c, edb40ddea4,
0d2e3d978c, 29c6a0c42c, aefa3e191a, 987205afc6, 3ae4b793e6, 600427bdb0, c7dd33431f, 0cf9800b75, 3cc2ebc5d5, 2cb814648a,
dc897a2007, a051e684ae, 64be627a6d, 6f766ed583, b0dfc46603, 8c3faaa4c7, 4c0db8bca4, 0c5c246ee7, f871e1f3ef, 658dd95313,
57fe012bc2, d62420b940, 11bbea2562, 2172cd60a6, 2a546cc50b, 7d0031ee77, acf49fb38f, 26658a9f1f, 60d99c83eb, 98557e8f5e,
1ba747b3c9, 71ec919306, 34a26740ff, 7e76b02bb7, 519b003fc3, 9a10462c64, ac60ff2bc2, f8a855d168, 74c7733abf, f63e89813d,
c021e2e8bc, c3fc40907d, 3fb78ff575, 7bd97546f0, 5140ceec68, 97ad5cd5fd, 4dc65c5787, 1b012ccfa5, 60cdd69b05, 90a66df529,
c4a1fe4d0d, 8a256de2dd, c3451a6ce9, 4b6441f626, eb7ab16f92, e6ecda5ebe, 095c4d5dd5, 59d63087b1, e1dd532af3, cfed4fa1b5,
7735a083b2, fec469291e, acb47f2920, 925fba0570, 1a72733c53, 2976bf7723, 7c54cfea3f, d3f8599d19, 2034c662af, ad5151f25d,
f75a8efc0d, 39817c0586, 168cffb0dd, 194ee7c439, 5889670cc7, 7449eba612, d85cf028ef, 71c6164c42, 83601245f2, 758ec96d6d,
977e539fe9
.well-known/security.pub (new file, 218 lines)
@@ -0,0 +1,218 @@

-----BEGIN PGP PUBLIC KEY BLOCK-----
(ASCII-armored PGP public key block for the security contacts listed in .well-known/security.txt)
-----END PGP PUBLIC KEY BLOCK-----
@@ -3,24 +3,23 @@ Hash: SHA512

Contact: mailto:security@prysmaticlabs.com
Encryption: openpgp4fpr:0AE0051D647BA3C1A917AF4072E33E4DF1A5036E
Encryption: openpgp4fpr:341396BAFACC28C5082327F889725027FC8EC0D4
Encryption: openpgp4fpr:FEE44615A19049DF0CA0C2735E2B7E5734DFADCB
Encryption: openpgp4fpr:CD08DE68C60B82D3EE2A3F7D95452A701810FEDB
Encryption: openpgp4fpr:317D6E91058F8F3C2303BA7756313E44581297A6
Preferred-Languages: en
Canonical: https://github.com/prysmaticlabs/prysm/tree/master/.well-known/security.txt

-----BEGIN PGP SIGNATURE-----
(the previous clear-signature, ending =5qpD, is replaced by a new signature over the updated file, ending =uW+X)
-----END PGP SIGNATURE-----
@@ -4,7 +4,7 @@ Note: The latest and most up to date documentation can be found on our [docs port

Excited by our work and want to get involved in building out our sharding releases? Or maybe you haven't learned as much about the Ethereum protocol but are a savvy developer?

You can explore our [Open Issues](https://github.com/prysmaticlabs/prysm/issues) in the works for our different releases. Feel free to fork our repo and start creating PRs after assigning yourself to an issue of interest. We are always chatting on [Discord](https://discord.gg/che9auJ) or [Gitter](https://gitter.im/prysmaticlabs/geth-sharding); drop us a line there if you want to get more involved or have any questions on our implementation!
You can explore our [Open Issues](https://github.com/prysmaticlabs/prysm/issues) in the works for our different releases. Feel free to fork our repo and start creating PRs after assigning yourself to an issue of interest. We are always chatting on [Discord](https://discord.gg/CTYGPUJ); drop us a line there if you want to get more involved or have any questions on our implementation!

## Contribution Steps

@@ -62,12 +62,6 @@ Changes that only affect a single file can be tested with

$ go test <file_you_are_working_on>
```

Changes that affect multiple files can be tested with ...

```
$ golangci-lint run && bazel test //...
```

**10. Stage the file or files that you want to commit.**

```
@@ -181,7 +175,7 @@ We consider two types of contributions to our repo and categorize them as follow

### Part-Time Contributors

Anyone can become a part-time contributor and help out on implementing sharding. The responsibilities of a part-time contributor include:
Anyone can become a part-time contributor and help out on implementing eth2. The responsibilities of a part-time contributor include:

- Engaging in Gitter conversations, asking questions on how to begin contributing to the project
- Opening up GitHub issues to express interest in code to implement
@@ -192,8 +186,6 @@ Anyone can become a part-time contributor and help out on implementing sharding.
- Follow up on open PRs
- Have an estimated timeframe to completion and let the core contributors know if a PR will take longer than expected

We do not expect all part-time contributors to be experts on all the latest sharding documentation, but all contributors should at least be familiarized with our sharding [README.md](https://github.com/prysmaticlabs/prysm/blob/master/validator/README.md) and have gone through the required Ethereum readings as posted on our [READINGS.md](https://github.com/prysmaticlabs/prysm/blob/master/docs/READINGS.md) document.

### Core Contributors

Core contributors are remote contractors of Prysmatic Labs, LLC. and are considered critical team members of our organization. Core devs have all of the responsibilities of part-time contributors plus the majority of the following:
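To make the `go test` step above concrete, here is a minimal table-driven test in the style used elsewhere in this changeset. The package and the `Max` helper are hypothetical, introduced only for illustration; `shared/testutil/assert` is the assertion package that the repository's own tests import.

```go
package mathutil

import (
	"testing"

	"github.com/prysmaticlabs/prysm/shared/testutil/assert"
)

// Max is a tiny, hypothetical helper used only to demonstrate the test layout.
func Max(a, b uint64) uint64 {
	if a > b {
		return a
	}
	return b
}

// TestMax runs a small table of cases; `go test` on this file exercises it.
func TestMax(t *testing.T) {
	tests := []struct {
		a, b, want uint64
	}{
		{a: 1, b: 2, want: 2},
		{a: 7, b: 3, want: 7},
	}
	for _, tt := range tests {
		assert.Equal(t, tt.want, Max(tt.a, tt.b))
	}
}
```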
README.md (22 lines changed)
@@ -1,20 +1,28 @@

# Prysm: An Ethereum 2.0 Client Written in Go
# Prysm: An Ethereum 2.0 Implementation Written in Go

[](https://buildkite.com/prysmatic-labs/prysm)
[](https://goreportcard.com/report/github.com/prysmaticlabs/prysm)
[](https://github.com/ethereum/eth2.0-specs/tree/v0.12.3)
[](https://discord.gg/KSA7rPr)
[](https://github.com/ethereum/eth2.0-specs/tree/v1.0.0)
[](https://discord.gg/CTYGPUJ)

This is the core repository for Prysm, a [Golang](https://golang.org/) implementation of the Ethereum 2.0 client specifications developed by [Prysmatic Labs](https://prysmaticlabs.com).
This is the core repository for Prysm, a [Golang](https://golang.org/) implementation of the Ethereum 2.0 specification, developed by [Prysmatic Labs](https://prysmaticlabs.com).

### Getting Started
A detailed set of installation and usage instructions as well as breakdowns of each individual component are available in the [official documentation portal](https://docs.prylabs.network). If you still have questions, feel free to stop by our [Discord](https://discord.gg/KSA7rPr).

### Come join the testnet!
Participation is now open to the public for our Ethereum 2.0 phase 0 testnet release. Visit [prylabs.net](https://prylabs.net) for more information on the project or to sign up as a validator on the network. You can visualize the nodes in the network on [eth2stats.io](https://eth2stats.io), explore validator rewards/penalties via Bitfly's block explorer: [beaconcha.in](https://beaconcha.in), and follow the latest blocks added to the chain on [beaconscan](https://beaconscan.com).
A detailed set of installation and usage instructions as well as breakdowns of each individual component are available in the [official documentation portal](https://docs.prylabs.network). If you still have questions, feel free to stop by our [Discord](https://discord.gg/CTYGPUJ).

### Staking on Mainnet

To participate in staking, you can join the [official eth2 launchpad](https://launchpad.ethereum.org). The launchpad is the only recommended way to become a validator on mainnet. You can visualize the nodes in the network on [eth2stats.io](https://eth2stats.io), explore validator rewards/penalties via Bitfly's block explorer: [beaconcha.in](https://beaconcha.in), and follow the latest blocks added to the chain on [beaconscan](https://beaconscan.com).

## Contributing

Want to get involved? Check out our [Contribution Guide](https://docs.prylabs.network/docs/contribute/contribution-guidelines/) to learn more!

## License

[GNU General Public License v3.0](https://www.gnu.org/licenses/gpl-3.0.en.html)

## Legal Disclaimer

[Terms of Use](/TERMS_OF_SERVICE.md)
TESTNET.md (43 lines removed)
@@ -1,43 +0,0 @@

# Testnet

The Prysmatic Labs test network is available for anyone to join. The easiest way to participate is by joining through the website, https://prylabs.net.

## Interop

For developers looking to connect a client other than Prysm to the test network, here is the relevant information for compatibility.

**Spec version** - [v0.8.3](https://github.com/ethereum/eth2.0-specs/tree/v0.8.3)

**ETH 1 Deposit Contract Address** - See https://prylabs.net/contract. This contract is deployed on the [goerli](https://goerli.net/) network.

**Genesis time** - The ETH1 block time in which the 64th deposit to start ETH2 was included. This is NOT midnight of the next day as required by spec.

### ETH 2 Configuration

Use the [minimal config](https://github.com/ethereum/eth2.0-specs/blob/v0.8.3/configs/minimal.yaml) with the following changes.

| field | value |
|-------|-------|
| MIN_DEPOSIT_AMOUNT | 100 |
| MAX_EFFECTIVE_BALANCE | 3.2 * 1e9 |
| EJECTION_BALANCE | 1.6 * 1e9 |
| EFFECTIVE_BALANCE_INCREMENT | 0.1 * 1e9 |
| ETH1_FOLLOW_DISTANCE | 16 |
| GENESIS_FORK_VERSION | See [latest code](https://github.com/prysmaticlabs/prysm/blob/master/shared/params/config.go#L236) |

These parameters reduce the minimal config to 1/10 of the required ETH.
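For client developers wiring these overrides into Go, a minimal sketch built on prysm's `shared/params` package. The function and field names (`MinimalSpecConfig`, `OverrideBeaconConfig`, and the config fields) are assumptions based on that package's conventions, not taken from this document; values are in Gwei.

```go
package main

import "github.com/prysmaticlabs/prysm/shared/params"

// useInteropTestnetConfig applies the overrides from the table above on top
// of the eth2 minimal config. Field names are assumed from shared/params.
func useInteropTestnetConfig() {
	cfg := params.MinimalSpecConfig()
	cfg.MinDepositAmount = 100
	cfg.MaxEffectiveBalance = 3200000000       // 3.2 * 1e9
	cfg.EjectionBalance = 1600000000           // 1.6 * 1e9
	cfg.EffectiveBalanceIncrement = 100000000  // 0.1 * 1e9
	cfg.Eth1FollowDistance = 16
	params.OverrideBeaconConfig(cfg)
}
```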
We have a genesis.ssz file available for download [here](https://prysmaticlabs.com/uploads/genesis.ssz).

### Connecting to the network

We have a libp2p bootstrap node available at `/dns4/prylabs.net/tcp/30001/p2p/16Uiu2HAm7Qwe19vz9WzD2Mxn7fXd1vgHHp4iccuyq7TxwRXoAGfc`.

Some of the Prysmatic Labs hosted nodes are behind a libp2p relay, so your libp2p implementation should understand this functionality.

### Other

Undoubtedly, you will have bugs. Reach out to us on [Discord](https://discord.gg/KSA7rPr) and be sure to capture issues on GitHub at https://github.com/prysmaticlabs/prysm/issues.

If you have instructions for your client, we would love to attempt this on your behalf. Kindly send over the instructions via GitHub issue, PR, email to team@prysmaticlabs.com, or Discord.
@@ -139,9 +139,9 @@ load(

container_pull(
name = "alpine_cc_linux_amd64",
digest = "sha256:3f7f4dfcb6dceac3a902f36609cc232262e49f5656a6dc4bb3da89e35fecc8a5",
digest = "sha256:752aa0c9a88461ffc50c5267bb7497ef03a303e38b2c8f7f2ded9bebe5f1f00e",
registry = "index.docker.io",
repository = "fasibio/alpine-libgcc",
repository = "pinglamb/alpine-glibc",
)

container_pull(
@@ -352,9 +352,9 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "6bb16ff0dc9348090cc31a9ea453643d32b617e66ac6e7bb38985d530070631b",
sha256 = "117f5366af9cf009354ed1abe02f906168158473461d69c8056984b9b0292619",
urls = [
"https://github.com/prysmaticlabs/prysm-web-ui/releases/download/0.0.2-alpha/prysm-web-ui.tar.gz",
"https://github.com/prysmaticlabs/prysm-web-ui/releases/download/v1.0.0-beta.2/prysm-web-ui.tar.gz",
],
)
@@ -1,7 +1,7 @@

load("@prysm//tools/go:def.bzl", "go_library")
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_test")
load("@io_bazel_rules_docker//go:image.bzl", "go_image")
load("@io_bazel_rules_docker//container:container.bzl", "container_bundle")
load("@io_bazel_rules_docker//container:container.bzl", "container_bundle", "container_image")
load("//tools:go_image.bzl", "go_image_alpine", "go_image_debug")
load("@io_bazel_rules_docker//contrib:push-all.bzl", "docker_push")

@@ -45,13 +45,19 @@ go_image(
visibility = ["//visibility:private"],
)

container_image(
name = "image_with_creation_time",
base = "image",
stamp = True,
)

container_bundle(
name = "image_bundle",
images = {
"gcr.io/prysmaticlabs/prysm/beacon-chain:latest": ":image",
"gcr.io/prysmaticlabs/prysm/beacon-chain:{DOCKER_TAG}": ":image",
"index.docker.io/prysmaticlabs/prysm-beacon-chain:latest": ":image",
"index.docker.io/prysmaticlabs/prysm-beacon-chain:{DOCKER_TAG}": ":image",
"gcr.io/prysmaticlabs/prysm/beacon-chain:latest": ":image_with_creation_time",
"gcr.io/prysmaticlabs/prysm/beacon-chain:{DOCKER_TAG}": ":image_with_creation_time",
"index.docker.io/prysmaticlabs/prysm-beacon-chain:latest": ":image_with_creation_time",
"index.docker.io/prysmaticlabs/prysm-beacon-chain:{DOCKER_TAG}": ":image_with_creation_time",
},
tags = ["manual"],
)
@@ -1,10 +1,9 @@

# Prysmatic Labs Beacon Chain Implementation

This is the main project folder for the beacon chain implementation of Ethereum Serenity in Golang by [Prysmatic Labs](https://prysmaticlabs.com). Before you begin, check out our [Contribution Guidelines](https://github.com/prysmaticlabs/prysm/blob/master/CONTRIBUTING.md) and join our active chat room on Discord or Gitter below:
This is the main project folder for the beacon chain implementation of eth2 written in Go by [Prysmatic Labs](https://prysmaticlabs.com).

[](https://discord.gg/KSA7rPr)
[](https://gitter.im/prysmaticlabs/prysm?badge&utm_medium=badge&utm_campaign=pr-badge)
You can also read our main [README](https://github.com/prysmaticlabs/prysm/blob/master/README.md) and join our active chat room on Discord.

Also, read the latest beacon chain [design spec](https://github.com/ethereum/eth2.0-specs/blob/dev/specs/phase0/beacon-chain.md), this design spec serves as a source of truth for the beacon chain implementation we follow at prysmatic labs.
Check out the [FAQs](https://notes.ethereum.org/9MMuzWeFTTSg-3Tz_YeiBA?view). Refer this page on [why](http://email.mg2.substack.com/c/eJwlj9GOhCAMRb9G3jRQQPGBh5mM8xsbhKrsDGIAM9m_X9xN2qZtbpt7rCm4xvSjj5gLOTOmL-809CMbKXFaOKakIl4DZYr2AGyQIGjHOnWH22OiYnoIxmDijaBhhS6fcy7GvjobA9m0mSXOcnZq5GBqLkilXBZhBsus5ZK89VbKkRt-a-BZI6DzZ7iur1lQ953KJ9bemnxgahuQU9XJu6pFPdu8meT8vragzEjpMCwMGLlgLo6h5z1JumQTu4IJd4v15xqMf_8ZLP_Y1bSLdbnrD-LL71i2Kj7DLxaWWF4)
we are combining sharding and casper together.
[](https://discord.gg/CTYGPUJ)

Also, read the official beacon chain [specification](https://github.com/ethereum/eth2.0-specs/blob/master/specs/phase0/beacon-chain.md), this design spec serves as a source of truth for the beacon chain implementation we follow at Prysmatic Labs.
@@ -4,6 +4,7 @@ import (
"bytes"
"context"
"fmt"
"time"

"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
@@ -99,6 +100,14 @@ func (s *Service) processAttestation(subscribedToStateEvents chan struct{}) {
<-stateChannel
stateSub.Unsubscribe()

if s.genesisTime.IsZero() {
log.Warn("ProcessAttestations routine waiting for genesis time")
for s.genesisTime.IsZero() {
time.Sleep(1 * time.Second)
}
log.Warn("Genesis time received, now available to process attestations")
}

st := slotutil.GetSlotTicker(s.genesisTime, params.BeaconConfig().SecondsPerSlot)
for {
select {
@@ -339,6 +339,9 @@ func (s *Service) Stop() error {
// Status always returns nil unless there is an error condition that causes
// this service to be unhealthy.
func (s *Service) Status() error {
if s.genesisRoot == params.BeaconConfig().ZeroHash {
return errors.New("genesis state has not been created")
}
if runtime.NumGoroutine() > s.maxRoutines {
return fmt.Errorf("too many goroutines %d", runtime.NumGoroutine())
}
@@ -244,7 +244,7 @@ func (dc *DepositCache) PruneProofs(ctx context.Context, untilDepositIndex int64
dc.depositsLock.Lock()
defer dc.depositsLock.Unlock()

if untilDepositIndex > int64(len(dc.deposits)) {
if untilDepositIndex >= int64(len(dc.deposits)) {
untilDepositIndex = int64(len(dc.deposits) - 1)
}
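The one-character change above (`>` to `>=`) is a boundary fix: when the prune index equals the number of stored deposits, it must be clamped to the last element so the final deposit's proof is pruned as well. A minimal, self-contained sketch of that clamp, with illustrative names:

```go
package main

import "fmt"

// clampPruneIndex mirrors the boundary check above: any index at or beyond
// the number of stored deposits is clamped to the last valid position.
func clampPruneIndex(untilDepositIndex int64, numDeposits int) int64 {
	if untilDepositIndex >= int64(numDeposits) {
		untilDepositIndex = int64(numDeposits - 1)
	}
	return untilDepositIndex
}

func main() {
	// With 4 deposits, pruning "up to index 4" now targets index 3 (the last
	// deposit) instead of being skipped by a strict `>` comparison.
	fmt.Println(clampPruneIndex(4, 4)) // 3
}
```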
@@ -700,6 +700,49 @@ func TestPruneProofs_PruneAllWhenDepositIndexTooBig(t *testing.T) {
assert.DeepEqual(t, ([][]byte)(nil), dc.deposits[3].Deposit.Proof)
}

func TestPruneProofs_CorrectlyHandleLastIndex(t *testing.T) {
dc, err := New()
require.NoError(t, err)

deposits := []struct {
blkNum uint64
deposit *ethpb.Deposit
index int64
}{
{
blkNum: 0,
deposit: &ethpb.Deposit{Proof: makeDepositProof()},
index: 0,
},
{
blkNum: 0,
deposit: &ethpb.Deposit{Proof: makeDepositProof()},
index: 1,
},
{
blkNum: 0,
deposit: &ethpb.Deposit{Proof: makeDepositProof()},
index: 2,
},
{
blkNum: 0,
deposit: &ethpb.Deposit{Proof: makeDepositProof()},
index: 3,
},
}

for _, ins := range deposits {
dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{})
}

require.NoError(t, dc.PruneProofs(context.Background(), 4))

assert.DeepEqual(t, ([][]byte)(nil), dc.deposits[0].Deposit.Proof)
assert.DeepEqual(t, ([][]byte)(nil), dc.deposits[1].Deposit.Proof)
assert.DeepEqual(t, ([][]byte)(nil), dc.deposits[2].Deposit.Proof)
assert.DeepEqual(t, ([][]byte)(nil), dc.deposits[3].Deposit.Proof)
}

func makeDepositProof() [][]byte {
proof := make([][]byte, int(params.BeaconConfig().DepositContractTreeDepth)+1)
for i := range proof {
@@ -11,6 +11,7 @@ go_library(
visibility = ["//beacon-chain:__subpackages__"],
deps = [
"//shared/cmd:go_default_library",
"//shared/params:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_urfave_cli_v2//:go_default_library",
],
@@ -3,6 +3,7 @@
package flags

import (
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/urfave/cli/v2"
)

@@ -17,7 +18,7 @@ var (
DepositContractFlag = &cli.StringFlag{
Name: "deposit-contract",
Usage: "Deposit contract address. Beacon chain node will listen logs coming from the deposit contract to determine when validator is eligible to participate.",
Value: "0x07b39F4fDE4A38bACe212b546dAc87C58DfE3fDC", // Medalla deposit contract address.
Value: params.BeaconNetworkConfig().DepositContractAddress,
}
// RPCHost defines the host on which the RPC server should listen.
RPCHost = &cli.StringFlag{
@@ -69,7 +70,7 @@ var (
Name: "grpc-gateway-corsdomain",
Usage: "Comma separated list of domains from which to accept cross origin requests " +
"(browser enforced). This flag has no effect if not used with --grpc-gateway-port.",
Value: "http://localhost:4242,http://127.0.0.1:4242,http://localhost:4200",
Value: "http://localhost:4200,http://localhost:7500,http://127.0.0.1:4200,http://127.0.0.1:7500,http://0.0.0.0:4200,http://0.0.0.0:7500",
}
// MinSyncPeers specifies the required number of successful peer handshakes in order
// to start syncing with external peers.
@@ -130,6 +131,10 @@ var (
Name: "enable-debug-rpc-endpoints",
Usage: "Enables the debug rpc service, containing utility endpoints such as /eth/v1alpha1/beacon/state.",
}
SubscribeToAllSubnets = &cli.BoolFlag{
Name: "subscribe-all-subnets",
Usage: "Subscribe to all possible attestation subnets.",
}
// HistoricalSlasherNode is a set of beacon node flags required for performing historical detection with a slasher.
HistoricalSlasherNode = &cli.BoolFlag{
Name: "historical-slasher-node",
@@ -162,4 +167,10 @@ var (
Name: "db-backup-output-dir",
Usage: "Output directory for db backups",
}
// Eth1HeaderReqLimit defines a flag to set the maximum number of headers that a deposit log query can fetch. If none is set, 1000 will be the limit.
Eth1HeaderReqLimit = &cli.Uint64Flag{
Name: "eth1-header-req-limit",
Usage: "Sets the maximum number of headers that a deposit log query can fetch.",
Value: uint64(1000),
}
)
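For readers unfamiliar with how these urfave/cli flags are consumed, a small self-contained sketch of defining a `cli.Uint64Flag` with a default and reading it in a command action. The flag name mirrors `eth1-header-req-limit` above; the app and handler are illustrative, not part of this diff.

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/urfave/cli/v2"
)

// eth1HeaderReqLimit mirrors the flag defined above: a uint64 flag with a
// default of 1000 that callers read by name from the cli context.
var eth1HeaderReqLimit = &cli.Uint64Flag{
	Name:  "eth1-header-req-limit",
	Usage: "Sets the maximum number of headers that a deposit log query can fetch.",
	Value: 1000,
}

func main() {
	app := &cli.App{
		Flags: []cli.Flag{eth1HeaderReqLimit},
		Action: func(ctx *cli.Context) error {
			// Same read pattern as b.cliCtx.Uint64(flags.Eth1HeaderReqLimit.Name)
			// in the node wiring shown later in this changeset.
			limit := ctx.Uint64(eth1HeaderReqLimit.Name)
			fmt.Println("eth1 header request limit:", limit)
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}
```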
@@ -12,6 +12,7 @@ type GlobalFlags struct {
HeadSync bool
DisableSync bool
DisableDiscv5 bool
SubscribeToAllSubnets bool
MinimumSyncPeers int
BlockBatchLimit int
BlockBatchLimitBurstFactor int
@@ -44,6 +45,10 @@ func ConfigureGlobalFlags(ctx *cli.Context) {
log.Warn("Using Disable Sync flag, using this flag on a live network might lead to adverse consequences.")
cfg.DisableSync = true
}
if ctx.Bool(SubscribeToAllSubnets.Name) {
log.Warn("Subscribing to All Attestation Subnets")
cfg.SubscribeToAllSubnets = true
}
cfg.DisableDiscv5 = ctx.Bool(DisableDiscv5.Name)
cfg.BlockBatchLimit = ctx.Int(BlockBatchLimit.Name)
cfg.BlockBatchLimitBurstFactor = ctx.Int(BlockBatchLimitBurstFactor.Name)
@@ -62,6 +62,7 @@ func (g *Gateway) Start() {
ethpb.RegisterNodeHandler,
ethpb.RegisterBeaconChainHandler,
ethpb.RegisterBeaconNodeValidatorHandler,
pbrpc.RegisterHealthHandler,
}
if g.enableDebugRPCEndpoints {
handlers = append(handlers, pbrpc.RegisterDebugHandler)
@@ -50,12 +50,14 @@ var appFlags = []cli.Flag{
flags.InteropGenesisTimeFlag,
flags.SlotsPerArchivedPoint,
flags.EnableDebugRPCEndpoints,
flags.SubscribeToAllSubnets,
flags.EnableBackupWebhookFlag,
flags.BackupWebhookOutputDir,
flags.HistoricalSlasherNode,
flags.ChainID,
flags.NetworkID,
flags.WeakSubjectivityCheckpt,
flags.Eth1HeaderReqLimit,
cmd.MinimalConfigFlag,
cmd.E2EConfigFlag,
cmd.RPCMaxPageSizeFlag,
@@ -486,12 +486,13 @@ func (b *BeaconNode) registerPOWChainService() error {
}

cfg := &powchain.Web3ServiceConfig{
HTTPEndPoint: b.cliCtx.String(flags.HTTPWeb3ProviderFlag.Name),
DepositContract: common.HexToAddress(depAddress),
BeaconDB: b.db,
DepositCache: b.depositCache,
StateNotifier: b,
StateGen: b.stateGen,
HTTPEndPoint: b.cliCtx.String(flags.HTTPWeb3ProviderFlag.Name),
DepositContract: common.HexToAddress(depAddress),
BeaconDB: b.db,
DepositCache: b.depositCache,
StateNotifier: b,
StateGen: b.stateGen,
Eth1HeaderReqLimit: b.cliCtx.Uint64(flags.Eth1HeaderReqLimit.Name),
}
web3Service, err := powchain.NewService(b.ctx, cfg)
if err != nil {
@@ -507,7 +508,10 @@ func (b *BeaconNode) registerPOWChainService() error {
}
}
if len(knownContract) > 0 && !bytes.Equal(cfg.DepositContract.Bytes(), knownContract) {
return fmt.Errorf("database contract is %#x but tried to run with %#x", knownContract, cfg.DepositContract.Bytes())
return fmt.Errorf("database contract is %#x but tried to run with %#x. This likely means "+
"you are trying to run on a different network than what the database contains. You can run once with "+
"'--clear-db' to wipe the old database or use an alternative data directory with '--datadir'",
knownContract, cfg.DepositContract.Bytes())
}

log.Infof("Deposit contract: %#x", cfg.DepositContract.Bytes())
@@ -598,6 +602,8 @@ func (b *BeaconNode) registerRPCService() error {

host := b.cliCtx.String(flags.RPCHost.Name)
port := b.cliCtx.String(flags.RPCPort.Name)
beaconMonitoringHost := b.cliCtx.String(cmd.MonitoringHostFlag.Name)
beaconMonitoringPort := b.cliCtx.Int(flags.MonitoringPortFlag.Name)
cert := b.cliCtx.String(flags.CertFlag.Name)
key := b.cliCtx.String(flags.KeyFlag.Name)
mockEth1DataVotes := b.cliCtx.Bool(flags.InteropMockEth1DataVotesFlag.Name)
@@ -607,6 +613,8 @@ func (b *BeaconNode) registerRPCService() error {
rpcService := rpc.NewService(b.ctx, &rpc.Config{
Host: host,
Port: port,
BeaconMonitoringHost: beaconMonitoringHost,
BeaconMonitoringPort: beaconMonitoringPort,
CertFlag: cert,
KeyFlag: key,
BeaconDB: b.db,
@@ -6,12 +6,6 @@ import (
)

var (
numPendingAttesterSlashingFailedSigVerify = promauto.NewCounter(
prometheus.CounterOpts{
Name: "pending_attester_slashing_fail_sig_verify_total",
Help: "Times an pending attester slashing fails sig verification",
},
)
numPendingAttesterSlashings = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "num_pending_attester_slashings",
@@ -24,18 +18,6 @@ var (
Help: "Number of attester slashings included in blocks",
},
)
attesterSlashingReattempts = promauto.NewCounter(
prometheus.CounterOpts{
Name: "attester_slashing_reattempts_total",
Help: "Times an attester slashing for an already slashed validator is received",
},
)
numPendingProposerSlashingFailedSigVerify = promauto.NewCounter(
prometheus.CounterOpts{
Name: "pending_proposer_slashing_fail_sig_verify_total",
Help: "Times an pending proposer slashing fails sig verification",
},
)
numPendingProposerSlashings = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "num_pending_proposer_slashings",
@@ -48,10 +30,4 @@ var (
Help: "Number of proposer slashings included in blocks",
},
)
proposerSlashingReattempts = promauto.NewCounter(
prometheus.CounterOpts{
Name: "proposer_slashing_reattempts_total",
Help: "Times a proposer slashing for an already slashed validator is received",
},
)
)
@@ -124,7 +124,6 @@ func (p *Pool) InsertAttesterSlashing(
defer span.End()

if err := blocks.VerifyAttesterSlashing(ctx, state, slashing); err != nil {
numPendingAttesterSlashingFailedSigVerify.Inc()
return errors.Wrap(err, "could not verify attester slashing")
}

@@ -139,7 +138,6 @@ func (p *Pool) InsertAttesterSlashing(
// If the validator has already exited, has already been slashed, or if its index
// has been recently included in the pool of slashings, skip including this indice.
if !ok {
attesterSlashingReattempts.Inc()
cantSlash = append(cantSlash, val)
continue
}
@@ -150,7 +148,6 @@ func (p *Pool) InsertAttesterSlashing(
return p.pendingAttesterSlashing[i].validatorToSlash >= val
})
if found != len(p.pendingAttesterSlashing) && p.pendingAttesterSlashing[found].validatorToSlash == val {
attesterSlashingReattempts.Inc()
cantSlash = append(cantSlash, val)
continue
}
@@ -185,7 +182,6 @@ func (p *Pool) InsertProposerSlashing(
defer span.End()

if err := blocks.VerifyProposerSlashing(state, slashing); err != nil {
numPendingProposerSlashingFailedSigVerify.Inc()
return errors.Wrap(err, "could not verify proposer slashing")
}

@@ -198,7 +194,6 @@ func (p *Pool) InsertProposerSlashing(
// has been recently included in the pool of slashings, do not process this new
// slashing.
if !ok {
proposerSlashingReattempts.Inc()
return fmt.Errorf("validator at index %d cannot be slashed", idx)
}
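The `found := ...` check above relies on the pending-slashing slice being kept sorted by validator index, so `sort.Search` can detect an existing entry before inserting a duplicate. A self-contained sketch of that pattern with illustrative names (`pending`, `insertIfAbsent`), not the pool's actual types:

```go
package main

import (
	"fmt"
	"sort"
)

// insertIfAbsent keeps `pending` sorted and only inserts a validator index
// that is not already tracked, mirroring the dedup check in the pool above.
func insertIfAbsent(pending []uint64, val uint64) ([]uint64, bool) {
	i := sort.Search(len(pending), func(j int) bool { return pending[j] >= val })
	if i != len(pending) && pending[i] == val {
		return pending, false // already pending, skip (a "reattempt")
	}
	pending = append(pending, 0)
	copy(pending[i+1:], pending[i:])
	pending[i] = val
	return pending, true
}

func main() {
	pending := []uint64{2, 5, 9}
	pending, ok := insertIfAbsent(pending, 5)
	fmt.Println(pending, ok) // [2 5 9] false
	pending, ok = insertIfAbsent(pending, 7)
	fmt.Println(pending, ok) // [2 5 7 9] true
}
```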
@@ -10,8 +10,8 @@ import (

const (
// overlay parameters
gossipSubD = 6 // topic stable mesh target count
gossipSubDlo = 5 // topic stable mesh low watermark
gossipSubD = 8 // topic stable mesh target count
gossipSubDlo = 6 // topic stable mesh low watermark
gossipSubDhi = 12 // topic stable mesh high watermark

// gossip parameters
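The constants above raise the gossipsub mesh degree target from 6 to 8 and the low watermark from 5 to 6, while the high watermark stays at 12. As a rough illustration only, assuming a go-libp2p-pubsub release that exposes `DefaultGossipSubParams` and `WithGossipSubParams` (an assumption about that library's API, not something shown in this diff), overriding the mesh parameters could look like:

```go
package main

import (
	"context"

	"github.com/libp2p/go-libp2p-core/host"
	pubsub "github.com/libp2p/go-libp2p-pubsub"
)

// newGossipSub builds a gossipsub router whose mesh degree matches the
// constants above: D=8, Dlo=6, Dhi=12. The option names are assumed from
// go-libp2p-pubsub, not taken from this changeset.
func newGossipSub(ctx context.Context, h host.Host) (*pubsub.PubSub, error) {
	params := pubsub.DefaultGossipSubParams()
	params.D = 8    // topic stable mesh target count
	params.Dlo = 6  // topic stable mesh low watermark
	params.Dhi = 12 // topic stable mesh high watermark
	return pubsub.NewGossipSub(ctx, h, pubsub.WithGossipSubParams(params))
}
```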
@@ -18,6 +18,7 @@ go_library(
"@com_github_libp2p_go_libp2p_core//network:go_default_library",
"@com_github_libp2p_go_libp2p_core//peer:go_default_library",
"@com_github_multiformats_go_multiaddr//:go_default_library",
"@com_github_multiformats_go_multiaddr//net:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
],
)
@@ -48,10 +48,11 @@ type PeerData struct {
Enr *enr.Record
NextValidTime time.Time
// Chain related data.
ChainState *pb.Status
MetaData *pb.MetaData
ChainStateLastUpdated time.Time
// Scorers related data.
MetaData *pb.MetaData
ChainState *pb.Status
ChainStateLastUpdated time.Time
ChainStateValidationError error
// Scorers internal data.
BadResponses int
ProcessedBlocks uint64
BlockProviderUpdated time.Time
@@ -6,6 +6,7 @@ go_library(
srcs = [
"bad_responses.go",
"block_providers.go",
"peer_status.go",
"service.go",
],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers/scorers",
@@ -13,6 +14,8 @@ go_library(
deps = [
"//beacon-chain/flags:go_default_library",
"//beacon-chain/p2p/peers/peerdata:go_default_library",
"//beacon-chain/p2p/types:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//shared/featureconfig:go_default_library",
"//shared/rand:go_default_library",
"//shared/timeutils:go_default_library",
@@ -25,6 +28,7 @@ go_test(
srcs = [
"bad_responses_test.go",
"block_providers_test.go",
"peer_status_test.go",
"scorers_test.go",
"service_test.go",
],
@@ -33,6 +37,8 @@ go_test(
"//beacon-chain/flags:go_default_library",
"//beacon-chain/p2p/peers:go_default_library",
"//beacon-chain/p2p/peers/peerdata:go_default_library",
"//beacon-chain/p2p/types:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//shared/featureconfig:go_default_library",
"//shared/rand:go_default_library",
"//shared/testutil/assert:go_default_library",
@@ -1,18 +1,17 @@
package scorers

import (
"context"
"time"

"github.com/libp2p/go-libp2p-core/peer"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers/peerdata"
)

var _ Scorer = (*BadResponsesScorer)(nil)

const (
// DefaultBadResponsesThreshold defines how many bad responses to tolerate before peer is deemed bad.
DefaultBadResponsesThreshold = 6
// DefaultBadResponsesWeight is a default weight. Since score represents penalty, it has negative weight.
DefaultBadResponsesWeight = -1.0
// DefaultBadResponsesDecayInterval defines how often to decay previous statistics.
// Every interval bad responses counter will be decremented by 1.
DefaultBadResponsesDecayInterval = time.Hour
@@ -20,7 +19,6 @@ const (

// BadResponsesScorer represents bad responses scoring service.
type BadResponsesScorer struct {
ctx context.Context
config *BadResponsesScorerConfig
store *peerdata.Store
}
@@ -29,29 +27,22 @@ type BadResponsesScorer struct {
type BadResponsesScorerConfig struct {
// Threshold specifies number of bad responses tolerated, before peer is banned.
Threshold int
// Weight defines weight of bad response/threshold ratio on overall score.
Weight float64
// DecayInterval specifies how often bad response stats should be decayed.
DecayInterval time.Duration
}

// newBadResponsesScorer creates new bad responses scoring service.
func newBadResponsesScorer(
ctx context.Context, store *peerdata.Store, config *BadResponsesScorerConfig) *BadResponsesScorer {
func newBadResponsesScorer(store *peerdata.Store, config *BadResponsesScorerConfig) *BadResponsesScorer {
if config == nil {
config = &BadResponsesScorerConfig{}
}
scorer := &BadResponsesScorer{
ctx: ctx,
config: config,
store: store,
}
if scorer.config.Threshold == 0 {
scorer.config.Threshold = DefaultBadResponsesThreshold
}
if scorer.config.Weight == 0.0 {
scorer.config.Weight = DefaultBadResponsesWeight
}
if scorer.config.DecayInterval == 0 {
scorer.config.DecayInterval = DefaultBadResponsesDecayInterval
}
@@ -65,8 +56,11 @@ func (s *BadResponsesScorer) Score(pid peer.ID) float64 {
return s.score(pid)
}

// score is a lock-free version of ScoreBadResponses.
// score is a lock-free version of Score.
func (s *BadResponsesScorer) score(pid peer.ID) float64 {
if s.isBadPeer(pid) {
return BadPeerScore
}
score := float64(0)
peerData, ok := s.store.PeerData(pid)
if !ok {
@@ -74,7 +68,8 @@ func (s *BadResponsesScorer) score(pid peer.ID) float64 {
}
if peerData.BadResponses > 0 {
score = float64(peerData.BadResponses) / float64(s.config.Threshold)
score = score * s.config.Weight
// Since score represents a penalty, negate it.
score *= -1
}
return score
}
@@ -131,7 +126,7 @@ func (s *BadResponsesScorer) isBadPeer(pid peer.ID) bool {
return false
}

// BadPeers returns the peers that are bad.
// BadPeers returns the peers that are considered bad.
func (s *BadResponsesScorer) BadPeers() []peer.ID {
s.store.RLock()
defer s.store.RUnlock()
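As the hunks above show, the bad-responses penalty is now simply the ratio of recorded bad responses to the configured threshold, negated; the explicit `Weight` multiplication is dropped. A standalone sketch of that calculation, with illustrative names and values:

```go
package main

import "fmt"

// badResponsesPenalty mirrors the scoring logic above: the more bad responses
// relative to the threshold, the larger the (negative) penalty.
func badResponsesPenalty(badResponses, threshold int) float64 {
	if badResponses <= 0 {
		return 0
	}
	score := float64(badResponses) / float64(threshold)
	return -score // score represents a penalty, so negate it
}

func main() {
	// With the default threshold of 6, three bad responses cost -0.5.
	fmt.Println(badResponsesPenalty(3, 6)) // -0.5
}
```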
@@ -86,7 +86,6 @@ func TestScorers_BadResponses_Decay(t *testing.T) {
ScorerParams: &scorers.Config{
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
Threshold: maxBadResponses,
Weight: 1,
},
},
})
@@ -1,7 +1,6 @@
package scorers

import (
"context"
"fmt"
"math"
"sort"
@@ -15,6 +14,8 @@ import (
"github.com/prysmaticlabs/prysm/shared/timeutils"
)

var _ Scorer = (*BlockProviderScorer)(nil)

const (
// DefaultBlockProviderProcessedBatchWeight is a default reward weight of a processed batch of blocks.
DefaultBlockProviderProcessedBatchWeight = float64(0.1)
@@ -35,7 +36,6 @@ const (

// BlockProviderScorer represents block provider scoring service.
type BlockProviderScorer struct {
ctx context.Context
config *BlockProviderScorerConfig
store *peerdata.Store
// maxScore is a cached value for maximum attainable block provider score.
@@ -62,13 +62,11 @@ type BlockProviderScorerConfig struct {
}

// newBlockProviderScorer creates block provider scoring service.
func newBlockProviderScorer(
ctx context.Context, store *peerdata.Store, config *BlockProviderScorerConfig) *BlockProviderScorer {
func newBlockProviderScorer(store *peerdata.Store, config *BlockProviderScorerConfig) *BlockProviderScorer {
if config == nil {
config = &BlockProviderScorerConfig{}
}
scorer := &BlockProviderScorer{
ctx: ctx,
config: config,
store: store,
}
@@ -176,6 +174,20 @@ func (s *BlockProviderScorer) processedBlocks(pid peer.ID) uint64 {
return 0
}

// IsBadPeer states if the peer is to be considered bad.
// Block provider scorer cannot guarantee that lower score of a peer is indeed a sign of a bad peer.
// Therefore this scorer never marks peers as bad, and relies on scores to probabilistically sort
// out low-scorers (see WeightSorted method).
func (s *BlockProviderScorer) IsBadPeer(_ peer.ID) bool {
return false
}

// BadPeers returns the peers that are considered bad.
// No peers are considered bad by block providers scorer.
func (s *BlockProviderScorer) BadPeers() []peer.ID {
return []peer.ID{}
}

// Decay updates block provider counters by decaying them.
// This urges peers to keep up the performance to continue getting a high score (and allows
// new peers to contest previously high scoring ones).
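The comments above say low-scoring block providers are not banned outright; instead, selection is weighted by score so weak providers are simply picked less often (the diff references a `WeightSorted` method that is not shown here). A self-contained sketch of score-weighted random selection in that spirit, with illustrative names; it is not the scorer's actual implementation:

```go
package main

import (
	"fmt"
	"math/rand"
)

// pickWeighted returns one peer ID, chosen with probability proportional to
// its score. Higher-scoring block providers are favored, but low scorers can
// still be selected occasionally.
func pickWeighted(r *rand.Rand, scores map[string]float64) string {
	total := 0.0
	for _, s := range scores {
		total += s
	}
	target := r.Float64() * total
	for pid, s := range scores {
		if target -= s; target <= 0 {
			return pid
		}
	}
	return "" // fallback for an empty map or floating-point rounding
}

func main() {
	r := rand.New(rand.NewSource(1))
	scores := map[string]float64{"peerA": 0.9, "peerB": 0.3, "peerC": 0.1}
	fmt.Println(pickWeighted(r, scores))
}
```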
@@ -11,6 +11,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers/scorers"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/rand"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/timeutils"
@@ -438,17 +439,20 @@ func TestScorers_BlockProvider_FormatScorePretty(t *testing.T) {
},
}

peerStatusGen := func() *peers.Status {
return peers.NewStatus(ctx, &peers.StatusConfig{
ScorerParams: &scorers.Config{
BlockProviderScorerConfig: &scorers.BlockProviderScorerConfig{
ProcessedBatchWeight: 0.05,
ProcessedBlocksCap: 20 * batchSize,
Decay: 10 * batchSize,
},
},
})
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
ScorerParams: &scorers.Config{
BlockProviderScorerConfig: &scorers.BlockProviderScorerConfig{
ProcessedBatchWeight: 0.05,
ProcessedBlocksCap: 20 * batchSize,
Decay: 10 * batchSize,
},
},
})
peerStatuses := peerStatusGen()
scorer := peerStatuses.Scorers().BlockProviderScorer()
if tt.update != nil {
tt.update(scorer)
@@ -456,4 +460,29 @@ func TestScorers_BlockProvider_FormatScorePretty(t *testing.T) {
tt.check(scorer)
})
}

t.Run("peer scorer disabled", func(t *testing.T) {
resetCfg := featureconfig.InitWithReset(&featureconfig.Flags{
EnablePeerScorer: false,
})
defer resetCfg()
peerStatuses := peerStatusGen()
scorer := peerStatuses.Scorers().BlockProviderScorer()
assert.Equal(t, "disabled", scorer.FormatScorePretty("peer1"))
})
}

func TestScorers_BlockProvider_BadPeerMarking(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
ScorerParams: &scorers.Config{},
})
scorer := peerStatuses.Scorers().BlockProviderScorer()

assert.Equal(t, false, scorer.IsBadPeer("peer1"), "Unexpected status for unregistered peer")
scorer.IncrementProcessedBlocks("peer1", 64)
assert.Equal(t, false, scorer.IsBadPeer("peer1"))
assert.Equal(t, 0, len(scorer.BadPeers()))
}
beacon-chain/p2p/peers/scorers/peer_status.go (new file, 143 lines)
@@ -0,0 +1,143 @@
|
||||
package scorers
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"math"
|
||||
|
||||
"github.com/libp2p/go-libp2p-core/peer"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers/peerdata"
|
||||
p2ptypes "github.com/prysmaticlabs/prysm/beacon-chain/p2p/types"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/timeutils"
|
||||
)
|
||||
|
||||
var _ Scorer = (*PeerStatusScorer)(nil)

// PeerStatusScorer represents scorer that evaluates peers based on their statuses.
// Peer statuses are updated by regularly polling peers (see sync/rpc_status.go).
type PeerStatusScorer struct {
	config              *PeerStatusScorerConfig
	store               *peerdata.Store
	ourHeadSlot         uint64
	highestPeerHeadSlot uint64
}

// PeerStatusScorerConfig holds configuration parameters for peer status scoring service.
type PeerStatusScorerConfig struct{}

// newPeerStatusScorer creates new peer status scoring service.
func newPeerStatusScorer(store *peerdata.Store, config *PeerStatusScorerConfig) *PeerStatusScorer {
	if config == nil {
		config = &PeerStatusScorerConfig{}
	}
	return &PeerStatusScorer{
		config: config,
		store:  store,
	}
}

// Score returns calculated peer score.
func (s *PeerStatusScorer) Score(pid peer.ID) float64 {
	s.store.RLock()
	defer s.store.RUnlock()
	return s.score(pid)
}

// score is a lock-free version of Score.
func (s *PeerStatusScorer) score(pid peer.ID) float64 {
	if s.isBadPeer(pid) {
		return BadPeerScore
	}
	score := float64(0)
	peerData, ok := s.store.PeerData(pid)
	if !ok || peerData.ChainState == nil {
		return score
	}
	if peerData.ChainState.HeadSlot < s.ourHeadSlot {
		return score
	}
	// Calculate score as a ratio to the known maximum head slot.
	// The closer the current peer's head slot to the maximum, the higher is the calculated score.
	if s.highestPeerHeadSlot > 0 {
		score = float64(peerData.ChainState.HeadSlot) / float64(s.highestPeerHeadSlot)
		return math.Round(score*ScoreRoundingFactor) / ScoreRoundingFactor
	}
	return score
}

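To see the ratio in action, a small sketch mirroring the tests further down in this diff (slot numbers are hypothetical; illustrative only):

package main

import (
	"context"
	"fmt"

	"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers"
	"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers/scorers"
	pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
)

func main() {
	peerStatuses := peers.NewStatus(context.Background(), &peers.StatusConfig{
		ScorerParams: &scorers.Config{},
	})
	scorer := peerStatuses.Scorers().PeerStatusScorer()

	// Our node is at slot 64; peer1 reports head slot 96, peer2 reports 128.
	scorer.SetHeadSlot(64)
	scorer.SetPeerStatus("peer1", &pb.Status{HeadRoot: make([]byte, 32), HeadSlot: 96}, nil)
	scorer.SetPeerStatus("peer2", &pb.Status{HeadRoot: make([]byte, 32), HeadSlot: 128}, nil)

	// peer1 scores 96/128 = 0.75 relative to the highest known head slot
	// (rounded via ScoreRoundingFactor); peer2 scores 1.0.
	fmt.Println(scorer.Score("peer1"), scorer.Score("peer2"))
}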
// IsBadPeer states if the peer is to be considered bad.
|
||||
func (s *PeerStatusScorer) IsBadPeer(pid peer.ID) bool {
|
||||
s.store.RLock()
|
||||
defer s.store.RUnlock()
|
||||
return s.isBadPeer(pid)
|
||||
}
|
||||
|
||||
// isBadPeer is a lock-free version of IsBadPeer.
|
||||
func (s *PeerStatusScorer) isBadPeer(pid peer.ID) bool {
|
||||
peerData, ok := s.store.PeerData(pid)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
// Mark peer as bad, if the latest error is one of the terminal ones.
|
||||
terminalErrs := []error{
|
||||
p2ptypes.ErrWrongForkDigestVersion,
|
||||
}
|
||||
for _, err := range terminalErrs {
|
||||
if errors.Is(peerData.ChainStateValidationError, err) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// BadPeers returns the peers that are considered bad.
|
||||
func (s *PeerStatusScorer) BadPeers() []peer.ID {
|
||||
s.store.RLock()
|
||||
defer s.store.RUnlock()
|
||||
|
||||
badPeers := make([]peer.ID, 0)
|
||||
for pid := range s.store.Peers() {
|
||||
if s.isBadPeer(pid) {
|
||||
badPeers = append(badPeers, pid)
|
||||
}
|
||||
}
|
||||
return badPeers
|
||||
}
|
||||
|
||||
// SetPeerStatus sets chain state data for a given peer.
|
||||
func (s *PeerStatusScorer) SetPeerStatus(pid peer.ID, chainState *pb.Status, validationError error) {
|
||||
s.store.Lock()
|
||||
defer s.store.Unlock()
|
||||
|
||||
peerData := s.store.PeerDataGetOrCreate(pid)
|
||||
peerData.ChainState = chainState
|
||||
peerData.ChainStateLastUpdated = timeutils.Now()
|
||||
peerData.ChainStateValidationError = validationError
|
||||
|
||||
// Update maximum known head slot (scores will be calculated with respect to that maximum value).
|
||||
if chainState != nil && chainState.HeadSlot > s.highestPeerHeadSlot {
|
||||
s.highestPeerHeadSlot = chainState.HeadSlot
|
||||
}
|
||||
}
|
||||
|
||||
// PeerStatus gets the chain state of the given remote peer.
|
||||
// This can return nil if there is no known chain state for the peer.
|
||||
// This will error if the peer does not exist.
|
||||
func (s *PeerStatusScorer) PeerStatus(pid peer.ID) (*pb.Status, error) {
|
||||
s.store.RLock()
|
||||
defer s.store.RUnlock()
|
||||
return s.peerStatus(pid)
|
||||
}
|
||||
|
||||
// peerStatus is a lock-free version of PeerStatus.
|
||||
func (s *PeerStatusScorer) peerStatus(pid peer.ID) (*pb.Status, error) {
|
||||
if peerData, ok := s.store.PeerData(pid); ok {
|
||||
return peerData.ChainState, nil
|
||||
}
|
||||
return nil, peerdata.ErrPeerUnknown
|
||||
}
|
||||
|
||||
// SetHeadSlot updates known head slot.
|
||||
func (s *PeerStatusScorer) SetHeadSlot(slot uint64) {
|
||||
s.ourHeadSlot = slot
|
||||
}
|
||||
beacon-chain/p2p/peers/scorers/peer_status_test.go (new file, 197 lines)
@@ -0,0 +1,197 @@
|
||||
package scorers_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/libp2p/go-libp2p-core/peer"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers/peerdata"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers/scorers"
|
||||
p2ptypes "github.com/prysmaticlabs/prysm/beacon-chain/p2p/types"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
)
|
||||
|
||||
func TestScorers_PeerStatus_Score(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
update func(scorer *scorers.PeerStatusScorer)
|
||||
check func(scorer *scorers.PeerStatusScorer)
|
||||
}{
|
||||
{
|
||||
name: "nonexistent peer",
|
||||
update: func(scorer *scorers.PeerStatusScorer) {
|
||||
scorer.SetHeadSlot(64)
|
||||
},
|
||||
check: func(scorer *scorers.PeerStatusScorer) {
|
||||
assert.Equal(t, 0.0, scorer.Score("peer1"), "Unexpected score")
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "existent bad peer",
|
||||
update: func(scorer *scorers.PeerStatusScorer) {
|
||||
scorer.SetHeadSlot(0)
|
||||
scorer.SetPeerStatus("peer1", &pb.Status{
|
||||
HeadRoot: make([]byte, 32),
|
||||
HeadSlot: 64,
|
||||
}, p2ptypes.ErrWrongForkDigestVersion)
|
||||
},
|
||||
check: func(scorer *scorers.PeerStatusScorer) {
|
||||
assert.Equal(t, scorers.BadPeerScore, scorer.Score("peer1"), "Unexpected score")
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "existent peer no head slot for the host node is known",
|
||||
update: func(scorer *scorers.PeerStatusScorer) {
|
||||
scorer.SetHeadSlot(0)
|
||||
scorer.SetPeerStatus("peer1", &pb.Status{
|
||||
HeadRoot: make([]byte, 32),
|
||||
HeadSlot: 64,
|
||||
}, nil)
|
||||
},
|
||||
check: func(scorer *scorers.PeerStatusScorer) {
|
||||
assert.Equal(t, 1.0, scorer.Score("peer1"), "Unexpected score")
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "existent peer head is before ours",
|
||||
update: func(scorer *scorers.PeerStatusScorer) {
|
||||
scorer.SetHeadSlot(128)
|
||||
scorer.SetPeerStatus("peer1", &pb.Status{
|
||||
HeadRoot: make([]byte, 32),
|
||||
HeadSlot: 64,
|
||||
}, nil)
|
||||
},
|
||||
check: func(scorer *scorers.PeerStatusScorer) {
|
||||
assert.Equal(t, 0.0, scorer.Score("peer1"), "Unexpected score")
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "existent peer partial score",
|
||||
update: func(scorer *scorers.PeerStatusScorer) {
|
||||
headSlot := uint64(128)
|
||||
scorer.SetHeadSlot(headSlot)
|
||||
scorer.SetPeerStatus("peer1", &pb.Status{
|
||||
HeadRoot: make([]byte, 32),
|
||||
HeadSlot: headSlot + 64,
|
||||
}, nil)
|
||||
// Set another peer to a higher score.
|
||||
scorer.SetPeerStatus("peer2", &pb.Status{
|
||||
HeadRoot: make([]byte, 32),
|
||||
HeadSlot: headSlot + 128,
|
||||
}, nil)
|
||||
},
|
||||
check: func(scorer *scorers.PeerStatusScorer) {
|
||||
headSlot := uint64(128)
|
||||
assert.Equal(t, float64(headSlot+64)/float64(headSlot+128), scorer.Score("peer1"), "Unexpected score")
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "existent peer full score",
|
||||
update: func(scorer *scorers.PeerStatusScorer) {
|
||||
headSlot := uint64(128)
|
||||
scorer.SetHeadSlot(headSlot)
|
||||
scorer.SetPeerStatus("peer1", &pb.Status{
|
||||
HeadRoot: make([]byte, 32),
|
||||
HeadSlot: headSlot + 64,
|
||||
}, nil)
|
||||
},
|
||||
check: func(scorer *scorers.PeerStatusScorer) {
|
||||
assert.Equal(t, 1.0, scorer.Score("peer1"), "Unexpected score")
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "existent peer no max known slot",
|
||||
update: func(scorer *scorers.PeerStatusScorer) {
|
||||
scorer.SetHeadSlot(0)
|
||||
scorer.SetPeerStatus("peer1", &pb.Status{
|
||||
HeadRoot: make([]byte, 32),
|
||||
HeadSlot: 0,
|
||||
}, nil)
|
||||
},
|
||||
check: func(scorer *scorers.PeerStatusScorer) {
|
||||
assert.Equal(t, 0.0, scorer.Score("peer1"), "Unexpected score")
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
ScorerParams: &scorers.Config{},
|
||||
})
|
||||
scorer := peerStatuses.Scorers().PeerStatusScorer()
|
||||
if tt.update != nil {
|
||||
tt.update(scorer)
|
||||
}
|
||||
tt.check(scorer)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestScorers_PeerStatus_IsBadPeer(t *testing.T) {
|
||||
peerStatuses := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
ScorerParams: &scorers.Config{},
|
||||
})
|
||||
pid := peer.ID("peer1")
|
||||
assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer(pid))
|
||||
assert.Equal(t, false, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid))
|
||||
|
||||
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid, &pb.Status{}, p2ptypes.ErrWrongForkDigestVersion)
|
||||
assert.Equal(t, true, peerStatuses.Scorers().IsBadPeer(pid))
|
||||
assert.Equal(t, true, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid))
|
||||
}
|
||||
|
||||
func TestScorers_PeerStatus_BadPeers(t *testing.T) {
|
||||
peerStatuses := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
ScorerParams: &scorers.Config{},
|
||||
})
|
||||
pid1 := peer.ID("peer1")
|
||||
pid2 := peer.ID("peer2")
|
||||
pid3 := peer.ID("peer3")
|
||||
assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer(pid1))
|
||||
assert.Equal(t, false, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid1))
|
||||
assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer(pid2))
|
||||
assert.Equal(t, false, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid2))
|
||||
assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer(pid3))
|
||||
assert.Equal(t, false, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid3))
|
||||
|
||||
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid1, &pb.Status{}, p2ptypes.ErrWrongForkDigestVersion)
|
||||
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid2, &pb.Status{}, nil)
|
||||
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid3, &pb.Status{}, p2ptypes.ErrWrongForkDigestVersion)
|
||||
assert.Equal(t, true, peerStatuses.Scorers().IsBadPeer(pid1))
|
||||
assert.Equal(t, true, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid1))
|
||||
assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer(pid2))
|
||||
assert.Equal(t, false, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid2))
|
||||
assert.Equal(t, true, peerStatuses.Scorers().IsBadPeer(pid3))
|
||||
assert.Equal(t, true, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid3))
|
||||
assert.Equal(t, 2, len(peerStatuses.Scorers().PeerStatusScorer().BadPeers()))
|
||||
assert.Equal(t, 2, len(peerStatuses.Scorers().BadPeers()))
|
||||
}
|
||||
|
||||
func TestScorers_PeerStatus_PeerStatus(t *testing.T) {
|
||||
peerStatuses := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
ScorerParams: &scorers.Config{},
|
||||
})
|
||||
status, err := peerStatuses.Scorers().PeerStatusScorer().PeerStatus("peer1")
|
||||
require.ErrorContains(t, peerdata.ErrPeerUnknown.Error(), err)
|
||||
assert.Equal(t, (*pb.Status)(nil), status)
|
||||
|
||||
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus("peer1", &pb.Status{
|
||||
HeadSlot: 128,
|
||||
}, nil)
|
||||
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus("peer2", &pb.Status{
|
||||
HeadSlot: 128,
|
||||
}, p2ptypes.ErrInvalidEpoch)
|
||||
status, err = peerStatuses.Scorers().PeerStatusScorer().PeerStatus("peer1")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, uint64(128), status.HeadSlot)
|
||||
assert.Equal(t, nil, peerStatuses.Scorers().ValidationError("peer1"))
|
||||
assert.ErrorContains(t, p2ptypes.ErrInvalidEpoch.Error(), peerStatuses.Scorers().ValidationError("peer2"))
|
||||
assert.Equal(t, nil, peerStatuses.Scorers().ValidationError("peer3"))
|
||||
}
|
||||
@@ -9,35 +9,58 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers/peerdata"
|
||||
)
|
||||
|
||||
var _ Scorer = (*Service)(nil)

// ScoreRoundingFactor defines how many digits to keep in decimal part.
// This parameter is used in math.Round(score*ScoreRoundingFactor) / ScoreRoundingFactor.
const ScoreRoundingFactor = 10000

// BadPeerScore defines score that is returned for a bad peer (all other metrics are ignored).
const BadPeerScore = -1.00

// Scorer defines minimum set of methods every peer scorer must expose.
type Scorer interface {
	Score(pid peer.ID) float64
	IsBadPeer(pid peer.ID) bool
	BadPeers() []peer.ID
}

// Service manages peer scorers that are used to calculate overall peer score.
type Service struct {
	ctx     context.Context
	store   *peerdata.Store
	scorers struct {
		badResponsesScorer  *BadResponsesScorer
		blockProviderScorer *BlockProviderScorer
		peerStatusScorer    *PeerStatusScorer
	}
	weights     map[Scorer]float64
	totalWeight float64
}

// Config holds configuration parameters for scoring service.
type Config struct {
	BadResponsesScorerConfig  *BadResponsesScorerConfig
	BlockProviderScorerConfig *BlockProviderScorerConfig
	PeerStatusScorerConfig    *PeerStatusScorerConfig
}

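Any type exposing these three methods satisfies the interface. The no-op scorer below is a hypothetical example used only to illustrate the contract (the name noopScorer does not exist in the repository):

package scorers

import "github.com/libp2p/go-libp2p-core/peer"

// noopScorer is a hypothetical, do-nothing scorer shown only to illustrate
// the Scorer interface above; it is not part of this change.
type noopScorer struct{}

// Compile-time check, in the same style as the scorers in this package.
var _ Scorer = (*noopScorer)(nil)

func (*noopScorer) Score(_ peer.ID) float64  { return 0 }
func (*noopScorer) IsBadPeer(_ peer.ID) bool { return false }
func (*noopScorer) BadPeers() []peer.ID      { return []peer.ID{} }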
// NewService provides fully initialized peer scoring service.
|
||||
func NewService(ctx context.Context, store *peerdata.Store, config *Config) *Service {
|
||||
s := &Service{
|
||||
ctx: ctx,
|
||||
store: store,
|
||||
store: store,
|
||||
weights: make(map[Scorer]float64),
|
||||
}
|
||||
s.scorers.badResponsesScorer = newBadResponsesScorer(ctx, store, config.BadResponsesScorerConfig)
|
||||
s.scorers.blockProviderScorer = newBlockProviderScorer(ctx, store, config.BlockProviderScorerConfig)
|
||||
go s.loop(s.ctx)
|
||||
|
||||
// Register scorers.
|
||||
s.scorers.badResponsesScorer = newBadResponsesScorer(store, config.BadResponsesScorerConfig)
|
||||
s.setScorerWeight(s.scorers.badResponsesScorer, 1.0)
|
||||
s.scorers.blockProviderScorer = newBlockProviderScorer(store, config.BlockProviderScorerConfig)
|
||||
s.setScorerWeight(s.scorers.blockProviderScorer, 1.0)
|
||||
s.scorers.peerStatusScorer = newPeerStatusScorer(store, config.PeerStatusScorerConfig)
|
||||
s.setScorerWeight(s.scorers.peerStatusScorer, 0.0)
|
||||
|
||||
// Start background tasks.
|
||||
go s.loop(ctx)
|
||||
|
||||
return s
|
||||
}
|
||||
@@ -52,6 +75,22 @@ func (s *Service) BlockProviderScorer() *BlockProviderScorer {
|
||||
return s.scorers.blockProviderScorer
|
||||
}
|
||||
|
||||
// PeerStatusScorer exposes peer chain status scoring service.
|
||||
func (s *Service) PeerStatusScorer() *PeerStatusScorer {
|
||||
return s.scorers.peerStatusScorer
|
||||
}
|
||||
|
||||
// ActiveScorersCount returns number of scorers that can affect score (have non-zero weight).
|
||||
func (s *Service) ActiveScorersCount() int {
|
||||
cnt := 0
|
||||
for _, w := range s.weights {
|
||||
if w > 0 {
|
||||
cnt++
|
||||
}
|
||||
}
|
||||
return cnt
|
||||
}
|
||||
|
||||
// Score returns calculated peer score across all tracked metrics.
func (s *Service) Score(pid peer.ID) float64 {
	s.store.RLock()
@@ -61,11 +100,57 @@ func (s *Service) Score(pid peer.ID) float64 {
	if _, ok := s.store.PeerData(pid); !ok {
		return 0
	}
	score += s.scorers.badResponsesScorer.score(pid)
	score += s.scorers.blockProviderScorer.score(pid)
	score += s.scorers.badResponsesScorer.score(pid) * s.scorerWeight(s.scorers.badResponsesScorer)
	score += s.scorers.blockProviderScorer.score(pid) * s.scorerWeight(s.scorers.blockProviderScorer)
	score += s.scorers.peerStatusScorer.score(pid) * s.scorerWeight(s.scorers.peerStatusScorer)
	return math.Round(score*ScoreRoundingFactor) / ScoreRoundingFactor
}

// IsBadPeer traverses all the scorers to see if any of them classifies peer as bad.
|
||||
func (s *Service) IsBadPeer(pid peer.ID) bool {
|
||||
s.store.RLock()
|
||||
defer s.store.RUnlock()
|
||||
return s.isBadPeer(pid)
|
||||
}
|
||||
|
||||
// isBadPeer is a lock-free version of IsBadPeer.
|
||||
func (s *Service) isBadPeer(pid peer.ID) bool {
|
||||
if s.scorers.badResponsesScorer.isBadPeer(pid) {
|
||||
return true
|
||||
}
|
||||
if s.scorers.peerStatusScorer.isBadPeer(pid) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// BadPeers returns the peers that are considered bad by any of registered scorers.
|
||||
func (s *Service) BadPeers() []peer.ID {
|
||||
s.store.RLock()
|
||||
defer s.store.RUnlock()
|
||||
|
||||
badPeers := make([]peer.ID, 0)
|
||||
for pid := range s.store.Peers() {
|
||||
if s.isBadPeer(pid) {
|
||||
badPeers = append(badPeers, pid)
|
||||
}
|
||||
}
|
||||
return badPeers
|
||||
}
|
||||
|
||||
// ValidationError returns peer data validation error, which potentially provides more information
|
||||
// why peer is considered bad.
|
||||
func (s *Service) ValidationError(pid peer.ID) error {
|
||||
s.store.RLock()
|
||||
defer s.store.RUnlock()
|
||||
|
||||
peerData, ok := s.store.PeerData(pid)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
return peerData.ChainStateValidationError
|
||||
}
|
||||
|
||||
// loop handles background tasks.
|
||||
func (s *Service) loop(ctx context.Context) {
|
||||
decayBadResponsesStats := time.NewTicker(s.scorers.badResponsesScorer.Params().DecayInterval)
|
||||
@@ -84,3 +169,14 @@ func (s *Service) loop(ctx context.Context) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// setScorerWeight adds scorer to map of known scorers.
func (s *Service) setScorerWeight(scorer Scorer, weight float64) {
	s.weights[scorer] = weight
	s.totalWeight += s.weights[scorer]
}

// scorerWeight calculates contribution percentage of a given scorer in total score.
func (s *Service) scorerWeight(scorer Scorer) float64 {
	return s.weights[scorer] / s.totalWeight
}

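A standalone arithmetic sketch (not code from the repository) of how the weights registered in NewService above, 1.0, 1.0 and 0.0, turn into per-scorer contributions:

package main

import "fmt"

func main() {
	// Weights as registered in NewService: bad responses 1.0,
	// block provider 1.0, peer status 0.0 (inactive for now).
	weights := map[string]float64{"badResponses": 1.0, "blockProvider": 1.0, "peerStatus": 0.0}

	total := 0.0
	for _, w := range weights {
		total += w
	}

	// Each scorer contributes score_i * w_i / total; with these defaults every
	// active scorer contributes half of its raw score, which is why the tests
	// below divide expected values by ActiveScorersCount().
	for name, w := range weights {
		fmt.Printf("%s contributes fraction %.2f of its raw score\n", name, w/total)
	}
}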
@@ -28,8 +28,8 @@ func TestScorers_Service_Init(t *testing.T) {
|
||||
t.Run("bad responses scorer", func(t *testing.T) {
|
||||
params := peerStatuses.Scorers().BadResponsesScorer().Params()
|
||||
assert.Equal(t, scorers.DefaultBadResponsesThreshold, params.Threshold, "Unexpected threshold value")
|
||||
assert.Equal(t, scorers.DefaultBadResponsesWeight, params.Weight, "Unexpected weight value")
|
||||
assert.Equal(t, scorers.DefaultBadResponsesDecayInterval, params.DecayInterval, "Unexpected decay interval value")
|
||||
assert.Equal(t, scorers.DefaultBadResponsesDecayInterval,
|
||||
params.DecayInterval, "Unexpected decay interval value")
|
||||
})
|
||||
|
||||
t.Run("block providers scorer", func(t *testing.T) {
|
||||
@@ -48,7 +48,6 @@ func TestScorers_Service_Init(t *testing.T) {
|
||||
ScorerParams: &scorers.Config{
|
||||
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
Threshold: 2,
|
||||
Weight: -1,
|
||||
DecayInterval: 1 * time.Minute,
|
||||
},
|
||||
BlockProviderScorerConfig: &scorers.BlockProviderScorerConfig{
|
||||
@@ -64,7 +63,6 @@ func TestScorers_Service_Init(t *testing.T) {
|
||||
t.Run("bad responses scorer", func(t *testing.T) {
|
||||
params := peerStatuses.Scorers().BadResponsesScorer().Params()
|
||||
assert.Equal(t, 2, params.Threshold, "Unexpected threshold value")
|
||||
assert.Equal(t, -1.0, params.Weight, "Unexpected weight value")
|
||||
assert.Equal(t, 1*time.Minute, params.DecayInterval, "Unexpected decay interval value")
|
||||
})
|
||||
|
||||
@@ -119,7 +117,8 @@ func TestScorers_Service_Score(t *testing.T) {
|
||||
for _, pid := range pids {
|
||||
peerStatuses.Add(nil, pid, nil, network.DirUnknown)
|
||||
// Not yet used peer gets boosted score.
|
||||
assert.Equal(t, s.BlockProviderScorer().MaxScore(), s.Score(pid), "Unexpected score for not yet used peer")
|
||||
startScore := s.BlockProviderScorer().MaxScore()
|
||||
assert.Equal(t, startScore/float64(s.ActiveScorersCount()), s.Score(pid), "Unexpected score for not yet used peer")
|
||||
}
|
||||
return s, pids
|
||||
}
|
||||
@@ -136,27 +135,29 @@ func TestScorers_Service_Score(t *testing.T) {
|
||||
|
||||
t.Run("bad responses score", func(t *testing.T) {
|
||||
s, pids := setupScorer()
|
||||
zeroScore := s.BlockProviderScorer().MaxScore()
|
||||
// Peers start with boosted start score (new peers are boosted by block provider).
|
||||
startScore := s.BlockProviderScorer().MaxScore() / float64(s.ActiveScorersCount())
|
||||
penalty := (-1 / float64(s.BadResponsesScorer().Params().Threshold)) / float64(s.ActiveScorersCount())
|
||||
|
||||
// Update peers' stats and test the effect on peer order.
|
||||
s.BadResponsesScorer().Increment("peer2")
|
||||
assert.DeepEqual(t, pack(s, zeroScore, zeroScore-0.2, zeroScore), peerScores(s, pids), "Unexpected scores")
|
||||
assert.DeepEqual(t, pack(s, startScore, startScore+penalty, startScore), peerScores(s, pids))
|
||||
s.BadResponsesScorer().Increment("peer1")
|
||||
s.BadResponsesScorer().Increment("peer1")
|
||||
assert.DeepEqual(t, pack(s, zeroScore-0.4, zeroScore-0.2, zeroScore), peerScores(s, pids), "Unexpected scores")
|
||||
assert.DeepEqual(t, pack(s, startScore+2*penalty, startScore+penalty, startScore), peerScores(s, pids))
|
||||
|
||||
// See how decaying affects order of peers.
|
||||
s.BadResponsesScorer().Decay()
|
||||
assert.DeepEqual(t, pack(s, zeroScore-0.2, zeroScore, zeroScore), peerScores(s, pids), "Unexpected scores")
|
||||
assert.DeepEqual(t, pack(s, startScore+penalty, startScore, startScore), peerScores(s, pids))
|
||||
s.BadResponsesScorer().Decay()
|
||||
assert.DeepEqual(t, pack(s, zeroScore, zeroScore, zeroScore), peerScores(s, pids), "Unexpected scores")
|
||||
assert.DeepEqual(t, pack(s, startScore, startScore, startScore), peerScores(s, pids))
|
||||
})
|
||||
|
||||
t.Run("block providers score", func(t *testing.T) {
|
||||
s, pids := setupScorer()
|
||||
s1 := s.BlockProviderScorer()
|
||||
zeroScore := s.BlockProviderScorer().MaxScore()
|
||||
batchWeight := s1.Params().ProcessedBatchWeight
|
||||
startScore := s.BlockProviderScorer().MaxScore() / 2
|
||||
batchWeight := s1.Params().ProcessedBatchWeight / 2
|
||||
|
||||
// Partial batch.
|
||||
s1.IncrementProcessedBlocks("peer1", batchSize/4)
|
||||
@@ -164,11 +165,11 @@ func TestScorers_Service_Score(t *testing.T) {
|
||||
|
||||
// Single batch.
|
||||
s1.IncrementProcessedBlocks("peer1", batchSize)
|
||||
assert.DeepEqual(t, pack(s, batchWeight, zeroScore, zeroScore), peerScores(s, pids), "Unexpected scores")
|
||||
assert.DeepEqual(t, pack(s, batchWeight, startScore, startScore), peerScores(s, pids), "Unexpected scores")
|
||||
|
||||
// Multiple batches.
|
||||
s1.IncrementProcessedBlocks("peer2", batchSize*4)
|
||||
assert.DeepEqual(t, pack(s, batchWeight, batchWeight*4, zeroScore), peerScores(s, pids), "Unexpected scores")
|
||||
assert.DeepEqual(t, pack(s, batchWeight, batchWeight*4, startScore), peerScores(s, pids), "Unexpected scores")
|
||||
|
||||
// Partial batch.
|
||||
s1.IncrementProcessedBlocks("peer3", batchSize/2)
|
||||
@@ -187,25 +188,22 @@ func TestScorers_Service_Score(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("overall score", func(t *testing.T) {
|
||||
// Full score, no penalty.
|
||||
s, _ := setupScorer()
|
||||
s1 := s.BlockProviderScorer()
|
||||
s2 := s.BadResponsesScorer()
|
||||
batchWeight := s1.Params().ProcessedBatchWeight
|
||||
batchWeight := s1.Params().ProcessedBatchWeight / float64(s.ActiveScorersCount())
|
||||
penalty := (-1 / float64(s.BadResponsesScorer().Params().Threshold)) / float64(s.ActiveScorersCount())
|
||||
|
||||
// Full score, no penalty.
|
||||
s1.IncrementProcessedBlocks("peer1", batchSize*5)
|
||||
assert.Equal(t, roundScore(batchWeight*5), s1.Score("peer1"))
|
||||
assert.Equal(t, roundScore(batchWeight*5), s.Score("peer1"))
|
||||
// Now, adjust score by introducing penalty for bad responses.
|
||||
s2.Increment("peer1")
|
||||
s2.Increment("peer1")
|
||||
assert.Equal(t, -0.4, s2.Score("peer1"), "Unexpected bad responses score")
|
||||
assert.Equal(t, roundScore(batchWeight*5), s1.Score("peer1"), "Unexpected block provider score")
|
||||
assert.Equal(t, roundScore(batchWeight*5-0.4), s.Score("peer1"), "Unexpected overall score")
|
||||
assert.Equal(t, roundScore(batchWeight*5+2*penalty), s.Score("peer1"), "Unexpected overall score")
|
||||
// If peer continues to misbehave, score becomes negative.
|
||||
s2.Increment("peer1")
|
||||
assert.Equal(t, -0.6, s2.Score("peer1"), "Unexpected bad responses score")
|
||||
assert.Equal(t, roundScore(batchWeight*5), s1.Score("peer1"), "Unexpected block provider score")
|
||||
assert.Equal(t, roundScore(batchWeight*5-0.6), s.Score("peer1"), "Unexpected overall score")
|
||||
assert.Equal(t, roundScore(batchWeight*5+3*penalty), s.Score("peer1"), "Unexpected overall score")
|
||||
})
|
||||
}
|
||||
|
||||
@@ -218,7 +216,6 @@ func TestScorers_Service_loop(t *testing.T) {
|
||||
ScorerParams: &scorers.Config{
|
||||
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
Threshold: 5,
|
||||
Weight: -0.5,
|
||||
DecayInterval: 50 * time.Millisecond,
|
||||
},
|
||||
BlockProviderScorerConfig: &scorers.BlockProviderScorerConfig{
|
||||
@@ -264,3 +261,45 @@ func TestScorers_Service_loop(t *testing.T) {
|
||||
assert.Equal(t, false, s1.IsBadPeer(pid1), "Peer should not be marked as bad")
|
||||
assert.Equal(t, uint64(0), s2.ProcessedBlocks("peer1"), "No blocks are expected")
|
||||
}
|
||||
|
||||
func TestScorers_Service_IsBadPeer(t *testing.T) {
|
||||
peerStatuses := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{
|
||||
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
Threshold: 2,
|
||||
DecayInterval: 50 * time.Second,
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer("peer1"))
|
||||
peerStatuses.Scorers().BadResponsesScorer().Increment("peer1")
|
||||
peerStatuses.Scorers().BadResponsesScorer().Increment("peer1")
|
||||
assert.Equal(t, true, peerStatuses.Scorers().IsBadPeer("peer1"))
|
||||
}
|
||||
|
||||
func TestScorers_Service_BadPeers(t *testing.T) {
|
||||
peerStatuses := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{
|
||||
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
Threshold: 2,
|
||||
DecayInterval: 50 * time.Second,
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer("peer1"))
|
||||
assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer("peer2"))
|
||||
assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer("peer3"))
|
||||
assert.Equal(t, 0, len(peerStatuses.Scorers().BadPeers()))
|
||||
for _, pid := range []peer.ID{"peer1", "peer3"} {
|
||||
peerStatuses.Scorers().BadResponsesScorer().Increment(pid)
|
||||
peerStatuses.Scorers().BadResponsesScorer().Increment(pid)
|
||||
}
|
||||
assert.Equal(t, true, peerStatuses.Scorers().IsBadPeer("peer1"))
|
||||
assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer("peer2"))
|
||||
assert.Equal(t, true, peerStatuses.Scorers().IsBadPeer("peer3"))
|
||||
assert.Equal(t, 2, len(peerStatuses.Scorers().BadPeers()))
|
||||
}
|
||||
|
||||
@@ -32,6 +32,7 @@ import (
|
||||
"github.com/libp2p/go-libp2p-core/network"
|
||||
"github.com/libp2p/go-libp2p-core/peer"
|
||||
ma "github.com/multiformats/go-multiaddr"
|
||||
manet "github.com/multiformats/go-multiaddr/net"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers/peerdata"
|
||||
@@ -52,14 +53,20 @@ const (
|
||||
PeerConnecting
|
||||
)
|
||||
|
||||
// Additional buffer beyond current peer limit, from which we can store the relevant peer statuses.
|
||||
const maxLimitBuffer = 150
|
||||
const (
|
||||
// ColocationLimit restricts how many peer identities we can see from a single ip or ipv6 subnet.
|
||||
ColocationLimit = 5
|
||||
|
||||
// Additional buffer beyond current peer limit, from which we can store the relevant peer statuses.
|
||||
maxLimitBuffer = 150
|
||||
)
|
||||
|
||||
// Status is the structure holding the peer status information.
|
||||
type Status struct {
|
||||
ctx context.Context
|
||||
scorers *scorers.Service
|
||||
store *peerdata.Store
|
||||
ctx context.Context
|
||||
scorers *scorers.Service
|
||||
store *peerdata.Store
|
||||
ipTracker map[string]uint64
|
||||
}
|
||||
|
||||
// StatusConfig represents peer status service params.
|
||||
@@ -76,9 +83,10 @@ func NewStatus(ctx context.Context, config *StatusConfig) *Status {
|
||||
MaxPeers: maxLimitBuffer + config.PeerLimit,
|
||||
})
|
||||
return &Status{
|
||||
ctx: ctx,
|
||||
store: store,
|
||||
scorers: scorers.NewService(ctx, store, config.ScorerParams),
|
||||
ctx: ctx,
|
||||
store: store,
|
||||
scorers: scorers.NewService(ctx, store, config.ScorerParams),
|
||||
ipTracker: map[string]uint64{},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -100,11 +108,15 @@ func (p *Status) Add(record *enr.Record, pid peer.ID, address ma.Multiaddr, dire
|
||||
|
||||
if peerData, ok := p.store.PeerData(pid); ok {
|
||||
// Peer already exists, just update its address info.
|
||||
prevAddress := peerData.Address
|
||||
peerData.Address = address
|
||||
peerData.Direction = direction
|
||||
if record != nil {
|
||||
peerData.Enr = record
|
||||
}
|
||||
if !sameIP(prevAddress, address) {
|
||||
p.addIpToTracker(pid)
|
||||
}
|
||||
return
|
||||
}
|
||||
peerData := &peerdata.PeerData{
|
||||
@@ -117,6 +129,7 @@ func (p *Status) Add(record *enr.Record, pid peer.ID, address ma.Multiaddr, dire
|
||||
peerData.Enr = record
|
||||
}
|
||||
p.store.SetPeerData(pid, peerData)
|
||||
p.addIpToTracker(pid)
|
||||
}
|
||||
|
||||
// Address returns the multiaddress of the given remote peer.
|
||||
@@ -156,25 +169,14 @@ func (p *Status) ENR(pid peer.ID) (*enr.Record, error) {
|
||||
|
||||
// SetChainState sets the chain state of the given remote peer.
|
||||
func (p *Status) SetChainState(pid peer.ID, chainState *pb.Status) {
|
||||
p.store.Lock()
|
||||
defer p.store.Unlock()
|
||||
|
||||
peerData := p.store.PeerDataGetOrCreate(pid)
|
||||
peerData.ChainState = chainState
|
||||
peerData.ChainStateLastUpdated = timeutils.Now()
|
||||
p.scorers.PeerStatusScorer().SetPeerStatus(pid, chainState, nil)
|
||||
}
|
||||
|
||||
// ChainState gets the chain state of the given remote peer.
|
||||
// This can return nil if there is no known chain state for the peer.
|
||||
// This will error if the peer does not exist.
|
||||
func (p *Status) ChainState(pid peer.ID) (*pb.Status, error) {
|
||||
p.store.RLock()
|
||||
defer p.store.RUnlock()
|
||||
|
||||
if peerData, ok := p.store.PeerData(pid); ok {
|
||||
return peerData.ChainState, nil
|
||||
}
|
||||
return nil, peerdata.ErrPeerUnknown
|
||||
return p.scorers.PeerStatusScorer().PeerStatus(pid)
|
||||
}
|
||||
|
||||
// IsActive checks if a peers is active and returns the result appropriately.
|
||||
@@ -277,10 +279,10 @@ func (p *Status) ChainStateLastUpdated(pid peer.ID) (time.Time, error) {
|
||||
return timeutils.Now(), peerdata.ErrPeerUnknown
|
||||
}
|
||||
|
||||
// IsBad states if the peer is to be considered bad.
|
||||
// IsBad states if the peer is to be considered bad (by *any* of the registered scorers).
|
||||
// If the peer is unknown this will return `false`, which makes using this function easier than returning an error.
|
||||
func (p *Status) IsBad(pid peer.ID) bool {
|
||||
return p.scorers.BadResponsesScorer().IsBadPeer(pid)
|
||||
return p.isfromBadIP(pid) || p.scorers.IsBadPeer(pid)
|
||||
}
|
||||
|
||||
// NextValidTime gets the earliest possible time it is to contact/dial
|
||||
@@ -463,6 +465,7 @@ func (p *Status) Prune() {
|
||||
for _, peerData := range peersToPrune {
|
||||
p.store.DeletePeerData(peerData.pid)
|
||||
}
|
||||
p.tallyIPTracker()
|
||||
}
|
||||
|
||||
// BestFinalized returns the highest finalized epoch equal to or higher than ours that is agreed
|
||||
@@ -579,6 +582,88 @@ func (p *Status) HighestEpoch() uint64 {
|
||||
return helpers.SlotToEpoch(highestSlot)
|
||||
}
|
||||
|
||||
func (p *Status) isfromBadIP(pid peer.ID) bool {
|
||||
p.store.RLock()
|
||||
defer p.store.RUnlock()
|
||||
|
||||
peerData, ok := p.store.PeerData(pid)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
if peerData.Address == nil {
|
||||
return false
|
||||
}
|
||||
ip, err := manet.ToIP(peerData.Address)
|
||||
if err != nil {
|
||||
return true
|
||||
}
|
||||
if val, ok := p.ipTracker[ip.String()]; ok {
|
||||
if val > ColocationLimit {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (p *Status) addIpToTracker(pid peer.ID) {
|
||||
data, ok := p.store.PeerData(pid)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
if data.Address == nil {
|
||||
return
|
||||
}
|
||||
ip, err := manet.ToIP(data.Address)
|
||||
if err != nil {
|
||||
// Should never happen, it is
|
||||
// assumed every IP coming in
|
||||
// is a valid ip.
|
||||
return
|
||||
}
|
||||
// Ignore loopback addresses.
|
||||
if ip.IsLoopback() {
|
||||
return
|
||||
}
|
||||
stringIP := ip.String()
|
||||
p.ipTracker[stringIP] += 1
|
||||
}
|
||||
|
||||
func (p *Status) tallyIPTracker() {
|
||||
tracker := map[string]uint64{}
|
||||
// Iterate through all peers.
|
||||
for _, peerData := range p.store.Peers() {
|
||||
if peerData.Address == nil {
|
||||
continue
|
||||
}
|
||||
ip, err := manet.ToIP(peerData.Address)
|
||||
if err != nil {
|
||||
// Should never happen, it is
|
||||
// assumed every IP coming in
|
||||
// is a valid ip.
|
||||
continue
|
||||
}
|
||||
stringIP := ip.String()
|
||||
tracker[stringIP] += 1
|
||||
}
|
||||
p.ipTracker = tracker
|
||||
}
|
||||
|
||||
func sameIP(firstAddr, secondAddr ma.Multiaddr) bool {
|
||||
// Exit early if we do get nil multiaddresses
|
||||
if firstAddr == nil || secondAddr == nil {
|
||||
return false
|
||||
}
|
||||
firstIP, err := manet.ToIP(firstAddr)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
secondIP, err := manet.ToIP(secondAddr)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return firstIP.Equal(secondIP)
|
||||
}
|
||||
|
||||
func retrieveIndicesFromBitfield(bitV bitfield.Bitvector64) []uint64 {
|
||||
committeeIdxs := make([]uint64, 0, bitV.Count())
|
||||
for i := uint64(0); i < 64; i++ {
|
||||
|
||||
@@ -3,6 +3,7 @@ package peers_test
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -517,6 +518,45 @@ func TestPrune(t *testing.T) {
|
||||
assert.ErrorContains(t, "peer unknown", err)
|
||||
}
|
||||
|
||||
func TestPeerIPTracker(t *testing.T) {
|
||||
maxBadResponses := 2
|
||||
p := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{
|
||||
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
Threshold: maxBadResponses,
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
badIP := "211.227.218.116"
|
||||
badPeers := []peer.ID{}
|
||||
for i := 0; i < peers.ColocationLimit+10; i++ {
|
||||
port := strconv.Itoa(3000 + i)
|
||||
addr, err := ma.NewMultiaddr("/ip4/" + badIP + "/tcp/" + port)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
badPeers = append(badPeers, createPeer(t, p, addr))
|
||||
}
|
||||
for _, pr := range badPeers {
|
||||
assert.Equal(t, true, p.IsBad(pr), "peer with bad ip is not bad")
|
||||
}
|
||||
|
||||
// Add in bad peers, so that our records are trimmed out
|
||||
// from the peer store.
|
||||
for i := 0; i < p.MaxPeerLimit()+100; i++ {
|
||||
// Peer added to peer handler.
|
||||
pid := addPeer(t, p, peers.PeerConnected)
|
||||
p.Scorers().BadResponsesScorer().Increment(pid)
|
||||
}
|
||||
p.Prune()
|
||||
|
||||
for _, pr := range badPeers {
|
||||
assert.Equal(t, false, p.IsBad(pr), "peer with good ip is regarded as bad")
|
||||
}
|
||||
}
|
||||
|
||||
func TestTrimmedOrderedPeers(t *testing.T) {
|
||||
p := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
@@ -833,3 +873,15 @@ func addPeer(t *testing.T, p *peers.Status, state peerdata.PeerConnectionState)
|
||||
})
|
||||
return id
|
||||
}
|
||||
|
||||
func createPeer(t *testing.T, p *peers.Status, addr ma.Multiaddr) peer.ID {
|
||||
mhBytes := []byte{0x11, 0x04}
|
||||
idBytes := make([]byte, 4)
|
||||
_, err := rand.Read(idBytes)
|
||||
require.NoError(t, err)
|
||||
mhBytes = append(mhBytes, idBytes...)
|
||||
id, err := peer.IDFromBytes(mhBytes)
|
||||
require.NoError(t, err)
|
||||
p.Add(new(enr.Record), id, addr, network.DirUnknown)
|
||||
return id
|
||||
}
|
||||
|
||||
@@ -116,9 +116,19 @@ func msgIDFunction(pmsg *pubsub_pb.Message) string {
|
||||
|
||||
func setPubSubParameters() {
|
||||
heartBeatInterval := 700 * time.Millisecond
|
||||
pubsub.GossipSubDlo = 5
|
||||
pubsub.GossipSubDlo = 6
|
||||
pubsub.GossipSubD = 8
|
||||
pubsub.GossipSubHeartbeatInterval = heartBeatInterval
|
||||
pubsub.GossipSubHistoryLength = 6
|
||||
pubsub.GossipSubHistoryGossip = 3
|
||||
pubsub.TimeCacheDuration = 550 * heartBeatInterval
|
||||
|
||||
// Set a larger gossip history to ensure that slower
|
||||
// messages have a longer time to be propagated. This
|
||||
// comes with the tradeoff of larger memory usage and
|
||||
// size of the seen message cache.
|
||||
if featureconfig.Get().EnableLargerGossipHistory {
|
||||
pubsub.GossipSubHistoryLength = 12
|
||||
pubsub.GossipSubHistoryLength = 5
|
||||
}
|
||||
}
|
||||
|
||||
@@ -156,6 +156,7 @@ func NewService(ctx context.Context, cfg *Config) (*Service, error) {
|
||||
pubsub.WithNoAuthor(),
|
||||
pubsub.WithMessageIdFn(msgIDFunction),
|
||||
pubsub.WithSubscriptionFilter(s),
|
||||
pubsub.WithPeerOutboundQueueSize(256),
|
||||
}
|
||||
// Add gossip scoring options.
|
||||
if featureconfig.Get().EnablePeerScorer {
|
||||
@@ -179,7 +180,6 @@ func NewService(ctx context.Context, cfg *Config) (*Service, error) {
|
||||
ScorerParams: &scorers.Config{
|
||||
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
|
||||
Threshold: maxBadResponses,
|
||||
Weight: -100,
|
||||
DecayInterval: time.Hour,
|
||||
},
|
||||
},
|
||||
@@ -440,7 +440,7 @@ func (s *Service) connectWithPeer(ctx context.Context, info peer.AddrInfo) error
|
||||
return nil
|
||||
}
|
||||
if s.Peers().IsBad(info.ID) {
|
||||
return nil
|
||||
return errors.New("refused to connect to bad peer")
|
||||
}
|
||||
ctx, cancel := context.WithTimeout(ctx, maxDialTimeout)
|
||||
defer cancel()
|
||||
|
||||
@@ -18,6 +18,8 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/encoder"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers/scorers"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/event"
|
||||
"github.com/prysmaticlabs/prysm/shared/p2putils"
|
||||
@@ -313,3 +315,48 @@ func initializeStateWithForkDigest(ctx context.Context, t *testing.T, ef *event.
|
||||
|
||||
return fd
|
||||
}
|
||||
|
||||
func TestService_connectWithPeer(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
peers *peers.Status
|
||||
info peer.AddrInfo
|
||||
wantErr string
|
||||
}{
|
||||
{
|
||||
name: "bad peer",
|
||||
peers: func() *peers.Status {
|
||||
ps := peers.NewStatus(context.Background(), &peers.StatusConfig{
|
||||
ScorerParams: &scorers.Config{},
|
||||
})
|
||||
for i := 0; i < 10; i++ {
|
||||
ps.Scorers().BadResponsesScorer().Increment("bad")
|
||||
}
|
||||
return ps
|
||||
}(),
|
||||
info: peer.AddrInfo{ID: "bad"},
|
||||
wantErr: "refused to connect to bad peer",
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
h, _, _ := createHost(t, 34567)
|
||||
defer func() {
|
||||
if err := h.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}()
|
||||
ctx := context.Background()
|
||||
s := &Service{
|
||||
host: h,
|
||||
peers: tt.peers,
|
||||
}
|
||||
err := s.connectWithPeer(ctx, tt.info)
|
||||
if len(tt.wantErr) > 0 {
|
||||
require.ErrorContains(t, tt.wantErr, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -18,6 +18,8 @@ import (
|
||||
)
|
||||
|
||||
func TestStartDiscV5_DiscoverPeersWithSubnets(t *testing.T) {
|
||||
// This test needs to be entirely rewritten and should be done in a follow up PR from #7885.
|
||||
t.Skip("This test is now failing after PR 7885 due to false positive")
|
||||
port := 2000
|
||||
ipAddr, pkey := createAddrAndPrivKey(t)
|
||||
genesisTime := time.Now()
|
||||
|
||||
@@ -3,7 +3,11 @@ load("@prysm//tools/go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["types.go"],
|
||||
srcs = [
|
||||
"rpc_errors.go",
|
||||
"rpc_goodbye_codes.go",
|
||||
"types.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/p2p/types",
|
||||
visibility = ["//beacon-chain:__subpackages__"],
|
||||
deps = [
|
||||
|
||||
beacon-chain/p2p/types/rpc_errors.go (new file, 15 lines)
@@ -0,0 +1,15 @@
|
||||
package types
|
||||
|
||||
import "errors"
|
||||
|
||||
var (
|
||||
ErrWrongForkDigestVersion = errors.New("wrong fork digest version")
|
||||
ErrInvalidEpoch = errors.New("invalid epoch")
|
||||
ErrInvalidFinalizedRoot = errors.New("invalid finalized root")
|
||||
ErrInvalidSequenceNum = errors.New("invalid sequence number provided")
|
||||
ErrGeneric = errors.New("internal service error")
|
||||
ErrInvalidParent = errors.New("mismatched parent root")
|
||||
ErrRateLimited = errors.New("rate limited")
|
||||
ErrIODeadline = errors.New("i/o deadline exceeded")
|
||||
ErrInvalidRequest = errors.New("invalid range, step or count")
|
||||
)
|
||||
beacon-chain/p2p/types/rpc_goodbye_codes.go (new file, 40 lines)
@@ -0,0 +1,40 @@
|
||||
package types
|
||||
|
||||
// RPCGoodbyeCode represents goodbye code, used in sync package.
|
||||
type RPCGoodbyeCode = SSZUint64
|
||||
|
||||
const (
|
||||
// Spec defined codes.
|
||||
GoodbyeCodeClientShutdown RPCGoodbyeCode = iota
|
||||
GoodbyeCodeWrongNetwork
|
||||
GoodbyeCodeGenericError
|
||||
|
||||
// Teku specific codes
|
||||
GoodbyeCodeUnableToVerifyNetwork = RPCGoodbyeCode(128)
|
||||
|
||||
// Lighthouse specific codes
|
||||
GoodbyeCodeTooManyPeers = RPCGoodbyeCode(129)
|
||||
GoodbyeCodeBadScore = RPCGoodbyeCode(250)
|
||||
GoodbyeCodeBanned = RPCGoodbyeCode(251)
|
||||
)
|
||||
|
||||
// GoodbyeCodeMessages defines a mapping between goodbye codes and string messages.
|
||||
var GoodbyeCodeMessages = map[RPCGoodbyeCode]string{
|
||||
GoodbyeCodeClientShutdown: "client shutdown",
|
||||
GoodbyeCodeWrongNetwork: "irrelevant network",
|
||||
GoodbyeCodeGenericError: "fault/error",
|
||||
GoodbyeCodeUnableToVerifyNetwork: "unable to verify network",
|
||||
GoodbyeCodeTooManyPeers: "client has too many peers",
|
||||
GoodbyeCodeBadScore: "peer score too low",
|
||||
GoodbyeCodeBanned: "client banned this node",
|
||||
}
|
||||
|
||||
// ErrToGoodbyeCode converts given error to RPC goodbye code.
func ErrToGoodbyeCode(err error) RPCGoodbyeCode {
	switch err {
	case ErrWrongForkDigestVersion:
		return GoodbyeCodeWrongNetwork
	default:
		return GoodbyeCodeGenericError
	}
}
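A small usage sketch of the new goodbye-code helpers (package alias and import path as in the files above; illustrative only):

package main

import (
	"fmt"

	p2ptypes "github.com/prysmaticlabs/prysm/beacon-chain/p2p/types"
)

func main() {
	// Map a terminal handshake error onto the goodbye code a node would send,
	// then look up its human-readable description.
	code := p2ptypes.ErrToGoodbyeCode(p2ptypes.ErrWrongForkDigestVersion)
	fmt.Println(code, p2ptypes.GoodbyeCodeMessages[code]) // 1 irrelevant network

	// Errors without a dedicated mapping fall through to the generic code.
	code = p2ptypes.ErrToGoodbyeCode(p2ptypes.ErrInvalidSequenceNum)
	fmt.Println(code, p2ptypes.GoodbyeCodeMessages[code]) // 2 fault/error
}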
@@ -45,12 +45,17 @@ type headerInfo struct {
|
||||
Time uint64
|
||||
}
|
||||
|
||||
func headerToHeaderInfo(hdr *gethTypes.Header) *headerInfo {
func headerToHeaderInfo(hdr *gethTypes.Header) (*headerInfo, error) {
	if hdr.Number == nil {
		// A nil number will panic when calling *big.Int.Set(...)
		return nil, errors.New("cannot convert block header with nil block number")
	}

	return &headerInfo{
		Hash:   hdr.Hash(),
		Number: new(big.Int).Set(hdr.Number),
		Time:   hdr.Time,
	}
	}, nil
}

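Since headerToHeaderInfo is unexported, a caller would live inside the powchain package; a hypothetical sketch of the new error-returning flow (the function name exampleHeaderInfo and the header values are made up, mirroring the unit test added further down):

package powchain

import (
	"log"
	"math/big"

	gethTypes "github.com/ethereum/go-ethereum/core/types"
)

// exampleHeaderInfo demonstrates handling the error that headerToHeaderInfo
// now returns instead of panicking on a nil block number.
func exampleHeaderInfo() {
	hdr := &gethTypes.Header{Number: big.NewInt(500), Time: 2345}
	info, err := headerToHeaderInfo(hdr)
	if err != nil {
		// A nil Number now surfaces here as an error rather than a downstream panic.
		log.Printf("skipping header: %v", err)
		return
	}
	_ = info
}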
// hashKeyFn takes the hex string representation as the key for a headerInfo.
|
||||
@@ -151,7 +156,10 @@ func (b *headerCache) AddHeader(hdr *gethTypes.Header) error {
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
hInfo := headerToHeaderInfo(hdr)
|
||||
hInfo, err := headerToHeaderInfo(hdr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := b.hashCache.AddIfNotPresent(hInfo); err != nil {
|
||||
return err
|
||||
|
||||
@@ -2,6 +2,7 @@ package powchain
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
@@ -102,3 +103,47 @@ func TestBlockCache_maxSize(t *testing.T) {
|
||||
assert.Equal(t, int(maxCacheSize), len(cache.hashCache.ListKeys()))
|
||||
assert.Equal(t, int(maxCacheSize), len(cache.heightCache.ListKeys()))
|
||||
}
|
||||
|
||||
func Test_headerToHeaderInfo(t *testing.T) {
|
||||
type args struct {
|
||||
hdr *gethTypes.Header
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want *headerInfo
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "OK",
|
||||
args: args{hdr: &gethTypes.Header{
|
||||
Number: big.NewInt(500),
|
||||
Time: 2345,
|
||||
}},
|
||||
want: &headerInfo{
|
||||
Number: big.NewInt(500),
|
||||
Hash: common.Hash{239, 10, 13, 71, 156, 192, 23, 93, 73, 154, 255, 209, 163, 204, 129, 12, 179, 183, 65, 70, 205, 200, 57, 12, 17, 211, 209, 4, 104, 133, 73, 86},
|
||||
Time: 2345,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "nil number",
|
||||
args: args{hdr: &gethTypes.Header{
|
||||
Time: 2345,
|
||||
}},
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got, err := headerToHeaderInfo(tt.args.hdr)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("headerToHeaderInfo() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("headerToHeaderInfo() got = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -229,7 +229,10 @@ func (s *Service) retrieveHeaderInfo(ctx context.Context, bNum uint64) (*headerI
|
||||
if err := s.headerCache.AddHeader(blk); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
info = headerToHeaderInfo(blk)
|
||||
info, err = headerToHeaderInfo(blk)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return info, nil
|
||||
}
|
||||
|
||||
@@ -31,7 +31,7 @@ var (
|
||||
|
||||
const eth1LookBackPeriod = 100
|
||||
const eth1DataSavingInterval = 100
|
||||
const eth1HeaderReqLimit = 1000
|
||||
const defaultEth1HeaderReqLimit = uint64(1000)
|
||||
const depositlogRequestLimit = 10000
|
||||
|
||||
// Eth2GenesisPowchainInfo retrieves the genesis time and eth1 block number of the beacon chain
|
||||
@@ -81,14 +81,7 @@ func (s *Service) ProcessLog(ctx context.Context, depositLog gethTypes.Log) erro
|
||||
return errors.Wrap(err, "Could not process deposit log")
|
||||
}
|
||||
if s.lastReceivedMerkleIndex%eth1DataSavingInterval == 0 {
|
||||
eth1Data := &protodb.ETH1ChainData{
|
||||
CurrentEth1Data: s.latestEth1Data,
|
||||
ChainstartData: s.chainStartData,
|
||||
BeaconState: s.preGenesisState.InnerStateUnsafe(), // I promise not to mutate it!
|
||||
Trie: s.depositTrie.ToProto(),
|
||||
DepositContainers: s.depositCache.AllDepositContainers(ctx),
|
||||
}
|
||||
return s.beaconDB.SavePowchainData(ctx, eth1Data)
|
||||
return s.savePowchainData(ctx)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -231,6 +224,11 @@ func (s *Service) ProcessChainStart(genesisTime uint64, eth1BlockHash [32]byte,
|
||||
StartTime: chainStartTime,
|
||||
},
|
||||
})
|
||||
if err := s.savePowchainData(s.ctx); err != nil {
|
||||
// continue on, if the save fails as this will get re-saved
|
||||
// in the next interval.
|
||||
log.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) createGenesisTime(timeStamp uint64) uint64 {
|
||||
@@ -278,7 +276,7 @@ func (s *Service) processPastLogs(ctx context.Context) error {
|
||||
}
|
||||
for currentBlockNum < latestFollowHeight {
|
||||
start := currentBlockNum
|
||||
end := currentBlockNum + eth1HeaderReqLimit
|
||||
end := currentBlockNum + s.eth1HeaderReqLimit
|
||||
// Appropriately bound the request, as we do not
|
||||
// want request blocks beyond the current follow distance.
|
||||
if end > latestFollowHeight {
|
||||
@@ -352,9 +350,9 @@ func (s *Service) processPastLogs(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// requestBatchedLogs requests and processes all the logs from the period
|
||||
// last polled to now.
|
||||
func (s *Service) requestBatchedLogs(ctx context.Context) error {
|
||||
// requestBatchedHeadersAndLogs requests and processes all the headers and
|
||||
// logs from the period last polled to now.
|
||||
func (s *Service) requestBatchedHeadersAndLogs(ctx context.Context) error {
|
||||
// We request for the nth block behind the current head, in order to have
|
||||
// stabilized logs when we retrieve it from the 1.0 chain.
|
||||
|
||||
@@ -363,7 +361,12 @@ func (s *Service) requestBatchedLogs(ctx context.Context) error {
|
||||
return err
|
||||
}
|
||||
for i := s.latestEth1Data.LastRequestedBlock + 1; i <= requestedBlock; i++ {
|
||||
err := s.ProcessETH1Block(ctx, big.NewInt(int64(i)))
|
||||
// Cache eth1 block header here.
|
||||
_, err := s.BlockHashByHeight(ctx, big.NewInt(int64(i)))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = s.ProcessETH1Block(ctx, big.NewInt(int64(i)))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -490,3 +493,15 @@ func (s *Service) checkForChainstart(blockHash [32]byte, blockNumber *big.Int, b
|
||||
s.ProcessChainStart(s.chainStartData.GenesisTime, blockHash, blockNumber)
|
||||
}
|
||||
}
|
||||
|
||||
// save all powchain related metadata to disk.
|
||||
func (s *Service) savePowchainData(ctx context.Context) error {
|
||||
eth1Data := &protodb.ETH1ChainData{
|
||||
CurrentEth1Data: s.latestEth1Data,
|
||||
ChainstartData: s.chainStartData,
|
||||
BeaconState: s.preGenesisState.InnerStateUnsafe(), // I promise not to mutate it!
|
||||
Trie: s.depositTrie.ToProto(),
|
||||
DepositContainers: s.depositCache.AllDepositContainers(ctx),
|
||||
}
|
||||
return s.beaconDB.SavePowchainData(ctx, eth1Data)
|
||||
}
|
||||
|
||||
@@ -25,6 +25,7 @@ import (
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
|
||||
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db"
|
||||
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
@@ -148,16 +149,18 @@ type Service struct {
|
||||
runError error
|
||||
preGenesisState *stateTrie.BeaconState
|
||||
stateGen *stategen.State
|
||||
eth1HeaderReqLimit uint64
|
||||
}
|
||||
|
||||
// Web3ServiceConfig defines a config struct for web3 service to use through its life cycle.
|
||||
type Web3ServiceConfig struct {
|
||||
HTTPEndPoint string
|
||||
DepositContract common.Address
|
||||
BeaconDB db.HeadAccessDatabase
|
||||
DepositCache *depositcache.DepositCache
|
||||
StateNotifier statefeed.Notifier
|
||||
StateGen *stategen.State
|
||||
HTTPEndPoint string
|
||||
DepositContract common.Address
|
||||
BeaconDB db.HeadAccessDatabase
|
||||
DepositCache *depositcache.DepositCache
|
||||
StateNotifier statefeed.Notifier
|
||||
StateGen *stategen.State
|
||||
Eth1HeaderReqLimit uint64
|
||||
}
|
||||
|
||||
// NewService sets up a new instance with an ethclient when
|
||||
@@ -175,6 +178,11 @@ func NewService(ctx context.Context, config *Web3ServiceConfig) (*Service, error
|
||||
return nil, errors.Wrap(err, "could not setup genesis state")
|
||||
}
|
||||
|
||||
eth1HeaderReqLimit := config.Eth1HeaderReqLimit
|
||||
if eth1HeaderReqLimit == 0 {
|
||||
eth1HeaderReqLimit = defaultEth1HeaderReqLimit
|
||||
}
|
||||
|
||||
s := &Service{
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
@@ -200,6 +208,7 @@ func NewService(ctx context.Context, config *Web3ServiceConfig) (*Service, error
|
||||
preGenesisState: genState,
|
||||
headTicker: time.NewTicker(time.Duration(params.BeaconConfig().SecondsPerETH1Block) * time.Second),
|
||||
stateGen: config.StateGen,
|
||||
eth1HeaderReqLimit: eth1HeaderReqLimit,
|
||||
}
|
||||
|
||||
eth1Data, err := config.BeaconDB.PowchainData(ctx)
|
||||
@@ -348,22 +357,6 @@ func (s *Service) followBlockHeight(ctx context.Context) (uint64, error) {
|
||||
if s.latestEth1Data.BlockHeight > params.BeaconConfig().Eth1FollowDistance {
|
||||
latestValidBlock = s.latestEth1Data.BlockHeight - params.BeaconConfig().Eth1FollowDistance
|
||||
}
|
||||
blockTime, err := s.BlockTimeByHeight(ctx, big.NewInt(int64(latestValidBlock)))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
followTime := func(t uint64) uint64 {
|
||||
return t + params.BeaconConfig().Eth1FollowDistance*params.BeaconConfig().SecondsPerETH1Block
|
||||
}
|
||||
for followTime(blockTime) > s.latestEth1Data.BlockTime && latestValidBlock > 0 {
|
||||
// reduce block height to get eth1 block which
|
||||
// fulfills stated condition
|
||||
latestValidBlock--
|
||||
blockTime, err = s.BlockTimeByHeight(ctx, big.NewInt(int64(latestValidBlock)))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
return latestValidBlock, nil
|
||||
}
|
||||
|
||||
@@ -392,20 +385,38 @@ func (s *Service) dialETH1Nodes() (*ethclient.Client, *gethRPC.Client, error) {
return nil, nil, err
}
httpClient := ethclient.NewClient(httpRPCClient)

// Add a method to clean-up and close clients in the event
// of any connection failure.
closeClients := func() {
httpRPCClient.Close()
httpClient.Close()
}
syncProg, err := httpClient.SyncProgress(s.ctx)
if err != nil {
closeClients()
return nil, nil, err
}
if syncProg != nil {
closeClients()
return nil, nil, errors.New("eth1 node has not finished syncing yet")
}
// Make a simple call to ensure we are actually connected to a working node.
cID, err := httpClient.ChainID(s.ctx)
if err != nil {
closeClients()
return nil, nil, err
}
nID, err := httpClient.NetworkID(s.ctx)
if err != nil {
closeClients()
return nil, nil, err
}
if cID.Uint64() != params.BeaconNetworkConfig().ChainID {
closeClients()
return nil, nil, fmt.Errorf("eth1 node using incorrect chain id, %d != %d", cID.Uint64(), params.BeaconNetworkConfig().ChainID)
}
if nID.Uint64() != params.BeaconNetworkConfig().NetworkID {
closeClients()
return nil, nil, fmt.Errorf("eth1 node using incorrect network id, %d != %d", nID.Uint64(), params.BeaconNetworkConfig().NetworkID)
}

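For context, the dial-time checks added above can be reproduced against any endpoint using go-ethereum's ethclient alone: refuse a node that is still syncing or that reports an unexpected chain or network ID. A hedged, standalone sketch; checkEth1Endpoint and its expected-ID parameters are illustrative and not part of Prysm, which takes these values from params.BeaconNetworkConfig():

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/ethclient"
)

// checkEth1Endpoint rejects endpoints that are still syncing or that report an
// unexpected chain/network ID, mirroring the checks in the hunk above.
func checkEth1Endpoint(ctx context.Context, endpoint string, wantChainID, wantNetworkID uint64) error {
	client, err := ethclient.Dial(endpoint)
	if err != nil {
		return err
	}
	defer client.Close()

	// A non-nil SyncProgress means the node is still catching up.
	syncProg, err := client.SyncProgress(ctx)
	if err != nil {
		return err
	}
	if syncProg != nil {
		return fmt.Errorf("eth1 node at %s has not finished syncing", endpoint)
	}
	cID, err := client.ChainID(ctx)
	if err != nil {
		return err
	}
	if cID.Uint64() != wantChainID {
		return fmt.Errorf("wrong chain id: got %d, want %d", cID.Uint64(), wantChainID)
	}
	nID, err := client.NetworkID(ctx)
	if err != nil {
		return err
	}
	if nID.Uint64() != wantNetworkID {
		return fmt.Errorf("wrong network id: got %d, want %d", nID.Uint64(), wantNetworkID)
	}
	return nil
}

func main() {
	// The endpoint and IDs below are placeholders; substitute the values for
	// the network you actually run against.
	if err := checkEth1Endpoint(context.Background(), "http://localhost:8545", 5, 5); err != nil {
		log.Fatal(err)
	}
}
```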
@@ -643,7 +654,7 @@ func (s *Service) handleETH1FollowDistance() {
log.Error("Beacon node is not respecting the follow distance")
return
}
if err := s.requestBatchedLogs(ctx); err != nil {
if err := s.requestBatchedHeadersAndLogs(ctx); err != nil {
s.runError = err
log.Error(err)
return
@@ -681,7 +692,11 @@ func (s *Service) initPOWService() {
continue
}
// Cache eth1 headers from our voting period.
s.cacheHeadersForEth1DataVote(ctx)
if err := s.cacheHeadersForEth1DataVote(ctx); err != nil {
log.Errorf("Unable to process past headers %v", err)
s.retryETH1Node(err)
continue
}
return
}
}
@@ -756,23 +771,46 @@ func (s *Service) logTillChainStart() {

// cacheHeadersForEth1DataVote makes sure that voting for eth1data after startup utilizes cached headers
// instead of making multiple RPC requests to the ETH1 endpoint.
func (s *Service) cacheHeadersForEth1DataVote(ctx context.Context) {
blocksPerVotingPeriod := params.BeaconConfig().EpochsPerEth1VotingPeriod * params.BeaconConfig().SlotsPerEpoch *
params.BeaconConfig().SecondsPerSlot / params.BeaconConfig().SecondsPerETH1Block

func (s *Service) cacheHeadersForEth1DataVote(ctx context.Context) error {
// Find the end block to request from.
end, err := s.followBlockHeight(ctx)
if err != nil {
log.Errorf("Unable to fetch height of follow block: %v", err)
return err
}

// We fetch twice the number of headers just to be safe.
start := uint64(0)
if end >= 2*blocksPerVotingPeriod {
start = end - 2*blocksPerVotingPeriod
start, err := s.determineEarliestVotingBlock(ctx, end)
if err != nil {
return err
}
// We call batchRequestHeaders for its header caching side-effect, so we don't need the return value.
_, err = s.batchRequestHeaders(start, end)
if err != nil {
log.Errorf("Unable to cache headers: %v", err)
return err
}
return nil
}

// determines the earliest voting block from which to start caching all our previous headers.
func (s *Service) determineEarliestVotingBlock(ctx context.Context, followBlock uint64) (uint64, error) {
genesisTime := s.chainStartData.GenesisTime
currSlot := helpers.CurrentSlot(genesisTime)

// In the event genesis has not occurred yet, we just go back follow_distance blocks.
if genesisTime == 0 || currSlot == 0 {
earliestBlk := uint64(0)
if followBlock > params.BeaconConfig().Eth1FollowDistance {
earliestBlk = followBlock - params.BeaconConfig().Eth1FollowDistance
}
return earliestBlk, nil
}
votingTime := helpers.VotingPeriodStartTime(genesisTime, currSlot)
followBackDist := 2 * params.BeaconConfig().SecondsPerETH1Block * params.BeaconConfig().Eth1FollowDistance
if followBackDist > votingTime {
return 0, errors.Errorf("invalid genesis time provided. %d > %d", followBackDist, votingTime)
}
earliestValidTime := votingTime - followBackDist
blkNum, err := s.BlockNumberByTimestamp(ctx, earliestValidTime)
if err != nil {
return 0, err
}
return blkNum.Uint64(), nil
}

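To make the new determineEarliestVotingBlock arithmetic concrete: the earliest timestamp worth caching is the start of the current eth1-data voting period minus twice the follow distance expressed in seconds, and that timestamp is then handed to BlockNumberByTimestamp. A small sketch under that assumption; the plain numeric parameters stand in for params.BeaconConfig() values and chain-start data, and the sample numbers are illustrative only:

```go
package main

import (
	"errors"
	"fmt"
)

// earliestVotingTime reproduces the arithmetic in determineEarliestVotingBlock:
// step back twice the follow distance (in seconds) from the voting period start.
func earliestVotingTime(votingPeriodStart, secondsPerETH1Block, eth1FollowDistance uint64) (uint64, error) {
	followBack := 2 * secondsPerETH1Block * eth1FollowDistance
	if followBack > votingPeriodStart {
		return 0, errors.New("voting period starts before the follow-back window")
	}
	return votingPeriodStart - followBack, nil
}

func main() {
	// Illustrative numbers: 14s eth1 blocks, a follow distance of 1024 blocks.
	t, err := earliestVotingTime(1606824023, 14, 1024)
	if err != nil {
		panic(err)
	}
	fmt.Println(t) // timestamp the service would pass to BlockNumberByTimestamp
}
```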
@@ -452,3 +452,78 @@ func TestInitDepositCache_OK(t *testing.T) {
|
||||
require.NoError(t, s.initDepositCaches(context.Background(), ctrs))
|
||||
require.Equal(t, 3, len(s.depositCache.PendingContainers(context.Background(), nil)))
|
||||
}
|
||||
|
||||
func TestNewService_EarliestVotingBlock(t *testing.T) {
|
||||
testAcc, err := contracts.Setup()
|
||||
require.NoError(t, err, "Unable to set up simulated backend")
|
||||
beaconDB, _ := dbutil.SetupDB(t)
|
||||
web3Service, err := NewService(context.Background(), &Web3ServiceConfig{
|
||||
HTTPEndPoint: endpoint,
|
||||
DepositContract: testAcc.ContractAddr,
|
||||
BeaconDB: beaconDB,
|
||||
})
|
||||
require.NoError(t, err, "unable to setup web3 ETH1.0 chain service")
|
||||
web3Service.eth1DataFetcher = &goodFetcher{backend: testAcc.Backend}
|
||||
// simulated backend sets eth1 block
|
||||
// time as 10 seconds
|
||||
conf := params.BeaconConfig()
|
||||
conf.SecondsPerETH1Block = 10
|
||||
conf.Eth1FollowDistance = 50
|
||||
params.OverrideBeaconConfig(conf)
|
||||
defer func() {
|
||||
params.UseMainnetConfig()
|
||||
}()
|
||||
|
||||
// Genesis not set
|
||||
followBlock := uint64(2000)
|
||||
blk, err := web3Service.determineEarliestVotingBlock(context.Background(), followBlock)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, followBlock-conf.Eth1FollowDistance, blk, "unexpected earliest voting block")
|
||||
|
||||
// Genesis is set.
|
||||
|
||||
numToForward := 1500
|
||||
// forward 1500 blocks
|
||||
for i := 0; i < numToForward; i++ {
|
||||
testAcc.Backend.Commit()
|
||||
}
|
||||
currTime := testAcc.Backend.Blockchain().CurrentHeader().Time
|
||||
now := time.Now()
|
||||
err = testAcc.Backend.AdjustTime(now.Sub(time.Unix(int64(currTime), 0)))
|
||||
require.NoError(t, err)
|
||||
testAcc.Backend.Commit()
|
||||
|
||||
currTime = testAcc.Backend.Blockchain().CurrentHeader().Time
|
||||
web3Service.latestEth1Data.BlockHeight = testAcc.Backend.Blockchain().CurrentHeader().Number.Uint64()
|
||||
web3Service.latestEth1Data.BlockTime = testAcc.Backend.Blockchain().CurrentHeader().Time
|
||||
web3Service.chainStartData.GenesisTime = currTime
|
||||
|
||||
// With a current slot of zero, only request follow_blocks behind.
|
||||
blk, err = web3Service.determineEarliestVotingBlock(context.Background(), followBlock)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, followBlock-conf.Eth1FollowDistance, blk, "unexpected earliest voting block")
|
||||
|
||||
}
|
||||
|
||||
func TestNewService_Eth1HeaderRequLimit(t *testing.T) {
|
||||
testAcc, err := contracts.Setup()
|
||||
require.NoError(t, err, "Unable to set up simulated backend")
|
||||
beaconDB, _ := dbutil.SetupDB(t)
|
||||
|
||||
s1, err := NewService(context.Background(), &Web3ServiceConfig{
|
||||
HTTPEndPoint: endpoint,
|
||||
DepositContract: testAcc.ContractAddr,
|
||||
BeaconDB: beaconDB,
|
||||
})
|
||||
require.NoError(t, err, "unable to setup web3 ETH1.0 chain service")
|
||||
assert.Equal(t, defaultEth1HeaderReqLimit, s1.eth1HeaderReqLimit, "default eth1 header request limit not set")
|
||||
|
||||
s2, err := NewService(context.Background(), &Web3ServiceConfig{
|
||||
HTTPEndPoint: endpoint,
|
||||
DepositContract: testAcc.ContractAddr,
|
||||
BeaconDB: beaconDB,
|
||||
Eth1HeaderReqLimit: uint64(150),
|
||||
})
|
||||
require.NoError(t, err, "unable to setup web3 ETH1.0 chain service")
|
||||
assert.Equal(t, uint64(150), s2.eth1HeaderReqLimit, "unable to set eth1HeaderRequestLimit")
|
||||
}
|
||||
|
||||
@@ -265,7 +265,7 @@ func (bs *Server) StreamIndexedAttestations(
|
||||
}
|
||||
if data.Attestation == nil || data.Attestation.Aggregate == nil {
|
||||
// One nil attestation shouldn't stop the stream.
|
||||
log.Info("Indexed attestations stream got nil attestation or nil attestation aggregate")
|
||||
log.Debug("Indexed attestations stream got nil attestation or nil attestation aggregate")
|
||||
continue
|
||||
}
|
||||
bs.ReceivedAttestationsBuffer <- data.Attestation.Aggregate
|
||||
@@ -340,7 +340,7 @@ func (bs *Server) collectReceivedAttestations(ctx context.Context) {
|
||||
// We aggregate the received attestations, we know they all have the same data root.
|
||||
aggAtts, err := attaggregation.Aggregate(atts)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not aggregate collected attestations")
|
||||
log.WithError(err).Error("Could not aggregate attestations")
|
||||
continue
|
||||
}
|
||||
if len(aggAtts) == 0 {
|
||||
@@ -356,7 +356,7 @@ func (bs *Server) collectReceivedAttestations(ctx context.Context) {
|
||||
case att := <-bs.ReceivedAttestationsBuffer:
|
||||
attDataRoot, err := att.Data.HashTreeRoot()
|
||||
if err != nil {
|
||||
log.Errorf("Could not hash tree root data: %v", err)
|
||||
log.Errorf("Could not hash tree root attestation data: %v", err)
|
||||
continue
|
||||
}
|
||||
attsByRoot[attDataRoot] = append(attsByRoot[attDataRoot], att)
|
||||
|
||||
@@ -624,6 +624,13 @@ func TestServer_ListIndexedAttestations_GenesisEpoch(t *testing.T) {
|
||||
})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, len(indexedAtts), len(res.IndexedAttestations), "Incorrect indexed attestations length")
|
||||
sort.Slice(indexedAtts, func(i, j int) bool {
|
||||
return indexedAtts[i].Data.Slot < indexedAtts[j].Data.Slot
|
||||
})
|
||||
sort.Slice(res.IndexedAttestations, func(i, j int) bool {
|
||||
return res.IndexedAttestations[i].Data.Slot < res.IndexedAttestations[j].Data.Slot
|
||||
})
|
||||
|
||||
assert.DeepEqual(t, indexedAtts, res.IndexedAttestations, "Incorrect list indexed attestations response")
|
||||
}
|
||||
|
||||
|
||||
@@ -38,7 +38,7 @@ func (bs *Server) ListBlocks(
|
||||
case *ethpb.ListBlocksRequest_Epoch:
|
||||
blks, _, err := bs.BeaconDB.Blocks(ctx, filters.NewFilter().SetStartEpoch(q.Epoch).SetEndEpoch(q.Epoch))
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Failed to get blocks: %v", err)
|
||||
return nil, status.Errorf(codes.Internal, "Could not get blocks: %v", err)
|
||||
}
|
||||
|
||||
numBlks := len(blks)
|
||||
@@ -194,12 +194,12 @@ func (bs *Server) StreamBlocks(_ *ptypes.Empty, stream ethpb.BeaconChain_StreamB
|
||||
}
|
||||
headState, err := bs.HeadFetcher.HeadState(bs.Ctx)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("blockSlot", data.SignedBlock.Block.Slot).Warn("Could not get head state to verify block signature")
|
||||
log.WithError(err).WithField("blockSlot", data.SignedBlock.Block.Slot).Error("Could not get head state")
|
||||
continue
|
||||
}
|
||||
|
||||
if err := blocks.VerifyBlockSignature(headState, data.SignedBlock); err != nil {
|
||||
log.WithError(err).WithField("blockSlot", data.SignedBlock.Block.Slot).Warn("Could not verify block signature")
|
||||
log.WithError(err).WithField("blockSlot", data.SignedBlock.Block.Slot).Error("Could not verify block signature")
|
||||
continue
|
||||
}
|
||||
if err := stream.Send(data.SignedBlock); err != nil {
|
||||
|
||||
@@ -73,7 +73,7 @@ func (bs *Server) retrieveCommitteesForEpoch(
|
||||
}
|
||||
requestedState, err := bs.StateGen.StateBySlot(ctx, startSlot)
|
||||
if err != nil {
|
||||
return nil, nil, status.Error(codes.Internal, "Could not get state")
|
||||
return nil, nil, status.Errorf(codes.Internal, "Could not get state: %v", err)
|
||||
}
|
||||
seed, err := helpers.Seed(requestedState, epoch, params.BeaconConfig().DomainBeaconAttester)
|
||||
if err != nil {
|
||||
|
||||
@@ -82,7 +82,7 @@ func TestServer_ListBeaconCommittees_PreviousEpoch(t *testing.T) {
|
||||
mixes[i] = make([]byte, 32)
|
||||
}
|
||||
require.NoError(t, headState.SetRandaoMixes(mixes))
|
||||
require.NoError(t, headState.SetSlot(params.BeaconConfig().SlotsPerEpoch*2))
|
||||
require.NoError(t, headState.SetSlot(params.BeaconConfig().SlotsPerEpoch))
|
||||
|
||||
b := testutil.NewBeaconBlock()
|
||||
require.NoError(t, db.SaveBlock(ctx, b))
|
||||
|
||||
@@ -358,7 +358,7 @@ func (is *infostream) generatePendingValidatorInfo(info *ethpb.ValidatorInfo) (*
|
||||
if deposit.block != nil {
|
||||
info.Status = ethpb.ValidatorStatus_DEPOSITED
|
||||
if queueTimestamp, err := is.depositQueueTimestamp(deposit.block); err != nil {
|
||||
log.WithError(err).Error("Failed to obtain queue activation timestamp")
|
||||
log.WithError(err).Error("Could not obtain queue activation timestamp")
|
||||
} else {
|
||||
info.TransitionTimestamp = queueTimestamp
|
||||
}
|
||||
@@ -415,7 +415,7 @@ func (is *infostream) calculateActivationTimeForPendingValidators(res []*ethpb.V
|
||||
for curEpoch := epoch + 1; len(sortedIndices) > 0 && len(pendingValidators) > 0; curEpoch++ {
|
||||
toProcess, err := helpers.ValidatorChurnLimit(numAttestingValidators)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to determine validator churn limit")
|
||||
log.WithError(err).Error("Could not determine validator churn limit")
|
||||
}
|
||||
if toProcess > uint64(len(sortedIndices)) {
|
||||
toProcess = uint64(len(sortedIndices))
|
||||
@@ -456,7 +456,7 @@ func (is *infostream) handleBlockProcessed() {
|
||||
is.currentEpoch = blockEpoch
|
||||
if err := is.sendValidatorsInfo(is.pubKeys); err != nil {
|
||||
// Client probably disconnected.
|
||||
log.WithError(err).Debug("Failed to send infostream response")
|
||||
log.WithError(err).Debug("Could not send infostream response")
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -958,7 +958,7 @@ func TestServer_ListValidators_FromOldEpoch(t *testing.T) {
|
||||
}
|
||||
|
||||
st := testutil.NewBeaconState()
|
||||
require.NoError(t, st.SetSlot(30*params.BeaconConfig().SlotsPerEpoch))
|
||||
require.NoError(t, st.SetSlot(20*params.BeaconConfig().SlotsPerEpoch))
|
||||
require.NoError(t, st.SetValidators(validators))
|
||||
b := testutil.NewBeaconBlock()
|
||||
require.NoError(t, db.SaveBlock(ctx, b))
|
||||
|
||||
@@ -18,7 +18,7 @@ func (bs *Server) ListValidators(ctx context.Context, req *ethpb.StateValidators
|
||||
}
|
||||
|
||||
// ListValidatorBalances returns a filterable list of validator balances.
|
||||
func (bs *Server) ListValidatorBalances(ctx context.Context, req *ethpb.StateValidatorsRequest) (*ethpb.ValidatorBalancesResponse, error) {
|
||||
func (bs *Server) ListValidatorBalances(ctx context.Context, req *ethpb.ValidatorBalancesRequest) (*ethpb.ValidatorBalancesResponse, error) {
|
||||
return nil, errors.New("unimplemented")
|
||||
}
|
||||
|
||||
|
||||
@@ -11,6 +11,7 @@ go_library(
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/p2p:go_default_library",
|
||||
"//beacon-chain/sync:go_default_library",
|
||||
"//proto/beacon/rpc/v1:go_default_library",
|
||||
"//shared/version:go_default_library",
|
||||
"@com_github_gogo_protobuf//types:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p_core//network:go_default_library",
|
||||
@@ -32,6 +33,7 @@ go_test(
|
||||
"//beacon-chain/p2p:go_default_library",
|
||||
"//beacon-chain/p2p/testing:go_default_library",
|
||||
"//beacon-chain/sync/initial-sync/testing:go_default_library",
|
||||
"//proto/beacon/rpc/v1:go_default_library",
|
||||
"//shared/bytesutil:go_default_library",
|
||||
"//shared/testutil:go_default_library",
|
||||
"//shared/testutil/assert:go_default_library",
|
||||
|
||||
@@ -17,6 +17,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/sync"
|
||||
pbrpc "github.com/prysmaticlabs/prysm/proto/beacon/rpc/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/version"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
@@ -27,13 +28,15 @@ import (
|
||||
// providing RPC endpoints for verifying a beacon node's sync status, genesis and
|
||||
// version information, and services the node implements and runs.
|
||||
type Server struct {
|
||||
SyncChecker sync.Checker
|
||||
Server *grpc.Server
|
||||
BeaconDB db.ReadOnlyDatabase
|
||||
PeersFetcher p2p.PeersProvider
|
||||
PeerManager p2p.PeerManager
|
||||
GenesisTimeFetcher blockchain.TimeFetcher
|
||||
GenesisFetcher blockchain.GenesisFetcher
|
||||
SyncChecker sync.Checker
|
||||
Server *grpc.Server
|
||||
BeaconDB db.ReadOnlyDatabase
|
||||
PeersFetcher p2p.PeersProvider
|
||||
PeerManager p2p.PeerManager
|
||||
GenesisTimeFetcher blockchain.TimeFetcher
|
||||
GenesisFetcher blockchain.GenesisFetcher
|
||||
BeaconMonitoringHost string
|
||||
BeaconMonitoringPort int
|
||||
}
|
||||
|
||||
// GetSyncStatus checks the current network sync status of the node.
|
||||
@@ -117,6 +120,13 @@ func (ns *Server) GetHost(_ context.Context, _ *ptypes.Empty) (*ethpb.HostData,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// GetLogsEndpoint
|
||||
func (ns *Server) GetLogsEndpoint(_ context.Context, _ *ptypes.Empty) (*pbrpc.LogsEndpointResponse, error) {
|
||||
return &pbrpc.LogsEndpointResponse{
|
||||
BeaconLogsEndpoint: fmt.Sprintf("%s:%d", ns.BeaconMonitoringHost, ns.BeaconMonitoringPort),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// GetPeer returns the data known about the peer defined by the provided peer id.
|
||||
func (ns *Server) GetPeer(_ context.Context, peerReq *ethpb.PeerRequest) (*ethpb.Peer, error) {
|
||||
pid, err := peer.Decode(peerReq.PeerId)
|
||||
|
||||
@@ -15,6 +15,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
|
||||
mockP2p "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing"
|
||||
mockSync "github.com/prysmaticlabs/prysm/beacon-chain/sync/initial-sync/testing"
|
||||
pbrpc "github.com/prysmaticlabs/prysm/proto/beacon/rpc/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
|
||||
@@ -77,6 +78,19 @@ func TestNodeServer_GetVersion(t *testing.T) {
|
||||
assert.Equal(t, v, res.Version)
|
||||
}
|
||||
|
||||
func TestNodeServer_GetLogsEndpoint(t *testing.T) {
|
||||
ns := &Server{
|
||||
BeaconMonitoringHost: "localhost",
|
||||
BeaconMonitoringPort: 8080,
|
||||
}
|
||||
res, err := ns.GetLogsEndpoint(context.Background(), &ptypes.Empty{})
|
||||
require.NoError(t, err)
|
||||
want := &pbrpc.LogsEndpointResponse{
|
||||
BeaconLogsEndpoint: "localhost:8080",
|
||||
}
|
||||
assert.DeepEqual(t, want, res)
|
||||
}
|
||||
|
||||
func TestNodeServer_GetImplementedServices(t *testing.T) {
|
||||
server := grpc.NewServer()
|
||||
ns := &Server{
|
||||
|
||||
@@ -4,6 +4,7 @@ package rpc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"sync"
|
||||
@@ -77,6 +78,8 @@ type Service struct {
|
||||
syncService chainSync.Checker
|
||||
host string
|
||||
port string
|
||||
beaconMonitoringHost string
|
||||
beaconMonitoringPort int
|
||||
listener net.Listener
|
||||
withCert string
|
||||
withKey string
|
||||
@@ -104,6 +107,8 @@ type Config struct {
|
||||
Port string
|
||||
CertFlag string
|
||||
KeyFlag string
|
||||
BeaconMonitoringHost string
|
||||
BeaconMonitoringPort int
|
||||
BeaconDB db.HeadAccessDatabase
|
||||
ChainInfoFetcher blockchain.ChainInfoFetcher
|
||||
HeadFetcher blockchain.HeadFetcher
|
||||
@@ -161,6 +166,8 @@ func NewService(ctx context.Context, cfg *Config) *Service {
|
||||
syncService: cfg.SyncService,
|
||||
host: cfg.Host,
|
||||
port: cfg.Port,
|
||||
beaconMonitoringHost: cfg.BeaconMonitoringHost,
|
||||
beaconMonitoringPort: cfg.BeaconMonitoringPort,
|
||||
withCert: cfg.CertFlag,
|
||||
withKey: cfg.KeyFlag,
|
||||
depositFetcher: cfg.DepositFetcher,
|
||||
@@ -249,13 +256,15 @@ func (s *Service) Start() {
|
||||
StateGen: s.stateGen,
|
||||
}
|
||||
nodeServer := &node.Server{
|
||||
BeaconDB: s.beaconDB,
|
||||
Server: s.grpcServer,
|
||||
SyncChecker: s.syncService,
|
||||
GenesisTimeFetcher: s.genesisTimeFetcher,
|
||||
PeersFetcher: s.peersFetcher,
|
||||
PeerManager: s.peerManager,
|
||||
GenesisFetcher: s.genesisFetcher,
|
||||
BeaconDB: s.beaconDB,
|
||||
Server: s.grpcServer,
|
||||
SyncChecker: s.syncService,
|
||||
GenesisTimeFetcher: s.genesisTimeFetcher,
|
||||
PeersFetcher: s.peersFetcher,
|
||||
PeerManager: s.peerManager,
|
||||
GenesisFetcher: s.genesisFetcher,
|
||||
BeaconMonitoringHost: s.beaconMonitoringHost,
|
||||
BeaconMonitoringPort: s.beaconMonitoringPort,
|
||||
}
|
||||
beaconChainServer := &beacon.Server{
|
||||
Ctx: s.ctx,
|
||||
@@ -297,6 +306,7 @@ func (s *Service) Start() {
|
||||
SyncChecker: s.syncService,
|
||||
}
|
||||
ethpb.RegisterNodeServer(s.grpcServer, nodeServer)
|
||||
pbrpc.RegisterHealthServer(s.grpcServer, nodeServer)
|
||||
ethpb.RegisterBeaconChainServer(s.grpcServer, beaconChainServer)
|
||||
ethpbv1.RegisterBeaconChainServer(s.grpcServer, beaconChainServerV1)
|
||||
if s.enableDebugRPCEndpoints {
|
||||
@@ -337,6 +347,9 @@ func (s *Service) Stop() error {
|
||||
|
||||
// Status returns nil or credentialError
|
||||
func (s *Service) Status() error {
|
||||
if s.syncService.Syncing() {
|
||||
return errors.New("syncing")
|
||||
}
|
||||
if s.credentialError != nil {
|
||||
return s.credentialError
|
||||
}
|
||||
|
||||
@@ -45,7 +45,7 @@ func TestLifecycle_OK(t *testing.T) {
|
||||
|
||||
func TestStatus_CredentialError(t *testing.T) {
|
||||
credentialErr := errors.New("credentialError")
|
||||
s := &Service{credentialError: credentialErr}
|
||||
s := &Service{credentialError: credentialErr, syncService: &mockSync.Sync{IsSyncing: false}}
|
||||
|
||||
assert.ErrorContains(t, s.credentialError.Error(), s.Status())
|
||||
}
|
||||
|
||||
@@ -68,7 +68,7 @@ func (vs *Server) SubmitAggregateSelectionProof(ctx context.Context, req *ethpb.
|
||||
if len(aggregatedAtts) == 0 {
|
||||
aggregatedAtts = vs.AttPool.UnaggregatedAttestationsBySlotIndex(req.Slot, req.CommitteeIndex)
|
||||
if len(aggregatedAtts) == 0 {
|
||||
return nil, status.Errorf(codes.Internal, "Could not find attestation for slot and committee in pool")
|
||||
return nil, status.Errorf(codes.NotFound, "Could not find attestation for slot and committee in pool")
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -39,6 +39,9 @@ func (vs *Server) StreamDuties(req *ethpb.DutiesRequest, stream ethpb.BeaconNode
// If we are post-genesis time, then set the current epoch to
// the number of epochs since the genesis time, otherwise 0 by default.
genesisTime := vs.GenesisTimeFetcher.GenesisTime()
if genesisTime.IsZero() {
return status.Error(codes.Unavailable, "genesis time is not set")
}
var currentEpoch uint64
if genesisTime.Before(timeutils.Now()) {
currentEpoch = slotutil.EpochsSinceGenesis(vs.GenesisTimeFetcher.GenesisTime())

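The epoch computation used above reduces to elapsed wall-clock time divided by the length of an epoch. A tiny, hedged stand-in for slotutil.EpochsSinceGenesis, with assumed config values (12s slots, 32-slot epochs) rather than the real params.BeaconConfig():

```go
package main

import (
	"fmt"
	"time"
)

// epochsSinceGenesis divides elapsed time since genesis by the epoch length.
// secondsPerSlot and slotsPerEpoch are assumed config values.
func epochsSinceGenesis(genesis, now time.Time, secondsPerSlot, slotsPerEpoch uint64) uint64 {
	if now.Before(genesis) {
		return 0 // pre-genesis: the duties stream starts at epoch 0 by default
	}
	elapsed := uint64(now.Sub(genesis).Seconds())
	return elapsed / (secondsPerSlot * slotsPerEpoch)
}

func main() {
	genesis := time.Now().Add(-26 * time.Minute)
	// With 12s slots and 32-slot epochs, 26 minutes is a little over 4 epochs.
	fmt.Println(epochsSinceGenesis(genesis, time.Now(), 12, 32))
}
```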
@@ -64,7 +64,7 @@ func (vs *Server) GetAttestationData(ctx context.Context, req *ethpb.Attestation
|
||||
}
|
||||
defer func() {
|
||||
if err := vs.AttestationCache.MarkNotInProgress(req); err != nil {
|
||||
log.WithError(err).Error("Failed to mark cache not in progress")
|
||||
log.WithError(err).Error("Could not mark cache not in progress")
|
||||
}
|
||||
}()
|
||||
|
||||
@@ -89,7 +89,7 @@ func (vs *Server) GetAttestationData(ctx context.Context, req *ethpb.Attestation
|
||||
}
|
||||
}
|
||||
if headState == nil {
|
||||
return nil, status.Error(codes.Internal, "Failed to lookup parent state from head.")
|
||||
return nil, status.Error(codes.Internal, "Could not lookup parent state from head.")
|
||||
}
|
||||
|
||||
if helpers.CurrentEpoch(headState) < helpers.SlotToEpoch(req.Slot) {
|
||||
|
||||
@@ -192,12 +192,12 @@ func (vs *Server) eth1Data(ctx context.Context, slot uint64) (*ethpb.Eth1Data, e
|
||||
// Look up most recent block up to timestamp
|
||||
blockNumber, err := vs.Eth1BlockFetcher.BlockNumberByTimestamp(ctx, eth1VotingPeriodStartTime)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to get block number from timestamp")
|
||||
log.WithError(err).Error("Could not get block number from timestamp")
|
||||
return vs.randomETH1DataVote(ctx)
|
||||
}
|
||||
eth1Data, err := vs.defaultEth1DataResponse(ctx, blockNumber)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to get eth1 data from block number")
|
||||
log.WithError(err).Error("Could not get eth1 data from block number")
|
||||
return vs.randomETH1DataVote(ctx)
|
||||
}
|
||||
|
||||
@@ -237,12 +237,12 @@ func (vs *Server) eth1DataMajorityVote(ctx context.Context, beaconState *stateTr
|
||||
|
||||
lastBlockByEarliestValidTime, err := vs.Eth1BlockFetcher.BlockNumberByTimestamp(ctx, earliestValidTime)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to get last block by earliest valid time")
|
||||
log.WithError(err).Error("Could not get last block by earliest valid time")
|
||||
return vs.randomETH1DataVote(ctx)
|
||||
}
|
||||
timeOfLastBlockByEarliestValidTime, err := vs.Eth1BlockFetcher.BlockTimeByHeight(ctx, lastBlockByEarliestValidTime)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to get time of last block by earliest valid time")
|
||||
log.WithError(err).Error("Could not get time of last block by earliest valid time")
|
||||
return vs.randomETH1DataVote(ctx)
|
||||
}
|
||||
// Increment the earliest block if the original block's time is before valid time.
|
||||
@@ -253,12 +253,12 @@ func (vs *Server) eth1DataMajorityVote(ctx context.Context, beaconState *stateTr
|
||||
|
||||
lastBlockByLatestValidTime, err := vs.Eth1BlockFetcher.BlockNumberByTimestamp(ctx, latestValidTime)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to get last block by latest valid time")
|
||||
log.WithError(err).Error("Could not get last block by latest valid time")
|
||||
return vs.randomETH1DataVote(ctx)
|
||||
}
|
||||
timeOfLastBlockByLatestValidTime, err := vs.Eth1BlockFetcher.BlockTimeByHeight(ctx, lastBlockByLatestValidTime)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to get time of last block by latest valid time")
|
||||
log.WithError(err).Error("Could not get time of last block by latest valid time")
|
||||
return vs.randomETH1DataVote(ctx)
|
||||
}
|
||||
if timeOfLastBlockByLatestValidTime < earliestValidTime {
|
||||
@@ -278,7 +278,7 @@ func (vs *Server) eth1DataMajorityVote(ctx context.Context, beaconState *stateTr
if lastBlockDepositCount >= vs.HeadFetcher.HeadETH1Data().DepositCount {
hash, err := vs.Eth1BlockFetcher.BlockHashByHeight(ctx, lastBlockByLatestValidTime)
if err != nil {
log.WithError(err).Error("Failed to get hash of last block by latest valid time")
log.WithError(err).Error("Could not get hash of last block by latest valid time")
return vs.randomETH1DataVote(ctx)
}
return &ethpb.Eth1Data{
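The two BlockNumberByTimestamp lookups in the majority-vote hunks above bracket the eth1-data voting window: candidate blocks are those between one and two follow distances (in seconds) older than the voting period start. A hedged sketch of those bounds; the rule is taken from the beacon chain spec's candidate-block condition and the constants are illustrative:

```go
package main

import "fmt"

// eth1VotingWindow returns the [earliest, latest] timestamps of eth1 blocks
// eligible for the eth1-data vote, relative to the voting period start.
func eth1VotingWindow(periodStart, secondsPerETH1Block, followDistance uint64) (earliest, latest uint64) {
	span := followDistance * secondsPerETH1Block
	if periodStart < 2*span {
		return 0, 0 // degenerate case very early after genesis
	}
	return periodStart - 2*span, periodStart - span
}

func main() {
	earliest, latest := eth1VotingWindow(1606824023, 14, 1024)
	fmt.Printf("vote for eth1 blocks with timestamps in [%d, %d]\n", earliest, latest)
}
```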
@@ -507,7 +507,7 @@ func (vs *Server) canonicalEth1Data(
|
||||
|
||||
// Add in current vote, to get accurate vote tally
|
||||
if err := beaconState.AppendEth1DataVotes(currentVote); err != nil {
|
||||
return nil, nil, errors.Wrap(err, "failed to append eth1 data votes to state")
|
||||
return nil, nil, errors.Wrap(err, "could not append eth1 data votes to state")
|
||||
}
|
||||
hasSupport, err := blocks.Eth1DataHasEnoughSupport(beaconState, currentVote)
|
||||
if err != nil {
|
||||
|
||||
@@ -164,8 +164,9 @@ func (vs *Server) WaitForChainStart(_ *ptypes.Empty, stream ethpb.BeaconNodeVali
|
||||
}
|
||||
if head != nil {
|
||||
res := &ethpb.ChainStartResponse{
|
||||
Started: true,
|
||||
GenesisTime: head.GenesisTime(),
|
||||
Started: true,
|
||||
GenesisTime: head.GenesisTime(),
|
||||
GenesisValidatorsRoot: head.GenesisValidatorRoot(),
|
||||
}
|
||||
return stream.Send(res)
|
||||
}
|
||||
@@ -176,71 +177,17 @@ func (vs *Server) WaitForChainStart(_ *ptypes.Empty, stream ethpb.BeaconNodeVali
|
||||
for {
|
||||
select {
|
||||
case event := <-stateChannel:
|
||||
if event.Type == statefeed.ChainStarted {
|
||||
data, ok := event.Data.(*statefeed.ChainStartedData)
|
||||
if !ok {
|
||||
return errors.New("event data is not type *statefeed.ChainStartData")
|
||||
}
|
||||
log.WithField("starttime", data.StartTime).Debug("Received chain started event")
|
||||
log.Debug("Sending genesis time notification to connected validator clients")
|
||||
res := &ethpb.ChainStartResponse{
|
||||
Started: true,
|
||||
GenesisTime: uint64(data.StartTime.Unix()),
|
||||
}
|
||||
return stream.Send(res)
|
||||
}
|
||||
// Handle race condition in the event the blockchain
|
||||
// service isn't initialized in time and the saved head state is nil.
|
||||
if event.Type == statefeed.Initialized {
|
||||
data, ok := event.Data.(*statefeed.InitializedData)
|
||||
if !ok {
|
||||
return errors.New("event data is not type *statefeed.InitializedData")
|
||||
}
|
||||
res := &ethpb.ChainStartResponse{
|
||||
Started: true,
|
||||
GenesisTime: uint64(data.StartTime.Unix()),
|
||||
}
|
||||
return stream.Send(res)
|
||||
}
|
||||
case <-stateSub.Err():
|
||||
return status.Error(codes.Aborted, "Subscriber closed, exiting goroutine")
|
||||
case <-vs.Ctx.Done():
|
||||
return status.Error(codes.Canceled, "Context canceled")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// WaitForSynced subscribes to the state channel and ends the stream when the state channel
|
||||
// indicates the beacon node has been initialized and is ready
|
||||
func (vs *Server) WaitForSynced(_ *ptypes.Empty, stream ethpb.BeaconNodeValidator_WaitForSyncedServer) error {
|
||||
head, err := vs.HeadFetcher.HeadState(stream.Context())
|
||||
if err != nil {
|
||||
return status.Errorf(codes.Internal, "Could not retrieve head state: %v", err)
|
||||
}
|
||||
if head != nil && !vs.SyncChecker.Syncing() {
|
||||
res := &ethpb.SyncedResponse{
|
||||
Synced: true,
|
||||
GenesisTime: head.GenesisTime(),
|
||||
}
|
||||
return stream.Send(res)
|
||||
}
|
||||
|
||||
stateChannel := make(chan *feed.Event, 1)
|
||||
stateSub := vs.StateNotifier.StateFeed().Subscribe(stateChannel)
|
||||
defer stateSub.Unsubscribe()
|
||||
for {
|
||||
select {
|
||||
case event := <-stateChannel:
|
||||
if event.Type == statefeed.Synced {
|
||||
data, ok := event.Data.(*statefeed.SyncedData)
|
||||
if !ok {
|
||||
return errors.New("event data is not type *statefeed.SyncedData")
|
||||
}
|
||||
log.WithField("starttime", data.StartTime).Debug("Received sync completed event")
|
||||
log.WithField("starttime", data.StartTime).Debug("Received chain started event")
|
||||
log.Debug("Sending genesis time notification to connected validator clients")
|
||||
res := &ethpb.SyncedResponse{
|
||||
Synced: true,
|
||||
GenesisTime: uint64(data.StartTime.Unix()),
|
||||
res := &ethpb.ChainStartResponse{
|
||||
Started: true,
|
||||
GenesisTime: uint64(data.StartTime.Unix()),
|
||||
GenesisValidatorsRoot: data.GenesisValidatorsRoot,
|
||||
}
|
||||
return stream.Send(res)
|
||||
}
|
||||
|
||||
@@ -17,7 +17,6 @@ import (
|
||||
dbutil "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
mockPOW "github.com/prysmaticlabs/prysm/beacon-chain/powchain/testing"
|
||||
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
mockSync "github.com/prysmaticlabs/prysm/beacon-chain/sync/initial-sync/testing"
|
||||
pbp2p "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bls"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
@@ -321,8 +320,10 @@ func TestWaitForChainStart_AlreadyStarted(t *testing.T) {
|
||||
require.NoError(t, trie.SetSlot(3))
|
||||
require.NoError(t, db.SaveState(ctx, trie, headBlockRoot))
|
||||
require.NoError(t, db.SaveHeadBlockRoot(ctx, headBlockRoot))
|
||||
genesisValidatorsRoot := bytesutil.ToBytes32([]byte("validators"))
|
||||
require.NoError(t, trie.SetGenesisValidatorRoot(genesisValidatorsRoot[:]))
|
||||
|
||||
chainService := &mockChain.ChainService{State: trie}
|
||||
chainService := &mockChain.ChainService{State: trie, ValidatorsRoot: genesisValidatorsRoot}
|
||||
Server := &Server{
|
||||
Ctx: context.Background(),
|
||||
ChainStartFetcher: &mockPOW.POWChain{
|
||||
@@ -337,8 +338,9 @@ func TestWaitForChainStart_AlreadyStarted(t *testing.T) {
|
||||
mockStream := mock.NewMockBeaconNodeValidator_WaitForChainStartServer(ctrl)
|
||||
mockStream.EXPECT().Send(
|
||||
&ethpb.ChainStartResponse{
|
||||
Started: true,
|
||||
GenesisTime: uint64(time.Unix(0, 0).Unix()),
|
||||
Started: true,
|
||||
GenesisTime: uint64(time.Unix(0, 0).Unix()),
|
||||
GenesisValidatorsRoot: genesisValidatorsRoot[:],
|
||||
},
|
||||
).Return(nil)
|
||||
mockStream.EXPECT().Context().Return(context.Background())
|
||||
@@ -347,7 +349,7 @@ func TestWaitForChainStart_AlreadyStarted(t *testing.T) {
|
||||
|
||||
func TestWaitForChainStart_HeadStateDoesNotExist(t *testing.T) {
|
||||
db, _ := dbutil.SetupDB(t)
|
||||
genesisValidatorRoot := [32]byte{0x01, 0x02}
|
||||
genesisValidatorRoot := params.BeaconConfig().ZeroHash
|
||||
|
||||
// Set head state to nil
|
||||
chainService := &mockChain.ChainService{State: nil}
|
||||
@@ -386,8 +388,9 @@ func TestWaitForChainStart_HeadStateDoesNotExist(t *testing.T) {
|
||||
|
||||
func TestWaitForChainStart_NotStartedThenLogFired(t *testing.T) {
|
||||
db, _ := dbutil.SetupDB(t)
|
||||
|
||||
hook := logTest.NewGlobal()
|
||||
|
||||
genesisValidatorsRoot := bytesutil.ToBytes32([]byte("validators"))
|
||||
chainService := &mockChain.ChainService{}
|
||||
Server := &Server{
|
||||
Ctx: context.Background(),
|
||||
@@ -404,8 +407,9 @@ func TestWaitForChainStart_NotStartedThenLogFired(t *testing.T) {
|
||||
mockStream := mock.NewMockBeaconNodeValidator_WaitForChainStartServer(ctrl)
|
||||
mockStream.EXPECT().Send(
|
||||
&ethpb.ChainStartResponse{
|
||||
Started: true,
|
||||
GenesisTime: uint64(time.Unix(0, 0).Unix()),
|
||||
Started: true,
|
||||
GenesisTime: uint64(time.Unix(0, 0).Unix()),
|
||||
GenesisValidatorsRoot: genesisValidatorsRoot[:],
|
||||
},
|
||||
).Return(nil)
|
||||
mockStream.EXPECT().Context().Return(context.Background())
|
||||
@@ -417,114 +421,10 @@ func TestWaitForChainStart_NotStartedThenLogFired(t *testing.T) {
|
||||
// Send in a loop to ensure it is delivered (busy wait for the service to subscribe to the state feed).
|
||||
for sent := 0; sent == 0; {
|
||||
sent = Server.StateNotifier.StateFeed().Send(&feed.Event{
|
||||
Type: statefeed.ChainStarted,
|
||||
Data: &statefeed.ChainStartedData{
|
||||
StartTime: time.Unix(0, 0),
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
exitRoutine <- true
|
||||
require.LogsContain(t, hook, "Sending genesis time")
|
||||
}
|
||||
|
||||
func TestWaitForSynced_ContextClosed(t *testing.T) {
|
||||
db, _ := dbutil.SetupDB(t)
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
chainService := &mockChain.ChainService{}
|
||||
Server := &Server{
|
||||
Ctx: ctx,
|
||||
ChainStartFetcher: &mockPOW.FaultyMockPOWChain{
|
||||
ChainFeed: new(event.Feed),
|
||||
},
|
||||
StateNotifier: chainService.StateNotifier(),
|
||||
BeaconDB: db,
|
||||
HeadFetcher: chainService,
|
||||
}
|
||||
|
||||
exitRoutine := make(chan bool)
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
mockStream := mock.NewMockBeaconNodeValidator_WaitForSyncedServer(ctrl)
|
||||
mockStream.EXPECT().Context().Return(context.Background())
|
||||
go func(tt *testing.T) {
|
||||
err := Server.WaitForSynced(&ptypes.Empty{}, mockStream)
|
||||
assert.ErrorContains(tt, "Context canceled", err)
|
||||
<-exitRoutine
|
||||
}(t)
|
||||
cancel()
|
||||
exitRoutine <- true
|
||||
}
|
||||
|
||||
func TestWaitForSynced_AlreadySynced(t *testing.T) {
|
||||
db, _ := dbutil.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
headBlockRoot := [32]byte{0x01, 0x02}
|
||||
trie := testutil.NewBeaconState()
|
||||
require.NoError(t, trie.SetSlot(3))
|
||||
require.NoError(t, db.SaveState(ctx, trie, headBlockRoot))
|
||||
require.NoError(t, db.SaveHeadBlockRoot(ctx, headBlockRoot))
|
||||
|
||||
chainService := &mockChain.ChainService{State: trie}
|
||||
Server := &Server{
|
||||
Ctx: context.Background(),
|
||||
ChainStartFetcher: &mockPOW.POWChain{
|
||||
ChainFeed: new(event.Feed),
|
||||
},
|
||||
BeaconDB: db,
|
||||
StateNotifier: chainService.StateNotifier(),
|
||||
HeadFetcher: chainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
mockStream := mock.NewMockBeaconNodeValidator_WaitForSyncedServer(ctrl)
|
||||
mockStream.EXPECT().Send(
|
||||
&ethpb.SyncedResponse{
|
||||
Synced: true,
|
||||
GenesisTime: uint64(time.Unix(0, 0).Unix()),
|
||||
},
|
||||
).Return(nil)
|
||||
mockStream.EXPECT().Context().Return(context.Background())
|
||||
assert.NoError(t, Server.WaitForSynced(&ptypes.Empty{}, mockStream), "Could not call RPC method")
|
||||
}
|
||||
|
||||
func TestWaitForSynced_NotStartedThenLogFired(t *testing.T) {
|
||||
db, _ := dbutil.SetupDB(t)
|
||||
|
||||
hook := logTest.NewGlobal()
|
||||
chainService := &mockChain.ChainService{}
|
||||
Server := &Server{
|
||||
Ctx: context.Background(),
|
||||
ChainStartFetcher: &mockPOW.FaultyMockPOWChain{
|
||||
ChainFeed: new(event.Feed),
|
||||
},
|
||||
BeaconDB: db,
|
||||
StateNotifier: chainService.StateNotifier(),
|
||||
HeadFetcher: chainService,
|
||||
}
|
||||
exitRoutine := make(chan bool)
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
mockStream := mock.NewMockBeaconNodeValidator_WaitForSyncedServer(ctrl)
|
||||
mockStream.EXPECT().Send(
|
||||
&ethpb.SyncedResponse{
|
||||
Synced: true,
|
||||
GenesisTime: uint64(time.Unix(0, 0).Unix()),
|
||||
},
|
||||
).Return(nil)
|
||||
mockStream.EXPECT().Context().Return(context.Background())
|
||||
go func(tt *testing.T) {
|
||||
assert.NoError(tt, Server.WaitForSynced(&ptypes.Empty{}, mockStream), "Could not call RPC method")
|
||||
<-exitRoutine
|
||||
}(t)
|
||||
|
||||
// Send in a loop to ensure it is delivered (busy wait for the service to subscribe to the state feed).
|
||||
for sent := 0; sent == 0; {
|
||||
sent = Server.StateNotifier.StateFeed().Send(&feed.Event{
|
||||
Type: statefeed.Synced,
|
||||
Data: &statefeed.SyncedData{
|
||||
StartTime: time.Unix(0, 0),
|
||||
Type: statefeed.Initialized,
|
||||
Data: &statefeed.InitializedData{
|
||||
StartTime: time.Unix(0, 0),
|
||||
GenesisValidatorsRoot: genesisValidatorsRoot[:],
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
@@ -131,13 +131,13 @@ func (s *State) stateSummary(ctx context.Context, blockRoot [32]byte) (*pb.State
|
||||
}
|
||||
}
|
||||
if summary == nil {
|
||||
return s.recoverStateSummary(ctx, blockRoot)
|
||||
return s.RecoverStateSummary(ctx, blockRoot)
|
||||
}
|
||||
return summary, nil
|
||||
}
|
||||
|
||||
// This recovers state summary object of a given block root by using the saved block in DB.
|
||||
func (s *State) recoverStateSummary(ctx context.Context, blockRoot [32]byte) (*pb.StateSummary, error) {
|
||||
// RecoverStateSummary recovers state summary object of a given block root by using the saved block in DB.
|
||||
func (s *State) RecoverStateSummary(ctx context.Context, blockRoot [32]byte) (*pb.StateSummary, error) {
|
||||
if s.beaconDB.HasBlock(ctx, blockRoot) {
|
||||
b, err := s.beaconDB.Block(ctx, blockRoot)
|
||||
if err != nil {
|
||||
@@ -149,7 +149,7 @@ func (s *State) recoverStateSummary(ctx context.Context, blockRoot [32]byte) (*p
|
||||
}
|
||||
return summary, nil
|
||||
}
|
||||
return nil, errUnknownStateSummary
|
||||
return nil, errors.New("could not find block in DB")
|
||||
}
|
||||
|
||||
// This loads a beacon state from either the cache or DB then replay blocks up the requested block root.
|
||||
@@ -193,6 +193,11 @@ func (s *State) loadStateByRoot(ctx context.Context, blockRoot [32]byte) (*state
|
||||
return nil, errUnknownBoundaryState
|
||||
}
|
||||
|
||||
// Return state early if we are retrieving it from our finalized state cache.
|
||||
if startState.Slot() == targetSlot {
|
||||
return startState, nil
|
||||
}
|
||||
|
||||
blks, err := s.LoadBlocks(ctx, startState.Slot()+1, targetSlot, bytesutil.ToBytes32(summary.Root))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not load blocks for hot state using root")
|
||||
@@ -225,6 +230,13 @@ func (s *State) loadStateBySlot(ctx context.Context, slot uint64) (*state.Beacon
|
||||
return nil, errors.Wrap(err, "could not get last valid block for hot state using slot")
|
||||
}
|
||||
|
||||
// This is an impossible scenario.
|
||||
// In the event where current state slot is greater or equal to last valid block slot,
|
||||
// we should just process state up to input slot.
|
||||
if startState.Slot() >= lastValidSlot {
|
||||
return processSlotsStateGen(ctx, startState, slot)
|
||||
}
|
||||
|
||||
// Load and replay blocks to get the intermediate state.
|
||||
replayBlks, err := s.LoadBlocks(ctx, startState.Slot()+1, lastValidSlot, lastValidRoot)
|
||||
if err != nil {
|
||||
|
||||
@@ -174,10 +174,18 @@ func TestStateBySlot_ColdState(t *testing.T) {
|
||||
service.slotsPerArchivedPoint = params.BeaconConfig().SlotsPerEpoch * 2
|
||||
service.finalizedInfo.slot = service.slotsPerArchivedPoint + 1
|
||||
|
||||
beaconState, _ := testutil.DeterministicGenesisState(t, 32)
|
||||
require.NoError(t, beaconState.SetSlot(1))
|
||||
b := testutil.NewBeaconBlock()
|
||||
b.Block.Slot = 1
|
||||
beaconState, pks := testutil.DeterministicGenesisState(t, 32)
|
||||
genesisStateRoot, err := beaconState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
|
||||
assert.NoError(t, db.SaveBlock(ctx, genesis))
|
||||
gRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
assert.NoError(t, db.SaveState(ctx, beaconState, gRoot))
|
||||
assert.NoError(t, db.SaveGenesisBlockRoot(ctx, gRoot))
|
||||
|
||||
b, err := testutil.GenerateFullBlock(beaconState, pks, testutil.DefaultBlockGenConfig(), 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.SaveBlock(ctx, b))
|
||||
bRoot, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
@@ -200,12 +208,14 @@ func TestStateBySlot_HotStateDB(t *testing.T) {
|
||||
service := New(db, cache.NewStateSummaryCache())
|
||||
|
||||
beaconState, _ := testutil.DeterministicGenesisState(t, 32)
|
||||
b := testutil.NewBeaconBlock()
|
||||
require.NoError(t, db.SaveBlock(ctx, b))
|
||||
bRoot, err := b.Block.HashTreeRoot()
|
||||
genesisStateRoot, err := beaconState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.SaveState(ctx, beaconState, bRoot))
|
||||
require.NoError(t, db.SaveGenesisBlockRoot(ctx, bRoot))
|
||||
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
|
||||
assert.NoError(t, db.SaveBlock(ctx, genesis))
|
||||
gRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
assert.NoError(t, db.SaveState(ctx, beaconState, gRoot))
|
||||
assert.NoError(t, db.SaveGenesisBlockRoot(ctx, gRoot))
|
||||
|
||||
slot := uint64(10)
|
||||
loadedState, err := service.StateBySlot(ctx, slot)
|
||||
@@ -222,7 +232,7 @@ func TestStateSummary_CanGetFromCacheOrDB(t *testing.T) {
|
||||
r := [32]byte{'a'}
|
||||
summary := &pb.StateSummary{Slot: 100}
|
||||
_, err := service.stateSummary(ctx, r)
|
||||
require.ErrorContains(t, errUnknownStateSummary.Error(), err)
|
||||
require.ErrorContains(t, "could not find block in DB", err)
|
||||
|
||||
service.stateSummaryCache.Put(r, summary)
|
||||
got, err := service.stateSummary(ctx, r)
|
||||
@@ -234,7 +244,7 @@ func TestStateSummary_CanGetFromCacheOrDB(t *testing.T) {
|
||||
r = [32]byte{'b'}
|
||||
summary = &pb.StateSummary{Root: r[:], Slot: 101}
|
||||
_, err = service.stateSummary(ctx, r)
|
||||
require.ErrorContains(t, errUnknownStateSummary.Error(), err)
|
||||
require.ErrorContains(t, "could not find block in DB", err)
|
||||
|
||||
require.NoError(t, service.beaconDB.SaveStateSummary(ctx, summary))
|
||||
got, err = service.stateSummary(ctx, r)
|
||||
@@ -262,6 +272,33 @@ func TestLoadeStateByRoot_Cached(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadeStateByRoot_FinalizedState(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db, _ := testDB.SetupDB(t)
|
||||
service := New(db, cache.NewStateSummaryCache())
|
||||
|
||||
beaconState, _ := testutil.DeterministicGenesisState(t, 32)
|
||||
genesisStateRoot, err := beaconState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
|
||||
assert.NoError(t, db.SaveBlock(ctx, genesis))
|
||||
gRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{Slot: 0, Root: gRoot[:]}))
|
||||
|
||||
service.finalizedInfo.state = beaconState
|
||||
service.finalizedInfo.slot = beaconState.Slot()
|
||||
service.finalizedInfo.root = gRoot
|
||||
|
||||
// This tests where hot state was already cached.
|
||||
loadedState, err := service.loadStateByRoot(ctx, gRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
if !proto.Equal(loadedState.InnerStateUnsafe(), beaconState.InnerStateUnsafe()) {
|
||||
t.Error("Did not correctly retrieve finalized state")
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadeStateByRoot_EpochBoundaryStateCanProcess(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
db, ssc := testDB.SetupDB(t)
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
@@ -67,29 +68,42 @@ func TestMigrateToCold_RegeneratePath(t *testing.T) {
|
||||
|
||||
service := New(db, cache.NewStateSummaryCache())
|
||||
service.slotsPerArchivedPoint = 1
|
||||
beaconState, _ := testutil.DeterministicGenesisState(t, 32)
|
||||
stateSlot := uint64(1)
|
||||
require.NoError(t, beaconState.SetSlot(stateSlot))
|
||||
blk := testutil.NewBeaconBlock()
|
||||
blk.Block.Slot = 2
|
||||
fRoot, err := blk.Block.HashTreeRoot()
|
||||
beaconState, pks := testutil.DeterministicGenesisState(t, 32)
|
||||
genesisStateRoot, err := beaconState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.beaconDB.SaveBlock(ctx, blk))
|
||||
require.NoError(t, service.beaconDB.SaveGenesisBlockRoot(ctx, fRoot))
|
||||
require.NoError(t, service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{Slot: 1, Root: fRoot[:]}))
|
||||
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
|
||||
assert.NoError(t, db.SaveBlock(ctx, genesis))
|
||||
gRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
assert.NoError(t, db.SaveState(ctx, beaconState, gRoot))
|
||||
assert.NoError(t, db.SaveGenesisBlockRoot(ctx, gRoot))
|
||||
|
||||
b1, err := testutil.GenerateFullBlock(beaconState, pks, testutil.DefaultBlockGenConfig(), 1)
|
||||
require.NoError(t, err)
|
||||
r1, err := b1.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.beaconDB.SaveBlock(ctx, b1))
|
||||
require.NoError(t, service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{Slot: 1, Root: r1[:]}))
|
||||
|
||||
b4, err := testutil.GenerateFullBlock(beaconState, pks, testutil.DefaultBlockGenConfig(), 4)
|
||||
require.NoError(t, err)
|
||||
r4, err := b4.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.beaconDB.SaveBlock(ctx, b4))
|
||||
require.NoError(t, service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{Slot: 4, Root: r4[:]}))
|
||||
service.finalizedInfo = &finalizedInfo{
|
||||
slot: 1,
|
||||
root: fRoot,
|
||||
slot: 0,
|
||||
root: genesisStateRoot,
|
||||
state: beaconState,
|
||||
}
|
||||
|
||||
require.NoError(t, service.MigrateToCold(ctx, fRoot))
|
||||
require.NoError(t, service.MigrateToCold(ctx, r4))
|
||||
|
||||
gotState, err := service.beaconDB.State(ctx, fRoot)
|
||||
s1, err := service.beaconDB.State(ctx, r1)
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, beaconState.InnerStateUnsafe(), gotState.InnerStateUnsafe(), "Did not save state")
|
||||
gotRoot := service.beaconDB.ArchivedPointRoot(ctx, stateSlot/service.slotsPerArchivedPoint)
|
||||
assert.Equal(t, fRoot, gotRoot, "Did not save archived root")
|
||||
assert.Equal(t, s1.Slot(), uint64(1), "Did not save state")
|
||||
gotRoot := service.beaconDB.ArchivedPointRoot(ctx, 1/service.slotsPerArchivedPoint)
|
||||
assert.Equal(t, r1, gotRoot, "Did not save archived root")
|
||||
lastIndex, err := service.beaconDB.LastArchivedSlot(ctx)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, uint64(1), lastIndex, "Did not save last archived index")
|
||||
|
||||
@@ -54,9 +54,8 @@ func (s *State) ReplayBlocks(ctx context.Context, state *stateTrie.BeaconState,
|
||||
// The Blocks are returned in slot-descending order.
|
||||
func (s *State) LoadBlocks(ctx context.Context, startSlot, endSlot uint64, endBlockRoot [32]byte) ([]*ethpb.SignedBeaconBlock, error) {
|
||||
// Nothing to load for invalid range.
|
||||
// TODO(#7620): Return error for invalid range.
|
||||
if endSlot < startSlot {
|
||||
return nil, nil
|
||||
return nil, fmt.Errorf("start slot %d >= end slot %d", startSlot, endSlot)
|
||||
}
|
||||
filter := filters.NewFilter().SetStartSlot(startSlot).SetEndSlot(endSlot)
|
||||
blocks, blockRoots, err := s.beaconDB.Blocks(ctx, filter)
|
||||
|
||||
@@ -90,6 +90,7 @@ go_library(
|
||||
"@com_github_libp2p_go_libp2p_core//peer:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p_core//protocol:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p_pubsub//:go_default_library",
|
||||
"@com_github_patrickmn_go_cache//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
@@ -176,6 +177,7 @@ go_test(
|
||||
"@com_github_libp2p_go_libp2p_core//protocol:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p_pubsub//:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p_pubsub//pb:go_default_library",
|
||||
"@com_github_patrickmn_go_cache//:go_default_library",
|
||||
"@com_github_protolambda_zssz//:go_default_library",
|
||||
"@com_github_protolambda_zssz//types:go_default_library",
|
||||
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
|
||||
|
||||
@@ -33,7 +33,7 @@ func SetStreamReadDeadline(stream network.Stream, duration time.Duration) {
|
||||
"peer": stream.Conn().RemotePeer(),
|
||||
"protocol": stream.Protocol(),
|
||||
"direction": stream.Stat().Direction,
|
||||
}).Debug("Failed to set stream deadline")
|
||||
}).Debug("Could not set stream deadline")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -51,6 +51,6 @@ func SetStreamWriteDeadline(stream network.Stream, duration time.Duration) {
|
||||
"peer": stream.Conn().RemotePeer(),
|
||||
"protocol": stream.Protocol(),
|
||||
"direction": stream.Stat().Direction,
|
||||
}).Debug("Failed to set stream deadline")
|
||||
}).Debug("Could not set stream deadline")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -15,19 +15,6 @@ import (
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const genericError = "internal service error"
|
||||
const rateLimitedError = "rate limited"
|
||||
const reqError = "invalid range, step or count"
|
||||
const seqError = "invalid sequence number provided"
|
||||
const deadlineError = "i/o deadline exceeded"
|
||||
|
||||
var errWrongForkDigestVersion = errors.New("wrong fork digest version")
|
||||
var errInvalidEpoch = errors.New("invalid epoch")
|
||||
var errInvalidFinalizedRoot = errors.New("invalid finalized root")
|
||||
var errInvalidSequenceNum = errors.New(seqError)
|
||||
var errGeneric = errors.New(genericError)
|
||||
var errInvalidParent = errors.New("mismatched parent root")
|
||||
|
||||
var responseCodeSuccess = byte(0x00)
|
||||
var responseCodeInvalidRequest = byte(0x01)
|
||||
var responseCodeServerError = byte(0x02)
|
||||
@@ -66,9 +53,9 @@ func ReadStatusCode(stream network.Stream, encoding encoder.NetworkEncoding) (ui
|
||||
func writeErrorResponseToStream(responseCode byte, reason string, stream libp2pcore.Stream, encoder p2p.EncodingProvider) {
|
||||
resp, err := createErrorResponse(responseCode, reason, encoder)
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Failed to generate a response error")
|
||||
log.WithError(err).Debug("Could not generate a response error")
|
||||
} else if _, err := stream.Write(resp); err != nil {
|
||||
log.WithError(err).Debugf("Failed to write to stream")
|
||||
log.WithError(err).Debugf("Could not write to stream")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -109,6 +96,6 @@ func isValidStreamError(err error) bool {
|
||||
|
||||
func closeStream(stream network.Stream, log *logrus.Entry) {
|
||||
if err := helpers.FullClose(stream); err != nil && err.Error() != mux.ErrReset.Error() {
|
||||
log.WithError(err).Debug("Failed to reset stream")
|
||||
log.WithError(err).Debug("Could not reset stream")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,10 +4,12 @@ package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/libp2p/go-libp2p-core/peer"
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
gcache "github.com/patrickmn/go-cache"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
)
|
||||
|
||||
@@ -26,7 +28,7 @@ func NewRegularSyncFuzz(cfg *Config) *Service {
|
||||
chain: cfg.Chain,
|
||||
initialSync: cfg.InitialSync,
|
||||
attestationNotifier: cfg.AttestationNotifier,
|
||||
slotToPendingBlocks: make(map[uint64][]*ethpb.SignedBeaconBlock),
|
||||
slotToPendingBlocks: gcache.New(time.Second, 2*time.Second),
|
||||
seenPendingBlocks: make(map[[32]byte]bool),
|
||||
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
|
||||
stateNotifier: cfg.StateNotifier,
|
||||
|
||||
@@ -36,6 +36,10 @@ const (
|
||||
noRequiredPeersErrRefreshInterval = 15 * time.Second
|
||||
// maxResetAttempts number of times stale FSM is reset, before backtracking is triggered.
|
||||
maxResetAttempts = 4
|
||||
// startBackSlots defines the number of slots before the current head that marks the start position
// of the initial state machine. This adds robustness in case normal sync has set the head to an
// orphaned block: starting earlier and re-fetching blocks allows the chain to be reorganized.
|
||||
startBackSlots = 32
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -173,6 +177,9 @@ func (q *blocksQueue) loop() {
|
||||
|
||||
// Define initial state machines.
|
||||
startSlot := q.chain.HeadSlot()
|
||||
if startSlot > startBackSlots {
|
||||
startSlot -= startBackSlots
|
||||
}
|
||||
blocksPerRequest := q.blocksFetcher.blocksPerSecond
|
||||
for i := startSlot; i < startSlot+blocksPerRequest*lookaheadSteps; i += blocksPerRequest {
|
||||
q.smm.addStateMachine(i)
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/kevinms/leakybucket-go"
|
||||
"github.com/libp2p/go-libp2p-core/peer"
|
||||
@@ -1255,3 +1256,120 @@ func TestBlocksQueue_stuckInUnfavourableFork(t *testing.T) {
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestBlocksQueue_stuckWhenHeadIsSetToOrphanedBlock(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
beaconDB, _ := dbtest.SetupDB(t)
|
||||
p2p := p2pt.NewTestP2P(t)
|
||||
|
||||
chain := extendBlockSequence(t, []*eth.SignedBeaconBlock{}, 128)
|
||||
finalizedSlot := uint64(82)
|
||||
finalizedEpoch := helpers.SlotToEpoch(finalizedSlot)
|
||||
|
||||
genesisBlock := chain[0]
|
||||
require.NoError(t, beaconDB.SaveBlock(context.Background(), genesisBlock))
|
||||
genesisRoot, err := genesisBlock.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
st := testutil.NewBeaconState()
|
||||
mc := &mock.ChainService{
|
||||
State: st,
|
||||
Root: genesisRoot[:],
|
||||
DB: beaconDB,
|
||||
FinalizedCheckPoint: &eth.Checkpoint{
|
||||
Epoch: finalizedEpoch,
|
||||
Root: []byte(fmt.Sprintf("finalized_root %d", finalizedEpoch)),
|
||||
},
|
||||
}
|
||||
|
||||
// Populate the database with part of the chain; the orphaned block will be added on top.
|
||||
for _, blk := range chain[1:84] {
|
||||
parentRoot := bytesutil.ToBytes32(blk.Block.ParentRoot)
|
||||
// Save block only if parent root is already in database or cache.
|
||||
if beaconDB.HasBlock(ctx, parentRoot) || mc.HasInitSyncBlock(parentRoot) {
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, blk))
|
||||
require.NoError(t, st.SetSlot(blk.Block.Slot))
|
||||
}
|
||||
}
|
||||
require.Equal(t, uint64(83), mc.HeadSlot())
|
||||
require.Equal(t, chain[83].Block.Slot, mc.HeadSlot())
|
||||
|
||||
// Set head to slot 85; since the block at slot 84 is not in the DB, the head block is orphaned.
// Moreover, the block at slot 85 is a forked block and should be replaced with the block from the peer.
|
||||
orphanedBlock := testutil.NewBeaconBlock()
|
||||
orphanedBlock.Block.Slot = 85
|
||||
orphanedBlock.Block.StateRoot = testutil.Random32Bytes(t)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, orphanedBlock))
|
||||
require.NoError(t, st.SetSlot(orphanedBlock.Block.Slot))
|
||||
require.Equal(t, uint64(85), mc.HeadSlot())
|
||||
|
||||
fetcher := newBlocksFetcher(
|
||||
ctx,
|
||||
&blocksFetcherConfig{
|
||||
chain: mc,
|
||||
p2p: p2p,
|
||||
db: beaconDB,
|
||||
},
|
||||
)
|
||||
fetcher.rateLimiter = leakybucket.NewCollector(6400, 6400, false)
|
||||
|
||||
// Connect peer that has all the blocks available.
|
||||
allBlocksPeer := connectPeerHavingBlocks(t, p2p, chain, finalizedSlot, p2p.Peers())
|
||||
defer func() {
|
||||
p2p.Peers().SetConnectionState(allBlocksPeer, peers.PeerDisconnected)
|
||||
}()
|
||||
|
||||
// Queue should be able to fetch whole chain (including slot which comes before the currently set head).
|
||||
queue := newBlocksQueue(ctx, &blocksQueueConfig{
|
||||
blocksFetcher: fetcher,
|
||||
chain: mc,
|
||||
highestExpectedSlot: uint64(len(chain) - 1),
|
||||
mode: modeNonConstrained,
|
||||
})
|
||||
|
||||
require.NoError(t, queue.start())
|
||||
isProcessedBlock := func(ctx context.Context, blk *eth.SignedBeaconBlock, blkRoot [32]byte) bool {
|
||||
finalizedSlot, err := helpers.StartSlot(mc.FinalizedCheckpt().Epoch)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
if blk.Block.Slot <= finalizedSlot || (beaconDB.HasBlock(ctx, blkRoot) || mc.HasInitSyncBlock(blkRoot)) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
select {
|
||||
case <-time.After(3 * time.Second):
|
||||
t.Fatal("test takes too long to complete")
|
||||
case data := <-queue.fetchedData:
|
||||
for _, blk := range data.blocks {
|
||||
blkRoot, err := blk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
if isProcessedBlock(ctx, blk, blkRoot) {
|
||||
log.Errorf("slot: %d , root %#x: %v", blk.Block.Slot, blkRoot, errBlockAlreadyProcessed)
|
||||
continue
|
||||
}
|
||||
|
||||
parentRoot := bytesutil.ToBytes32(blk.Block.ParentRoot)
|
||||
if !beaconDB.HasBlock(ctx, parentRoot) && !mc.HasInitSyncBlock(parentRoot) {
|
||||
log.Errorf("%v: %#x", errParentDoesNotExist, blk.Block.ParentRoot)
|
||||
continue
|
||||
}
|
||||
|
||||
// Block is not already processed, and parent exists in database - process.
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, blk))
|
||||
require.NoError(t, st.SetSlot(blk.Block.Slot))
|
||||
}
|
||||
}
|
||||
require.NoError(t, queue.stop())
|
||||
|
||||
// Check that all blocks available in chain are produced by queue.
|
||||
for _, blk := range chain[:orphanedBlock.Block.Slot+32] {
|
||||
blkRoot, err := blk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, beaconDB.HasBlock(ctx, blkRoot) || mc.HasInitSyncBlock(blkRoot), "slot %d", blk.Block.Slot)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
pb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/flags"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
)
|
||||
@@ -46,30 +47,6 @@ var (
|
||||
Help: "Count the number of times a node resyncs.",
|
||||
},
|
||||
)
|
||||
numberOfBlocksRecoveredFromAtt = promauto.NewCounter(
|
||||
prometheus.CounterOpts{
|
||||
Name: "beacon_blocks_recovered_from_attestation_total",
|
||||
Help: "Count the number of times a missing block recovered from attestation vote.",
|
||||
},
|
||||
)
|
||||
numberOfBlocksNotRecoveredFromAtt = promauto.NewCounter(
|
||||
prometheus.CounterOpts{
|
||||
Name: "beacon_blocks_not_recovered_from_attestation_total",
|
||||
Help: "Count the number of times a missing block not recovered and pruned from attestation vote.",
|
||||
},
|
||||
)
|
||||
numberOfAttsRecovered = promauto.NewCounter(
|
||||
prometheus.CounterOpts{
|
||||
Name: "beacon_attestations_recovered_total",
|
||||
Help: "Count the number of times attestation recovered because of missing block",
|
||||
},
|
||||
)
|
||||
numberOfAttsNotRecovered = promauto.NewCounter(
|
||||
prometheus.CounterOpts{
|
||||
Name: "beacon_attestations_not_recovered_total",
|
||||
Help: "Count the number of times attestation not recovered and pruned because of missing block",
|
||||
},
|
||||
)
|
||||
arrivalBlockPropagationHistogram = promauto.NewHistogram(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "block_arrival_latency_milliseconds",
|
||||
@@ -90,11 +67,19 @@ func (s *Service) updateMetrics() {
|
||||
if err != nil {
|
||||
log.WithError(err).Debugf("Could not compute fork digest")
|
||||
}
|
||||
indices := s.aggregatorSubnetIndices(s.chain.CurrentSlot())
|
||||
attTopic := p2p.GossipTypeMapping[reflect.TypeOf(&pb.Attestation{})]
|
||||
attTopic += s.p2p.Encoding().ProtocolSuffix()
|
||||
for i := uint64(0); i < params.BeaconNetworkConfig().AttestationSubnetCount; i++ {
|
||||
formattedTopic := fmt.Sprintf(attTopic, digest, i)
|
||||
topicPeerCount.WithLabelValues(formattedTopic).Set(float64(len(s.p2p.PubSub().ListPeers(formattedTopic))))
|
||||
if flags.Get().SubscribeToAllSubnets {
|
||||
for i := uint64(0); i < params.BeaconNetworkConfig().AttestationSubnetCount; i++ {
|
||||
formattedTopic := fmt.Sprintf(attTopic, digest, i)
|
||||
topicPeerCount.WithLabelValues(formattedTopic).Set(float64(len(s.p2p.PubSub().ListPeers(formattedTopic))))
|
||||
}
|
||||
} else {
|
||||
for _, committeeIdx := range indices {
|
||||
formattedTopic := fmt.Sprintf(attTopic, digest, committeeIdx)
|
||||
topicPeerCount.WithLabelValues(formattedTopic).Set(float64(len(s.p2p.PubSub().ListPeers(formattedTopic))))
|
||||
}
|
||||
}
|
||||
|
||||
// We update all other gossip topics.
|
||||
|
||||
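The updateMetrics hunk above switches from always reporting every attestation subnet to reporting either all subnets or only the node's aggregator subnets, depending on the SubscribeToAllSubnets flag. A hedged, stdlib-only sketch of that selection logic; the subnet count and the flag are plain parameters here rather than Prysm's config and flag packages:

package main

import "fmt"

// subnetsToReport mirrors the branch in updateMetrics: with subscribeToAll set,
// every subnet index is reported; otherwise only the supplied aggregator subnets.
func subnetsToReport(subscribeToAll bool, subnetCount uint64, aggregatorSubnets []uint64) []uint64 {
	if subscribeToAll {
		all := make([]uint64, 0, subnetCount)
		for i := uint64(0); i < subnetCount; i++ {
			all = append(all, i)
		}
		return all
	}
	return aggregatorSubnets
}

func main() {
	fmt.Println(subnetsToReport(false, 64, []uint64{3, 17})) // [3 17]
	fmt.Println(subnetsToReport(true, 4, nil))               // [0 1 2 3]
}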
@@ -8,7 +8,6 @@ import (
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/shared/bls"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/rand"
|
||||
@@ -63,7 +62,6 @@ func (s *Service) processPendingAtts(ctx context.Context) error {
|
||||
// Has the pending attestation's missing block arrived and the node processed block yet?
|
||||
hasStateSummary := s.db.HasStateSummary(ctx, bRoot) || s.stateSummaryCache.Has(bRoot)
|
||||
if s.db.HasBlock(ctx, bRoot) && (s.db.HasState(ctx, bRoot) || hasStateSummary) {
|
||||
numberOfBlocksRecoveredFromAtt.Inc()
|
||||
for _, signedAtt := range attestations {
|
||||
att := signedAtt.Message
|
||||
// The pending attestations can arrive in both aggregated and unaggregated forms,
|
||||
@@ -74,41 +72,57 @@ func (s *Service) processPendingAtts(ctx context.Context) error {
|
||||
aggValid := s.validateAggregatedAtt(ctx, signedAtt) == pubsub.ValidationAccept
|
||||
if s.validateBlockInAttestation(ctx, signedAtt) && aggValid {
|
||||
if err := s.attPool.SaveAggregatedAttestation(att.Aggregate); err != nil {
|
||||
return err
|
||||
log.WithError(err).Debug("Could not save aggregate attestation")
|
||||
continue
|
||||
}
|
||||
numberOfAttsRecovered.Inc()
|
||||
s.setAggregatorIndexEpochSeen(att.Aggregate.Data.Target.Epoch, att.AggregatorIndex)
|
||||
|
||||
// Broadcasting the signed attestation again once a node is able to process it.
|
||||
if err := s.p2p.Broadcast(ctx, signedAtt); err != nil {
|
||||
log.WithError(err).Debug("Failed to broadcast")
|
||||
log.WithError(err).Debug("Could not broadcast")
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Save the pending unaggregated attestation to the pool if the BLS signature is
|
||||
// valid.
|
||||
if _, err := bls.SignatureFromBytes(att.Aggregate.Signature); err != nil {
|
||||
// This is an important validation before retrieving attestation pre state to defend against
|
||||
// attestation's target intentionally reference checkpoint that's long ago.
|
||||
// Verify current finalized checkpoint is an ancestor of the block defined by the attestation's beacon block root.
|
||||
if err := s.chain.VerifyFinalizedConsistency(ctx, att.Aggregate.Data.BeaconBlockRoot); err != nil {
|
||||
log.WithError(err).Debug("Could not verify finalized consistency")
|
||||
continue
|
||||
}
|
||||
if err := s.attPool.SaveUnaggregatedAttestation(att.Aggregate); err != nil {
|
||||
return err
|
||||
}
|
||||
numberOfAttsRecovered.Inc()
|
||||
|
||||
// Verify signed aggregate has a valid signature.
|
||||
if _, err := bls.SignatureFromBytes(signedAtt.Signature); err != nil {
|
||||
if err := s.chain.VerifyLmdFfgConsistency(ctx, att.Aggregate); err != nil {
|
||||
log.WithError(err).Debug("Could not verify FFG consistency")
|
||||
continue
|
||||
}
|
||||
preState, err := s.chain.AttestationPreState(ctx, att.Aggregate)
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Could not retrieve attestation prestate")
|
||||
continue
|
||||
}
|
||||
valid := s.validateUnaggregatedAttWithState(ctx, att.Aggregate, preState)
|
||||
if valid == pubsub.ValidationAccept {
|
||||
if err := s.attPool.SaveUnaggregatedAttestation(att.Aggregate); err != nil {
|
||||
log.WithError(err).Debug("Could not save unaggregated attestation")
|
||||
continue
|
||||
}
|
||||
s.setSeenCommitteeIndicesSlot(att.Aggregate.Data.Slot, att.Aggregate.Data.CommitteeIndex, att.Aggregate.AggregationBits)
|
||||
|
||||
// Broadcasting the signed attestation again once a node is able to process it.
|
||||
if err := s.p2p.Broadcast(ctx, signedAtt); err != nil {
|
||||
log.WithError(err).Debug("Failed to broadcast")
|
||||
valCount, err := helpers.ActiveValidatorCount(preState, helpers.SlotToEpoch(att.Aggregate.Data.Slot))
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Could not retrieve active validator count")
|
||||
continue
|
||||
}
|
||||
// Broadcasting the signed attestation again once a node is able to process it.
|
||||
if err := s.p2p.BroadcastAttestation(ctx, helpers.ComputeSubnetForAttestation(valCount, signedAtt.Message.Aggregate), signedAtt.Message.Aggregate); err != nil {
|
||||
log.WithError(err).Debug("Could not broadcast")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
log.WithFields(logrus.Fields{
|
||||
"blockRoot": hex.EncodeToString(bytesutil.Trunc(bRoot[:])),
|
||||
"pendingAttsCount": len(attestations),
|
||||
}).Info("Verified and saved pending attestations to pool")
|
||||
}).Debug("Verified and saved pending attestations to pool")
|
||||
|
||||
// Delete the missing block root key from pending attestation queue so a node will not request for the block again.
|
||||
s.pendingAttsLock.Lock()
|
||||
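The processPendingAtts changes above keep attestations whose target block has not arrived yet, process them once the block shows up, and prune them after roughly an epoch. A minimal sketch of that bookkeeping, with a simplified attestation type and an assumed slotsPerEpoch value standing in for Prysm's protobuf types and config:

package main

import "fmt"

const slotsPerEpoch = 32 // assumption for the sketch; Prysm reads this from config

type pendingAtt struct {
	slot      uint64
	blockRoot [32]byte
}

type pendingPool struct {
	byRoot map[[32]byte][]pendingAtt
}

func newPendingPool() *pendingPool {
	return &pendingPool{byRoot: make(map[[32]byte][]pendingAtt)}
}

// add queues an attestation until the block it votes for is available.
func (p *pendingPool) add(a pendingAtt) {
	p.byRoot[a.blockRoot] = append(p.byRoot[a.blockRoot], a)
}

// pop returns and removes everything waiting on root, called once the block arrives.
func (p *pendingPool) pop(root [32]byte) []pendingAtt {
	atts := p.byRoot[root]
	delete(p.byRoot, root)
	return atts
}

// prune drops attestations older than one epoch relative to currentSlot and removes
// roots left with no pending attestations, mirroring the shape of validatePendingAtts.
func (p *pendingPool) prune(currentSlot uint64) {
	for root, atts := range p.byRoot {
		kept := atts[:0]
		for _, a := range atts {
			if currentSlot < a.slot+slotsPerEpoch {
				kept = append(kept, a)
			}
		}
		if len(kept) == 0 {
			delete(p.byRoot, root)
			continue
		}
		p.byRoot[root] = kept
	}
}

func main() {
	pool := newPendingPool()
	root := [32]byte{1}
	pool.add(pendingAtt{slot: 10, blockRoot: root})
	pool.prune(100) // slot 10 is more than an epoch old, so it is dropped
	fmt.Println(len(pool.pop(root)))
}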
@@ -168,7 +182,6 @@ func (s *Service) validatePendingAtts(ctx context.Context, slot uint64) {
|
||||
if slot >= atts[i].Message.Aggregate.Data.Slot+params.BeaconConfig().SlotsPerEpoch {
|
||||
// Remove the pending attestation from the list in place.
|
||||
atts = append(atts[:i], atts[i+1:]...)
|
||||
numberOfAttsNotRecovered.Inc()
|
||||
}
|
||||
}
|
||||
s.blkRootToPendingAtts[bRoot] = atts
|
||||
@@ -177,7 +190,6 @@ func (s *Service) validatePendingAtts(ctx context.Context, slot uint64) {
|
||||
// a node will remove the key from the map to avoid dangling keys.
|
||||
if len(s.blkRootToPendingAtts[bRoot]) == 0 {
|
||||
delete(s.blkRootToPendingAtts, bRoot)
|
||||
numberOfBlocksNotRecoveredFromAtt.Inc()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
lru "github.com/hashicorp/golang-lru"
|
||||
"github.com/libp2p/go-libp2p-core/network"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
@@ -59,45 +60,83 @@ func TestProcessPendingAtts_HasBlockSaveUnAggregatedAtt(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
db, _ := dbtest.SetupDB(t)
|
||||
p1 := p2ptest.NewTestP2P(t)
|
||||
validators := uint64(256)
|
||||
testutil.ResetCache()
|
||||
beaconState, privKeys := testutil.DeterministicGenesisState(t, validators)
|
||||
|
||||
sb := testutil.NewBeaconBlock()
|
||||
require.NoError(t, db.SaveBlock(context.Background(), sb))
|
||||
root, err := sb.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
aggBits := bitfield.NewBitlist(8)
|
||||
aggBits.SetBitAt(1, true)
|
||||
att := ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
BeaconBlockRoot: root[:],
|
||||
Source: ðpb.Checkpoint{Epoch: 0, Root: bytesutil.PadTo([]byte("hello-world"), 32)},
|
||||
Target: ðpb.Checkpoint{Epoch: 0, Root: root[:]},
|
||||
},
|
||||
AggregationBits: aggBits,
|
||||
}
|
||||
|
||||
committee, err := helpers.BeaconCommitteeFromState(beaconState, att.Data.Slot, att.Data.CommitteeIndex)
|
||||
assert.NoError(t, err)
|
||||
attestingIndices := attestationutil.AttestingIndices(att.AggregationBits, committee)
|
||||
assert.NoError(t, err)
|
||||
attesterDomain, err := helpers.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorRoot())
|
||||
require.NoError(t, err)
|
||||
hashTreeRoot, err := helpers.ComputeSigningRoot(att.Data, attesterDomain)
|
||||
assert.NoError(t, err)
|
||||
for _, i := range attestingIndices {
|
||||
att.Signature = privKeys[i].Sign(hashTreeRoot[:]).Marshal()
|
||||
}
|
||||
|
||||
// Arbitrary aggregator index for testing purposes.
|
||||
aggregatorIndex := committee[0]
|
||||
sig, err := helpers.ComputeDomainAndSign(beaconState, 0, att.Data.Slot, params.BeaconConfig().DomainSelectionProof, privKeys[aggregatorIndex])
|
||||
require.NoError(t, err)
|
||||
aggregateAndProof := ðpb.AggregateAttestationAndProof{
|
||||
SelectionProof: sig,
|
||||
Aggregate: att,
|
||||
AggregatorIndex: aggregatorIndex,
|
||||
}
|
||||
aggreSig, err := helpers.ComputeDomainAndSign(beaconState, 0, aggregateAndProof, params.BeaconConfig().DomainAggregateAndProof, privKeys[aggregatorIndex])
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, beaconState.SetGenesisTime(uint64(time.Now().Unix())))
|
||||
|
||||
c, err := lru.New(10)
|
||||
require.NoError(t, err)
|
||||
r := &Service{
|
||||
p2p: p1,
|
||||
db: db,
|
||||
chain: &mock.ChainService{Genesis: timeutils.Now()},
|
||||
p2p: p1,
|
||||
db: db,
|
||||
chain: &mock.ChainService{Genesis: time.Now(),
|
||||
State: beaconState,
|
||||
FinalizedCheckPoint: ðpb.Checkpoint{
|
||||
Root: aggregateAndProof.Aggregate.Data.BeaconBlockRoot,
|
||||
Epoch: 0,
|
||||
}},
|
||||
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
|
||||
attPool: attestations.NewPool(),
|
||||
stateSummaryCache: cache.NewStateSummaryCache(),
|
||||
seenAttestationCache: c,
|
||||
}
|
||||
|
||||
priv, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
a := ðpb.AggregateAttestationAndProof{
|
||||
Aggregate: ðpb.Attestation{
|
||||
Signature: priv.Sign([]byte("foo")).Marshal(),
|
||||
AggregationBits: bitfield.Bitlist{0x02},
|
||||
Data: ðpb.AttestationData{
|
||||
Target: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
Source: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
BeaconBlockRoot: make([]byte, 32),
|
||||
},
|
||||
},
|
||||
SelectionProof: make([]byte, 96),
|
||||
}
|
||||
|
||||
b := testutil.NewBeaconBlock()
|
||||
r32, err := b.Block.HashTreeRoot()
|
||||
sb = testutil.NewBeaconBlock()
|
||||
r32, err := sb.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, r.db.SaveBlock(context.Background(), sb))
|
||||
s := testutil.NewBeaconState()
|
||||
require.NoError(t, r.db.SaveBlock(context.Background(), b))
|
||||
require.NoError(t, r.db.SaveState(context.Background(), s, r32))
|
||||
|
||||
r.blkRootToPendingAtts[r32] = []*ethpb.SignedAggregateAttestationAndProof{{Message: a, Signature: make([]byte, 96)}}
|
||||
r.blkRootToPendingAtts[r32] = []*ethpb.SignedAggregateAttestationAndProof{{Message: aggregateAndProof, Signature: aggreSig}}
|
||||
require.NoError(t, r.processPendingAtts(context.Background()))
|
||||
|
||||
atts, err := r.attPool.UnaggregatedAttestations()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 1, len(atts), "Did not save unaggregated att")
|
||||
assert.DeepEqual(t, a.Aggregate, atts[0], "Incorrect saved att")
|
||||
assert.DeepEqual(t, att, atts[0], "Incorrect saved att")
|
||||
assert.Equal(t, 0, len(r.attPool.AggregatedAttestations()), "Did save aggregated att")
|
||||
require.LogsContain(t, hook, "Verified and saved pending attestations to pool")
|
||||
}
|
||||
@@ -109,7 +148,7 @@ func TestProcessPendingAtts_NoBroadcastWithBadSignature(t *testing.T) {
|
||||
r := &Service{
|
||||
p2p: p1,
|
||||
db: db,
|
||||
chain: &mock.ChainService{Genesis: timeutils.Now()},
|
||||
chain: &mock.ChainService{Genesis: timeutils.Now(), FinalizedCheckPoint: ðpb.Checkpoint{Root: make([]byte, 32)}},
|
||||
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
|
||||
attPool: attestations.NewPool(),
|
||||
stateSummaryCache: cache.NewStateSummaryCache(),
|
||||
@@ -145,9 +184,62 @@ func TestProcessPendingAtts_NoBroadcastWithBadSignature(t *testing.T) {
|
||||
err = r.attPool.DeleteUnaggregatedAttestation(a.Aggregate)
|
||||
require.NoError(t, err)
|
||||
|
||||
r.blkRootToPendingAtts[r32] = []*ethpb.SignedAggregateAttestationAndProof{{Message: a, Signature: make([]byte, 96)}}
|
||||
// Make the signature a zero sig
|
||||
r.blkRootToPendingAtts[r32][0].Signature[0] = 0xC0
|
||||
validators := uint64(256)
|
||||
testutil.ResetCache()
|
||||
s, privKeys := testutil.DeterministicGenesisState(t, validators)
|
||||
aggBits := bitfield.NewBitlist(8)
|
||||
aggBits.SetBitAt(1, true)
|
||||
att := ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
BeaconBlockRoot: r32[:],
|
||||
Source: ðpb.Checkpoint{Epoch: 0, Root: bytesutil.PadTo([]byte("hello-world"), 32)},
|
||||
Target: ðpb.Checkpoint{Epoch: 0, Root: r32[:]},
|
||||
},
|
||||
AggregationBits: aggBits,
|
||||
}
|
||||
committee, err := helpers.BeaconCommitteeFromState(s, att.Data.Slot, att.Data.CommitteeIndex)
|
||||
assert.NoError(t, err)
|
||||
attestingIndices := attestationutil.AttestingIndices(att.AggregationBits, committee)
|
||||
assert.NoError(t, err)
|
||||
attesterDomain, err := helpers.Domain(s.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, s.GenesisValidatorRoot())
|
||||
require.NoError(t, err)
|
||||
hashTreeRoot, err := helpers.ComputeSigningRoot(att.Data, attesterDomain)
|
||||
assert.NoError(t, err)
|
||||
for _, i := range attestingIndices {
|
||||
att.Signature = privKeys[i].Sign(hashTreeRoot[:]).Marshal()
|
||||
}
|
||||
|
||||
// Arbitrary aggregator index for testing purposes.
|
||||
aggregatorIndex := committee[0]
|
||||
sig, err := helpers.ComputeDomainAndSign(s, 0, att.Data.Slot, params.BeaconConfig().DomainSelectionProof, privKeys[aggregatorIndex])
|
||||
require.NoError(t, err)
|
||||
aggregateAndProof := ðpb.AggregateAttestationAndProof{
|
||||
SelectionProof: sig,
|
||||
Aggregate: att,
|
||||
AggregatorIndex: aggregatorIndex,
|
||||
}
|
||||
aggreSig, err := helpers.ComputeDomainAndSign(s, 0, aggregateAndProof, params.BeaconConfig().DomainAggregateAndProof, privKeys[aggregatorIndex])
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, s.SetGenesisTime(uint64(time.Now().Unix())))
|
||||
c, err := lru.New(10)
|
||||
require.NoError(t, err)
|
||||
r = &Service{
|
||||
p2p: p1,
|
||||
db: db,
|
||||
chain: &mock.ChainService{Genesis: time.Now(),
|
||||
State: s,
|
||||
FinalizedCheckPoint: ðpb.Checkpoint{
|
||||
Root: aggregateAndProof.Aggregate.Data.BeaconBlockRoot,
|
||||
Epoch: 0,
|
||||
}},
|
||||
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
|
||||
attPool: attestations.NewPool(),
|
||||
stateSummaryCache: cache.NewStateSummaryCache(),
|
||||
seenAttestationCache: c,
|
||||
}
|
||||
|
||||
r.blkRootToPendingAtts[r32] = []*ethpb.SignedAggregateAttestationAndProof{{Message: aggregateAndProof, Signature: aggreSig}}
|
||||
require.NoError(t, r.processPendingAtts(context.Background()))
|
||||
|
||||
assert.Equal(t, true, p1.BroadcastCalled, "Could not broadcast the good aggregate")
|
||||
@@ -173,7 +265,7 @@ func TestProcessPendingAtts_HasBlockSaveAggregatedAtt(t *testing.T) {
|
||||
Data: ðpb.AttestationData{
|
||||
BeaconBlockRoot: root[:],
|
||||
Source: ðpb.Checkpoint{Epoch: 0, Root: bytesutil.PadTo([]byte("hello-world"), 32)},
|
||||
Target: ðpb.Checkpoint{Epoch: 0, Root: bytesutil.PadTo([]byte("hello-world"), 32)},
|
||||
Target: ðpb.Checkpoint{Epoch: 0, Root: root[:]},
|
||||
},
|
||||
AggregationBits: aggBits,
|
||||
}
|
||||
@@ -207,6 +299,8 @@ func TestProcessPendingAtts_HasBlockSaveAggregatedAtt(t *testing.T) {
|
||||
|
||||
require.NoError(t, beaconState.SetGenesisTime(uint64(time.Now().Unix())))
|
||||
|
||||
c, err := lru.New(10)
|
||||
require.NoError(t, err)
|
||||
r := &Service{
|
||||
p2p: p1,
|
||||
db: db,
|
||||
@@ -219,6 +313,7 @@ func TestProcessPendingAtts_HasBlockSaveAggregatedAtt(t *testing.T) {
|
||||
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
|
||||
attPool: attestations.NewPool(),
|
||||
stateSummaryCache: cache.NewStateSummaryCache(),
|
||||
seenAttestationCache: c,
|
||||
}
|
||||
|
||||
sb = testutil.NewBeaconBlock()
|
||||
@@ -235,7 +330,7 @@ func TestProcessPendingAtts_HasBlockSaveAggregatedAtt(t *testing.T) {
|
||||
assert.DeepEqual(t, att, r.attPool.AggregatedAttestations()[0], "Incorrect saved att")
|
||||
atts, err := r.attPool.UnaggregatedAttestations()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 0, len(atts), "Did save unaggregated att")
|
||||
assert.Equal(t, 0, len(atts), "Did save aggregated att")
|
||||
require.LogsContain(t, hook, "Verified and saved pending attestations to pool")
|
||||
}
|
||||
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"encoding/hex"
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
@@ -26,6 +27,7 @@ var processPendingBlocksPeriod = slotutil.DivideSlotBy(3 /* times per slot */)
|
||||
|
||||
const maxPeerRequest = 50
|
||||
const numOfTries = 5
|
||||
const maxBlocksPerSlot = 3
|
||||
|
||||
// processes pending blocks queue on every processPendingBlocksPeriod
|
||||
func (s *Service) processPendingBlocksQueue() {
|
||||
@@ -34,7 +36,7 @@ func (s *Service) processPendingBlocksQueue() {
|
||||
runutil.RunEvery(s.ctx, processPendingBlocksPeriod, func() {
|
||||
locker.Lock()
|
||||
if err := s.processPendingBlocks(s.ctx); err != nil {
|
||||
log.WithError(err).Debug("Failed to process pending blocks")
|
||||
log.WithError(err).Debug("Could not process pending blocks")
|
||||
}
|
||||
locker.Unlock()
|
||||
})
|
||||
@@ -63,7 +65,7 @@ func (s *Service) processPendingBlocks(ctx context.Context) error {
|
||||
span.AddAttributes(trace.Int64Attribute("slot", int64(slot)))
|
||||
|
||||
s.pendingQueueLock.RLock()
|
||||
bs := s.slotToPendingBlocks[slot]
|
||||
bs := s.pendingBlocksInCache(slot)
|
||||
// Skip if there's no block in the queue.
|
||||
if len(bs) == 0 {
|
||||
s.pendingQueueLock.RUnlock()
|
||||
@@ -99,7 +101,9 @@ func (s *Service) processPendingBlocks(ctx context.Context) error {
|
||||
}
|
||||
// Remove block from queue.
|
||||
s.pendingQueueLock.Lock()
|
||||
s.deleteBlockFromPendingQueue(slot, b, blkRoot)
|
||||
if err := s.deleteBlockFromPendingQueue(slot, b, blkRoot); err != nil {
|
||||
return err
|
||||
}
|
||||
s.pendingQueueLock.Unlock()
|
||||
span.End()
|
||||
continue
|
||||
@@ -125,19 +129,38 @@ func (s *Service) processPendingBlocks(ctx context.Context) error {
|
||||
span.End()
|
||||
continue
|
||||
}
|
||||
|
||||
if err := s.validateBeaconBlock(ctx, b, blkRoot); err != nil {
|
||||
log.Debugf("Could not validate block from slot %d: %v", b.Block.Slot, err)
|
||||
s.setBadBlock(ctx, blkRoot)
|
||||
traceutil.AnnotateError(span, err)
|
||||
// In the next iteration of the queue, this block will be removed from
|
||||
// the pending queue as it has been marked as a 'bad' block.
|
||||
span.End()
|
||||
continue
|
||||
}
|
||||
|
||||
if err := s.chain.ReceiveBlock(ctx, b, blkRoot); err != nil {
|
||||
log.Debugf("Could not process block from slot %d: %v", b.Block.Slot, err)
|
||||
s.setBadBlock(ctx, blkRoot)
|
||||
traceutil.AnnotateError(span, err)
|
||||
// In the next iteration of the queue, this block will be removed from
|
||||
// the pending queue as it has been marked as a 'bad' block.
|
||||
span.End()
|
||||
continue
|
||||
}
|
||||
|
||||
s.setSeenBlockIndexSlot(b.Block.Slot, b.Block.ProposerIndex)
|
||||
|
||||
// Broadcasting the block again once a node is able to process it.
|
||||
if err := s.p2p.Broadcast(ctx, b); err != nil {
|
||||
log.WithError(err).Debug("Failed to broadcast block")
|
||||
log.WithError(err).Debug("Could not broadcast block")
|
||||
}
|
||||
|
||||
s.pendingQueueLock.Lock()
|
||||
s.deleteBlockFromPendingQueue(slot, b, blkRoot)
|
||||
if err := s.deleteBlockFromPendingQueue(slot, b, blkRoot); err != nil {
|
||||
return err
|
||||
}
|
||||
s.pendingQueueLock.Unlock()
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
@@ -200,8 +223,11 @@ func (s *Service) sortedPendingSlots() []uint64 {
|
||||
s.pendingQueueLock.RLock()
|
||||
defer s.pendingQueueLock.RUnlock()
|
||||
|
||||
slots := make([]uint64, 0, len(s.slotToPendingBlocks))
|
||||
for slot := range s.slotToPendingBlocks {
|
||||
items := s.slotToPendingBlocks.Items()
|
||||
|
||||
slots := make([]uint64, 0, len(items))
|
||||
for k := range items {
|
||||
slot := cacheKeyToSlot(k)
|
||||
slots = append(slots, slot)
|
||||
}
|
||||
sort.Slice(slots, func(i, j int) bool {
|
||||
@@ -219,7 +245,13 @@ func (s *Service) validatePendingSlots() error {
|
||||
oldBlockRoots := make(map[[32]byte]bool)
|
||||
|
||||
finalizedEpoch := s.chain.FinalizedCheckpt().Epoch
|
||||
for slot, blks := range s.slotToPendingBlocks {
|
||||
if s.slotToPendingBlocks == nil {
|
||||
return errors.New("slotToPendingBlocks cache can't be nil")
|
||||
}
|
||||
items := s.slotToPendingBlocks.Items()
|
||||
for k := range items {
|
||||
slot := cacheKeyToSlot(k)
|
||||
blks := s.pendingBlocksInCache(slot)
|
||||
for _, b := range blks {
|
||||
epoch := helpers.SlotToEpoch(slot)
|
||||
// remove all descendant blocks of old blocks
|
||||
@@ -229,7 +261,9 @@ func (s *Service) validatePendingSlots() error {
|
||||
return err
|
||||
}
|
||||
oldBlockRoots[root] = true
|
||||
s.deleteBlockFromPendingQueue(slot, b, root)
|
||||
if err := s.deleteBlockFromPendingQueue(slot, b, root); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
// don't process old blocks
|
||||
@@ -239,7 +273,9 @@ func (s *Service) validatePendingSlots() error {
|
||||
return err
|
||||
}
|
||||
oldBlockRoots[blkRoot] = true
|
||||
s.deleteBlockFromPendingQueue(slot, b, blkRoot)
|
||||
if err := s.deleteBlockFromPendingQueue(slot, b, blkRoot); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -249,19 +285,20 @@ func (s *Service) validatePendingSlots() error {
|
||||
func (s *Service) clearPendingSlots() {
|
||||
s.pendingQueueLock.Lock()
|
||||
defer s.pendingQueueLock.Unlock()
|
||||
s.slotToPendingBlocks = make(map[uint64][]*ethpb.SignedBeaconBlock)
|
||||
s.slotToPendingBlocks.Flush()
|
||||
s.seenPendingBlocks = make(map[[32]byte]bool)
|
||||
}
|
||||
|
||||
// Delete block from the list in the pending queue using the slot as key.
|
||||
// Note: this helper is not thread safe.
|
||||
func (s *Service) deleteBlockFromPendingQueue(slot uint64, b *ethpb.SignedBeaconBlock, r [32]byte) {
|
||||
func (s *Service) deleteBlockFromPendingQueue(slot uint64, b *ethpb.SignedBeaconBlock, r [32]byte) error {
|
||||
mutexasserts.AssertRWMutexLocked(&s.pendingQueueLock)
|
||||
|
||||
blks, ok := s.slotToPendingBlocks[slot]
|
||||
if !ok {
|
||||
return
|
||||
blks := s.pendingBlocksInCache(slot)
|
||||
if len(blks) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
newBlks := make([]*ethpb.SignedBeaconBlock, 0, len(blks))
|
||||
for _, blk := range blks {
|
||||
if ssz.DeepEqual(blk, b) {
|
||||
@@ -270,28 +307,76 @@ func (s *Service) deleteBlockFromPendingQueue(slot uint64, b *ethpb.SignedBeacon
|
||||
newBlks = append(newBlks, blk)
|
||||
}
|
||||
if len(newBlks) == 0 {
|
||||
delete(s.slotToPendingBlocks, slot)
|
||||
return
|
||||
s.slotToPendingBlocks.Delete(slotToCacheKey(slot))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decrease expiration time in proportion to how many blocks are still in the cache for the slot key.
|
||||
d := pendingBlockExpTime / time.Duration(len(newBlks))
|
||||
if err := s.slotToPendingBlocks.Replace(slotToCacheKey(slot), newBlks, d); err != nil {
|
||||
return err
|
||||
}
|
||||
s.slotToPendingBlocks[slot] = newBlks
|
||||
delete(s.seenPendingBlocks, r)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Insert block to the list in the pending queue using the slot as key.
|
||||
// Note: this helper is not thread safe.
|
||||
func (s *Service) insertBlockToPendingQueue(slot uint64, b *ethpb.SignedBeaconBlock, r [32]byte) {
|
||||
func (s *Service) insertBlockToPendingQueue(slot uint64, b *ethpb.SignedBeaconBlock, r [32]byte) error {
|
||||
mutexasserts.AssertRWMutexLocked(&s.pendingQueueLock)
|
||||
|
||||
if s.seenPendingBlocks[r] {
|
||||
return
|
||||
return nil
|
||||
}
|
||||
|
||||
_, ok := s.slotToPendingBlocks[slot]
|
||||
if ok {
|
||||
blks := s.slotToPendingBlocks[slot]
|
||||
s.slotToPendingBlocks[slot] = append(blks, b)
|
||||
} else {
|
||||
s.slotToPendingBlocks[slot] = []*ethpb.SignedBeaconBlock{b}
|
||||
if err := s.addPendingBlockToCache(b); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.seenPendingBlocks[r] = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// This returns the signed beacon blocks stored in slotToPendingBlocks for the given slot key.
|
||||
func (s *Service) pendingBlocksInCache(slot uint64) []*ethpb.SignedBeaconBlock {
|
||||
k := slotToCacheKey(slot)
|
||||
value, ok := s.slotToPendingBlocks.Get(k)
|
||||
if !ok {
|
||||
return []*ethpb.SignedBeaconBlock{}
|
||||
}
|
||||
blks, ok := value.([]*ethpb.SignedBeaconBlock)
|
||||
if !ok {
|
||||
return []*ethpb.SignedBeaconBlock{}
|
||||
}
|
||||
return blks
|
||||
}
|
||||
|
||||
// This adds input signed beacon block to slotToPendingBlocks cache.
|
||||
func (s *Service) addPendingBlockToCache(b *ethpb.SignedBeaconBlock) error {
|
||||
if b == nil || b.Block == nil {
|
||||
return errors.New("nil block")
|
||||
}
|
||||
|
||||
blks := s.pendingBlocksInCache(b.Block.Slot)
|
||||
|
||||
if len(blks) >= maxBlocksPerSlot {
|
||||
return nil
|
||||
}
|
||||
|
||||
blks = append(blks, b)
|
||||
k := slotToCacheKey(b.Block.Slot)
|
||||
s.slotToPendingBlocks.Set(k, blks, pendingBlockExpTime)
|
||||
return nil
|
||||
}
|
||||
|
||||
// This converts input string to slot number in uint64.
func cacheKeyToSlot(s string) uint64 {
	b := []byte(s)
	return bytesutil.BytesToUint64BigEndian(b)
}

// This converts input slot number to a key to be used for slotToPendingBlocks cache.
func slotToCacheKey(s uint64) string {
	b := bytesutil.Uint64ToBytesBigEndian(s)
	return string(b)
}
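The helpers above replace the plain slot-keyed map with a github.com/patrickmn/go-cache cache whose string keys are big-endian slot encodings, so stale entries expire on their own. A small standalone sketch of the same idea using encoding/binary for the key conversion; the expiration values are illustrative, not Prysm's:

package main

import (
	"encoding/binary"
	"fmt"
	"time"

	gcache "github.com/patrickmn/go-cache"
)

// slotToKey encodes a slot as a fixed-width big-endian string key.
func slotToKey(slot uint64) string {
	b := make([]byte, 8)
	binary.BigEndian.PutUint64(b, slot)
	return string(b)
}

// keyToSlot decodes a cache key back to the slot number.
func keyToSlot(k string) uint64 {
	return binary.BigEndian.Uint64([]byte(k))
}

func main() {
	// Entries default to a 1s lifetime and are swept every 2s, as in the hunks above.
	pending := gcache.New(time.Second, 2*time.Second)

	pending.Set(slotToKey(12345), []string{"block-a"}, gcache.DefaultExpiration)

	if v, ok := pending.Get(slotToKey(12345)); ok {
		fmt.Println("cached blocks:", v.([]string))
	}
	for k := range pending.Items() {
		fmt.Println("pending slot:", keyToSlot(k))
	}
}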
@@ -10,12 +10,15 @@ import (
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
"github.com/libp2p/go-libp2p-core/network"
|
||||
"github.com/libp2p/go-libp2p-core/protocol"
|
||||
gcache "github.com/patrickmn/go-cache"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
|
||||
dbtest "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers"
|
||||
p2ptest "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing"
|
||||
p2pTypes "github.com/prysmaticlabs/prysm/beacon-chain/p2p/types"
|
||||
p2ptypes "github.com/prysmaticlabs/prysm/beacon-chain/p2p/types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/rand"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
@@ -28,7 +31,7 @@ import (
|
||||
// \- b3
|
||||
// Test b1 was missing then received and we can process b0 -> b1 -> b2
|
||||
func TestRegularSyncBeaconBlockSubscriber_ProcessPendingBlocks1(t *testing.T) {
|
||||
db, _ := dbtest.SetupDB(t)
|
||||
db, stateSummaryCache := dbtest.SetupDB(t)
|
||||
|
||||
p1 := p2ptest.NewTestP2P(t)
|
||||
r := &Service{
|
||||
@@ -39,8 +42,10 @@ func TestRegularSyncBeaconBlockSubscriber_ProcessPendingBlocks1(t *testing.T) {
|
||||
Epoch: 0,
|
||||
},
|
||||
},
|
||||
slotToPendingBlocks: make(map[uint64][]*ethpb.SignedBeaconBlock),
|
||||
slotToPendingBlocks: gcache.New(time.Second, 2*time.Second),
|
||||
seenPendingBlocks: make(map[[32]byte]bool),
|
||||
stateSummaryCache: stateSummaryCache,
|
||||
stateGen: stategen.New(db, stateSummaryCache),
|
||||
}
|
||||
err := r.initCaches()
|
||||
require.NoError(t, err)
|
||||
@@ -66,21 +71,23 @@ func TestRegularSyncBeaconBlockSubscriber_ProcessPendingBlocks1(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
// Add b2 to the cache
|
||||
r.insertBlockToPendingQueue(b2.Block.Slot, b2, b2Root)
|
||||
require.NoError(t, r.insertBlockToPendingQueue(b2.Block.Slot, b2, b2Root))
|
||||
|
||||
require.NoError(t, r.processPendingBlocks(context.Background()))
|
||||
assert.Equal(t, 1, len(r.slotToPendingBlocks), "Incorrect size for slot to pending blocks cache")
|
||||
assert.Equal(t, 1, len(r.slotToPendingBlocks.Items()), "Incorrect size for slot to pending blocks cache")
|
||||
assert.Equal(t, 1, len(r.seenPendingBlocks), "Incorrect size for seen pending block")
|
||||
|
||||
// Add b1 to the cache
|
||||
r.insertBlockToPendingQueue(b1.Block.Slot, b1, b1Root)
|
||||
require.NoError(t, r.insertBlockToPendingQueue(b1.Block.Slot, b1, b1Root))
|
||||
require.NoError(t, r.db.SaveBlock(context.Background(), b1))
|
||||
|
||||
// Insert bad b1 in the cache to verify the good one doesn't get replaced.
|
||||
r.insertBlockToPendingQueue(b1.Block.Slot, testutil.NewBeaconBlock(), [32]byte{})
|
||||
require.NoError(t, r.insertBlockToPendingQueue(b1.Block.Slot, testutil.NewBeaconBlock(), [32]byte{}))
|
||||
|
||||
require.NoError(t, r.processPendingBlocks(context.Background()))
|
||||
assert.Equal(t, 1, len(r.slotToPendingBlocks), "Incorrect size for slot to pending blocks cache")
|
||||
require.NoError(t, r.processPendingBlocks(context.Background())) // Marks a block as bad
|
||||
require.NoError(t, r.processPendingBlocks(context.Background())) // Bad block removed on second run
|
||||
|
||||
assert.Equal(t, 1, len(r.slotToPendingBlocks.Items()), "Incorrect size for slot to pending blocks cache")
|
||||
assert.Equal(t, 2, len(r.seenPendingBlocks), "Incorrect size for seen pending block")
|
||||
}
|
||||
|
||||
@@ -97,7 +104,7 @@ func TestRegularSync_InsertDuplicateBlocks(t *testing.T) {
|
||||
Root: make([]byte, 32),
|
||||
},
|
||||
},
|
||||
slotToPendingBlocks: make(map[uint64][]*ethpb.SignedBeaconBlock),
|
||||
slotToPendingBlocks: gcache.New(time.Second, 2*time.Second),
|
||||
seenPendingBlocks: make(map[[32]byte]bool),
|
||||
}
|
||||
err := r.initCaches()
|
||||
@@ -113,19 +120,19 @@ func TestRegularSync_InsertDuplicateBlocks(t *testing.T) {
|
||||
b1.Block.ParentRoot = b0Root[:]
|
||||
b1r := [32]byte{'b'}
|
||||
|
||||
r.insertBlockToPendingQueue(b0.Block.Slot, b0, b0r)
|
||||
require.Equal(t, 1, len(r.slotToPendingBlocks[b0.Block.Slot]), "Block was not added to map")
|
||||
require.NoError(t, r.insertBlockToPendingQueue(b0.Block.Slot, b0, b0r))
|
||||
require.Equal(t, 1, len(r.pendingBlocksInCache(b0.Block.Slot)), "Block was not added to map")
|
||||
|
||||
r.insertBlockToPendingQueue(b1.Block.Slot, b1, b1r)
|
||||
require.Equal(t, 1, len(r.slotToPendingBlocks[b1.Block.Slot]), "Block was not added to map")
|
||||
require.NoError(t, r.insertBlockToPendingQueue(b1.Block.Slot, b1, b1r))
|
||||
require.Equal(t, 1, len(r.pendingBlocksInCache(b1.Block.Slot)), "Block was not added to map")
|
||||
|
||||
// Add duplicate block which should not be saved.
|
||||
r.insertBlockToPendingQueue(b0.Block.Slot, b0, b0r)
|
||||
require.Equal(t, 1, len(r.slotToPendingBlocks[b0.Block.Slot]), "Block was added to map")
|
||||
require.NoError(t, r.insertBlockToPendingQueue(b0.Block.Slot, b0, b0r))
|
||||
require.Equal(t, 1, len(r.pendingBlocksInCache(b0.Block.Slot)), "Block was added to map")
|
||||
|
||||
// Add duplicate block which should not be saved.
|
||||
r.insertBlockToPendingQueue(b1.Block.Slot, b1, b1r)
|
||||
require.Equal(t, 1, len(r.slotToPendingBlocks[b1.Block.Slot]), "Block was added to map")
|
||||
require.NoError(t, r.insertBlockToPendingQueue(b1.Block.Slot, b1, b1r))
|
||||
require.Equal(t, 1, len(r.pendingBlocksInCache(b1.Block.Slot)), "Block was added to map")
|
||||
|
||||
}
|
||||
|
||||
@@ -134,7 +141,7 @@ func TestRegularSync_InsertDuplicateBlocks(t *testing.T) {
|
||||
// \- b3 - b4
|
||||
// Test b2 and b3 were missed, after receiving them we can process 2 chains.
|
||||
func TestRegularSyncBeaconBlockSubscriber_ProcessPendingBlocks_2Chains(t *testing.T) {
|
||||
db, _ := dbtest.SetupDB(t)
|
||||
db, stateSummaryCache := dbtest.SetupDB(t)
|
||||
p1 := p2ptest.NewTestP2P(t)
|
||||
p2 := p2ptest.NewTestP2P(t)
|
||||
p1.Connect(p2)
|
||||
@@ -149,9 +156,9 @@ func TestRegularSyncBeaconBlockSubscriber_ProcessPendingBlocks_2Chains(t *testin
|
||||
if code == 0 {
|
||||
t.Error("Expected a non-zero code")
|
||||
}
|
||||
if errMsg != errWrongForkDigestVersion.Error() {
|
||||
t.Logf("Received error string len %d, wanted error string len %d", len(errMsg), len(errWrongForkDigestVersion.Error()))
|
||||
t.Errorf("Received unexpected message response in the stream: %s. Wanted %s.", errMsg, errWrongForkDigestVersion.Error())
|
||||
if errMsg != p2ptypes.ErrWrongForkDigestVersion.Error() {
|
||||
t.Logf("Received error string len %d, wanted error string len %d", len(errMsg), len(p2ptypes.ErrWrongForkDigestVersion.Error()))
|
||||
t.Errorf("Received unexpected message response in the stream: %s. Wanted %s.", errMsg, p2ptypes.ErrWrongForkDigestVersion.Error())
|
||||
}
|
||||
})
|
||||
|
||||
@@ -164,8 +171,10 @@ func TestRegularSyncBeaconBlockSubscriber_ProcessPendingBlocks_2Chains(t *testin
|
||||
Root: make([]byte, 32),
|
||||
},
|
||||
},
|
||||
slotToPendingBlocks: make(map[uint64][]*ethpb.SignedBeaconBlock),
|
||||
slotToPendingBlocks: gcache.New(time.Second, 2*time.Second),
|
||||
seenPendingBlocks: make(map[[32]byte]bool),
|
||||
stateSummaryCache: stateSummaryCache,
|
||||
stateGen: stategen.New(db, stateSummaryCache),
|
||||
}
|
||||
err := r.initCaches()
|
||||
require.NoError(t, err)
|
||||
@@ -206,26 +215,34 @@ func TestRegularSyncBeaconBlockSubscriber_ProcessPendingBlocks_2Chains(t *testin
|
||||
b4Root, err := b4.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
r.insertBlockToPendingQueue(b4.Block.Slot, b4, b4Root)
|
||||
r.insertBlockToPendingQueue(b5.Block.Slot, b5, b5Root)
|
||||
require.NoError(t, r.insertBlockToPendingQueue(b4.Block.Slot, b4, b4Root))
|
||||
require.NoError(t, r.insertBlockToPendingQueue(b5.Block.Slot, b5, b5Root))
|
||||
|
||||
require.NoError(t, r.processPendingBlocks(context.Background()))
|
||||
assert.Equal(t, 2, len(r.slotToPendingBlocks), "Incorrect size for slot to pending blocks cache")
|
||||
require.NoError(t, r.processPendingBlocks(context.Background())) // Marks a block as bad
|
||||
require.NoError(t, r.processPendingBlocks(context.Background())) // Bad block removed on second run
|
||||
|
||||
assert.Equal(t, 2, len(r.slotToPendingBlocks.Items()), "Incorrect size for slot to pending blocks cache")
|
||||
assert.Equal(t, 2, len(r.seenPendingBlocks), "Incorrect size for seen pending block")
|
||||
|
||||
// Add b3 to the cache
|
||||
r.insertBlockToPendingQueue(b3.Block.Slot, b3, b3Root)
|
||||
require.NoError(t, r.insertBlockToPendingQueue(b3.Block.Slot, b3, b3Root))
|
||||
require.NoError(t, r.db.SaveBlock(context.Background(), b3))
|
||||
require.NoError(t, r.processPendingBlocks(context.Background()))
|
||||
assert.Equal(t, 1, len(r.slotToPendingBlocks), "Incorrect size for slot to pending blocks cache")
|
||||
|
||||
require.NoError(t, r.processPendingBlocks(context.Background())) // Marks a block as bad
|
||||
require.NoError(t, r.processPendingBlocks(context.Background())) // Bad block removed on second run
|
||||
|
||||
assert.Equal(t, 1, len(r.slotToPendingBlocks.Items()), "Incorrect size for slot to pending blocks cache")
|
||||
assert.Equal(t, 3, len(r.seenPendingBlocks), "Incorrect size for seen pending block")
|
||||
|
||||
// Add b2 to the cache
|
||||
r.insertBlockToPendingQueue(b2.Block.Slot, b2, b2Root)
|
||||
require.NoError(t, r.insertBlockToPendingQueue(b2.Block.Slot, b2, b2Root))
|
||||
|
||||
require.NoError(t, r.db.SaveBlock(context.Background(), b2))
|
||||
require.NoError(t, r.processPendingBlocks(context.Background()))
|
||||
assert.Equal(t, 0, len(r.slotToPendingBlocks), "Incorrect size for slot to pending blocks cache")
|
||||
|
||||
require.NoError(t, r.processPendingBlocks(context.Background())) // Marks a block as bad
|
||||
require.NoError(t, r.processPendingBlocks(context.Background())) // Bad block removed on second run
|
||||
|
||||
assert.Equal(t, 0, len(r.slotToPendingBlocks.Items()), "Incorrect size for slot to pending blocks cache")
|
||||
assert.Equal(t, 4, len(r.seenPendingBlocks), "Incorrect size for seen pending block")
|
||||
}
|
||||
|
||||
@@ -245,7 +262,7 @@ func TestRegularSyncBeaconBlockSubscriber_PruneOldPendingBlocks(t *testing.T) {
|
||||
Root: make([]byte, 32),
|
||||
},
|
||||
},
|
||||
slotToPendingBlocks: make(map[uint64][]*ethpb.SignedBeaconBlock),
|
||||
slotToPendingBlocks: gcache.New(time.Second, 2*time.Second),
|
||||
seenPendingBlocks: make(map[[32]byte]bool),
|
||||
}
|
||||
err := r.initCaches()
|
||||
@@ -287,27 +304,27 @@ func TestRegularSyncBeaconBlockSubscriber_PruneOldPendingBlocks(t *testing.T) {
|
||||
b4Root, err := b4.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
r.insertBlockToPendingQueue(b2.Block.Slot, b2, b2Root)
|
||||
r.insertBlockToPendingQueue(b3.Block.Slot, b3, b3Root)
|
||||
r.insertBlockToPendingQueue(b4.Block.Slot, b4, b4Root)
|
||||
r.insertBlockToPendingQueue(b5.Block.Slot, b5, b5Root)
|
||||
require.NoError(t, r.insertBlockToPendingQueue(b2.Block.Slot, b2, b2Root))
|
||||
require.NoError(t, r.insertBlockToPendingQueue(b3.Block.Slot, b3, b3Root))
|
||||
require.NoError(t, r.insertBlockToPendingQueue(b4.Block.Slot, b4, b4Root))
|
||||
require.NoError(t, r.insertBlockToPendingQueue(b5.Block.Slot, b5, b5Root))
|
||||
|
||||
require.NoError(t, r.processPendingBlocks(context.Background()))
|
||||
assert.Equal(t, 0, len(r.slotToPendingBlocks), "Incorrect size for slot to pending blocks cache")
|
||||
assert.Equal(t, 0, len(r.slotToPendingBlocks.Items()), "Incorrect size for slot to pending blocks cache")
|
||||
assert.Equal(t, 4, len(r.seenPendingBlocks), "Incorrect size for seen pending block")
|
||||
}
|
||||
|
||||
func TestService_sortedPendingSlots(t *testing.T) {
|
||||
r := &Service{
|
||||
slotToPendingBlocks: make(map[uint64][]*ethpb.SignedBeaconBlock),
|
||||
slotToPendingBlocks: gcache.New(time.Second, 2*time.Second),
|
||||
seenPendingBlocks: make(map[[32]byte]bool),
|
||||
}
|
||||
|
||||
var lastSlot uint64 = math.MaxUint64
|
||||
r.insertBlockToPendingQueue(lastSlot, ðpb.SignedBeaconBlock{}, [32]byte{1})
|
||||
r.insertBlockToPendingQueue(lastSlot-3, ðpb.SignedBeaconBlock{}, [32]byte{2})
|
||||
r.insertBlockToPendingQueue(lastSlot-5, ðpb.SignedBeaconBlock{}, [32]byte{3})
|
||||
r.insertBlockToPendingQueue(lastSlot-2, ðpb.SignedBeaconBlock{}, [32]byte{4})
|
||||
require.NoError(t, r.insertBlockToPendingQueue(lastSlot, ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: lastSlot}}, [32]byte{1}))
|
||||
require.NoError(t, r.insertBlockToPendingQueue(lastSlot-3, ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: lastSlot - 3}}, [32]byte{2}))
|
||||
require.NoError(t, r.insertBlockToPendingQueue(lastSlot-5, ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: lastSlot - 5}}, [32]byte{3}))
|
||||
require.NoError(t, r.insertBlockToPendingQueue(lastSlot-2, ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Slot: lastSlot - 2}}, [32]byte{4}))
|
||||
|
||||
want := []uint64{lastSlot - 5, lastSlot - 3, lastSlot - 2, lastSlot}
|
||||
assert.DeepEqual(t, want, r.sortedPendingSlots(), "Unexpected pending slots list")
|
||||
@@ -329,7 +346,7 @@ func TestService_BatchRootRequest(t *testing.T) {
|
||||
Root: make([]byte, 32),
|
||||
},
|
||||
},
|
||||
slotToPendingBlocks: make(map[uint64][]*ethpb.SignedBeaconBlock),
|
||||
slotToPendingBlocks: gcache.New(time.Second, 2*time.Second),
|
||||
seenPendingBlocks: make(map[[32]byte]bool),
|
||||
}
|
||||
|
||||
@@ -372,21 +389,21 @@ func TestService_BatchRootRequest(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
// Send in duplicated roots to also test deduplication.
|
||||
sentRoots := p2pTypes.BeaconBlockByRootsReq{b2Root, b2Root, b3Root, b3Root, b4Root, b5Root}
|
||||
expectedRoots := p2pTypes.BeaconBlockByRootsReq{b2Root, b3Root, b4Root, b5Root}
|
||||
sentRoots := p2ptypes.BeaconBlockByRootsReq{b2Root, b2Root, b3Root, b3Root, b4Root, b5Root}
|
||||
expectedRoots := p2ptypes.BeaconBlockByRootsReq{b2Root, b3Root, b4Root, b5Root}
|
||||
|
||||
pcl := protocol.ID("/eth2/beacon_chain/req/beacon_blocks_by_root/1/ssz_snappy")
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
|
||||
defer wg.Done()
|
||||
var out p2pTypes.BeaconBlockByRootsReq
|
||||
var out p2ptypes.BeaconBlockByRootsReq
|
||||
assert.NoError(t, p2.Encoding().DecodeWithMaxLength(stream, &out))
|
||||
assert.DeepEqual(t, expectedRoots, out, "Did not receive expected message")
|
||||
response := []*ethpb.SignedBeaconBlock{b2, b3, b4, b5}
|
||||
for _, blk := range response {
|
||||
_, err := stream.Write([]byte{responseCodeSuccess})
|
||||
assert.NoError(t, err, "Failed to write to stream")
|
||||
assert.NoError(t, err, "Could not write to stream")
|
||||
_, err = p2.Encoding().EncodeWithMaxLength(stream, blk)
|
||||
assert.NoError(t, err, "Could not send response back")
|
||||
}
|
||||
@@ -398,6 +415,27 @@ func TestService_BatchRootRequest(t *testing.T) {
|
||||
if testutil.WaitTimeout(&wg, 1*time.Second) {
|
||||
t.Fatal("Did not receive stream within 1 sec")
|
||||
}
|
||||
assert.Equal(t, 4, len(r.slotToPendingBlocks), "Incorrect size for slot to pending blocks cache")
|
||||
assert.Equal(t, 4, len(r.slotToPendingBlocks.Items()), "Incorrect size for slot to pending blocks cache")
|
||||
assert.Equal(t, 4, len(r.seenPendingBlocks), "Incorrect size for seen pending block")
|
||||
}
|
||||
|
||||
func TestService_AddPeningBlockToQueueOverMax(t *testing.T) {
|
||||
r := &Service{
|
||||
slotToPendingBlocks: gcache.New(time.Second, 2*time.Second),
|
||||
seenPendingBlocks: make(map[[32]byte]bool),
|
||||
}
|
||||
|
||||
b := testutil.NewBeaconBlock()
|
||||
b1 := state.CopySignedBeaconBlock(b)
|
||||
b1.Block.StateRoot = []byte{'a'}
|
||||
b2 := state.CopySignedBeaconBlock(b)
|
||||
b2.Block.StateRoot = []byte{'b'}
|
||||
require.NoError(t, r.insertBlockToPendingQueue(0, b, [32]byte{}))
|
||||
require.NoError(t, r.insertBlockToPendingQueue(0, b1, [32]byte{1}))
|
||||
require.NoError(t, r.insertBlockToPendingQueue(0, b2, [32]byte{2}))
|
||||
|
||||
b3 := state.CopySignedBeaconBlock(b)
|
||||
b3.Block.StateRoot = []byte{'c'}
|
||||
require.NoError(t, r.insertBlockToPendingQueue(0, b2, [32]byte{3}))
|
||||
require.Equal(t, maxBlocksPerSlot, len(r.pendingBlocksInCache(0)))
|
||||
}
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/flags"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
|
||||
p2ptypes "github.com/prysmaticlabs/prysm/beacon-chain/p2p/types"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/trailofbits/go-mutexasserts"
|
||||
)
|
||||
@@ -81,8 +82,8 @@ func (l *limiter) validateRequest(stream network.Stream, amt uint64) error {
|
||||
}
|
||||
if amt > uint64(remaining) {
|
||||
l.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
|
||||
writeErrorResponseToStream(responseCodeInvalidRequest, rateLimitedError, stream, l.p2p)
|
||||
return errors.New(rateLimitedError)
|
||||
writeErrorResponseToStream(responseCodeInvalidRequest, p2ptypes.ErrRateLimited.Error(), stream, l.p2p)
|
||||
return p2ptypes.ErrRateLimited
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
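The limiter change above returns the shared p2ptypes.ErrRateLimited sentinel instead of building the error from a string each time. A hedged, stdlib-only sketch of the same validate-then-reject shape, with a trivial remaining-capacity counter standing in for the leaky bucket collector:

package main

import (
	"errors"
	"fmt"
)

// errRateLimited stands in for p2ptypes.ErrRateLimited (illustrative sentinel).
var errRateLimited = errors.New("rate limited")

type limiter struct {
	remaining map[string]uint64 // per-peer remaining capacity
}

// validateRequest rejects a request that asks for more than the peer has left,
// mirroring the shape of the limiter.validateRequest change above.
func (l *limiter) validateRequest(peerID string, amt uint64) error {
	if amt > l.remaining[peerID] {
		return errRateLimited
	}
	l.remaining[peerID] -= amt
	return nil
}

func main() {
	l := &limiter{remaining: map[string]uint64{"peerA": 64}}
	fmt.Println(l.validateRequest("peerA", 32))                            // <nil>
	fmt.Println(errors.Is(l.validateRequest("peerA", 64), errRateLimited)) // true
}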
@@ -10,6 +10,7 @@ import (
|
||||
"github.com/libp2p/go-libp2p-core/protocol"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
|
||||
mockp2p "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing"
|
||||
p2ptypes "github.com/prysmaticlabs/prysm/beacon-chain/p2p/types"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
@@ -42,8 +43,7 @@ func TestRateLimiter_ExceedCapacity(t *testing.T) {
|
||||
code, errMsg, err := readStatusCodeNoDeadline(stream, p2.Encoding())
|
||||
require.NoError(t, err, "could not read incoming stream")
|
||||
assert.Equal(t, responseCodeInvalidRequest, code, "not equal response codes")
|
||||
assert.Equal(t, rateLimitedError, errMsg, "not equal errors")
|
||||
|
||||
assert.Equal(t, p2ptypes.ErrRateLimited.Error(), errMsg, "not equal errors")
|
||||
})
|
||||
wg.Add(1)
|
||||
stream, err := p1.BHost.NewStream(context.Background(), p2.PeerID(), protocol.ID(topic))
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
libp2pcore "github.com/libp2p/go-libp2p-core"
|
||||
"github.com/libp2p/go-libp2p-core/network"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
|
||||
p2ptypes "github.com/prysmaticlabs/prysm/beacon-chain/p2p/types"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/timeutils"
|
||||
"github.com/prysmaticlabs/prysm/shared/traceutil"
|
||||
@@ -73,7 +74,7 @@ func (s *Service) registerRPC(baseTopic string, handle rpcHandler) {
|
||||
// Check beforehand that the peer is valid.
|
||||
if s.p2p.Peers().IsBad(stream.Conn().RemotePeer()) {
|
||||
closeStream(stream, log)
|
||||
if err := s.sendGoodByeAndDisconnect(ctx, codeBanned, stream.Conn().RemotePeer()); err != nil {
|
||||
if err := s.sendGoodByeAndDisconnect(ctx, p2ptypes.GoodbyeCodeBanned, stream.Conn().RemotePeer()); err != nil {
|
||||
log.Debugf("Could not disconnect from peer: %v", err)
|
||||
}
|
||||
return
|
||||
@@ -100,8 +101,8 @@ func (s *Service) registerRPC(baseTopic string, handle rpcHandler) {
|
||||
if baseTopic == p2p.RPCMetaDataTopic {
|
||||
if err := handle(ctx, base, stream); err != nil {
|
||||
messageFailedProcessingCounter.WithLabelValues(topic).Inc()
|
||||
if err != errWrongForkDigestVersion {
|
||||
log.WithError(err).Debug("Failed to handle p2p RPC")
|
||||
if err != p2ptypes.ErrWrongForkDigestVersion {
|
||||
log.WithError(err).Debug("Could not handle p2p RPC")
|
||||
}
|
||||
traceutil.AnnotateError(span, err)
|
||||
}
|
||||
@@ -116,32 +117,32 @@ func (s *Service) registerRPC(baseTopic string, handle rpcHandler) {
|
||||
if err := s.p2p.Encoding().DecodeWithMaxLength(stream, msg.Interface()); err != nil {
|
||||
// Debug logs for goodbye/status errors
|
||||
if strings.Contains(topic, p2p.RPCGoodByeTopic) || strings.Contains(topic, p2p.RPCStatusTopic) {
|
||||
log.WithError(err).Debug("Failed to decode goodbye stream message")
|
||||
log.WithError(err).Debug("Could not decode goodbye stream message")
|
||||
traceutil.AnnotateError(span, err)
|
||||
return
|
||||
}
|
||||
log.WithError(err).Debug("Failed to decode stream message")
|
||||
log.WithError(err).Debug("Could not decode stream message")
|
||||
traceutil.AnnotateError(span, err)
|
||||
return
|
||||
}
|
||||
if err := handle(ctx, msg.Interface(), stream); err != nil {
|
||||
messageFailedProcessingCounter.WithLabelValues(topic).Inc()
|
||||
if err != errWrongForkDigestVersion {
|
||||
log.WithError(err).Debug("Failed to handle p2p RPC")
|
||||
if err != p2ptypes.ErrWrongForkDigestVersion {
|
||||
log.WithError(err).Debug("Could not handle p2p RPC")
|
||||
}
|
||||
traceutil.AnnotateError(span, err)
|
||||
}
|
||||
} else {
|
||||
msg := reflect.New(t)
|
||||
if err := s.p2p.Encoding().DecodeWithMaxLength(stream, msg.Interface()); err != nil {
|
||||
log.WithError(err).Debug("Failed to decode stream message")
|
||||
log.WithError(err).Debug("Could not decode stream message")
|
||||
traceutil.AnnotateError(span, err)
|
||||
return
|
||||
}
|
||||
if err := handle(ctx, msg.Elem().Interface(), stream); err != nil {
|
||||
messageFailedProcessingCounter.WithLabelValues(topic).Inc()
|
||||
if err != errWrongForkDigestVersion {
|
||||
log.WithError(err).Debug("Failed to handle p2p RPC")
|
||||
if err != p2ptypes.ErrWrongForkDigestVersion {
|
||||
log.WithError(err).Debug("Could not handle p2p RPC")
|
||||
}
|
||||
traceutil.AnnotateError(span, err)
|
||||
}
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db/filters"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/flags"
|
||||
p2ptypes "github.com/prysmaticlabs/prysm/beacon-chain/p2p/types"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
@@ -22,7 +23,7 @@ func (s *Service) beaconBlocksByRangeRPCHandler(ctx context.Context, msg interfa
|
||||
defer span.End()
|
||||
defer func() {
|
||||
if err := stream.Close(); err != nil {
|
||||
log.WithError(err).Debug("Failed to close stream")
|
||||
log.WithError(err).Debug("Could not close stream")
|
||||
}
|
||||
}()
|
||||
ctx, cancel := context.WithTimeout(ctx, respTimeout)
|
||||
@@ -80,14 +81,14 @@ func (s *Service) beaconBlocksByRangeRPCHandler(ctx context.Context, msg interfa
|
||||
}
|
||||
|
||||
if endSlot-startSlot > rangeLimit {
|
||||
s.writeErrorResponseToStream(responseCodeInvalidRequest, reqError, stream)
|
||||
err := errors.New(reqError)
|
||||
s.writeErrorResponseToStream(responseCodeInvalidRequest, p2ptypes.ErrInvalidRequest.Error(), stream)
|
||||
err := p2ptypes.ErrInvalidRequest
|
||||
traceutil.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
|
||||
err := s.writeBlockRangeToStream(ctx, startSlot, endSlot, m.Step, &prevRoot, stream)
|
||||
if err != nil && !errors.Is(err, errInvalidParent) {
|
||||
if err != nil && !errors.Is(err, p2ptypes.ErrInvalidParent) {
|
||||
return err
|
||||
}
|
||||
// Reduce capacity of peer in the rate limiter first.
|
||||
@@ -97,7 +98,7 @@ func (s *Service) beaconBlocksByRangeRPCHandler(ctx context.Context, msg interfa
|
||||
}
|
||||
// Exit in the event we have a disjoint chain to
|
||||
// return.
|
||||
if errors.Is(err, errInvalidParent) {
|
||||
if errors.Is(err, p2ptypes.ErrInvalidParent) {
|
||||
break
|
||||
}
|
||||
|
||||
@@ -127,8 +128,8 @@ func (s *Service) writeBlockRangeToStream(ctx context.Context, startSlot, endSlo
|
||||
filter := filters.NewFilter().SetStartSlot(startSlot).SetEndSlot(endSlot).SetSlotStep(step)
|
||||
blks, roots, err := s.db.Blocks(ctx, filter)
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Failed to retrieve blocks")
|
||||
s.writeErrorResponseToStream(responseCodeServerError, genericError, stream)
|
||||
log.WithError(err).Debug("Could not retrieve blocks")
|
||||
s.writeErrorResponseToStream(responseCodeServerError, p2ptypes.ErrGeneric.Error(), stream)
|
||||
traceutil.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
@@ -136,8 +137,8 @@ func (s *Service) writeBlockRangeToStream(ctx context.Context, startSlot, endSlo
|
||||
if startSlot == 0 {
|
||||
genBlock, genRoot, err := s.retrieveGenesisBlock(ctx)
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Failed to retrieve genesis block")
|
||||
s.writeErrorResponseToStream(responseCodeServerError, genericError, stream)
|
||||
log.WithError(err).Debug("Could not retrieve genesis block")
|
||||
s.writeErrorResponseToStream(responseCodeServerError, p2ptypes.ErrGeneric.Error(), stream)
|
||||
traceutil.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
@@ -148,15 +149,15 @@ func (s *Service) writeBlockRangeToStream(ctx context.Context, startSlot, endSlo
|
||||
// we only return valid sets of blocks.
|
||||
blks, roots, err = s.dedupBlocksAndRoots(blks, roots)
|
||||
if err != nil {
|
||||
s.writeErrorResponseToStream(responseCodeServerError, genericError, stream)
|
||||
s.writeErrorResponseToStream(responseCodeServerError, p2ptypes.ErrGeneric.Error(), stream)
|
||||
traceutil.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
blks, roots = s.sortBlocksAndRoots(blks, roots)
|
||||
|
||||
blks, err = s.filterBlocks(ctx, blks, roots, prevRoot, step, startSlot)
|
||||
if err != nil && err != errInvalidParent {
|
||||
s.writeErrorResponseToStream(responseCodeServerError, genericError, stream)
|
||||
if err != nil && err != p2ptypes.ErrInvalidParent {
|
||||
s.writeErrorResponseToStream(responseCodeServerError, p2ptypes.ErrGeneric.Error(), stream)
|
||||
traceutil.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
@@ -165,8 +166,8 @@ func (s *Service) writeBlockRangeToStream(ctx context.Context, startSlot, endSlo
|
||||
continue
|
||||
}
|
||||
if chunkErr := s.chunkWriter(stream, b); chunkErr != nil {
|
||||
log.WithError(chunkErr).Debug("Failed to send a chunked response")
|
||||
s.writeErrorResponseToStream(responseCodeServerError, genericError, stream)
|
||||
log.WithError(chunkErr).Debug("Could not send a chunked response")
|
||||
s.writeErrorResponseToStream(responseCodeServerError, p2ptypes.ErrGeneric.Error(), stream)
|
||||
traceutil.AnnotateError(span, chunkErr)
|
||||
return chunkErr
|
||||
}
|
||||
@@ -189,20 +190,20 @@ func (s *Service) validateRangeRequest(r *pb.BeaconBlocksByRangeRequest) error {

// Ensure all request params are within appropriate bounds
if count == 0 || count > maxRequestBlocks {
return errors.New(reqError)
return p2ptypes.ErrInvalidRequest
}

if step == 0 || step > rangeLimit {
return errors.New(reqError)
return p2ptypes.ErrInvalidRequest
}

if startSlot > highestExpectedSlot {
return errors.New(reqError)
return p2ptypes.ErrInvalidRequest
}

endSlot := startSlot + (step * (count - 1))
if endSlot-startSlot > rangeLimit {
return errors.New(reqError)
return p2ptypes.ErrInvalidRequest
}
return nil
}
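The hunks above replace string-built errors such as errors.New(reqError) with shared sentinel errors from p2ptypes, which callers can match with errors.Is instead of comparing messages. A minimal standalone sketch of that pattern, with illustrative names (validateCount, errInvalidRequest) rather than Prysm's actual identifiers:

package main

import (
	"errors"
	"fmt"
)

// errInvalidRequest is a sentinel error value. Callers can match it with
// errors.Is (or ==) instead of parsing error strings.
var errInvalidRequest = errors.New("invalid range, step or count")

// validateCount mirrors the shape of the validation above: bad input
// returns the shared sentinel rather than a freshly built string error.
func validateCount(count, maxRequestBlocks uint64) error {
	if count == 0 || count > maxRequestBlocks {
		return errInvalidRequest
	}
	return nil
}

func main() {
	if err := validateCount(0, 1024); errors.Is(err, errInvalidRequest) {
		fmt.Println("request rejected:", err)
	}
}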
@@ -228,7 +229,7 @@ func (s *Service) filterBlocks(ctx context.Context, blks []*ethpb.SignedBeaconBl
|
||||
if isRequestedSlotStep && isCanonical {
|
||||
// Exit early if our valid block is non linear.
|
||||
if parentValid && isSingular && !isLinear {
|
||||
return newBlks, errInvalidParent
|
||||
return newBlks, p2ptypes.ErrInvalidParent
|
||||
}
|
||||
newBlks = append(newBlks, blks[i])
|
||||
// Set the previous root as the
|
||||
|
||||
@@ -18,6 +18,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/flags"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/encoder"
|
||||
p2ptest "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing"
|
||||
p2ptypes "github.com/prysmaticlabs/prysm/beacon-chain/p2p/types"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
@@ -368,7 +369,7 @@ func TestRPCBeaconBlocksByRange_RPCHandlerRateLimitOverflow(t *testing.T) {
|
||||
|
||||
for i := 0; i < p2.Peers().Scorers().BadResponsesScorer().Params().Threshold; i++ {
|
||||
err := sendRequest(p1, p2, r, req, false, true)
|
||||
assert.ErrorContains(t, rateLimitedError, err)
|
||||
assert.ErrorContains(t, p2ptypes.ErrRateLimited.Error(), err)
|
||||
}
|
||||
|
||||
remainingCapacity := r.rateLimiter.limiterMap[topic].Remaining(p2.PeerID().String())
|
||||
@@ -402,7 +403,7 @@ func TestRPCBeaconBlocksByRange_RPCHandlerRateLimitOverflow(t *testing.T) {
|
||||
// One more request should result in overflow.
|
||||
for i := 0; i < p2.Peers().Scorers().BadResponsesScorer().Params().Threshold; i++ {
|
||||
err := sendRequest(p1, p2, r, req, false, false)
|
||||
assert.ErrorContains(t, rateLimitedError, err)
|
||||
assert.ErrorContains(t, p2ptypes.ErrRateLimited.Error(), err)
|
||||
}
|
||||
|
||||
remainingCapacity := r.rateLimiter.limiterMap[topic].Remaining(p2.PeerID().String())
|
||||
@@ -420,7 +421,7 @@ func TestRPCBeaconBlocksByRange_validateRangeRequest(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
req *pb.BeaconBlocksByRangeRequest
|
||||
expectedError string
|
||||
expectedError error
|
||||
errorToLog string
|
||||
}{
|
||||
{
|
||||
@@ -429,7 +430,7 @@ func TestRPCBeaconBlocksByRange_validateRangeRequest(t *testing.T) {
|
||||
Count: 0,
|
||||
Step: 1,
|
||||
},
|
||||
expectedError: reqError,
|
||||
expectedError: p2ptypes.ErrInvalidRequest,
|
||||
errorToLog: "validation did not fail with bad count",
|
||||
},
|
||||
{
|
||||
@@ -438,7 +439,7 @@ func TestRPCBeaconBlocksByRange_validateRangeRequest(t *testing.T) {
|
||||
Count: params.BeaconNetworkConfig().MaxRequestBlocks + 1,
|
||||
Step: 1,
|
||||
},
|
||||
expectedError: reqError,
|
||||
expectedError: p2ptypes.ErrInvalidRequest,
|
||||
errorToLog: "validation did not fail with bad count",
|
||||
},
|
||||
{
|
||||
@@ -455,7 +456,7 @@ func TestRPCBeaconBlocksByRange_validateRangeRequest(t *testing.T) {
|
||||
Step: 0,
|
||||
Count: 1,
|
||||
},
|
||||
expectedError: reqError,
|
||||
expectedError: p2ptypes.ErrInvalidRequest,
|
||||
errorToLog: "validation did not fail with bad step",
|
||||
},
|
||||
{
|
||||
@@ -464,7 +465,7 @@ func TestRPCBeaconBlocksByRange_validateRangeRequest(t *testing.T) {
|
||||
Step: rangeLimit + 1,
|
||||
Count: 1,
|
||||
},
|
||||
expectedError: reqError,
|
||||
expectedError: p2ptypes.ErrInvalidRequest,
|
||||
errorToLog: "validation did not fail with bad step",
|
||||
},
|
||||
{
|
||||
@@ -482,7 +483,7 @@ func TestRPCBeaconBlocksByRange_validateRangeRequest(t *testing.T) {
|
||||
Step: 1,
|
||||
Count: 1,
|
||||
},
|
||||
expectedError: reqError,
|
||||
expectedError: p2ptypes.ErrInvalidRequest,
|
||||
errorToLog: "validation did not fail with bad start slot",
|
||||
},
|
||||
{
|
||||
@@ -491,7 +492,7 @@ func TestRPCBeaconBlocksByRange_validateRangeRequest(t *testing.T) {
|
||||
Step: 1,
|
||||
Count: params.BeaconNetworkConfig().MaxRequestBlocks + 1,
|
||||
},
|
||||
expectedError: reqError,
|
||||
expectedError: p2ptypes.ErrInvalidRequest,
|
||||
errorToLog: "validation did not fail with bad end slot",
|
||||
},
|
||||
{
|
||||
@@ -500,7 +501,7 @@ func TestRPCBeaconBlocksByRange_validateRangeRequest(t *testing.T) {
|
||||
Step: 3,
|
||||
Count: uint64(slotsSinceGenesis / 2),
|
||||
},
|
||||
expectedError: reqError,
|
||||
expectedError: p2ptypes.ErrInvalidRequest,
|
||||
errorToLog: "validation did not fail with bad range",
|
||||
},
|
||||
{
|
||||
@@ -516,8 +517,8 @@ func TestRPCBeaconBlocksByRange_validateRangeRequest(t *testing.T) {
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if tt.expectedError != "" {
|
||||
assert.ErrorContains(t, tt.expectedError, r.validateRangeRequest(tt.req), tt.errorToLog)
|
||||
if tt.expectedError != nil {
|
||||
assert.ErrorContains(t, tt.expectedError.Error(), r.validateRangeRequest(tt.req), tt.errorToLog)
|
||||
} else {
|
||||
assert.NoError(t, r.validateRangeRequest(tt.req), tt.errorToLog)
|
||||
}
|
||||
|
||||
@@ -23,7 +23,9 @@ func (s *Service) sendRecentBeaconBlocksRequest(ctx context.Context, blockRoots
|
||||
return err
|
||||
}
|
||||
s.pendingQueueLock.Lock()
|
||||
s.insertBlockToPendingQueue(blk.Block.Slot, blk, blkRoot)
|
||||
if err := s.insertBlockToPendingQueue(blk.Block.Slot, blk, blkRoot); err != nil {
|
||||
return err
|
||||
}
|
||||
s.pendingQueueLock.Unlock()
|
||||
return nil
|
||||
})
|
||||
@@ -34,7 +36,7 @@ func (s *Service) sendRecentBeaconBlocksRequest(ctx context.Context, blockRoots
|
||||
func (s *Service) beaconBlocksRootRPCHandler(ctx context.Context, msg interface{}, stream libp2pcore.Stream) error {
|
||||
defer func() {
|
||||
if err := stream.Close(); err != nil {
|
||||
log.WithError(err).Debug("Failed to close stream")
|
||||
log.WithError(err).Debug("Could not close stream")
|
||||
}
|
||||
}()
|
||||
ctx, cancel := context.WithTimeout(ctx, ttfbTimeout)
|
||||
@@ -56,9 +58,9 @@ func (s *Service) beaconBlocksRootRPCHandler(ctx context.Context, msg interface{
|
||||
s.rateLimiter.add(stream, 1)
|
||||
resp, err := s.generateErrorResponse(responseCodeInvalidRequest, "no block roots provided in request")
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Failed to generate a response error")
|
||||
log.WithError(err).Debug("Could not generate a response error")
|
||||
} else if _, err := stream.Write(resp); err != nil {
|
||||
log.WithError(err).Debugf("Failed to write to stream")
|
||||
log.WithError(err).Debugf("Could not write to stream")
|
||||
}
|
||||
return errors.New("no block roots provided")
|
||||
}
|
||||
@@ -66,9 +68,9 @@ func (s *Service) beaconBlocksRootRPCHandler(ctx context.Context, msg interface{
|
||||
if uint64(len(blockRoots)) > params.BeaconNetworkConfig().MaxRequestBlocks {
|
||||
resp, err := s.generateErrorResponse(responseCodeInvalidRequest, "requested more than the max block limit")
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Failed to generate a response error")
|
||||
log.WithError(err).Debug("Could not generate a response error")
|
||||
} else if _, err := stream.Write(resp); err != nil {
|
||||
log.WithError(err).Debugf("Failed to write to stream")
|
||||
log.WithError(err).Debugf("Could not write to stream")
|
||||
}
|
||||
return errors.New("requested more than the max block limit")
|
||||
}
|
||||
@@ -77,12 +79,12 @@ func (s *Service) beaconBlocksRootRPCHandler(ctx context.Context, msg interface{
|
||||
for _, root := range blockRoots {
|
||||
blk, err := s.db.Block(ctx, root)
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Failed to fetch block")
|
||||
resp, err := s.generateErrorResponse(responseCodeServerError, genericError)
|
||||
log.WithError(err).Debug("Could not fetch block")
|
||||
resp, err := s.generateErrorResponse(responseCodeServerError, types.ErrGeneric.Error())
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Failed to generate a response error")
|
||||
log.WithError(err).Debug("Could not generate a response error")
|
||||
} else if _, err := stream.Write(resp); err != nil {
|
||||
log.WithError(err).Debugf("Failed to write to stream")
|
||||
log.WithError(err).Debugf("Could not write to stream")
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
"github.com/kevinms/leakybucket-go"
|
||||
"github.com/libp2p/go-libp2p-core/network"
|
||||
"github.com/libp2p/go-libp2p-core/protocol"
|
||||
gcache "github.com/patrickmn/go-cache"
|
||||
"github.com/protolambda/zssz"
|
||||
"github.com/protolambda/zssz/types"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
@@ -105,7 +106,7 @@ func TestRecentBeaconBlocks_RPCRequestSent(t *testing.T) {
|
||||
FinalizedCheckPoint: finalizedCheckpt,
|
||||
Root: blockARoot[:],
|
||||
},
|
||||
slotToPendingBlocks: make(map[uint64][]*ethpb.SignedBeaconBlock),
|
||||
slotToPendingBlocks: gcache.New(time.Second, 2*time.Second),
|
||||
seenPendingBlocks: make(map[[32]byte]bool),
|
||||
ctx: context.Background(),
|
||||
rateLimiter: newRateLimiter(p1),
|
||||
@@ -126,7 +127,7 @@ func TestRecentBeaconBlocks_RPCRequestSent(t *testing.T) {
|
||||
response := []*ethpb.SignedBeaconBlock{blockB, blockA}
|
||||
for _, blk := range response {
|
||||
_, err := stream.Write([]byte{responseCodeSuccess})
|
||||
assert.NoError(t, err, "Failed to write to stream")
|
||||
assert.NoError(t, err, "Could not write to stream")
|
||||
_, err = p2.Encoding().EncodeWithMaxLength(stream, blk)
|
||||
assert.NoError(t, err, "Could not send response back")
|
||||
}
|
||||
|
||||
@@ -14,53 +14,28 @@ import (
"github.com/sirupsen/logrus"
)

const (
// Spec defined codes
codeClientShutdown types.SSZUint64 = iota
codeWrongNetwork
codeGenericError

// Teku specific codes
codeUnableToVerifyNetwork = types.SSZUint64(128)

// Lighthouse specific codes
codeTooManyPeers = types.SSZUint64(129)
codeBadScore = types.SSZUint64(250)
codeBanned = types.SSZUint64(251)
)

var goodByes = map[types.SSZUint64]string{
codeClientShutdown: "client shutdown",
codeWrongNetwork: "irrelevant network",
codeGenericError: "fault/error",
codeUnableToVerifyNetwork: "unable to verify network",
codeTooManyPeers: "client has too many peers",
codeBadScore: "peer score too low",
codeBanned: "client banned this node",
}

var backOffTime = map[types.SSZUint64]time.Duration{
// Do not dial peers which are from a different/unverifiable
// network.
codeWrongNetwork: 24 * time.Hour,
codeUnableToVerifyNetwork: 24 * time.Hour,
types.GoodbyeCodeWrongNetwork: 24 * time.Hour,
types.GoodbyeCodeUnableToVerifyNetwork: 24 * time.Hour,
// If local peer is banned, we back off for
// 2 hours to let the remote peer score us
// back up again.
codeBadScore: 2 * time.Hour,
codeBanned: 2 * time.Hour,
codeClientShutdown: 1 * time.Hour,
types.GoodbyeCodeBadScore: 2 * time.Hour,
types.GoodbyeCodeBanned: 2 * time.Hour,
types.GoodbyeCodeClientShutdown: 1 * time.Hour,
// Wait 5 minutes before dialing a peer who is
// 'full'
codeTooManyPeers: 5 * time.Minute,
codeGenericError: 2 * time.Minute,
types.GoodbyeCodeTooManyPeers: 5 * time.Minute,
types.GoodbyeCodeGenericError: 2 * time.Minute,
}

// goodbyeRPCHandler reads the incoming goodbye rpc message from the peer.
|
||||
func (s *Service) goodbyeRPCHandler(_ context.Context, msg interface{}, stream libp2pcore.Stream) error {
|
||||
defer func() {
|
||||
if err := stream.Close(); err != nil {
|
||||
log.WithError(err).Error("Failed to close stream")
|
||||
log.WithError(err).Debug("Could not close stream")
|
||||
}
|
||||
}()
|
||||
SetRPCStreamDeadlines(stream)
|
||||
@@ -80,13 +55,25 @@ func (s *Service) goodbyeRPCHandler(_ context.Context, msg interface{}, stream l
|
||||
return s.p2p.Disconnect(stream.Conn().RemotePeer())
|
||||
}
|
||||
|
||||
// disconnectBadPeer checks whether peer is considered bad by some scorer, and tries to disconnect
|
||||
// the peer, if that is the case. Additionally, disconnection reason is obtained from scorer.
|
||||
func (s *Service) disconnectBadPeer(ctx context.Context, id peer.ID) {
|
||||
if !s.p2p.Peers().IsBad(id) {
|
||||
return
|
||||
}
|
||||
goodbyeCode := types.ErrToGoodbyeCode(s.p2p.Peers().Scorers().ValidationError(id))
|
||||
if err := s.sendGoodByeAndDisconnect(ctx, goodbyeCode, id); err != nil {
|
||||
log.Debugf("Error when disconnecting with bad peer: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// A custom goodbye method that is used by our connection handler, in the
|
||||
// event we receive bad peers.
|
||||
func (s *Service) sendGoodbye(ctx context.Context, id peer.ID) error {
|
||||
return s.sendGoodByeAndDisconnect(ctx, codeGenericError, id)
|
||||
return s.sendGoodByeAndDisconnect(ctx, types.GoodbyeCodeGenericError, id)
|
||||
}
|
||||
|
||||
func (s *Service) sendGoodByeAndDisconnect(ctx context.Context, code types.SSZUint64, id peer.ID) error {
|
||||
func (s *Service) sendGoodByeAndDisconnect(ctx context.Context, code types.RPCGoodbyeCode, id peer.ID) error {
|
||||
if err := s.sendGoodByeMessage(ctx, code, id); err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"error": err,
|
||||
@@ -96,7 +83,7 @@ func (s *Service) sendGoodByeAndDisconnect(ctx context.Context, code types.SSZUi
|
||||
return s.p2p.Disconnect(id)
|
||||
}
|
||||
|
||||
func (s *Service) sendGoodByeMessage(ctx context.Context, code types.SSZUint64, id peer.ID) error {
|
||||
func (s *Service) sendGoodByeMessage(ctx context.Context, code types.RPCGoodbyeCode, id peer.ID) error {
|
||||
ctx, cancel := context.WithTimeout(ctx, respTimeout)
|
||||
defer cancel()
|
||||
|
||||
@@ -106,7 +93,7 @@ func (s *Service) sendGoodByeMessage(ctx context.Context, code types.SSZUint64,
|
||||
}
|
||||
defer func() {
|
||||
if err := helpers.FullClose(stream); err != nil && err.Error() != mux.ErrReset.Error() {
|
||||
log.WithError(err).Debugf("Failed to reset stream with protocol %s", stream.Protocol())
|
||||
log.WithError(err).Debugf("Could not reset stream with protocol %s", stream.Protocol())
|
||||
}
|
||||
}()
|
||||
log := log.WithField("Reason", goodbyeMessage(code))
|
||||
@@ -114,17 +101,17 @@ func (s *Service) sendGoodByeMessage(ctx context.Context, code types.SSZUint64,
|
||||
return nil
|
||||
}
|
||||
|
||||
func goodbyeMessage(num types.SSZUint64) string {
|
||||
reason, ok := goodByes[num]
|
||||
func goodbyeMessage(num types.RPCGoodbyeCode) string {
|
||||
reason, ok := types.GoodbyeCodeMessages[num]
|
||||
if ok {
|
||||
return reason
|
||||
}
|
||||
return fmt.Sprintf("unknown goodbye value of %d Received", num)
|
||||
return fmt.Sprintf("unknown goodbye value of %d received", num)
|
||||
}
|
||||
|
||||
// determines which backoff time to use depending on the
// goodbye code provided.
func goodByeBackoff(num types.SSZUint64) time.Time {
func goodByeBackoff(num types.RPCGoodbyeCode) time.Time {
duration, ok := backOffTime[num]
if !ok {
return time.Time{}

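The hunk above is truncated in this view. As a reading aid, here is a minimal, self-contained sketch of the code-to-backoff lookup pattern it implements (map lookup with a zero-time default); the names and values are illustrative, not Prysm's actual API:

package main

import (
	"fmt"
	"time"
)

type goodbyeCode uint64

const (
	codeTooManyPeers goodbyeCode = 129
	codeBadScore     goodbyeCode = 250
)

// backoff maps a goodbye code to how long to wait before redialing the peer.
var backoff = map[goodbyeCode]time.Duration{
	codeTooManyPeers: 5 * time.Minute,
	codeBadScore:     2 * time.Hour,
}

// backoffUntil returns the time before which the peer should not be redialed.
// Unknown codes fall through to the zero time, i.e. no backoff.
func backoffUntil(code goodbyeCode) time.Time {
	d, ok := backoff[code]
	if !ok {
		return time.Time{}
	}
	return time.Now().Add(d)
}

func main() {
	fmt.Println(backoffUntil(codeTooManyPeers))
	fmt.Println(backoffUntil(goodbyeCode(7))) // zero time: no backoff recorded
}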
@@ -11,7 +11,7 @@ import (
|
||||
"github.com/libp2p/go-libp2p-core/protocol"
|
||||
db "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
p2ptest "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing"
|
||||
p2pTypes "github.com/prysmaticlabs/prysm/beacon-chain/p2p/types"
|
||||
p2ptypes "github.com/prysmaticlabs/prysm/beacon-chain/p2p/types"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/require"
|
||||
@@ -43,7 +43,7 @@ func TestGoodByeRPCHandler_Disconnects_With_Peer(t *testing.T) {
|
||||
})
|
||||
stream1, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl)
|
||||
require.NoError(t, err)
|
||||
failureCode := codeClientShutdown
|
||||
failureCode := p2ptypes.GoodbyeCodeClientShutdown
|
||||
|
||||
assert.NoError(t, r.goodbyeRPCHandler(context.Background(), &failureCode, stream1))
|
||||
|
||||
@@ -86,7 +86,7 @@ func TestGoodByeRPCHandler_BackOffPeer(t *testing.T) {
|
||||
})
|
||||
stream1, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl)
|
||||
require.NoError(t, err)
|
||||
failureCode := codeClientShutdown
|
||||
failureCode := p2ptypes.GoodbyeCodeClientShutdown
|
||||
|
||||
assert.NoError(t, r.goodbyeRPCHandler(context.Background(), &failureCode, stream1))
|
||||
|
||||
@@ -113,7 +113,7 @@ func TestGoodByeRPCHandler_BackOffPeer(t *testing.T) {
|
||||
|
||||
stream2, err := p1.BHost.NewStream(context.Background(), p3.BHost.ID(), pcl)
|
||||
require.NoError(t, err)
|
||||
failureCode = codeBanned
|
||||
failureCode = p2ptypes.GoodbyeCodeBanned
|
||||
|
||||
assert.NoError(t, r.goodbyeRPCHandler(context.Background(), &failureCode, stream2))
|
||||
|
||||
@@ -146,7 +146,7 @@ func TestSendGoodbye_SendsMessage(t *testing.T) {
|
||||
p2p: p1,
|
||||
rateLimiter: newRateLimiter(p1),
|
||||
}
|
||||
failureCode := codeClientShutdown
|
||||
failureCode := p2ptypes.GoodbyeCodeClientShutdown
|
||||
|
||||
// Setup streams
|
||||
pcl := protocol.ID("/eth2/beacon_chain/req/goodbye/1/ssz_snappy")
|
||||
@@ -156,7 +156,7 @@ func TestSendGoodbye_SendsMessage(t *testing.T) {
|
||||
wg.Add(1)
|
||||
p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
|
||||
defer wg.Done()
|
||||
out := new(p2pTypes.SSZUint64)
|
||||
out := new(p2ptypes.SSZUint64)
|
||||
assert.NoError(t, r.p2p.Encoding().DecodeWithMaxLength(stream, out))
|
||||
assert.Equal(t, failureCode, *out)
|
||||
assert.NoError(t, stream.Close())
|
||||
@@ -188,7 +188,7 @@ func TestSendGoodbye_DisconnectWithPeer(t *testing.T) {
|
||||
p2p: p1,
|
||||
rateLimiter: newRateLimiter(p1),
|
||||
}
|
||||
failureCode := codeClientShutdown
|
||||
failureCode := p2ptypes.GoodbyeCodeClientShutdown
|
||||
|
||||
// Setup streams
|
||||
pcl := protocol.ID("/eth2/beacon_chain/req/goodbye/1/ssz_snappy")
|
||||
@@ -198,7 +198,7 @@ func TestSendGoodbye_DisconnectWithPeer(t *testing.T) {
|
||||
wg.Add(1)
|
||||
p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
|
||||
defer wg.Done()
|
||||
out := new(p2pTypes.SSZUint64)
|
||||
out := new(p2ptypes.SSZUint64)
|
||||
assert.NoError(t, r.p2p.Encoding().DecodeWithMaxLength(stream, out))
|
||||
assert.Equal(t, failureCode, *out)
|
||||
assert.NoError(t, stream.Close())
|
||||
|
||||
@@ -15,7 +15,7 @@ import (
|
||||
func (s *Service) metaDataHandler(_ context.Context, _ interface{}, stream libp2pcore.Stream) error {
|
||||
defer func() {
|
||||
if err := stream.Close(); err != nil {
|
||||
log.WithError(err).Debug("Failed to close stream")
|
||||
log.WithError(err).Debug("Could not close stream")
|
||||
}
|
||||
}()
|
||||
SetRPCStreamDeadlines(stream)
|
||||
@@ -45,7 +45,7 @@ func (s *Service) sendMetaDataRequest(ctx context.Context, id peer.ID) (*pb.Meta
|
||||
// stream early leads it to a reset.
|
||||
defer func() {
|
||||
if err := helpers.FullClose(stream); isValidStreamError(err) {
|
||||
log.WithError(err).Debugf("Failed to reset stream for protocol %s", stream.Protocol())
|
||||
log.WithError(err).Debugf("Could not reset stream for protocol %s", stream.Protocol())
|
||||
}
|
||||
}()
|
||||
code, errMsg, err := ReadStatusCode(stream, s.p2p.Encoding())
|
||||
|
||||
@@ -22,7 +22,7 @@ func (s *Service) pingHandler(_ context.Context, msg interface{}, stream libp2pc
|
||||
m, ok := msg.(*types.SSZUint64)
|
||||
if !ok {
|
||||
if err := stream.Close(); err != nil {
|
||||
log.WithError(err).Debug("Failed to close stream")
|
||||
log.WithError(err).Debug("Could not close stream")
|
||||
}
|
||||
return fmt.Errorf("wrong message type for ping, got %T, wanted *uint64", msg)
|
||||
}
|
||||
@@ -33,25 +33,25 @@ func (s *Service) pingHandler(_ context.Context, msg interface{}, stream libp2pc
|
||||
valid, err := s.validateSequenceNum(*m, stream.Conn().RemotePeer())
|
||||
if err != nil {
|
||||
// Descore peer for giving us a bad sequence number.
|
||||
if errors.Is(err, errInvalidSequenceNum) {
|
||||
if errors.Is(err, types.ErrInvalidSequenceNum) {
|
||||
s.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
|
||||
s.writeErrorResponseToStream(responseCodeInvalidRequest, seqError, stream)
|
||||
s.writeErrorResponseToStream(responseCodeInvalidRequest, types.ErrInvalidSequenceNum.Error(), stream)
|
||||
}
|
||||
if err := stream.Close(); err != nil {
|
||||
log.WithError(err).Debug("Failed to close stream")
|
||||
log.WithError(err).Debug("Could not close stream")
|
||||
}
|
||||
return err
|
||||
}
|
||||
if _, err := stream.Write([]byte{responseCodeSuccess}); err != nil {
|
||||
if err := stream.Close(); err != nil {
|
||||
log.WithError(err).Debug("Failed to close stream")
|
||||
log.WithError(err).Debug("Could not close stream")
|
||||
}
|
||||
return err
|
||||
}
|
||||
sq := types.SSZUint64(s.p2p.MetadataSeq())
|
||||
if _, err := s.p2p.Encoding().EncodeWithMaxLength(stream, &sq); err != nil {
|
||||
if err := stream.Close(); err != nil {
|
||||
log.WithError(err).Debug("Failed to close stream")
|
||||
log.WithError(err).Debug("Could not close stream")
|
||||
}
|
||||
return err
|
||||
}
|
||||
@@ -59,7 +59,7 @@ func (s *Service) pingHandler(_ context.Context, msg interface{}, stream libp2pc
|
||||
if valid {
|
||||
// If the sequence number was valid we're done.
|
||||
if err := stream.Close(); err != nil {
|
||||
log.WithError(err).Debug("Failed to close stream")
|
||||
log.WithError(err).Debug("Could not close stream")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -68,7 +68,7 @@ func (s *Service) pingHandler(_ context.Context, msg interface{}, stream libp2pc
|
||||
go func() {
|
||||
defer func() {
|
||||
if err := stream.Close(); err != nil {
|
||||
log.WithError(err).Debug("Failed to close stream")
|
||||
log.WithError(err).Debug("Could not close stream")
|
||||
}
|
||||
}()
|
||||
// New context so the calling function doesn't cancel on us.
|
||||
@@ -76,8 +76,11 @@ func (s *Service) pingHandler(_ context.Context, msg interface{}, stream libp2pc
|
||||
defer cancel()
|
||||
md, err := s.sendMetaDataRequest(ctx, stream.Conn().RemotePeer())
|
||||
if err != nil {
|
||||
if !strings.Contains(err.Error(), deadlineError) {
|
||||
log.WithField("peer", stream.Conn().RemotePeer()).WithError(err).Debug("Failed to send metadata request")
|
||||
// We cannot compare errors directly as the stream muxer error
|
||||
// type isn't compatible with the error we have, so a direct
|
||||
// equality checks fails.
|
||||
if !strings.Contains(err.Error(), types.ErrIODeadline.Error()) {
|
||||
log.WithField("peer", stream.Conn().RemotePeer()).WithError(err).Debug("Could not send metadata request")
|
||||
}
|
||||
return
|
||||
}
|
||||
@@ -100,7 +103,7 @@ func (s *Service) sendPingRequest(ctx context.Context, id peer.ID) error {
|
||||
currentTime := timeutils.Now()
|
||||
defer func() {
|
||||
if err := helpers.FullClose(stream); err != nil && err.Error() != mux.ErrReset.Error() {
|
||||
log.WithError(err).Debugf("Failed to reset stream with protocol %s", stream.Protocol())
|
||||
log.WithError(err).Debugf("Could not reset stream with protocol %s", stream.Protocol())
|
||||
}
|
||||
}()
|
||||
|
||||
@@ -122,7 +125,7 @@ func (s *Service) sendPingRequest(ctx context.Context, id peer.ID) error {
|
||||
valid, err := s.validateSequenceNum(*msg, stream.Conn().RemotePeer())
|
||||
if err != nil {
|
||||
// Descore peer for giving us a bad sequence number.
|
||||
if errors.Is(err, errInvalidSequenceNum) {
|
||||
if errors.Is(err, types.ErrInvalidSequenceNum) {
|
||||
s.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
|
||||
}
|
||||
return err
|
||||
@@ -151,7 +154,7 @@ func (s *Service) validateSequenceNum(seq types.SSZUint64, id peer.ID) (bool, er
|
||||
}
|
||||
// Return error on invalid sequence number.
|
||||
if md.SeqNumber > uint64(seq) {
|
||||
return false, errInvalidSequenceNum
|
||||
return false, types.ErrInvalidSequenceNum
|
||||
}
|
||||
return md.SeqNumber == uint64(seq), nil
|
||||
}
|
||||
|
||||
@@ -12,7 +12,7 @@ import (
|
||||
"github.com/libp2p/go-libp2p-core/protocol"
|
||||
db "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
p2ptest "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing"
|
||||
p2pTypes "github.com/prysmaticlabs/prysm/beacon-chain/p2p/types"
|
||||
p2ptypes "github.com/prysmaticlabs/prysm/beacon-chain/p2p/types"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
|
||||
@@ -54,13 +54,13 @@ func TestPingRPCHandler_ReceivesPing(t *testing.T) {
|
||||
p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
|
||||
defer wg.Done()
|
||||
expectSuccess(t, stream)
|
||||
out := new(p2pTypes.SSZUint64)
|
||||
out := new(p2ptypes.SSZUint64)
|
||||
assert.NoError(t, r.p2p.Encoding().DecodeWithMaxLength(stream, out))
|
||||
assert.Equal(t, uint64(2), uint64(*out))
|
||||
})
|
||||
stream1, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl)
|
||||
require.NoError(t, err)
|
||||
seqNumber := p2pTypes.SSZUint64(2)
|
||||
seqNumber := p2ptypes.SSZUint64(2)
|
||||
|
||||
assert.NoError(t, r.pingHandler(context.Background(), &seqNumber, stream1))
|
||||
|
||||
@@ -117,7 +117,7 @@ func TestPingRPCHandler_SendsPing(t *testing.T) {
|
||||
wg.Add(1)
|
||||
p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
|
||||
defer wg.Done()
|
||||
out := new(p2pTypes.SSZUint64)
|
||||
out := new(p2ptypes.SSZUint64)
|
||||
assert.NoError(t, r2.p2p.Encoding().DecodeWithMaxLength(stream, out))
|
||||
assert.Equal(t, uint64(2), uint64(*out))
|
||||
assert.NoError(t, r2.pingHandler(context.Background(), out, stream))
|
||||
@@ -174,14 +174,15 @@ func TestPingRPCHandler_BadSequenceNumber(t *testing.T) {
|
||||
wg.Add(1)
|
||||
p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
|
||||
defer wg.Done()
|
||||
expectFailure(t, responseCodeInvalidRequest, seqError, stream)
|
||||
expectFailure(t, responseCodeInvalidRequest, p2ptypes.ErrInvalidSequenceNum.Error(), stream)
|
||||
|
||||
})
|
||||
stream1, err := p1.BHost.NewStream(context.Background(), p2.BHost.ID(), pcl)
|
||||
require.NoError(t, err)
|
||||
|
||||
wantedSeq := p2pTypes.SSZUint64(p2.LocalMetadata.SeqNumber)
|
||||
assert.ErrorContains(t, seqError, r.pingHandler(context.Background(), &wantedSeq, stream1))
|
||||
wantedSeq := p2ptypes.SSZUint64(p2.LocalMetadata.SeqNumber)
|
||||
err = r.pingHandler(context.Background(), &wantedSeq, stream1)
|
||||
assert.ErrorContains(t, p2ptypes.ErrInvalidSequenceNum.Error(), err)
|
||||
|
||||
if testutil.WaitTimeout(&wg, 1*time.Second) {
|
||||
t.Fatal("Did not receive stream within 1 sec")
|
||||
|
||||
@@ -33,7 +33,7 @@ func SendBeaconBlocksByRangeRequest(
|
||||
}
|
||||
defer func() {
|
||||
if err := streamhelpers.FullClose(stream); err != nil && err.Error() != mux.ErrReset.Error() {
|
||||
log.WithError(err).Debugf("Failed to close stream with protocol %s", stream.Protocol())
|
||||
log.WithError(err).Debugf("Could not close stream with protocol %s", stream.Protocol())
|
||||
}
|
||||
}()
|
||||
|
||||
@@ -89,7 +89,7 @@ func SendBeaconBlocksByRootRequest(
|
||||
}
|
||||
defer func() {
|
||||
if err := streamhelpers.FullClose(stream); err != nil && err.Error() != mux.ErrReset.Error() {
|
||||
log.WithError(err).Debugf("Failed to reset stream with protocol %s", stream.Protocol())
|
||||
log.WithError(err).Debugf("Could not reset stream with protocol %s", stream.Protocol())
|
||||
}
|
||||
}()
|
||||
|
||||
|
||||
@@ -256,7 +256,7 @@ func TestSendRequest_SendBeaconBlocksByRootRequest(t *testing.T) {
|
||||
}
|
||||
}
|
||||
_, err := stream.Write([]byte{0x00})
|
||||
assert.NoError(t, err, "Failed to write to stream")
|
||||
assert.NoError(t, err, "Could not write to stream")
|
||||
_, err = p2pProvider.Encoding().EncodeWithMaxLength(stream, blk)
|
||||
assert.NoError(t, err, "Could not send response back")
|
||||
}
|
||||
|
||||
@@ -15,6 +15,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/flags"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers"
|
||||
p2ptypes "github.com/prysmaticlabs/prysm/beacon-chain/p2p/types"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
"github.com/prysmaticlabs/prysm/shared/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
@@ -41,10 +42,9 @@ func (s *Service) maintainPeerStatuses() {
|
||||
s.p2p.Peers().SetConnectionState(id, peers.PeerDisconnected)
|
||||
return
|
||||
}
|
||||
// Disconnect from peers that are considered bad by any of the registered scorers.
|
||||
if s.p2p.Peers().IsBad(id) {
|
||||
if err := s.sendGoodByeAndDisconnect(s.ctx, codeGenericError, id); err != nil {
|
||||
log.Debugf("Error when disconnecting with bad peer: %v", err)
|
||||
}
|
||||
s.disconnectBadPeer(s.ctx, id)
|
||||
return
|
||||
}
|
||||
// If the status hasn't been updated in the recent interval time.
|
||||
@@ -55,7 +55,7 @@ func (s *Service) maintainPeerStatuses() {
|
||||
}
|
||||
if timeutils.Now().After(lastUpdated.Add(interval)) {
|
||||
if err := s.reValidatePeer(s.ctx, id); err != nil {
|
||||
log.WithField("peer", id).WithError(err).Debug("Failed to revalidate peer")
|
||||
log.WithField("peer", id).WithError(err).Debug("Could not revalidate peer")
|
||||
s.p2p.Peers().Scorers().BadResponsesScorer().Increment(id)
|
||||
}
|
||||
}
|
||||
@@ -64,8 +64,8 @@ func (s *Service) maintainPeerStatuses() {
|
||||
})
|
||||
}
|
||||
|
||||
// resyncIfBehind checks periodically to see if we are in normal sync but have fallen behind our peers by more than an epoch,
// in which case we attempt a resync using the initial sync method to catch up.
// resyncIfBehind checks periodically to see if we are in normal sync but have fallen behind our peers
// by more than an epoch, in which case we attempt a resync using the initial sync method to catch up.
func (s *Service) resyncIfBehind() {
millisecondsPerEpoch := params.BeaconConfig().SecondsPerSlot * params.BeaconConfig().SlotsPerEpoch * 1000
// Run sixteen times per epoch.
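The truncated hunk above sizes a ticker to fire sixteen times per epoch. A small worked example of that arithmetic, assuming mainnet parameters (12-second slots, 32-slot epochs) and an illustrative variable name:

package main

import (
	"fmt"
	"time"
)

func main() {
	// millisecondsPerEpoch = 12 * 32 * 1000 = 384000 ms on mainnet.
	millisecondsPerEpoch := uint64(12 * 32 * 1000)
	// Running sixteen times per epoch means one tick every 384000 / 16 ms.
	interval := time.Duration(millisecondsPerEpoch/16) * time.Millisecond
	fmt.Println(interval) // 24s
}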
@@ -132,7 +132,7 @@ func (s *Service) sendRPCStatusRequest(ctx context.Context, id peer.ID) error {
|
||||
}
|
||||
defer func() {
|
||||
if err := streamhelpers.FullClose(stream); err != nil && err.Error() != mux.ErrReset.Error() {
|
||||
log.WithError(err).Debugf("Failed to reset stream with protocol %s", stream.Protocol())
|
||||
log.WithError(err).Debugf("Could not reset stream with protocol %s", stream.Protocol())
|
||||
}
|
||||
}()
|
||||
|
||||
@@ -142,7 +142,7 @@ func (s *Service) sendRPCStatusRequest(ctx context.Context, id peer.ID) error {
|
||||
}
|
||||
|
||||
if code != 0 {
|
||||
s.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
|
||||
s.p2p.Peers().Scorers().BadResponsesScorer().Increment(id)
|
||||
return errors.New(errMsg)
|
||||
}
|
||||
|
||||
@@ -150,22 +150,18 @@ func (s *Service) sendRPCStatusRequest(ctx context.Context, id peer.ID) error {
|
||||
if err := s.p2p.Encoding().DecodeWithMaxLength(stream, msg); err != nil {
|
||||
return err
|
||||
}
|
||||
s.p2p.Peers().SetChainState(stream.Conn().RemotePeer(), msg)
|
||||
|
||||
// If validation fails, validation error is logged, and peer status scorer will mark peer as bad.
|
||||
err = s.validateStatusMessage(ctx, msg)
|
||||
if err != nil {
|
||||
s.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
|
||||
// Disconnect if on a wrong fork.
|
||||
if errors.Is(err, errWrongForkDigestVersion) {
|
||||
if err := s.sendGoodByeAndDisconnect(ctx, codeWrongNetwork, stream.Conn().RemotePeer()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
s.p2p.Peers().Scorers().PeerStatusScorer().SetPeerStatus(id, msg, err)
|
||||
if s.p2p.Peers().IsBad(id) {
|
||||
s.disconnectBadPeer(s.ctx, id)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *Service) reValidatePeer(ctx context.Context, id peer.ID) error {
|
||||
s.p2p.Peers().Scorers().PeerStatusScorer().SetHeadSlot(s.chain.HeadSlot())
|
||||
if err := s.sendRPCStatusRequest(ctx, id); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -181,7 +177,7 @@ func (s *Service) reValidatePeer(ctx context.Context, id peer.ID) error {
|
||||
func (s *Service) statusRPCHandler(ctx context.Context, msg interface{}, stream libp2pcore.Stream) error {
|
||||
defer func() {
|
||||
if err := stream.Close(); err != nil {
|
||||
log.WithError(err).Debug("Failed to close stream")
|
||||
log.WithError(err).Debug("Could not close stream")
|
||||
}
|
||||
}()
|
||||
ctx, cancel := context.WithTimeout(ctx, ttfbTimeout)
|
||||
@@ -197,50 +193,52 @@ func (s *Service) statusRPCHandler(ctx context.Context, msg interface{}, stream
|
||||
}
|
||||
s.rateLimiter.add(stream, 1)
|
||||
|
||||
remotePeer := stream.Conn().RemotePeer()
|
||||
if err := s.validateStatusMessage(ctx, m); err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"peer": stream.Conn().RemotePeer(),
|
||||
"error": err}).Debug("Invalid status message from peer")
|
||||
"peer": remotePeer,
|
||||
"error": err,
|
||||
}).Debug("Invalid status message from peer")
|
||||
|
||||
respCode := byte(0)
|
||||
switch err {
|
||||
case errGeneric:
|
||||
case p2ptypes.ErrGeneric:
|
||||
respCode = responseCodeServerError
|
||||
case errWrongForkDigestVersion:
|
||||
case p2ptypes.ErrWrongForkDigestVersion:
|
||||
// Respond with our status and disconnect with the peer.
|
||||
s.p2p.Peers().SetChainState(stream.Conn().RemotePeer(), m)
|
||||
s.p2p.Peers().SetChainState(remotePeer, m)
|
||||
if err := s.respondWithStatus(ctx, stream); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := stream.Close(); err != nil { // Close before disconnecting.
|
||||
log.WithError(err).Debug("Failed to close stream")
|
||||
log.WithError(err).Debug("Could not close stream")
|
||||
}
|
||||
if err := s.sendGoodByeAndDisconnect(ctx, codeWrongNetwork, stream.Conn().RemotePeer()); err != nil {
|
||||
if err := s.sendGoodByeAndDisconnect(ctx, p2ptypes.GoodbyeCodeWrongNetwork, remotePeer); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
default:
|
||||
respCode = responseCodeInvalidRequest
|
||||
s.p2p.Peers().Scorers().BadResponsesScorer().Increment(stream.Conn().RemotePeer())
|
||||
s.p2p.Peers().Scorers().BadResponsesScorer().Increment(remotePeer)
|
||||
}
|
||||
|
||||
originalErr := err
|
||||
resp, err := s.generateErrorResponse(respCode, err.Error())
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Failed to generate a response error")
|
||||
log.WithError(err).Debug("Could not generate a response error")
|
||||
} else if _, err := stream.Write(resp); err != nil {
|
||||
// The peer may already be ignoring us, as we disagree on fork version, so log this as debug only.
|
||||
log.WithError(err).Debug("Failed to write to stream")
|
||||
log.WithError(err).Debug("Could not write to stream")
|
||||
}
|
||||
if err := stream.Close(); err != nil { // Close before disconnecting.
|
||||
log.WithError(err).Debug("Failed to close stream")
|
||||
log.WithError(err).Debug("Could not close stream")
|
||||
}
|
||||
if err := s.sendGoodByeAndDisconnect(ctx, codeGenericError, stream.Conn().RemotePeer()); err != nil {
|
||||
if err := s.sendGoodByeAndDisconnect(ctx, p2ptypes.GoodbyeCodeGenericError, remotePeer); err != nil {
|
||||
return err
|
||||
}
|
||||
return originalErr
|
||||
}
|
||||
s.p2p.Peers().SetChainState(stream.Conn().RemotePeer(), m)
|
||||
s.p2p.Peers().SetChainState(remotePeer, m)
|
||||
|
||||
return s.respondWithStatus(ctx, stream)
|
||||
}
|
||||
@@ -264,7 +262,7 @@ func (s *Service) respondWithStatus(ctx context.Context, stream network.Stream)
|
||||
}
|
||||
|
||||
if _, err := stream.Write([]byte{responseCodeSuccess}); err != nil {
|
||||
log.WithError(err).Debug("Failed to write to stream")
|
||||
log.WithError(err).Debug("Could not write to stream")
|
||||
}
|
||||
_, err = s.p2p.Encoding().EncodeWithMaxLength(stream, resp)
|
||||
return err
|
||||
@@ -276,7 +274,7 @@ func (s *Service) validateStatusMessage(ctx context.Context, msg *pb.Status) err
|
||||
return err
|
||||
}
|
||||
if !bytes.Equal(forkDigest[:], msg.ForkDigest) {
|
||||
return errWrongForkDigestVersion
|
||||
return p2ptypes.ErrWrongForkDigestVersion
|
||||
}
|
||||
genesis := s.chain.GenesisTime()
|
||||
finalizedEpoch := s.chain.FinalizedCheckpt().Epoch
|
||||
@@ -288,7 +286,7 @@ func (s *Service) validateStatusMessage(ctx context.Context, msg *pb.Status) err
|
||||
maxFinalizedEpoch = maxEpoch - 2
|
||||
}
|
||||
if msg.FinalizedEpoch > maxFinalizedEpoch {
|
||||
return errInvalidEpoch
|
||||
return p2ptypes.ErrInvalidEpoch
|
||||
}
|
||||
// Exit early if the peer's finalized epoch
|
||||
// is less than that of the remote peer's.
|
||||
@@ -302,14 +300,14 @@ func (s *Service) validateStatusMessage(ctx context.Context, msg *pb.Status) err
|
||||
return nil
|
||||
}
|
||||
if !s.db.IsFinalizedBlock(ctx, bytesutil.ToBytes32(msg.FinalizedRoot)) {
|
||||
return errInvalidFinalizedRoot
|
||||
return p2ptypes.ErrInvalidFinalizedRoot
|
||||
}
|
||||
blk, err := s.db.Block(ctx, bytesutil.ToBytes32(msg.FinalizedRoot))
|
||||
if err != nil {
|
||||
return errGeneric
|
||||
return p2ptypes.ErrGeneric
|
||||
}
|
||||
if blk == nil {
|
||||
return errGeneric
|
||||
return p2ptypes.ErrGeneric
|
||||
}
|
||||
if helpers.SlotToEpoch(blk.Block.Slot) == msg.FinalizedEpoch {
|
||||
return nil
|
||||
@@ -317,12 +315,12 @@ func (s *Service) validateStatusMessage(ctx context.Context, msg *pb.Status) err
|
||||
|
||||
startSlot, err := helpers.StartSlot(msg.FinalizedEpoch)
|
||||
if err != nil {
|
||||
return errGeneric
|
||||
return p2ptypes.ErrGeneric
|
||||
}
|
||||
if startSlot > blk.Block.Slot {
|
||||
childBlock, err := s.db.FinalizedChildBlock(ctx, bytesutil.ToBytes32(msg.FinalizedRoot))
|
||||
if err != nil {
|
||||
return errGeneric
|
||||
return p2ptypes.ErrGeneric
|
||||
}
|
||||
// Is a valid finalized block if no
|
||||
// other child blocks exist yet.
|
||||
@@ -332,9 +330,9 @@ func (s *Service) validateStatusMessage(ctx context.Context, msg *pb.Status) err
|
||||
// If child finalized block also has a smaller or
|
||||
// equal slot number we return an error.
|
||||
if startSlot >= childBlock.Block.Slot {
|
||||
return errInvalidEpoch
|
||||
return p2ptypes.ErrInvalidEpoch
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return errInvalidEpoch
|
||||
return p2ptypes.ErrInvalidEpoch
|
||||
}
|
||||
|
||||
@@ -18,7 +18,7 @@ import (
|
||||
testingDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers"
|
||||
p2ptest "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing"
|
||||
p2pTypes "github.com/prysmaticlabs/prysm/beacon-chain/p2p/types"
|
||||
p2ptypes "github.com/prysmaticlabs/prysm/beacon-chain/p2p/types"
|
||||
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
mockSync "github.com/prysmaticlabs/prysm/beacon-chain/sync/initial-sync/testing"
|
||||
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
|
||||
@@ -76,9 +76,9 @@ func TestStatusRPCHandler_Disconnects_OnForkVersionMismatch(t *testing.T) {
|
||||
wg2.Add(1)
|
||||
p2.BHost.SetStreamHandler(pcl2, func(stream network.Stream) {
|
||||
defer wg2.Done()
|
||||
msg := new(p2pTypes.SSZUint64)
|
||||
msg := new(p2ptypes.SSZUint64)
|
||||
assert.NoError(t, r.p2p.Encoding().DecodeWithMaxLength(stream, msg))
|
||||
assert.Equal(t, codeWrongNetwork, *msg)
|
||||
assert.Equal(t, p2ptypes.GoodbyeCodeWrongNetwork, *msg)
|
||||
assert.NoError(t, stream.Close())
|
||||
})
|
||||
|
||||
@@ -324,7 +324,7 @@ func TestHandshakeHandlers_Roundtrip(t *testing.T) {
|
||||
wg2.Add(1)
|
||||
p2.BHost.SetStreamHandler(pcl, func(stream network.Stream) {
|
||||
defer wg2.Done()
|
||||
out := new(p2pTypes.SSZUint64)
|
||||
out := new(p2ptypes.SSZUint64)
|
||||
assert.NoError(t, r.p2p.Encoding().DecodeWithMaxLength(stream, out))
|
||||
assert.Equal(t, uint64(2), uint64(*out))
|
||||
assert.NoError(t, r2.pingHandler(context.Background(), out, stream))
|
||||
@@ -758,12 +758,13 @@ func TestStatusRPCRequest_BadPeerHandshake(t *testing.T) {
|
||||
FinalizedRoot: finalizedRoot[:],
|
||||
}
|
||||
if _, err := stream.Write([]byte{responseCodeSuccess}); err != nil {
|
||||
log.WithError(err).Error("Failed to write to stream")
|
||||
log.WithError(err).Debug("Could not write to stream")
|
||||
}
|
||||
_, err := r.p2p.Encoding().EncodeWithMaxLength(stream, expected)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
|
||||
assert.Equal(t, false, p1.Peers().Scorers().IsBadPeer(p2.PeerID()), "Peer is marked as bad")
|
||||
p1.Connect(p2)
|
||||
|
||||
if testutil.WaitTimeout(&wg, time.Second) {
|
||||
@@ -772,12 +773,10 @@ func TestStatusRPCRequest_BadPeerHandshake(t *testing.T) {
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
connectionState, err := p1.Peers().ConnectionState(p2.PeerID())
|
||||
require.NoError(t, err, "Failed to obtain peer connection state")
|
||||
require.NoError(t, err, "Could not obtain peer connection state")
|
||||
assert.Equal(t, peers.PeerDisconnected, connectionState, "Expected peer to be disconnected")
|
||||
|
||||
badResponses, err := p1.Peers().Scorers().BadResponsesScorer().Count(p2.PeerID())
|
||||
require.NoError(t, err, "Failed to obtain peer connection state")
|
||||
assert.Equal(t, 1, badResponses, "Bad response was not bumped to one")
|
||||
assert.Equal(t, true, p1.Peers().Scorers().IsBadPeer(p2.PeerID()), "Peer is not marked as bad")
|
||||
}
|
||||
|
||||
func TestStatusRPC_ValidGenesisMessage(t *testing.T) {
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
lru "github.com/hashicorp/golang-lru"
|
||||
"github.com/libp2p/go-libp2p-core/peer"
|
||||
"github.com/libp2p/go-libp2p-core/protocol"
|
||||
gcache "github.com/patrickmn/go-cache"
|
||||
"github.com/pkg/errors"
|
||||
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
|
||||
@@ -30,6 +31,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
"github.com/prysmaticlabs/prysm/shared"
|
||||
"github.com/prysmaticlabs/prysm/shared/abool"
|
||||
"github.com/prysmaticlabs/prysm/shared/params"
|
||||
"github.com/prysmaticlabs/prysm/shared/runutil"
|
||||
"github.com/prysmaticlabs/prysm/shared/timeutils"
|
||||
)
|
||||
@@ -46,6 +48,8 @@ const badBlockSize = 1000

const syncMetricsInterval = 10 * time.Second

var pendingBlockExpTime = time.Duration(params.BeaconConfig().SlotsPerEpoch*params.BeaconConfig().SecondsPerSlot) * time.Second // Seconds in one epoch.

// Config to set up the regular sync service.
type Config struct {
P2P p2p.P2P
@@ -85,7 +89,7 @@ type Service struct {
exitPool *voluntaryexits.Pool
slashingPool *slashings.Pool
chain blockchainService
slotToPendingBlocks map[uint64][]*ethpb.SignedBeaconBlock
slotToPendingBlocks *gcache.Cache
seenPendingBlocks map[[32]byte]bool
blkRootToPendingAtts map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof
pendingAttsLock sync.RWMutex
@@ -115,6 +119,8 @@ type Service struct {

// NewService initializes new regular sync service.
func NewService(ctx context.Context, cfg *Config) *Service {
c := gcache.New(pendingBlockExpTime /* exp time */, 2*pendingBlockExpTime /* prune time */)

rLimiter := newRateLimiter(cfg.P2P)
ctx, cancel := context.WithCancel(ctx)
r := &Service{
@@ -129,7 +135,7 @@ func NewService(ctx context.Context, cfg *Config) *Service {
chain: cfg.Chain,
initialSync: cfg.InitialSync,
attestationNotifier: cfg.AttestationNotifier,
slotToPendingBlocks: make(map[uint64][]*ethpb.SignedBeaconBlock),
slotToPendingBlocks: c,
seenPendingBlocks: make(map[[32]byte]bool),
blkRootToPendingAtts: make(map[[32]byte][]*ethpb.SignedAggregateAttestationAndProof),
stateNotifier: cfg.StateNotifier,
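The switch from a plain map to patrickmn/go-cache gives the pending-block queue automatic expiration after roughly one epoch. A minimal sketch of that usage, under the assumptions that the cache key is the slot formatted as a string and the value is a slice of blocks (this mirrors the shape of the change, not Prysm's exact helpers):

package main

import (
	"fmt"
	"strconv"
	"time"

	gcache "github.com/patrickmn/go-cache"
)

// block stands in for *ethpb.SignedBeaconBlock in this sketch.
type block struct{ slot uint64 }

func main() {
	// Entries expire after one epoch's worth of seconds (12 * 32 on mainnet)
	// and are pruned at twice that interval, mirroring pendingBlockExpTime.
	exp := 12 * 32 * time.Second
	pending := gcache.New(exp, 2*exp)

	// go-cache keys are strings, so the slot is stringified.
	key := strconv.FormatUint(7, 10)

	// Append to any existing entry for this slot, then store it back.
	var blks []*block
	if cached, ok := pending.Get(key); ok {
		blks = cached.([]*block)
	}
	blks = append(blks, &block{slot: 7})
	pending.Set(key, blks, gcache.DefaultExpiration)

	if cached, ok := pending.Get(key); ok {
		fmt.Println("pending blocks for slot 7:", len(cached.([]*block)))
	}
}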
@@ -190,12 +196,6 @@ func (s *Service) Stop() error {
|
||||
|
||||
// Status of the currently running regular sync service.
|
||||
func (s *Service) Status() error {
|
||||
if s.chainStarted.IsNotSet() {
|
||||
return errors.New("chain not yet started")
|
||||
}
|
||||
if s.initialSync.Syncing() {
|
||||
return errors.New("waiting for initial sync")
|
||||
}
|
||||
// If our head slot is on a previous epoch and our peers are reporting their head block are
|
||||
// in the most recent epoch, then we might be out of sync.
|
||||
if headEpoch := helpers.SlotToEpoch(s.chain.HeadSlot()); headEpoch+1 < helpers.SlotToEpoch(s.chain.CurrentSlot()) &&
|
||||
@@ -284,7 +284,7 @@ func (s *Service) registerHandlers() {
|
||||
log.Debug("Context closed, exiting goroutine")
|
||||
return
|
||||
case err := <-stateSub.Err():
|
||||
log.WithError(err).Error("Subscription to state notifier failed")
|
||||
log.WithError(err).Error("Could not subscribe to state notifier")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
gcache "github.com/patrickmn/go-cache"
|
||||
mockChain "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
|
||||
@@ -86,12 +87,13 @@ func TestSyncHandlers_WaitForChainStart(t *testing.T) {
ValidatorsRoot: [32]byte{'A'},
}
r := Service{
ctx: context.Background(),
p2p: p2p,
chain: chainService,
stateNotifier: chainService.StateNotifier(),
initialSync: &mockSync.Sync{IsSyncing: false},
chainStarted: abool.New(),
ctx: context.Background(),
p2p: p2p,
chain: chainService,
stateNotifier: chainService.StateNotifier(),
initialSync: &mockSync.Sync{IsSyncing: false},
chainStarted: abool.New(),
slotToPendingBlocks: gcache.New(time.Second, 2*time.Second),
}

go r.registerHandlers()